Fix deprecation warnings #38
@@ -35,11 +35,11 @@ def setUp(self):
self.index = self.IndexFactory(self.lexicon)

def test_index_document(self, docid=1):
- doc = "simple document contains five words"
- self.assert_(not self.index.has_doc(docid))
+ doc = 'simple document contains five words'
+ self.assertTrue(not self.index.has_doc(docid))
Review comment: This could be `assertFalse(self.index.has_doc(docid))`.
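The suggestion above appears to be `assertFalse`; a minimal stand-alone sketch (hypothetical values, plain `unittest`, not code from this repository) of the two spellings:

```python
import unittest


class AssertFalseSketch(unittest.TestCase):
    """Hypothetical example, not part of the ZCTextIndex test suite."""

    def test_doc_absent(self):
        has_doc = False  # stands in for self.index.has_doc(docid)
        # Both assertions check the same condition; assertFalse states
        # the intent directly and prints the offending value on failure.
        self.assertTrue(not has_doc)
        self.assertFalse(has_doc)


if __name__ == '__main__':
    unittest.main()
```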
self.index.index_doc(docid, doc)
- self.assert_(self.index.has_doc(docid))
- self.assert_(self.index._docweight[docid])
+ self.assertTrue(self.index.has_doc(docid))
+ self.assertTrue(self.index._docweight[docid])
self.assertEqual(len(self.index._docweight), 1)
self.assertEqual(
len(self.index._docweight), self.index.document_count())

@@ -50,7 +50,7 @@ def test_index_document(self, docid=1):
self.index.length())
for map in self.index._wordinfo.values():
self.assertEqual(len(map), 1)
- self.assert_(docid in map)
+ self.assertTrue(docid in map)
Review comment: This could have been rewritten as `self.assertIn(docid, map)`.
Review comment: +1
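A short sketch of the suggested `assertIn` form (hypothetical data, plain `unittest`, not code from this repository); its advantage is the failure message, which names the member and the container instead of a bare `False is not true`:

```python
import unittest


class AssertInSketch(unittest.TestCase):
    """Hypothetical example, not part of the ZCTextIndex test suite."""

    def test_docid_in_map(self):
        docid = 1
        word_map = {1: 0.5, 2: 0.25}  # stands in for a _wordinfo entry
        # Equivalent checks; on failure assertIn reports the missing
        # member and the container, assertTrue only "False is not true".
        self.assertTrue(docid in word_map)
        self.assertIn(docid, word_map)


if __name__ == '__main__':
    unittest.main()
```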
def test_unindex_document(self):
docid = 1

@@ -66,10 +66,10 @@ def test_unindex_document(self):

def test_index_two_documents(self):
self.test_index_document()
- doc = "another document just four"
+ doc = 'another document just four'
docid = 2
self.index.index_doc(docid, doc)
- self.assert_(self.index._docweight[docid])
+ self.assertTrue(self.index._docweight[docid])
self.assertEqual(len(self.index._docweight), 2)
self.assertEqual(
len(self.index._docweight), self.index.document_count())

@@ -78,14 +78,14 @@ def test_index_two_documents(self):
self.assertEqual(len(self.index.get_words(docid)), 4)
self.assertEqual(len(self.index._wordinfo),
self.index.length())
- wids = self.lexicon.termToWordIds("document")
+ wids = self.lexicon.termToWordIds('document')
self.assertEqual(len(wids), 1)
document_wid = wids[0]
for wid, map in self.index._wordinfo.items():
if wid == document_wid:
self.assertEqual(len(map), 2)
- self.assert_(1 in map)
- self.assert_(docid in map)
+ self.assertTrue(1 in map)
+ self.assertTrue(docid in map)
Review comment: These two lines could also use `assertIn`.
Review comment: +1
else:
self.assertEqual(len(map), 1)

@@ -97,62 +97,62 @@ def test_index_two_unindex_one(self):
self.assertEqual(len(self.index._docweight), 1)
self.assertEqual(
len(self.index._docweight), self.index.document_count())
- self.assert_(self.index._docweight[docid])
+ self.assertTrue(self.index._docweight[docid])
self.assertEqual(len(self.index._wordinfo), 4)
self.assertEqual(len(self.index._docwords), 1)
self.assertEqual(len(self.index.get_words(docid)), 4)
self.assertEqual(len(self.index._wordinfo),
self.index.length())
for map in self.index._wordinfo.values():
self.assertEqual(len(map), 1)
- self.assert_(docid in map)
+ self.assertTrue(docid in map)
Review comment: This line could also use `assertIn`.
def test_index_duplicated_words(self, docid=1):
- doc = "very simple repeat repeat repeat document test"
+ doc = 'very simple repeat repeat repeat document test'
self.index.index_doc(docid, doc)
- self.assert_(self.index._docweight[docid])
+ self.assertTrue(self.index._docweight[docid])
self.assertEqual(len(self.index._wordinfo), 5)
self.assertEqual(len(self.index._docwords), 1)
self.assertEqual(len(self.index.get_words(docid)), 7)
self.assertEqual(len(self.index._wordinfo),
self.index.length())
self.assertEqual(
len(self.index._docweight), self.index.document_count())
- wids = self.lexicon.termToWordIds("repeat")
+ wids = self.lexicon.termToWordIds('repeat')
self.assertEqual(len(wids), 1)
for wid, map in self.index._wordinfo.items():
self.assertEqual(len(map), 1)
- self.assert_(docid in map)
+ self.assertTrue(docid in map)
Review comment: This line could also use `assertIn`.
def test_simple_query_oneresult(self):
self.index.index_doc(1, 'not the same document')
- results = self.index.search("document")
+ results = self.index.search('document')
self.assertEqual(list(results.keys()), [1])

def test_simple_query_noresults(self):
self.index.index_doc(1, 'not the same document')
- results = self.index.search("frobnicate")
+ results = self.index.search('frobnicate')
self.assertEqual(list(results.keys()), [])

def test_query_oneresult(self):
self.index.index_doc(1, 'not the same document')
self.index.index_doc(2, 'something about something else')
- results = self.index.search("document")
+ results = self.index.search('document')
self.assertEqual(list(results.keys()), [1])

def test_search_phrase(self):
- self.index.index_doc(1, "the quick brown fox jumps over the lazy dog")
- self.index.index_doc(2, "the quick fox jumps lazy over the brown dog")
- results = self.index.search_phrase("quick brown fox")
+ self.index.index_doc(1, 'the quick brown fox jumps over the lazy dog')
+ self.index.index_doc(2, 'the quick fox jumps lazy over the brown dog')
+ results = self.index.search_phrase('quick brown fox')
self.assertEqual(list(results.keys()), [1])

def test_search_glob(self):
- self.index.index_doc(1, "how now brown cow")
- self.index.index_doc(2, "hough nough browne cough")
- self.index.index_doc(3, "bar brawl")
- results = self.index.search_glob("bro*")
+ self.index.index_doc(1, 'how now brown cow')
+ self.index.index_doc(2, 'hough nough browne cough')
+ self.index.index_doc(3, 'bar brawl')
+ results = self.index.search_glob('bro*')
self.assertEqual(list(results.keys()), [1, 2])
- results = self.index.search_glob("b*")
+ results = self.index.search_glob('b*')
self.assertEqual(list(results.keys()), [1, 2, 3])

@@ -174,7 +174,7 @@ def tearDown(self):
self.storage.cleanup()

def openDB(self):
- n = 'fs_tmp__%s' % os.getpid()
+ n = 'fs_tmp__{0}'.format(os.getpid())
self.storage = FileStorage(n)
self.db = DB(self.storage)

@@ -271,12 +271,12 @@ def test_upgrade_document_count(self):
del self.index1.document_count
self.index1.index_doc(1, 'gazes upon my shadow')
self.index2.index_doc(1, 'gazes upon my shadow')
- self.assert_(self.index1.document_count.__class__ is Length)
+ self.assertTrue(self.index1.document_count.__class__ is Length)
Review comment: This line could also use a more specific assertion such as `assertIsInstance`.
Review comment: +1
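The original suggestion is cut off; for an exact-class check like this one, `assertIs` on `__class__` keeps the same semantics, while `assertIsInstance` also accepts subclasses. A sketch with a stand-in class (hypothetical, not code from this repository):

```python
import unittest


class Length(object):
    """Stand-in for the Length class used by the real index."""


class ClassCheckSketch(unittest.TestCase):
    """Hypothetical example, not part of the ZCTextIndex test suite."""

    def test_document_count_class(self):
        document_count = Length()
        # Same semantics as `document_count.__class__ is Length`:
        self.assertIs(document_count.__class__, Length)
        # Looser variant that also passes for subclasses of Length:
        self.assertIsInstance(document_count, Length)


if __name__ == '__main__':
    unittest.main()
```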
self.assertEqual(
self.index1.document_count(), self.index2.document_count())
del self.index1.document_count
self.index1.unindex_doc(0)
self.index2.unindex_doc(0)
- self.assert_(self.index1.document_count.__class__ is Length)
+ self.assertTrue(self.index1.document_count.__class__ is Length)
Review comment: This line could also use a more specific assertion such as `assertIsInstance`.
self.assertEqual(
self.index1.document_count(), self.index2.document_count())
@@ -110,7 +110,7 @@ def testTermToWordIds(self):
wids = lexicon.sourceToWordIds('cats and dogs')
wids = lexicon.termToWordIds('dogs')
self.assertEqual(len(wids), 1)
- self.assert_(wids[0] > 0)
+ self.assertTrue(wids[0] > 0)
Review comment: There is `assertGreater` for this.
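A minimal sketch of the `assertGreater` form (hypothetical word id, plain `unittest`, not code from this repository); it reports both operands when the comparison fails:

```python
import unittest


class AssertGreaterSketch(unittest.TestCase):
    """Hypothetical example, not part of the ZCTextIndex test suite."""

    def test_wid_is_positive(self):
        wids = [42]  # stands in for lexicon.termToWordIds('dogs')
        # On failure this reports something like "0 not greater than 0"
        # instead of the bare "False is not true" from assertTrue.
        self.assertGreater(wids[0], 0)


if __name__ == '__main__':
    unittest.main()
```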
def testMissingTermToWordIds(self):
from Products.ZCTextIndex.Lexicon import Splitter

@@ -132,7 +132,7 @@ def process_post_glob(self, lst):
wids = lexicon.sourceToWordIds('cats and dogs')
wids = lexicon.termToWordIds('dogs')
self.assertEqual(len(wids), 1)
- self.assert_(wids[0] > 0)
+ self.assertTrue(wids[0] > 0)
Review comment: This could also be called with `assertGreater`.
def testMissingTermToWordIdsWithProcess_post_glob(self):
"""This test is for added process_post_glob"""

@@ -155,7 +155,7 @@ def testOnePipelineElement(self):
wids = lexicon.sourceToWordIds('cats and dogs')
wids = lexicon.termToWordIds('fish')
self.assertEqual(len(wids), 1)
- self.assert_(wids[0] > 0)
+ self.assertTrue(wids[0] > 0)
Review comment: This could also be called with `assertGreater`.
def testSplitterAdaptorFold(self):
from Products.ZCTextIndex.Lexicon import CaseNormalizer

@@ -188,7 +188,7 @@ def testTwoElementPipeline(self):
wids = lexicon.sourceToWordIds('cats and dogs')
wids = lexicon.termToWordIds('hsif')
self.assertEqual(len(wids), 1)
- self.assert_(wids[0] > 0)
+ self.assertTrue(wids[0] > 0)
Review comment: This could also be called with `assertGreater`.
def testThreeElementPipeline(self):
from Products.ZCTextIndex.Lexicon import Splitter

@@ -201,7 +201,7 @@ def testThreeElementPipeline(self):
wids = lexicon.sourceToWordIds('cats and dogs')
wids = lexicon.termToWordIds('hsif')
self.assertEqual(len(wids), 1)
- self.assert_(wids[0] > 0)
+ self.assertTrue(wids[0] > 0)
Review comment: This could also be called with `assertGreater`.
def testSplitterLocaleAwareness(self):
import locale

@@ -219,7 +219,7 @@ def testSplitterLocaleAwareness(self):
return # This test doesn't work here :-(
expected = ['m\xfclltonne', 'waschb\xe4r',
'beh\xf6rde', '\xfcberflieger']
- words = [" ".join(expected)]
+ words = [' '.join(expected)]
words = Splitter().process(words)
self.assertEqual(words, expected)
words = HTMLWordSplitter().process(words)

@@ -247,29 +247,29 @@ def openDB(self):
from ZODB.DB import DB
from ZODB.FileStorage import FileStorage

- n = 'fs_tmp__%s' % os.getpid()
+ n = 'fs_tmp__{0}'.format(os.getpid())
self.storage = FileStorage(n)
self.db = DB(self.storage)

def testAddWordConflict(self):
from Products.ZCTextIndex.Lexicon import Splitter

- self.l = self._makeOne(Splitter())
+ self.lex = self._makeOne(Splitter())
self.openDB()
r1 = self.db.open().root()
- r1['l'] = self.l
+ r1['lex'] = self.lex
transaction.commit()

r2 = self.db.open().root()
- copy = r2['l']
+ copy = r2['lex']
# Make sure the data is loaded
list(copy._wids.items())
list(copy._words.items())
copy.length()

- self.assertEqual(self.l._p_serial, copy._p_serial)
+ self.assertEqual(self.lex._p_serial, copy._p_serial)

- self.l.sourceToWordIds('mary had a little lamb')
+ self.lex.sourceToWordIds('mary had a little lamb')
transaction.commit()

copy.sourceToWordIds('whose fleece was')
Review comment: `flake8-plone-api` should not be used on this repo, so ignoring `P001` should not be necessary.
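For illustration only, a minimal `setup.cfg` sketch of what the comment implies; the section contents here are assumptions, not this repository's actual configuration. With `flake8-plone-api` left out of the tooling, flake8 never emits `P001`, so nothing needs to be ignored:

```ini
# setup.cfg (hypothetical sketch, not this repository's real configuration)
[flake8]
# flake8-plone-api is not installed for this project, so P001 is never
# raised and does not need an ignore entry:
# ignore = P001
```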