Showing 205 of 205 total issues
Avoid deeply nested control flow statements. Open
Open
for j, count in enumerate(counts):
self.set_item_num(
row = self.model().rowCount() - 2,
col = j,
val = count
Avoid deeply nested control flow statements. Open
Open
for col in cols:
row_to_exp.append(self.table.model().item(row, col).text())
Avoid deeply nested control flow statements. Open
Open
if ngram[i : i + len_search_term] == search_term:
ngrams_is_filtered.append((ngram, ngram_i))
# Check context settings
ngrams_is = (
Avoid deeply nested control flow statements. Open
Open
for i, langs in enumerate(langs_nlp_utils):
# Sentence/word tokenization
if i <= 1:
if lang_code_639_3 in langs:
doc_supported_lang += '|✔'
Avoid deeply nested control flow statements. Open
Open
for item in range(100):
item = wl_texts.Wl_Token(str(item))
freq_1, freq_2 = random.sample(range(100), 2)
freq_files_items[item] = [
Avoid deeply nested control flow statements. Open
Open
if settings_limit_searching == _tr('Wl_Worker_Colligation_Extractor', 'None'):
tags_right = text.tags[i + ngram_size + window_left - 1 : i + ngram_size + window_right]
else:
# Span positions (Right)
for position in range(i + ngram_size + window_left - 1, i + ngram_size + window_right):
Avoid deeply nested control flow statements. Open
Open
for j, collocate in enumerate(tags_right):
if wl_matching.check_context(
i, tokens,
context_settings = settings['search_settings']['context_settings'],
search_terms_incl = search_terms_incl,
Avoid deeply nested control flow statements. Open
Open
if tuple(tokens[i + j : i + j + len(search_term)]) == tuple(search_term):
incl_matched = True
break
# Search terms to be included not found in texts
Avoid deeply nested control flow statements. Open
Open
for sentence_seg in sentence:
for i, token in enumerate(sentence_seg):
if token.islower():
sentence_seg[i] = wl_texts.Wl_Token('')
# Uppercase
Avoid deeply nested control flow statements. Open
Open
for doc in nlp.pipe(lines):
for token in doc:
texts_tagged.append(token.text)
if tagset in ['default', 'raw']:
Avoid deeply nested control flow statements. Open
Open
for tokens in wl_nlp_utils.split_token_list(main, texts, pos_tagger):
# The Japanese model does not have a tagger component and Japanese POS tags are taken directly from SudachiPy
# See: https://github.com/explosion/spaCy/discussions/9983#discussioncomment-1910117
if lang == 'jpn':
docs.append(''.join(tokens))
Avoid deeply nested control flow statements. Open
Open
if freqs_totals[j][k]:
self.set_item_num(i, cols_freqs_start[j] + k * 2 + 1, freq / freqs_totals[j][k])
else:
self.set_item_num(i, cols_freqs_start[j] + k * 2 + 1, 0)
Avoid deeply nested control flow statements. Open
Open
for sentence in sentences:
tokens_multilevel[-1].append(main.nltk_nist_tokenizer.international_tokenize(sentence))
case 'nltk_nltk':
Avoid deeply nested control flow statements. Open
Open
if not self.isRowHidden(row):
item = self.model().item(row, col)
item.setText(str(item.val))
# Floats
Avoid deeply nested control flow statements. Open
Open
if token_properties:
i_tag_start += len(doc)
Avoid deeply nested control flow statements. Open
Open
if settings_limit_searching == _tr('Wl_Worker_Collocation_Extractor', 'None'):
tokens_right = tokens[i + ngram_size + window_left - 1 : i + ngram_size + window_right]
else:
# Span positions (Right)
for position in range(i + ngram_size + window_left - 1, i + ngram_size + window_right):
Avoid deeply nested control flow statements. Open
Open
if settings_limit_searching == _tr('Wl_Worker_Collocation_Extractor', 'Within sentence segments'):
offsets_unit = offsets_sentence_segs
len_unit = len_sentence_segs
elif settings_limit_searching == _tr('Wl_Worker_Collocation_Extractor', 'Within sentences'):
offsets_unit = offsets_sentences
Avoid deeply nested control flow statements. Open
Open
with open(file_settings_display_lang, 'wb') as f:
pickle.dump(action.lang, f)
# Remove settings file
if os.path.exists(file_settings):
Avoid deeply nested control flow statements. Open
Open
for j, collocate in enumerate(reversed(tokens_left)):
if wl_matching.check_context(
i, tokens,
context_settings = settings['search_settings']['context_settings'],
search_terms_incl = search_terms_incl,
Avoid deeply nested control flow statements. Open
Open
for file in glob.glob(os.path.join(
self.settings_custom['general']['imp']['temp_files']['default_path'], '*.*'
)):
os.remove(file)