Showing 204 of 204 total issues
Function `add_header_vert` has 6 arguments (exceeds 4 allowed). Consider refactoring. Open
Open
def add_header_vert(
Avoid deeply nested control flow statements. Open
Open
if not self.isRowHidden(row):
item = self.model().item(row, col)
val_cum += item.val
item.setText(str(val_cum))
Avoid deeply nested control flow statements. Open
Open
for col in cols:
if self.table.model().item(row, col):
cell_text = self.table.model().item(row, col).text()
else:
cell_text = self.table.indexWidget(self.table.model().index(row, col)).text()
Avoid deeply nested control flow statements. Open
Open
if ngram[i : i + len_search_term] == search_term:
ngrams_is_filtered.append((ngram, ngram_i))
# Check context settings
ngrams_is = (
Avoid deeply nested control flow statements. Open
Open
for word in tr.split():
add_val_to_trs(trs_lexicon, word, vals)
else:
Avoid deeply nested control flow statements. Open
Open
for j, collocate in enumerate(reversed(tags_left)):
if wl_matching.check_context(
i, tokens,
context_settings = settings['search_settings']['context_settings'],
search_terms_incl = search_terms_incl,
Avoid deeply nested control flow statements. Open
Open
for token in sentence_seg:
token.tag = token.tag.lower()
# Words
if settings['words']:
Avoid deeply nested control flow statements. Open
Open
for sentence in doc.sentences:
for token in sentence.words:
texts_tagged.append(token.text)
if tagset in ['default', 'raw']:
Avoid deeply nested control flow statements. Open
Open
for doc in nlp.pipe(docs):
for token in doc:
texts_tagged.append(token.text)
if tagset in ['default', 'raw']:
Avoid deeply nested control flow statements. Open
Open
if (
ngram in search_terms
and wl_matching.check_context(
j, tokens,
context_settings = settings['search_settings']['context_settings'],
Avoid deeply nested control flow statements. Open
Open
for k, ngram in enumerate(wl_nlp_utils.ngrams(text.get_tokens_flat(), len_search_term)):
if ngram == search_term:
points.append([x_start + k, i])
Avoid deeply nested control flow statements. Open
Open
if lens.any():
self.set_item_num(row, i, numpy.mean(lens))
self.set_item_num(row + 1, i, numpy.std(lens))
self.set_item_num(row + 2, i, numpy.var(lens))
self.set_item_num(row + 3, i, numpy.min(lens))
Avoid deeply nested control flow statements. Open
Open
if not self.isRowHidden(row):
item = self.model().item(row, col)
item.setText(f'{item.val:.{precision_pcts}%}')
elif self.header_orientation == 'vert':
Avoid deeply nested control flow statements. Open
Open
for collocate in range(10):
collocate = wl_texts.Wl_Token(str(collocate))
stat_files_items[(node, collocate)] = [
random.uniform(0, val_max),
random.uniform(0, val_max),
Avoid deeply nested control flow statements. Open
Open
for tokens in wl_nlp_utils.split_token_list(main, texts, pos_tagger):
# The Japanese model does not have a tagger component, and Japanese POS tags are taken directly from SudachiPy
# See: https://github.com/explosion/spaCy/discussions/9983#discussioncomment-1910117
if lang == 'jpn':
docs.append(''.join(tokens))
Avoid deeply nested control flow statements. Open
Open
if token_properties:
i_tag_start += len(doc)
else:
Avoid deeply nested control flow statements. Open
Open
if line and line not in items_cur:
items_to_imp.append(line)
Function `__init__` has 6 arguments (exceeds 4 allowed). Consider refactoring. Open
Open
def __init__(self, main, title, width = 0, height = 0, resizable = True, no_buttons = False):
Avoid deeply nested control flow statements. Open
Open
if ngram == search_term:
colligations_freqs_file_filtered[(node, collocate)] = freqs
Avoid deeply nested control flow statements. Open
Open
for sentence in wl_sentence_tokenization.wl_sentence_split(self.main, para):
self.tokens_multilevel[-1].append([])
for sentence_seg in wl_sentence_tokenization.wl_sentence_seg_tokenize_tokens(
self.main,