Showing 206 of 206 total issues
Function `__init__` has 6 arguments (exceeds 4 allowed). Consider refactoring.
def __init__(
Avoid deeply nested control flow statements.
for col in cols:
if self.table.model().item(row, col):
cell_text = self.table.model().item(row, col).text()
else:
cell_text = self.table.indexWidget(self.table.model().index(row, col)).text()
Avoid deeply nested control flow statements.
if not self.isRowHidden(row):
item = self.model().item(row, col)
val_cum += item.val
item.setText(f'{val_cum:.{precision_decimals}}')
Avoid deeply nested control flow statements.
if not self.isRowHidden(row):
item = self.model().item(row, col)
item.setText(str(item.val))
# Floats
Avoid deeply nested control flow statements.
if len_raw_temp_tokens == len_processed_temp_tokens:
results_modified.extend(results_temp)
elif len_raw_temp_tokens < len_processed_temp_tokens:
results_modified.extend(results_temp[:len_raw_temp_tokens])
elif len_raw_temp_tokens > len_processed_temp_tokens:
Avoid deeply nested control flow statements.
for sentence in wl_sentence_tokenization.wl_sentence_split(self.main, text_no_tags):
self.tokens_multilevel[-1].append([])
for sentence_seg in wl_sentence_tokenization.wl_sentence_seg_tokenize_tokens(
self.main,
Avoid deeply nested control flow statements.
for token in sentence_seg:
head = token.head
head_ref = None
for i_sentence_seg, sentence_seg in enumerate(sentence):
Function `wl_test_pos_tag_models` has 6 arguments (exceeds 4 allowed). Consider refactoring.
def wl_test_pos_tag_models(lang, pos_tagger, test_sentence, tokens, results, results_universal):
Avoid deeply nested control flow statements.
for util, trs in TRS_NLP_UTILS.items():
# Only replace language utility names after language names or at the end of text
if f' - {util}' in tr or tr.endswith(util):
if f' - {util}' in tr:
tr = tr.replace(f' - {util}', f' - {trs[0]}', 1)
Avoid deeply nested control flow statements.
for i, langs in enumerate(langs_nlp_utils):
# Sentence/word tokenization
if i <= 1:
if lang_code_639_3 in langs:
doc_supported_lang += '|✔'
Avoid deeply nested control flow statements.
for j, cell in enumerate(row):
for cell_tr, trs in ACKS_TRS.items():
if cell == cell_tr:
row[j] = trs[i - 1]
Avoid deeply nested control flow statements.
if token.lemma_:
lemmas.append(token.lemma_)
else:
lemmas.append(token.text)
# Stanza
Avoid deeply nested control flow statements.
for j, collocate in enumerate(tags_right):
if wl_matching.check_context(
i, tokens,
context_settings = settings['search_settings']['context_settings'],
search_terms_incl = search_terms_incl,
Avoid deeply nested control flow statements.
if tuple(tokens[i + j : i + j + len(search_term)]) == tuple(search_term):
incl_matched = True
break
# Search terms to be included not found in texts
Avoid deeply nested control flow statements.
if settings_limit_searching == _tr('Wl_Worker_Colligation_Extractor', 'None'):
tags_right = text.tags[i + ngram_size + window_left - 1 : i + ngram_size + window_right]
else:
# Span positions (Right)
for position in range(i + ngram_size + window_left - 1, i + ngram_size + window_right):
Avoid deeply nested control flow statements.
for token in copy.deepcopy(parallel_unit):
parallel_unit_tokens_search.append(token)
if token.punc_mark:
parallel_unit_tokens_search.append(wl_texts.Wl_Token(token.punc_mark, lang = token.lang))
Avoid deeply nested control flow statements.
if line and line not in items_cur:
items_to_imp.append(line)
Avoid deeply nested control flow statements.
if token_properties:
i_tag_start += len(doc)
Avoid deeply nested control flow statements.
if not file_name:
wl_msg_boxes.Wl_Msg_Box_Warning(
self.main,
title = self.tr('Empty File Name'),
text = self.tr('''
Avoid deeply nested control flow statements.
for j in range(10):
self.set_item_num(row + j, i, 0)