wordless/wl_nlp/wl_texts.py
Issue: Function `__init__` has 11 arguments (exceeds 4 allowed). Consider refactoring.
def __init__(
Issue: Function `to_tokens` has 11 arguments (exceeds 4 allowed). Consider refactoring.
def to_tokens(
Issue: Consider simplifying this complex logical expression.
if (
self.tokenized
and (css_para and css_sentence and css_word)
and (soup.select_one(css_para) and soup.select_one(css_sentence) and soup.select_one(css_word))
):
Issue: Avoid deeply nested control flow statements.
for sentence in wl_sentence_tokenization.wl_sentence_split(self.main, para):
self.tokens_multilevel[-1].append([])
for sentence_seg in wl_sentence_tokenization.wl_sentence_seg_tokenize_tokens(
self.main,
Issue: Avoid deeply nested control flow statements.
for sentence in wl_sentence_tokenization.wl_sentence_split(self.main, text_no_tags):
self.tokens_multilevel[-1].append([])
for sentence_seg in wl_sentence_tokenization.wl_sentence_seg_tokenize_tokens(
self.main,
Issue: Avoid deeply nested control flow statements.
for token in sentence_seg:
head = token.head
head_ref = None
for i_sentence_seg, sentence_seg in enumerate(sentence):
Issue: Avoid deeply nested control flow statements.
for token in sentence_seg:
refs = head_refs[i_token]
if refs is not None:
token.head = self.tokens_multilevel[refs[0]][refs[1]][refs[2]][refs[3]]
Issue: Avoid deeply nested control flow statements.
for tag in re.finditer(re_tags, para):
tags_tokens = self.add_tags_splitting(para[i_tag_end:tag.start()], tags_tokens)
tags_tokens[-1].append(tag.group())
i_tag_end = tag.end()
Issue: Avoid deeply nested control flow statements.
if (para := para[i_tag_end:]):
tags_tokens = self.add_tags_splitting(para, tags_tokens)
# Insert tags at the start of the text
if self.tags_text_start and tags_tokens: