BLKSerene/Wordless

Showing 204 of 204 total issues

Avoid deeply nested control flow statements.
Open

                            for j, collocate in enumerate(reversed(tags_left)):
                                if wl_matching.check_context(
                                    i, tokens,
                                    context_settings = settings['search_settings']['context_settings'],
                                    search_terms_incl = search_terms_incl,
Severity: Major
Found in wordless/wl_colligation_extractor.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                        if token.lemma is not None:
                            lemmas.append(token.lemma)
                        else:
                            lemmas.append(token.text)
    else:
Severity: Major
Found in wordless/wl_nlp/wl_lemmatization.py - About 45 mins to fix
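A minimal sketch, not Wordless's actual code, of one way to flatten the excerpt above: the inner if/else collapses into a single conditional expression, removing a nesting level. The Token class and data are stand-ins invented for illustration.

    # Minimal sketch: collapse the if/else into one conditional expression
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Token:
        # Hypothetical stand-in for the lemmatizer's token objects
        text: str
        lemma: Optional[str] = None

    tokens = [Token('cats', 'cat'), Token('foobar', None)]
    lemmas = []

    for token in tokens:
        # Fall back to the surface form when no lemma is available
        lemmas.append(token.lemma if token.lemma is not None else token.text)

    print(lemmas)  # ['cat', 'foobar']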

Avoid deeply nested control flow statements.
Open

                    for token, lemma_search in set(zip(tokens, lemmas_search)):
                        if re_match(lemma_matched, lemma_search, flags = re_flags):
                            search_results.add(token)


Severity: Major
Found in wordless/wl_nlp/wl_matching.py - About 45 mins to fix
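A minimal sketch with stand-in data, not the wl_matching module itself: the nested for/if/add pattern in the excerpt above can often be folded into a single set comprehension, which removes one indentation level. The tokens, lemmas, and pattern below are invented, and re.match stands in for the module's re_match helper.

    # Minimal sketch: set comprehension instead of nested for/if/add
    import re

    tokens = ['Cats', 'ran', 'Dogs']
    lemmas_search = ['cat', 'run', 'dog']
    lemma_matched = r'cat|dog'
    re_flags = re.IGNORECASE

    search_results = {
        token
        for token, lemma_search in set(zip(tokens, lemmas_search))
        if re.match(lemma_matched, lemma_search, flags = re_flags)
    }

    print(sorted(search_results))  # ['Cats', 'Dogs']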

Avoid deeply nested control flow statements.
Open

                    for i, token in enumerate(sentence_seg):
                        if wl_checks_tokens.is_word_alphabetic(token):
                            sentence_seg[i] = ''

        # Numerals
Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix
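For the token-blanking loops in wl_token_processing.py (this excerpt and the similar uppercase/lowercase ones further down this list), pulling the innermost loop into a small helper is one way to cut the nesting. A minimal sketch under that assumption; the predicate is a simplified stand-in for wl_checks_tokens.is_word_alphabetic and the data is invented.

    # Minimal sketch: a helper blanks matching tokens, so each deeply nested
    # per-segment loop collapses to a single call
    def is_word_alphabetic(token):
        return token.isalpha()

    def blank_tokens(sentence_seg, predicate):
        # Replace matching tokens with empty strings in place
        for i, token in enumerate(sentence_seg):
            if predicate(token):
                sentence_seg[i] = ''

    sentence = [['The', '3', 'cats'], ['ran', '!']]

    for sentence_seg in sentence:
        blank_tokens(sentence_seg, is_word_alphabetic)

    print(sentence)  # [['', '3', ''], ['', '!']]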

Avoid deeply nested control flow statements.
Open

                    if token_properties:
                        i_tag_start += len(doc)
            else:
Severity: Major
Found in wordless/wl_nlp/wl_dependency_parsing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if (
                                ngram in search_terms
                                and wl_matching.check_context(
                                    j, tokens,
                                    context_settings = settings['search_settings']['context_settings'],
Severity: Major
Found in wordless/wl_concordancer_parallel.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if not self.isRowHidden(row):
                                item = self.model().item(row, col)

                                item.setText(str(item.val))
                    # Floats
Severity: Major
Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix
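In the table-refresh loops above (and the cumulative-value loops later in this list), inverting the visibility check into a guard clause with continue removes a level of nesting. A minimal sketch with plain dicts standing in for the Qt item model; the row data is invented.

    # Minimal sketch: guard clause with `continue` instead of
    # `if not self.isRowHidden(row):` wrapping the useful work
    rows = [
        {'hidden': False, 'val': 1.25},
        {'hidden': True, 'val': 2.0},
        {'hidden': False, 'val': 3.5},
    ]

    for row in rows:
        # Skip hidden rows up front instead of indenting the real work
        if row['hidden']:
            continue

        row['text'] = str(row['val'])

    print([row.get('text') for row in rows])  # ['1.25', None, '3.5']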

Avoid deeply nested control flow statements.
Open

                for file_type, trs in TRS_FILE_TYPES.items():
                    if tr == file_type:
                        tr = trs[0]

                        break
Severity: Major
Found in utils/wl_trs_translate.py - About 45 mins to fix
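Since TRS_FILE_TYPES is a mapping, the search loop in the excerpt above can usually be replaced by a membership test plus a direct lookup, which removes the loop, the inner if, and the break in one step. A minimal sketch; the file-type entries below are invented for illustration.

    # Minimal sketch: direct dictionary lookup instead of iterating over
    # TRS_FILE_TYPES.items() to find a matching key
    TRS_FILE_TYPES = {
        'CSV files': ['Fichiers CSV'],
        'Text files': ['Fichiers texte'],
    }

    tr = 'CSV files'

    if tr in TRS_FILE_TYPES:
        tr = TRS_FILE_TYPES[tr][0]

    print(tr)  # Fichiers CSV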

Avoid deeply nested control flow statements.
Open

                            if settings_limit_searching == _tr('wl_colligation_extractor', 'None'):
                                tags_left = text.tags[max(0, i + window_left) : max(0, i + window_right + 1)]
                            else:
                                # Span positions (Left)
                                for position in range(max(0, i + window_left), max(0, i + window_right + 1)):
Severity: Major
Found in wordless/wl_colligation_extractor.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    if len_raw_temp_tokens == len_processed_temp_tokens:
                        results_modified.extend(results_temp)
                    elif len_raw_temp_tokens < len_processed_temp_tokens:
                        results_modified.extend(results_temp[:len_raw_temp_tokens])
                    elif len_raw_temp_tokens > len_processed_temp_tokens:
Severity: Major
Found in wordless/wl_nlp/wl_nlp_utils.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                        for tag in re.finditer(re_tags, para):
                            tags_tokens = self.add_tags_splitting(para[tag_last_end:tag.start()], tags_tokens)
                            tags_tokens[-1].append(tag.group())

                            tag_last_end = tag.end()
Severity: Major
Found in wordless/wl_nlp/wl_texts.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for sentence_seg in sentence:
                        for i, token in enumerate(sentence_seg):
                            if token.isupper():
                                sentence_seg[i] = ''
        # Title Case
Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if not self.isRowHidden(row):
                                item = self.model().item(row, col)

                                val_cum += item.val
                                item.setText(f'{val_cum:.{precision_decimals}}')
Severity: Major
Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if not self.isRowHidden(row):
                                item = self.model().item(row, col)

                                val_cum += item.val
                                item.setText(f'{val_cum:.{precision_pcts}%}')
Severity: Major
Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for i, langs in enumerate(langs_nlp_utils):
                        # Sentence/word tokenization
                        if i <= 1:
                            if lang_code_639_3 in langs:
                                doc_supported_lang += '|✔'
Severity: Major
Found in tests/wl_test_doc.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for sentence_seg in sentence:
                        for token in sentence_seg:
                            head = token.head

                            for i_sentence_seg, sentence_seg in enumerate(sentence):
Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for sentence_seg in sentence:
                        for i, token in enumerate(sentence_seg):
                            if token.islower():
                                sentence_seg[i] = ''
        # Uppercase
Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for sentence in doc.sents:
                        htmls.append(spacy.displacy.render(
                            sentence,
                            style = 'dep',
                            minify = True,
Severity: Major
Found in wordless/wl_nlp/wl_dependency_parsing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            for token in copy.deepcopy(parallel_unit):
                                parallel_unit_tokens_search.append(token)

                                if token.punc_mark:
                                    parallel_unit_tokens_search.append(wl_texts.Wl_Token(token.punc_mark, lang = token.lang))
Severity: Major
Found in wordless/wl_concordancer_parallel.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            for node in nodes:
                                len_node = len(node)

                                for j, ngram in enumerate(wl_nlp_utils.ngrams(parallel_unit, len_node)):
                                    if ngram == tuple(node):
Severity: Major
Found in wordless/wl_concordancer_parallel.py - About 45 mins to fix