BLKSerene/Wordless

View on GitHub

Showing 202 of 202 total issues

Avoid deeply nested control flow statements.
Open

                for file_type, trs in TRS_FILE_TYPES.items():
                    if tr == file_type:
                        tr = trs[0]

                        break
Severity: Major
Found in utils/wl_trs_translate.py - About 45 mins to fix
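
A possible way to drop two of these levels: the .items() loop only uses tr as a lookup key, so a direct membership test does the same work without the loop or the break. A minimal sketch, assuming TRS_FILE_TYPES is a dict mapping each file type to a list of translation strings, as the loop above implies:

    # Minimal sketch, assuming TRS_FILE_TYPES maps each file type to a list of
    # translation strings, as the .items() loop above suggests.
    if tr in TRS_FILE_TYPES:
        tr = TRS_FILE_TYPES[tr][0]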

Avoid deeply nested control flow statements.
Open

                    if tuple(tokens[i + j : i + j + len(search_term)]) == tuple(search_term):
                        incl_matched = True

                        break
    # Search terms to be included not found in texts
Severity: Major
Found in wordless/wl_nlp/wl_matching.py - About 45 mins to fix
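
One hedged option is to fold the enclosing loop, the comparison, and the break into a single any() call. The sketch below assumes the enclosing (unshown) loop iterates j over some sequence of candidate start offsets, named offsets purely for illustration:

    # Hypothetical sketch: `offsets` stands in for whatever start positions the
    # enclosing loop in wl_matching.py actually covers.
    incl_matched = any(
        tuple(tokens[i + j : i + j + len(search_term)]) == tuple(search_term)
        for j in offsets
    )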

Avoid deeply nested control flow statements.
Open

                    for sentence_seg in sentence:
                        for i, token in enumerate(sentence_seg):
                            if token.isupper():
                                sentence_seg[i] = ''
        # Title Case
Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix
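
The two innermost levels here (and the very similar alphabetic-token pass from the same file, shown further down this list) could live in one small predicate-driven helper, leaving only the outer loops at the call site. A sketch with hypothetical names:

    # Hypothetical helper; `sentence` is assumed to be a list of segments,
    # each segment a list of tokens, mirroring the snippet above.
    def blank_matching_tokens(sentence, pred):
        for sentence_seg in sentence:
            for i, token in enumerate(sentence_seg):
                if pred(token):
                    sentence_seg[i] = ''

    # Upper case
    blank_matching_tokens(sentence, str.isupper)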

Avoid deeply nested control flow statements.
Open

                            for word in wl_word_tokenization.wl_word_tokenize_flat(main, tr, lang):
                                add_val_to_trs(trs_lexicon, word, vals)
                        else:
Severity: Major
Found in utils/wl_generate_vader_dicts.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    if token_properties:
                        i_tag_start += len(doc)


Severity: Major
Found in wordless/wl_nlp/wl_dependency_parsing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            for k, ngram in enumerate(wl_nlp_utils.ngrams(tokens, len_search_term)):
                                if ngram == search_term:
                                    points.append([x_start + k / text.num_tokens * len_tokens_total, y_start - j])
                                    # Total
                                    points.append([x_start_total + k, 0])
Severity: Major
Found in wordless/wl_concordancer.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if settings_limit_searching == _tr('wl_colligation_extractor', 'None'):
                                tags_right = text.tags[i + ngram_size + window_left - 1 : i + ngram_size + window_right]
                            else:
                                # Span positions (Right)
                                for position in range(i + ngram_size + window_left - 1, i + ngram_size + window_right):
Severity: Major
Found in wordless/wl_colligation_extractor.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                                for j, collocate in enumerate(tags_right):
                                    if wl_matching.check_context(
                                        i, tokens,
                                        context_settings = settings['search_settings']['context_settings'],
                                        search_terms_incl = search_terms_incl,
Severity: Major
Found in wordless/wl_colligation_extractor.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for i, token in enumerate(sentence_seg):
                        if wl_checks_tokens.is_word_alphabetic(token):
                            sentence_seg[i] = ''

    # Numerals
Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            for col in cols:
                                row_to_exp.append(self.table.model().item(row, col).text())


Severity: Major
Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix
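
The inner loop here only accumulates cell texts, so it collapses into a single extend() call, assuming row_to_exp is a plain list built up per exported row, as the append suggests:

    # Minimal sketch: one extend() call replaces the per-column loop.
    row_to_exp.extend(
        self.table.model().item(row, col).text()
        for col in cols
    )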

Avoid deeply nested control flow statements.
Open

                        if self.settings_tags == 'body_tag_settings' and tag_name == '*':
                            opening_tag_text = opening_tag_text.replace('*', _tr('wl_settings_files', 'TAG'))
                            closing_tag_text = self.model().item(row, 3).text().replace('*', _tr('wl_settings_files', 'TAG'))
                            preview.setText(opening_tag_text + _tr('wl_settings_files', 'token') + closing_tag_text)
                        else:
Severity: Major
Found in wordless/wl_settings/wl_settings_files.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                if any((text in tr for text in [])):
                    # Flag translation as unfinished to be reviewed manually
                    unfinished = True


Severity: Major
Found in utils/wl_trs_translate.py - About 45 mins to fix
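
One level disappears if the flag update is written as a boolean assignment instead of an if block. The empty list from the snippet is kept as-is; note that, as written, the condition can never be true until marker strings are added to it:

    # Minimal sketch, assuming `unfinished` is a plain boolean flag.
    unfinished = unfinished or any(text in tr for text in [])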

Avoid deeply nested control flow statements.
Open

                                for j, collocate in enumerate(reversed(tags_left)):
                                    if wl_matching.check_context(
                                        i, tokens,
                                        context_settings = settings['search_settings']['context_settings'],
                                        search_terms_incl = search_terms_incl,
Severity: Major
Found in wordless/wl_colligation_extractor.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for sentence in wl_sentence_tokenization.wl_sentence_split(self.main, text_no_tags):
                        self.tokens_multilevel[-1].append([])

                        for sentence_seg in wl_sentence_tokenization.wl_sentence_seg_tokenize_tokens(
                            self.main,
Severity: Major
Found in wordless/wl_nlp/wl_texts.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    if token.lemma is not None:
                        lemmas.append(token.lemma)
                    else:
                        lemmas.append(token.text)
    else:
Severity: Major
Found in wordless/wl_nlp/wl_lemmatization.py - About 45 mins to fix
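
The if/else pair reduces to a single conditional expression, keeping the same None check from the snippet:

    # Minimal sketch: one append with a conditional expression.
    lemmas.append(token.lemma if token.lemma is not None else token.text)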

Avoid deeply nested control flow statements.
Open

                    if re_match(search_term, dependency_relation.display_text(), flags = re_flags):
                        search_results.add(dependency_relation)
        else:
Severity: Major
Found in wordless/wl_nlp/wl_matching.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            for node in nodes:
                                len_node = len(node)

                                for j, ngram in enumerate(wl_nlp_utils.ngrams(parallel_unit, len_node)):
                                    if ngram == tuple(node):
Severity: Major
Found in wordless/wl_concordancer_parallel.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                        for tag in re.finditer(re_tags, para):
                            tags_tokens = self.add_tags_splitting(para[tag_last_end:tag.start()], tags_tokens)
                            tags_tokens[-1].append(tag.group())

                            tag_last_end = tag.end()
Severity: Major
Found in wordless/wl_nlp/wl_texts.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                        if i != item_row and item_text == text:
                            wl_msg_boxes.Wl_Msg_Box_Warning(
                                self.main,
                                title = _tr('wl_lists', 'Duplicates Found'),
                                text = _tr('wl_lists', '''
Severity: Major
Found in wordless/wl_widgets/wl_lists.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for sentence in sentences:
                        tokens_multilevel[-1].append(main.nltk_nist_tokenizer.international_tokenize(sentence))
                case 'nltk_nltk':
Severity: Major
Found in wordless/wl_nlp/wl_word_tokenization.py - About 45 mins to fix