BLKSerene/Wordless

View on GitHub

Showing 204 of 204 total issues

Function add_header_vert has 6 arguments (exceeds 4 allowed). Consider refactoring.
Open

    def add_header_vert(
Severity: Minor
Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix

    Avoid deeply nested control flow statements.
    Open

                                if not self.isRowHidden(row):
                                    item = self.model().item(row, col)
    
                                    val_cum += item.val
                                    item.setText(str(val_cum))
    Severity: Major
    Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix

      Avoid deeply nested control flow statements.
      Open

                                  for col in cols:
                                      if self.table.model().item(row, col):
                                          cell_text = self.table.model().item(row, col).text()
                                      else:
                                          cell_text = self.table.indexWidget(self.table.model().index(row, col)).text()
      Severity: Major
      Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix

        Avoid deeply nested control flow statements.
        Open

                                    if ngram[i : i + len_search_term] == search_term:
                                        ngrams_is_filtered.append((ngram, ngram_i))
        
                        # Check context settings
                        ngrams_is = (
        Severity: Major
        Found in wordless/wl_ngram_generator.py - About 45 mins to fix

          Avoid deeply nested control flow statements.
          Open

                                      for word in tr.split():
                                          add_val_to_trs(trs_lexicon, word, vals)
                              else:
          Severity: Major
          Found in utils/wl_generate_vader_dicts.py - About 45 mins to fix

            Avoid deeply nested control flow statements.
            Open

                                        for j, collocate in enumerate(reversed(tags_left)):
                                            if wl_matching.check_context(
                                                i, tokens,
                                                context_settings = settings['search_settings']['context_settings'],
                                                search_terms_incl = search_terms_incl,
            Severity: Major
            Found in wordless/wl_colligation_extractor.py - About 45 mins to fix

              Avoid deeply nested control flow statements.
              Open

                                  for token in sentence_seg:
                                      token.tag = token.tag.lower()
              
                  # Words
                  if settings['words']:
              Severity: Major
              Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix

                Avoid deeply nested control flow statements.
                Open

                                    for sentence in doc.sentences:
                                        for token in sentence.words:
                                            texts_tagged.append(token.text)
                
                                            if tagset in ['default', 'raw']:
                Severity: Major
                Found in wordless/wl_nlp/wl_pos_tagging.py - About 45 mins to fix

                  Avoid deeply nested control flow statements.
                  Open

                                      for doc in nlp.pipe(docs):
                                          for token in doc:
                                              texts_tagged.append(token.text)
                  
                                              if tagset in ['default', 'raw']:
                  Severity: Major
                  Found in wordless/wl_nlp/wl_pos_tagging.py - About 45 mins to fix

                    Avoid deeply nested control flow statements.
                    Open

                                                if (
                                                    ngram in search_terms
                                                    and wl_matching.check_context(
                                                        j, tokens,
                                                        context_settings = settings['search_settings']['context_settings'],
                    Severity: Major
                    Found in wordless/wl_concordancer_parallel.py - About 45 mins to fix

                      Avoid deeply nested control flow statements.
                      Open

                                                  for k, ngram in enumerate(wl_nlp_utils.ngrams(text.get_tokens_flat(), len_search_term)):
                                                      if ngram == search_term:
                                                          points.append([x_start + k, i])
                      
                      
                      Severity: Major
                      Found in wordless/wl_concordancer.py - About 45 mins to fix

                        Avoid deeply nested control flow statements.
                        Open

                                                    if lens.any():
                                                        self.set_item_num(row, i, numpy.mean(lens))
                                                        self.set_item_num(row + 1, i, numpy.std(lens))
                                                        self.set_item_num(row + 2, i, numpy.var(lens))
                                                        self.set_item_num(row + 3, i, numpy.min(lens))
                        Severity: Major
                        Found in wordless/wl_profiler.py - About 45 mins to fix

                          Avoid deeply nested control flow statements.
                          Open

                                                      if not self.isRowHidden(row):
                                                          item = self.model().item(row, col)
                          
                                                          item.setText(f'{item.val:.{precision_pcts}%}')
                                  elif self.header_orientation == 'vert':
                          Severity: Major
                          Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix

                            Avoid deeply nested control flow statements.
                            Open

                                                for collocate in range(10):
                                                    collocate = wl_texts.Wl_Token(str(collocate))
                                                    stat_files_items[(node, collocate)] = [
                                                        random.uniform(0, val_max),
                                                        random.uniform(0, val_max),
                            Severity: Major
                            Found in tests/tests_figs/test_figs_stats.py - About 45 mins to fix

                              Avoid deeply nested control flow statements.
                              Open

                                                  for tokens in wl_nlp_utils.split_token_list(main, texts, pos_tagger):
                                                      # The Japanese model does not have a tagger component and Japanese POS tags are taken directly from SudachiPy
                                                      # See: https://github.com/explosion/spaCy/discussions/9983#discussioncomment-1910117
                                                      if lang == 'jpn':
                                                          docs.append(''.join(tokens))
                              Severity: Major
                              Found in wordless/wl_nlp/wl_pos_tagging.py - About 45 mins to fix

                                Avoid deeply nested control flow statements.
                                Open

                                                    if token_properties:
                                                        i_tag_start += len(doc)
                                            else:
                                Severity: Major
                                Found in wordless/wl_nlp/wl_dependency_parsing.py - About 45 mins to fix

                                  Avoid deeply nested control flow statements.
                                  Open

                                                          if line and line not in items_cur:
                                                              items_to_imp.append(line)
                                  
                                  
                                  Severity: Major
                                  Found in wordless/wl_widgets/wl_lists.py - About 45 mins to fix

                                    Function __init__ has 6 arguments (exceeds 4 allowed). Consider refactoring.
                                    Open

                                        def __init__(self, main, title, width = 0, height = 0, resizable = True, no_buttons = False):
                                    Severity: Minor
                                    Found in wordless/wl_dialogs/wl_dialogs_errs.py - About 45 mins to fix

                                      Avoid deeply nested control flow statements.
                                      Open

                                                                  if ngram == search_term:
                                                                      colligations_freqs_file_filtered[(node, collocate)] = freqs
                                      
                                      
                                      Severity: Major
                                      Found in wordless/wl_colligation_extractor.py - About 45 mins to fix

                                        Avoid deeply nested control flow statements.
                                        Open

                                                                for sentence in wl_sentence_tokenization.wl_sentence_split(self.main, para):
                                                                    self.tokens_multilevel[-1].append([])
                                        
                                                                    for sentence_seg in wl_sentence_tokenization.wl_sentence_seg_tokenize_tokens(
                                                                        self.main,
                                        Severity: Major
                                        Found in wordless/wl_nlp/wl_texts.py - About 45 mins to fix
                                          Severity
                                          Category
                                          Status
                                          Source
                                          Language