BLKSerene/Wordless

View on GitHub

Showing 204 of 204 total issues

Function __init__ has 6 arguments (exceeds 4 allowed). Consider refactoring.
Open

    def __init__(
Severity: Minor
Found in wordless/wl_widgets/wl_lists.py - About 45 mins to fix

    Function wl_test_pos_tag_models has 6 arguments (exceeds 4 allowed). Consider refactoring.
    Open

    def wl_test_pos_tag_models(lang, pos_tagger, test_sentence, tokens, results, results_universal):
    Severity: Minor
    Found in tests/tests_nlp/test_pos_tagging.py - About 45 mins to fix

      Avoid deeply nested control flow statements.
      Open

                                  for word in tr.split():
                                      add_val_to_trs(trs_lexicon, word, vals)
                          else:
      Severity: Major
      Found in utils/wl_generate_vader_dicts.py - About 45 mins to fix

        Avoid deeply nested control flow statements.
        Open

                            if i == 0 and j == 0 and k == 0:
                                tokens = []
        
                                for l, token in enumerate(sentence_seg):
                                    # Do not remove the first token and set it to an empty token instead if it is a punctuation mark
        Severity: Major
        Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix

          Function wl_pos_tag has 6 arguments (exceeds 4 allowed). Consider refactoring.
          Open

          def wl_pos_tag(main, inputs, lang, pos_tagger = 'default', tagset = 'default', force = False):
          Severity: Minor
          Found in wordless/wl_nlp/wl_pos_tagging.py - About 45 mins to fix

            Avoid deeply nested control flow statements.
            Open

                                for doc in nlp.pipe(lines):
                                    for token in doc:
                                        texts_tagged.append(token.text)
            
                                        if tagset in ['default', 'raw']:
            Severity: Major
            Found in wordless/wl_nlp/wl_pos_tagging.py - About 45 mins to fix

              Avoid deeply nested control flow statements.
              Open

                                          if lens.any():
                                              self.set_item_num(row, i, numpy.mean(lens))
                                              self.set_item_num(row + 1, i, numpy.std(lens))
                                              self.set_item_num(row + 2, i, numpy.var(lens))
                                              self.set_item_num(row + 3, i, numpy.min(lens))
              Severity: Major
              Found in wordless/wl_profiler.py - About 45 mins to fix

                Avoid deeply nested control flow statements.
                Open

                                            for col in cols:
                                                row_to_exp.append(self.table.model().item(row, col).text())
                
                
                Severity: Major
                Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix

                  Function wl_test_lemmatize_models has 6 arguments (exceeds 4 allowed). Consider refactoring.
                  Open

                  def wl_test_lemmatize_models(lang, lemmatizer, test_sentence, tokens, results, lang_exceptions = None):
                  Severity: Minor
                  Found in tests/tests_nlp/test_lemmatization.py - About 45 mins to fix

                    Avoid deeply nested control flow statements.
                    Open

                                    for util, trs in TRS_NLP_UTILS.items():
                                        # Only replace language util names after language names or at the end of text
                                        if f' - {util}' in tr or tr.endswith(util):
                                            if f' - {util}' in tr:
                                                tr = tr.replace(f' - {util}', f' - {trs[0]}', 1)
                    Severity: Major
                    Found in utils/wl_trs_translate.py - About 45 mins to fix

                      Avoid deeply nested control flow statements.
                      Open

                                                  if settings['token_settings']['punc_marks']:
                                                      node_tokens_search = list(ngram)
                      
                                                      # Remove empty tokens for searching in results
                                                      left_tokens_search = [token for token in copy.deepcopy(left_tokens_raw) if token]
                      Severity: Major
                      Found in wordless/wl_concordancer.py - About 45 mins to fix

                        Avoid deeply nested control flow statements.
                        Open

                                                    if not self.isRowHidden(row):
                                                        item = self.model().item(row, col)
                        
                                                        val_cum += item.val
                                                        item.setText(str(val_cum))
                        Severity: Major
                        Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix

                          Avoid deeply nested control flow statements.
                          Open

                                                      if not self.isRowHidden(row):
                                                          item = self.model().item(row, col)
                          
                                                          item.setText(f'{item.val:.{precision_decimals}}')
                                              # Percentages
                          Severity: Major
                          Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix

                            Avoid deeply nested control flow statements.
                            Open

                                                if prefer_raw:
                                                    # Always use original tokens
                                                    results_modified.extend(tokens_raw_temp)
                                                # eg. POS tagging
                                                else:
                            Severity: Major
                            Found in wordless/wl_nlp/wl_nlp_utils.py - About 45 mins to fix

                              Avoid deeply nested control flow statements.
                              Open

                                                  for token, lemma_search in set(zip(tokens, lemmas_search)):
                                                      if re_match(lemma_matched, lemma_search, flags = re_flags):
                                                          tokens_matched[search_term_token].add(token)
                              
                              
                              Severity: Major
                              Found in wordless/wl_nlp/wl_matching.py - About 45 mins to fix

                                Avoid deeply nested control flow statements.
                                Open

                                                    for sentence in doc.sentences:
                                                        for token in sentence.words:
                                                            texts_tagged.append(token.text)
                                
                                                            if tagset in ['default', 'raw']:
                                Severity: Major
                                Found in wordless/wl_nlp/wl_pos_tagging.py - About 45 mins to fix

                                  Avoid deeply nested control flow statements.
                                  Open

                                                              for k, ngram in enumerate(wl_nlp_utils.ngrams(tokens, len_search_term)):
                                                                  if ngram == search_term:
                                                                      points.append([x_start + k / text.num_tokens * len_tokens_total, y_start - j])
                                                                      # Total
                                                                      points.append([x_start_total + k, 0])
                                  Severity: Major
                                  Found in wordless/wl_concordancer.py - About 45 mins to fix

                                    Function wl_test_sentiment_analyze_models has 6 arguments (exceeds 4 allowed). Consider refactoring.
                                    Open

                                    def wl_test_sentiment_analyze_models(lang, sentiment_analyzer, test_sentence, tokens, results, check_results = True):
                                    Severity: Minor
                                    Found in tests/tests_nlp/test_sentiment_analysis.py - About 45 mins to fix

                                      Avoid deeply nested control flow statements.
                                      Open

                                                                  if settings_limit_searching == _tr('wl_colligation_extractor', 'Within sentence segments'):
                                                                      offsets_unit = offsets_sentence_segs
                                                                      len_unit = len_sentence_segs
                                                                  elif settings_limit_searching == _tr('wl_colligation_extractor', 'Within sentences'):
                                                                      offsets_unit = offsets_sentences
                                      Severity: Major
                                      Found in wordless/wl_colligation_extractor.py - About 45 mins to fix

                                        Avoid deeply nested control flow statements.
                                        Open

                                                                    if settings_limit_searching == _tr('wl_colligation_extractor', 'None'):
                                                                        tags_left = text.tags[max(0, i + window_left) : i]
                                                                        tags_right = text.tags[i + ngram_size : i + ngram_size + window_right]
                                                                    else:
                                                                        # Span positions (Left)
                                        Severity: Major
                                        Found in wordless/wl_colligation_extractor.py - About 45 mins to fix
                                          Filter options: Severity · Category · Status · Source · Language