BLKSerene/Wordless


Showing 205 of 205 total issues

Avoid deeply nested control flow statements.
Open

                    if i == 0 and j == 0 and k == 0:
                        tokens = []

                        for l, token in enumerate(sentence_seg):
                            # Do not remove the first token and set it to an empty token instead if it is a punctuation mark
Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix
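A common fix for this kind of nesting is to extract the innermost block into a named helper and call it from the outer loops. The sketch below is illustrative only and is not code from the Wordless repository; the helper name remove_leading_punc and the is_punc predicate it accepts are hypothetical.

    # Illustrative sketch only: a hypothetical helper that replaces the first
    # token with an empty token if it is a punctuation mark, so the
    # triple-nested block above shrinks to a single call.
    def remove_leading_punc(sentence_seg, is_punc):
        tokens = []

        for i, token in enumerate(sentence_seg):
            if i == 0 and is_punc(token):
                tokens.append('')
            else:
                tokens.append(token)

        return tokens

    # At the call site, the deepest branch then reads:
    #     if i == 0 and j == 0 and k == 0:
    #         tokens = remove_leading_punc(sentence_seg, wl_checks_tokens.is_punc)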

Avoid deeply nested control flow statements.
Open

                        for token in sentence_seg:
                            head = token.head
                            head_ref = None

                            for i_sentence_seg, sentence_seg in enumerate(sentence):
Severity: Major
Found in wordless/wl_nlp/wl_texts.py - About 45 mins to fix

Function add_headers_vert has 6 arguments (exceeds 4 allowed). Consider refactoring.
Open

    def add_headers_vert(
Severity: Minor
Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix
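For signatures that exceed the four-argument limit, one common remedy is to group related options into a single settings object so call sites pass two or three values instead of six. The sketch below is a hypothetical illustration; the class and field names do not reproduce the real signature of add_headers_vert.

    # Illustrative sketch only: hypothetical parameter bundle, not the actual
    # code in wl_tables.py.
    from dataclasses import dataclass

    @dataclass
    class HeaderSettings:
        is_int: bool = False
        is_float: bool = False
        is_pct: bool = False
        is_cum: bool = False

    class Wl_Table_Demo:
        def add_headers_vert(self, headers, settings = None):
            # Former positional flags now travel together in one object
            settings = settings or HeaderSettings()

            for header in headers:
                print(header, settings)

    # Call sites name only the options they need:
    #     table.add_headers_vert(['Rank', 'Token'], HeaderSettings(is_pct = True))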

Avoid deeply nested control flow statements.
Open

                    for sentence_seg in sentence:
                        for i, token in enumerate(sentence_seg):
                            if token.isupper():
                                sentence_seg[i] = wl_texts.Wl_Token('')
        # Title Case
Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for token in sentence_seg:
                        if wl_checks_tokens.is_punc(token.head):
                            token.head = None


Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for sentence in doc.sents:
                        displacy_dict = spacy.displacy.parse_deps(sentence, options = options)

                        if token_properties:
                            for token, word in zip(sentence, displacy_dict['words']):
Severity: Major
Found in wordless/wl_nlp/wl_dependency_parsing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if settings['token_settings']['punc_marks']:
                                node_tokens_search = list(ngram)

                                # Remove empty tokens for searching in results
                                left_tokens_search = [token for token in copy.deepcopy(left_tokens_raw) if token]
Severity: Major
Found in wordless/wl_concordancer.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            for j in range(10):
                                self.set_item_num(row + j, i, 0)


Severity: Major
Found in wordless/wl_profiler.py - About 45 mins to fix

Function wl_spin_boxes_min_max has 6 arguments (exceeds 4 allowed). Consider refactoring.
Open

def wl_spin_boxes_min_max(
Severity: Minor
Found in wordless/wl_widgets/wl_boxes.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            for word in tr.split():
                                add_val_to_trs(trs_lexicon, word, vals)
                    else:
Severity: Major
Found in utils/wl_generate_vader_dicts.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                        if wl_matching.split_tag_embedded(opening_tag_text)[1] == '*':
                            opening_tag_text = opening_tag_text.replace('*', self.tr('TAG'))


Severity: Major
Found in wordless/wl_settings/wl_settings_files.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    if prefer_raw:
                        # Always use original tokens
                        results_modified.extend(tokens_raw_temp)
                    # eg. POS tagging
                    else:
Severity: Major
Found in wordless/wl_nlp/wl_nlp_utils.py - About 45 mins to fix

Function wl_test_sentiment_analyze_models has 6 arguments (exceeds 4 allowed). Consider refactoring.
Open

def wl_test_sentiment_analyze_models(lang, sentiment_analyzer, test_sentence, tokens, results, check_results = True):
Severity: Minor
Found in tests/tests_nlp/test_sentiment_analysis.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for token, lemma_search in set(zip(tokens, lemmas_search)):
                        if re_match(lemma_matched, lemma_search, flags = re_flags):
                            tokens_matched[search_term_token].add(token)


Severity: Major
Found in wordless/wl_nlp/wl_matching.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    if re_match(lemma_matched, lemma_search, flags = re_flags):
                        tokens_matched[token_matched].add(token)


Severity: Major
Found in wordless/wl_nlp/wl_matching.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for token in sentence_seg:
                        if token.tag is not None:
                            token.tag = token.tag.lower()

                        if token.lemma is not None:
Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix

Function wl_pos_tag has 6 arguments (exceeds 4 allowed). Consider refactoring.
Open

def wl_pos_tag(main, inputs, lang, pos_tagger = 'default', tagset = 'default', force = False):
Severity: Minor
Found in wordless/wl_nlp/wl_pos_tagging.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for i, token in enumerate(sentence_seg):
                        if wl_checks_tokens.is_num(token):
                            sentence_seg[i] = wl_texts.Wl_Token('')

    # Replace token texts with lemmas
Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            for col in cols:
                                if self.table.model().item(row, col):
                                    cell_text = self.table.model().item(row, col).text()
                                else:
                                    cell_text = self.table.indexWidget(self.table.model().index(row, col)).text()
Severity: Major
Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix
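The if/else in this loop could be hidden behind a small accessor so the export code loses one nesting level. The sketch below is a hypothetical helper built only from the calls already visible in the snippet, not actual code from wl_tables.py.

    # Illustrative sketch only: a hypothetical method on the exporting widget
    # that returns a cell's text whether it is backed by a model item or by an
    # index widget.
    def get_cell_text(self, row, col):
        item = self.table.model().item(row, col)

        if item:
            return item.text()

        return self.table.indexWidget(self.table.model().index(row, col)).text()

    # The nested loop above then reads:
    #     for col in cols:
    #         cell_text = self.get_cell_text(row, col)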

Avoid deeply nested control flow statements.
Open

                    for sentence in doc.sentences:
                        for token in sentence.words:
                            texts_tagged.append(token.text)

                            if tagset in ['default', 'raw']:
Severity: Major
Found in wordless/wl_nlp/wl_pos_tagging.py - About 45 mins to fix