BLKSerene/Wordless

Showing 205 of 205 total issues

Avoid deeply nested control flow statements.
Open

                            for j, count in enumerate(counts):
                                self.set_item_num(
                                    row = self.model().rowCount() - 2,
                                    col = j,
                                    val = count
Severity: Major
Found in wordless/wl_profiler.py - About 45 mins to fix
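
One way to address this kind of issue is to pull the innermost loop out into a named helper so the enclosing block stays flat. The following is a minimal, self-contained sketch of that pattern; Table, set_item_num and fill_counts_row are hypothetical stand-ins, not Wordless's actual classes or methods.

    # A sketch of extracting the innermost loop into a helper method.
    # Table, set_item_num and fill_counts_row are hypothetical stand-ins,
    # not Wordless's actual classes or methods.
    class Table:
        def __init__(self):
            self.cells = {}

        def set_item_num(self, row, col, val):
            # Stand-in for the real cell-setting logic
            self.cells[(row, col)] = val

        def fill_counts_row(self, row, counts):
            # Formerly the deeply nested inner loop; now flat inside its own method
            for col, count in enumerate(counts):
                self.set_item_num(row=row, col=col, val=count)

    table = Table()
    table.fill_counts_row(row=0, counts=[3, 1, 4])
    print(table.cells)  # {(0, 0): 3, (0, 1): 1, (0, 2): 4}
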

Avoid deeply nested control flow statements.
Open

                            for col in cols:
                                row_to_exp.append(self.table.model().item(row, col).text())

Severity: Major
Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if ngram[i : i + len_search_term] == search_term:
                                ngrams_is_filtered.append((ngram, ngram_i))

                # Check context settings
                ngrams_is = (
Severity: Major
Found in wordless/wl_ngram_generator.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for i, langs in enumerate(langs_nlp_utils):
                        # Sentence/word tokenization
                        if i <= 1:
                            if lang_code_639_3 in langs:
                                doc_supported_lang += '|✔'
Severity: Major
Found in tests/wl_test_doc.py - About 45 mins to fix
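
Where two nested if statements carry no separate else branches, as in the snippet above, they can usually be merged into a single condition with and. A minimal sketch, using simplified stand-in values for langs_nlp_utils and lang_code_639_3:

    # Simplified stand-ins for the variables in tests/wl_test_doc.py
    langs_nlp_utils = [{'eng', 'zho'}, {'eng'}, {'zho'}]
    lang_code_639_3 = 'eng'
    doc_supported_lang = ''

    for i, langs in enumerate(langs_nlp_utils):
        # One combined condition instead of two nested "if" blocks
        if i <= 1 and lang_code_639_3 in langs:
            doc_supported_lang += '|✔'

    print(doc_supported_lang)  # |✔|✔
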

Avoid deeply nested control flow statements.
Open

                    for item in range(100):
                        item = wl_texts.Wl_Token(str(item))
                        freq_1, freq_2 = random.sample(range(100), 2)

                        freq_files_items[item] = [
Severity: Major
Found in tests/tests_figs/test_figs_freqs.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if settings_limit_searching == _tr('Wl_Worker_Colligation_Extractor', 'None'):
                                tags_right = text.tags[i + ngram_size + window_left - 1 : i + ngram_size + window_right]
                            else:
                                # Span positions (Right)
                                for position in range(i + ngram_size + window_left - 1, i + ngram_size + window_right):
Severity: Major
Found in wordless/wl_colligation_extractor.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            for j, collocate in enumerate(tags_right):
                                if wl_matching.check_context(
                                    i, tokens,
                                    context_settings = settings['search_settings']['context_settings'],
                                    search_terms_incl = search_terms_incl,
Severity: Major
Found in wordless/wl_colligation_extractor.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    if tuple(tokens[i + j : i + j + len(search_term)]) == tuple(search_term):
                        incl_matched = True

                        break
    # Search terms to be included not found in texts
Severity: Major
Found in wordless/wl_nlp/wl_matching.py - About 45 mins to fix
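
The flag-and-break pattern in the snippet above can often be replaced with any() over a generator expression, which removes one level of nesting entirely. A minimal sketch with illustrative values, not Wordless's actual data structures:

    # Illustrative values, not Wordless's actual token objects
    tokens = ['the', 'quick', 'brown', 'fox']
    search_term = ['brown', 'fox']

    # any() replaces the manual flag assignment and break
    incl_matched = any(
        tokens[i : i + len(search_term)] == search_term
        for i in range(len(tokens) - len(search_term) + 1)
    )

    print(incl_matched)  # True
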

Avoid deeply nested control flow statements.
Open

                    for sentence_seg in sentence:
                        for i, token in enumerate(sentence_seg):
                            if token.islower():
                                sentence_seg[i] = wl_texts.Wl_Token('')
        # Uppercase
Severity: Major
Found in wordless/wl_nlp/wl_token_processing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for doc in nlp.pipe(lines):
                        for token in doc:
                            texts_tagged.append(token.text)

                            if tagset in ['default', 'raw']:
Severity: Major
Found in wordless/wl_nlp/wl_pos_tagging.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                    for tokens in wl_nlp_utils.split_token_list(main, texts, pos_tagger):
                        # The Japanese model do not have a tagger component and Japanese POS tags are taken directly from SudachiPy
                        # See: https://github.com/explosion/spaCy/discussions/9983#discussioncomment-1910117
                        if lang == 'jpn':
                            docs.append(''.join(tokens))
Severity: Major
Found in wordless/wl_nlp/wl_pos_tagging.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if freqs_totals[j][k]:
                                self.set_item_num(i, cols_freqs_start[j] + k * 2 + 1, freq / freqs_totals[j][k])
                            else:
                                self.set_item_num(i, cols_freqs_start[j] + k * 2 + 1, 0)

Severity: Major
Found in wordless/wl_colligation_extractor.py - About 45 mins to fix
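
When both branches make the same call and differ only in the value passed, computing that value with a conditional expression removes a nesting level and the duplicated call. A minimal sketch; set_item_num here is a hypothetical stand-in for the table method:

    def set_item_num(row, col, val):
        # Hypothetical stand-in for the real table cell setter
        print(f'row={row} col={col} val={val}')

    freq, total = 7, 0

    # Compute the value once with a conditional expression, then make one call
    val = freq / total if total else 0
    set_item_num(row=0, col=1, val=val)  # prints: row=0 col=1 val=0
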

Avoid deeply nested control flow statements.
Open

                            for sentence in sentences:
                                tokens_multilevel[-1].append(main.nltk_nist_tokenizer.international_tokenize(sentence))
                        case 'nltk_nltk':
Severity: Major
Found in wordless/wl_nlp/wl_word_tokenization.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if not self.isRowHidden(row):
                                item = self.model().item(row, col)

                                item.setText(str(item.val))
                    # Floats
Severity: Major
Found in wordless/wl_widgets/wl_tables.py - About 45 mins to fix
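
Inverting the condition into an early continue (a guard clause) keeps the loop body at a single indentation level. A minimal sketch; hidden_rows and cells are illustrative stand-ins for the Qt model calls in wl_tables.py:

    # Illustrative stand-ins for the Qt model calls in wl_tables.py
    hidden_rows = {1}
    cells = {0: 3.14, 1: 2.72, 2: 1.41}

    for row, val in cells.items():
        if row in hidden_rows:
            continue  # guard clause: skip hidden rows instead of indenting the real work

        print(f'row {row}: {val}')
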

Avoid deeply nested control flow statements.
Open

                    if token_properties:
                        i_tag_start += len(doc)

Severity: Major
Found in wordless/wl_nlp/wl_dependency_parsing.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if settings_limit_searching == _tr('Wl_Worker_Collocation_Extractor', 'None'):
                                tokens_right = tokens[i + ngram_size + window_left - 1 : i + ngram_size + window_right]
                            else:
                                # Span positions (Right)
                                for position in range(i + ngram_size + window_left - 1, i + ngram_size + window_right):
Severity: Major
Found in wordless/wl_collocation_extractor.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            if settings_limit_searching == _tr('Wl_Worker_Collocation_Extractor', 'Within sentence segments'):
                                offsets_unit = offsets_sentence_segs
                                len_unit = len_sentence_segs
                            elif settings_limit_searching == _tr('Wl_Worker_Collocation_Extractor', 'Within sentences'):
                                offsets_unit = offsets_sentences
Severity: Major
Found in wordless/wl_collocation_extractor.py - About 45 mins to fix
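
An if/elif chain that only selects between pairs of variables can be replaced with a lookup table, which caps the nesting regardless of how many options are added. A minimal sketch with illustrative keys and values; the real code keys on strings translated via _tr():

    # Illustrative offsets and lengths; the real code keys on translated strings
    offsets_sentence_segs, len_sentence_segs = [0, 3, 7], 3
    offsets_sentences, len_sentences = [0, 7], 2

    units = {
        'Within sentence segments': (offsets_sentence_segs, len_sentence_segs),
        'Within sentences': (offsets_sentences, len_sentences),
    }

    settings_limit_searching = 'Within sentences'
    offsets_unit, len_unit = units[settings_limit_searching]

    print(offsets_unit, len_unit)  # [0, 7] 2
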

Avoid deeply nested control flow statements.
Open

                        with open(file_settings_display_lang, 'wb') as f:
                            pickle.dump(action.lang, f)

                        # Remove settings file
                        if os.path.exists(file_settings):
Severity: Major
Found in wordless/wl_main.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                            for j, collocate in enumerate(reversed(tokens_left)):
                                if wl_matching.check_context(
                                    i, tokens,
                                    context_settings = settings['search_settings']['context_settings'],
                                    search_terms_incl = search_terms_incl,
Severity: Major
Found in wordless/wl_collocation_extractor.py - About 45 mins to fix

Avoid deeply nested control flow statements.
Open

                        for file in glob.glob(os.path.join(
                            self.settings_custom['general']['imp']['temp_files']['default_path'], '*.*'
                        )):
                            os.remove(file)

Severity: Major
Found in wordless/wl_main.py - About 45 mins to fix