/*

DESCRIPTION

    Some stored functions, views and procedures used in eHive


LICENSE

    Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
    Copyright [2016-2021] EMBL-European Bioinformatics Institute

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

         http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License
    is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and limitations under the License.

CONTACT

    Please subscribe to the Hive mailing list:  http://listserver.ebi.ac.uk/mailman/listinfo/ehive-users  to discuss Hive-related questions or to be notified of our updates

*/


-- show hive progress for analyses (turned into a view to give extra flexibility) ----------------
--
-- Thanks to Greg Jordan for the idea and the original version
--
-- Usage:
--       select * from progress;                                         # the whole table (may take ages to generate, depending on the size of your pipeline)
--       select * from progress where logic_name like 'family_blast%';   # only show family_blast-related analyses
--       select * from progress where retry_count>1;                     # only show jobs that have been tried more than once

CREATE OR REPLACE VIEW progress AS
    SELECT CONCAT( a.logic_name, '(', a.analysis_id, ')') analysis_name_and_id,
        MIN(rc.name) resource_class,
        j.status,
        j.retry_count,
        CASE WHEN j.status IS NULL THEN 0 ELSE count(*) END cnt,
        MIN(job_id) example_job_id
    FROM        analysis_base a
    LEFT JOIN   job j USING (analysis_id)
    LEFT JOIN   resource_class rc ON (a.resource_class_id=rc.resource_class_id)
    GROUP BY a.analysis_id, j.status, j.retry_count
    ORDER BY a.analysis_id, j.status;
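
-- For example (a sketch that only uses columns exposed by the progress view above;
-- the 'FAILED' status value is the same one referenced further down in this file):
--
--       select analysis_name_and_id, sum(cnt) total_jobs from progress group by analysis_name_and_id;    # roll the per-status rows up into one total per analysis
--       select * from progress where status='FAILED';                                                     # only show the FAILED slices of each analysis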


-- a convenient view over log_message that also exposes the (otherwise derivable) analysis_id and logic_name ------------
--
-- Usage:
--       select * from msg;
--       select * from msg where analysis_id=18;
--       select * from msg where logic_name like 'family_blast%';

CREATE OR REPLACE VIEW msg AS
    SELECT a.analysis_id, a.logic_name, m.*
    FROM log_message m
    LEFT JOIN role USING (role_id)
    LEFT JOIN analysis_base a USING (analysis_id);
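
-- For example (a sketch relying only on the analysis_id and logic_name columns added by the
-- view; all other columns come straight from log_message):
--
--       select logic_name, count(*) n_messages from msg group by analysis_id, logic_name order by n_messages desc;    # analyses producing the most messages first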


-- show statistics of Workers' real resource usage by analysis -------------------------------------------
--
-- Usage:
--       select * from resource_usage_stats;
--       select * from resource_usage_stats where logic_name like 'family_blast%';

CREATE OR REPLACE VIEW resource_usage_stats AS
    SELECT CONCAT(a.logic_name,'(',a.analysis_id,')') analysis,
           w.meadow_type,
           CONCAT(rc.name,'(',rc.resource_class_id,')') resource_class,
           u.exit_status,
           count(*) workers,
           min(mem_megs) AS min_mem_megs, round(avg(mem_megs),2) AS avg_mem_megs, max(mem_megs) AS max_mem_megs,
           min(swap_megs) AS min_swap_megs, round(avg(swap_megs),2) AS avg_swap_megs, max(swap_megs) AS max_swap_megs
    FROM resource_class rc
    JOIN analysis_base a USING(resource_class_id)
    LEFT JOIN role r USING(analysis_id)
    LEFT JOIN worker w USING(worker_id)
    LEFT JOIN worker_resource_usage u USING (worker_id)
    GROUP BY a.analysis_id, w.meadow_type, rc.resource_class_id, u.exit_status
    ORDER BY a.analysis_id, w.meadow_type, rc.resource_class_id, u.exit_status;
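
-- For example, to spot the heaviest memory consumers (a sketch over the columns of the view
-- above; the LIMIT is arbitrary):
--
--       select analysis, resource_class, workers, max_mem_megs from resource_usage_stats order by max_mem_megs desc limit 10;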


-- show the roles that are currently live (grouped by meadow_user, meadow_type, resource_class and analysis) -------
--
-- Usage:
--       select * from live_roles;
--       select * from live_roles where resource_class_id=12;

CREATE OR REPLACE VIEW live_roles AS
    SELECT w.meadow_user, w.meadow_type, w.resource_class_id, rc.name resource_class_name, r.analysis_id, a.logic_name, count(*)
    FROM worker w
    JOIN role r USING(worker_id)
    LEFT JOIN resource_class rc ON w.resource_class_id=rc.resource_class_id
    LEFT JOIN analysis_base a USING(analysis_id)
    WHERE r.when_finished IS NULL
    GROUP BY w.meadow_user, w.meadow_type, w.resource_class_id, rc.name, r.analysis_id, a.logic_name;
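
-- For example, to total the live roles per meadow_user (a sketch; the last column of the view is
-- unaliased, so here it is referred to by its literal derived name `count(*)`):
--
--       select meadow_user, sum(`count(*)`) live_roles_total from live_roles group by meadow_user;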


-- time an analysis or group of analyses (given by a name pattern) ----------------------------------------
--
-- Usage:
--       call time_analysis('%');                # time the whole pipeline
--       call time_analysis('load_uniprot%');    # time the group of analyses dealing with loading Uniprot members
--       call time_analysis('mcl');              # time one specific analysis

DROP PROCEDURE IF EXISTS time_analysis;
CREATE PROCEDURE time_analysis(IN analyses_pattern CHAR(64))
READS SQL DATA
    SELECT
        COUNT(*)-COUNT(when_finished) AS still_running,
        (UNIX_TIMESTAMP(CASE WHEN COUNT(*)>COUNT(when_finished) THEN CURRENT_TIMESTAMP ELSE max(when_finished) END) - UNIX_TIMESTAMP(min(when_started)))/60 AS measured_in_minutes,
        (UNIX_TIMESTAMP(CASE WHEN COUNT(*)>COUNT(when_finished) THEN CURRENT_TIMESTAMP ELSE max(when_finished) END) - UNIX_TIMESTAMP(min(when_started)))/3600 AS measured_in_hours,
        (UNIX_TIMESTAMP(CASE WHEN COUNT(*)>COUNT(when_finished) THEN CURRENT_TIMESTAMP ELSE max(when_finished) END) - UNIX_TIMESTAMP(min(when_started)))/3600/24 AS measured_in_days
        FROM role JOIN analysis_base USING (analysis_id)
        WHERE logic_name LIKE analyses_pattern;
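
-- Note that while any matching role is still running (still_running > 0), the end point of the
-- measurement is CURRENT_TIMESTAMP rather than max(when_finished), so the reported durations keep
-- growing until the last role finishes. For example (reusing a pattern from the examples above):
--
--       call time_analysis('family_blast%');    # wall-clock time spent so far on the family_blast-related analyses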


#### searches for a given string in job.input_id or analysis_data.data, and returns the matching jobs.
#
# Thanks to Greg Jordan for the idea and the original version
#
# Usage:
#       call job_search('other_415');           # return all jobs whose input_id or analysis_data.data matches the pattern

DROP PROCEDURE IF EXISTS job_search;
CREATE PROCEDURE job_search(IN srch CHAR(40))
READS SQL DATA
  SELECT
    a.analysis_id,
    a.logic_name,
    j.job_id AS job_id,
    j.status,
    j.retry_count,
    IFNULL(d.data, j.input_id) input_id
  FROM job j JOIN analysis_base a USING (analysis_id)
    LEFT JOIN analysis_data d ON j.input_id=concat('_ext_input_analysis_data_id ',d.analysis_data_id)
  WHERE j.input_id LIKE concat('%',srch,'%') OR d.data LIKE concat('%',srch,'%');
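
# Note that input_ids too long to be stored inline are kept in analysis_data and referenced from
# job.input_id using the '_ext_input_analysis_data_id <analysis_data_id>' convention; the LEFT JOIN
# and IFNULL() above dereference them, so the search covers both storage forms.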


############## reset failed jobs for analysis #############################################
#
# Usage:
#       call reset_failed_jobs_for_analysis('load_uniprot');    # reset failed jobs of this particular analysis

DROP PROCEDURE IF EXISTS reset_failed_jobs_for_analysis;
CREATE PROCEDURE reset_failed_jobs_for_analysis(IN param_logic_name char(64))
MODIFIES SQL DATA
    UPDATE job j JOIN analysis_base a USING (analysis_id)
    SET j.status='READY', j.retry_count=0
    WHERE a.logic_name=param_logic_name
    AND   j.status='FAILED';
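
# A typical sequence (a sketch; 'load_uniprot' is just the example logic_name from above) is to
# review the failures first and only then reset them:
#
#       select * from progress where status='FAILED';           # see which analyses have FAILED jobs
#       call reset_failed_jobs_for_analysis('load_uniprot');    # send this analysis' FAILED jobs back to READY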


############## drop hive tables ###########################################################
#
# Usage:
#       call drop_hive_tables;      # just drop them all
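#
# Note that the DROP TABLE list below appears to be ordered so that referencing tables (job, role,
# log_message, ...) go before the tables they reference (analysis_base, worker, resource_class, ...),
# which should keep the procedure usable even where foreign keys are enforced.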

DROP PROCEDURE IF EXISTS drop_hive_tables;
DELIMITER //
CREATE PROCEDURE drop_hive_tables()
MODIFIES SQL DATA
BEGIN
    DROP VIEW IF EXISTS msg, progress, resource_usage_stats, live_roles;
    DROP TABLE IF EXISTS
        pipeline_wide_parameters, analysis_stats_monitor, worker_resource_usage, resource_description,
        analysis_data, job_file, dataflow_target, dataflow_rule, analysis_ctrl_rule, analysis_stats,
        log_message, accu, job, role, worker, analysis_base, resource_class, hive_meta;
END; //
DELIMITER ;