Checking kit integrity on import.
@@ -55,8 +55,8 @@ version_path_separator = os # Use os.pathsep. Default configuration used for ne
# are written from script.py.mako
# output_encoding = utf-8

; sqlalchemy.url = sqlite:///L:\Robotics Laboratory Support\Submissions\submissions.db
sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\Archives\DB_backups\submissions-20230213.db
sqlalchemy.url = sqlite:///L:\Robotics Laboratory Support\Submissions\submissions.db
; sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\Archives\DB_backups\submissions-20230213.db


[post_write_hooks]

@@ -0,0 +1,59 @@
"""added versions to ref/kraken

Revision ID: 3d80e4a17a26
Revises: 785bb1140878
Create Date: 2023-03-02 13:09:30.750398

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import sqlite

# revision identifiers, used by Alembic.
revision = '3d80e4a17a26'
down_revision = '785bb1140878'
branch_labels = None
depends_on = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # op.drop_table('_alembic_tmp__submissions')
    with op.batch_alter_table('_control_samples', schema=None) as batch_op:
        batch_op.add_column(sa.Column('refseq_version', sa.String(length=16), nullable=True))
        batch_op.add_column(sa.Column('kraken2_version', sa.String(length=16), nullable=True))
        batch_op.add_column(sa.Column('kraken2_db_version', sa.String(length=32), nullable=True))

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###

    with op.batch_alter_table('_control_samples', schema=None) as batch_op:
        batch_op.drop_column('kraken2_db_version')
        batch_op.drop_column('kraken2_version')
        batch_op.drop_column('refseq_version')

    # op.create_table('_alembic_tmp__submissions',
    # sa.Column('id', sa.INTEGER(), nullable=False),
    # sa.Column('rsl_plate_num', sa.VARCHAR(length=32), nullable=False),
    # sa.Column('submitter_plate_num', sa.VARCHAR(length=127), nullable=True),
    # sa.Column('submitted_date', sa.TIMESTAMP(), nullable=True),
    # sa.Column('submitting_lab_id', sa.INTEGER(), nullable=True),
    # sa.Column('sample_count', sa.INTEGER(), nullable=True),
    # sa.Column('extraction_kit_id', sa.INTEGER(), nullable=True),
    # sa.Column('submission_type', sa.VARCHAR(length=32), nullable=True),
    # sa.Column('technician', sa.VARCHAR(length=64), nullable=True),
    # sa.Column('reagents_id', sa.VARCHAR(), nullable=True),
    # sa.Column('extraction_info', sqlite.JSON(), nullable=True),
    # sa.Column('run_cost', sa.FLOAT(), nullable=True),
    # sa.Column('uploaded_by', sa.VARCHAR(length=32), nullable=True),
    # sa.ForeignKeyConstraint(['extraction_kit_id'], ['_kits.id'], ondelete='SET NULL'),
    # sa.ForeignKeyConstraint(['reagents_id'], ['_reagents.id'], ondelete='SET NULL'),
    # sa.ForeignKeyConstraint(['submitting_lab_id'], ['_organizations.id'], ondelete='SET NULL'),
    # sa.PrimaryKeyConstraint('id'),
    # sa.UniqueConstraint('rsl_plate_num'),
    # sa.UniqueConstraint('submitter_plate_num')
    # )
    # ### end Alembic commands ###
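
For context, a migration like this one is applied with Alembic's standard tooling; a minimal sketch of driving it programmatically, assuming an alembic.ini that points at the target database as configured above:

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")       # reads sqlalchemy.url from the ini section above
command.upgrade(cfg, "head")      # applies pending revisions, including 3d80e4a17a26
# command.downgrade(cfg, "-1")    # would revert one revision, dropping the new columns
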
@@ -1,6 +1,6 @@
# __init__.py

# Version of the realpython-reader package
__version__ = "202302.3b"
__version__ = "202303.1b"
__author__ = {"name":"Landon Wark", "email":"Landon.Wark@phac-aspc.gc.ca"}
__copyright__ = "2022-2023, Government of Canada"

@@ -576,17 +576,23 @@ def get_all_controls_by_type(ctx:dict, con_type:str, start_date:date|None=None,
        list: Control instances.
    """

    # logger.debug(f"Using dates: {start_date} to {end_date}")
    query = ctx['database_session'].query(models.ControlType).filter_by(name=con_type)
    try:
        output = query.first().instances
    except AttributeError:
        output = None
    # Hacky solution to my not being able to get the sql query to work.
    logger.debug(f"Using dates: {start_date} to {end_date}")
    if start_date != None and end_date != None:
        output = [item for item in output if item.submitted_date.date() > start_date and item.submitted_date.date() < end_date]
        # logger.debug(f"Type {con_type}: {query.first()}")
        output = ctx['database_session'].query(models.Control).join(models.ControlType).filter_by(name=con_type).filter(models.Control.submitted_date.between(start_date.strftime("%Y-%m-%d"), end_date.strftime("%Y-%m-%d"))).all()
    else:
        output = ctx['database_session'].query(models.Control).join(models.ControlType).filter_by(name=con_type).all()
    logger.debug(f"Returned controls between dates: {output}")
    return output
    # query = ctx['database_session'].query(models.ControlType).filter_by(name=con_type)
    # try:
    # output = query.first().instances
    # except AttributeError:
    # output = None
    # # Hacky solution to my not being able to get the sql query to work.
    # if start_date != None and end_date != None:
    # output = [item for item in output if item.submitted_date.date() > start_date and item.submitted_date.date() < end_date]
    # # logger.debug(f"Type {con_type}: {query.first()}")
    # return output


def get_control_subtypes(ctx:dict, type:str, mode:str) -> list[str]:

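
The date filtering here moves from a Python-side list comprehension into the SQL query itself via between(). A minimal, self-contained sketch of the same pattern, using simplified stand-in models rather than the project's real Control/ControlType classes:

from datetime import datetime
from sqlalchemy import Column, ForeignKey, Integer, String, TIMESTAMP, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

class ControlType(Base):
    __tablename__ = "control_types"
    id = Column(Integer, primary_key=True)
    name = Column(String(32))
    instances = relationship("Control", back_populates="controltype")

class Control(Base):
    __tablename__ = "controls"
    id = Column(Integer, primary_key=True)
    name = Column(String(32))
    submitted_date = Column(TIMESTAMP)
    controltype_id = Column(Integer, ForeignKey("control_types.id"))
    controltype = relationship("ControlType", back_populates="instances")

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    ct = ControlType(name="MCS")
    session.add_all([ct, Control(name="MCS-001", submitted_date=datetime(2023, 2, 1), controltype=ct)])
    session.commit()
    # filter_by(name=...) applies to ControlType, the last join target;
    # between() keeps the date-range check inside the SQL statement.
    controls = (session.query(Control)
                .join(ControlType)
                .filter_by(name="MCS")
                .filter(Control.submitted_date.between("2023-01-01", "2023-03-01"))
                .all())
    print(controls)
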
@@ -39,9 +39,18 @@ class Control(Base):
    # UniqueConstraint('name', name='uq_control_name')
    submission_id = Column(INTEGER, ForeignKey("_submissions.id")) #: parent submission id
    submission = relationship("BacterialCulture", back_populates="controls", foreign_keys=[submission_id]) #: parent submission
    refseq_version = Column(String(16))
    kraken2_version = Column(String(16))
    kraken2_db_version = Column(String(32))


    def to_sub_dict(self):
    def to_sub_dict(self) -> dict:
        """
        Converts object into convenient dictionary for use in submission summary

        Returns:
            dict: output dictionary containing: Name, Type, Targets, Top Kraken results
        """
        kraken = json.loads(self.kraken)
        kraken_cnt_total = sum([kraken[item]['kraken_count'] for item in kraken])
        new_kraken = []
@@ -61,3 +70,46 @@ class Control(Base):
        }
        return output

    def convert_by_mode(self, mode:str) -> list[dict]:
        """
        split control object into analysis types

        Args:
            control (models.Control): control to be parsed into list
            mode (str): analysis type

        Returns:
            list[dict]: list of records
        """
        output = []
        data = json.loads(getattr(self, mode))
        # if len(data) == 0:
        # data = self.create_dummy_data(mode)
        logger.debug(f"Length of data: {len(data)}")
        for genus in data:
            _dict = {}
            _dict['name'] = self.name
            _dict['submitted_date'] = self.submitted_date
            _dict['genus'] = genus
            _dict['target'] = 'Target' if genus.strip("*") in self.controltype.targets else "Off-target"

            for key in data[genus]:
                _dict[key] = data[genus][key]
                if _dict[key] == {}:
                    print(self.name, mode)
            output.append(_dict)
        # logger.debug(output)
        return output

    def create_dummy_data(self, mode):
        match mode:
            case "contains":
                data = {"Nothing": {"contains_hashes":"0/400", "contains_ratio":0.0}}
            case "matches":
                data = {"Nothing": {"matches_hashes":"0/400", "matches_ratio":0.0}}
            case "kraken":
                data = {"Nothing": {"kraken_percent":0.0, "kraken_count":0}}
            case _:
                data = {}
        return data

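
convert_by_mode flattens one control's stored JSON into one record per genus, tagged with the control's name, date and target status. A standalone sketch of that shape, with illustrative values rather than real project data:

import json

name = "MCS-001"
submitted_date = "2023-03-02"
targets = ["Escherichia"]
kraken_json = json.dumps({
    "Escherichia": {"kraken_count": 900, "kraken_percent": 90.0},
    "Listeria*": {"kraken_count": 100, "kraken_percent": 10.0},
})

records = []
for genus, fields in json.loads(kraken_json).items():
    record = {
        "name": name,
        "submitted_date": submitted_date,
        "genus": genus,
        # a trailing "*" marks flagged genera; strip it before the target check
        "target": "Target" if genus.strip("*") in targets else "Off-target",
    }
    record.update(fields)
    records.append(record)
print(records)
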
@@ -4,6 +4,7 @@ from sqlalchemy.orm import relationship
from datetime import datetime as dt
import logging
import json
from json.decoder import JSONDecodeError

logger = logging.getLogger(f"submissions.{__name__}")

@@ -74,6 +75,9 @@ class BasicSubmission(Base):
            ext_info = json.loads(self.extraction_info)
        except TypeError:
            ext_info = None
        except JSONDecodeError as e:
            ext_info = None
            logger.debug(f"Json error in {self.rsl_plate_num}: {e}")
        try:
            reagents = [item.to_sub_dict() for item in self.reagents]
        except:

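
The new JSONDecodeError handler means a missing or malformed extraction_info logs and falls back to None instead of raising. A small sketch of that defensive pattern (the helper name here is illustrative):

import json
from json.decoder import JSONDecodeError

def load_ext_info(raw):
    try:
        return json.loads(raw)
    except TypeError:          # column was never populated (None)
        return None
    except JSONDecodeError:    # column holds something that is not valid JSON
        return None

print(load_ext_info(None), load_ext_info("{not json"), load_ext_info('[{"start_time": "09:00"}]'))
# None None [{'start_time': '09:00'}]
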
@@ -1,10 +1,11 @@

from pandas import DataFrame, concat
from backend.db import models
from operator import itemgetter
# from backend.db import models
import json
import logging
from jinja2 import Environment, FileSystemLoader
from datetime import date
from datetime import date, timedelta
import sys
from pathlib import Path

@@ -139,31 +140,32 @@ def make_report_html(df:DataFrame, start_date:date, end_date:date) -> str:
    # dfs['name'] = df
    # return dfs

def convert_control_by_mode(ctx:dict, control:models.Control, mode:str) -> list[dict]:
    """
    split control object into analysis types
# def convert_control_by_mode(ctx:dict, control:models.Control, mode:str) -> list[dict]:
# """
# split control object into analysis types... can I move this into the class itself?
# turns out I can

    Args:
        ctx (dict): settings passed from gui
        control (models.Control): control to be parsed into list
        mode (str): analysis type
# Args:
# ctx (dict): settings passed from gui
# control (models.Control): control to be parsed into list
# mode (str): analysis type

    Returns:
        list[dict]: list of records
    """
    output = []
    data = json.loads(getattr(control, mode))
    for genus in data:
        _dict = {}
        _dict['name'] = control.name
        _dict['submitted_date'] = control.submitted_date
        _dict['genus'] = genus
        _dict['target'] = 'Target' if genus.strip("*") in control.controltype.targets else "Off-target"
        for key in data[genus]:
            _dict[key] = data[genus][key]
        output.append(_dict)
    # logger.debug(output)
    return output
# Returns:
# list[dict]: list of records
# """
# output = []
# data = json.loads(getattr(control, mode))
# for genus in data:
# _dict = {}
# _dict['name'] = control.name
# _dict['submitted_date'] = control.submitted_date
# _dict['genus'] = genus
# _dict['target'] = 'Target' if genus.strip("*") in control.controltype.targets else "Off-target"
# for key in data[genus]:
# _dict[key] = data[genus][key]
# output.append(_dict)
# # logger.debug(output)
# return output


def convert_data_list_to_df(ctx:dict, input:list[dict], subtype:str|None=None) -> DataFrame:

@@ -178,17 +180,81 @@ def convert_data_list_to_df(ctx:dict, input:list[dict], subtype:str|None=None) -
    Returns:
        DataFrame: _description_
    """
    # copy = input
    # for item in copy:
    # item['submitted_date'] = item['submitted_date'].strftime("%Y-%m-%d")
    # with open("controls.json", "w") as f:
    # f.write(json.dumps(copy))
    # for item in input:
    # logger.debug(item.keys())
    df = DataFrame.from_records(input)
    df.to_excel("test.xlsx", engine="openpyxl")
    safe = ['name', 'submitted_date', 'genus', 'target']
    # logger.debug(df)
    for column in df.columns:
        if "percent" in column:
            count_col = [item for item in df.columns if "count" in item][0]
            # The actual percentage from kraken was off due to exclusion of NaN, recalculating.
            df[column] = 100 * df[count_col] / df.groupby('submitted_date')[count_col].transform('sum')
            # df[column] = 100 * df[count_col] / df.groupby('submitted_date')[count_col].transform('sum')
            df[column] = 100 * df[count_col] / df.groupby('name')[count_col].transform('sum')
        if column not in safe:
            if subtype != None and column != subtype:
                del df[column]
    # logger.debug(df)
    # df.sort_values('submitted_date').to_excel("controls.xlsx", engine="openpyxl")
    df = displace_date(df)
    df.sort_values('submitted_date').to_excel("controls.xlsx", engine="openpyxl")
    df = df_column_renamer(df=df)
    return df


def df_column_renamer(df:DataFrame) -> DataFrame:
    """
    Ad hoc function I created to clarify some fields

    Args:
        df (DataFrame): input dataframe

    Returns:
        DataFrame: dataframe with 'clarified' column names
    """
    df = df[df.columns.drop(list(df.filter(regex='_hashes')))]
    return df.rename(columns = {
        "contains_ratio":"contains_shared_hashes_ratio",
        "matches_ratio":"matches_shared_hashes_ratio",
        "kraken_count":"kraken2_read_count",
        "kraken_percent":"kraken2_read_percent"
    })


def displace_date(df:DataFrame) -> DataFrame:
    """
    This function serves to split samples that were submitted on the same date by incrementing dates.

    Args:
        df (DataFrame): input dataframe composed of control records

    Returns:
        DataFrame: output dataframe with dates incremented.
    """
    # dict_list = []
    # for item in df['name'].unique():
    # dict_list.append(dict(name=item, date=df[df.name == item].iloc[0]['submitted_date']))
    logger.debug(f"Unique items: {df['name'].unique()}")
    # logger.debug(df.to_string())
    # the assumption is that closest names will have closest dates...
    dict_list = [dict(name=item, date=df[df.name == item].iloc[0]['submitted_date']) for item in sorted(df['name'].unique())]
    for ii, item in enumerate(dict_list):
        # if ii > 0:
        try:
            check = item['date'] == dict_list[ii-1]['date']
        except IndexError:
            check = False
        if check:
            logger.debug(f"We found one! Increment date!\n{item['date'] - timedelta(days=1)}")
            mask = df['name'] == item['name']
            # logger.debug(f"We will increment dates in: {df.loc[mask, 'submitted_date']}")
            df.loc[mask, 'submitted_date'] = df.loc[mask, 'submitted_date'].apply(lambda x: x + timedelta(days=1))
            # logger.debug(f"Do these look incremented: {df.loc[mask, 'submitted_date']}")
    return df

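
The percent recalculation now groups by 'name' rather than 'submitted_date', so each control's kraken percentages are re-derived from that control's own counts. A standalone sketch with toy values:

from pandas import DataFrame

df = DataFrame({
    "name": ["MCS-001", "MCS-001", "MCS-002"],
    "genus": ["Escherichia", "Listeria", "Escherichia"],
    "kraken_count": [900, 100, 400],
})
# Re-derive each row's percentage from its raw count, normalised within its own
# control ('name'), so rows excluded elsewhere as NaN do not skew the totals.
df["kraken_percent"] = 100 * df["kraken_count"] / df.groupby("name")["kraken_count"].transform("sum")
print(df)
# expected: 90.0 and 10.0 for MCS-001, 100.0 for MCS-002
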
@@ -20,17 +20,18 @@ from xhtml2pdf import pisa
import yaml
import pprint
from backend.excel.parser import SheetParser
from backend.excel.reports import convert_control_by_mode, convert_data_list_to_df
from backend.excel.reports import convert_data_list_to_df
from backend.db import (construct_submission_info, lookup_reagent,
    construct_reagent, store_reagent, store_submission, lookup_kittype_by_use,
    construct_reagent, store_submission, lookup_kittype_by_use,
    lookup_regent_by_type_name, lookup_all_orgs, lookup_submissions_by_date_range,
    get_all_Control_Types_names, create_kit_from_yaml, get_all_available_modes, get_all_controls_by_type,
    get_control_subtypes, lookup_all_submissions_by_type, get_all_controls, lookup_submission_by_rsl_num,
    create_org_from_yaml
)
from backend.db import lookup_kittype_by_name

from .functions import check_kit_integrity
from tools import check_not_nan
from tools import check_not_nan, create_reagent_list
from backend.excel.reports import make_report_xlsx, make_report_html

import numpy
@@ -108,8 +109,8 @@ class App(QMainWindow):
        self.importAction = QAction("&Import", self)
        self.addReagentAction = QAction("Add Reagent", self)
        self.generateReportAction = QAction("Make Report", self)
        self.addKitAction = QAction("Add Kit", self)
        self.addOrgAction = QAction("Add Org", self)
        self.addKitAction = QAction("Import Kit", self)
        self.addOrgAction = QAction("Import Org", self)
        self.joinControlsAction = QAction("Link Controls")
        self.joinExtractionAction = QAction("Link Ext Logs")
        self.helpAction = QAction("&About", self)
@@ -137,6 +138,40 @@ class App(QMainWindow):
        about = AlertPop(message=output, status="information")
        about.exec()

    def insert_reagent_import(self, item:str, prsr:SheetParser|None=None) -> QComboBox:
        add_widget = QComboBox()
        add_widget.setEditable(True)
        # Ensure that all reagenttypes have a name that matches the items in the excel parser
        query_var = item.replace("lot_", "")
        logger.debug(f"Query for: {query_var}")
        if prsr != None:
            if isinstance(prsr.sub[item], numpy.float64):
                logger.debug(f"{prsr.sub[item]['lot']} is a numpy float!")
                try:
                    prsr.sub[item] = int(prsr.sub[item]['lot'])
                except ValueError:
                    pass
        # query for reagents using type name from sheet and kit from sheet
        logger.debug(f"Attempting lookup of reagents by type: {query_var}")
        # below was lookup_reagent_by_type_name_and_kit_name, but I couldn't get it to work.
        relevant_reagents = [item.__str__() for item in lookup_regent_by_type_name(ctx=self.ctx, type_name=query_var)]#, kit_name=prsr.sub['extraction_kit'])]
        output_reg = []
        for reagent in relevant_reagents:
            if isinstance(reagent, set):
                for thing in reagent:
                    output_reg.append(thing)
            elif isinstance(reagent, str):
                output_reg.append(reagent)
        relevant_reagents = output_reg
        # if reagent in sheet is not found insert it into items
        if prsr != None:
            logger.debug(f"Relevant reagents for {prsr.sub[item]}: {relevant_reagents}")
            if str(prsr.sub[item]['lot']) not in relevant_reagents and prsr.sub[item]['lot'] != 'nan':
                if check_not_nan(prsr.sub[item]['lot']):
                    relevant_reagents.insert(0, str(prsr.sub[item]['lot']))
        logger.debug(f"New relevant reagents: {relevant_reagents}")
        add_widget.addItems(relevant_reagents)
        return add_widget

    def importSubmission(self):
        """
@@ -155,6 +190,7 @@ class App(QMainWindow):
        try:
            prsr = SheetParser(fname, **self.ctx)
        except PermissionError:
            logger.error(f"Couldn't get permission to access file: {fname}")
            return
        logger.debug(f"prsr.sub = {prsr.sub}")
        # destroy any widgets from previous imports
@@ -169,6 +205,7 @@ class App(QMainWindow):
            (?P<samples>)^samples$ |
            (?P<reagent>^lot_.*$)
            """, re.VERBOSE)
        # reagents = []
        for item in prsr.sub:
            logger.debug(f"Item: {item}")
            # attempt to match variable name to regex group
@@ -207,6 +244,7 @@ class App(QMainWindow):
                        add_widget.addItems(uses)
                    else:
                        add_widget.addItems(['bacterial_culture'])
                    self.ext_kit = prsr.sub[item]
                case 'submitted_date':
                    # create label
                    self.table_widget.formlayout.addWidget(QLabel(item.replace("_", " ").title()))
@@ -219,40 +257,41 @@ class App(QMainWindow):
                    except:
                        add_widget.setDate(date.today())
                case 'reagent':
                    # TODO make this a function so I can add in missing reagents below when checking kit integrity.
                    # create label
                    self.table_widget.formlayout.addWidget(QLabel(item.replace("_", " ").title()))
                    add_widget = QComboBox()
                    add_widget.setEditable(True)
                    # Ensure that all reagenttypes have a name that matches the items in the excel parser
                    query_var = item.replace("lot_", "")
                    logger.debug(f"Query for: {query_var}")
                    if isinstance(prsr.sub[item], numpy.float64):
                        logger.debug(f"{prsr.sub[item]['lot']} is a numpy float!")
                        try:
                            prsr.sub[item] = int(prsr.sub[item]['lot'])
                        except ValueError:
                            pass
                    # query for reagents using type name from sheet and kit from sheet
                    logger.debug(f"Attempting lookup of reagents by type: {query_var}")
                    # below was lookup_reagent_by_type_name_and_kit_name, but I couldn't get it to work.
                    relevant_reagents = [item.__str__() for item in lookup_regent_by_type_name(ctx=self.ctx, type_name=query_var)]#, kit_name=prsr.sub['extraction_kit'])]
                    output_reg = []
                    for reagent in relevant_reagents:
                        if isinstance(reagent, set):
                            for thing in reagent:
                                output_reg.append(thing)
                        elif isinstance(reagent, str):
                            output_reg.append(reagent)
                    relevant_reagents = output_reg
                    logger.debug(f"Relevant reagents for {prsr.sub[item]}: {relevant_reagents}")
                    # if reagent in sheet is not found insert it into items
                    if str(prsr.sub[item]['lot']) not in relevant_reagents and prsr.sub[item]['lot'] != 'nan':
                        if check_not_nan(prsr.sub[item]['lot']):
                            relevant_reagents.insert(0, str(prsr.sub[item]['lot']))
                    logger.debug(f"New relevant reagents: {relevant_reagents}")
                    add_widget.addItems(relevant_reagents)
                    # add_widget = QComboBox()
                    # add_widget.setEditable(True)
                    # # Ensure that all reagenttypes have a name that matches the items in the excel parser
                    # query_var = item.replace("lot_", "")
                    # logger.debug(f"Query for: {query_var}")
                    # if isinstance(prsr.sub[item], numpy.float64):
                    # logger.debug(f"{prsr.sub[item]['lot']} is a numpy float!")
                    # try:
                    # prsr.sub[item] = int(prsr.sub[item]['lot'])
                    # except ValueError:
                    # pass
                    # # query for reagents using type name from sheet and kit from sheet
                    # logger.debug(f"Attempting lookup of reagents by type: {query_var}")
                    # # below was lookup_reagent_by_type_name_and_kit_name, but I couldn't get it to work.
                    # relevant_reagents = [item.__str__() for item in lookup_regent_by_type_name(ctx=self.ctx, type_name=query_var)]#, kit_name=prsr.sub['extraction_kit'])]
                    # output_reg = []
                    # for reagent in relevant_reagents:
                    # if isinstance(reagent, set):
                    # for thing in reagent:
                    # output_reg.append(thing)
                    # elif isinstance(reagent, str):
                    # output_reg.append(reagent)
                    # relevant_reagents = output_reg
                    # logger.debug(f"Relevant reagents for {prsr.sub[item]}: {relevant_reagents}")
                    # # if reagent in sheet is not found insert it into items
                    # if str(prsr.sub[item]['lot']) not in relevant_reagents and prsr.sub[item]['lot'] != 'nan':
                    # if check_not_nan(prsr.sub[item]['lot']):
                    # relevant_reagents.insert(0, str(prsr.sub[item]['lot']))
                    # logger.debug(f"New relevant reagents: {relevant_reagents}")
                    # add_widget.addItems(relevant_reagents)
                    add_widget = self.insert_reagent_import(item, prsr=prsr)
                    self.reagents[item] = prsr.sub[item]
                    # TODO: make samples not appear in frame.
                case 'samples':
                    # hold samples in 'self' until form submitted
                    logger.debug(f"{item}: {prsr.sub[item]}")
@@ -263,6 +302,17 @@ class App(QMainWindow):
                    add_widget = QLineEdit()
                    add_widget.setText(str(prsr.sub[item]).replace("_", " "))
            self.table_widget.formlayout.addWidget(add_widget)
        # compare self.reagents with expected reagents in kit
        if hasattr(self, 'ext_kit'):
            kit = lookup_kittype_by_name(ctx=self.ctx, name=self.ext_kit)
            kit_integrity = check_kit_integrity(kit, [item.replace("lot_", "") for item in self.reagents])
            if kit_integrity != None:
                msg = AlertPop(message=kit_integrity['message'], status="critical")
                msg.exec()
                for item in kit_integrity['missing']:
                    self.table_widget.formlayout.addWidget(QLabel(f"Lot {item.replace('_', ' ').title()}"))
                    add_widget =self.insert_reagent_import(item)
                    self.table_widget.formlayout.addWidget(add_widget)
        # create submission button
        submit_btn = QPushButton("Submit")
        self.table_widget.formlayout.addWidget(submit_btn)
@@ -580,11 +630,14 @@ class App(QMainWindow):
            # return
            fig = None
        else:
            data = []
            for control in controls:
            # data = []
            # for control in controls:
            # # change each control to list of dicts
            # # dicts = convert_control_by_mode(ctx=self.ctx, control=control, mode=self.mode)
            # dicts = control.convert_by_mode(mode=self.mode)
            # data.append(dicts)
                # change each control to list of dicts
                dicts = convert_control_by_mode(ctx=self.ctx, control=control, mode=self.mode)
                data.append(dicts)
            data = [control.convert_by_mode(mode=self.mode) for control in controls]
            # flatten data to one dimensional list
            data = [item for sublist in data for item in sublist]
            # logger.debug(data)
@@ -677,6 +730,7 @@ class App(QMainWindow):
        with open(fname.__str__(), 'r') as f:
            runs = [col.strip().split(",") for col in f.readlines()]
        # check = []
        count = 0
        for run in runs:
            obj = dict(
                start_time=run[0].strip(),
@@ -693,21 +747,42 @@ class App(QMainWindow):
            sub = lookup_submission_by_rsl_num(ctx=self.ctx, rsl_num=obj['rsl_plate_num'])
            try:
                logger.debug(f"Found submission: {sub.rsl_plate_num}")
                count += 1
            except AttributeError:
                continue
            output = json.dumps(obj)
            # output = json.dumps(obj)
            if sub.extraction_info != None:
                # try:
                # logger.debug(f"Attempting update on ext info: {sub.extraction_info} for {sub.rsl_plate_num}")
                existing = json.loads(sub.extraction_info)
                # except:
                # existing = None
            else:
                existing = None
            try:
                if output in sub.extraction_info:
                if json.dumps(obj) in sub.extraction_info:
                    logger.debug(f"Looks like we already have that info.")
                    continue
            except TypeError:
                pass
            if existing != None:
                try:
                    sub.extraction_info += output
                    # sub.extraction_info += output
                    logger.debug(f"Updating {type(existing)}: {existing} with {type(obj)}: {obj}")
                    existing.append(obj)
                    logger.debug(f"Setting: {existing}")
                    sub.extraction_info = json.dumps(existing)
                except TypeError:
                    sub.extraction_info = output
                    logger.error(f"Error updating!")
                    sub.extraction_info = json.dumps([obj])
                logger.debug(f"Final ext info for {sub.rsl_plate_num}: {sub.extraction_info}")
            else:
                sub.extraction_info = json.dumps([obj])
            self.ctx['database_session'].add(sub)
            self.ctx["database_session"].commit()
        dlg = AlertPop(message=f"We added {count} logs to the database.", status='information')
        dlg.exec()



class AddSubForm(QWidget):

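
The extraction-log linking now treats extraction_info as a JSON-encoded list of run dicts: decode, append, re-serialise, rather than concatenating raw JSON strings. A standalone sketch of that handling with toy values:

import json

extraction_info = json.dumps([{"rsl_plate_num": "RSL-0001", "start_time": "2023-03-01 09:00"}])
new_run = {"rsl_plate_num": "RSL-0001", "start_time": "2023-03-02 10:30"}

existing = json.loads(extraction_info) if extraction_info is not None else None
if existing is None:
    extraction_info = json.dumps([new_run])           # first run ever logged for this plate
elif json.dumps(new_run) not in extraction_info:      # crude duplicate check, as in the code above
    existing.append(new_run)
    extraction_info = json.dumps(existing)
print(extraction_info)
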
@@ -260,6 +260,7 @@ class ControlsDatePicker(QWidget):

        self.start_date = QDateEdit(calendarPopup=True)
        # start date is three month prior to end date by default
        # edit: 2 month, but the variable name is the same cause I'm lazy
        threemonthsago = QDate.currentDate().addDays(-60)
        self.start_date.setDate(threemonthsago)
        self.end_date = QDateEdit(calendarPopup=True)

@@ -1,14 +1,20 @@
# from ..models import *
from backend.db.models import *
# from backend.db import lookup_kittype_by_name
import logging
import numpy as np

logger = logging.getLogger(f"submissions.{__name__}")

def check_kit_integrity(sub:BasicSubmission):
def check_kit_integrity(sub:BasicSubmission|KitType, reagenttypes:list|None=None) -> dict|None:
    logger.debug(type(sub))
    match sub:
        case BasicSubmission():
            ext_kit_rtypes = [reagenttype.name for reagenttype in sub.extraction_kit.reagent_types]
            logger.debug(f"Kit reagents: {ext_kit_rtypes}")
            reagenttypes = [reagent.type.name for reagent in sub.reagents]
        case KitType():
            ext_kit_rtypes = [reagenttype.name for reagenttype in sub.reagent_types]
            logger.debug(f"Kit reagents: {ext_kit_rtypes}")
    logger.debug(f"Submission reagents: {reagenttypes}")
    check = set(ext_kit_rtypes) == set(reagenttypes)
    logger.debug(f"Checking if reagents match kit contents: {check}")
@@ -17,7 +23,11 @@ def check_kit_integrity(sub:BasicSubmission):
    if check:
        result = None
    else:
        result = {'message' : f"Couldn't verify reagents match listed kit components.\n\nIt looks like you are missing: {[x.upper() for x in ext_kit_rtypes if x not in common]}\n\nAlternatively, you may have set the wrong extraction kit."}
        missing = [x for x in ext_kit_rtypes if x not in common]
        result = {'message' : f"Couldn't verify reagents match listed kit components.\n\nIt looks like you are missing: {[item.upper() for item in missing]}\n\nAlternatively, you may have set the wrong extraction kit.", 'missing': missing}
    return result


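
check_kit_integrity now accepts either a submission or a bare kit plus a list of reagent type names, and reports the missing types so the import form can prompt for them. A minimal, self-contained sketch of that pattern, using simplified stand-ins rather than the project's SQLAlchemy models:

from dataclasses import dataclass, field

@dataclass
class Kit:
    reagent_types: list[str] = field(default_factory=list)

@dataclass
class Submission:
    extraction_kit: Kit = field(default_factory=Kit)
    reagents: list[str] = field(default_factory=list)

def check_kit_integrity(sub: Submission | Kit, reagenttypes: list | None = None) -> dict | None:
    # dispatch on what was passed in: a full submission carries its own reagent list,
    # a bare kit relies on the caller-supplied reagenttypes (e.g. parsed from the sheet)
    match sub:
        case Submission():
            expected = sub.extraction_kit.reagent_types
            reagenttypes = sub.reagents
        case Kit():
            expected = sub.reagent_types
    if set(expected) == set(reagenttypes):
        return None
    missing = [x for x in expected if x not in reagenttypes]
    return {"message": f"It looks like you are missing: {[m.upper() for m in missing]}", "missing": missing}

# e.g. a kit expecting three reagent types, with one absent from the imported sheet:
print(check_kit_integrity(Kit(["lysis_buffer", "wash_buffer", "elution_buffer"]),
                          ["lysis_buffer", "elution_buffer"]))
# {'message': "It looks like you are missing: ['WASH_BUFFER']", 'missing': ['wash_buffer']}
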
@@ -25,6 +25,7 @@ def create_charts(ctx:dict, df:pd.DataFrame, ytitle:str|None=None) -> Figure:
    genera = []
    if df.empty:
        return None

    for item in df['genus'].to_list():
        try:
            if item[-1] == "*":
@@ -41,7 +42,7 @@ def create_charts(ctx:dict, df:pd.DataFrame, ytitle:str|None=None) -> Figure:
    # sort by and exclude from
    sorts = ['submitted_date', "target", "genus"]
    exclude = ['name', 'genera']
    modes = [item for item in df.columns if item not in sorts and item not in exclude and "_hashes" not in item]
    modes = [item for item in df.columns if item not in sorts and item not in exclude]# and "_hashes" not in item]
    # Set descending for any columns that have "{mode}" in the header.
    ascending = [False if item == "target" else True for item in sorts]
    df = df.sort_values(by=sorts, ascending=ascending)

@@ -47,8 +47,9 @@
{% endfor %}
{% endif %}
{% if sub['ext_info'] %}
{% for entry in sub['ext_info'] %}
<h3><u>Extraction Status:</u></h3>
<p>{% for key, value in sub['ext_info'].items() %}
<p>{% for key, value in entry.items() %}
{% if loop.index == 1%}
{{ key|replace('_', ' ')|title() }}: {{ value }}<br>
{% else %}
@@ -59,6 +60,7 @@
{% endif %}
{% endif %}
{% endfor %}</p>
{% endfor %}
{% endif %}
</body>
</html>
@@ -19,8 +19,9 @@ Attached Controls:
{% for genera in item['kraken'] %}
{{ genera['name'] }}: {{ genera['kraken_count'] }} ({{ genera['kraken_percent'] }}){% endfor %}{% endif %}
{% endfor %}{% endif %}
{% if sub['ext_info'] %}
{% if sub['ext_info'] %}{% for entry in sub['ext_info'] %}
Extraction Status:
{% for key, value in sub['ext_info'].items() %}
{% for key, value in entry.items() %}
{{ key|replace('_', ' ')|title() }}: {{ value }}{% endfor %}
{% endfor %}
{% endif %}
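
Both templates now iterate ext_info as a list of run entries rather than a single dict. A small sketch of that rendering, with a hypothetical inline template string rather than the project's real template files:

from jinja2 import Template

template = Template(
    "{% for entry in sub['ext_info'] %}Extraction Status:\n"
    "{% for key, value in entry.items() %}{{ key|replace('_', ' ')|title() }}: {{ value }}\n"
    "{% endfor %}{% endfor %}"
)
sub = {"ext_info": [{"start_time": "2023-03-01 09:00", "rsl_plate_num": "RSL-0001"}]}
print(template.render(sub=sub))
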
@@ -23,3 +23,7 @@ def check_is_power_user(ctx:dict) -> bool:
        logger.debug(f"Check encounteded unknown error: {type(e).__name__} - {e}")
        check = False
    return check


def create_reagent_list(in_dict:dict) -> list[str]:
    return [item.strip("lot_") for item in in_dict.keys()]
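
One caution on create_reagent_list: str.strip("lot_") removes any of the characters l, o, t and _ from both ends of the string, not the literal "lot_" prefix, so a key like "lot_taq" would come back as "aq". A prefix-safe sketch of the same helper, with hypothetical keys:

def create_reagent_list(in_dict: dict) -> list[str]:
    # str.removeprefix (Python 3.9+) drops only the exact leading "lot_" text
    return [key.removeprefix("lot_") for key in in_dict.keys()]

print(create_reagent_list({"lot_wash_buffer": "A123", "lot_taq": "B456"}))
# ['wash_buffer', 'taq']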