Pre-cleanup

This commit is contained in:
Landon Wark
2023-11-01 08:59:58 -05:00
parent f3a7d75c6a
commit 22a23b7838
18 changed files with 665 additions and 636 deletions

View File

@@ -1,4 +1,4 @@
'''
All database related operations.
'''
from .functions import *
# from .functions import *

View File

@@ -1,280 +0,0 @@
'''
Used to construct models from input dictionaries.
'''
from tools import Settings, check_regex_match, check_authorization, massage_common_reagents
from .. import models
from .lookups import *
import logging
from datetime import date, timedelta
from dateutil.parser import parse
from typing import Tuple
from sqlalchemy.exc import IntegrityError, SAWarning
from . import store_object
from backend.validators import RSLNamer
logger = logging.getLogger(f"submissions.{__name__}")
# def construct_reagent(ctx:Settings, info_dict:dict) -> models.Reagent:
# """
# Construct reagent object from dictionary
# NOTE: Deprecated in favour of Pydantic model .toSQL method
# Args:
# ctx (Settings): settings object passed down from gui
# info_dict (dict): dictionary to be converted
# Returns:
# models.Reagent: Constructed reagent object
# """
# reagent = models.Reagent()
# for item in info_dict:
# logger.debug(f"Reagent info item for {item}: {info_dict[item]}")
# # set fields based on keys in dictionary
# match item:
# case "lot":
# reagent.lot = info_dict[item].upper()
# case "expiry":
# if isinstance(info_dict[item], date):
# reagent.expiry = info_dict[item]
# else:
# reagent.expiry = parse(info_dict[item]).date()
# case "type":
# reagent_type = lookup_reagent_types(ctx=ctx, name=info_dict[item])
# if reagent_type != None:
# reagent.type.append(reagent_type)
# case "name":
# if item == None:
# reagent.name = reagent.type.name
# else:
# reagent.name = info_dict[item]
# # add end-of-life extension from reagent type to expiry date
# # NOTE: this will now be done only in the reporting phase to account for potential changes in end-of-life extensions
# return reagent
# def construct_submission_info(ctx:Settings, info_dict:dict) -> Tuple[models.BasicSubmission, dict]:
# """
# Construct submission object from dictionary pulled from gui form
# NOTE: Deprecated in favour of Pydantic model .toSQL method
# Args:
# ctx (Settings): settings object passed down from gui
# info_dict (dict): dictionary to be transformed
# Returns:
# models.BasicSubmission: Constructed submission object
# """
# # convert submission type into model name
# # model = get_polymorphic_subclass(polymorphic_identity=info_dict['submission_type'])
# model = models.BasicSubmission.find_polymorphic_subclass(polymorphic_identity=info_dict['submission_type'])
# logger.debug(f"We've got the model: {type(model)}")
# # Ensure an rsl plate number exists for the plate
# if not check_regex_match("^RSL", info_dict["rsl_plate_num"]):
# instance = None
# msg = "A proper RSL plate number is required."
# return instance, {'code': 2, 'message': "A proper RSL plate number is required."}
# else:
# # # enforce conventions on the rsl plate number from the form
# # # info_dict['rsl_plate_num'] = RSLNamer(ctx=ctx, instr=info_dict["rsl_plate_num"]).parsed_name
# info_dict['rsl_plate_num'] = RSLNamer(ctx=ctx, instr=info_dict["rsl_plate_num"], sub_type=info_dict['submission_type']).parsed_name
# # check database for existing object
# instance = lookup_submissions(ctx=ctx, rsl_number=info_dict['rsl_plate_num'])
# # get model based on submission type converted above
# # logger.debug(f"Looking at models for submission type: {query}")
# # if query return nothing, ie doesn't already exist in db
# if instance == None:
# instance = model()
# logger.debug(f"Submission doesn't exist yet, creating new instance: {instance}")
# msg = None
# code = 0
# else:
# code = 1
# msg = "This submission already exists.\nWould you like to overwrite?"
# for item in info_dict:
# value = info_dict[item]
# logger.debug(f"Setting {item} to {value}")
# # set fields based on keys in dictionary
# match item:
# case "extraction_kit":
# logger.debug(f"Looking up kit {value}")
# field_value = lookup_kit_types(ctx=ctx, name=value)
# logger.debug(f"Got {field_value} for kit {value}")
# case "submitting_lab":
# logger.debug(f"Looking up organization: {value}")
# field_value = lookup_organizations(ctx=ctx, name=value)
# logger.debug(f"Got {field_value} for organization {value}")
# case "submitter_plate_num":
# logger.debug(f"Submitter plate id: {value}")
# field_value = value
# case "samples":
# instance = construct_samples(ctx=ctx, instance=instance, samples=value)
# continue
# case "submission_type":
# field_value = lookup_submission_type(ctx=ctx, name=value)
# case _:
# field_value = value
# # insert into field
# try:
# setattr(instance, item, field_value)
# except AttributeError:
# logger.debug(f"Could not set attribute: {item} to {info_dict[item]}")
# continue
# except KeyError:
# continue
# # calculate cost of the run: immutable cost + mutable times number of columns
# # This is now attached to submission upon creation to preserve at-run costs in case of cost increase in the future.
# try:
# logger.debug(f"Calculating costs for procedure...")
# instance.calculate_base_cost()
# except (TypeError, AttributeError) as e:
# logger.debug(f"Looks like that kit doesn't have cost breakdown yet due to: {e}, using full plate cost.")
# instance.run_cost = instance.extraction_kit.cost_per_run
# logger.debug(f"Calculated base run cost of: {instance.run_cost}")
# # Apply any discounts that are applicable for client and kit.
# try:
# logger.debug("Checking and applying discounts...")
# discounts = [item.amount for item in lookup_discounts(ctx=ctx, kit_type=instance.extraction_kit, organization=instance.submitting_lab)]
# logger.debug(f"We got discounts: {discounts}")
# if len(discounts) > 0:
# discounts = sum(discounts)
# instance.run_cost = instance.run_cost - discounts
# except Exception as e:
# logger.error(f"An unknown exception occurred when calculating discounts: {e}")
# # We need to make sure there's a proper rsl plate number
# logger.debug(f"We've got a total cost of {instance.run_cost}")
# try:
# logger.debug(f"Constructed instance: {instance.to_string()}")
# except AttributeError as e:
# logger.debug(f"Something went wrong constructing instance {info_dict['rsl_plate_num']}: {e}")
# logger.debug(f"Constructed submissions message: {msg}")
# return instance, {'code':code, 'message':msg}
# def construct_samples(ctx:Settings, instance:models.BasicSubmission, samples:List[dict]) -> models.BasicSubmission:
# """
# constructs sample objects and adds to submission
# NOTE: Deprecated in favour of Pydantic model .toSQL method
# Args:
# ctx (Settings): settings passed down from gui
# instance (models.BasicSubmission): Submission samples scraped from.
# samples (List[dict]): List of parsed samples
# Returns:
# models.BasicSubmission: Updated submission object.
# """
# for sample in samples:
# sample_instance = lookup_samples(ctx=ctx, submitter_id=str(sample['sample'].submitter_id))
# if sample_instance == None:
# sample_instance = sample['sample']
# else:
# logger.warning(f"Sample {sample} already exists, creating association.")
# logger.debug(f"Adding {sample_instance.__dict__}")
# if sample_instance in instance.samples:
# logger.error(f"Looks like there's a duplicate sample on this plate: {sample_instance.submitter_id}!")
# continue
# try:
# with ctx.database_session.no_autoflush:
# try:
# sample_query = sample_instance.sample_type.replace('Sample', '').strip()
# logger.debug(f"Here is the sample instance type: {sample_instance}")
# try:
# assoc = getattr(models, f"{sample_query}Association")
# except AttributeError as e:
# logger.error(f"Couldn't get type specific association using {sample_instance.sample_type.replace('Sample', '').strip()}. Getting generic.")
# assoc = models.SubmissionSampleAssociation
# assoc = assoc(submission=instance, sample=sample_instance, row=sample['row'], column=sample['column'])
# instance.submission_sample_associations.append(assoc)
# except IntegrityError:
# logger.error(f"Hit integrity error for: {sample}")
# continue
# except SAWarning:
# logger.error(f"Looks like the association already exists for submission: {instance} and sample: {sample_instance}")
# continue
# except IntegrityError as e:
# logger.critical(e)
# continue
# return instance
# @check_authorization
# def construct_kit_from_yaml(ctx:Settings, kit_dict:dict) -> dict:
# """
# Create and store a new kit in the database based on a .yml file
# TODO: split into create and store functions
# Args:
# ctx (Settings): Context object passed down from frontend
# kit_dict (dict): Experiment dictionary created from yaml file
# Returns:
# dict: a dictionary containing results of db addition
# """
# # from tools import check_is_power_user, massage_common_reagents
# # Don't want just anyone adding kits
# # if not check_is_power_user(ctx=ctx):
# # logger.debug(f"{getuser()} does not have permission to add kits.")
# # return {'code':1, 'message':"This user does not have permission to add kits.", "status":"warning"}
# submission_type = lookup_submission_type(ctx=ctx, name=kit_dict['used_for'])
# logger.debug(f"Looked up submission type: {kit_dict['used_for']} and got {submission_type}")
# kit = models.KitType(name=kit_dict["kit_name"])
# kt_st_assoc = models.SubmissionTypeKitTypeAssociation(kit_type=kit, submission_type=submission_type)
# for k,v in kit_dict.items():
# if k not in ["reagent_types", "kit_name", "used_for"]:
# kt_st_assoc.set_attrib(k, v)
# kit.kit_submissiontype_associations.append(kt_st_assoc)
# # A kit contains multiple reagent types.
# for r in kit_dict['reagent_types']:
# logger.debug(f"Constructing reagent type: {r}")
# rtname = massage_common_reagents(r['rtname'])
# look_up = lookup_reagent_types(name=rtname)
# if look_up == None:
# rt = models.ReagentType(name=rtname.strip(), eol_ext=timedelta(30*r['eol']))
# else:
# rt = look_up
# uses = {kit_dict['used_for']:{k:v for k,v in r.items() if k not in ['eol']}}
# assoc = models.KitTypeReagentTypeAssociation(kit_type=kit, reagent_type=rt, uses=uses)
# # ctx.database_session.add(rt)
# store_object(ctx=ctx, object=rt)
# kit.kit_reagenttype_associations.append(assoc)
# logger.debug(f"Kit construction reagent type: {rt.__dict__}")
# logger.debug(f"Kit construction kit: {kit.__dict__}")
# store_object(ctx=ctx, object=kit)
# return {'code':0, 'message':'Kit has been added', 'status': 'information'}
# @check_authorization
# def construct_org_from_yaml(ctx:Settings, org:dict) -> dict:
# """
# Create and store a new organization based on a .yml file
# Args:
# ctx (Settings): Context object passed down from frontend
# org (dict): Dictionary containing organization info.
# Returns:
# dict: dictionary containing results of db addition
# """
# # from tools import check_is_power_user
# # # Don't want just anyone adding in clients
# # if not check_is_power_user(ctx=ctx):
# # logger.debug(f"{getuser()} does not have permission to add kits.")
# # return {'code':1, 'message':"This user does not have permission to add organizations."}
# # the yml can contain multiple clients
# for client in org:
# cli_org = models.Organization(name=client.replace(" ", "_").lower(), cost_centre=org[client]['cost centre'])
# # a client can contain multiple contacts
# for contact in org[client]['contacts']:
# cont_name = list(contact.keys())[0]
# # check if contact already exists
# look_up = ctx.database_session.query(models.Contact).filter(models.Contact.name==cont_name).first()
# if look_up == None:
# cli_cont = models.Contact(name=cont_name, phone=contact[cont_name]['phone'], email=contact[cont_name]['email'], organization=[cli_org])
# else:
# cli_cont = look_up
# cli_cont.organization.append(cli_org)
# ctx.database_session.add(cli_cont)
# logger.debug(f"Client creation contact: {cli_cont.__dict__}")
# logger.debug(f"Client creation client: {cli_org.__dict__}")
# ctx.database_session.add(cli_org)
# ctx.database_session.commit()
# return {"code":0, "message":"Organization has been added."}

View File

@@ -141,7 +141,10 @@ def lookup_reagent_types(ctx:Settings,
# logger.debug(f"Reagent reagent types: {reagent._sa_instance_state}")
result = list(set(kit_type.reagent_types).intersection(reagent.type))
logger.debug(f"Result: {result}")
return result[0]
try:
return result[0]
except IndexError:
return result
match name:
case str():
logger.debug(f"Looking up reagent type by name: {name}")
@@ -249,7 +252,7 @@ def lookup_submissions(ctx:Settings,
if chronologic:
# query.order_by(models.BasicSubmission.submitted_date)
query.order_by(model.submitted_date)
logger.debug(f"At the end of the search, the query gets: {query.all()}")
# logger.debug(f"At the end of the search, the query gets: {query.all()}")
return query_return(query=query, limit=limit)
def lookup_submission_type(ctx:Settings,

View File

@@ -1,6 +1,7 @@
'''
Contains convenience functions for using database
'''
import sys
from tools import Settings
from .lookups import *
import pandas as pd
@@ -13,6 +14,8 @@ from sqlalchemy.exc import OperationalError as AlcOperationalError, IntegrityErr
from sqlite3 import OperationalError as SQLOperationalError, IntegrityError as SQLIntegrityError
from pprint import pformat
import logging
from backend.validators import pydant
logger = logging.getLogger(f"submissions.{__name__}")
@@ -172,7 +175,7 @@ def update_ww_sample(ctx:Settings, sample_obj:dict) -> dict|None:
result = store_object(ctx=ctx, object=assoc)
return result
def check_kit_integrity(sub:models.BasicSubmission|models.KitType, reagenttypes:list|None=None) -> dict|None:
def check_kit_integrity(ctx:Settings, sub:models.BasicSubmission|models.KitType|pydant.PydSubmission, reagenttypes:list=[]) -> dict|None:
"""
Ensures all reagents expected in kit are listed in Submission
@@ -185,20 +188,30 @@ def check_kit_integrity(sub:models.BasicSubmission|models.KitType, reagenttypes:
"""
logger.debug(type(sub))
# What type is sub?
reagenttypes = []
# reagenttypes = []
match sub:
case pydant.PydSubmission():
ext_kit = lookup_kit_types(ctx=ctx, name=sub.extraction_kit['value'])
ext_kit_rtypes = [item.name for item in ext_kit.get_reagents(required=True, submission_type=sub.submission_type['value'])]
reagenttypes = [item.type for item in sub.reagents]
case models.BasicSubmission():
# Get all required reagent types for this kit.
ext_kit_rtypes = [item.name for item in sub.extraction_kit.get_reagents(required=True, submission_type=sub.submission_type_name)]
# Overwrite function parameter reagenttypes
for reagent in sub.reagents:
logger.debug(f"For kit integrity, looking up reagent: {reagent}")
try:
rt = list(set(reagent.type).intersection(sub.extraction_kit.reagent_types))[0].name
# rt = list(set(reagent.type).intersection(sub.extraction_kit.reagent_types))[0].name
rt = lookup_reagent_types(ctx=ctx, kit_type=sub.extraction_kit, reagent=reagent)
logger.debug(f"Got reagent type: {rt}")
reagenttypes.append(rt)
if isinstance(rt, models.ReagentType):
reagenttypes.append(rt.name)
except AttributeError as e:
logger.error(f"Problem parsing reagents: {[f'{reagent.lot}, {reagent.type}' for reagent in sub.reagents]}")
reagenttypes.append(reagent.type[0].name)
except IndexError:
logger.error(f"No intersection of {reagent} type {reagent.type} and {sub.extraction_kit.reagent_types}")
raise ValueError(f"No intersection of {reagent} type {reagent.type} and {sub.extraction_kit.reagent_types}")
case models.KitType():
ext_kit_rtypes = [item.name for item in sub.get_reagents(required=True)]
case _:

View File

@@ -248,7 +248,7 @@ class Reagent(Base):
"expiry": place_holder.strftime("%Y-%m-%d")
}
def to_reagent_dict(self, extraction_kit:KitType=None) -> dict:
def to_reagent_dict(self, extraction_kit:KitType|str=None) -> dict:
"""
Returns basic reagent dictionary.
@@ -314,6 +314,7 @@ class SubmissionType(Base):
name = Column(String(128), unique=True) #: name of submission type
info_map = Column(JSON) #: Where basic information is found in the excel workbook corresponding to this type.
instances = relationship("BasicSubmission", backref="submission_type")
# regex = Column(String(512))
submissiontype_kit_associations = relationship(
"SubmissionTypeKitTypeAssociation",
@@ -325,6 +326,7 @@ class SubmissionType(Base):
def __repr__(self) -> str:
return f"<SubmissionType({self.name})>"
class SubmissionTypeKitTypeAssociation(Base):
"""

View File

@@ -47,6 +47,7 @@ class BasicSubmission(Base):
reagents = relationship("Reagent", back_populates="submissions", secondary=reagents_submissions) #: relationship to reagents
reagents_id = Column(String, ForeignKey("_reagents.id", ondelete="SET NULL", name="fk_BS_reagents_id")) #: id of used reagents
extraction_info = Column(JSON) #: unstructured output from the extraction table logger.
pcr_info = Column(JSON) #: unstructured output from pcr table logger or user(Artic)
run_cost = Column(FLOAT(2)) #: total cost of running the plate. Set from constant and mutable kit costs at time of creation.
uploaded_by = Column(String(32)) #: user name of person who submitted the submission to the database.
comment = Column(JSON)
@@ -211,12 +212,12 @@ class BasicSubmission(Base):
Calculate the number of columns in this submission
Returns:
int: largest column number
int: Number of unique columns.
"""
logger.debug(f"Here's the samples: {self.samples}")
columns = [assoc.column for assoc in self.submission_sample_associations]
columns = set([assoc.column for assoc in self.submission_sample_associations])
logger.debug(f"Here are the columns for {self.rsl_plate_num}: {columns}")
return max(columns)
return len(columns)
def hitpick_plate(self, plate_number:int|None=None) -> list:
"""
@@ -281,7 +282,7 @@ class BasicSubmission(Base):
Returns:
dict: Updated sample dictionary
"""
logger.debug(f"Called {cls.__name__} sample parser")
# logger.debug(f"Called {cls.__name__} sample parser")
return input_dict
@classmethod
@@ -461,7 +462,7 @@ class Wastewater(BasicSubmission):
"""
derivative submission type from BasicSubmission
"""
pcr_info = Column(JSON)
# pcr_info = Column(JSON)
ext_technician = Column(String(64))
pcr_technician = Column(String(64))
__mapper_args__ = {"polymorphic_identity": "Wastewater", "polymorphic_load": "inline"}
@@ -570,13 +571,16 @@ class Wastewater(BasicSubmission):
@classmethod
def get_regex(cls):
return "(?P<Wastewater>RSL(?:-|_)?WW(?:-|_)?20\d{2}-?\d{2}-?\d{2}(?:(_|-)\d?(\D|$)R?\d?)?)"
# return "(?P<Wastewater>RSL(?:-|_)?WW(?:-|_)?20\d{2}-?\d{2}-?\d{2}(?:(_|-)\d?(\D|$)R?\d?)?)"
# return "(?P<Wastewater>RSL(?:-|_)?WW(?:-|_)?20\d{2}-?\d{2}-?\d{2}(?:(_|-)\d?([^_|\D]|$)R?\d?)?)"
return "(?P<Wastewater>RSL(?:-|_)?WW(?:-|_)?20\d{2}-?\d{2}-?\d{2}(?:(_|-)?\d?([^_0123456789]|$)R?\d?)?)"
class WastewaterArtic(BasicSubmission):
"""
derivative submission type for artic wastewater
"""
__mapper_args__ = {"polymorphic_identity": "Wastewater Artic", "polymorphic_load": "inline"}
artic_technician = Column(String(64))
def calculate_base_cost(self):
"""
@@ -752,7 +756,7 @@ class BasicSample(Base):
@classmethod
def parse_sample(cls, input_dict:dict) -> dict:
logger.debug(f"Called {cls.__name__} sample parser")
# logger.debug(f"Called {cls.__name__} sample parser")
return input_dict
class WastewaterSample(BasicSample):

View File

@@ -7,7 +7,8 @@ from typing import List
import pandas as pd
import numpy as np
from pathlib import Path
from backend.db import models, lookup_kit_types, lookup_submission_type, lookup_samples
from backend.db import models
from backend.db.functions import lookup_kit_types, lookup_submission_type, lookup_samples
from backend.validators import PydSubmission, PydReagent, RSLNamer, PydSample
import logging
from collections import OrderedDict
@@ -32,7 +33,7 @@ class SheetParser(object):
filepath (Path | None, optional): file path to excel sheet. Defaults to None.
"""
self.ctx = ctx
logger.debug(f"Parsing {filepath.__str__()}")
logger.debug(f"\n\nParsing {filepath.__str__()}\n\n")
match filepath:
case Path():
self.filepath = filepath
@@ -48,7 +49,7 @@ class SheetParser(object):
raise FileNotFoundError(f"Couldn't parse file {self.filepath}")
self.sub = OrderedDict()
# make decision about type of sample we have
self.sub['submission_type'] = dict(value=RSLNamer.retrieve_submission_type(ctx=self.ctx, instr=self.filepath), parsed=False)
self.sub['submission_type'] = dict(value=RSLNamer.retrieve_submission_type(ctx=self.ctx, instr=self.filepath), missing=True)
# # grab the info map from the submission type in database
self.parse_info()
self.import_kit_validation_check()
@@ -98,12 +99,12 @@ class SheetParser(object):
if not check_not_nan(self.sub['extraction_kit']['value']):
dlg = KitSelector(ctx=self.ctx, title="Kit Needed", message="At minimum a kit is needed. Please select one.")
if dlg.exec():
self.sub['extraction_kit'] = dict(value=dlg.getValues(), parsed=False)
self.sub['extraction_kit'] = dict(value=dlg.getValues(), missing=True)
else:
raise ValueError("Extraction kit needed.")
else:
if isinstance(self.sub['extraction_kit'], str):
self.sub['extraction_kit'] = dict(value=self.sub['extraction_kit'], parsed=False)
self.sub['extraction_kit'] = dict(value=self.sub['extraction_kit'], missing=True)
def import_reagent_validation_check(self):
"""
@@ -130,6 +131,7 @@ class SheetParser(object):
class InfoParser(object):
def __init__(self, ctx:Settings, xl:pd.ExcelFile, submission_type:str):
logger.debug(f"\n\nHello from InfoParser!")
self.ctx = ctx
self.map = self.fetch_submission_info_map(submission_type=submission_type)
self.xl = xl
@@ -147,7 +149,7 @@ class InfoParser(object):
dict: Location map of all info for this submission type
"""
if isinstance(submission_type, str):
submission_type = dict(value=submission_type, parsed=False)
submission_type = dict(value=submission_type, missing=True)
logger.debug(f"Looking up submission type: {submission_type['value']}")
submission_type = lookup_submission_type(ctx=self.ctx, name=submission_type['value'])
info_map = submission_type.info_map
@@ -168,7 +170,7 @@ class InfoParser(object):
relevant = {}
for k, v in self.map.items():
if isinstance(v, str):
dicto[k] = dict(value=v, parsed=True)
dicto[k] = dict(value=v, missing=False)
continue
if k == "samples":
continue
@@ -183,16 +185,16 @@ class InfoParser(object):
if check_not_nan(value):
if value != "None":
try:
dicto[item] = dict(value=value, parsed=True)
dicto[item] = dict(value=value, missing=False)
except (KeyError, IndexError):
continue
else:
try:
dicto[item] = dict(value=value, parsed=False)
dicto[item] = dict(value=value, missing=True)
except (KeyError, IndexError):
continue
else:
dicto[item] = dict(value=convert_nans_to_nones(value), parsed=False)
dicto[item] = dict(value=convert_nans_to_nones(value), missing=True)
try:
check = dicto['submission_category'] not in ["", None]
except KeyError:
@@ -202,6 +204,7 @@ class InfoParser(object):
class ReagentParser(object):
def __init__(self, ctx:Settings, xl:pd.ExcelFile, submission_type:str, extraction_kit:str):
logger.debug("\n\nHello from ReagentParser!\n\n")
self.ctx = ctx
self.map = self.fetch_kit_info_map(extraction_kit=extraction_kit, submission_type=submission_type)
self.xl = xl
@@ -232,18 +235,18 @@ class ReagentParser(object):
lot = df.iat[relevant[item]['lot']['row']-1, relevant[item]['lot']['column']-1]
expiry = df.iat[relevant[item]['expiry']['row']-1, relevant[item]['expiry']['column']-1]
except (KeyError, IndexError):
listo.append(PydReagent(ctx=self.ctx, type=item.strip(), lot=None, exp=None, name=None, parsed=False))
listo.append(PydReagent(ctx=self.ctx, type=item.strip(), lot=None, expiry=None, name=None, missing=True))
continue
# If the cell is blank tell the PydReagent
if check_not_nan(lot):
parsed = True
missing = False
else:
parsed = False
missing = True
# logger.debug(f"Got lot for {item}-{name}: {lot} as {type(lot)}")
lot = str(lot)
logger.debug(f"Going into pydantic: name: {name}, lot: {lot}, expiry: {expiry}, type: {item.strip()}")
listo.append(PydReagent(ctx=self.ctx, type=item.strip(), lot=lot, expiry=expiry, name=name, parsed=parsed))
logger.debug(f"Returning listo: {listo}")
listo.append(PydReagent(ctx=self.ctx, type=item.strip(), lot=lot, expiry=expiry, name=name, missing=missing))
# logger.debug(f"Returning listo: {listo}")
return listo
class SampleParser(object):
@@ -260,6 +263,7 @@ class SampleParser(object):
df (pd.DataFrame): input sample dataframe
elution_map (pd.DataFrame | None, optional): optional map of elution plate. Defaults to None.
"""
logger.debug("\n\nHello from SampleParser!")
self.samples = []
self.ctx = ctx
self.xl = xl
@@ -310,6 +314,7 @@ class SampleParser(object):
# custom_mapper = get_polymorphic_subclass(models.BasicSubmission, self.submission_type)
custom_mapper = models.BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
df = custom_mapper.custom_platemap(self.xl, df)
logger.debug(f"Custom platemap:\n{df}")
return df
def construct_lookup_table(self, lookup_table_location:dict) -> pd.DataFrame:
@@ -369,10 +374,10 @@ class SampleParser(object):
for sample in self.samples:
# addition = self.lookup_table[self.lookup_table.isin([sample['submitter_id']]).any(axis=1)].squeeze().to_dict()
addition = self.lookup_table[self.lookup_table.isin([sample['submitter_id']]).any(axis=1)].squeeze()
logger.debug(addition)
# logger.debug(addition)
if isinstance(addition, pd.DataFrame) and not addition.empty:
addition = addition.iloc[0]
logger.debug(f"Lookuptable info: {addition.to_dict()}")
# logger.debug(f"Lookuptable info: {addition.to_dict()}")
for k,v in addition.to_dict().items():
# logger.debug(f"Checking {k} in lookup table.")
if check_not_nan(k) and isinstance(k, str):
@@ -395,7 +400,7 @@ class SampleParser(object):
self.lookup_table.loc[self.lookup_table['Well']==addition['Well']] = np.nan
except (ValueError, KeyError):
pass
logger.debug(f"Output sample dict: {sample}")
# logger.debug(f"Output sample dict: {sample}")
logger.debug(f"Final lookup_table: \n\n {self.lookup_table}")
def parse_samples(self, generate:bool=True) -> List[dict]|List[models.BasicSample]:
@@ -432,11 +437,7 @@ class SampleParser(object):
translated_dict['sample_type'] = f"{self.submission_type} Sample"
translated_dict = self.custom_sub_parser(translated_dict)
translated_dict = self.custom_sample_parser(translated_dict)
logger.debug(f"Here is the output of the custom parser: \n\n{translated_dict}\n\n")
# if generate:
# new_samples.append(self.generate_sample_object(translated_dict))
# else:
# new_samples.append(translated_dict)
# logger.debug(f"Here is the output of the custom parser:\n{translated_dict}")
new_samples.append(PydSample(**translated_dict))
return result, new_samples

View File

@@ -18,7 +18,7 @@ class RSLNamer(object):
if self.submission_type == None:
self.submission_type = self.retrieve_submission_type(ctx=self.ctx, instr=instr)
print(self.submission_type)
logger.debug(f"got submission type: {self.submission_type}")
if self.submission_type != None:
enforcer = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
self.parsed_name = self.retrieve_rsl_number(instr=instr, regex=enforcer.get_regex())
@@ -67,10 +67,12 @@ class RSLNamer(object):
Args:
in_str (str): string to be parsed
"""
logger.debug(f"Input string to be parsed: {instr}")
if regex == None:
regex = BasicSubmission.construct_regex()
else:
regex = re.compile(rf'{regex}', re.IGNORECASE | re.VERBOSE)
logger.debug(f"Using regex: {regex}")
match instr:
case Path():
m = regex.search(instr.stem)

View File

@@ -20,6 +20,8 @@ from backend.db.functions import (lookup_submissions, lookup_reagent_types, look
from backend.db.models import *
from sqlalchemy.exc import InvalidRequestError, StatementError
from PyQt6.QtWidgets import QComboBox, QWidget, QLabel, QVBoxLayout
from pprint import pformat
from openpyxl import load_workbook
logger = logging.getLogger(f"submissions.{__name__}")
@@ -29,7 +31,7 @@ class PydReagent(BaseModel):
type: str|None
expiry: date|None
name: str|None
parsed: bool = Field(default=False)
missing: bool = Field(default=True)
@field_validator("type", mode='before')
@classmethod
@@ -134,6 +136,11 @@ class PydSample(BaseModel, extra='allow'):
return [value]
return value
@field_validator("submitter_id", mode="before")
@classmethod
def int_to_str(cls, value):
return str(value)
def toSQL(self, ctx:Settings, submission):
result = None
self.__dict__.update(self.model_extra)
@@ -165,14 +172,14 @@ class PydSubmission(BaseModel, extra='allow'):
filepath: Path
submission_type: dict|None
# For defaults
submitter_plate_num: dict|None = Field(default=dict(value=None, parsed=False), validate_default=True)
rsl_plate_num: dict|None = Field(default=dict(value=None, parsed=False), validate_default=True)
submitter_plate_num: dict|None = Field(default=dict(value=None, missing=True), validate_default=True)
rsl_plate_num: dict|None = Field(default=dict(value=None, missing=True), validate_default=True)
submitted_date: dict|None
submitting_lab: dict|None
sample_count: dict|None
extraction_kit: dict|None
technician: dict|None
submission_category: dict|None = Field(default=dict(value=None, parsed=False), validate_default=True)
submission_category: dict|None = Field(default=dict(value=None, missing=True), validate_default=True)
reagents: List[dict]|List[PydReagent] = []
samples: List[Any]
@@ -181,7 +188,7 @@ class PydSubmission(BaseModel, extra='allow'):
def enforce_with_uuid(cls, value):
logger.debug(f"submitter plate id: {value}")
if value['value'] == None or value['value'] == "None":
return dict(value=uuid.uuid4().hex.upper(), parsed=False)
return dict(value=uuid.uuid4().hex.upper(), missing=True)
else:
return value
@@ -189,7 +196,7 @@ class PydSubmission(BaseModel, extra='allow'):
@classmethod
def rescue_date(cls, value):
if value == None:
return dict(value=date.today(), parsed=False)
return dict(value=date.today(), missing=True)
return value
@field_validator("submitted_date")
@@ -200,14 +207,14 @@ class PydSubmission(BaseModel, extra='allow'):
if isinstance(value['value'], date):
return value
if isinstance(value['value'], int):
return dict(value=datetime.fromordinal(datetime(1900, 1, 1).toordinal() + value['value'] - 2).date(), parsed=False)
return dict(value=datetime.fromordinal(datetime(1900, 1, 1).toordinal() + value['value'] - 2).date(), missing=True)
string = re.sub(r"(_|-)\d$", "", value['value'])
try:
output = dict(value=parse(string).date(), parsed=False)
output = dict(value=parse(string).date(), missing=True)
except ParserError as e:
logger.error(f"Problem parsing date: {e}")
try:
output = dict(value=parse(string.replace("-","")).date(), parsed=False)
output = dict(value=parse(string.replace("-","")).date(), missing=True)
except Exception as e:
logger.error(f"Problem with parse fallback: {e}")
return output
@@ -216,14 +223,14 @@ class PydSubmission(BaseModel, extra='allow'):
@classmethod
def rescue_submitting_lab(cls, value):
if value == None:
return dict(value=None, parsed=False)
return dict(value=None, missing=True)
return value
@field_validator("rsl_plate_num", mode='before')
@classmethod
def rescue_rsl_number(cls, value):
if value == None:
return dict(value=None, parsed=False)
return dict(value=None, missing=True)
return value
@field_validator("rsl_plate_num")
@@ -233,21 +240,21 @@ class PydSubmission(BaseModel, extra='allow'):
sub_type = values.data['submission_type']['value']
if check_not_nan(value['value']):
if lookup_submissions(ctx=values.data['ctx'], rsl_number=value['value']) == None:
return dict(value=value['value'], parsed=True)
return dict(value=value['value'], missing=False)
else:
logger.warning(f"Submission number {value} already exists in DB, attempting salvage with filepath")
# output = RSLNamer(ctx=values.data['ctx'], instr=values.data['filepath'].__str__(), sub_type=sub_type).parsed_name
output = RSLNamer(ctx=values.data['ctx'], instr=values.data['filepath'].__str__(), sub_type=sub_type).parsed_name
return dict(value=output, parsed=False)
return dict(value=output, missing=True)
else:
output = RSLNamer(ctx=values.data['ctx'], instr=values.data['filepath'].__str__(), sub_type=sub_type).parsed_name
return dict(value=output, parsed=False)
return dict(value=output, missing=True)
@field_validator("technician", mode="before")
@classmethod
def rescue_tech(cls, value):
    """Substitute the standard missing-value placeholder when no technician was parsed.

    Args:
        value: raw parsed technician name, or None when absent from the sheet.

    Returns:
        dict | Any: {'value': None, 'missing': True} when absent, else value untouched.
    """
    if value is None:
        # Only the `missing=True` form is kept; the duplicate `parsed=False`
        # return was dead code left over from the parsed -> missing rename.
        return dict(value=None, missing=True)
    return value
@field_validator("technician")
@@ -257,14 +264,14 @@ class PydSubmission(BaseModel, extra='allow'):
value['value'] = re.sub(r"\: \d", "", value['value'])
return value
else:
return dict(value=convert_nans_to_nones(value['value']), parsed=False)
return dict(value=convert_nans_to_nones(value['value']), missing=True)
return value
@field_validator("sample_count", mode='before')
@classmethod
def rescue_sample_count(cls, value):
    """Substitute the standard missing-value placeholder when no sample count was parsed.

    Args:
        value: raw parsed sample count, or None when absent from the sheet.

    Returns:
        dict | Any: {'value': None, 'missing': True} when absent, else value untouched.
    """
    if value is None:
        # Only the `missing=True` form is kept; the duplicate `parsed=False`
        # return was dead code left over from the parsed -> missing rename.
        return dict(value=None, missing=True)
    return value
@field_validator("extraction_kit", mode='before')
@@ -273,13 +280,13 @@ class PydSubmission(BaseModel, extra='allow'):
if check_not_nan(value):
if isinstance(value, str):
return dict(value=value, parsed=True)
return dict(value=value, missing=False)
elif isinstance(value, dict):
return value
else:
raise ValueError(f"No extraction kit found.")
if value == None:
return dict(value=None, parsed=False)
return dict(value=None, missing=True)
return value
@field_validator("submission_type", mode='before')
@@ -289,11 +296,11 @@ class PydSubmission(BaseModel, extra='allow'):
value = {"value": value}
if check_not_nan(value['value']):
value = value['value'].title()
return dict(value=value, parsed=True)
return dict(value=value, missing=False)
# else:
# return dict(value="RSL Name not found.")
else:
return dict(value=RSLNamer(ctx=values.data['ctx'], instr=values.data['filepath'].__str__()).submission_type.title(), parsed=False)
return dict(value=RSLNamer(ctx=values.data['ctx'], instr=values.data['filepath'].__str__()).submission_type.title(), missing=True)
@field_validator("submission_category")
@classmethod
@@ -318,9 +325,21 @@ class PydSubmission(BaseModel, extra='allow'):
output.append(dummy)
self.samples = output
def improved_dict(self):
    """Return a plain dict of every declared and extra field on this model."""
    # Declared model fields first, then any extra (undeclared) keys.
    combined = [*self.model_fields, *self.model_extra]
    return {name: getattr(self, name) for name in combined}
def find_missing(self):
    """Split out the info fields and reagents still flagged as missing.

    Returns:
        tuple: (dict of info entries whose 'missing' flag is truthy,
        list of reagents whose .missing attribute is truthy).
    """
    missing_info = {}
    for key, entry in self.improved_dict().items():
        # Only dict-shaped entries carry the {'value', 'missing'} structure.
        if isinstance(entry, dict) and entry['missing']:
            missing_info[key] = entry
    missing_reagents = [r for r in self.reagents if r.missing]
    return missing_info, missing_reagents
def toSQL(self):
code = 0
msg = None
status = None
self.__dict__.update(self.model_extra)
instance = lookup_submissions(ctx=self.ctx, rsl_number=self.rsl_plate_num['value'])
if instance == None:
@@ -358,6 +377,11 @@ class PydSubmission(BaseModel, extra='allow'):
field_value = [reagent['value'].toSQL()[0] if isinstance(reagent, dict) else reagent.toSQL()[0] for reagent in value]
case "submission_type":
field_value = lookup_submission_type(ctx=self.ctx, name=value)
case "sample_count":
if value == None:
field_value = len(self.samples)
else:
field_value = value
case "ctx" | "csv" | "filepath":
continue
case _:
@@ -394,9 +418,85 @@ class PydSubmission(BaseModel, extra='allow'):
except AttributeError as e:
logger.debug(f"Something went wrong constructing instance {self.rsl_plate_num}: {e}")
logger.debug(f"Constructed submissions message: {msg}")
return instance, {'code':code, 'message':msg}
return instance, {'code':code, 'message':msg, 'status':"Information"}
def toForm(self, parent:QWidget):
    """Build the Qt form widget representing this submission.

    Args:
        parent (QWidget): parent widget the form will be attached to.

    Returns:
        SubmissionFormWidget: form pre-populated with this model's fields.
    """
    # Imported locally to avoid a circular import with the frontend package.
    from frontend.custom_widgets.misc import SubmissionFormWidget
    return SubmissionFormWidget(parent=parent, **self.improved_dict())
def autofill_excel(self, missing_only:bool=True):
    """Write info fields and reagent lot/expiry/name values back into the source workbook.

    Looks up the cell location of each field from the extraction kit's excel
    map, fills the values in on the appropriate sheets, then lets the
    submission-type subclass apply any custom autofill (e.g. 'signed by').

    Args:
        missing_only (bool): when True, only fill fields flagged as missing;
            otherwise rewrite every dict-shaped info field and all reagents.

    Returns:
        Workbook | None: the updated openpyxl workbook, or None when there is
        nothing to fill in.
    """
    if missing_only:
        info, reagents = self.find_missing()
    else:
        # Dict-shaped entries carry the {'value', 'missing'} structure.
        info = {k: v for k, v in self.improved_dict().items() if isinstance(v, dict)}
        reagents = self.reagents
    # Nothing blank means nothing to write back.
    if len(reagents) + len(info) == 0:
        return None
    logger.debug(f"We have blank info and/or reagents in the excel sheet.\n\tLet's try to fill them in.")
    extraction_kit = lookup_kit_types(ctx=self.ctx, name=self.extraction_kit['value'])
    logger.debug(f"We have the extraction kit: {extraction_kit.name}")
    # Map of reagent/info field -> sheet/row/column for this submission type.
    excel_map = extraction_kit.construct_xl_map_for_use(self.submission_type['value'])
    logger.debug(f"Extraction kit map:\n\n{pformat(excel_map)}")
    logger.debug(f"Missing reagents going into autofile: {pformat(reagents)}")
    logger.debug(f"Missing info going into autofile: {pformat(info)}")
    new_reagents = []
    for reagent in reagents:
        # NOTE: the excel_map sub-dicts are aliased and mutated in place here;
        # the map is rebuilt on each call, so nothing leaks between submissions.
        new_reagent = {}
        new_reagent['type'] = reagent.type
        new_reagent['lot'] = excel_map[new_reagent['type']]['lot']
        new_reagent['lot']['value'] = reagent.lot
        new_reagent['expiry'] = excel_map[new_reagent['type']]['expiry']
        new_reagent['expiry']['value'] = reagent.expiry
        new_reagent['sheet'] = excel_map[new_reagent['type']]['sheet']
        # 'name' is only present for Bacterial Culture kits.
        try:
            new_reagent['name'] = excel_map[new_reagent['type']]['name']
            new_reagent['name']['value'] = reagent.name
        except Exception as e:
            logger.error(f"Couldn't get name due to {e}")
        new_reagents.append(new_reagent)
    new_info = []
    for k, v in info.items():
        try:
            new_info.append(dict(type=k, location=excel_map['info'][k], value=v['value']))
        except KeyError:
            logger.error(f"Unable to fill in {k}, not found in relevant info.")
    logger.debug(f"New reagents: {new_reagents}")
    logger.debug(f"New info: {new_info}")
    # Open the original workbook and write values sheet by sheet.
    workbook = load_workbook(self.filepath)
    for sheet in workbook.sheetnames:
        worksheet = workbook[sheet]
        # Reagents whose mapped location lives on this sheet.
        sheet_reagents = [item for item in new_reagents if sheet in item['sheet']]
        for reagent in sheet_reagents:
            worksheet.cell(row=reagent['lot']['row'], column=reagent['lot']['column'], value=reagent['lot']['value'])
            worksheet.cell(row=reagent['expiry']['row'], column=reagent['expiry']['column'], value=reagent['expiry']['value'])
            try:
                worksheet.cell(row=reagent['name']['row'], column=reagent['name']['column'], value=reagent['name']['value'])
            except Exception as e:
                # BUGFIX: use .get() when formatting the log line — reagents
                # without a 'name' mapping used to raise KeyError again inside
                # this handler (the exact case it exists to absorb).
                logger.error(f"Could not write name {reagent.get('name')} due to {e}")
        # Info fields whose mapped location lives on this sheet.
        sheet_info = [item for item in new_info if sheet in item['location']['sheets']]
        for item in sheet_info:
            logger.debug(f"Attempting: {item['type']} in row {item['location']['row']}, column {item['location']['column']}")
            worksheet.cell(row=item['location']['row'], column=item['location']['column'], value=item['value'])
    # Hacky way to pop in 'signed by' and other submission-type-specific cells.
    custom_parser = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type['value'])
    workbook = custom_parser.custom_autofill(workbook)
    return workbook
class PydContact(BaseModel):