Minor bug fixes.
@@ -1,7 +1,9 @@
## 202312.02

## 202312.01

- Control samples info now available in plate map.
- Backups will now create an regenerated xlsx file.
- Backups will now create a regenerated xlsx file.
- Report generator now does sums automatically.

## 202311.04
TODO.md
@@ -1,9 +1,10 @@
- [x] SubmissionReagentAssociation.query
- [x] Move as much from db.functions to objects as possible.
- [x] Clean up DB objects after failed test fix.
- [x] Fix tests.
- [ ] Fix pydant.PydSample.handle_duplicate_samples?
- [x] Fix pydant.PydSample.handle_duplicate_samples?
- [ ] See if the number of queries in BasicSubmission functions (and others) can be trimmed down.
- [x] Document code
    - Done Submissions up to BasicSample
- [x] Create a result object to facilitate returning function results.
- [x] Refactor main_window_functions into as many objects (forms, etc.) as possible to clean it up.
- [x] Integrate 'Construct First Strand' into the Artic import.
@@ -55,9 +55,9 @@ version_path_separator = os # Use os.pathsep. Default configuration used for ne
# are written from script.py.mako
# output_encoding = utf-8

; sqlalchemy.url = sqlite:///L:\Robotics Laboratory Support\Submissions\submissions.db
sqlalchemy.url = sqlite:///L:\Robotics Laboratory Support\Submissions\submissions.db
; sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\Archives\Submissions_app_backups\DB_backups\submissions-new.db
sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\python\submissions\tests\test_assets\submissions-test.db
; sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\python\submissions\tests\test_assets\submissions-test.db

[post_write_hooks]
@@ -0,0 +1,44 @@
"""SubmissionReagentAssociations added

Revision ID: 238c3c3e5863
Revises: 2684f065037c
Create Date: 2023-12-05 12:57:17.446606

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '238c3c3e5863'
down_revision = '2684f065037c'
branch_labels = None
depends_on = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('_reagents_submissions', schema=None) as batch_op:
        batch_op.add_column(sa.Column('comments', sa.String(length=1024), nullable=True))
        batch_op.alter_column('reagent_id',
               existing_type=sa.INTEGER(),
               nullable=False)
        batch_op.alter_column('submission_id',
               existing_type=sa.INTEGER(),
               nullable=False)

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('_reagents_submissions', schema=None) as batch_op:
        batch_op.alter_column('submission_id',
               existing_type=sa.INTEGER(),
               nullable=True)
        batch_op.alter_column('reagent_id',
               existing_type=sa.INTEGER(),
               nullable=True)
        batch_op.drop_column('comments')

    # ### end Alembic commands ###
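For anyone applying this revision outside the usual workflow, Alembic's command API can run it from Python as well; a minimal sketch, assuming the alembic.ini shown above sits in the working directory (the path is illustrative, not part of this commit):

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")            # reads the sqlalchemy.url configured above
command.upgrade(cfg, "238c3c3e5863")   # upgrades the target database to this revision (or use "head")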
@@ -4,7 +4,7 @@ from pathlib import Path
# Version of the realpython-reader package
__project__ = "submissions"
__version__ = "202312.1b"
__version__ = "202312.2b"
__author__ = {"name":"Landon Wark", "email":"Landon.Wark@phac-aspc.gc.ca"}
__copyright__ = "2022-2023, Government of Canada"
@@ -1,7 +1,6 @@
import sys
import os
# environment variable must be set to enable qtwebengine in network path
import sys, os
from tools import ctx, setup_logger, check_if_app
# environment variable must be set to enable qtwebengine in network path
if check_if_app():
    os.environ['QTWEBENGINE_DISABLE_SANDBOX'] = "1"
# setup custom logger
@@ -1,5 +1,21 @@
'''
All database related operations.
'''
from .functions import *
from .models import *
from sqlalchemy import event
from sqlalchemy.engine import Engine


@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
    """
    *should* allow automatic creation of foreign keys in the database
    I have no idea how it actually works.

    Args:
        dbapi_connection (_type_): _description_
        connection_record (_type_): _description_
    """
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()

from .models import *
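For context on the listener above: SQLite turns foreign-key enforcement off for every new connection, so the hook issues PRAGMA foreign_keys=ON each time SQLAlchemy checks out a fresh DBAPI connection. It does not create keys; it enforces the ones already declared on the tables. A self-contained sketch of the same mechanism, using a throwaway in-memory database purely for illustration:

from sqlalchemy import create_engine, event, text
from sqlalchemy.engine import Engine

@event.listens_for(Engine, "connect")
def _enable_fk(dbapi_connection, connection_record):
    # fires once per new DBAPI connection; enforcement is per-connection in SQLite
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()

engine = create_engine("sqlite://")  # in-memory database, illustrative only
with engine.connect() as conn:
    print(conn.execute(text("PRAGMA foreign_keys")).scalar())  # -> 1, enforcement is on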
@@ -1,211 +0,0 @@
'''Contains or imports all database convenience functions'''
from tools import Result, Report
from sqlalchemy import event
from sqlalchemy.engine import Engine
from sqlalchemy.exc import OperationalError as AlcOperationalError, IntegrityError as AlcIntegrityError
from sqlite3 import OperationalError as SQLOperationalError, IntegrityError as SQLIntegrityError
import logging
import pandas as pd
import json
from .models import *
import logging
from backend.validators.pydant import *

logger = logging.getLogger(f"Submissions_{__name__}")

@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
    """
    *should* allow automatic creation of foreign keys in the database
    I have no idea how it actually works.

    Args:
        dbapi_connection (_type_): _description_
        connection_record (_type_): _description_
    """
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()

def submissions_to_df(submission_type:str|None=None, limit:int=0) -> pd.DataFrame:
    """
    Convert submissions looked up by type to dataframe

    Args:
        ctx (Settings): settings object passed by gui
        submission_type (str | None, optional): submission type (should be string in D3 of excel sheet) Defaults to None.
        limit (int): Maximum number of submissions to return. Defaults to 0.

    Returns:
        pd.DataFrame: dataframe constructed from retrieved submissions
    """
    logger.debug(f"Querying Type: {submission_type}")
    logger.debug(f"Using limit: {limit}")
    # use lookup function to create list of dicts
    # subs = [item.to_dict() for item in lookup_submissions(ctx=ctx, submission_type=submission_type, limit=limit)]
    subs = [item.to_dict() for item in BasicSubmission.query(submission_type=submission_type, limit=limit)]
    logger.debug(f"Got {len(subs)} submissions.")
    # make df from dicts (records) in list
    df = pd.DataFrame.from_records(subs)
    # Exclude sub information
    try:
        df = df.drop("controls", axis=1)
    except:
        logger.warning(f"Couldn't drop 'controls' column from submissionsheet df.")
    try:
        df = df.drop("ext_info", axis=1)
    except:
        logger.warning(f"Couldn't drop 'ext_info' column from submissionsheet df.")
    try:
        df = df.drop("pcr_info", axis=1)
    except:
        logger.warning(f"Couldn't drop 'pcr_info' column from submissionsheet df.")
    # NOTE: Moved to submissions_to_df function
    try:
        del df['samples']
    except KeyError:
        pass
    try:
        del df['reagents']
    except KeyError:
        pass
    try:
        del df['comments']
    except KeyError:
        pass
    return df

def get_control_subtypes(type:str, mode:str) -> list[str]:
    """
    Get subtypes for a control analysis mode

    Args:
        ctx (Settings): settings object passed from gui
        type (str): control type name
        mode (str): analysis mode name

    Returns:
        list[str]: list of subtype names
    """
    # Only the first control of type is necessary since they all share subtypes
    try:
        # outs = lookup_controls(ctx=ctx, control_type=type, limit=1)
        outs = Control.query(control_type=type, limit=1)
    except (TypeError, IndexError):
        return []
    # Get analysis mode data as dict
    jsoner = json.loads(getattr(outs, mode))
    logger.debug(f"JSON out: {jsoner}")
    try:
        genera = list(jsoner.keys())[0]
    except IndexError:
        return []
    subtypes = [item for item in jsoner[genera] if "_hashes" not in item and "_ratio" not in item]
    return subtypes

def update_last_used(reagent:Reagent, kit:KitType):
    """
    Updates the 'last_used' field in kittypes/reagenttypes

    Args:
        reagent (models.Reagent): reagent to be used for update
        kit (models.KitType): kit to be used for lookup
    """
    report = Report()
    logger.debug(f"Attempting update of reagent type at intersection of ({reagent}), ({kit})")
    rt = ReagentType.query(kit_type=kit, reagent=reagent)
    if rt != None:
        assoc = KitTypeReagentTypeAssociation.query(kit_type=kit, reagent_type=rt)
        if assoc != None:
            if assoc.last_used != reagent.lot:
                logger.debug(f"Updating {assoc} last used to {reagent.lot}")
                assoc.last_used = reagent.lot
                result = assoc.save()
                return(report.add_result(result))
    return report.add_result(Result(msg=f"Updating last used {rt} was not performed.", status="Information"))


def check_kit_integrity(sub:BasicSubmission|KitType|PydSubmission, reagenttypes:list=[]) -> Tuple[list, Report]:
    """
    Ensures all reagents expected in kit are listed in Submission

    Args:
        sub (BasicSubmission | KitType): Object containing complete list of reagent types.
        reagenttypes (list | None, optional): List to check against complete list. Defaults to None.

    Returns:
        dict|None: Result object containing a message and any missing components.
    """
    report = Report()
    logger.debug(type(sub))
    # What type is sub?
    # reagenttypes = []
    match sub:
        case PydSubmission():
            # ext_kit = lookup_kit_types(ctx=ctx, name=sub.extraction_kit['value'])
            ext_kit = KitType.query(name=sub.extraction_kit['value'])
            ext_kit_rtypes = [item.name for item in ext_kit.get_reagents(required=True, submission_type=sub.submission_type['value'])]
            reagenttypes = [item.type for item in sub.reagents]
        case BasicSubmission():
            # Get all required reagent types for this kit.
            ext_kit_rtypes = [item.name for item in sub.extraction_kit.get_reagents(required=True, submission_type=sub.submission_type_name)]
            # Overwrite function parameter reagenttypes
            for reagent in sub.reagents:
                logger.debug(f"For kit integrity, looking up reagent: {reagent}")
                try:
                    # rt = list(set(reagent.type).intersection(sub.extraction_kit.reagent_types))[0].name
                    # rt = lookup_reagent_types(ctx=ctx, kit_type=sub.extraction_kit, reagent=reagent)
                    rt = ReagentType.query(kit_type=sub.extraction_kit, reagent=reagent)
                    logger.debug(f"Got reagent type: {rt}")
                    if isinstance(rt, ReagentType):
                        reagenttypes.append(rt.name)
                except AttributeError as e:
                    logger.error(f"Problem parsing reagents: {[f'{reagent.lot}, {reagent.type}' for reagent in sub.reagents]}")
                    reagenttypes.append(reagent.type[0].name)
                except IndexError:
                    logger.error(f"No intersection of {reagent} type {reagent.type} and {sub.extraction_kit.reagent_types}")
                    raise ValueError(f"No intersection of {reagent} type {reagent.type} and {sub.extraction_kit.reagent_types}")
        case KitType():
            ext_kit_rtypes = [item.name for item in sub.get_reagents(required=True)]
        case _:
            raise ValueError(f"There was no match for the integrity object.\n\nCheck to make sure they are imported from the same place because it matters.")
    logger.debug(f"Kit reagents: {ext_kit_rtypes}")
    logger.debug(f"Submission reagents: {reagenttypes}")
    # check if lists are equal
    check = set(ext_kit_rtypes) == set(reagenttypes)
    logger.debug(f"Checking if reagents match kit contents: {check}")
    # what reagent types are in both lists?
    missing = list(set(ext_kit_rtypes).difference(reagenttypes))
    logger.debug(f"Missing reagents types: {missing}")
    # if lists are equal return no problem
    if len(missing)==0:
        result = None
    else:
        result = Result(msg=f"The submission you are importing is missing some reagents expected by the kit.\n\nIt looks like you are missing: {[item.upper() for item in missing]}\n\nAlternatively, you may have set the wrong extraction kit.\n\nThe program will populate lists using existing reagents.\n\nPlease make sure you check the lots carefully!", status="Warning")
    report.add_result(result)
    return report

def update_subsampassoc_with_pcr(submission:BasicSubmission, sample:BasicSample, input_dict:dict) -> dict|None:
    """
    Inserts PCR results into wastewater submission/sample association

    Args:
        ctx (Settings): settings object passed down from gui
        submission (models.BasicSubmission): Submission object
        sample (models.BasicSample): Sample object
        input_dict (dict): dictionary with info to be updated.

    Returns:
        dict|None: result object
    """
    # assoc = lookup_submission_sample_association(ctx, submission=submission, sample=sample)
    assoc = SubmissionSampleAssociation.query(submission=submission, sample=sample, limit=1)
    for k,v in input_dict.items():
        try:
            setattr(assoc, k, v)
        except AttributeError:
            logger.error(f"Can't set {k} to {v}")
    # result = store_object(ctx=ctx, object=assoc)
    result = assoc.save()
    return result
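With this module deleted, call sites elsewhere in the commit switch to methods on the models themselves. A quick orientation sketch of the replacements this commit makes (variable names are illustrative):

df = BasicSubmission.submissions_to_df()                              # was: submissions_to_df()
subtypes = ControlType.query(name=con_type).get_subtypes(mode=mode)   # was: get_control_subtypes(type=con_type, mode=mode)
report = pyd_submission.check_kit_integrity()                         # was: check_kit_integrity(sub=pyd_submission)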
@@ -4,9 +4,8 @@ All control related models.
from __future__ import annotations
from sqlalchemy import Column, String, TIMESTAMP, JSON, INTEGER, ForeignKey
from sqlalchemy.orm import relationship, Query
import logging
import logging, json
from operator import itemgetter
import json
from . import BaseClass
from tools import setup_lookup, query_return
from datetime import date, datetime
@@ -51,6 +50,26 @@ class ControlType(BaseClass):
            pass
        return query_return(query=query, limit=limit)

    def get_subtypes(self, mode:str) -> List[str]:
        """
        Get subtypes associated with this controltype

        Args:
            mode (str): analysis mode name

        Returns:
            List[str]: list of subtypes available
        """
        outs = self.instances[0]
        jsoner = json.loads(getattr(outs, mode))
        logger.debug(f"JSON out: {jsoner.keys()}")
        try:
            genera = list(jsoner.keys())[0]
        except IndexError:
            return []
        subtypes = [item for item in jsoner[genera] if "_hashes" not in item and "_ratio" not in item]
        return subtypes

class Control(BaseClass):
    """
    Base class of a control sample.
@@ -249,4 +268,3 @@ class Control(BaseClass):
    def save(self):
        self.__database_session__.add(self)
        self.__database_session__.commit()
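To make the filtering in get_subtypes concrete, here is a toy payload shaped like the stored analysis JSON (the genus key and counts are made up; only the *_hashes / *_ratio suffixes come from the code above):

import json

stored = json.dumps({"SomeGenus": {"contains": 12, "matches": 10, "contains_hashes": "900/1000", "contains_ratio": 0.9}})
jsoner = json.loads(stored)
genera = list(jsoner.keys())[0]
subtypes = [item for item in jsoner[genera] if "_hashes" not in item and "_ratio" not in item]
print(subtypes)  # ['contains', 'matches']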
@@ -183,15 +183,6 @@ class ReagentType(BaseClass):
    # creator function: https://stackoverflow.com/questions/11091491/keyerror-when-adding-objects-to-sqlalchemy-association-object/11116291#11116291
    kit_types = association_proxy("reagenttype_kit_associations", "kit_type", creator=lambda kit: KitTypeReagentTypeAssociation(kit_type=kit))

    # def __str__(self) -> str:
    #     """
    #     string representing this object

    #     Returns:
    #         str: string representing this object's name
    #     """
    #     return self.name

    def __repr__(self):
        return f"<ReagentType({self.name})>"

@@ -379,7 +370,17 @@ class Reagent(BaseClass):
    name = Column(String(64)) #: reagent name
    lot = Column(String(64)) #: lot number of reagent
    expiry = Column(TIMESTAMP) #: expiry date - extended by eol_ext of parent programmatically
    submissions = relationship("BasicSubmission", back_populates="reagents", uselist=True) #: submissions this reagent is used in
    # submissions = relationship("BasicSubmission", back_populates="reagents", uselist=True) #: submissions this reagent is used in

    reagent_submission_associations = relationship(
        "SubmissionReagentAssociation",
        back_populates="reagent",
        cascade="all, delete-orphan",
    ) #: Relation to SubmissionSampleAssociation
    # association proxy of "user_keyword_associations" collection
    # to "keyword" attribute
    submissions = association_proxy("reagent_submission_associations", "submission") #: Association proxy to SubmissionSampleAssociation.samples


    def __repr__(self):
        if self.name != None:
@@ -706,3 +707,69 @@ class SubmissionTypeKitTypeAssociation(BaseClass):
        query = query.join(KitType).filter(KitType.id==kit_type)
        limit = query.count()
        return query_return(query=query, limit=limit)

class SubmissionReagentAssociation(BaseClass):

    __tablename__ = "_reagents_submissions"

    reagent_id = Column(INTEGER, ForeignKey("_reagents.id"), primary_key=True) #: id of associated sample
    submission_id = Column(INTEGER, ForeignKey("_submissions.id"), primary_key=True)
    comments = Column(String(1024))

    submission = relationship("BasicSubmission", back_populates="submission_reagent_associations") #: associated submission

    reagent = relationship(Reagent, back_populates="reagent_submission_associations")

    def __repr__(self):
        return f"<{self.submission.rsl_plate_num}&{self.reagent.lot}>"

    def __init__(self, reagent=None, submission=None):
        self.reagent = reagent
        self.submission = submission
        self.comments = ""

    @classmethod
    @setup_lookup
    def query(cls,
        submission:"BasicSubmission"|str|int|None=None,
        reagent:Reagent|str|None=None,
        limit:int=0) -> SubmissionReagentAssociation|List[SubmissionReagentAssociation]:
        """
        Lookup SubmissionReagentAssociations of interest.

        Args:
            submission (BasicSubmission | str | int | None, optional): Identifier of joined submission. Defaults to None.
            reagent (Reagent | str | None, optional): Identifier of joined reagent. Defaults to None.
            limit (int, optional): Maximum number of results to return (0 = all). Defaults to 0.

        Returns:
            SubmissionReagentAssociation|List[SubmissionReagentAssociation]: SubmissionReagentAssociation(s) of interest
        """
        from . import BasicSubmission
        query: Query = cls.__database_session__.query(cls)
        match reagent:
            case Reagent():
                query = query.filter(cls.reagent==reagent)
            case str():
                # logger.debug(f"Filtering query with reagent: {reagent}")
                reagent = Reagent.query(lot_number=reagent)
                query = query.filter(cls.reagent==reagent)
                # logger.debug([item.reagent.lot for item in query.all()])
                # query = query.join(Reagent).filter(Reagent.lot==reagent)
            case _:
                pass
        # logger.debug(f"Result of query after reagent: {query.all()}")
        match submission:
            case BasicSubmission():
                query = query.filter(cls.submission==submission)
            case str():
                query = query.join(BasicSubmission).filter(BasicSubmission.rsl_plate_num==submission)
            case int():
                query = query.join(BasicSubmission).filter(BasicSubmission.id==submission)
            case _:
                pass
        # logger.debug(f"Result of query after submission: {query.all()}")
        # limit = query.count()
        return query_return(query=query, limit=limit)
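A hedged usage sketch for the new association lookup; the plate number and lot are placeholders, and the single-object return for limit=1 assumes query_return follows the same convention it does elsewhere in this codebase:

# all reagent associations for one submission, identified by rsl_plate_num
assocs = SubmissionReagentAssociation.query(submission="RSL-EXAMPLE-0001")
# narrow to one reagent lot and read the new comments column
assoc = SubmissionReagentAssociation.query(reagent="LOT-0000", limit=1)
print(assoc.comments)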
@@ -6,13 +6,13 @@ from getpass import getuser
import math, json, logging, uuid, tempfile, re, yaml
from pprint import pformat
from . import Reagent, SubmissionType, KitType, Organization
from sqlalchemy import Column, String, TIMESTAMP, INTEGER, ForeignKey, Table, JSON, FLOAT, case
from sqlalchemy import Column, String, TIMESTAMP, INTEGER, ForeignKey, JSON, FLOAT, case
from sqlalchemy.orm import relationship, validates, Query
from json.decoder import JSONDecodeError
from sqlalchemy.ext.associationproxy import association_proxy
import pandas as pd
from openpyxl import Workbook
from . import Base, BaseClass
from . import BaseClass
from tools import check_not_nan, row_map, query_return, setup_lookup
from datetime import datetime, date
from typing import List
@@ -24,15 +24,6 @@ from pathlib import Path

logger = logging.getLogger(f"submissions.{__name__}")

# table containing reagents/submission relationships
reagents_submissions = Table(
    "_reagents_submissions",
    Base.metadata,
    Column("reagent_id", INTEGER, ForeignKey("_reagents.id")),
    Column("submission_id", INTEGER, ForeignKey("_submissions.id")),
    extend_existing = True
)

class BasicSubmission(BaseClass):
    """
    Concrete of basic submission which polymorphs into BacterialCulture and Wastewater
@@ -51,7 +42,7 @@ class BasicSubmission(BaseClass):
    submission_type_name = Column(String, ForeignKey("_submission_types.name", ondelete="SET NULL", name="fk_BS_subtype_name")) #: name of joined submission type
    technician = Column(String(64)) #: initials of processing tech(s)
    # Move this into custom types?
    reagents = relationship("Reagent", back_populates="submissions", secondary=reagents_submissions) #: relationship to reagents
    # reagents = relationship("Reagent", back_populates="submissions", secondary=reagents_submissions) #: relationship to reagents
    reagents_id = Column(String, ForeignKey("_reagents.id", ondelete="SET NULL", name="fk_BS_reagents_id")) #: id of used reagents
    extraction_info = Column(JSON) #: unstructured output from the extraction table logger.
    pcr_info = Column(JSON) #: unstructured output from pcr table logger or user(Artic)
@@ -69,6 +60,15 @@ class BasicSubmission(BaseClass):
    # to "keyword" attribute
    samples = association_proxy("submission_sample_associations", "sample") #: Association proxy to SubmissionSampleAssociation.samples

    submission_reagent_associations = relationship(
        "SubmissionReagentAssociation",
        back_populates="submission",
        cascade="all, delete-orphan",
    ) #: Relation to SubmissionSampleAssociation
    # association proxy of "user_keyword_associations" collection
    # to "keyword" attribute
    reagents = association_proxy("submission_reagent_associations", "reagent") #: Association proxy to SubmissionSampleAssociation.samples

    # Allows for subclassing into ex. BacterialCulture, Wastewater, etc.
    __mapper_args__ = {
        "polymorphic_identity": "Basic Submission",
@@ -438,6 +438,22 @@ class BasicSubmission(BaseClass):
        """
        return "{{ rsl_plate_num }}"

    @classmethod
    def submissions_to_df(cls, submission_type:str|None=None, limit:int=0) -> pd.DataFrame:
        logger.debug(f"Querying Type: {submission_type}")
        logger.debug(f"Using limit: {limit}")
        # use lookup function to create list of dicts
        subs = [item.to_dict() for item in cls.query(submission_type=submission_type, limit=limit)]
        logger.debug(f"Got {len(subs)} submissions.")
        df = pd.DataFrame.from_records(subs)
        # Exclude sub information
        for item in ['controls', 'extraction_info', 'pcr_info', 'comment', 'comments', 'samples', 'reagents']:
            try:
                df = df.drop(item, axis=1)
            except:
                logger.warning(f"Couldn't drop '{item}' column from submissionsheet df.")
        return df

    def set_attribute(self, key:str, value):
        """
        Performs custom attribute setting based on values.
@@ -479,6 +495,11 @@ class BasicSubmission(BaseClass):
                field_value = value
            case "ctx" | "csv" | "filepath":
                return
            case "comment":
                if value == "" or value == None or value == 'null':
                    field_value = None
                else:
                    field_value = dict(name="submitter", text=value, time=datetime.now())
            case _:
                field_value = value
        # insert into field
@@ -595,7 +616,8 @@ class BasicSubmission(BaseClass):
                start_date:date|str|int|None=None,
                end_date:date|str|int|None=None,
                reagent:Reagent|str|None=None,
                chronologic:bool=False, limit:int=0,
                chronologic:bool=False,
                limit:int=0,
                **kwargs
                ) -> BasicSubmission | List[BasicSubmission]:
        """
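The new classmethod is what the submissions table widget calls later in this commit; a short hedged example (the submission type string is illustrative):

df = BasicSubmission.submissions_to_df(submission_type="Wastewater", limit=100)
# bulky per-submission blobs ('samples', 'reagents', 'pcr_info', ...) have already been dropped
print(df.columns.tolist())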
@@ -9,9 +9,8 @@ import numpy as np
from pathlib import Path
from backend.db.models import *
from backend.validators import PydSubmission, PydReagent, RSLNamer, PydSample
import logging
import logging, re
from collections import OrderedDict
import re
from datetime import date
from dateutil.parser import parse, ParserError
from tools import check_not_nan, convert_nans_to_nones, Settings
@@ -256,8 +255,12 @@ class ReagentParser(object):
                name = df.iat[relevant[item]['name']['row']-1, relevant[item]['name']['column']-1]
                lot = df.iat[relevant[item]['lot']['row']-1, relevant[item]['lot']['column']-1]
                expiry = df.iat[relevant[item]['expiry']['row']-1, relevant[item]['expiry']['column']-1]
                if 'comment' in relevant[item].keys():
                    comment = df.iat[relevant[item]['comment']['row']-1, relevant[item]['comment']['column']-1]
                else:
                    comment = ""
            except (KeyError, IndexError):
                listo.append(PydReagent(type=item.strip(), lot=None, expiry=None, name=None, missing=True))
                listo.append(PydReagent(type=item.strip(), lot=None, expiry=None, name=None, comment="", missing=True))
                continue
            # If the cell is blank tell the PydReagent
            if check_not_nan(lot):
@@ -266,8 +269,8 @@ class ReagentParser(object):
                missing = True
            # logger.debug(f"Got lot for {item}-{name}: {lot} as {type(lot)}")
            lot = str(lot)
            logger.debug(f"Going into pydantic: name: {name}, lot: {lot}, expiry: {expiry}, type: {item.strip()}")
            listo.append(PydReagent(type=item.strip(), lot=lot, expiry=expiry, name=name, missing=missing))
            logger.debug(f"Going into pydantic: name: {name}, lot: {lot}, expiry: {expiry}, type: {item.strip()}, comment: {comment}")
            listo.append(PydReagent(type=item.strip(), lot=lot, expiry=expiry, name=name, comment=comment, missing=missing))
        # logger.debug(f"Returning listo: {listo}")
        return listo
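The repeated -1 offsets exist because the kit map stores 1-based spreadsheet rows/columns while pandas .iat indexing is 0-based; a toy illustration (not the real submission sheet):

import pandas as pd

df = pd.DataFrame([["Wash Buffer", "LOT123"]], columns=["A", "B"])
row, column = 1, 2                  # 1-based coordinates as stored in the lookup map
print(df.iat[row - 1, column - 1])  # -> 'LOT123'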
@@ -2,9 +2,8 @@
Contains functions for generating summary reports
'''
from pandas import DataFrame
import logging
import logging, re
from datetime import date, timedelta
import re
from typing import List, Tuple
from tools import jinja_template_loading, Settings
@@ -3,7 +3,6 @@ from pathlib import Path
from openpyxl import load_workbook
from backend.db.models import BasicSubmission, SubmissionType


logger = logging.getLogger(f"submissions.{__name__}")

class RSLNamer(object):
@@ -2,7 +2,7 @@
Contains pydantic models and accompanying validators
'''
from operator import attrgetter
import uuid
import uuid, re, logging
from pydantic import BaseModel, field_validator, Field
from datetime import date, datetime, timedelta
from dateutil.parser import parse
@@ -10,8 +10,6 @@ from dateutil.parser._parser import ParserError
from typing import List, Any, Tuple
from . import RSLNamer
from pathlib import Path
import re
import logging
from tools import check_not_nan, convert_nans_to_nones, jinja_template_loading, Report, Result, row_map
from backend.db.models import *
from sqlalchemy.exc import StatementError, IntegrityError
@@ -28,6 +26,14 @@ class PydReagent(BaseModel):
    expiry: date|None
    name: str|None
    missing: bool = Field(default=True)
    comment: str|None = Field(default="", validate_default=True)

    @field_validator('comment', mode='before')
    @classmethod
    def create_comment(cls, value):
        if value == None:
            return ""
        return value

    @field_validator("type", mode='before')
    @classmethod
@@ -88,7 +94,7 @@ class PydReagent(BaseModel):
        else:
            return values.data['type']

    def toSQL(self) -> Tuple[Reagent, Report]:
    def toSQL(self, submission:BasicSubmission|str=None) -> Tuple[Reagent, Report]:
        """
        Converts this instance into a backend.db.models.kit.Reagent instance

@@ -96,6 +102,8 @@ class PydReagent(BaseModel):
            Tuple[Reagent, Report]: Reagent instance and result of function
        """
        report = Report()
        if self.model_extra != None:
            self.__dict__.update(self.model_extra)
        logger.debug(f"Reagent SQL constructor is looking up type: {self.type}, lot: {self.lot}")
        reagent = Reagent.query(lot_number=self.lot)
        logger.debug(f"Result: {reagent}")
@@ -117,6 +125,11 @@ class PydReagent(BaseModel):
                        reagent.type.append(reagent_type)
                case "name":
                    reagent.name = value
                case "comment":
                    continue
        assoc = SubmissionReagentAssociation(reagent=reagent, submission=submission)
        assoc.comments = self.comment
        reagent.reagent_submission_associations.append(assoc)
        # add end-of-life extension from reagent type to expiry date
        # NOTE: this will now be done only in the reporting phase to account for potential changes in end-of-life extensions
        return reagent, report
@@ -203,9 +216,17 @@ class PydSubmission(BaseModel, extra='allow'):
    extraction_kit: dict|None
    technician: dict|None
    submission_category: dict|None = Field(default=dict(value=None, missing=True), validate_default=True)
    comment: dict|None = Field(default=dict(value="", missing=True), validate_default=True)
    reagents: List[dict]|List[PydReagent] = []
    samples: List[Any]

    @field_validator('comment', mode='before')
    @classmethod
    def create_comment(cls, value):
        if value == None:
            return ""
        return value

    @field_validator("submitter_plate_num")
    @classmethod
    def enforce_with_uuid(cls, value):
@@ -218,13 +239,19 @@ class PydSubmission(BaseModel, extra='allow'):
    @field_validator("submitted_date", mode="before")
    @classmethod
    def rescue_date(cls, value):
        if value == None:
        logger.debug(f"\n\nDate coming into pydantic: {value}\n\n")
        try:
            check = value['value'] == None
        except TypeError:
            check = True
        if check:
            return dict(value=date.today(), missing=True)
        return value

    @field_validator("submitted_date")
    @classmethod
    def strip_datetime_string(cls, value):

        if isinstance(value['value'], datetime):
            return value
        if isinstance(value['value'], date):
@@ -307,7 +334,6 @@ class PydSubmission(BaseModel, extra='allow'):
    @field_validator("submission_type", mode='before')
    @classmethod
    def make_submission_type(cls, value, values):
        logger.debug(f"Submission type coming into pydantic: {value}")
        if not isinstance(value, dict):
            value = {"value": value}
        if check_not_nan(value['value']):
@@ -331,8 +357,8 @@ class PydSubmission(BaseModel, extra='allow'):

    def handle_duplicate_samples(self):
        """
        Collapses multiple samples with same submitter id into one with lists for rows, columns
        TODO: Find out if this is really necessary
        Collapses multiple samples with same submitter id into one with lists for rows, columns.
        Necessary to prevent trying to create duplicate samples in SQL creation.
        """
        submitter_ids = list(set([sample.submitter_id for sample in self.samples]))
        output = []
@@ -581,9 +607,37 @@ class PydSubmission(BaseModel, extra='allow'):
        logger.debug(f"Template rendered as: {render}")
        return render

class PydContact(BaseModel):

    def check_kit_integrity(self, reagenttypes:list=[]) -> Report:
        """
        Ensures all reagents expected in kit are listed in Submission

        Args:
            reagenttypes (list | None, optional): List to check against complete list. Defaults to None.

        Returns:
            Report: Result object containing a message and any missing components.
        """
        report = Report()
        ext_kit = KitType.query(name=self.extraction_kit['value'])
        ext_kit_rtypes = [item.name for item in ext_kit.get_reagents(required=True, submission_type=self.submission_type['value'])]
        reagenttypes = [item.type for item in self.reagents]
        logger.debug(f"Kit reagents: {ext_kit_rtypes}")
        logger.debug(f"Submission reagents: {reagenttypes}")
        # check if lists are equal
        check = set(ext_kit_rtypes) == set(reagenttypes)
        logger.debug(f"Checking if reagents match kit contents: {check}")
        # what reagent types are in both lists?
        missing = list(set(ext_kit_rtypes).difference(reagenttypes))
        logger.debug(f"Missing reagents types: {missing}")
        # if lists are equal return no problem
        if len(missing)==0:
            result = None
        else:
            result = Result(msg=f"The submission you are importing is missing some reagents expected by the kit.\n\nIt looks like you are missing: {[item.upper() for item in missing]}\n\nAlternatively, you may have set the wrong extraction kit.\n\nThe program will populate lists using existing reagents.\n\nPlease make sure you check the lots carefully!", status="Warning")
        report.add_result(result)
        return report

class PydContact(BaseModel):
    name: str
    phone: str|None
    email: str|None
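The integrity check added above reduces to a set difference between what the kit requires and what the parsed submission provides; a worked toy example (reagent type names are made up):

ext_kit_rtypes = ["lysis buffer", "wash buffer", "elution buffer"]  # required by the kit
reagenttypes = ["lysis buffer", "elution buffer"]                   # present on the submission
missing = list(set(ext_kit_rtypes).difference(reagenttypes))
print(missing)  # ['wash buffer'] -> produces the Warning result above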
@@ -194,73 +194,6 @@ def construct_chart(df:pd.DataFrame, modes:list, ytitle:str|None=None) -> Figure
    fig.add_traces(bar.data)
    return generic_figure_markers(fig=fig, modes=modes, ytitle=ytitle)

# Below are the individual construction functions. They must be named "construct_{mode}_chart" and
# take only json_in and mode to hook into the main processor.

# def construct_refseq_chart(df:pd.DataFrame, group_name:str, mode:str) -> Figure:
#     """
#     Constructs intial refseq chart for both contains and matches (depreciated).

#     Args:
#         df (pd.DataFrame): dataframe containing all sample data for the group.
#         group_name (str): name of the group being processed.
#         mode (str): contains or matches, overwritten by hardcoding, so don't think about it too hard.

#     Returns:
#         Figure: initial figure with contains and matches traces.
#     """
#     # This overwrites the mode from the signature, might get confusing.
#     fig = Figure()
#     modes = ['contains', 'matches']
#     for ii, mode in enumerate(modes):
#         bar = px.bar(df, x="submitted_date",
#             y=f"{mode}_ratio",
#             color="target",
#             title=f"{group_name}_{mode}",
#             barmode='stack',
#             hover_data=["genus", "name", f"{mode}_hashes"],
#             text="genera"
#         )
#         bar.update_traces(visible = ii == 0)
#         # Plotly express returns a full figure, so we have to use the data from that figure only.
#         fig.add_traces(bar.data)
#     # sys.exit(f"number of traces={len(fig.data)}")
#     return generic_figure_markers(fig=fig, modes=modes)

# def construct_kraken_chart(settings:dict, df:pd.DataFrame, group_name:str, mode:str) -> Figure:
#     """
#     Constructs intial refseq chart for each mode in the kraken config settings. (depreciated)

#     Args:
#         settings (dict): settings passed down from click.
#         df (pd.DataFrame): dataframe containing all sample data for the group.
#         group_name (str): name of the group being processed.
#         mode (str): kraken modes retrieved from config file by setup.

#     Returns:
#         Figure: initial figure with traces for modes
#     """
#     df[f'{mode}_count'] = pd.to_numeric(df[f'{mode}_count'],errors='coerce')
#     df = df.groupby('submitted_date')[f'{mode}_count'].nlargest(2)

#     # The actual percentage from kraken was off due to exclusion of NaN, recalculating.
#     df[f'{mode}_percent'] = 100 * df[f'{mode}_count'] / df.groupby('submitted_date')[f'{mode}_count'].transform('sum')
#     modes = settings['modes'][mode]
#     # This overwrites the mode from the signature, might get confusing.
#     fig = Figure()
#     for ii, entry in enumerate(modes):
#         bar = px.bar(df, x="submitted_date",
#             y=entry,
#             color="genus",
#             title=f"{group_name}_{entry}",
#             barmode="stack",
#             hover_data=["genus", "name", "target"],
#             text="genera",
#         )
#         bar.update_traces(visible = ii == 0)
#         fig.add_traces(bar.data)
#     return generic_figure_markers(fig=fig, modes=modes)

def divide_chunks(input_list:list, chunk_count:int):
    """
    Divides a list into {chunk_count} equal parts
@@ -1,9 +1,8 @@
from pathlib import Path
import sys
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from tools import check_if_app, jinja_template_loading
import logging
import logging, sys

logger = logging.getLogger(f"submissions.{__name__}")
@@ -1,8 +1,6 @@
'''
Constructs main application.
TODO: Complete.
'''
import sys
from PyQt6.QtWidgets import (
    QTabWidget, QWidget, QVBoxLayout,
    QHBoxLayout, QScrollArea, QMainWindow,
@@ -14,14 +12,12 @@ from backend.validators import PydReagent
from tools import check_if_app, Settings, Report
from .pop_ups import AlertPop
from .misc import AddReagentForm, LogParser
import logging
import logging, webbrowser, sys
from datetime import date
import webbrowser
from .submission_table import SubmissionsSheet
from .submission_widget import SubmissionFormContainer
from .controls_chart import ControlsViewer
from .kit_creator import KitAdder
import webbrowser

logger = logging.getLogger(f'submissions.{__name__}')
logger.info("Hello, I am a logger")
@@ -3,8 +3,8 @@ from PyQt6.QtWidgets import (
    QWidget, QVBoxLayout, QComboBox, QHBoxLayout,
    QDateEdit, QLabel, QSizePolicy
)
from PyQt6.QtCore import QSignalBlocker, QLoggingCategory
from backend.db import ControlType, Control, get_control_subtypes
from PyQt6.QtCore import QSignalBlocker
from backend.db import ControlType, Control#, get_control_subtypes
from PyQt6.QtCore import QDate, QSize
import logging, sys
from tools import Report, Result
@@ -88,7 +88,8 @@ class ControlsViewer(QWidget):
        self.mode = self.mode_typer.currentText()
        self.sub_typer.clear()
        # lookup subtypes
        sub_types = get_control_subtypes(type=self.con_type, mode=self.mode)
        # sub_types = get_control_subtypes(type=self.con_type, mode=self.mode)
        sub_types = ControlType.query(name=self.con_type).get_subtypes(mode=self.mode)
        # sub_types = lookup_controls(ctx=obj.ctx, control_type=obj.con_type)
        if sub_types != []:
            # block signal that will rerun controls getter and update sub_typer
@@ -15,7 +15,6 @@ from PyQt6.QtWidgets import (
from PyQt6.QtWebEngineWidgets import QWebEngineView
from PyQt6.QtCore import Qt, QAbstractTableModel, QSortFilterProxyModel
from PyQt6.QtGui import QAction, QCursor, QPixmap, QPainter
from backend.db.functions import submissions_to_df
from backend.db.models import BasicSubmission
from backend.excel import make_report_html, make_report_xlsx
from tools import check_if_app, Report, Result, jinja_template_loading, get_first_blank_df_row, row_map
@@ -102,7 +101,8 @@ class SubmissionsSheet(QTableView):
        """
        sets data in model
        """
        self.data = submissions_to_df()
        # self.data = submissions_to_df()
        self.data = BasicSubmission.submissions_to_df()
        try:
            self.data['id'] = self.data['id'].apply(str)
            self.data['id'] = self.data['id'].str.zfill(3)
@@ -5,23 +5,20 @@ from PyQt6.QtWidgets import (
from PyQt6.QtCore import pyqtSignal
from pathlib import Path
from . import select_open_file, select_save_file
import logging
import logging, difflib, inspect, json, sys
from pathlib import Path
from tools import Report, Result, check_not_nan
from backend.excel.parser import SheetParser, PCRParser
from backend.validators import PydSubmission, PydReagent
from backend.db import (
    check_kit_integrity, KitType, Organization, SubmissionType, Reagent,
    KitType, Organization, SubmissionType, Reagent,
    ReagentType, KitTypeReagentTypeAssociation, BasicSubmission
)
from pprint import pformat
from .pop_ups import QuestionAsker, AlertPop
# from .misc import ReagentFormWidget
from typing import List, Tuple
import difflib
from datetime import date
import inspect
import json

logger = logging.getLogger(f"submissions.{__name__}")

@@ -143,14 +140,14 @@ class SubmissionFormContainer(QWidget):
            return
        except AttributeError:
            self.prsr = SheetParser(ctx=self.app.ctx, filepath=fname)
        try:
            logger.debug(f"Submission dictionary:\n{pformat(self.prsr.sub)}")
            self.pyd = self.prsr.to_pydantic()
            logger.debug(f"Pydantic result: \n\n{pformat(self.pyd)}\n\n")
        except Exception as e:
            report.add_result(Result(msg=f"Problem creating pydantic model:\n\n{e}", status="Critical"))
            self.report.add_result(report)
            return
        # try:
        logger.debug(f"Submission dictionary:\n{pformat(self.prsr.sub)}")
        self.pyd = self.prsr.to_pydantic()
        logger.debug(f"Pydantic result: \n\n{pformat(self.pyd)}\n\n")
        # except Exception as e:
        #     report.add_result(Result(msg=f"Problem creating pydantic model:\n\n{e}", status="Critical"))
        #     self.report.add_result(report)
        #     return
        self.form = self.pyd.toForm(parent=self)
        self.layout().addWidget(self.form)
        kit_widget = self.form.find_widgets(object_name="extraction_kit")[0].input
@@ -265,7 +262,8 @@ class SubmissionFormContainer(QWidget):
        self.pyd: PydSubmission = self.form.parse_form()
        logger.debug(f"Submission: {pformat(self.pyd)}")
        logger.debug("Checking kit integrity...")
        result = check_kit_integrity(sub=self.pyd)
        # result = check_kit_integrity(sub=self.pyd)
        result = self.pyd.check_kit_integrity()
        report.add_result(result)
        if len(result.results) > 0:
            self.report.add_result(report)
@@ -417,7 +415,8 @@ class SubmissionFormWidget(QWidget):
        # self.ignore = [None, "", "qt_spinbox_lineedit", "qt_scrollarea_viewport", "qt_scrollarea_hcontainer",
        #     "qt_scrollarea_vcontainer", "submit_btn"
        # ]
        self.ignore = ['filepath', 'samples', 'reagents', 'csv', 'ctx']
        self.ignore = ['filepath', 'samples', 'reagents', 'csv', 'ctx', 'comment']
        self.recover = ['filepath', 'samples', 'csv', 'comment']
        layout = QVBoxLayout()
        for k, v in kwargs.items():
            if k not in self.ignore:
@@ -448,8 +447,6 @@ class SubmissionFormWidget(QWidget):
        logger.debug(f"Hello from form parser!")
        info = {}
        reagents = []
        if hasattr(self, 'csv'):
            info['csv'] = self.csv
        for widget in self.findChildren(QWidget):
            # logger.debug(f"Parsed widget of type {type(widget)}")
            match widget:
@@ -463,9 +460,13 @@ class SubmissionFormWidget(QWidget):
                    info[field] = value
        logger.debug(f"Info: {pformat(info)}")
        logger.debug(f"Reagents: {pformat(reagents)}")
        # sys.exit()
        # logger.debug(f"Attrs not in info: {[k for k, v in self.__dict__.items() if k not in info.keys()]}")
        for item in self.recover:
            if hasattr(self, item):
                info[item] = getattr(self, item)
        # app = self.parent().parent().parent().parent().parent().parent().parent().parent
        submission = PydSubmission(filepath=self.filepath, reagents=reagents, samples=self.samples, **info)
        # submission = PydSubmission(filepath=self.filepath, reagents=reagents, samples=self.samples, **info)
        submission = PydSubmission(reagents=reagents, **info)
        return submission

class InfoItem(QWidget):
@@ -5,12 +5,9 @@ from __future__ import annotations
from pathlib import Path
import re
import numpy as np
import logging
import logging, re, yaml, sys, os, stat, platform, getpass, inspect
import pandas as pd
from jinja2 import Environment, FileSystemLoader
import yaml
import sys, os, stat, platform, getpass
import logging
from logging import handlers
from pathlib import Path
from sqlalchemy.orm import Query, Session
@@ -18,8 +15,6 @@ from sqlalchemy import create_engine
from pydantic import field_validator, BaseModel, Field
from pydantic_settings import BaseSettings, SettingsConfigDict
from typing import Any, Tuple, Literal, List
import inspect

logger = logging.getLogger(f"submissions.{__name__}")