Improvements to JSON updaters.

This commit is contained in:
Landon Wark
2024-04-19 12:41:26 -05:00
parent c9bd8d1425
commit d91591e4ba
11 changed files with 266 additions and 226 deletions

View File

@@ -1,3 +1,4 @@
- [ ] Critical: Convert Json lists to dicts so I can have them update properly without using the crash-prone sqlalchemy-json package
- [ ] Fix Parsed/Missing mix ups. - [ ] Fix Parsed/Missing mix ups.
- [x] Have sample parser check for controls and add to reagents? - [x] Have sample parser check for controls and add to reagents?
- [x] Update controls to NestedMutableJson - [x] Update controls to NestedMutableJson

View File

@@ -57,7 +57,7 @@ version_path_separator = os # Use os.pathsep. Default configuration used for ne
; sqlalchemy.url = sqlite:///L:\Robotics Laboratory Support\Submissions\submissions.db ; sqlalchemy.url = sqlite:///L:\Robotics Laboratory Support\Submissions\submissions.db
; sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\Archives\Submissions_app_backups\DB_backups\submissions-demo.db ; sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\Archives\Submissions_app_backups\DB_backups\submissions-demo.db
sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\python\submissions\tests\test_assets\submissions-test.db sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\python\submissions\mytests\test_assets\submissions-test.db
[post_write_hooks] [post_write_hooks]

View File

@@ -0,0 +1,51 @@
"""adding source plates to Artic submission...again
Revision ID: f18487b41f45
Revises: fabf697c721d
Create Date: 2024-04-17 10:42:30.508213
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f18487b41f45'  # unique id of this migration script
down_revision = 'fabf697c721d'  # parent migration this one applies on top of
branch_labels = None  # no named branch labels for this revision
depends_on = None  # no cross-branch dependencies
def upgrade() -> None:
    """Apply the migration: add a nullable ``source_plates`` JSON column to ``_wastewaterartic``."""
    # Build the column definition up front for readability.
    source_plates = sa.Column('source_plates', sa.JSON(), nullable=True)
    # Batch mode rewrites the table, which is required for ALTER support on SQLite.
    with op.batch_alter_table('_wastewaterartic', schema=None) as batch_op:
        batch_op.add_column(source_plates)
def downgrade() -> None:
    """Revert the migration: drop the ``source_plates`` column from ``_wastewaterartic``."""
    # Batch mode rewrites the table, which is required for ALTER support on SQLite.
    with op.batch_alter_table('_wastewaterartic', schema=None) as batch_op:
        batch_op.drop_column('source_plates')

Binary file not shown.

View File

@@ -86,7 +86,7 @@ class BaseClass(Base):
""" """
Add the object to the database and commit Add the object to the database and commit
""" """
logger.debug(f"Saving object: {pformat(self.__dict__)}") # logger.debug(f"Saving object: {pformat(self.__dict__)}")
try: try:
self.__database_session__.add(self) self.__database_session__.add(self)
self.__database_session__.commit() self.__database_session__.commit()

View File

@@ -4,8 +4,7 @@ All control related models.
from __future__ import annotations from __future__ import annotations
from sqlalchemy import Column, String, TIMESTAMP, JSON, INTEGER, ForeignKey from sqlalchemy import Column, String, TIMESTAMP, JSON, INTEGER, ForeignKey
from sqlalchemy.orm import relationship, Query from sqlalchemy.orm import relationship, Query
from sqlalchemy_json import NestedMutableJson import logging, re, sys
import logging, re
from operator import itemgetter from operator import itemgetter
from . import BaseClass from . import BaseClass
from tools import setup_lookup from tools import setup_lookup
@@ -13,6 +12,7 @@ from datetime import date, datetime
from typing import List from typing import List
from dateutil.parser import parse from dateutil.parser import parse
logger = logging.getLogger(f"submissions.{__name__}") logger = logging.getLogger(f"submissions.{__name__}")
class ControlType(BaseClass): class ControlType(BaseClass):
@@ -86,7 +86,6 @@ class ControlType(BaseClass):
strings = list(set([item.name.split("-")[0] for item in cls.get_positive_control_types()])) strings = list(set([item.name.split("-")[0] for item in cls.get_positive_control_types()]))
return re.compile(rf"(^{'|^'.join(strings)})-.*", flags=re.IGNORECASE) return re.compile(rf"(^{'|^'.join(strings)})-.*", flags=re.IGNORECASE)
class Control(BaseClass): class Control(BaseClass):
""" """
Base class of a control sample. Base class of a control sample.
@@ -97,9 +96,9 @@ class Control(BaseClass):
controltype = relationship("ControlType", back_populates="instances", foreign_keys=[parent_id]) #: reference to parent control type controltype = relationship("ControlType", back_populates="instances", foreign_keys=[parent_id]) #: reference to parent control type
name = Column(String(255), unique=True) #: Sample ID name = Column(String(255), unique=True) #: Sample ID
submitted_date = Column(TIMESTAMP) #: Date submitted to Robotics submitted_date = Column(TIMESTAMP) #: Date submitted to Robotics
contains = Column(NestedMutableJson) #: unstructured hashes in contains.tsv for each organism contains = Column(JSON) #: unstructured hashes in contains.tsv for each organism
matches = Column(NestedMutableJson) #: unstructured hashes in matches.tsv for each organism matches = Column(JSON) #: unstructured hashes in matches.tsv for each organism
kraken = Column(NestedMutableJson) #: unstructured output from kraken_report kraken = Column(JSON) #: unstructured output from kraken_report
submission_id = Column(INTEGER, ForeignKey("_basicsubmission.id")) #: parent submission id submission_id = Column(INTEGER, ForeignKey("_basicsubmission.id")) #: parent submission id
submission = relationship("BacterialCulture", back_populates="controls", foreign_keys=[submission_id]) #: parent submission submission = relationship("BacterialCulture", back_populates="controls", foreign_keys=[submission_id]) #: parent submission
refseq_version = Column(String(16)) #: version of refseq used in fastq parsing refseq_version = Column(String(16)) #: version of refseq used in fastq parsing

View File

@@ -16,12 +16,15 @@ from . import BaseClass, Reagent, SubmissionType, KitType, Organization
# See: https://docs.sqlalchemy.org/en/14/orm/extensions/mutable.html#establishing-mutability-on-scalar-column-values # See: https://docs.sqlalchemy.org/en/14/orm/extensions/mutable.html#establishing-mutability-on-scalar-column-values
from sqlalchemy import Column, String, TIMESTAMP, INTEGER, ForeignKey, JSON, FLOAT, case from sqlalchemy import Column, String, TIMESTAMP, INTEGER, ForeignKey, JSON, FLOAT, case
from sqlalchemy.orm import relationship, validates, Query from sqlalchemy.orm import relationship, validates, Query
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy_json import NestedMutableJson # from sqlalchemy.ext.declarative import declared_attr
# from sqlalchemy_json import NestedMutableJson
# from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.exc import OperationalError as AlcOperationalError, IntegrityError as AlcIntegrityError, StatementError from sqlalchemy.exc import OperationalError as AlcOperationalError, IntegrityError as AlcIntegrityError, StatementError
from sqlite3 import OperationalError as SQLOperationalError, IntegrityError as SQLIntegrityError from sqlite3 import OperationalError as SQLOperationalError, IntegrityError as SQLIntegrityError
import pandas as pd import pandas as pd
from openpyxl import Workbook from openpyxl import Workbook, load_workbook
from openpyxl.worksheet.worksheet import Worksheet from openpyxl.worksheet.worksheet import Worksheet
from openpyxl.drawing.image import Image as OpenpyxlImage from openpyxl.drawing.image import Image as OpenpyxlImage
from tools import check_not_nan, row_map, setup_lookup, jinja_template_loading, rreplace from tools import check_not_nan, row_map, setup_lookup, jinja_template_loading, rreplace
@@ -54,10 +57,10 @@ class BasicSubmission(BaseClass):
# Move this into custom types? # Move this into custom types?
# reagents = relationship("Reagent", back_populates="submissions", secondary=reagents_submissions) #: relationship to reagents # reagents = relationship("Reagent", back_populates="submissions", secondary=reagents_submissions) #: relationship to reagents
reagents_id = Column(String, ForeignKey("_reagent.id", ondelete="SET NULL", name="fk_BS_reagents_id")) #: id of used reagents reagents_id = Column(String, ForeignKey("_reagent.id", ondelete="SET NULL", name="fk_BS_reagents_id")) #: id of used reagents
extraction_info = Column(NestedMutableJson) #: unstructured output from the extraction table logger. extraction_info = Column(JSON) #: unstructured output from the extraction table logger.
run_cost = Column(FLOAT(2)) #: total cost of running the plate. Set from constant and mutable kit costs at time of creation. run_cost = Column(FLOAT(2)) #: total cost of running the plate. Set from constant and mutable kit costs at time of creation.
uploaded_by = Column(String(32)) #: user name of person who submitted the submission to the database. uploaded_by = Column(String(32)) #: user name of person who submitted the submission to the database.
comment = Column(NestedMutableJson) #: user notes comment = Column(JSON) #: user notes
submission_category = Column(String(64)) #: ["Research", "Diagnostic", "Surveillance", "Validation"], else defaults to submission_type_name submission_category = Column(String(64)) #: ["Research", "Diagnostic", "Surveillance", "Validation"], else defaults to submission_type_name
submission_sample_associations = relationship( submission_sample_associations = relationship(
@@ -99,6 +102,13 @@ class BasicSubmission(BaseClass):
submission_type = self.submission_type or "Basic" submission_type = self.submission_type or "Basic"
return f"{submission_type}Submission({self.rsl_plate_num})" return f"{submission_type}Submission({self.rsl_plate_num})"
@classmethod
def jsons(cls):
    """Return the names of this model's JSON-typed columns.

    For subclasses of BasicSubmission, the base class's JSON column
    names are appended as well.

    NOTE(review): assumes ``cls.__table__`` holds only the subclass's
    own columns (joined-table inheritance), so base and subclass lists
    do not overlap — confirm, otherwise names could repeat.
    """
    # Columns whose SQLAlchemy type is JSON on this mapped table.
    output = [item.name for item in cls.__table__.columns if isinstance(item.type, JSON)]
    # Subclasses also report the parent BasicSubmission JSON columns.
    if issubclass(cls, BasicSubmission) and not cls.__name__ == "BasicSubmission":
        output += BasicSubmission.jsons()
    return output
def to_dict(self, full_data:bool=False, backup:bool=False, report:bool=False) -> dict: def to_dict(self, full_data:bool=False, backup:bool=False, report:bool=False) -> dict:
""" """
Constructs dictionary used in submissions summary Constructs dictionary used in submissions summary
@@ -315,7 +325,7 @@ class BasicSubmission(BaseClass):
logger.debug(f"Got {len(subs)} submissions.") logger.debug(f"Got {len(subs)} submissions.")
df = pd.DataFrame.from_records(subs) df = pd.DataFrame.from_records(subs)
# Exclude sub information # Exclude sub information
for item in ['controls', 'extraction_info', 'pcr_info', 'comment', 'comments', 'samples', 'reagents', 'equipment', 'gel_info', 'gel_image', 'dna_core_submission_number', 'source_plates']: for item in ['controls', 'extraction_info', 'pcr_info', 'comment', 'comments', 'samples', 'reagents', 'equipment', 'gel_info', 'gel_image', 'dna_core_submission_number', 'gel_controls']:
try: try:
df = df.drop(item, axis=1) df = df.drop(item, axis=1)
except: except:
@@ -360,15 +370,29 @@ class BasicSubmission(BaseClass):
field_value = value field_value = value
case "ctx" | "csv" | "filepath" | "equipment": case "ctx" | "csv" | "filepath" | "equipment":
return return
case "comment": # case "comment":
if value == "" or value == None or value == 'null': # if value == "" or value == None or value == 'null':
field_value = None # field_value = None
# else:
# field_value = dict(name=getuser(), text=value, time=datetime.now())
# # if self.comment is None:
# # self.comment = [field_value]
# # else:
# # self.comment.append(field_value)
# self.update_json(field=key, value=field_value)
# return
case item if item in self.jsons():
logger.debug(f"Setting JSON attribute.")
existing = self.__getattribute__(key)
if existing is None:
existing = []
if value in existing:
logger.warning("Value already exists. Preventing duplicate addition.")
return
else: else:
field_value = dict(name=getuser(), text=value, time=datetime.now()) existing.append(value)
if self.comment is None: self.__setattr__(key, existing)
self.comment = [field_value] flag_modified(self, key)
else:
self.comment.append(field_value)
return return
case _: case _:
field_value = value field_value = value
@@ -957,12 +981,13 @@ class BasicSubmission(BaseClass):
dlg = SubmissionComment(parent=obj, submission=self) dlg = SubmissionComment(parent=obj, submission=self)
if dlg.exec(): if dlg.exec():
comment = dlg.parse_form() comment = dlg.parse_form()
try: # try:
# For some reason .append results in new comment being ignored, so have to concatenate lists. # # For some reason .append results in new comment being ignored, so have to concatenate lists.
self.comment = self.comment + comment # self.comment = self.comment + comment
except (AttributeError, TypeError) as e: # except (AttributeError, TypeError) as e:
logger.error(f"Hit error ({e}) creating comment") # logger.error(f"Hit error ({e}) creating comment")
self.comment = comment # self.comment = comment
self.set_attribute(key='comment', value=comment)
logger.debug(self.comment) logger.debug(self.comment)
self.save(original=False) self.save(original=False)
@@ -1108,15 +1133,6 @@ class BacterialCulture(BasicSubmission):
template += "_{{ submitting_lab }}_{{ submitter_plate_num }}" template += "_{{ submitting_lab }}_{{ submitter_plate_num }}"
return template return template
# @classmethod
# def parse_info(cls, input_dict: dict, xl: pd.ExcelFile | None = None) -> dict:
# """
# Extends parent
# """
# input_dict = super().parse_info(input_dict, xl)
# input_dict['submitted_date']['missing'] = True
# return input_dict
@classmethod @classmethod
def finalize_parse(cls, input_dict: dict, xl: pd.ExcelFile | None = None, info_map: dict | None = None, plate_map: dict | None = None) -> dict: def finalize_parse(cls, input_dict: dict, xl: pd.ExcelFile | None = None, info_map: dict | None = None, plate_map: dict | None = None) -> dict:
""" """
@@ -1177,7 +1193,7 @@ class Wastewater(BasicSubmission):
ext_technician = Column(String(64)) #: Name of technician doing extraction ext_technician = Column(String(64)) #: Name of technician doing extraction
pcr_technician = Column(String(64)) #: Name of technician doing pcr pcr_technician = Column(String(64)) #: Name of technician doing pcr
# pcr_info = Column(JSON) # pcr_info = Column(JSON)
pcr_info = Column(NestedMutableJson)#: unstructured output from pcr table logger or user(Artic) pcr_info = Column(JSON)#: unstructured output from pcr table logger or user(Artic)
__mapper_args__ = __mapper_args__ = dict(polymorphic_identity="Wastewater", __mapper_args__ = __mapper_args__ = dict(polymorphic_identity="Wastewater",
polymorphic_load="inline", polymorphic_load="inline",
@@ -1319,35 +1335,36 @@ class Wastewater(BasicSubmission):
fname = select_open_file(obj=obj, file_extension="xlsx") fname = select_open_file(obj=obj, file_extension="xlsx")
parser = PCRParser(filepath=fname) parser = PCRParser(filepath=fname)
# Check if PCR info already exists # Check if PCR info already exists
if hasattr(self, 'pcr_info') and self.pcr_info != None: # if hasattr(self, 'pcr_info') and self.pcr_info != None:
# existing = json.loads(sub.pcr_info) # # existing = json.loads(sub.pcr_info)
existing = self.pcr_info # existing = self.pcr_info
logger.debug(f"Found existing pcr info: {pformat(self.pcr_info)}") # logger.debug(f"Found existing pcr info: {pformat(self.pcr_info)}")
else: # else:
existing = None # existing = None
if existing != None: # if existing != None:
# update pcr_info # # update pcr_info
try: # try:
logger.debug(f"Updating {type(existing)}:\n {pformat(existing)} with {type(parser.pcr)}:\n {pformat(parser.pcr)}") # logger.debug(f"Updating {type(existing)}:\n {pformat(existing)} with {type(parser.pcr)}:\n {pformat(parser.pcr)}")
# if json.dumps(parser.pcr) not in sub.pcr_info: # # if json.dumps(parser.pcr) not in sub.pcr_info:
if parser.pcr not in self.pcr_info: # if parser.pcr not in self.pcr_info:
logger.debug(f"This is new pcr info, appending to existing") # logger.debug(f"This is new pcr info, appending to existing")
existing.append(parser.pcr) # existing.append(parser.pcr)
else: # else:
logger.debug("This info already exists, skipping.") # logger.debug("This info already exists, skipping.")
# logger.debug(f"Setting {self.rsl_plate_num} PCR to:\n {pformat(existing)}") # # logger.debug(f"Setting {self.rsl_plate_num} PCR to:\n {pformat(existing)}")
# sub.pcr_info = json.dumps(existing) # # sub.pcr_info = json.dumps(existing)
self.pcr_info = existing # self.pcr_info = existing
except TypeError: # except TypeError:
logger.error(f"Error updating!") # logger.error(f"Error updating!")
# sub.pcr_info = json.dumps([parser.pcr]) # # sub.pcr_info = json.dumps([parser.pcr])
self.pcr_info = [parser.pcr] # self.pcr_info = [parser.pcr]
logger.debug(f"Final pcr info for {self.rsl_plate_num}:\n {pformat(self.pcr_info)}") # logger.debug(f"Final pcr info for {self.rsl_plate_num}:\n {pformat(self.pcr_info)}")
else: # else:
# sub.pcr_info = json.dumps([parser.pcr]) # # sub.pcr_info = json.dumps([parser.pcr])
self.pcr_info = [parser.pcr] # self.pcr_info = [parser.pcr]
# logger.debug(f"Existing {type(self.pcr_info)}: {self.pcr_info}") # # logger.debug(f"Existing {type(self.pcr_info)}: {self.pcr_info}")
# logger.debug(f"Inserting {type(parser.pcr)}: {parser.pcr}") # # logger.debug(f"Inserting {type(parser.pcr)}: {parser.pcr}")
self.set_attribute("pcr_info", parser.pcr)
self.save(original=False) self.save(original=False)
logger.debug(f"Got {len(parser.samples)} samples to update!") logger.debug(f"Got {len(parser.samples)} samples to update!")
logger.debug(f"Parser samples: {parser.samples}") logger.debug(f"Parser samples: {parser.samples}")
@@ -1367,10 +1384,12 @@ class WastewaterArtic(BasicSubmission):
id = Column(INTEGER, ForeignKey('_basicsubmission.id'), primary_key=True) id = Column(INTEGER, ForeignKey('_basicsubmission.id'), primary_key=True)
artic_technician = Column(String(64)) #: Name of technician performing artic artic_technician = Column(String(64)) #: Name of technician performing artic
dna_core_submission_number = Column(String(64)) #: Number used by core as id dna_core_submission_number = Column(String(64)) #: Number used by core as id
pcr_info = Column(NestedMutableJson) #: unstructured output from pcr table logger or user(Artic) pcr_info = Column(JSON) #: unstructured output from pcr table logger or user(Artic)
gel_image = Column(String(64)) #: file name of gel image in zip file gel_image = Column(String(64)) #: file name of gel image in zip file
gel_info = Column(NestedMutableJson) #: unstructured data from gel. gel_info = Column(JSON) #: unstructured data from gel.
source_plates = Column(NestedMutableJson) #: wastewater plates that samples come from gel_controls = Column(JSON) #: locations of controls on the gel
source_plates = Column(JSON) #: wastewater plates that samples come from
__mapper_args__ = dict(polymorphic_identity="Wastewater Artic", __mapper_args__ = dict(polymorphic_identity="Wastewater Artic",
polymorphic_load="inline", polymorphic_load="inline",
@@ -1408,14 +1427,14 @@ class WastewaterArtic(BasicSubmission):
Returns: Returns:
dict: Updated sample dictionary dict: Updated sample dictionary
""" """
from backend.validators import RSLNamer # from backend.validators import RSLNamer
input_dict = super().parse_info(input_dict) input_dict = super().parse_info(input_dict)
df = xl.parse("First Strand List", header=None) ws = load_workbook(xl.io, data_only=True)['Egel results']
plates = [] data = [ws.cell(row=jj,column=ii) for ii in range(15,27) for jj in range(10,18)]
for row in [8,9,10]: data = [cell for cell in data if cell.value is not None and "NTC" in cell.value]
plate_name = RSLNamer(df.iat[row-1, 2]).parsed_name input_dict['gel_controls'] = [dict(sample_id=cell.value, location=f"{row_map[cell.row-9]}{str(cell.column-14).zfill(2)}") for cell in data]
plates.append(dict(plate=plate_name, start_sample=df.iat[row-1, 3])) # df = xl.parse("Egel results").iloc[7:16, 13:26]
input_dict['source_plates'] = plates # df = df.set_index(df.columns[0])
return input_dict return input_dict
@classmethod @classmethod
@@ -1535,57 +1554,6 @@ class WastewaterArtic(BasicSubmission):
dict: Updated parser product. dict: Updated parser product.
""" """
input_dict = super().finalize_parse(input_dict, xl, info_map, plate_map) input_dict = super().finalize_parse(input_dict, xl, info_map, plate_map)
# logger.debug(pformat(input_dict))
# logger.debug(pformat(info_map))
# logger.debug(pformat(plate_map))
# samples = []
# for sample in input_dict['samples']:
# logger.debug(f"Input sample: {pformat(sample.__dict__)}")
# if sample.submitter_id == "NTC1":
# samples.append(dict(sample=sample.submitter_id, destination_row=8, destination_column=2, source_row=0, source_column=0, plate_number='control', plate=None))
# continue
# elif sample.submitter_id == "NTC2":
# samples.append(dict(sample=sample.submitter_id, destination_row=8, destination_column=5, source_row=0, source_column=0, plate_number='control', plate=None))
# continue
# destination_row = sample.row[0]
# destination_column = sample.column[0]
# # logger.debug(f"Looking up: {sample.submitter_id} friend.")
# lookup_sample = BasicSample.query(submitter_id=sample.submitter_id)
# lookup_ssa = SubmissionSampleAssociation.query(sample=lookup_sample, exclude_submission_type=cls.__mapper_args__['polymorphic_identity'] , chronologic=True, reverse=True, limit=1)
# try:
# plate = lookup_ssa.submission.rsl_plate_num
# source_row = lookup_ssa.row
# source_column = lookup_ssa.column
# except AttributeError as e:
# logger.error(f"Problem with lookup: {e}")
# plate = "Error"
# source_row = 0
# source_column = 0
# # continue
# output_sample = dict(
# sample=sample.submitter_id,
# destination_column=destination_column,
# destination_row=destination_row,
# plate=plate,
# source_column=source_column,
# source_row = source_row
# )
# logger.debug(f"output sample: {pformat(output_sample)}")
# samples.append(output_sample)
# plates = sorted(list(set([sample['plate'] for sample in samples if sample['plate'] != None and sample['plate'] != "Error"])))
# logger.debug(f"Here's what I got for plates: {plates}")
# for iii, plate in enumerate(plates):
# for sample in samples:
# if sample['plate'] == plate:
# sample['plate_number'] = iii + 1
# df = pd.DataFrame.from_records(samples).fillna(value="")
# try:
# df.source_row = df.source_row.astype(int)
# df.source_column = df.source_column.astype(int)
# df.sort_values(by=['destination_column', 'destination_row'], inplace=True)
# except AttributeError as e:
# logger.error(f"Couldn't construct df due to {e}")
# input_dict['csv'] = df
input_dict['csv'] = xl.parse("hitpicks_csv_to_export") input_dict['csv'] = xl.parse("hitpicks_csv_to_export")
return input_dict return input_dict
@@ -1606,30 +1574,30 @@ class WastewaterArtic(BasicSubmission):
worksheet = input_excel["First Strand List"] worksheet = input_excel["First Strand List"]
samples = cls.query(rsl_number=info['rsl_plate_num']['value']).submission_sample_associations samples = cls.query(rsl_number=info['rsl_plate_num']['value']).submission_sample_associations
samples = sorted(samples, key=attrgetter('column', 'row')) samples = sorted(samples, key=attrgetter('column', 'row'))
try: # try:
source_plates = [item['plate'] for item in info['source_plates']] # source_plates = [item['plate'] for item in info['source_plates']]
first_samples = [item['start_sample'] for item in info['source_plates']] # first_samples = [item['start_sample'] for item in info['source_plates']]
except: # except:
source_plates = [] # source_plates = []
first_samples = [] # first_samples = []
for sample in samples: # for sample in samples:
sample = sample.sample # sample = sample.sample
try: # try:
assoc = [item.submission.rsl_plate_num for item in sample.sample_submission_associations if item.submission.submission_type_name=="Wastewater"][-1] # assoc = [item.submission.rsl_plate_num for item in sample.sample_submission_associations if item.submission.submission_type_name=="Wastewater"][-1]
except IndexError: # except IndexError:
logger.error(f"Association not found for {sample}") # logger.error(f"Association not found for {sample}")
continue # continue
if assoc not in source_plates: # if assoc not in source_plates:
source_plates.append(assoc) # source_plates.append(assoc)
first_samples.append(sample.ww_processing_num) # first_samples.append(sample.ww_processing_num)
# Pad list to length of 3 # # Pad list to length of 3
source_plates += ['None'] * (3 - len(source_plates)) # source_plates += ['None'] * (3 - len(source_plates))
first_samples += [''] * (3 - len(first_samples)) # first_samples += [''] * (3 - len(first_samples))
source_plates = zip(source_plates, first_samples, strict=False) # source_plates = zip(source_plates, first_samples, strict=False)
for iii, plate in enumerate(source_plates, start=8): # for iii, plate in enumerate(source_plates, start=8):
logger.debug(f"Plate: {plate}") # logger.debug(f"Plate: {plate}")
for jjj, value in enumerate(plate, start=3): # for jjj, value in enumerate(plate, start=3):
worksheet.cell(row=iii, column=jjj, value=value) # worksheet.cell(row=iii, column=jjj, value=value)
logger.debug(f"Info:\n{pformat(info)}") logger.debug(f"Info:\n{pformat(info)}")
check = 'gel_info' in info.keys() and info['gel_info']['value'] != None check = 'gel_info' in info.keys() and info['gel_info']['value'] != None
if check: if check:
@@ -1676,7 +1644,7 @@ class WastewaterArtic(BasicSubmission):
Tuple[dict, Template]: (Updated dictionary, Template to be rendered) Tuple[dict, Template]: (Updated dictionary, Template to be rendered)
""" """
base_dict, template = super().get_details_template(base_dict=base_dict) base_dict, template = super().get_details_template(base_dict=base_dict)
base_dict['excluded'] += ['gel_info', 'gel_image', 'headers', "dna_core_submission_number", "source_plates"] base_dict['excluded'] += ['gel_info', 'gel_image', 'headers', "dna_core_submission_number", "source_plates", "gel_controls"]
base_dict['DNA Core ID'] = base_dict['dna_core_submission_number'] base_dict['DNA Core ID'] = base_dict['dna_core_submission_number']
check = 'gel_info' in base_dict.keys() and base_dict['gel_info'] != None check = 'gel_info' in base_dict.keys() and base_dict['gel_info'] != None
if check: if check:
@@ -1739,7 +1707,7 @@ class WastewaterArtic(BasicSubmission):
from frontend.widgets.gel_checker import GelBox from frontend.widgets.gel_checker import GelBox
from frontend.widgets import select_open_file from frontend.widgets import select_open_file
fname = select_open_file(obj=obj, file_extension="jpg") fname = select_open_file(obj=obj, file_extension="jpg")
dlg = GelBox(parent=obj, img_path=fname) dlg = GelBox(parent=obj, img_path=fname, submission=self)
if dlg.exec(): if dlg.exec():
self.dna_core_submission_number, img_path, output, comment = dlg.parse_form() self.dna_core_submission_number, img_path, output, comment = dlg.parse_form()
self.gel_image = img_path.name self.gel_image = img_path.name
@@ -2375,7 +2343,7 @@ class WastewaterAssociation(SubmissionSampleAssociation):
ct_n2 = Column(FLOAT(2)) #: AKA ct for N2 ct_n2 = Column(FLOAT(2)) #: AKA ct for N2
n1_status = Column(String(32)) #: positive or negative for N1 n1_status = Column(String(32)) #: positive or negative for N1
n2_status = Column(String(32)) #: positive or negative for N2 n2_status = Column(String(32)) #: positive or negative for N2
pcr_results = Column(NestedMutableJson) #: imported PCR status from QuantStudio pcr_results = Column(JSON) #: imported PCR status from QuantStudio
__mapper_args__ = dict(polymorphic_identity="Wastewater Association", __mapper_args__ = dict(polymorphic_identity="Wastewater Association",
polymorphic_load="inline", polymorphic_load="inline",

View File

@@ -7,24 +7,26 @@ from PyQt6.QtWidgets import (QWidget, QDialog, QGridLayout,
) )
import numpy as np import numpy as np
import pyqtgraph as pg import pyqtgraph as pg
from PyQt6.QtGui import QIcon from PyQt6.QtGui import QIcon, QFont
from PIL import Image from PIL import Image
import numpy as np import numpy as np
import logging import logging
from pprint import pformat from pprint import pformat
from typing import Tuple, List from typing import Tuple, List
from pathlib import Path from pathlib import Path
from backend.db.models import WastewaterArtic
logger = logging.getLogger(f"submissions.{__name__}") logger = logging.getLogger(f"submissions.{__name__}")
# Main window class # Main window class
class GelBox(QDialog): class GelBox(QDialog):
def __init__(self, parent, img_path:str|Path): def __init__(self, parent, img_path:str|Path, submission:WastewaterArtic):
super().__init__(parent) super().__init__(parent)
# setting title # setting title
self.setWindowTitle("PyQtGraph") self.setWindowTitle("PyQtGraph")
self.img_path = img_path self.img_path = img_path
self.submission = submission
# setting geometry # setting geometry
self.setGeometry(50, 50, 1200, 900) self.setGeometry(50, 50, 1200, 900)
# icon # icon
@@ -57,7 +59,11 @@ class GelBox(QDialog):
# plot window goes on right side, spanning 3 rows # plot window goes on right side, spanning 3 rows
layout.addWidget(self.imv, 1, 1,20,20) layout.addWidget(self.imv, 1, 1,20,20)
# setting this widget as central widget of the main window # setting this widget as central widget of the main window
self.form = ControlsForm(parent=self) try:
control_info = sorted(self.submission.gel_controls, key=lambda d: d['location'])
except KeyError:
control_info = None
self.form = ControlsForm(parent=self, control_info=control_info)
layout.addWidget(self.form,22,1,1,4) layout.addWidget(self.form,22,1,1,4)
QBtn = QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel QBtn = QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel
self.buttonBox = QDialogButtonBox(QBtn) self.buttonBox = QDialogButtonBox(QBtn)
@@ -79,16 +85,24 @@ class GelBox(QDialog):
class ControlsForm(QWidget): class ControlsForm(QWidget):
def __init__(self, parent) -> None: def __init__(self, parent, control_info:List=None) -> None:
super().__init__(parent) super().__init__(parent)
self.layout = QGridLayout() self.layout = QGridLayout()
columns = [] columns = []
rows = [] rows = []
try:
tt_text = "\n".join([f"{item['sample_id']} - CELL {item['location']}" for item in control_info])
except TypeError:
tt_text = None
for iii, item in enumerate(["Negative Control Key", "Description", "Results - 65 C", "Results - 63 C", "Results - Spike"]): for iii, item in enumerate(["Negative Control Key", "Description", "Results - 65 C", "Results - 63 C", "Results - Spike"]):
label = QLabel(item) label = QLabel(item)
self.layout.addWidget(label, 0, iii,1,1) self.layout.addWidget(label, 0, iii,1,1)
if iii > 1: if iii > 1:
columns.append(item) columns.append(item)
elif iii == 0:
if tt_text:
label.setStyleSheet("font-weight: bold; color: blue; text-decoration: underline;")
label.setToolTip(tt_text)
for iii, item in enumerate(["RSL-NTC", "ENC-NTC", "NTC"], start=1): for iii, item in enumerate(["RSL-NTC", "ENC-NTC", "NTC"], start=1):
label = QLabel(item) label = QLabel(item)
self.layout.addWidget(label, iii, 0, 1, 1) self.layout.addWidget(label, iii, 0, 1, 1)
@@ -102,6 +116,11 @@ class ControlsForm(QWidget):
widge.setText("Neg") widge.setText("Neg")
widge.setObjectName(f"{rows[iii]} : {columns[jjj]}") widge.setObjectName(f"{rows[iii]} : {columns[jjj]}")
self.layout.addWidget(widge, iii+1, jjj+2, 1, 1) self.layout.addWidget(widge, iii+1, jjj+2, 1, 1)
# try:
# for iii, item in enumerate(control_info, start=1):
# self.layout.addWidget(QLabel(f"{item['sample_id']} - {item['location']}"), iii+4, 1)
# except TypeError:
# pass
self.layout.addWidget(QLabel("Comments:"), 0,5,1,1) self.layout.addWidget(QLabel("Comments:"), 0,5,1,1)
self.comment_field = QTextEdit(self) self.comment_field = QTextEdit(self)
self.comment_field.setFixedHeight(50) self.comment_field.setFixedHeight(50)

View File

@@ -95,8 +95,8 @@ class SubmissionDetails(QDialog):
self.html = self.template.render(sub=self.base_dict, signing_permission=is_power_user()) self.html = self.template.render(sub=self.base_dict, signing_permission=is_power_user())
self.webview.setHtml(self.html) self.webview.setHtml(self.html)
self.setWindowTitle(f"Submission Details - {submission.rsl_plate_num}") self.setWindowTitle(f"Submission Details - {submission.rsl_plate_num}")
with open("details.html", "w") as f: # with open("details.html", "w") as f:
f.write(self.html) # f.write(self.html)
@pyqtSlot(str) @pyqtSlot(str)
def sign_off(self, submission:str|BasicSubmission): def sign_off(self, submission:str|BasicSubmission):
@@ -171,7 +171,7 @@ class SubmissionComment(QDialog):
commenter = getuser() commenter = getuser()
comment = self.txt_editor.toPlainText() comment = self.txt_editor.toPlainText()
dt = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S") dt = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
full_comment = [{"name":commenter, "time": dt, "text": comment}] full_comment = {"name":commenter, "time": dt, "text": comment}
logger.debug(f"Full comment: {full_comment}") logger.debug(f"Full comment: {full_comment}")
return full_comment return full_comment

View File

@@ -1,7 +1,7 @@
''' '''
Contains widgets specific to the submission summary and submission details. Contains widgets specific to the submission summary and submission details.
''' '''
import logging, json import logging
from pprint import pformat from pprint import pformat
from PyQt6.QtWidgets import QTableView, QMenu from PyQt6.QtWidgets import QTableView, QMenu
from PyQt6.QtCore import Qt, QAbstractTableModel, QSortFilterProxyModel from PyQt6.QtCore import Qt, QAbstractTableModel, QSortFilterProxyModel
@@ -177,35 +177,36 @@ class SubmissionsSheet(QTableView):
count += 1 count += 1
except AttributeError: except AttributeError:
continue continue
if sub.extraction_info != None: sub.set_attribute('extraction_info', new_run)
# existing = json.loads(sub.extraction_info) # if sub.extraction_info != None:
existing = sub.extraction_info # # existing = json.loads(sub.extraction_info)
else: # existing = sub.extraction_info
existing = None # else:
# Check if the new info already exists in the imported submission # existing = None
try: # # Check if the new info already exists in the imported submission
# if json.dumps(new_run) in sub.extraction_info: # try:
if new_run in sub.extraction_info: # # if json.dumps(new_run) in sub.extraction_info:
logger.debug(f"Looks like we already have that info.") # if new_run in sub.extraction_info:
continue # logger.debug(f"Looks like we already have that info.")
except TypeError: # continue
pass # except TypeError:
# Update or create the extraction info # pass
if existing != None: # # Update or create the extraction info
try: # if existing != None:
logger.debug(f"Updating {type(existing)}: {existing} with {type(new_run)}: {new_run}") # try:
existing.append(new_run) # logger.debug(f"Updating {type(existing)}: {existing} with {type(new_run)}: {new_run}")
logger.debug(f"Setting: {existing}") # existing.append(new_run)
# sub.extraction_info = json.dumps(existing) # logger.debug(f"Setting: {existing}")
sub.extraction_info = existing # # sub.extraction_info = json.dumps(existing)
except TypeError: # sub.extraction_info = existing
logger.error(f"Error updating!") # except TypeError:
# sub.extraction_info = json.dumps([new_run]) # logger.error(f"Error updating!")
sub.extraction_info = [new_run] # # sub.extraction_info = json.dumps([new_run])
logger.debug(f"Final ext info for {sub.rsl_plate_num}: {sub.extraction_info}") # sub.extraction_info = [new_run]
else: # logger.debug(f"Final ext info for {sub.rsl_plate_num}: {sub.extraction_info}")
# sub.extraction_info = json.dumps([new_run]) # else:
sub.extraction_info = [new_run] # # sub.extraction_info = json.dumps([new_run])
# sub.extraction_info = [new_run]
sub.save() sub.save()
self.report.add_result(Result(msg=f"We added {count} logs to the database.", status='Information')) self.report.add_result(Result(msg=f"We added {count} logs to the database.", status='Information'))
@@ -253,37 +254,38 @@ class SubmissionsSheet(QTableView):
logger.debug(f"Found submission: {sub.rsl_plate_num}") logger.debug(f"Found submission: {sub.rsl_plate_num}")
except AttributeError: except AttributeError:
continue continue
# check if pcr_info already exists sub.set_attribute('pcr_info', new_run)
if hasattr(sub, 'pcr_info') and sub.pcr_info != None: # # check if pcr_info already exists
# existing = json.loads(sub.pcr_info) # if hasattr(sub, 'pcr_info') and sub.pcr_info != None:
existing = sub.pcr_info # # existing = json.loads(sub.pcr_info)
else: # existing = sub.pcr_info
existing = None # else:
# check if this entry already exists in imported submission # existing = None
try: # # check if this entry already exists in imported submission
# if json.dumps(new_run) in sub.pcr_info: # try:
if new_run in sub.pcr_info: # # if json.dumps(new_run) in sub.pcr_info:
logger.debug(f"Looks like we already have that info.") # if new_run in sub.pcr_info:
continue # logger.debug(f"Looks like we already have that info.")
else: # continue
count += 1 # else:
except TypeError: # count += 1
logger.error(f"No json to dump") # except TypeError:
if existing is not None: # logger.error(f"No json to dump")
try: # if existing is not None:
logger.debug(f"Updating {type(existing)}: {existing} with {type(new_run)}: {new_run}") # try:
existing.append(new_run) # logger.debug(f"Updating {type(existing)}: {existing} with {type(new_run)}: {new_run}")
logger.debug(f"Setting: {existing}") # existing.append(new_run)
# sub.pcr_info = json.dumps(existing) # logger.debug(f"Setting: {existing}")
sub.pcr_info = existing # # sub.pcr_info = json.dumps(existing)
except TypeError: # sub.pcr_info = existing
logger.error(f"Error updating!") # except TypeError:
# sub.pcr_info = json.dumps([new_run]) # logger.error(f"Error updating!")
sub.pcr_info = [new_run] # # sub.pcr_info = json.dumps([new_run])
logger.debug(f"Final ext info for {sub.rsl_plate_num}: {sub.pcr_info}") # sub.pcr_info = [new_run]
else: # logger.debug(f"Final ext info for {sub.rsl_plate_num}: {sub.pcr_info}")
# sub.pcr_info = json.dumps([new_run]) # else:
sub.pcr_info = [new_run] # # sub.pcr_info = json.dumps([new_run])
# sub.pcr_info = [new_run]
sub.save() sub.save()
self.report.add_result(Result(msg=f"We added {count} logs to the database.", status='Information')) self.report.add_result(Result(msg=f"We added {count} logs to the database.", status='Information'))

View File

@@ -5,14 +5,14 @@ from PyQt6.QtWidgets import (
from PyQt6.QtCore import pyqtSignal from PyQt6.QtCore import pyqtSignal
from pathlib import Path from pathlib import Path
from . import select_open_file, select_save_file from . import select_open_file, select_save_file
import logging, difflib, inspect, pickle import logging, difflib, inspect
from pathlib import Path from pathlib import Path
from tools import Report, Result, check_not_nan from tools import Report, Result, check_not_nan
from backend.excel.parser import SheetParser, PCRParser from backend.excel.parser import SheetParser
from backend.validators import PydSubmission, PydReagent from backend.validators import PydSubmission, PydReagent
from backend.db import ( from backend.db import (
KitType, Organization, SubmissionType, Reagent, KitType, Organization, SubmissionType, Reagent,
ReagentType, KitTypeReagentTypeAssociation, BasicSubmission ReagentType, KitTypeReagentTypeAssociation
) )
from pprint import pformat from pprint import pformat
from .pop_ups import QuestionAsker, AlertPop from .pop_ups import QuestionAsker, AlertPop
@@ -154,7 +154,7 @@ class SubmissionFormWidget(QWidget):
# self.samples = [] # self.samples = []
self.missing_info = [] self.missing_info = []
self.ignore = ['filepath', 'samples', 'reagents', 'csv', 'ctx', 'comment', self.ignore = ['filepath', 'samples', 'reagents', 'csv', 'ctx', 'comment',
'equipment', 'source_plates', 'id', 'cost', 'extraction_info', 'equipment', 'gel_controls', 'id', 'cost', 'extraction_info',
'controls', 'pcr_info', 'gel_info', 'gel_image'] 'controls', 'pcr_info', 'gel_info', 'gel_image']
self.recover = ['filepath', 'samples', 'csv', 'comment', 'equipment'] self.recover = ['filepath', 'samples', 'csv', 'comment', 'equipment']
self.layout = QVBoxLayout() self.layout = QVBoxLayout()