Moments before disaster.
@@ -1,10 +1,8 @@
 """
 All database related operations.
 """
 import sqlalchemy.orm
 from sqlalchemy import event, inspect
 from sqlalchemy.engine import Engine

 from tools import ctx

@@ -48,7 +46,11 @@ def update_log(mapper, connection, target):
         hist = attr.load_history()
         if not hist.has_changes():
             continue
+        if attr.key == "custom":
+            continue
         added = [str(item) for item in hist.added]
+        if attr.key in ['submission_sample_associations', 'submission_reagent_associations']:
+            added = ['Numbers truncated for space purposes.']
         deleted = [str(item) for item in hist.deleted]
         change = dict(field=attr.key, added=added, deleted=deleted)
         # logger.debug(f"Adding: {pformat(change)}")
@@ -69,6 +71,6 @@ def update_log(mapper, connection, target):
     else:
         logger.info(f"No changes detected, not updating logs.")

 # if ctx.database_schema == "sqlite":
 # if ctx.logging_enabled:
 event.listen(LogMixin, 'after_update', update_log, propagate=True)
 event.listen(LogMixin, 'after_insert', update_log, propagate=True)

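
propagate=True is what makes a listener registered on the mixin fire for every mapped subclass rather than only the class named in event.listen. A sketch of the wiring, assuming LogMixin itself is unmapped:

from sqlalchemy import event

# Registering on the (unmapped) mixin with propagate=True re-applies the
# hook to every mapped subclass -- BasicSubmission and, after this
# commit, BasicSample -- so one listener covers the whole audit surface.
event.listen(LogMixin, 'after_update', update_log, propagate=True)
event.listen(LogMixin, 'after_insert', update_log, propagate=True)
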
@@ -1414,25 +1414,6 @@ class Equipment(BaseClass):
             if extraction_kit and extraction_kit not in process.kit_types:
                 continue
             yield process
-        # processes = (process for process in self.processes if submission_type in process.submission_types)
-        # match extraction_kit:
-        #     case str():
-        #         # logger.debug(f"Filtering processes by extraction_kit str {extraction_kit}")
-        #         processes = (process for process in processes if
-        #                      extraction_kit in [kit.name for kit in process.kit_types])
-        #     case KitType():
-        #         # logger.debug(f"Filtering processes by extraction_kit KitType {extraction_kit}")
-        #         processes = (process for process in processes if extraction_kit in process.kit_types)
-        #     case _:
-        #         pass
-        # # NOTE: Convert to strings
-        # # processes = [process.name for process in processes]
-        # # assert all([isinstance(process, str) for process in processes])
-        # # if len(processes) == 0:
-        # #     processes = ['']
-        # # return processes
-        # for process in processes:
-        #     yield process.name

     @classmethod
     @setup_lookup
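
The nineteen deleted lines collapse into the three context lines above: get_processes is now a generator that filters as it yields instead of building intermediate lists per match arm. A sketch of the full method shape implied by the hunk (signature details assumed):

def get_processes(self, submission_type, extraction_kit=None):
    # Lazily yield matching Process objects; callers may wrap in list().
    for process in self.processes:
        if submission_type not in process.submission_types:
            continue
        if extraction_kit and extraction_kit not in process.kit_types:
            continue
        yield process
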
@@ -1650,25 +1631,6 @@ class EquipmentRole(BaseClass):
             if extraction_kit and extraction_kit not in process.kit_types:
                 continue
             yield process.name
-        # if submission_type is not None:
-        #     # logger.debug("Getting all processes for this EquipmentRole")
-        #     processes = [process for process in self.processes if submission_type in process.submission_types]
-        # else:
-        #     processes = self.processes
-        # match extraction_kit:
-        #     case str():
-        #         # logger.debug(f"Filtering processes by extraction_kit str {extraction_kit}")
-        #         processes = [item for item in processes if extraction_kit in [kit.name for kit in item.kit_types]]
-        #     case KitType():
-        #         # logger.debug(f"Filtering processes by extraction_kit KitType {extraction_kit}")
-        #         processes = [item for item in processes if extraction_kit in [kit for kit in item.kit_types]]
-        #     case _:
-        #         pass
-        # output = [item.name for item in processes]
-        # if len(output) == 0:
-        #     return ['']
-        # else:
-        #     return output

     def to_export_dict(self, submission_type: SubmissionType, kit_type: KitType):
         """
@@ -1730,9 +1692,8 @@ class SubmissionEquipmentAssociation(BaseClass):

     @classmethod
     @setup_lookup
-    def query(cls, equipment_id: int, submission_id: int, role: str | None = None, limit: int = 0, **kwargs) -> Any | \
-            List[
-        Any]:
+    def query(cls, equipment_id: int, submission_id: int, role: str | None = None, limit: int = 0, **kwargs) \
+            -> Any | List[Any]:
         query: Query = cls.__database_session__.query(cls)
         query = query.filter(cls.equipment_id == equipment_id)
         query = query.filter(cls.submission_id == submission_id)
@@ -1777,44 +1738,22 @@ class SubmissionTypeEquipmentRoleAssociation(BaseClass):
             raise ValueError(f'Invalid required value {value}. Must be 0 or 1.')
         return value

-    def get_all_processes(self, extraction_kit: KitType | str | None = None) -> List[Process]:
-        """
-        Get all processes associated with this SubmissionTypeEquipmentRole
-
-        Args:
-            extraction_kit (KitType | str | None, optional): KitType of interest. Defaults to None.
-
-        Returns:
-            List[Process]: All associated processes
-        """
-        processes = [equipment.get_processes(self.submission_type) for equipment in self.equipment_role.instances]
-        # NOTE: flatten list
-        processes = [item for items in processes for item in items if item is not None]
-        match extraction_kit:
-            case str():
-                # logger.debug(f"Filtering Processes by extraction_kit str {extraction_kit}")
-                processes = [item for item in processes if extraction_kit in [kit.name for kit in item.kit_type]]
-            case KitType():
-                # logger.debug(f"Filtering Processes by extraction_kit KitType {extraction_kit}")
-                processes = [item for item in processes if extraction_kit in [kit for kit in item.kit_type]]
-            case _:
-                pass
-        return processes

     @check_authorization
     def save(self):
         super().save()

-    def to_export_dict(self, extraction_kit: KitType) -> dict:
+    def to_export_dict(self, extraction_kit: KitType | str) -> dict:
         """
         Creates dictionary for exporting to yml used in new SubmissionType Construction

         Args:
-            kit_type (KitType): KitType of interest.
+            extraction_kit (KitType | str): KitType of interest.

         Returns:
             dict: Dictionary containing relevant info for SubmissionType construction
         """
+        if isinstance(extraction_kit, str):
+            extraction_kit = KitType.query(name=extraction_kit)
         base_dict = {k: v for k, v in self.equipment_role.to_export_dict(submission_type=self.submission_type,
                                                                          kit_type=extraction_kit).items()}
         base_dict['static'] = self.static
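
Widening the signature to KitType | str is safe because the two added lines normalize the argument first, so the rest of the method only ever sees a KitType. The idiom, isolated:

def to_export_dict(self, extraction_kit):
    # Accept the ORM object or its name; normalize up front.
    if isinstance(extraction_kit, str):
        extraction_kit = KitType.query(name=extraction_kit)
    ...
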
@@ -2013,8 +1952,8 @@ class SubmissionTipsAssociation(BaseClass):

     @classmethod
     @setup_lookup
-    def query(cls, tip_id: int, role: str, submission_id: int | None = None, limit: int = 0, **kwargs) -> Any | List[
-        Any]:
+    def query(cls, tip_id: int, role: str, submission_id: int | None = None, limit: int = 0, **kwargs) \
+            -> Any | List[Any]:
         query: Query = cls.__database_session__.query(cls)
         query = query.filter(cls.tip_id == tip_id)
         if submission_id is not None:

@@ -2,13 +2,10 @@
 Models for the main submission and sample types.
 """
 from __future__ import annotations
-# import sys
-# import types
-# import zipfile
 from copy import deepcopy
 from getpass import getuser
 import logging, uuid, tempfile, re, base64, numpy as np, pandas as pd, types, sys
-from zipfile import ZipFile
+from zipfile import ZipFile, BadZipfile
 from tempfile import TemporaryDirectory, TemporaryFile
 from operator import itemgetter
 from pprint import pformat
@@ -20,7 +17,6 @@ from sqlalchemy.ext.associationproxy import association_proxy
 from sqlalchemy.exc import OperationalError as AlcOperationalError, IntegrityError as AlcIntegrityError, StatementError, \
     ArgumentError
 from sqlite3 import OperationalError as SQLOperationalError, IntegrityError as SQLIntegrityError
-# import pandas as pd
 from openpyxl import Workbook
 from openpyxl.drawing.image import Image as OpenpyxlImage
 from tools import row_map, setup_lookup, jinja_template_loading, rreplace, row_keys, check_key_or_attr, Result, Report, \
@@ -1276,7 +1272,7 @@ class BasicSubmission(BaseClass, LogMixin):
         if msg.exec():
             try:
                 self.backup(fname=fname, full_backup=True)
-            except zipfile.BadZipfile:
+            except BadZipfile:
                 logger.error("Couldn't open zipfile for writing.")
         self.__database_session__.delete(self)
         try:
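
Importing the exception name directly keeps the except clause short; in the stdlib the canonical spelling is zipfile.BadZipFile, with BadZipfile retained as a compatibility alias for the same class. A minimal sketch of the pattern (the path is hypothetical):

from zipfile import ZipFile, BadZipfile  # alias of zipfile.BadZipFile

try:
    # Appending forces the existing archive to be read, which is where
    # a corrupt or non-zip file raises BadZipfile.
    with ZipFile("backup.zip", "a") as zf:
        zf.writestr("notes.txt", "contents")
except BadZipfile:
    print("Couldn't open zipfile for writing.")
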
@@ -2261,7 +2257,7 @@ class WastewaterArtic(BasicSubmission):

 # Sample Classes

-class BasicSample(BaseClass):
+class BasicSample(BaseClass, LogMixin):
     """
     Base of basic sample which polymorphs into BCSample and WWSample
     """

@@ -1,8 +1,7 @@
 '''
 contains parser objects for pulling values from client generated submission sheets.
 '''
-import json
-import sys
+import logging
 from copy import copy
 from getpass import getuser
 from pprint import pformat
@@ -11,7 +10,6 @@ from openpyxl import load_workbook, Workbook
 from pathlib import Path
 from backend.db.models import *
 from backend.validators import PydSubmission, RSLNamer
-import logging, re
 from collections import OrderedDict
 from tools import check_not_nan, is_missing, check_key_or_attr

@@ -195,7 +193,7 @@ class InfoParser(object):
             ws = self.xl[sheet]
             relevant = []
             for k, v in self.map.items():
-                # NOTE: If the value is hardcoded put it in the dictionary directly.
+                # NOTE: If the value is hardcoded put it in the dictionary directly. Ex. Artic kit
                 if k == "custom":
                     continue
                 if isinstance(v, str):
@@ -230,7 +228,7 @@ class InfoParser(object):
                     case "submitted_date":
                         value, missing = is_missing(value)
                         logger.debug(f"Parsed submitted date: {value}")
-                    # NOTE: is field a JSON?
+                    # NOTE: is field a JSON? Includes: Extraction info, PCR info, comment, custom
                     case thing if thing in self.sub_object.jsons():
                         value, missing = is_missing(value)
                         if missing: continue
@@ -300,11 +298,11 @@ class ReagentParser(object):
             del reagent_map['info']
         except KeyError:
             pass
-        logger.debug(f"Reagent map: {pformat(reagent_map)}")
+        # logger.debug(f"Reagent map: {pformat(reagent_map)}")
         # NOTE: If reagent map is empty, maybe the wrong kit was given, check if there's only one kit for that submission type and use it if so.
         if not reagent_map:
             temp_kit_object = self.submission_type_obj.get_default_kit()
-            logger.debug(f"Temp kit: {temp_kit_object}")
+            # logger.debug(f"Temp kit: {temp_kit_object}")
             if temp_kit_object:
                 self.kit_object = temp_kit_object
                 # reagent_map = {k: v for k, v in self.kit_object.construct_xl_map_for_use(submission_type)}
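
The NOTE above describes the new fallback: an empty reagent map suggests the wrong kit was supplied, so if the submission type has exactly one kit, use it. A sketch under that assumption (get_default_kit returning None when the kit is ambiguous is inferred from the guard):

if not reagent_map:
    # Wrong kit may have been supplied; fall back to the submission
    # type's only kit, if there is exactly one.
    temp_kit_object = self.submission_type_obj.get_default_kit()
    if temp_kit_object:
        self.kit_object = temp_kit_object
        reagent_map = {k: v for k, v in self.kit_object.construct_xl_map_for_use(submission_type)}
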
@@ -333,7 +331,7 @@ class ReagentParser(object):
         for sheet in self.xl.sheetnames:
             ws = self.xl[sheet]
             relevant = {k.strip(): v for k, v in self.map.items() if sheet in self.map[k]['sheet']}
-            logger.debug(f"relevant map for {sheet}: {pformat(relevant)}")
+            # logger.debug(f"relevant map for {sheet}: {pformat(relevant)}")
             if relevant == {}:
                 continue
             for item in relevant:
@@ -499,8 +497,7 @@ class SampleParser(object):
                 yield new
         else:
             merge_on_id = self.sample_info_map['lookup_table']['merge_on_id']
-            # plate_map_samples = sorted(copy(self.plate_map_samples), key=lambda d: d['id'])
-            # lookup_samples = sorted(copy(self.lookup_samples), key=lambda d: d[merge_on_id])
+            logger.info(f"Merging sample info using {merge_on_id}")
             plate_map_samples = sorted(copy(self.plate_map_samples), key=itemgetter('id'))
             lookup_samples = sorted(copy(self.lookup_samples), key=itemgetter(merge_on_id))
             for ii, psample in enumerate(plate_map_samples):
@@ -517,20 +514,9 @@ class SampleParser(object):
                     logger.warning(f"Match for {psample['id']} not direct, running search.")
                     searchables = [(jj, sample) for jj, sample in enumerate(lookup_samples)
                                    if merge_on_id in sample.keys()]
-                    # for jj, lsample in enumerate(lookup_samples):
-                    #     try:
-                    #         check = lsample[merge_on_id] == psample['id']
-                    #     except KeyError:
-                    #         check = False
-                    #     if check:
-                    #         new = lsample | psample
-                    #         lookup_samples[jj] = {}
-                    #         break
-                    # else:
-                    #     new = psample
                     jj, new = next(((jj, lsample | psample) for jj, lsample in searchables
                                     if lsample[merge_on_id] == psample['id']), (-1, psample))
-                    logger.debug(f"Assigning from index {jj} - {new}")
+                    # logger.debug(f"Assigning from index {jj} - {new}")
                     if jj >= 0:
                         lookup_samples[jj] = {}
                 if not check_key_or_attr(key='submitter_id', interest=new, check_none=True):
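
The eleven-line search loop deleted above collapses into a single next() call: the generator finds the first lookup row whose merge key matches, PEP 584's dict-union operator merges it (the right operand, psample, wins on conflicts), and the (-1, psample) default encodes "no match". A self-contained sketch of the idiom (assumes every lookup row initially carries the merge key):

from operator import itemgetter

def merge_samples(plate_map_samples, lookup_samples, merge_on_id):
    # Hypothetical free-standing version of the parser logic.
    plate_map_samples = sorted(plate_map_samples, key=itemgetter('id'))
    lookup_samples = sorted(lookup_samples, key=itemgetter(merge_on_id))
    for psample in plate_map_samples:
        searchables = [(jj, s) for jj, s in enumerate(lookup_samples)
                       if merge_on_id in s]
        jj, new = next(((jj, ls | psample) for jj, ls in searchables
                        if ls[merge_on_id] == psample['id']),
                       (-1, psample))
        if jj >= 0:
            lookup_samples[jj] = {}  # consume the matched row
        yield new
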
@@ -554,7 +540,7 @@ class EquipmentParser(object):
             xl (Workbook): Openpyxl workbook from submitted excel file.
             submission_type (str | SubmissionType): Type of submission expected (Wastewater, Bacterial Culture, etc.)
         """
-        logger.debug("\n\nHello from EquipmentParser!\n\n")
+        logger.info("\n\nHello from EquipmentParser!\n\n")
         if isinstance(submission_type, str):
             submission_type = SubmissionType.query(name=submission_type)
         self.submission_type = submission_type
@@ -568,7 +554,6 @@ class EquipmentParser(object):
         Returns:
             List[dict]: List of locations
         """
-        # return {k: v for k, v in self.submission_type.construct_equipment_map()}
         return {k: v for k, v in self.submission_type.construct_field_map("equipment")}

     def get_asset_number(self, input: str) -> str:
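
Both this change and the matching one in TipParser below route through a single construct_field_map(field) call; the {k: v for k, v in ...} wrapper implies it yields (key, value) pairs. A hedged sketch of that contract (the body and _field_maps storage are hypothetical):

def construct_field_map(self, field: str):
    # Yield (name, location_info) pairs for the requested field type,
    # e.g. "equipment" or "tip"; callers build a dict from the pairs.
    for name, info in self._field_maps.get(field, {}).items():
        yield name, info
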
@@ -638,7 +623,7 @@ class TipParser(object):
             xl (Workbook): Openpyxl workbook from submitted excel file.
             submission_type (str | SubmissionType): Type of submission expected (Wastewater, Bacterial Culture, etc.)
         """
-        logger.debug("\n\nHello from TipParser!\n\n")
+        logger.info("\n\nHello from TipParser!\n\n")
         if isinstance(submission_type, str):
             submission_type = SubmissionType.query(name=submission_type)
         self.submission_type = submission_type
@@ -652,7 +637,6 @@ class TipParser(object):
         Returns:
             List[dict]: List of locations
         """
-        # return {k: v for k, v in self.submission_type.construct_tips_map()}
         return {k: v for k, v in self.submission_type.construct_field_map("tip")}

     def parse_tips(self) -> List[dict]:

@@ -2,7 +2,6 @@
 Contains functions for generating summary reports
 '''
 from pprint import pformat

 from pandas import DataFrame, ExcelWriter
 import logging
 from pathlib import Path
@@ -72,7 +71,6 @@ class ReportMaker(object):
         for row in df.iterrows():
             # logger.debug(f"Row {ii}: {row}")
             lab = row[0][0]
-            # logger.debug(type(row))
             # logger.debug(f"Old lab: {old_lab}, Current lab: {lab}")
             # logger.debug(f"Name: {row[0][1]}")
             data = [item for item in row[1]]
@@ -151,7 +149,16 @@ class TurnaroundMaker(object):
         self.df = DataFrame.from_records(records)

     @classmethod
-    def build_record(cls, sub):
+    def build_record(cls, sub: BasicSubmission) -> dict:
+        """
+        Build a turnaround dictionary from a submission
+
+        Args:
+            sub (BasicSubmission): The submission to be processed.
+
+        Returns:
+
+        """
         days, tat_ok = sub.get_turnaround_time()
         return dict(name=str(sub.rsl_plate_num), days=days, submitted_date=sub.submitted_date,
                     completed_date=sub.completed_date, acceptable=tat_ok)
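
With the annotation, build_record reads as a pure submission-to-row mapper; __init__ already feeds a list of these dicts to DataFrame.from_records (see self.df above). Usage, with assumed variable names:

from pandas import DataFrame

records = [TurnaroundMaker.build_record(sub) for sub in submissions]
df = DataFrame.from_records(records)  # columns: name, days, submitted_date, completed_date, acceptable
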
@@ -170,4 +177,4 @@ class TurnaroundMaker(object):
         self.writer = ExcelWriter(filename.with_suffix(".xlsx"), engine='openpyxl')
         self.df.to_excel(self.writer, sheet_name="Turnaround")
-        # logger.debug(f"Writing report to: {filename}")
         self.writer.close()
+        self.writer.close()

@@ -41,21 +41,11 @@ class SheetWriter(object):
                 self.sub[k] = v['value']
             else:
                 self.sub[k] = v
-        # logger.debug(f"\n\nWriting to {submission.filepath.__str__()}\n\n")
-        # if self.filepath.stem.startswith("tmp"):
-        #     template = self.submission_type.template_file
-        #     workbook = load_workbook(BytesIO(template))
-        # else:
-        #     try:
-        #         workbook = load_workbook(self.filepath)
-        #     except Exception as e:
-        #         logger.error(f"Couldn't open workbook due to {e}")
         template = self.submission_type.template_file
         if not template:
             logger.error(f"No template file found, falling back to Bacterial Culture")
             template = SubmissionType.retrieve_template_file()
         workbook = load_workbook(BytesIO(template))
-        # self.workbook = workbook
         self.xl = workbook
         self.write_info()
         self.write_reagents()
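
The surviving path always builds the workbook from stored template bytes rather than reopening the submitted file, so an in-memory buffer is the natural loader. A condensed sketch (assumes template_file holds raw .xlsx bytes; the fallback logging from the hunk is elided):

from io import BytesIO
from openpyxl import load_workbook

template = submission_type.template_file or SubmissionType.retrieve_template_file()
workbook = load_workbook(BytesIO(template))  # no temporary file needed
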
@@ -152,11 +142,9 @@ class InfoWriter(object):
             try:
                 dicto['locations'] = info_map[k]
             except KeyError:
-                # continue
                 pass
             dicto['value'] = v
             if len(dicto) > 0:
-                # output[k] = dicto
                 yield k, dicto

     def write_info(self) -> Workbook:
@@ -279,7 +267,6 @@ class SampleWriter(object):
         self.sample_map = submission_type.construct_sample_map()['lookup_table']
         # NOTE: exclude any samples without a submission rank.
         samples = [item for item in self.reconcile_map(sample_list) if item['submission_rank'] > 0]
-        # self.samples = sorted(samples, key=lambda k: k['submission_rank'])
         self.samples = sorted(samples, key=itemgetter('submission_rank'))

     def reconcile_map(self, sample_list: list) -> Generator[dict, None, None]:
@@ -368,7 +355,8 @@ class EquipmentWriter(object):
                 logger.error(f"No {equipment['role']} in {pformat(equipment_map)}")
             # logger.debug(f"{equipment['role']} map: {mp_info}")
             placeholder = copy(equipment)
-            if mp_info == {}:
+            # if mp_info == {}:
+            if not mp_info:
                 for jj, (k, v) in enumerate(equipment.items(), start=1):
                     dicto = dict(value=v, row=ii, column=jj)
                     placeholder[k] = dicto
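
if not mp_info: is the idiomatic emptiness test and, unlike == {}, also guards against None:

mp_info = {}
assert mp_info == {} and not mp_info  # both true for an empty dict
mp_info = None
assert not mp_info                    # `mp_info == {}` would be False here
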
@@ -2,8 +2,7 @@
 Contains pydantic models and accompanying validators
 '''
 from __future__ import annotations
-import sys
-import uuid, re, logging, csv
+import uuid, re, logging, csv, sys
 from pydantic import BaseModel, field_validator, Field, model_validator
 from datetime import date, datetime, timedelta
 from dateutil.parser import parse
@@ -165,13 +164,7 @@ class PydReagent(BaseModel):
             report.add_result(Result(owner=__name__, code=0, msg="New reagent created.", status="Information"))
         else:
             if submission is not None and reagent not in submission.reagents:
-                # assoc = SubmissionReagentAssociation(reagent=reagent, submission=submission)
-                # assoc.comments = self.comment
                 submission.update_reagentassoc(reagent=reagent, role=self.role)
-            # else:
-            #     assoc = None
-            # add end-of-life extension from reagent type to expiry date
             # NOTE: this will now be done only in the reporting phase to account for potential changes in end-of-life extensions
         return reagent, report


@@ -191,11 +184,7 @@ class PydSample(BaseModel, extra='allow'):
         for k, v in data.model_extra.items():
             if k in model.timestamps():
                 if isinstance(v, str):
-                    # try:
                     v = datetime.strptime(v, "%Y-%m-%d")
-                    # except ValueError:
-                    #     logger.warning(f"Attribute {k} value {v} for sample {data.submitter_id} could not be coerced into date. Setting to None.")
-                    #     v = None
                 data.__setattr__(k, v)
         # logger.debug(f"Data coming out of validation: {pformat(data)}")
         return data
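
A condensed sketch of the validator pattern in this hunk, for a pydantic v2 model with extra='allow' (the field names and timestamp set are assumed):

from datetime import datetime
from pydantic import BaseModel, model_validator

class Sample(BaseModel, extra='allow'):
    submitter_id: str

    @model_validator(mode='after')
    def coerce_timestamps(self):
        # model_extra holds undeclared fields; coerce the ones the SQL
        # model stores as timestamps from "YYYY-MM-DD" strings.
        for k, v in (self.model_extra or {}).items():
            if k in {"submitted_date", "completed_date"} and isinstance(v, str):
                setattr(self, k, datetime.strptime(v, "%Y-%m-%d"))
        return self
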
@@ -379,7 +368,6 @@ class PydEquipment(BaseModel, extra='ignore'):
                                                          role=self.role, limit=1)
         except TypeError as e:
             logger.error(f"Couldn't get association due to {e}, returning...")
-            # return equipment, None
             assoc = None
         if assoc is None:
             assoc = SubmissionEquipmentAssociation(submission=submission, equipment=equipment)
@@ -830,11 +818,6 @@ class PydSubmission(BaseModel, extra='allow'):
                     logger.debug(f"Checking reagent {reagent.lot}")
                     reagent, _ = reagent.toSQL(submission=instance)
                     # logger.debug(f"Association: {assoc}")
-                    # if assoc is not None: # and assoc not in instance.submission_reagent_associations:
-                    #     if assoc not in instance.submission_reagent_associations:
-                    #         instance.submission_reagent_associations.append(assoc)
-                    #     else:
-                    #         logger.warning(f"Reagent association {assoc} is already present in {instance.submission_reagent_associations}")
             case "samples":
                 for sample in self.samples:
                     sample, associations, _ = sample.toSQL(submission=instance)
@@ -871,7 +854,6 @@ class PydSubmission(BaseModel, extra='allow'):
                     logger.warning(f"Tips association {association} is already present in {instance}")
             case item if item in instance.timestamps():
                 logger.warning(f"Incoming timestamp key: {item}, with value: {value}")
-                # value = value.replace(tzinfo=timezone)
                 if isinstance(value, date):
                     value = datetime.combine(value, datetime.min.time())
                 value = value.replace(tzinfo=timezone)
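
datetime.combine plus replace(tzinfo=...) is what upgrades a bare date into the timezone-aware datetime the ORM expects. A self-contained sketch (the hunk uses the module's own timezone object; UTC stands in here):

from datetime import date, datetime, timezone

value = date(2024, 5, 1)
if isinstance(value, date) and not isinstance(value, datetime):
    # datetime subclasses date, so the extra check keeps real datetimes intact
    value = datetime.combine(value, datetime.min.time())
value = value.replace(tzinfo=timezone.utc)
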
@@ -903,7 +885,6 @@ class PydSubmission(BaseModel, extra='allow'):
             if check:
                 try:
                     instance.set_attribute(key=key, value=value)
-                    # instance.update({key:value})
                 except AttributeError as e:
                     logger.error(f"Could not set attribute: {key} to {value} due to: \n\n {e}")
                     continue