Creation of new scripts.
@@ -1,3 +1,7 @@
+## 202412.04
+
+- Update of wastewater to allow for duplex PCR primers.
+
 ## 202412.03
 
 - Automated truncating of object names longer than 64 chars going into _auditlog
@@ -4,12 +4,11 @@ from pathlib import Path
 from datetime import date
 import calendar
 
-# Version of the realpython-reader package
 
 year = date.today().year
 month = date.today().month
 day = date.today().day
 
 
 def get_week_of_month() -> int:
     """
     Gets the current week number of the month.
@@ -21,15 +20,17 @@ def get_week_of_month() -> int:
         if day in week:
             return ii + 1
 
 
 # Automatically completes project info for help menu and compiling.
 __project__ = "submissions"
 __version__ = f"{year}{str(month).zfill(2)}.{get_week_of_month()}b"
-__author__ = {"name":"Landon Wark", "email":"Landon.Wark@phac-aspc.gc.ca"}
+__author__ = {"name": "Landon Wark", "email": "Landon.Wark@phac-aspc.gc.ca"}
 __copyright__ = f"2022-{year}, Government of Canada"
 __github__ = "https://github.com/landowark/submissions"
 
 project_path = Path(__file__).parents[2].absolute()
 
 
 class bcolors:
     HEADER = '\033[95m'
     OKBLUE = '\033[94m'
@@ -40,26 +41,3 @@ class bcolors:
     ENDC = '\033[0m'
     BOLD = '\033[1m'
     UNDERLINE = '\033[4m'
 
-# Hello Landon, this is your past self here. I'm trying not to screw you over like I usually do, so I will
-# set out the workflow I've imagined for creating new submission types.
-# First of all, you will need to write new parsing methods in backend.excel.parser to pull information out of the submission form
-# for the submission itself as well as for any samples you can pull out of that same workbook.
-# The workbooks no longer need a sheet map, but they do need their submission type put in the categories metadata of the client excel template.
-# Second, you will have to update the model in backend.db.models.submissions and provide a new polymorph to the BasicSubmission object.
-# The BSO should hold the majority of the general info.
-# You can also update any of the parsers to pull out any custom info you need, like enforcing RSL plate numbers, scraping PCR results, etc.
-
-# Landon, this is your slightly less past self here. For the most part, Past Landon has not screwed us. I've been able to add in the
-# Wastewater Artic with minimal difficulties, except that the parser of the non-standard, user-generated excel sheets required slightly
-# more work.
-
-# Landon, this is your even more slightly less past self here. I've overhauled a lot of stuff to make things more flexible, so you should
-# hopefully be even less screwed than before... at least with regards to parsers. The addition of kits and such is another story. Putting that
-# On the todo list.
-
-'''
-Landon, this is 2023-11-07 Landon here in a comment string no less. Really all you should have to do now to add in new experiments is create a new
-BasicSubmission derivative with associated SubbmissionType, BasicSample (and maybe SubmissionSampleAssociation if you're feeling lucky), oh, also,
-kits, reagenttypes, reagents... This is sounding less and less impressive as I type it.
-'''
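
For context, the version string above is built from `get_week_of_month()`, whose loop is only partly visible in this hunk. A minimal sketch of how such a function can work with the `calendar` module imported above (the loop variable names match the visible `if day in week: return ii + 1`, but the surrounding lines are an assumption):

```python
import calendar
from datetime import date

def get_week_of_month() -> int:
    """Current week number of the month (sketch, assuming calendar.monthcalendar)."""
    today = date.today()
    for ii, week in enumerate(calendar.monthcalendar(today.year, today.month)):
        if today.day in week:
            return ii + 1

# e.g. "202412.1b" during the first week of December 2024
print(f"{date.today().year}{str(date.today().month).zfill(2)}.{get_week_of_month()}b")
```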
@@ -8,6 +8,7 @@ if check_if_app():
     # setup custom logger
     logger = setup_logger(verbosity=3)
 
+# from backend.scripts import modules
 from backend import scripts
 from PyQt6.QtWidgets import QApplication
 from frontend.widgets.app import App
@@ -22,11 +23,12 @@ def run_startup():
     for script in startup_scripts:
         try:
             func = getattr(scripts, script)
+            # func = modules[script]
         except AttributeError as e:
             logger.error(f"Couldn't run startup script {script} due to {e}")
             continue
        logger.info(f"Running startup script: {func.__name__}")
-        func(ctx)
+        func.script(ctx)
 
 
 def run_teardown():
@@ -38,11 +40,12 @@ def run_teardown():
     for script in teardown_scripts:
         try:
             func = getattr(scripts, script)
+            # func = modules[script]
         except AttributeError as e:
             logger.error(f"Couldn't run teardown script {script} due to {e}")
             continue
         logger.info(f"Running teardown script: {func.__name__}")
-        func(ctx)
+        func.script(ctx)
 
 if __name__ == '__main__':
     run_startup()
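
The switch from `func(ctx)` to `func.script(ctx)` reflects that `getattr(scripts, script)` now resolves to a plugin *module* rather than a function, with each module expected to expose a `script(ctx)` entry point. A self-contained toy version of the dispatch (all names here are stand-ins, not the app's objects):

```python
import types

# Fake package with one plugin module; importing a submodule binds it like this.
scripts = types.ModuleType("scripts")
hello = types.ModuleType("scripts.hello")
hello.script = lambda ctx: print(f"Hello from {ctx}!")
scripts.hello = hello

for name in ["hello", "missing"]:
    try:
        func = getattr(scripts, name)   # a module object, not a callable
    except AttributeError as e:
        print(f"Couldn't run startup script {name} due to {e}")
        continue
    print(f"Running startup script: {func.__name__}")
    func.script("fake ctx")             # call the module's script() entry point
```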
@@ -2,12 +2,12 @@
 All kit and reagent related models
 """
 from __future__ import annotations
-import datetime, json, zipfile, yaml, logging, re
+import json, zipfile, yaml, logging, re
 from pprint import pformat
 from sqlalchemy import Column, String, TIMESTAMP, JSON, INTEGER, ForeignKey, Interval, Table, FLOAT, BLOB
 from sqlalchemy.orm import relationship, validates, Query
 from sqlalchemy.ext.associationproxy import association_proxy
-from datetime import date, datetime
+from datetime import date, datetime, timedelta
 from tools import check_authorization, setup_lookup, Report, Result, check_regex_match, yaml_regex_creator, timezone
 from typing import List, Literal, Generator, Any
 from pandas import ExcelFile
@@ -259,6 +259,79 @@ class KitType(BaseClass):
             base_dict['equipment_roles'].append(v)
         return base_dict
 
+    @classmethod
+    def import_from_yml(cls, submission_type: str | SubmissionType, filepath: Path | str | None = None, import_dict: dict | None = None) -> KitType:
+        if isinstance(submission_type, str):
+            submission_type = SubmissionType.query(name=submission_type)
+        if filepath:
+            yaml.add_constructor("!regex", yaml_regex_creator)
+            if isinstance(filepath, str):
+                filepath = Path(filepath)
+            if not filepath.exists():
+                logging.critical(f"Given file could not be found.")
+                return None
+            with open(filepath, "r") as f:
+                if filepath.suffix == ".json":
+                    import_dict = json.load(fp=f)
+                elif filepath.suffix == ".yml":
+                    import_dict = yaml.load(stream=f, Loader=yaml.Loader)
+                else:
+                    raise Exception(f"Filetype {filepath.suffix} not supported.")
+        new_kit = KitType.query(name=import_dict['kit_type']['name'])
+        if not new_kit:
+            new_kit = KitType(name=import_dict['kit_type']['name'])
+        for role in import_dict['kit_type']['reagent_roles']:
+            new_role = ReagentRole.query(name=role['role'])
+            if new_role:
+                check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
+                if check.lower() == "n":
+                    new_role = None
+                else:
+                    pass
+            if not new_role:
+                eol = timedelta(role['extension_of_life'])
+                new_role = ReagentRole(name=role['role'], eol_ext=eol)
+            uses = dict(expiry=role['expiry'], lot=role['lot'], name=role['name'], sheet=role['sheet'])
+            ktrr_assoc = KitTypeReagentRoleAssociation(kit_type=new_kit, reagent_role=new_role, uses=uses)
+            ktrr_assoc.submission_type = submission_type
+            ktrr_assoc.required = role['required']
+        ktst_assoc = SubmissionTypeKitTypeAssociation(
+            kit_type=new_kit,
+            submission_type=submission_type,
+            mutable_cost_sample=import_dict['mutable_cost_sample'],
+            mutable_cost_column=import_dict['mutable_cost_column'],
+            constant_cost=import_dict['constant_cost']
+        )
+        for role in import_dict['kit_type']['equipment_roles']:
+            new_role = EquipmentRole.query(name=role['role'])
+            if new_role:
+                check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
+                if check.lower() == "n":
+                    new_role = None
+                else:
+                    pass
+            if not new_role:
+                new_role = EquipmentRole(name=role['role'])
+                for equipment in Equipment.assign_equipment(equipment_role=new_role):
+                    new_role.instances.append(equipment)
+            ster_assoc = SubmissionTypeEquipmentRoleAssociation(submission_type=submission_type,
+                                                                equipment_role=new_role)
+            try:
+                uses = dict(name=role['name'], process=role['process'], sheet=role['sheet'],
+                            static=role['static'])
+            except KeyError:
+                uses = None
+            ster_assoc.uses = uses
+            for process in role['processes']:
+                new_process = Process.query(name=process)
+                if not new_process:
+                    new_process = Process(name=process)
+                new_process.submission_types.append(submission_type)
+                new_process.kit_types.append(new_kit)
+                new_process.equipment_roles.append(new_role)
+        return new_kit
+
 
 
 class ReagentRole(BaseClass):
     """
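
The new `import_from_yml()` accepts either a file or a ready-made `import_dict`. A hypothetical dict showing the keys the method reads (names and values here are invented for illustration; real exports come from the corresponding `to_export_dict()` methods):

```python
example_kit = {
    "kit_type": {
        "name": "Example Extraction Kit",
        "reagent_roles": [
            {"role": "Lysis Buffer", "extension_of_life": 30, "required": 1,
             "expiry": "C3", "lot": "B3", "name": "A3", "sheet": "Reagent Info"},
        ],
        "equipment_roles": [
            {"role": "Liquid Handler", "processes": ["Example Extraction"],
             "name": "A10", "process": "B10", "sheet": "Equipment", "static": 1},
        ],
    },
    "mutable_cost_sample": 1.5,
    "mutable_cost_column": 10.0,
    "constant_cost": 25.0,
}
# new_kit = KitType.import_from_yml(submission_type="Wastewater", import_dict=example_kit)
```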
@@ -903,58 +976,7 @@ class SubmissionType(BaseClass):
         submission_type.sample_map = import_dict['samples']
         submission_type.defaults = import_dict['defaults']
         for kit in import_dict['kits']:
-            new_kit = KitType.query(name=kit['kit_type']['name'])
-            if not new_kit:
-                new_kit = KitType(name=kit['kit_type']['name'])
-            for role in kit['kit_type']['reagent roles']:
-                new_role = ReagentRole.query(name=role['role'])
-                if new_role:
-                    check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
-                    if check.lower() == "n":
-                        new_role = None
-                    else:
-                        pass
-                if not new_role:
-                    eol = datetime.timedelta(role['extension_of_life'])
-                    new_role = ReagentRole(name=role['role'], eol_ext=eol)
-                uses = dict(expiry=role['expiry'], lot=role['lot'], name=role['name'], sheet=role['sheet'])
-                ktrr_assoc = KitTypeReagentRoleAssociation(kit_type=new_kit, reagent_role=new_role, uses=uses)
-                ktrr_assoc.submission_type = submission_type
-                ktrr_assoc.required = role['required']
-            ktst_assoc = SubmissionTypeKitTypeAssociation(
-                kit_type=new_kit,
-                submission_type=submission_type,
-                mutable_cost_sample=kit['mutable_cost_sample'],
-                mutable_cost_column=kit['mutable_cost_column'],
-                constant_cost=kit['constant_cost']
-            )
-            for role in kit['kit_type']['equipment roles']:
-                new_role = EquipmentRole.query(name=role['role'])
-                if new_role:
-                    check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
-                    if check.lower() == "n":
-                        new_role = None
-                    else:
-                        pass
-                if not new_role:
-                    new_role = EquipmentRole(name=role['role'])
-                    for equipment in Equipment.assign_equipment(equipment_role=new_role):
-                        new_role.instances.append(equipment)
-                ster_assoc = SubmissionTypeEquipmentRoleAssociation(submission_type=submission_type,
-                                                                    equipment_role=new_role)
-                try:
-                    uses = dict(name=role['name'], process=role['process'], sheet=role['sheet'],
-                                static=role['static'])
-                except KeyError:
-                    uses = None
-                ster_assoc.uses = uses
-                for process in role['processes']:
-                    new_process = Process.query(name=process)
-                    if not new_process:
-                        new_process = Process(name=process)
-                    new_process.submission_types.append(submission_type)
-                    new_process.kit_types.append(new_kit)
-                    new_process.equipment_roles.append(new_role)
+            new_kit = KitType.import_from_yml(submission_type=submission_type, import_dict=kit)
         if 'orgs' in import_dict.keys():
             logger.info("Found Organizations to be imported.")
             Organization.import_from_yml(filepath=filepath)
@@ -1321,7 +1343,8 @@ class Equipment(BaseClass, LogMixin):
         else:
             return {k: v for k, v in self.__dict__.items()}
 
-    def get_processes(self, submission_type: str | SubmissionType | None = None, extraction_kit: str | KitType | None = None) -> List[str]:
+    def get_processes(self, submission_type: str | SubmissionType | None = None,
+                      extraction_kit: str | KitType | None = None) -> List[str]:
         """
         Get all processes associated with this Equipment for a given SubmissionType
 
@@ -1399,7 +1422,7 @@ class Equipment(BaseClass, LogMixin):
         from backend.validators.pydant import PydEquipment
         processes = self.get_processes(submission_type=submission_type, extraction_kit=extraction_kit)
         return PydEquipment(processes=processes, role=role,
                             **self.to_dict(processes=False))
 
     @classmethod
     def get_regex(cls) -> re.Pattern:
@@ -1547,9 +1570,9 @@ class EquipmentRole(BaseClass):
             extraction_kit = KitType.query(name=extraction_kit)
         for process in self.processes:
             if submission_type and submission_type not in process.submission_types:
                 continue
             if extraction_kit and extraction_kit not in process.kit_types:
                 continue
             yield process.name
 
     def to_export_dict(self, submission_type: SubmissionType, kit_type: KitType):
@@ -1597,7 +1620,6 @@ class SubmissionEquipmentAssociation(BaseClass):
         Returns:
             dict: This SubmissionEquipmentAssociation as a dictionary
         """
-        # TODO: Currently this will only fetch a single process, even if multiple are selectable.
         try:
             process = self.process.name
         except AttributeError:
@@ -1606,7 +1628,13 @@ class SubmissionEquipmentAssociation(BaseClass):
             processes=[process], role=self.role, nickname=self.equipment.nickname)
         return output
 
-    def to_pydantic(self):
+    def to_pydantic(self) -> "PydEquipment":
+        """
+        Returns a pydantic model based on this object.
+
+        Returns:
+            PydEquipment: pydantic equipment model
+        """
         from backend.validators import PydEquipment
         return PydEquipment(**self.to_sub_dict())
 
@@ -2,6 +2,8 @@
 Models for the main submission and sample types.
 """
 from __future__ import annotations
+
+from collections import OrderedDict
 from copy import deepcopy
 from getpass import getuser
 import logging, uuid, tempfile, re, base64, numpy as np, pandas as pd, types, sys
@@ -559,7 +561,7 @@ class BasicSubmission(BaseClass, LogMixin):
         except AttributeError as e:
             logger.error(f"Could not set {self} attribute {key} to {value} due to \n{e}")
 
-    def update_subsampassoc(self, sample: BasicSample, input_dict: dict):
+    def update_subsampassoc(self, sample: BasicSample, input_dict: dict) -> SubmissionSampleAssociation:
         """
         Update a joined submission sample association.
 
@@ -568,7 +570,7 @@ class BasicSubmission(BaseClass, LogMixin):
             input_dict (dict): values to be updated
 
         Returns:
-            Result: _description_
+            SubmissionSampleAssociation: Updated association
         """
         try:
             assoc = next(item for item in self.submission_sample_associations if item.sample == sample)
@@ -583,14 +585,14 @@ class BasicSubmission(BaseClass, LogMixin):
                 # NOTE: for some reason I don't think assoc.__setattr__(k, v) works here.
             except AttributeError:
                 logger.error(f"Can't set {k} to {v}")
-        result = assoc.save()
-        return result
+        return assoc
 
     def update_reagentassoc(self, reagent: Reagent, role: str):
         from backend.db import SubmissionReagentAssociation
         # NOTE: get the first reagent assoc that fills the given role.
         try:
-            assoc = next(item for item in self.submission_reagent_associations if item.reagent and role in [role.name for role in item.reagent.role])
+            assoc = next(item for item in self.submission_reagent_associations if
+                         item.reagent and role in [role.name for role in item.reagent.role])
             assoc.reagent = reagent
         except StopIteration as e:
             logger.error(f"Association for {role} not found, creating new association.")
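
`update_subsampassoc()` now returns the association instead of saving it, so persisting becomes the caller's job. The caller-side pattern, which the Wastewater hunk further down adopts (`sub`, `sample`, `values`, and `report` are stand-ins):

```python
assoc = sub.update_subsampassoc(sample=sample, input_dict=values)
result = assoc.save()        # saving is now explicit, where a Result is wanted
report.add_result(result)
```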
@@ -611,7 +613,8 @@ class BasicSubmission(BaseClass, LogMixin):
         missing = value in ['', 'None', None]
         match key:
             case "reagents":
-                field_value = [item.to_pydantic(extraction_kit=self.extraction_kit) for item in self.submission_reagent_associations]
+                field_value = [item.to_pydantic(extraction_kit=self.extraction_kit) for item in
+                               self.submission_reagent_associations]
             case "samples":
                 field_value = [item.to_pydantic() for item in self.submission_sample_associations]
             case "equipment":
@@ -643,7 +646,8 @@ class BasicSubmission(BaseClass, LogMixin):
                 continue
             new_dict[key] = field_value
         new_dict['filepath'] = Path(tempfile.TemporaryFile().name)
-        return PydSubmission(**new_dict)
+        dicto.update(new_dict)
+        return PydSubmission(**dicto)
 
     def save(self, original: bool = True):
         """
@@ -1006,7 +1010,7 @@ class BasicSubmission(BaseClass, LogMixin):
     @setup_lookup
     def query(cls,
               submission_type: str | SubmissionType | None = None,
-              submission_type_name: str|None = None,
+              submission_type_name: str | None = None,
              id: int | str | None = None,
              rsl_plate_num: str | None = None,
              start_date: date | str | int | None = None,
@@ -1287,7 +1291,7 @@ class BasicSubmission(BaseClass, LogMixin):
         writer = pyd.to_writer()
         writer.xl.save(filename=fname.with_suffix(".xlsx"))
 
-    def get_turnaround_time(self) -> Tuple[int|None, bool|None]:
+    def get_turnaround_time(self) -> Tuple[int | None, bool | None]:
         try:
             completed = self.completed_date.date()
         except AttributeError:
@@ -1295,7 +1299,8 @@ class BasicSubmission(BaseClass, LogMixin):
         return self.calculate_turnaround(start_date=self.submitted_date.date(), end_date=completed)
 
     @classmethod
-    def calculate_turnaround(cls, start_date:date|None=None, end_date:date|None=None) -> Tuple[int|None, bool|None]:
+    def calculate_turnaround(cls, start_date: date | None = None, end_date: date | None = None) -> Tuple[
+        int | None, bool | None]:
         if 'pytest' not in sys.modules:
             from tools import ctx
         else:
@@ -1499,7 +1504,7 @@ class Wastewater(BasicSubmission):
         output = []
         for sample in samples:
             # NOTE: remove '-{target}' from controls
-            sample['sample'] = re.sub('-N\\d$', '', sample['sample'])
+            sample['sample'] = re.sub('-N\\d*$', '', sample['sample'])
             # NOTE: if sample is already in output skip
             if sample['sample'] in [item['sample'] for item in output]:
                 logger.warning(f"Already have {sample['sample']}")
@@ -1509,7 +1514,7 @@ class Wastewater(BasicSubmission):
             # NOTE: Set assessment
             sample[f"{sample['target'].lower()}_status"] = sample['assessment']
             # NOTE: Get sample having other target
-            other_targets = [s for s in samples if re.sub('-N\\d$', '', s['sample']) == sample['sample']]
+            other_targets = [s for s in samples if re.sub('-N\\d*$', '', s['sample']) == sample['sample']]
             for s in other_targets:
                 sample[f"ct_{s['target'].lower()}"] = s['ct'] if isinstance(s['ct'], float) else 0.0
                 sample[f"{s['target'].lower()}_status"] = s['assessment']
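
The regex change from `-N\d$` to `-N\d*$` makes the trailing digit optional and tolerates multi-digit suffixes, which matters once duplex PCR runs emit more than one target per sample. A quick check with made-up sample names:

```python
import re

for name in ["EN-20241204-N1", "EN-20241204-N2", "EN-20241204-N"]:
    print(re.sub('-N\\d*$', '', name))   # all three -> EN-20241204
# The old '-N\\d$' pattern would have left 'EN-20241204-N' untouched.
```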
@@ -1613,7 +1618,9 @@ class Wastewater(BasicSubmission):
                 sample_dict = next(item for item in pcr_samples if item['sample'] == sample.rsl_number)
             except StopIteration:
                 continue
-            self.update_subsampassoc(sample=sample, input_dict=sample_dict)
+            assoc = self.update_subsampassoc(sample=sample, input_dict=sample_dict)
+            result = assoc.save()
+            report.add_result(result)
         controltype = ControlType.query(name="PCR Control")
         submitted_date = datetime.strptime(" ".join(parser.pcr['run_start_date/time'].split(" ")[:-1]),
                                            "%Y-%m-%d %I:%M:%S %p")
@@ -1623,6 +1630,27 @@ class Wastewater(BasicSubmission):
         new_control.controltype = controltype
         new_control.submission = self
         new_control.save()
+        return report
+
+    def update_subsampassoc(self, sample: BasicSample, input_dict: dict):
+        """
+        Updates a joined submission sample association by assigning ct values to n1 or n2 based on alphabetical sorting.
+
+        Args:
+            sample (BasicSample): Associated sample.
+            input_dict (dict): values to be updated
+
+        Returns:
+            SubmissionSampleAssociation: Updated association
+        """
+        assoc = super().update_subsampassoc(sample=sample, input_dict=input_dict)
+        targets = {k: input_dict[k] for k in sorted(input_dict.keys()) if k.startswith("ct_")}
+        assert 0 < len(targets) <= 2
+        for i, v in enumerate(targets.values(), start=1):
+            update_key = f"ct_n{i}"
+            if getattr(assoc, update_key) is None:
+                setattr(assoc, update_key, v)
+        return assoc
 
 
 class WastewaterArtic(BasicSubmission):
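
The new `Wastewater.update_subsampassoc()` override routes however many `ct_*` values arrive (one or two, per the assert) into the `ct_n1`/`ct_n2` slots in alphabetical key order, filling only slots that are still empty. A toy run with made-up values (`SimpleNamespace` stands in for the association):

```python
from types import SimpleNamespace

assoc = SimpleNamespace(ct_n1=None, ct_n2=None)
input_dict = {"ct_n2": 24.1, "ct_n1": 22.3, "sample": "ignored"}

targets = {k: input_dict[k] for k in sorted(input_dict.keys()) if k.startswith("ct_")}
assert 0 < len(targets) <= 2
for i, v in enumerate(targets.values(), start=1):
    update_key = f"ct_n{i}"
    if getattr(assoc, update_key) is None:   # never clobber an existing ct
        setattr(assoc, update_key, v)

print(assoc.ct_n1, assoc.ct_n2)  # 22.3 24.1, regardless of dict insertion order
```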
@@ -1661,7 +1689,7 @@ class WastewaterArtic(BasicSubmission):
         else:
             output['artic_technician'] = self.artic_technician
         output['gel_info'] = self.gel_info
-        output['gel_image_path'] = self.gel_image
+        output['gel_image'] = self.gel_image
         output['dna_core_submission_number'] = self.dna_core_submission_number
         output['source_plates'] = self.source_plates
         output['artic_date'] = self.artic_date or self.submitted_date
@@ -1988,7 +2016,6 @@ class WastewaterArtic(BasicSubmission):
             egel_section = custom_fields['egel_info']
             # NOTE: print json field gel results to Egel results
             worksheet = input_excel[egel_section['sheet']]
-            # TODO: Move all this into a seperate function?
             start_row = egel_section['start_row'] - 1
             start_column = egel_section['start_column'] - 3
             for row, ki in enumerate(info['gel_info']['value'], start=1):
@@ -2003,10 +2030,10 @@ class WastewaterArtic(BasicSubmission):
                         logger.error(f"Failed {kj['name']} with value {kj['value']} to row {row}, column {column}")
         else:
             logger.warning("No gel info found.")
-        if check_key_or_attr(key='gel_image_path', interest=info, check_none=True):
+        if check_key_or_attr(key='gel_image', interest=info, check_none=True):
             worksheet = input_excel[egel_section['sheet']]
             with ZipFile(cls.__directory_path__.joinpath("submission_imgs.zip")) as zipped:
-                z = zipped.extract(info['gel_image_path']['value'], Path(TemporaryDirectory().name))
+                z = zipped.extract(info['gel_image']['value'], Path(TemporaryDirectory().name))
                 img = OpenpyxlImage(z)
                 img.height = 400  # insert image height in pixels as float or int (e.g. 305.5)
                 img.width = 600
@@ -2041,9 +2068,9 @@ class WastewaterArtic(BasicSubmission):
             headers = [item['name'] for item in base_dict['gel_info'][0]['values']]
             base_dict['headers'] = [''] * (4 - len(headers))
             base_dict['headers'] += headers
-        if check_key_or_attr(key='gel_image_path', interest=base_dict, check_none=True):
+        if check_key_or_attr(key='gel_image', interest=base_dict, check_none=True):
             with ZipFile(cls.__directory_path__.joinpath("submission_imgs.zip")) as zipped:
-                base_dict['gel_image'] = base64.b64encode(zipped.read(base_dict['gel_image_path'])).decode('utf-8')
+                base_dict['gel_image_actual'] = base64.b64encode(zipped.read(base_dict['gel_image'])).decode('utf-8')
         return base_dict, template
 
     def custom_context_events(self) -> dict:
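
Net effect of the rename in this class: the zip member name now travels under `gel_image` (formerly `gel_image_path`) and the base64 payload under `gel_image_actual` (formerly `gel_image`), which the template hunk below consumes. Illustrative shape only:

```python
base_dict = {
    "gel_image": "gel_run.jpg",            # zip member name (was 'gel_image_path')
    "gel_image_actual": "<base64 data>",   # inlined into the report <img> tag
}
```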
@@ -1,7 +1,8 @@
-from .irida import import_irida
-
-def hello(ctx):
-    print("\n\nHello! Welcome to Robotics Submission Tracker.\n\n")
-
-def goodbye(ctx):
-    print("\n\nGoodbye. Thank you for using Robotics Submission Tracker.\n\n")
+from pathlib import Path
+import importlib
+
+p = Path(__file__).parent.absolute()
+subs = [item.stem for item in p.glob("*.py") if "__" not in item.stem]
+modules = {}
+for sub in subs:
+    importlib.import_module(f"backend.scripts.{sub}")
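
For reference, a throwaway replica of this discovery loop against a temp package on disk (the `plugpkg` name and file contents are invented for the demo). Importing a submodule binds it as an attribute of its package, which is what lets the runner's `getattr(scripts, name)` succeed:

```python
import importlib, sys, tempfile
from pathlib import Path

pkg_dir = Path(tempfile.mkdtemp()) / "plugpkg"
pkg_dir.mkdir()
(pkg_dir / "__init__.py").write_text("")
(pkg_dir / "hello.py").write_text("def script(ctx):\n    print('hello', ctx)\n")
sys.path.insert(0, str(pkg_dir.parent))

pkg = importlib.import_module("plugpkg")
subs = [item.stem for item in pkg_dir.glob("*.py") if "__" not in item.stem]
for sub in subs:
    importlib.import_module(f"plugpkg.{sub}")  # side effect: binds pkg.hello

getattr(pkg, "hello").script("ctx")            # prints: hello ctx
```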
new file: src/submissions/backend/scripts/backup_database.py (45 lines)
@@ -0,0 +1,45 @@
+"""
+script meant to copy database data to new file. Currently for Sqlite only
+"""
+import logging, shutil
+from datetime import date
+from pathlib import Path
+from tools import Settings
+import pyodbc
+
+logger = logging.getLogger(f"submissions.{__name__}")
+
+
+def script(ctx: Settings):
+    """
+    Copies the database into the backup directory the first time it is opened every month.
+    """
+    month = date.today().strftime("%Y-%m")
+    current_month_bak = Path(ctx.backup_path).joinpath(f"submissions_backup-{month}").resolve()
+    logger.info(f"Here is the db directory: {ctx.database_path}")
+    logger.info(f"Here is the backup directory: {ctx.backup_path}")
+    match ctx.database_schema:
+        case "sqlite":
+            db_path = ctx.database_path.joinpath(ctx.database_name).with_suffix(".db")
+            current_month_bak = current_month_bak.with_suffix(".db")
+            if not current_month_bak.exists() and "Archives" not in db_path.__str__():
+                logger.info("No backup found for this month, backing up database.")
+                try:
+                    shutil.copyfile(db_path, current_month_bak)
+                except PermissionError as e:
+                    logger.error(f"Couldn't backup database due to: {e}")
+        case "postgresql+psycopg2":
+            logger.warning(f"Backup function not yet implemented for psql")
+            current_month_bak = current_month_bak.with_suffix(".psql")
+        case "mssql+pyodbc":
+            logger.warning(f"{ctx.database_schema} backup is currently experiencing permission issues")
+            current_month_bak = current_month_bak.with_suffix(".bak")
+            return
+            if not current_month_bak.exists():
+                logger.info(f"No backup found for this month, backing up database to {current_month_bak}.")
+                connection = pyodbc.connect(driver='{ODBC Driver 18 for SQL Server}',
+                                            server=f'{ctx.database_path}', database=f'{ctx.database_name}',
+                                            trusted_connection='yes', trustservercertificate="yes", autocommit=True)
+                backup = f"BACKUP DATABASE [{ctx.database_name}] TO DISK = N'{current_month_bak}'"
+                cursor = connection.cursor().execute(backup)
+                connection.close()
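
The script keys one backup per calendar month by embedding `YYYY-MM` in the filename and skipping the copy when that file already exists; note the early `return` in the `mssql+pyodbc` branch deliberately leaves the pyodbc path disabled for now, matching the permission warning above it. The naming scheme, with a stand-in backup directory:

```python
from datetime import date
from pathlib import Path

month = date.today().strftime("%Y-%m")                  # e.g. "2024-12"
bak = Path("/backups") / f"submissions_backup-{month}"  # '/backups' stands in for ctx.backup_path
print(bak.with_suffix(".db"))                           # /backups/submissions_backup-2024-12.db
```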
new file: src/submissions/backend/scripts/goodbye.py (5 lines)
@@ -0,0 +1,5 @@
+"""
+Test script for teardown_scripts
+"""
+def script(ctx):
+    print("\n\nGoodbye. Thank you for using Robotics Submission Tracker.\n\n")

new file: src/submissions/backend/scripts/hello.py (5 lines)
@@ -0,0 +1,5 @@
+"""
+Test script for startup_scripts
+"""
+def script(ctx):
+    print("\n\nHello! Welcome to Robotics Submission Tracker.\n\n")
@@ -7,7 +7,8 @@ from backend.db import IridaControl, ControlType
 
 logger = logging.getLogger(f"submissions.{__name__}")
 
-def import_irida(ctx:Settings):
+
+def script(ctx:Settings):
     """
     Grabs Irida controls from secondary database.
 
@@ -502,7 +502,6 @@ class PydSubmission(BaseModel, extra='allow'):
         dlg = ObjectSelector(title="Missing Submitting Lab",
                              message="We need a submitting lab. Please select from the list.",
                              obj_type=Organization)
-
         if dlg.exec():
             value['value'] = dlg.parse_form()
         else:
@@ -60,7 +60,6 @@ class App(QMainWindow):
         self._connectActions()
         self.show()
         self.statusBar().showMessage('Ready', 5000)
-        self.backup_database()
 
     def _createMenuBar(self):
         """
@@ -169,28 +168,6 @@ class App(QMainWindow):
         dlg = SearchBox(self, object_type=BasicSample, extras=[])
         dlg.exec()
 
-    def backup_database(self):
-        """
-        Copies the database into the backup directory the first time it is opened every month.
-        """
-        month = date.today().strftime("%Y-%m")
-        current_month_bak = Path(self.ctx.backup_path).joinpath(f"submissions_backup-{month}").resolve()
-        logger.info(f"Here is the db directory: {self.ctx.database_path}")
-        logger.info(f"Here is the backup directory: {self.ctx.backup_path}")
-        match self.ctx.database_schema:
-            case "sqlite":
-                db_path = self.ctx.database_path.joinpath(self.ctx.database_name).with_suffix(".db")
-                current_month_bak = current_month_bak.with_suffix(".db")
-                if not current_month_bak.exists() and "Archives" not in db_path.__str__():
-                    logger.info("No backup found for this month, backing up database.")
-                    try:
-                        shutil.copyfile(db_path, current_month_bak)
-                    except PermissionError as e:
-                        logger.error(f"Couldn't backup database due to: {e}")
-            case "postgresql+psycopg2":
-                logger.warning(f"Backup function not yet implemented for psql")
-                current_month_bak = current_month_bak.with_suffix(".psql")
-
     def export_ST_yaml(self):
         """
         Copies submission type yaml to file system for editing and remport
@@ -83,7 +83,6 @@ class ControlsViewer(InfoPane):
 
     @report_result
     def chart_maker_function(self, *args, **kwargs):
-        # TODO: Generalize this by moving as much code as possible to IridaControl
         """
         Create html chart for controls reporting
 
@@ -94,7 +93,7 @@ class ControlsViewer(InfoPane):
             Tuple[QMainWindow, dict]: Collection of new main app window and result dict
         """
         report = Report()
-        # NOTE: set the mode_sub_type for kraken
+        # NOTE: set the mode_sub_type for kraken. Disabled in PCRControl
         if self.mode_sub_typer.currentText() == "":
             self.mode_sub_type = None
         else:
@@ -12,9 +12,9 @@
     {% if sub['gel_info'] %}
         <br/>
         <h3><u>Gel Box:</u></h3>
-        {% if sub['gel_image'] %}
+        {% if sub['gel_image_actual'] %}
             <br/>
-            <img align='left' height="400px" width="600px" src="data:image/jpeg;base64,{{ sub['gel_image'] | safe }}">
+            <img align='left' height="400px" width="600px" src="data:image/jpeg;base64,{{ sub['gel_image_actual'] | safe }}">
         {% endif %}
         <br/>
         <table style="width:100%; border: 1px solid black; border-collapse: collapse;">
@@ -483,18 +483,14 @@ class Settings(BaseSettings, extra="allow"):
                 continue
             match v:
                 case Path():
-                    # print("Path")
                     if v.is_dir():
-                        # print("dir")
                         v = v.absolute().__str__()
                     elif v.is_file():
-                        # print("file")
                         v = v.parent.absolute().__str__()
                     else:
                         v = v.__str__()
                 case _:
                     pass
-            # print(f"Key: {k}, Value: {v}")
             dicto[k] = v
         with open(settings_path, 'w') as f:
             yaml.dump(dicto, f)
@@ -1009,8 +1005,6 @@ def create_holidays_for_year(year: int|None=None) -> List[date]:
     holidays = [date(year, 1, 1), date(year, 7,1), date(year, 9, 30),
                 date(year, 11, 11), date(year, 12, 25), date(year, 12, 26),
                 date(year+1, 1, 1)]
-    # August Civic
-    # holidays.append(find_nth_monday(year, 8))
     # Labour Day
     holidays.append(find_nth_monday(year, 9))
     # Thanksgiving
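
`find_nth_monday()` itself is elided here; a plausible sketch under the assumption that it returns the nth Monday of a month (Labour Day being the first Monday of September, which matches the call above):

```python
import calendar
from datetime import date

def find_nth_monday(year: int, month: int, n: int = 1) -> date:
    """Hypothetical stand-in: nth Monday of the given month."""
    mondays = [week[calendar.MONDAY] for week in calendar.monthcalendar(year, month)
               if week[calendar.MONDAY] != 0]
    return date(year, month, mondays[n - 1])

assert find_nth_monday(2024, 9) == date(2024, 9, 2)  # Labour Day 2024
```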