| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q21000
|
AnalysisRequest.getManagers
|
train
|
def getManagers(self):
"""Return all managers of responsible departments
"""
manager_ids = []
manager_list = []
for department in self.getDepartments():
manager = department.getManager()
if manager is None:
continue
manager_id = manager.getId()
if manager_id not in manager_ids:
manager_ids.append(manager_id)
manager_list.append(manager)
return manager_list
|
python
|
{
"resource": ""
}
|
q21001
|
AnalysisRequest.getDueDate
|
train
|
def getDueDate(self):
"""Returns the earliest due date of the analyses this Analysis Request
contains."""
due_dates = map(lambda an: an.getDueDate, self.getAnalyses())
return due_dates and min(due_dates) or None
|
python
|
{
"resource": ""
}
|
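The `due_dates and min(due_dates) or None` idiom above relies on `map` returning a list (Python 2) and on the earliest date being truthy. A minimal standalone sketch of the same "earliest due date or None" logic, assuming plain date values instead of catalog brains:

from datetime import date

def earliest_due_date(due_dates):
    # Drop missing values and return the earliest date, or None if empty
    due_dates = [d for d in due_dates if d]
    return min(due_dates) if due_dates else None

print(earliest_due_date([date(2024, 5, 2), date(2024, 4, 30)]))  # 2024-04-30
print(earliest_due_date([]))                                     # None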
q21002
|
AnalysisRequest.getLate
|
train
|
def getLate(self):
"""Return True if there is at least one late analysis in this Request
"""
for analysis in self.getAnalyses():
if analysis.review_state == "retracted":
continue
analysis_obj = api.get_object(analysis)
if analysis_obj.isLateAnalysis():
return True
return False
|
python
|
{
"resource": ""
}
|
q21003
|
AnalysisRequest.getPrinted
|
train
|
def getPrinted(self):
""" returns "0", "1" or "2" to indicate Printed state.
0 -> Never printed.
1 -> Printed after last publish
2 -> Printed but republished afterwards.
"""
workflow = getToolByName(self, 'portal_workflow')
review_state = workflow.getInfoFor(self, 'review_state', '')
if review_state not in ['published']:
return "0"
report_list = sorted(self.objectValues('ARReport'),
key=lambda report: report.getDatePublished())
if not report_list:
return "0"
last_report = report_list[-1]
if last_report.getDatePrinted():
return "1"
else:
for report in report_list:
if report.getDatePrinted():
return "2"
return "0"
|
python
|
{
"resource": ""
}
|
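A condensed sketch of the same printed-state decision, assuming each report is a plain dict with a "printed" timestamp (or None) and the list is sorted by publish date, newest last:

def printed_state(reports):
    # "0": never printed, "1": printed after last publish,
    # "2": printed, but republished afterwards
    if not reports:
        return "0"
    if reports[-1].get("printed"):
        return "1"
    if any(r.get("printed") for r in reports):
        return "2"
    return "0"

reports = [{"printed": "2024-01-02"}, {"printed": None}]
print(printed_state(reports))  # "2": an older report was printed, then republished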
q21004
|
AnalysisRequest.getBillableItems
|
train
|
def getBillableItems(self):
"""Returns the items to be billed
"""
# Assigned profiles
profiles = self.getProfiles()
# Billable profiles which have a fixed price set
billable_profiles = filter(
lambda pr: pr.getUseAnalysisProfilePrice(), profiles)
# All services contained in the billable profiles
billable_profile_services = reduce(lambda a, b: a+b, map(
lambda profile: profile.getService(), billable_profiles), [])
# Keywords of the contained services
billable_service_keys = map(
lambda s: s.getKeyword(), set(billable_profile_services))
# The billable items contain billable profiles and single selected analyses
billable_items = billable_profiles
# Get the analyses to be billed
exclude_rs = ["retracted", "rejected"]
for analysis in self.getAnalyses(is_active=True):
if analysis.review_state in exclude_rs:
continue
if analysis.getKeyword in billable_service_keys:
continue
billable_items.append(api.get_object(analysis))
return billable_items
|
python
|
{
"resource": ""
}
|
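A simplified sketch of the billing selection above, assuming profiles and analyses are plain dicts: profiles with a fixed price are billed as a whole, and analyses whose keyword is already covered by such a profile are skipped.

def billable_items(profiles, analyses):
    # Profiles with a fixed price are billed as single items
    fixed_price = [p for p in profiles if p.get("use_profile_price")]
    # Keywords of all services covered by those profiles
    covered = set(s for p in fixed_price for s in p.get("services", []))
    items = list(fixed_price)
    for analysis in analyses:
        if analysis["state"] in ("retracted", "rejected"):
            continue
        if analysis["keyword"] in covered:
            continue
        items.append(analysis)
    return items

profiles = [{"use_profile_price": True, "services": ["Ca", "Mg"]}]
analyses = [{"keyword": "Ca", "state": "unassigned"},
            {"keyword": "Fe", "state": "unassigned"}]
print(len(billable_items(profiles, analyses)))  # 2: the profile plus the Fe analysis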
q21005
|
AnalysisRequest.getDiscountAmount
|
train
|
def getDiscountAmount(self):
"""It computes and returns the analysis service's discount amount
without VAT
"""
has_client_discount = self.aq_parent.getMemberDiscountApplies()
if has_client_discount:
discount = Decimal(self.getDefaultMemberDiscount())
return Decimal(self.getSubtotal() * discount / 100)
else:
return 0
|
python
|
{
"resource": ""
}
|
q21006
|
AnalysisRequest.getTotalPrice
|
train
|
def getTotalPrice(self):
"""It gets the discounted price from analyses and profiles to obtain the
total value with the VAT and the discount applied
:returns: analysis request's total price including VATs and discounts
"""
price = (self.getSubtotal() - self.getDiscountAmount() +
self.getVATAmount())
return price
|
python
|
{
"resource": ""
}
|
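For illustration, a worked example of the price composition used by getDiscountAmount and getTotalPrice, with an assumed 10% member discount and an arbitrary VAT amount (how getSubtotal and getVATAmount are computed is not shown here):

from decimal import Decimal

subtotal = Decimal("100.00")                 # getSubtotal()
discount = subtotal * Decimal("10") / 100    # getDiscountAmount() with a 10% discount
vat_amount = Decimal("18.90")                # getVATAmount(), placeholder figure
total = subtotal - discount + vat_amount     # getTotalPrice()
print(total)                                 # 108.90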
q21007
|
AnalysisRequest.getVerifier
|
train
|
def getVerifier(self):
"""Returns the user that verified the whole Analysis Request. Since the
verification is done automatically as soon as all the analyses it
contains are verified, this function returns the user that verified the
last analysis pending.
"""
wtool = getToolByName(self, 'portal_workflow')
mtool = getToolByName(self, 'portal_membership')
verifier = None
# noinspection PyBroadException
try:
review_history = wtool.getInfoFor(self, 'review_history')
except: # noqa FIXME: remove blind except!
return 'access denied'
if not review_history:
return 'no history'
for items in review_history:
action = items.get('action')
if action != 'verify':
continue
actor = items.get('actor')
member = mtool.getMemberById(actor)
verifier = member.getProperty('fullname')
if verifier is None or verifier == '':
verifier = actor
return verifier
|
python
|
{
"resource": ""
}
|
q21008
|
AnalysisRequest.getVerifiersIDs
|
train
|
def getVerifiersIDs(self):
"""Returns the ids from users that have verified at least one analysis
from this Analysis Request
"""
verifiers_ids = list()
for brain in self.getAnalyses():
verifiers_ids += brain.getVerificators
return list(set(verifiers_ids))
|
python
|
{
"resource": ""
}
|
q21009
|
AnalysisRequest.getVerifiers
|
train
|
def getVerifiers(self):
"""Returns the list of lab contacts that have verified at least one
analysis from this Analysis Request
"""
contacts = list()
for verifier in self.getVerifiersIDs():
user = api.get_user(verifier)
contact = api.get_user_contact(user, ["LabContact"])
if contact:
contacts.append(contact)
return contacts
|
python
|
{
"resource": ""
}
|
q21010
|
AnalysisRequest.getQCAnalyses
|
train
|
def getQCAnalyses(self, qctype=None, review_state=None):
"""return the QC analyses performed in the worksheet in which, at
least, one sample of this AR is present.
Depending on qctype value, returns the analyses of:
- 'b': all Blank Reference Samples used in related worksheet/s
- 'c': all Control Reference Samples used in related worksheet/s
- 'd': duplicates only for samples contained in this AR
If qctype==None, returns all types of qc analyses mentioned above
"""
qcanalyses = []
suids = []
ans = self.getAnalyses()
wf = getToolByName(self, 'portal_workflow')
for an in ans:
an = an.getObject()
if an.getServiceUID() not in suids:
suids.append(an.getServiceUID())
def valid_dup(wan):
if wan.portal_type == 'ReferenceAnalysis':
return False
an_state = wf.getInfoFor(wan, 'review_state')
return \
wan.portal_type == 'DuplicateAnalysis' \
and wan.getRequestID() == self.id \
and (review_state is None or an_state in review_state)
def valid_ref(wan):
if wan.portal_type != 'ReferenceAnalysis':
return False
an_state = wf.getInfoFor(wan, 'review_state')
an_reftype = wan.getReferenceType()
return wan.getServiceUID() in suids \
and wan not in qcanalyses \
and (qctype is None or an_reftype == qctype) \
and (review_state is None or an_state in review_state)
for an in ans:
an = an.getObject()
ws = an.getWorksheet()
if not ws:
continue
was = ws.getAnalyses()
for wa in was:
if valid_dup(wa):
qcanalyses.append(wa)
elif valid_ref(wa):
qcanalyses.append(wa)
return qcanalyses
|
python
|
{
"resource": ""
}
|
q21011
|
AnalysisRequest.getResultsRange
|
train
|
def getResultsRange(self):
"""Returns the valid result ranges for the analyses this Analysis
Request contains.
By default uses the result ranges defined in the Analysis Specification
set in "Specification" field if any. Values manually set through
`ResultsRange` field for any given analysis keyword have priority over
the result ranges defined in "Specification" field.
:return: A list of dictionaries, where each dictionary defines the
result range to use for any analysis contained in this Analysis
Request for the keyword specified. Each dictionary has, at least,
the following keys: "keyword", "min", "max"
:rtype: list
"""
specs_range = []
specification = self.getSpecification()
if specification:
specs_range = specification.getResultsRange()
specs_range = specs_range and specs_range or []
# Override with AR's custom ranges
ar_range = self.Schema().getField("ResultsRange").get(self)
if not ar_range:
return specs_range
# Remove those analysis ranges that neither min nor max are floatable
an_specs = [an for an in ar_range if
api.is_floatable(an.get('min', None)) or
api.is_floatable(an.get('max', None))]
# Want to know which analyses need to be overridden
keywords = map(lambda item: item.get('keyword'), an_specs)
# Get rid of those analyses to be overridden
out_specs = [sp for sp in specs_range if sp['keyword'] not in keywords]
# Add manually set ranges
out_specs.extend(an_specs)
return map(lambda spec: ResultsRangeDict(spec), out_specs)
|
python
|
{
"resource": ""
}
|
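A standalone sketch of the merge performed above, assuming result ranges are plain dicts keyed by "keyword": ranges set manually on the request override the ones coming from the Specification, provided their min or max is floatable.

def is_floatable(value):
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False

def merge_ranges(spec_ranges, manual_ranges):
    # Keep only manual ranges with a usable min or max
    manual = [r for r in manual_ranges
              if is_floatable(r.get("min")) or is_floatable(r.get("max"))]
    overridden = set(r["keyword"] for r in manual)
    merged = [r for r in spec_ranges if r["keyword"] not in overridden]
    return merged + manual

spec = [{"keyword": "Ca", "min": 1, "max": 5},
        {"keyword": "Mg", "min": 2, "max": 4}]
manual = [{"keyword": "Ca", "min": 0, "max": 10}]
print(merge_ranges(spec, manual))
# [{'keyword': 'Mg', ...}, {'keyword': 'Ca', 'min': 0, 'max': 10}]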
q21012
|
AnalysisRequest.getSamplingWorkflowEnabled
|
train
|
def getSamplingWorkflowEnabled(self):
"""Returns True if the sample of this Analysis Request has to be
collected by the laboratory personnel
"""
template = self.getTemplate()
if template:
return template.getSamplingRequired()
return self.bika_setup.getSamplingWorkflowEnabled()
|
python
|
{
"resource": ""
}
|
q21013
|
AnalysisRequest.getDepartments
|
train
|
def getDepartments(self):
"""Returns a list of the departments assigned to the Analyses
from this Analysis Request
"""
departments = list()
for analysis in self.getAnalyses(full_objects=True):
department = analysis.getDepartment()
if department and department not in departments:
departments.append(department)
return departments
|
python
|
{
"resource": ""
}
|
q21014
|
AnalysisRequest.getResultsInterpretationByDepartment
|
train
|
def getResultsInterpretationByDepartment(self, department=None):
"""Returns the results interpretation for this Analysis Request
and department. If department not set, returns the results
interpretation tagged as 'General'.
:returns: a dict with the following keys:
{'uid': <department_uid> or 'general', 'richtext': <text/plain>}
"""
uid = department.UID() if department else 'general'
rows = self.Schema()['ResultsInterpretationDepts'].get(self)
row = [row for row in rows if row.get('uid') == uid]
if len(row) > 0:
row = row[0]
elif uid == 'general' \
and hasattr(self, 'getResultsInterpretation') \
and self.getResultsInterpretation():
row = {'uid': uid, 'richtext': self.getResultsInterpretation()}
else:
row = {'uid': uid, 'richtext': ''}
return row
|
python
|
{
"resource": ""
}
|
q21015
|
AnalysisRequest.getPartitions
|
train
|
def getPartitions(self):
"""This functions returns the partitions from the analysis request's
analyses.
:returns: a list with the full partition objects
"""
partitions = []
for analysis in self.getAnalyses(full_objects=True):
if analysis.getSamplePartition() not in partitions:
partitions.append(analysis.getSamplePartition())
return partitions
|
python
|
{
"resource": ""
}
|
q21016
|
AnalysisRequest.isAnalysisServiceHidden
|
train
|
def isAnalysisServiceHidden(self, uid):
"""Checks if the analysis service that match with the uid provided must
be hidden in results. If no hidden assignment has been set for the
analysis in this request, returns the visibility set to the analysis
itself.
Raise a TypeError if the uid is empty or None
Raise a ValueError if there is no hidden assignment in this request or
no analysis service found for this uid.
"""
if not uid:
raise TypeError('None type or empty uid')
sets = self.getAnalysisServiceSettings(uid)
if 'hidden' not in sets:
uc = getToolByName(self, 'uid_catalog')
serv = uc(UID=uid)
if serv and len(serv) == 1:
return serv[0].getObject().getRawHidden()
else:
raise ValueError('{} is not valid'.format(uid))
return sets.get('hidden', False)
|
python
|
{
"resource": ""
}
|
q21017
|
AnalysisRequest.getRejecter
|
train
|
def getRejecter(self):
"""If the Analysis Request has been rejected, returns the user who did the
rejection. If it was not rejected or the current user does not have enough
privileges to access this information, returns None.
"""
wtool = getToolByName(self, 'portal_workflow')
mtool = getToolByName(self, 'portal_membership')
# noinspection PyBroadException
try:
review_history = wtool.getInfoFor(self, 'review_history')
except: # noqa FIXME: remove blind except!
return None
for items in review_history:
action = items.get('action')
if action != 'reject':
continue
actor = items.get('actor')
return mtool.getMemberById(actor)
return None
|
python
|
{
"resource": ""
}
|
q21018
|
AnalysisRequest.getDescendants
|
train
|
def getDescendants(self, all_descendants=False):
"""Returns the descendant Analysis Requests
:param all_descendants: recursively include all descendants
"""
# N.B. full objects returned here from
# `Products.Archetypes.Referenceable.getBRefs`
# -> don't add this method into Metadata
children = self.getBackReferences(
"AnalysisRequestParentAnalysisRequest")
descendants = []
# recursively include all children
if all_descendants:
for child in children:
descendants.append(child)
descendants += child.getDescendants(all_descendants=True)
else:
descendants = children
return descendants
|
python
|
{
"resource": ""
}
|
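A minimal sketch of the recursive traversal above, using a simple node object with a children list in place of the Archetypes back references:

class Node(object):
    def __init__(self, children=None):
        self.children = children or []

    def descendants(self, all_descendants=False):
        result = []
        for child in self.children:
            result.append(child)
            if all_descendants:
                result += child.descendants(all_descendants=True)
        return result

leaf = Node()
root = Node([Node([leaf])])
print(len(root.descendants()))                      # 1: direct children only
print(len(root.descendants(all_descendants=True)))  # 2: the whole subtree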
q21019
|
AnalysisRequest.getDescendantsUIDs
|
train
|
def getDescendantsUIDs(self, all_descendants=False):
"""Returns the UIDs of the descendant Analysis Requests
This method is used as metadata
"""
descendants = self.getDescendants(all_descendants=all_descendants)
return map(api.get_uid, descendants)
|
python
|
{
"resource": ""
}
|
q21020
|
AnalysisRequest.setParentAnalysisRequest
|
train
|
def setParentAnalysisRequest(self, value):
"""Sets a parent analysis request, making the current a partition
"""
self.Schema().getField("ParentAnalysisRequest").set(self, value)
if not value:
noLongerProvides(self, IAnalysisRequestPartition)
else:
alsoProvides(self, IAnalysisRequestPartition)
|
python
|
{
"resource": ""
}
|
q21021
|
AnalysisRequest.setDateReceived
|
train
|
def setDateReceived(self, value):
"""Sets the date received to this analysis request and to secondary
analysis requests
"""
self.Schema().getField('DateReceived').set(self, value)
for secondary in self.getSecondaryAnalysisRequests():
secondary.setDateReceived(value)
secondary.reindexObject(idxs=["getDateReceived", "is_received"])
|
python
|
{
"resource": ""
}
|
q21022
|
AnalysisRequest.addAttachment
|
train
|
def addAttachment(self, attachment):
"""Adds an attachment or a list of attachments to the Analysis Request
"""
if not isinstance(attachment, (list, tuple)):
attachment = [attachment]
original = self.getAttachment() or []
# Function addAttachment can accept brain, objects or uids
original = map(api.get_uid, original)
attachment = map(api.get_uid, attachment)
# Boil out attachments already assigned to this Analysis Request
attachment = filter(lambda at: at not in original, attachment)
if attachment:
original.extend(attachment)
self.setAttachment(original)
|
python
|
{
"resource": ""
}
|
q21023
|
AddDuplicateView.get_container_mapping
|
train
|
def get_container_mapping(self):
"""Returns a mapping of container -> postition
"""
layout = self.context.getLayout()
container_mapping = {}
for slot in layout:
if slot["type"] != "a":
continue
position = slot["position"]
container_uid = slot["container_uid"]
container_mapping[container_uid] = position
return container_mapping
|
python
|
{
"resource": ""
}
|
q21024
|
AddDuplicateView.folderitems
|
train
|
def folderitems(self):
"""Custom folderitems for Worksheet ARs
"""
items = []
for ar, pos in self.get_container_mapping().items():
ar = api.get_object_by_uid(ar)
ar_id = api.get_id(ar)
ar_uid = api.get_uid(ar)
ar_url = api.get_url(ar)
ar_title = api.get_title(ar)
url = api.get_url(ar)
client = ar.getClient()
client_url = api.get_url(client)
client_title = api.get_title(client)
item = {
"obj": ar,
"id": ar_id,
"uid": ar_uid,
"title": ar_title,
"type_class": "contenttype-AnalysisRequest",
"url": url,
"relative_url": url,
"view_url": url,
"Position": pos,
"RequestID": ar_id,
"Client": client_title,
"created": self.ulocalized_time(ar.created(), long_format=1),
"replace": {
"Client": get_link(client_url, value=client_title),
"RequestID": get_link(ar_url, value=ar_title),
},
"before": {},
"after": {},
"choices": {},
"class": {},
"state_class": "state-active",
"allow_edit": [],
"required": [],
}
items.append(item)
items = sorted(items, key=itemgetter("Position"))
return items
|
python
|
{
"resource": ""
}
|
q21025
|
IDServerView.get_next_id_for
|
train
|
def get_next_id_for(self, key):
"""Get a preview of the next number
"""
portal_type = key.split("-")[0]
config = get_config(None, portal_type=portal_type)
id_template = config.get("form", "")
number = self.storage.get(key) + 1
spec = {
"seq": number,
"alpha": Alphanumber(number),
"year": get_current_year(),
"parent_analysisrequest": "ParentAR",
"parent_ar_id": "ParentARId",
"sampleType": key.replace(portal_type, "").strip("-"),
}
return id_template.format(**spec)
|
python
|
{
"resource": ""
}
|
q21026
|
IDServerView.to_int
|
train
|
def to_int(self, number, default=0):
"""Returns an integer
"""
try:
return int(number)
except (KeyError, ValueError):
return self.to_int(default, 0)
|
python
|
{
"resource": ""
}
|
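Note that int() raises TypeError (not KeyError) for values like None, so the except clause above may not catch every conversion failure. A non-recursive sketch of the same fallback behaviour, assuming the default itself is always a valid integer:

def to_int(value, default=0):
    # int() raises ValueError for malformed strings and TypeError for None
    try:
        return int(value)
    except (TypeError, ValueError):
        return default

print(to_int("42"))     # 42
print(to_int(None))     # 0
print(to_int("x", 1))   # 1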
q21027
|
IDServerView.set_seed
|
train
|
def set_seed(self, key, value):
"""Set a number of the number generator
"""
number_generator = getUtility(INumberGenerator)
return number_generator.set_number(key, self.to_int(value))
|
python
|
{
"resource": ""
}
|
q21028
|
IDServerView.seed
|
train
|
def seed(self):
""" Reset the number from which the next generated sequence start.
If you seed at 100, next seed will be 101
"""
form = self.request.form
prefix = form.get('prefix', None)
if prefix is None:
return 'No prefix provided'
seed = form.get('seed', None)
if seed is None:
return 'No seed provided'
if not seed.isdigit():
return 'Seed must be a digit'
seed = int(seed)
if seed < 0:
return 'Seed cannot be negative'
new_seq = self.set_seed(prefix, seed)
return 'IDServerView: "%s" seeded to %s' % (prefix, new_seq)
|
python
|
{
"resource": ""
}
|
q21029
|
DashboardView.check_dashboard_cookie
|
train
|
def check_dashboard_cookie(self):
"""
Check if the dashboard cookie should exist through bikasetup
configuration.
If it should exist but doesn't exist yet, the function creates it
with all values as default.
If it should exist and already exists, it returns the value.
Otherwise, the function returns None.
:return: a dictionary of strings
"""
# Getting cookie
cookie_raw = self.request.get(DASHBOARD_FILTER_COOKIE, None)
# If it doesn't exist, create it with default values
if cookie_raw is None:
cookie_raw = self._create_raw_data()
self.request.response.setCookie(
DASHBOARD_FILTER_COOKIE,
json.dumps(cookie_raw),
quoted=False,
path='/')
return cookie_raw
return get_strings(json.loads(cookie_raw))
|
python
|
{
"resource": ""
}
|
q21030
|
DashboardView.is_filter_selected
|
train
|
def is_filter_selected(self, selection_id, value):
"""
Compares whether the 'selection_id' parameter value saved in the
cookie is the same value as the "value" parameter.
:param selection_id: a string as a dashboard_cookie key.
:param value: The value to compare against the value from
dashboard_cookie key.
:return: Boolean.
"""
selected = self.dashboard_cookie.get(selection_id)
return selected == value
|
python
|
{
"resource": ""
}
|
q21031
|
DashboardView._create_raw_data
|
train
|
def _create_raw_data(self):
"""
Gathers the different sections ids and creates a string as first
cookie data.
:return: A dictionary like:
{'analyses':'all','analysisrequest':'all','worksheets':'all'}
"""
result = {}
for section in self.get_sections():
result[section.get('id')] = 'all'
return result
|
python
|
{
"resource": ""
}
|
q21032
|
DashboardView._fill_dates_evo
|
train
|
def _fill_dates_evo(self, query_json, catalog_name, periodicity):
"""Returns an array of dictionaries, where each dictionary contains the
amount of items created at a given date and grouped by review_state,
based on the passed in periodicity.
This is an expensive function that will not be called more than once
every 2 hours (note cache decorator with `time() // (60 * 60 * 2)
"""
outevoidx = {}
outevo = []
days = 1
if periodicity == PERIODICITY_YEARLY:
days = 336
elif periodicity == PERIODICITY_BIANNUAL:
days = 168
elif periodicity == PERIODICITY_QUARTERLY:
days = 84
elif periodicity == PERIODICITY_MONTHLY:
days = 28
elif periodicity == PERIODICITY_WEEKLY:
days = 7
elif periodicity == PERIODICITY_ALL:
days = 336
# Get the date range
date_from, date_to = self.get_date_range(periodicity)
query = json.loads(query_json)
if 'review_state' in query:
del query['review_state']
query['sort_on'] = 'created'
query['created'] = {'query': (date_from, date_to),
'range': 'min:max'}
otherstate = _('Other status')
statesmap = self.get_states_map(query['portal_type'])
stats = statesmap.values()
stats.sort()
stats.append(otherstate)
statscount = {s: 0 for s in stats}
# Add first all periods, cause we want all segments to be displayed
curr = date_from.asdatetime()
end = date_to.asdatetime()
while curr < end:
currstr = self._getDateStr(periodicity, DateTime(curr))
if currstr not in outevoidx:
outdict = {'date': currstr}
for k in stats:
outdict[k] = 0
outevo.append(outdict)
outevoidx[currstr] = len(outevo)-1
curr = curr + datetime.timedelta(days=days)
brains = search(query, catalog_name)
for brain in brains:
created = brain.created
state = brain.review_state
if state not in statesmap:
logger.warn("'%s' State for '%s' not available" % (state, query['portal_type']))
state = statesmap[state] if state in statesmap else otherstate
created = self._getDateStr(periodicity, created)
statscount[state] += 1
if created in outevoidx:
oidx = outevoidx[created]
if state in outevo[oidx]:
outevo[oidx][state] += 1
else:
outevo[oidx][state] = 1
else:
# Create new row
currow = {'date': created,
state: 1}
outevo.append(currow)
# Remove all those states for which there is no data
rstates = [k for k, v in statscount.items() if v == 0]
for o in outevo:
for r in rstates:
if r in o:
del o[r]
# Sort available states by number of occurrences, descending
sorted_states = sorted(statscount.items(), key=itemgetter(1))
sorted_states = map(lambda item: item[0], sorted_states)
sorted_states.reverse()
return {'data': outevo, 'states': sorted_states}
|
python
|
{
"resource": ""
}
|
q21033
|
DashboardView._update_criteria_with_filters
|
train
|
def _update_criteria_with_filters(self, query, section_name):
"""
This method updates the 'query' dictionary with the criteria stored in
dashboard cookie.
:param query: A dictionary with search criteria.
:param section_name: The dashboard section name
:return: The 'query' dictionary
"""
if self.dashboard_cookie is None:
return query
cookie_criteria = self.dashboard_cookie.get(section_name)
if cookie_criteria == 'mine':
query['Creator'] = self.member.getId()
return query
|
python
|
{
"resource": ""
}
|
q21034
|
ReferenceSample.getReferenceAnalysesService
|
train
|
def getReferenceAnalysesService(self, service_uid):
""" return all analyses linked to this reference sample for a service """
analyses = []
for analysis in self.objectValues('ReferenceAnalysis'):
if analysis.getServiceUID() == service_uid:
analyses.append(analysis)
return analyses
|
python
|
{
"resource": ""
}
|
q21035
|
ReferenceSample.addReferenceAnalysis
|
train
|
def addReferenceAnalysis(self, service):
"""
Creates a new Reference Analysis object based on this Sample
Reference, with the type passed in and associates the newly
created object to the Analysis Service passed in.
:param service: Object, brain or UID of the Analysis Service
:param reference_type: type of ReferenceAnalysis, where 'b' is
Blank and 'c' is Control
:type reference_type: A String
:returns: the newly created Reference Analysis
:rtype: ReferenceAnalysis
"""
if api.is_uid(service) or api.is_brain(service):
return self.addReferenceAnalysis(api.get_object(service))
if not IAnalysisService.providedBy(service):
return None
interim_fields = service.getInterimFields()
analysis = _createObjectByType("ReferenceAnalysis", self, id=tmpID())
# Copy all the values from the schema
# TODO Add Service as a param in ReferenceAnalysis constructor and do
# this logic there instead of here
discard = ['id', ]
keys = service.Schema().keys()
for key in keys:
if key in discard:
continue
if key not in analysis.Schema().keys():
continue
val = service.getField(key).get(service)
# Campbell's mental note:never ever use '.set()' directly to a
# field. If you can't use the setter, then use the mutator in order
# to give the value. We have realized that in some cases using
# 'set' when the value is a string, it saves the value
# as unicode instead of plain string.
# analysis.getField(key).set(analysis, val)
mutator_name = analysis.getField(key).mutator
mutator = getattr(analysis, mutator_name)
mutator(val)
analysis.setAnalysisService(service)
ref_type = self.getBlank() and 'b' or 'c'
analysis.setReferenceType(ref_type)
analysis.setInterimFields(interim_fields)
analysis.unmarkCreationFlag()
renameAfterCreation(analysis)
return analysis
|
python
|
{
"resource": ""
}
|
q21036
|
ReferenceSample.getServices
|
train
|
def getServices(self):
""" get all services for this Sample """
tool = getToolByName(self, REFERENCE_CATALOG)
services = []
for spec in self.getReferenceResults():
service = tool.lookupObject(spec['uid'])
services.append(service)
return services
|
python
|
{
"resource": ""
}
|
q21037
|
ReferenceSample.isValid
|
train
|
def isValid(self):
"""
Returns whether the current Reference Sample is valid, i.e. the sample
has neither expired nor been disposed of.
"""
today = DateTime()
expiry_date = self.getExpiryDate()
if expiry_date and today > expiry_date:
return False
# TODO: Do We really need ExpiryDate + DateExpired? Any difference?
date_expired = self.getDateExpired()
if date_expired and today > date_expired:
return False
date_disposed = self.getDateDisposed()
if date_disposed and today > date_disposed:
return False
return True
|
python
|
{
"resource": ""
}
|
q21038
|
after_submit
|
train
|
def after_submit(analysis):
"""Method triggered after a 'submit' transition for the analysis passed in
is performed. Promotes the submit transition to the Worksheet to which the
analysis belongs. Note that for the worksheet there is already a guard
that assures the transition to the worksheet will only be performed if all
analyses within the worksheet have already been transitioned.
This function is called automatically by
bika.lims.workflow.AfterTransitionEventHandler
"""
# Mark this analysis as ISubmitted
alsoProvides(analysis, ISubmitted)
# Promote to analyses this analysis depends on
promote_to_dependencies(analysis, "submit")
# TODO: REFLEX TO REMOVE
# Do all the reflex rules process
if IRequestAnalysis.providedBy(analysis):
analysis._reflex_rule_process('submit')
# Promote transition to worksheet
ws = analysis.getWorksheet()
if ws:
doActionFor(ws, 'submit')
push_reindex_to_actions_pool(ws)
# Promote transition to Analysis Request
if IRequestAnalysis.providedBy(analysis):
doActionFor(analysis.getRequest(), 'submit')
reindex_request(analysis)
|
python
|
{
"resource": ""
}
|
q21039
|
after_retract
|
train
|
def after_retract(analysis):
"""Function triggered after a 'retract' transition for the analysis passed
in is performed. The analysis transitions to "retracted" state and a new
copy of the analysis is created. The copy initial state is "unassigned",
unless the retracted analysis was assigned to a worksheet. In such case,
the copy is transitioned to 'assigned' state too
"""
# Retract our dependents (analyses that depend on this analysis)
cascade_to_dependents(analysis, "retract")
# Retract our dependencies (analyses this analysis depends on)
promote_to_dependencies(analysis, "retract")
# Rename the analysis to make way for its successor.
# Support multiple retractions by renaming to *-0, *-1, etc
parent = analysis.aq_parent
keyword = analysis.getKeyword()
# Get only those that are analyses and with same keyword as the original
analyses = parent.getAnalyses(full_objects=True)
analyses = filter(lambda an: an.getKeyword() == keyword, analyses)
# TODO This needs to get managed by Id server in a nearly future!
new_id = '{}-{}'.format(keyword, len(analyses))
# Create a copy of the retracted analysis
an_uid = api.get_uid(analysis)
new_analysis = create_analysis(parent, analysis, id=new_id, RetestOf=an_uid)
new_analysis.setResult("")
new_analysis.setResultCaptureDate(None)
new_analysis.reindexObject()
logger.info("Retest for {} ({}) created: {}".format(
keyword, api.get_id(analysis), api.get_id(new_analysis)))
# Assign the new analysis to this same worksheet, if any.
worksheet = analysis.getWorksheet()
if worksheet:
worksheet.addAnalysis(new_analysis)
# Try to rollback the Analysis Request
if IRequestAnalysis.providedBy(analysis):
doActionFor(analysis.getRequest(), "rollback_to_receive")
reindex_request(analysis)
|
python
|
{
"resource": ""
}
|
q21040
|
after_reject
|
train
|
def after_reject(analysis):
"""Function triggered after the "reject" transition for the analysis passed
in is performed."""
# Remove from the worksheet
remove_analysis_from_worksheet(analysis)
# Reject our dependents (analyses that depend on this analysis)
cascade_to_dependents(analysis, "reject")
if IRequestAnalysis.providedBy(analysis):
# Try verify (for when remaining analyses are in 'verified')
doActionFor(analysis.getRequest(), "verify")
# Try submit (remaining analyses are in 'to_be_verified')
doActionFor(analysis.getRequest(), "submit")
# Try rollback (no remaining analyses or some not submitted)
doActionFor(analysis.getRequest(), "rollback_to_receive")
reindex_request(analysis)
|
python
|
{
"resource": ""
}
|
q21041
|
reindex_request
|
train
|
def reindex_request(analysis, idxs=None):
"""Reindex the Analysis Request the analysis belongs to, as well as the
ancestors recursively
"""
if not IRequestAnalysis.providedBy(analysis) or \
IDuplicateAnalysis.providedBy(analysis):
# Analysis not directly bound to an Analysis Request. Do nothing
return
n_idxs = ['assigned_state', 'getDueDate']
n_idxs = idxs and list(set(idxs + n_idxs)) or n_idxs
request = analysis.getRequest()
ancestors = [request] + request.getAncestors(all_ancestors=True)
for ancestor in ancestors:
push_reindex_to_actions_pool(ancestor, n_idxs)
|
python
|
{
"resource": ""
}
|
q21042
|
remove_analysis_from_worksheet
|
train
|
def remove_analysis_from_worksheet(analysis):
"""Removes the analysis passed in from the worksheet, if assigned to any
"""
worksheet = analysis.getWorksheet()
if not worksheet:
return
analyses = filter(lambda an: an != analysis, worksheet.getAnalyses())
worksheet.setAnalyses(analyses)
worksheet.purgeLayout()
if analyses:
# Maybe this analysis was the only one that was not yet submitted or
# verified, so try to submit or verify the Worksheet to be aligned
# with the current states of the analyses it contains.
doActionFor(worksheet, "submit")
doActionFor(worksheet, "verify")
else:
# We've removed all analyses. Rollback to "open"
doActionFor(worksheet, "rollback_to_open")
# Reindex the Worksheet
idxs = ["getAnalysesUIDs"]
push_reindex_to_actions_pool(worksheet, idxs=idxs)
|
python
|
{
"resource": ""
}
|
q21043
|
remove_qc_reports
|
train
|
def remove_qc_reports(portal):
"""Removes the action Quality Control from Reports
"""
logger.info("Removing Reports > Quality Control ...")
ti = portal.reports.getTypeInfo()
actions = map(lambda action: action.id, ti._actions)
for index, action in enumerate(actions, start=0):
if action == 'qualitycontrol':
ti.deleteActions([index])
break
logger.info("Removing Reports > Quality Control [DONE]")
|
python
|
{
"resource": ""
}
|
q21044
|
purge_portlets
|
train
|
def purge_portlets(portal):
"""Remove old portlets. Leave the Navigation portlet only
"""
logger.info("Purging portlets ...")
def remove_portlets(context_portlet):
mapping = portal.restrictedTraverse(context_portlet)
for key in mapping.keys():
if key not in PORTLETS_TO_PURGE:
logger.info("Skipping portlet: '{}'".format(key))
continue
logger.info("Removing portlet: '{}'".format(key))
del mapping[key]
remove_portlets("++contextportlets++plone.leftcolumn")
remove_portlets("++contextportlets++plone.rightcolumn")
# Reimport the portlets profile
setup = portal.portal_setup
setup.runImportStepFromProfile(profile, 'portlets')
logger.info("Purging portlets [DONE]")
|
python
|
{
"resource": ""
}
|
q21045
|
setup_partitioning
|
train
|
def setup_partitioning(portal):
"""Setups the enhanced partitioning system
"""
logger.info("Setting up the enhanced partitioning system")
# Add "Create partition" transition
add_create_partition_transition(portal)
# Add getAncestorsUIDs index in analyses catalog
add_partitioning_indexes(portal)
# Adds metadata columns for partitioning
add_partitioning_metadata(portal)
# Setup default ID formatting for partitions
set_partitions_id_formatting(portal)
|
python
|
{
"resource": ""
}
|
q21046
|
add_partitioning_metadata
|
train
|
def add_partitioning_metadata(portal):
"""Add metadata columns required for partitioning machinery
"""
logger.info("Adding partitioning metadata")
add_metadata(portal, CATALOG_ANALYSIS_REQUEST_LISTING,
'getRawParentAnalysisRequest')
add_metadata(portal, CATALOG_ANALYSIS_REQUEST_LISTING,
"getDescendantsUIDs")
|
python
|
{
"resource": ""
}
|
q21047
|
remove_sample_prep_workflow
|
train
|
def remove_sample_prep_workflow(portal):
"""Removes sample_prep and sample_prep_complete transitions
"""
# There is no need to walk through objects because of:
# https://github.com/senaite/senaite.core/blob/master/bika/lims/upgrade/v01_02_008.py#L187
logger.info("Removing 'sample_prep' related states and transitions ...")
workflow_ids = ["bika_sample_workflow",
"bika_ar_workflow",
"bika_analysis_workflow"]
to_remove = ["sample_prep", "sample_prep_complete"]
wf_tool = api.get_tool("portal_workflow")
for wf_id in workflow_ids:
workflow = wf_tool.getWorkflowById(wf_id)
for state_trans in to_remove:
if state_trans in workflow.transitions:
logger.info("Removing transition '{}' from '{}'"
.format(state_trans, wf_id))
workflow.transitions.deleteTransitions([state_trans])
if state_trans in workflow.states:
logger.info("Removing state '{}' from '{}'"
.format(state_trans, wf_id))
workflow.states.deleteStates([state_trans])
|
python
|
{
"resource": ""
}
|
q21048
|
reindex_multifiles
|
train
|
def reindex_multifiles(portal):
"""Reindex Multifiles to be searchable by the catalog
"""
logger.info("Reindexing Multifiles ...")
brains = api.search(dict(portal_type="Multifile"), "bika_setup_catalog")
total = len(brains)
for num, brain in enumerate(brains):
if num % 100 == 0:
logger.info("Reindexing Multifile: {0}/{1}".format(num, total))
obj = api.get_object(brain)
obj.reindexObject()
|
python
|
{
"resource": ""
}
|
q21049
|
remove_not_requested_analyses_view
|
train
|
def remove_not_requested_analyses_view(portal):
"""Remove the view 'Not requested analyses" from inside AR
"""
logger.info("Removing 'Analyses not requested' view ...")
ar_ptype = portal.portal_types.AnalysisRequest
ar_ptype._actions = filter(lambda act: act.id != "analyses_not_requested",
ar_ptype.listActions())
|
python
|
{
"resource": ""
}
|
q21050
|
get_catalogs
|
train
|
def get_catalogs(portal):
"""Returns the catalogs from the site
"""
res = []
for object in portal.objectValues():
if ICatalogTool.providedBy(object):
res.append(object)
elif IZCatalog.providedBy(object):
res.append(object)
res.sort()
return res
|
python
|
{
"resource": ""
}
|
q21051
|
add_worksheet_indexes
|
train
|
def add_worksheet_indexes(portal):
"""Add indexes for better worksheet handling
"""
logger.info("Adding worksheet indexes")
add_index(portal, catalog_id="bika_analysis_catalog",
index_name="getCategoryTitle",
index_attribute="getCategoryTitle",
index_metatype="FieldIndex")
|
python
|
{
"resource": ""
}
|
q21052
|
remove_bika_listing_resources
|
train
|
def remove_bika_listing_resources(portal):
"""Remove all bika_listing resources
"""
logger.info("Removing bika_listing resouces")
REMOVE_JS = [
"++resource++bika.lims.js/bika.lims.bikalisting.js",
"++resource++bika.lims.js/bika.lims.bikalistingfilterbar.js",
]
REMOVE_CSS = [
"bika_listing.css",
]
for js in REMOVE_JS:
logger.info("********** Unregistering JS %s" % js)
portal.portal_javascripts.unregisterResource(js)
for css in REMOVE_CSS:
logger.info("********** Unregistering CSS %s" % css)
portal.portal_css.unregisterResource(css)
|
python
|
{
"resource": ""
}
|
q21053
|
hide_samples
|
train
|
def hide_samples(portal):
"""Removes samples views from everywhere, related indexes, etc.
"""
logger.info("Removing Samples from navbar ...")
if "samples" in portal:
portal.manage_delObjects(["samples"])
def remove_samples_action(content_type):
type_info = content_type.getTypeInfo()
actions = map(lambda action: action.id, type_info._actions)
for index, action in enumerate(actions, start=0):
if action == 'samples':
type_info.deleteActions([index])
break
def remove_actions_from_sample(sample):
type_info = sample.getTypeInfo()
idxs = [index for index, value in enumerate(type_info._actions)]
type_info.deleteActions(idxs)
logger.info("Removing Samples action view from inside Clients ...")
for client in portal.clients.objectValues("Client"):
remove_samples_action(client)
logger.info("Removing Samples action view from inside Batches ...")
for batch in portal.batches.objectValues("Batch"):
remove_samples_action(batch)
logger.info("Removing actions from inside Samples ...")
for sample in api.search(dict(portal_type="Sample"), "bika_catalog"):
remove_actions_from_sample(api.get_object(sample))
commit_transaction(portal)
|
python
|
{
"resource": ""
}
|
q21054
|
fix_ar_analyses_inconsistencies
|
train
|
def fix_ar_analyses_inconsistencies(portal):
"""Fixes inconsistencies between analyses and the ARs they belong to when
the AR is in a "cancelled", "invalidated" or "rejected state
"""
def fix_analyses(request, status):
wf_id = "bika_analysis_workflow"
workflow = api.get_tool("portal_workflow").getWorkflowById(wf_id)
review_states = ['assigned', 'unassigned', 'to_be_verified']
query = dict(portal_type="Analysis",
getRequestUID=api.get_uid(request),
review_state=review_states)
for brain in api.search(query, CATALOG_ANALYSIS_LISTING):
analysis = api.get_object(brain)
# If the analysis is assigned to a worksheet, unassign first
ws = analysis.getWorksheet()
if ws:
remove_analysis_from_worksheet(analysis)
reindex_request(analysis)
# Force the new state
changeWorkflowState(analysis, wf_id, status)
workflow.updateRoleMappingsFor(analysis)
analysis.reindexObject(idxs=["review_state", "is_active"])
def fix_ar_analyses(status, wf_state_id="review_state"):
brains = api.search({wf_state_id: status},
CATALOG_ANALYSIS_REQUEST_LISTING)
total = len(brains)
for num, brain in enumerate(brains):
if num % 100 == 0:
logger.info("Fixing inconsistent analyses from {} ARs: {}/{}"
.format(status, num, total))
fix_analyses(brain, status)
logger.info("Fixing Analysis Request - Analyses inconsistencies ...")
pool = ActionHandlerPool.get_instance()
pool.queue_pool()
fix_ar_analyses("cancelled")
fix_ar_analyses("invalid")
fix_ar_analyses("rejected")
pool.resume()
commit_transaction(portal)
|
python
|
{
"resource": ""
}
|
q21055
|
add_worksheet_progress_percentage
|
train
|
def add_worksheet_progress_percentage(portal):
"""Adds getProgressPercentage metadata to worksheets catalog
"""
add_metadata(portal, CATALOG_WORKSHEET_LISTING, "getProgressPercentage")
logger.info("Reindexing Worksheets ...")
query = dict(portal_type="Worksheet")
brains = api.search(query, CATALOG_WORKSHEET_LISTING)
total = len(brains)
for num, brain in enumerate(brains):
if num % 100 == 0:
logger.info("Reindexing open Worksheets: {}/{}"
.format(num, total))
worksheet = api.get_object(brain)
worksheet.reindexObject()
|
python
|
{
"resource": ""
}
|
q21056
|
remove_get_department_uids
|
train
|
def remove_get_department_uids(portal):
"""Removes getDepartmentUIDs indexes and metadata
"""
logger.info("Removing filtering by department ...")
del_index(portal, "bika_catalog", "getDepartmentUIDs")
del_index(portal, "bika_setup_catalog", "getDepartmentUID")
del_index(portal, CATALOG_ANALYSIS_REQUEST_LISTING, "getDepartmentUIDs")
del_index(portal, CATALOG_WORKSHEET_LISTING, "getDepartmentUIDs")
del_index(portal, CATALOG_ANALYSIS_LISTING, "getDepartmentUID")
del_metadata(portal, CATALOG_ANALYSIS_REQUEST_LISTING, "getDepartmentUIDs")
del_metadata(portal, CATALOG_WORKSHEET_LISTING, "getDepartmentUIDs")
del_metadata(portal, CATALOG_ANALYSIS_LISTING, "getDepartmentUID")
|
python
|
{
"resource": ""
}
|
q21057
|
change_analysis_requests_id_formatting
|
train
|
def change_analysis_requests_id_formatting(portal, p_type="AnalysisRequest"):
"""Applies the system's Sample ID Formatting to Analysis Request
"""
ar_id_format = dict(
form='{sampleType}-{seq:04d}',
portal_type='AnalysisRequest',
prefix='analysisrequest',
sequence_type='generated',
counter_type='',
split_length=1)
bs = portal.bika_setup
id_formatting = bs.getIDFormatting()
ar_format = filter(lambda id: id["portal_type"] == p_type, id_formatting)
if p_type=="AnalysisRequest":
logger.info("Set ID Format for Analysis Request portal_type ...")
if not ar_format or "sample" in ar_format[0]["form"]:
# Copy the ID formatting set for Sample
change_analysis_requests_id_formatting(portal, p_type="Sample")
return
else:
logger.info("ID Format for Analysis Request already set: {} [SKIP]"
.format(ar_format[0]["form"]))
return
else:
ar_format = ar_format and ar_format[0].copy() or ar_id_format
# Set the Analysis Request ID Format
ar_id_format.update(ar_format)
ar_id_format["portal_type"] ="AnalysisRequest"
ar_id_format["prefix"] = "analysisrequest"
set_id_format(portal, ar_id_format)
# Find out the last ID for Sample and reseed AR to prevent ID already taken
# errors on AR creation
if p_type == "Sample":
number_generator = getUtility(INumberGenerator)
ar_keys = dict()
ar_keys_prev = dict()
for key, value in number_generator.storage.items():
if "sample-" in key:
ar_key = key.replace("sample-", "analysisrequest-")
ar_keys[ar_key] = api.to_int(value, 0)
elif "analysisrequest-" in key:
ar_keys_prev[key] = api.to_int(value, 0)
for key, value in ar_keys.items():
if key in ar_keys_prev:
# Maybe this upgrade step has already been run, so we don't
# want the ar IDs to be reseeded again!
if value <= ar_keys_prev[key]:
logger.info("ID for '{}' already seeded to '{}' [SKIP]"
.format(key, ar_keys_prev[key]))
continue
logger.info("Seeding {} to {}".format(key, value))
number_generator.set_number(key, value)
|
python
|
{
"resource": ""
}
|
q21058
|
set_id_format
|
train
|
def set_id_format(portal, format):
"""Sets the id formatting in setup for the format provided
"""
bs = portal.bika_setup
if 'portal_type' not in format:
return
logger.info("Applying format {} for {}".format(format.get('form', ''),
format.get(
'portal_type')))
portal_type = format['portal_type']
ids = list()
id_map = bs.getIDFormatting()
for record in id_map:
if record.get('portal_type', '') == portal_type:
continue
ids.append(record)
ids.append(format)
bs.setIDFormatting(ids)
|
python
|
{
"resource": ""
}
|
q21059
|
remove_stale_javascripts
|
train
|
def remove_stale_javascripts(portal):
"""Removes stale javascripts
"""
logger.info("Removing stale javascripts ...")
for js in JAVASCRIPTS_TO_REMOVE:
logger.info("Unregistering JS %s" % js)
portal.portal_javascripts.unregisterResource(js)
|
python
|
{
"resource": ""
}
|
q21060
|
remove_stale_css
|
train
|
def remove_stale_css(portal):
"""Removes stale CSS
"""
logger.info("Removing stale css ...")
for css in CSS_TO_REMOVE:
logger.info("Unregistering CSS %s" % css)
portal.portal_css.unregisterResource(css)
|
python
|
{
"resource": ""
}
|
q21061
|
remove_stale_indexes_from_bika_catalog
|
train
|
def remove_stale_indexes_from_bika_catalog(portal):
"""Removes stale indexes and metadata from bika_catalog. Most of these
indexes and metadata were used for Samples, but they are no longer used.
"""
logger.info("Removing stale indexes and metadata from bika_catalog ...")
cat_id = "bika_catalog"
indexes_to_remove = [
"getAnalyst",
"getAnalysts",
"getAnalysisService",
"getClientOrderNumber",
"getClientReference",
"getClientSampleID",
"getContactTitle",
"getDateDisposed",
"getDateExpired",
"getDateOpened",
"getDatePublished",
"getInvoiced",
"getPreserver",
"getSamplePointTitle",
"getSamplePointUID",
"getSampler",
"getScheduledSamplingSampler",
"getSamplingDate",
"getWorksheetTemplateTitle",
"BatchUID",
]
metadata_to_remove = [
"getAnalysts",
"getClientOrderNumber",
"getClientReference",
"getClientSampleID",
"getContactTitle",
"getSamplePointTitle",
"getAnalysisService",
"getDatePublished",
]
for index in indexes_to_remove:
del_index(portal, cat_id, index)
for metadata in metadata_to_remove:
del_metadata(portal, cat_id, metadata)
commit_transaction(portal)
|
python
|
{
"resource": ""
}
|
q21062
|
fix_worksheet_status_inconsistencies
|
train
|
def fix_worksheet_status_inconsistencies(portal):
"""Walks through open worksheets and transition them to 'verified' or
'to_be_verified' if all their analyses are not in an open status
"""
logger.info("Fixing worksheet inconsistencies ...")
query = dict(portal_type="Worksheet",
review_state=["open", "to_be_verified"])
brains = api.search(query, CATALOG_WORKSHEET_LISTING)
total = len(brains)
for num, brain in enumerate(brains):
success = False
if num % 100 == 0:
logger.info("Fixing worksheet inconsistencies: {}/{}"
.format(num, total))
# Note we don't check anything, WS guards for "submit" and "verify"
# will take care of checking if the status of contained analyses allows
# the transition.
worksheet = api.get_object(brain)
if api.get_workflow_status_of(worksheet) == "open":
success, msg = do_action_for(worksheet, "submit")
elif api.get_workflow_status_of(worksheet) == "to_be_verified":
success, msg = do_action_for(worksheet, "verify")
if success:
logger.info("Worksheet {} transitioned to 'to_be_verified'"
.format(worksheet.getId()))
success, msg = do_action_for(worksheet, "verify")
if success:
logger.info("Worksheet {} transitioned to 'verified'"
.format(worksheet.getId()))
commit_transaction(portal)
|
python
|
{
"resource": ""
}
|
q21063
|
apply_analysis_request_partition_interface
|
train
|
def apply_analysis_request_partition_interface(portal):
"""Walks trhough all AR-like partitions registered in the system and
applies the IAnalysisRequestPartition marker interface to them
"""
logger.info("Applying 'IAnalysisRequestPartition' marker interface ...")
query = dict(portal_type="AnalysisRequest", isRootAncestor=False)
brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING)
total = len(brains)
for num, brain in enumerate(brains):
if num % 100 == 0:
logger.info("Applying 'IAnalysisRequestPartition' interface: {}/{}"
.format(num, total))
ar = api.get_object(brain)
if IAnalysisRequestPartition.providedBy(ar):
continue
if ar.getParentAnalysisRequest():
alsoProvides(ar, IAnalysisRequestPartition)
commit_transaction(portal)
|
python
|
{
"resource": ""
}
|
q21064
|
update_notify_on_sample_invalidation
|
train
|
def update_notify_on_sample_invalidation(portal):
"""The name of the Setup field was NotifyOnARRetract, so it was
confusing. There were also two fields "NotifyOnRejection"
"""
setup = api.get_setup()
# NotifyOnARRetract --> NotifyOnSampleInvalidation
old_value = setup.__dict__.get("NotifyOnARRetract", True)
setup.setNotifyOnSampleInvalidation(old_value)
# NotifyOnRejection --> NotifyOnSampleRejection
old_value = setup.__dict__.get("NotifyOnRejection", False)
setup.setNotifyOnSampleRejection(old_value)
|
python
|
{
"resource": ""
}
|
q21065
|
set_secondary_id_formatting
|
train
|
def set_secondary_id_formatting(portal):
"""Sets the default id formatting for secondary ARs
"""
secondary_id_format = dict(
form="{parent_ar_id}-S{secondary_count:02d}",
portal_type="AnalysisRequestSecondary",
prefix="analysisrequestsecondary",
sequence_type="")
set_id_format(portal, secondary_id_format)
|
python
|
{
"resource": ""
}
|
q21066
|
reindex_submitted_analyses
|
train
|
def reindex_submitted_analyses(portal):
"""Reindex submitted analyses
"""
logger.info("Reindex submitted analyses")
brains = api.search({}, "bika_analysis_catalog")
total = len(brains)
logger.info("Processing {} analyses".format(total))
for num, brain in enumerate(brains):
# skip analyses which have an analyst
if brain.getAnalyst:
continue
# reindex analyses which have no analyst set, but a result
if brain.getResult not in ["", None]:
analysis = brain.getObject()
analysis.reindexObject()
if num > 0 and num % 5000 == 0:
logger.info("Commiting reindexed analyses {}/{} ..."
.format(num, total))
transaction.commit()
|
python
|
{
"resource": ""
}
|
q21067
|
remove_invoices
|
train
|
def remove_invoices(portal):
"""Moves all existing invoices inside the client and removes the invoices
folder with the invoice batches
"""
logger.info("Unlink Invoices")
invoices = portal.get("invoices")
if invoices is None:
return
for batch in invoices.objectValues():
for invoice in batch.objectValues():
invoice_id = invoice.getId()
client = invoice.getClient()
if not client:
# invoice w/o a client -> remove
batch.manage_delObjects(invoice_id)
continue
if invoice_id in client.objectIds():
continue
# move invoices inside the client
cp = batch.manage_cutObjects(invoice_id)
client.manage_pasteObjects(cp)
# delete the invoices folder
portal.manage_delObjects(invoices.getId())
|
python
|
{
"resource": ""
}
|
q21068
|
get_workflow_ids_for
|
train
|
def get_workflow_ids_for(brain_or_object):
"""Returns a list with the workflow ids bound to the type of the object
passed in
"""
portal_type = api.get_portal_type(brain_or_object)
wf_ids = workflow_ids_by_type.get(portal_type, None)
if wf_ids:
return wf_ids
workflow_ids_by_type[portal_type] = api.get_workflows_for(brain_or_object)
return workflow_ids_by_type[portal_type]
|
python
|
{
"resource": ""
}
|
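get_workflow_ids_for and get_workflow_states_for below both memoize their results in a module-level dict keyed by portal_type. A minimal sketch of that caching pattern, with a hypothetical expensive_lookup standing in for the portal_workflow queries:

_cache = {}

def expensive_lookup(key):
    # Hypothetical slow call, e.g. a catalog or workflow tool query
    return [key.lower()]

def cached_lookup(key):
    # Return the cached value when present, otherwise compute and store it
    if key not in _cache:
        _cache[key] = expensive_lookup(key)
    return _cache[key]

print(cached_lookup("AnalysisRequest"))  # computed once
print(cached_lookup("AnalysisRequest"))  # served from the cache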
q21069
|
get_workflow_states_for
|
train
|
def get_workflow_states_for(brain_or_object):
"""Returns a list with the states supported by the workflows the object
passed in is bound to
"""
portal_type = api.get_portal_type(brain_or_object)
states = states_by_type.get(portal_type, None)
if states:
return states
# Retrieve the states from the workflows this object is bound to
states = []
wf_tool = api.get_tool("portal_workflow")
for wf_id in get_workflow_ids_for(brain_or_object):
workflow = wf_tool.getWorkflowById(wf_id)
wf_states = map(lambda state: state[0], workflow.states.items())
states.extend(wf_states)
states = list(set(states))
states_by_type[portal_type] = states
return states
|
python
|
{
"resource": ""
}
|
q21070
|
restore_review_history_for
|
train
|
def restore_review_history_for(brain_or_object):
"""Restores the review history for the given brain or object
"""
# Get the review history. Note this comes sorted from oldest to newest
review_history = get_purged_review_history_for(brain_or_object)
obj = api.get_object(brain_or_object)
wf_tool = api.get_tool("portal_workflow")
wf_ids = get_workflow_ids_for(brain_or_object)
wfs = map(lambda wf_id: wf_tool.getWorkflowById(wf_id), wf_ids)
wfs = filter(lambda wf: wf.state_var == "review_state", wfs)
if not wfs:
logger.error("No valid workflow found for {}".format(api.get_id(obj)))
else:
# It should not be possible to have more than one workflow with same
# state_variable here. Anyhow, we don't care in this case (we only want
# the object to have a review history).
workflow = wfs[0]
create_action = False
for history in review_history:
action_id = history["action"]
if action_id is None:
if create_action:
# We don't want multiple creation events, we only stick to
# one workflow, so if this object had more than one wf
# bound in the past, we still want only one creation action
continue
create_action = True
# Change status and reindex
wf_tool.setStatusOf(workflow.id, obj, history)
indexes = ["review_state", "is_active"]
obj.reindexObject(idxs=indexes)
|
python
|
{
"resource": ""
}
|
q21071
|
cache_affected_objects_review_history
|
train
|
def cache_affected_objects_review_history(portal):
"""Fills the review_history_cache dict. The keys are the uids of the objects
to be bound to new workflow and the values are their current review_history
"""
logger.info("Caching review_history ...")
query = dict(portal_type=NEW_SENAITE_WORKFLOW_BINDINGS)
brains = api.search(query, UID_CATALOG)
total = len(brains)
for num, brain in enumerate(brains):
if num % 100 == 0:
logger.info("Caching review_history: {}/{}"
.format(num, total))
review_history = get_review_history_for(brain)
review_history_cache[api.get_uid(brain)] = review_history
|
python
|
{
"resource": ""
}
|
q21072
|
get_review_history_for
|
train
|
def get_review_history_for(brain_or_object):
"""Returns the review history list for the given object. If there is no
review history for the object, it returns a default review history
"""
workflow_history = api.get_object(brain_or_object).workflow_history
if not workflow_history:
# No review_history for this object!. This object was probably
# migrated to 1.3.0 before review_history was handled in this
# upgrade step.
# https://github.com/senaite/senaite.core/issues/1270
return create_initial_review_history(brain_or_object)
review_history = []
for wf_id, histories in workflow_history.items():
for history in histories:
hist = history.copy()
if "inactive_state" in history:
hist["review_state"] = history["inactive_state"]
elif "cancellation_state" in history:
hist["review_state"] = history["cancellation_state"]
review_history.append(hist)
# Sort by time (from oldest to newest)
return sorted(review_history, key=lambda st: st.get("time"))
|
python
|
{
"resource": ""
}
|
q21073
|
create_initial_review_history
|
train
|
def create_initial_review_history(brain_or_object):
"""Creates a new review history for the given object
"""
obj = api.get_object(brain_or_object)
# It shouldn't be necessary to walk through all workflows from this object,
# because there are no objects with more than one workflow bound in 1.3.
# Nevertheless, one never knows if there is an add-on that does.
review_history = list()
wf_tool = api.get_tool("portal_workflow")
for wf_id in api.get_workflows_for(obj):
wf = wf_tool.getWorkflowById(wf_id)
if not hasattr(wf, "initial_state"):
logger.warn("No initial_state attr for workflow '{}': {}'"
.format(wf_id, repr(obj)))
# If no initial_state found for this workflow and object, set
# "registered" as default. This upgrade step is smart enough to generate
# a new review_state for new workflow bound to this object later,
# if the current state does not match with any of the newly available.
# Hence, it is totally safe to set the initial state here to "registered"
initial_state = getattr(wf, "initial_state", "registered")
initial_review_history = {
'action': None,
'actor': obj.Creator(),
'comments': 'Default review_history (by 1.3 upgrade step)',
'review_state': initial_state,
'time': obj.created(),
}
if hasattr(wf, "state_variable"):
initial_review_history[wf.state_variable] = initial_state
else:
# This is totally weird, but as per same reasoning above (re
# initial_state), not having a "state_variable" won't cause any
# problem later. The logic responsible of the reassignment of
# initial review states after new workflows are bound will safely
# assign the default state_variable (review_state)
logger.warn("No state_variable attr for workflow '{}': {}"
.format(wf_id, repr(obj)))
review_history.append(initial_review_history)
# Sort by time (from oldest to newest)
return sorted(review_history, key=lambda st: st.get("time"))
|
python
|
{
"resource": ""
}
|
q21074
|
resort_client_actions
|
train
|
def resort_client_actions(portal):
"""Resorts client action views
"""
sorted_actions = [
"edit",
"contacts",
"view", # this redirects to analysisrequests
"analysisrequests",
"batches",
"samplepoints",
"profiles",
"templates",
"specs",
"orders",
"reports_listing"
]
type_info = portal.portal_types.getTypeInfo("Client")
actions = filter(lambda act: act.id in sorted_actions, type_info._actions)
missing = filter(lambda act: act.id not in sorted_actions, type_info._actions)
# Sort the actions
actions = sorted(actions, key=lambda act: sorted_actions.index(act.id))
if missing:
# Move the actions not explicitly sorted to the end
actions.extend(missing)
# Reset the actions to type info
type_info._actions = actions
|
python
|
{
"resource": ""
}
|
q21075
|
init_auditlog
|
train
|
def init_auditlog(portal):
"""Initialize the contents for the audit log
"""
# reindex the auditlog folder to display the icon right in the setup
portal.bika_setup.auditlog.reindexObject()
# Initialize contents for audit logging
start = time.time()
uid_catalog = api.get_tool("uid_catalog")
brains = uid_catalog()
total = len(brains)
logger.info("Initializing {} objects for the audit trail...".format(total))
for num, brain in enumerate(brains):
# Progress notification
if num and num % 1000 == 0:
transaction.commit()
logger.info("{}/{} ojects initialized for audit logging"
.format(num, total))
# End progress notification
if num + 1 == total:
end = time.time()
duration = float(end-start)
logger.info("{} ojects initialized for audit logging in {:.2f}s"
.format(total, duration))
if api.get_portal_type(brain) in SKIP_TYPES_FOR_AUDIT_LOG:
continue
obj = api.get_object(brain)
if not supports_snapshots(obj):
continue
if has_snapshots(obj):
continue
# Take one snapshot per review history item
rh = api.get_review_history(obj, rev=False)
for item in rh:
actor = item.get("actor")
user = get_user(actor)
if user:
# remember the roles of the actor
item["roles"] = get_roles(user)
# The review history contains the variable "time" which we will set
# as the "modification" time
timestamp = item.pop("time", DateTime())
item["time"] = timestamp.ISO()
item["modified"] = timestamp.ISO()
item["remote_address"] = None
take_snapshot(obj, **item)
|
python
|
{
"resource": ""
}
|
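The init_auditlog step above commits every 1000 objects so the transaction stays small during a long migration. A minimal, generic sketch of that batching pattern, assuming the "transaction" package from the Zope stack is available; the function and variable names are illustrative, not part of the source:

import time
import transaction

def process_in_batches(objects, handler, batch_size=1000):
    """Apply handler to every object, committing once per finished batch."""
    start = time.time()
    for num, obj in enumerate(objects):
        if num and num % batch_size == 0:
            transaction.commit()  # flush the finished batch to the database
        handler(obj)
    transaction.commit()  # commit whatever is left in the last batch
    return time.time() - start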
q21076
|
remove_log_action
|
train
|
def remove_log_action(portal):
"""Removes the old Log action from types
"""
logger.info("Removing Log Tab ...")
portal_types = api.get_tool("portal_types")
for name in portal_types.listContentTypes():
ti = portal_types[name]
actions = map(lambda action: action.id, ti._actions)
for index, action in enumerate(actions):
if action == "log":
logger.info("Removing Log Action for {}".format(name))
ti.deleteActions([index])
break
logger.info("Removing Log Tab [DONE]")
|
python
|
{
"resource": ""
}
|
q21077
|
reindex_sortable_title
|
train
|
def reindex_sortable_title(portal):
"""Reindex sortable_title from some catalogs
"""
catalogs = [
"bika_catalog",
"bika_setup_catalog",
"portal_catalog",
]
for catalog_name in catalogs:
logger.info("Reindexing sortable_title for {} ...".format(catalog_name))
handler = ZLogHandler(steps=100)
catalog = api.get_tool(catalog_name)
catalog.reindexIndex("sortable_title", None, pghandler=handler)
commit_transaction(portal)
|
python
|
{
"resource": ""
}
|
q21078
|
DateTimeWidget.ulocalized_time
|
train
|
def ulocalized_time(self, time, context, request):
"""Returns the localized time in string format
"""
value = ut(time, long_format=self.show_time, time_only=False,
context=context, request=request)
return value or ""
|
python
|
{
"resource": ""
}
|
q21079
|
DateTimeWidget.ulocalized_gmt0_time
|
train
|
def ulocalized_gmt0_time(self, time, context, request):
"""Returns the localized time in string format, but in GMT+0
"""
value = get_date(context, time)
if not value:
return ""
# DateTime is stored with TimeZone, but DateTimeWidget omits TZ
value = value.toZone("GMT+0")
return self.ulocalized_time(value, context, request)
|
python
|
{
"resource": ""
}
|
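A small sketch of the GMT+0 normalization performed above, using Zope's DateTime directly; the example date and offset are made up:

from DateTime import DateTime

value = DateTime("2019-06-01 14:30:00 GMT+2")
gmt0 = value.toZone("GMT+0")  # same moment, expressed in GMT+0
print(gmt0.Time())            # should print "12:30:00"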
q21080
|
AnalysisServicesView.get_currency_symbol
|
train
|
def get_currency_symbol(self):
"""Returns the locale currency symbol
"""
currency = self.context.bika_setup.getCurrency()
locale = locales.getLocale("en")
locale_currency = locale.numbers.currencies.get(currency)
if locale_currency is None:
return "$"
return locale_currency.symbol
|
python
|
{
"resource": ""
}
|
q21081
|
AnalysisServicesView.format_maxtime
|
train
|
def format_maxtime(self, maxtime):
"""Formats the max time record to a days, hours, minutes string
"""
minutes = maxtime.get("minutes", "0")
hours = maxtime.get("hours", "0")
days = maxtime.get("days", "0")
# days, hours, minutes
return u"{}: {} {}: {} {}: {}".format(
_("days"), days, _("hours"), hours, _("minutes"), minutes)
|
python
|
{
"resource": ""
}
|
q21082
|
AnalysisServicesView.folderitems
|
train
|
def folderitems(self, full_objects=False, classic=True):
"""Sort by Categories
"""
bsc = getToolByName(self.context, "bika_setup_catalog")
self.an_cats = bsc(
portal_type="AnalysisCategory",
sort_on="sortable_title")
self.an_cats_order = dict([
(b.Title, "{:04}".format(a))
for a, b in enumerate(self.an_cats)])
items = super(AnalysisServicesView, self).folderitems()
if self.do_cats:
self.categories = map(lambda x: x[0],
sorted(self.categories, key=lambda x: x[1]))
else:
self.categories.sort()
return items
|
python
|
{
"resource": ""
}
|
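The zero-padded indexes built in an_cats_order above are what keeps the category order stable when sorting as strings. A hypothetical illustration, assuming self.categories collects (title, sort_key) tuples as the map/sorted calls suggest; the category titles are invented:

titles = ["Anions", "Metals", "Microbiology"]  # already sorted by sortable_title
order = dict([(t, "{:04}".format(i)) for i, t in enumerate(titles)])
# categories gathered while rendering the folder items
categories = [("Microbiology", order["Microbiology"]), ("Anions", order["Anions"])]
sorted_titles = map(lambda x: x[0], sorted(categories, key=lambda x: x[1]))
# -> ["Anions", "Microbiology"]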
q21083
|
Sticker.get_items
|
train
|
def get_items(self):
"""Returns a list of SuperModel items
"""
uids = self.get_uids()
if not uids:
return [SuperModel(self.context)]
items = map(lambda uid: SuperModel(uid), uids)
return self._resolve_number_of_copies(items)
|
python
|
{
"resource": ""
}
|
q21084
|
Sticker.getAvailableTemplates
|
train
|
def getAvailableTemplates(self):
"""Returns an array with the templates of stickers available.
Each array item is a dictionary with the following structure:
{'id': <template_id>,
'title': <teamplate_title>,
'selected: True/False'}
"""
        # Get the adapters for the current context. Those adapters will return
        # the desired sticker templates for the current context:
try:
adapters = getAdapters((self.context, ), IGetStickerTemplates)
except ComponentLookupError:
logger.info("No IGetStickerTemplates adapters found.")
adapters = None
templates = []
if adapters is not None:
# Gather all templates
for name, adapter in adapters:
templates += adapter(self.request)
if templates:
return templates
# If there are no adapters, get all sticker templates in the system
seltemplate = self.getSelectedTemplate()
for temp in getStickerTemplates(filter_by_type=self.filter_by_type):
out = temp
out["selected"] = temp.get("id", "") == seltemplate
templates.append(out)
return templates
|
python
|
{
"resource": ""
}
|
q21085
|
Sticker.getSelectedTemplate
|
train
|
def getSelectedTemplate(self, default="Code_39_40x20mm.pt"):
"""Returns the id of the sticker template selected.
If no specific template found in the request (parameter template),
returns the default template set in Setup > Stickers.
If the template doesn't exist, uses the default template.
If no template selected but size param, get the sticker template set as
default in Bika Setup for the size set.
"""
# Default sticker
bs_template = self.context.bika_setup.getAutoStickerTemplate()
size = self.request.get("size", "")
if self.filter_by_type:
templates = getStickerTemplates(filter_by_type=self.filter_by_type)
# Get the first sticker
bs_template = templates[0].get("id", "") if templates else ""
elif size == "small":
bs_template = self.context.bika_setup.getSmallStickerTemplate()
elif size == "large":
bs_template = self.context.bika_setup.getLargeStickerTemplate()
rq_template = self.request.get("template", bs_template)
        # Check if the template exists. If not, fall back to the default.
        # 'prefix' is also the resource folder's name
prefix = ""
templates_dir = ""
if rq_template.find(":") >= 0:
prefix, rq_template = rq_template.split(":")
templates_dir = self._getStickersTemplatesDirectory(prefix)
else:
this_dir = os.path.dirname(os.path.abspath(__file__))
templates_dir = os.path.join(this_dir, "templates/stickers/")
if self.filter_by_type:
templates_dir = templates_dir + "/" + self.filter_by_type
if not os.path.isfile(os.path.join(templates_dir, rq_template)):
rq_template = default
return "%s:%s" % (prefix, rq_template) if prefix else rq_template
|
python
|
{
"resource": ""
}
|
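The "prefix:template" convention handled above lets add-ons ship their own sticker templates. A hypothetical example of how such an id is split; the add-on name is made up:

rq_template = "my.addon.stickers:QR_25x25mm.pt"  # hypothetical add-on template id
prefix = ""
if rq_template.find(":") >= 0:
    prefix, rq_template = rq_template.split(":")
# prefix      -> "my.addon.stickers"  (the plone resource folder name)
# rq_template -> "QR_25x25mm.pt"      (the page template inside that folder)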
q21086
|
Sticker.getSelectedTemplateCSS
|
train
|
def getSelectedTemplateCSS(self):
"""Looks for the CSS file from the selected template and return its
contents.
If the selected template is default.pt, looks for a file named
default.css in the stickers path and return its contents. If no CSS
file found, retrns an empty string
"""
template = self.getSelectedTemplate()
# Look for the CSS
content = ""
if template.find(":") >= 0:
# A template from another add-on
prefix, template = template.split(":")
templates_dir = self._getStickersTemplatesDirectory(prefix)
css = "{0}.css".format(template[:-3])
if css in os.listdir(templates_dir):
path = "%s/%s.css" % (templates_dir, template[:-3])
if os.path.isfile(path):
with open(path, "r") as content_file:
content = content_file.read()
else:
this_dir = os.path.dirname(os.path.abspath(__file__))
templates_dir = os.path.join(this_dir, "templates/stickers/")
# Only use the directory asked in "filter_by_type"
if self.filter_by_type:
templates_dir = templates_dir + "/" + self.filter_by_type
path = "%s/%s.css" % (templates_dir, template[:-3])
if os.path.isfile(path):
with open(path, "r") as content_file:
content = content_file.read()
return content
|
python
|
{
"resource": ""
}
|
q21087
|
Sticker.renderItem
|
train
|
def renderItem(self, item):
"""Renders the next available sticker.
Uses the template specified in the request ('template' parameter) by
        default. If no template is defined in the request, uses the default
        template set in Setup > Stickers.
If the template specified doesn't exist, uses the default bika.lims'
Code_128_1x48mm.pt template (was sticker_small.pt).
"""
self.current_item = item
templates_dir = "templates/stickers"
embedt = self.getSelectedTemplate()
if embedt.find(":") >= 0:
prefix, embedt = embedt.split(":")
templates_dir = self._getStickersTemplatesDirectory(prefix)
elif self.filter_by_type:
templates_dir = "/".join([templates_dir, self.filter_by_type])
fullpath = os.path.join(templates_dir, embedt)
try:
embed = ViewPageTemplateFile(fullpath)
return embed(self, item=item)
except Exception:
exc = traceback.format_exc()
msg = "<div class='error'>{} - {} '{}':<pre>{}</pre></div>".format(
item.id, _("Failed to load sticker"), embedt, exc)
return msg
|
python
|
{
"resource": ""
}
|
q21088
|
Sticker.getItemsURL
|
train
|
def getItemsURL(self):
"""Used in stickers_preview.pt
"""
req_items = self.get_uids()
req_items = req_items or [api.get_uid(self.context)]
req = "{}?items={}".format(self.request.URL, ",".join(req_items))
return req
|
python
|
{
"resource": ""
}
|
q21089
|
Sticker._getStickersTemplatesDirectory
|
train
|
def _getStickersTemplatesDirectory(self, resource_name):
"""Returns the paths for the directory containing the css and pt files
for the stickers deppending on the filter_by_type.
:param resource_name: The name of the resource folder.
:type resource_name: string
:returns: a string as a path
"""
templates_dir =\
queryResourceDirectory("stickers", resource_name).directory
if self.filter_by_type:
templates_dir = templates_dir + "/" + self.filter_by_type
return templates_dir
|
python
|
{
"resource": ""
}
|
q21090
|
Sticker.pdf_from_post
|
train
|
def pdf_from_post(self):
"""Returns a pdf stream with the stickers
"""
html = self.request.form.get("html")
style = self.request.form.get("style")
reporthtml = "<html><head>{0}</head><body>{1}</body></html>"
reporthtml = reporthtml.format(style, html)
reporthtml = safe_unicode(reporthtml).encode("utf-8")
pdf_fn = tempfile.mktemp(suffix=".pdf")
pdf_file = createPdf(htmlreport=reporthtml, outfile=pdf_fn)
return pdf_file
|
python
|
{
"resource": ""
}
|
q21091
|
Sticker._resolve_number_of_copies
|
train
|
def _resolve_number_of_copies(self, items):
"""For the given objects generate as many copies as the desired number
of stickers.
:param items: list of objects whose stickers are going to be previewed.
:type items: list
:returns: list containing n copies of each object in the items list
:rtype: list
"""
copied_items = []
for obj in items:
for copy in range(self.get_copies_count()):
copied_items.append(obj)
return copied_items
|
python
|
{
"resource": ""
}
|
q21092
|
Sticker.get_copies_count
|
train
|
def get_copies_count(self):
"""Return the copies_count number request parameter
:returns: the number of copies for each sticker as stated
in the request
:rtype: int
"""
setup = api.get_setup()
default_num = setup.getDefaultNumberOfCopies()
request_num = self.request.form.get("copies_count")
return to_int(request_num, default_num)
|
python
|
{
"resource": ""
}
|
q21093
|
DuplicateAnalysis.getResultsRange
|
train
|
def getResultsRange(self):
"""Returns the valid result range for this analysis duplicate, based on
both on the result and duplicate variation set in the original analysis
A Duplicate will be out of range if its result does not match with the
result for the parent analysis plus the duplicate variation in % as the
margin error.
:return: A dictionary with the keys min and max
:rtype: dict
"""
specs = ResultsRangeDict()
analysis = self.getAnalysis()
if not analysis:
return specs
result = analysis.getResult()
if not api.is_floatable(result):
return specs
specs.min = specs.max = result
result = api.to_float(result)
dup_variation = analysis.getDuplicateVariation()
dup_variation = api.to_float(dup_variation)
if not dup_variation:
return specs
margin = abs(result) * (dup_variation / 100.0)
specs.min = str(result - margin)
specs.max = str(result + margin)
return specs
|
python
|
{
"resource": ""
}
|
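A worked example of the margin computed by getResultsRange above; the numbers are illustrative only:

result = 10.0        # result of the original analysis
dup_variation = 5.0  # duplicate variation in %
margin = abs(result) * (dup_variation / 100.0)  # 0.5
valid_range = {"min": str(result - margin), "max": str(result + margin)}
# -> {"min": "9.5", "max": "10.5"}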
q21094
|
getCatalogDefinitions
|
train
|
def getCatalogDefinitions():
"""
    Returns a dictionary with catalog definitions.
"""
final = {}
analysis_request = bika_catalog_analysisrequest_listing_definition
analysis = bika_catalog_analysis_listing_definition
autoimportlogs = bika_catalog_autoimportlogs_listing_definition
worksheet = bika_catalog_worksheet_listing_definition
report = bika_catalog_report_definition
# Merging the catalogs
final.update(analysis_request)
final.update(analysis)
final.update(autoimportlogs)
final.update(worksheet)
final.update(report)
return final
|
python
|
{
"resource": ""
}
|
q21095
|
getCatalog
|
train
|
def getCatalog(instance, field='UID'):
"""
    Returns the catalog that stores objects of the same type as the instance
    passed in. If an object type is indexed by more than one catalog, the
    first match will be returned.
:param instance: A single object
:type instance: ATContentType
:returns: The first catalog that stores the type of object passed in
"""
uid = instance.UID()
if 'workflow_skiplist' in instance.REQUEST and \
[x for x in instance.REQUEST['workflow_skiplist']
if x.find(uid) > -1]:
return None
else:
# grab the first catalog we are indexed in.
# we're only indexed in one.
at = getToolByName(instance, 'archetype_tool')
plone = instance.portal_url.getPortalObject()
catalog_name = instance.portal_type in at.catalog_map \
and at.catalog_map[instance.portal_type][0] or 'portal_catalog'
catalog = getToolByName(plone, catalog_name)
return catalog
|
python
|
{
"resource": ""
}
|
q21096
|
PrintView.download
|
train
|
def download(self, data, filename, content_type="application/pdf"):
"""Download the PDF
"""
self.request.response.setHeader(
"Content-Disposition", "inline; filename=%s" % filename)
self.request.response.setHeader("Content-Type", content_type)
self.request.response.setHeader("Content-Length", len(data))
self.request.response.setHeader("Cache-Control", "no-store")
self.request.response.setHeader("Pragma", "no-cache")
self.request.response.write(data)
|
python
|
{
"resource": ""
}
|
q21097
|
upgradestep
|
train
|
def upgradestep(upgrade_product, version):
""" Decorator for updating the QuickInstaller of a upgrade """
def wrap_func(fn):
def wrap_func_args(context, *args):
p = getToolByName(context, 'portal_quickinstaller').get(upgrade_product)
setattr(p, 'installedversion', version)
return fn(context, *args)
return wrap_func_args
return wrap_func
|
python
|
{
"resource": ""
}
|
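A hedged usage sketch of the decorator above; the product id, version and the way the portal is resolved from the tool are illustrative, not taken from the source:

@upgradestep("bika.lims", "1.3.0")  # hypothetical product id and target version
def upgrade(tool):
    # 'tool' is whatever context the upgrade machinery passes in; the
    # decorator only needs it to look up portal_quickinstaller
    portal = tool.aq_inner.aq_parent
    # ... perform the actual upgrade work against 'portal' ...
    return True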
q21098
|
HoribaJobinYvonCSVParser.parse_data_line
|
train
|
def parse_data_line(self, line):
""" Parses the data line into a dictionary for the importer
"""
it = self._generate(line)
reader = csv.DictReader(it, fieldnames=self.headers)
values = reader.next()
values['DefaultResult'] = 'ResidualError'
values['LineName'] = re.sub(r'\W', '', values['LineName'].strip())
values['Concentration'] = values['Cc'].strip()
values['StandardDeviation'] = values['SD'].strip()
values['ResidualError'] = values['RSD'].strip()
values['NetIntensity'] = values['Net_Intensity'].strip().split('/')
values['Remarks'] = ''
values['TestLine'] = ''
self._addRawResult(self._resid, {values['LineName']: values}, False)
return 0
|
python
|
{
"resource": ""
}
|
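A minimal sketch of how a single CSV line can be turned into a dict with csv.DictReader, as the parser above does. The header names and sample line are assumptions, and iter([line]) merely stands in for self._generate(line):

import csv

headers = ["LineName", "Cc", "SD", "RSD", "Net_Intensity"]  # assumed field order
line = "Cu 324.754, 1.23, 0.05, 4.10, 1000/998"
reader = csv.DictReader(iter([line]), fieldnames=headers)
values = reader.next()  # Python 2; use next(reader) on Python 3
# values["Cc"].strip() -> "1.23"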
q21099
|
AnalysisSpecificationWidget.process_form
|
train
|
def process_form(self, instance, field, form, empty_marker=None,
emptyReturnsMarker=False):
"""Return a list of dictionaries fit for AnalysisSpecsResultsField
consumption.
        If neither hidemin nor hidemax are specified, only services which have
        float()able entries in the result, min and max fields will be
        included. If hidemin and/or hidemax are specified, results might
        contain empty min and/or max fields.
"""
values = []
# selected services
service_uids = form.get("uids", [])
if not service_uids:
# Inject empty fields for the validator
values = [dict.fromkeys(field.getSubfields())]
for uid in service_uids:
s_min = self._get_spec_value(form, uid, "min")
s_max = self._get_spec_value(form, uid, "max")
if not s_min and not s_max:
# If user has not set value neither for min nor max, omit this
# record. Otherwise, since 'min' and 'max' are defined as
# mandatory subfields, the following message will appear after
# submission: "Specifications is required, please correct."
continue
# TODO: disallow this case in the UI
if s_min and s_max:
if float(s_min) > float(s_max):
logger.warn("Min({}) > Max({}) is not allowed"
.format(s_min, s_max))
continue
min_operator = self._get_spec_value(
form, uid, "min_operator", check_floatable=False)
max_operator = self._get_spec_value(
form, uid, "max_operator", check_floatable=False)
service = api.get_object_by_uid(uid)
values.append({
"keyword": service.getKeyword(),
"uid": uid,
"min_operator": min_operator,
"min": s_min,
"max_operator": max_operator,
"max": s_max,
"warn_min": self._get_spec_value(form, uid, "warn_min"),
"warn_max": self._get_spec_value(form, uid, "warn_max"),
"hidemin": self._get_spec_value(form, uid, "hidemin"),
"hidemax": self._get_spec_value(form, uid, "hidemax"),
"rangecomment": self._get_spec_value(form, uid, "rangecomment",
check_floatable=False)})
return values, {}
|
python
|
{
"resource": ""
}
|
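For reference, a hypothetical shape of one entry appended to values above; the keyword, uid, numbers and operator strings are invented for the example and depend on what the form actually submits:

{
    "keyword": "Ca",            # service keyword
    "uid": "<service_uid>",
    "min_operator": "geq",
    "min": "9.5",
    "max_operator": "leq",
    "max": "10.5",
    "warn_min": "9.0",
    "warn_max": "11.0",
    "hidemin": "",
    "hidemax": "",
    "rangecomment": "Range for drinking water",
}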