_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q20900
|
generateUniqueId
|
train
|
def generateUniqueId(context, **kw):
    """Generate pretty content IDs.

    Resolves the next sequence number for the given context and
    interpolates it into the user-configured ID template
    (e.g. ``{sampleId}-R{seq:02d}``).

    :param context: content object the ID is generated for
    :returns: normalized (filename-safe) ID string
    :raises KeyError: if the template references an unknown variable
    """
    # get the config for this portal type from the system setup
    config = get_config(context, **kw)
    # get the variables map for later string interpolation
    variables = get_variables(context, **kw)
    # The new generated sequence number
    number = 0
    # get the sequence type from the global config
    sequence_type = config.get("sequence_type", "generated")
    # Sequence Type is "Counter", so we use the length of the backreferences
    # or contained objects of the evaluated "context" defined in the config
    if sequence_type == "counter":
        number = get_counted_number(context, config, variables, **kw)
    # Sequence Type is "Generated", so the ID is constructed according to the
    # configured split length
    elif sequence_type == "generated":
        number = get_generated_number(context, config, variables, **kw)
    # store the new sequence number to the variables map for str interpolation
    if isinstance(number, Alphanumber):
        variables["alpha"] = number
    variables["seq"] = to_int(number)
    # The ID formatting template from user config, e.g. {sampleId}-R{seq:02d}
    id_template = config.get("form", "")
    # Interpolate the ID template
    try:
        new_id = id_template.format(**variables)
    except KeyError as e:
        # NOTE: "except X as e" replaces the legacy "except X, e" syntax,
        # which is invalid in Python 3
        logger.error('KeyError: {} not in id_template {}'.format(
            e, id_template))
        raise
    normalized_id = api.normalize_filename(new_id)
    logger.info("generateUniqueId: {}".format(normalized_id))
    return normalized_id
|
python
|
{
"resource": ""
}
|
q20901
|
ObjectTransitionedEventHandler
|
train
|
def ObjectTransitionedEventHandler(obj, event):
    """Take an audit snapshot when an object changed its workflow state
    """
    # skip objects that are not audit-logged
    if not supports_snapshots(obj):
        return
    # fallback entry if the object has no review history yet
    entry = {
        "modified": DateTime().ISO(),
        "action": event.action,
    }
    # prefer the most recent review history item, if any
    history = api.get_review_history(obj, rev=True)
    if history:
        entry = history[0]
        # record the transition as a modification entry as well
        timestamp = entry.pop("time", DateTime())
        entry["modified"] = timestamp.ISO()
        entry["action"] = event.action
    # take a new snapshot
    take_snapshot(obj, **entry)
    # keep the auditlog catalog up to date
    reindex_object(obj)
|
python
|
{
"resource": ""
}
|
q20902
|
ObjectModifiedEventHandler
|
train
|
def ObjectModifiedEventHandler(obj, event):
    """Take an audit snapshot when an object was edited
    """
    if not supports_snapshots(obj):
        # object type is not audit-logged
        return
    # record the edit as a new snapshot
    take_snapshot(obj, action="edit")
    # keep the auditlog catalog up to date
    reindex_object(obj)
|
python
|
{
"resource": ""
}
|
q20903
|
ObjectInitializedEventHandler
|
train
|
def ObjectInitializedEventHandler(obj, event):
    """Take the initial audit snapshot when an object was created
    """
    if not supports_snapshots(obj):
        # object type is not audit-logged
        return
    if has_snapshots(obj):
        # the initial snapshot was already taken
        return
    take_snapshot(obj, action="create")
|
python
|
{
"resource": ""
}
|
q20904
|
AnalysisSpec.Title
|
train
|
def Title(self):
    """Return the title if set, falling back to the sample type title.

    Returns an empty (encoded) string if neither is available.
    """
    title = self.title or ''
    if not title:
        # fall back to the sample type's title when no own title is set
        sampletype = self.getSampleType()
        if sampletype:
            title = sampletype.Title()
    return safe_unicode(title).encode('utf-8')
|
python
|
{
"resource": ""
}
|
q20905
|
AnalysisSpec.getSampleTypes
|
train
|
def getSampleTypes(self, active_only=True):
    """Return a DisplayList of (UID, Title) tuples for all sample types

    :param active_only: when True, inactive sample types are skipped
    """
    catalog = api.get_tool("bika_setup_catalog")
    # N.B. The `sortable_title` index sorts case sensitive. Since there
    #      is no sort key for sample types, it makes more sense to sort
    #      them alphabetically in the selection
    query = {
        "portal_type": "SampleType",
        "sort_on": "title",
        "sort_order": "ascending"
    }
    brains = catalog(query)
    if active_only:
        brains = [brain for brain in brains if api.is_active(brain)]
    return DisplayList([(brain.UID, brain.Title) for brain in brains])
|
python
|
{
"resource": ""
}
|
q20906
|
_objectdata_cache_key
|
train
|
def _objectdata_cache_key(func, obj):
    """RAM cache key for object schema data

    The key changes whenever the object is modified or changes its
    review state, which invalidates the cached data.
    """
    return "{}-{}-{}".format(
        api.get_uid(obj),
        api.get_review_status(obj),
        api.get_modification_date(obj).millis())
|
python
|
{
"resource": ""
}
|
q20907
|
get_storage
|
train
|
def get_storage(obj):
    """Get or create the audit log storage for the given object

    :param obj: Content object
    :returns: PersistentList
    """
    annotations = IAnnotations(obj)
    storage = annotations.get(SNAPSHOT_STORAGE)
    if storage is None:
        # lazily initialize the snapshot storage on first access
        storage = PersistentList()
        annotations[SNAPSHOT_STORAGE] = storage
    return annotations[SNAPSHOT_STORAGE]
|
python
|
{
"resource": ""
}
|
q20908
|
get_snapshot_count
|
train
|
def get_snapshot_count(obj):
    """Return the number of snapshots taken for this object

    :param obj: Content object
    :returns: Count of snapshots currently in the storage
    """
    try:
        annotation = IAnnotations(obj)
    except TypeError:
        # object is not annotatable
        return 0
    return len(annotation.get(SNAPSHOT_STORAGE, []))
|
python
|
{
"resource": ""
}
|
q20909
|
get_snapshot_by_version
|
train
|
def get_snapshot_by_version(obj, version=0):
    """Get a snapshot by version

    Snapshot versions begin with `0`, because this is the first index of
    the storage, which is a list.

    :param obj: Content object
    :param version: The index position of the snapshot in the storage
    :returns: Snapshot at the given index position, or None if out of range
    """
    if version < 0:
        return None
    snapshots = get_snapshots(obj)
    # `version >= len(...)` is equivalent to `version > len(...) - 1`
    if version >= len(snapshots):
        return None
    return snapshots[version]
|
python
|
{
"resource": ""
}
|
q20910
|
get_object_data
|
train
|
def get_object_data(obj):
    """Get object schema data
    NOTE: We RAM cache this data because it should only change when the object
          was modified!
    XXX: We need to set at least the modification date when we set fields in
         Ajax Listing when we take a snapshot there!
    :param obj: Content object
    :returns: Dictionary of extracted schema data (empty dict on failure)
    """
    model = SuperModel(obj)
    try:
        return model.to_dict()
    except Exception as exc:
        # extraction failed -- log and fall back to an empty mapping
        logger.error("Failed to get schema data for {}: {}"
                     .format(repr(obj), str(exc)))
        return {}
|
python
|
{
"resource": ""
}
|
q20911
|
get_object_metadata
|
train
|
def get_object_metadata(obj, **kw):
    """Get object metadata

    :param obj: Content object
    :param kw: explicit metadata overrides (take highest precedence)
    :returns: Dictionary of extracted object metadata
    """
    # inject metadata of volatile data
    metadata = {
        "actor": get_user_id(),
        "roles": get_roles(),
        "action": "",
        "review_state": api.get_review_status(obj),
        "active": api.is_active(obj),
        "snapshot_created": DateTime().ISO(),
        "modified": api.get_modification_date(obj).ISO(),
        "remote_address": "",
        "user_agent": "",
        "referer": "",
        "comments": "",
    }
    # Update request data (overrides the defaults above)
    metadata.update(get_request_data())
    # allow metadata overrides via keyword arguments (highest precedence)
    metadata.update(kw)
    return metadata
|
python
|
{
"resource": ""
}
|
q20912
|
compare_last_two_snapshots
|
train
|
def compare_last_two_snapshots(obj, raw=False):
    """Helper to compare the last two snapshots directly

    :param raw: True to compare the raw values, e.g. UIDs
    :returns: diff result, or an empty dict if fewer than 2 snapshots exist
    """
    if get_snapshot_count(obj) < 2:
        return {}
    current = get_version(obj)
    previous_snap = get_snapshot_by_version(obj, current - 1)
    latest_snap = get_snapshot_by_version(obj, current)
    return compare_snapshots(previous_snap, latest_snap, raw=raw)
|
python
|
{
"resource": ""
}
|
q20913
|
diff_values
|
train
|
def diff_values(value_a, value_b, raw=False):
    """Returns a human-readable diff between two values

    :param value_a: First value to compare
    :param value_b: Second value to compare
    :param raw: True to compare the raw values, e.g. UIDs
    :returns: a list of diff tuples, or None when the values are equal
    """
    a, b = value_a, value_b
    if not raw:
        # convert both sides to their human readable representation first
        a = _process_value(a)
        b = _process_value(b)
    if a == b:
        # no changes
        return None
    # N.B.: the choice for the tuple data structure is to enable in the future
    # more granular diffs, e.g. the changed values within a dictionary etc.
    return [(a, b)]
|
python
|
{
"resource": ""
}
|
q20914
|
_process_value
|
train
|
def _process_value(value):
    """Convert the value into a human readable diff string

    Falsy values and the literal string "None" render as "Not set";
    UIDs are resolved to the title/id of the referenced object;
    containers are processed recursively. Always returns a `str`.
    """
    if not value:
        value = _("Not set")
    # XXX: bad data, e.g. in AS Method field
    elif value == "None":
        value = _("Not set")
    # 0 is detected as the portal UID
    elif value == "0":
        pass
    elif api.is_uid(value):
        value = _get_title_or_id_from_uid(value)
    elif isinstance(value, (dict)):
        # stable, pretty-printed representation of mappings
        value = json.dumps(sorted(value.items()), indent=1)
    elif isinstance(value, (list, tuple)):
        # process each element recursively; sort for a stable output
        value = sorted(map(_process_value, value))
        value = "; ".join(value)
    elif isinstance(value, unicode):
        # NOTE: Python 2 only (`unicode` built-in)
        value = api.safe_unicode(value).encode("utf8")
    return str(value)
|
python
|
{
"resource": ""
}
|
q20915
|
Batch.Title
|
train
|
def Title(self):
    """Return the title, or the Batch ID when the title field is hidden
    """
    titlefield = self.Schema().getField('title')
    # fall back to the ID when the title widget is not visible
    value = self.title if titlefield.widget.visible else self.id
    return safe_unicode(value).encode('utf-8')
|
python
|
{
"resource": ""
}
|
q20916
|
Batch.getClient
|
train
|
def getClient(self):
    """Retrieve the Client this Batch is attached to.

    Tries the `Client` schema field first; if not set, falls back to
    the acquisition parent when it provides IClient. Returns None when
    no client can be determined.
    """
    client = self.Schema().getField('Client').get(self)
    if client:
        return client
    # fall back to the acquisition parent
    client = self.aq_parent
    if IClient.providedBy(client):
        return client
|
python
|
{
"resource": ""
}
|
q20917
|
Batch.BatchLabelVocabulary
|
train
|
def BatchLabelVocabulary(self):
    """Return all active batch labels as a display list
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    brains = bsc(portal_type='BatchLabel',
                 is_active=True,
                 sort_on='sortable_title')
    return DisplayList([(brain.UID, brain.Title) for brain in brains])
|
python
|
{
"resource": ""
}
|
q20918
|
Batch.getAnalysisRequestsBrains
|
train
|
def getAnalysisRequestsBrains(self, **kwargs):
    """Return all the Analysis Requests brains linked to the Batch.

    Keyword arguments are passed directly to the catalog.
    """
    # restrict the query to this batch
    kwargs['getBatchUID'] = self.UID()
    catalog = getToolByName(self, CATALOG_ANALYSIS_REQUEST_LISTING)
    return catalog(kwargs)
|
python
|
{
"resource": ""
}
|
q20919
|
Batch.getAnalysisRequests
|
train
|
def getAnalysisRequests(self, **kwargs):
    """Return all the Analysis Requests objects linked to the Batch.

    Keyword arguments are passed directly to the catalog.
    """
    return [brain.getObject()
            for brain in self.getAnalysisRequestsBrains(**kwargs)]
|
python
|
{
"resource": ""
}
|
q20920
|
Instrument.getValidCertifications
|
train
|
def getValidCertifications(self):
    """Return the certifications whose validity period covers today
    """
    valid = []
    today = date.today()
    for cert in self.getCertifications():
        valid_from = cert.getValidFrom() if cert else None
        valid_to = cert.getValidTo() if valid_from else None
        if not valid_from or not valid_to:
            # incomplete validity information -- skip
            continue
        # compare dates only (drop the time component)
        start = valid_from.asdatetime().date()
        end = valid_to.asdatetime().date()
        if start <= today <= end:
            valid.append(cert)
    return valid
|
python
|
{
"resource": ""
}
|
q20921
|
Instrument.isValid
|
train
|
def isValid(self):
    """Return whether this instrument is currently valid.

    The instrument is valid when it is not out for verification or
    calibration, its certificates are not out of date, and the latest
    QC test succeeded.
    """
    # guard clauses preserve the original short-circuit evaluation order
    if self.isOutOfDate() is not False:
        return False
    if self.isQCValid() is not True:
        return False
    if self.getDisposeUntilNextCalibrationTest() is not False:
        return False
    if self.isValidationInProgress() is not False:
        return False
    return self.isCalibrationInProgress() is False
|
python
|
{
"resource": ""
}
|
q20922
|
Instrument.isQCValid
|
train
|
def isQCValid(self):
    """ Returns True if the results of the last batch of QC Analyses
    performed against this instrument was within the valid range.
    For a given Reference Sample, more than one Reference Analyses assigned
    to this same instrument can be performed and the Results Capture Date
    might slightly differ amongst them. Thus, this function gets the latest
    QC Analysis performed, looks for siblings (through RefAnalysisGroupID)
    and if the results for all them are valid, then returns True. If there
    is one single Reference Analysis from the group with an out-of-range
    result, the function returns False

    :returns: bool
    """
    # fetch only the most recent reference analysis for this instrument
    query = {"portal_type": "ReferenceAnalysis",
             "getInstrumentUID": self.UID(),
             "sort_on": "getResultCaptureDate",
             "sort_order": "reverse",
             "sort_limit": 1,}
    brains = api.search(query, CATALOG_ANALYSIS_LISTING)
    if len(brains) == 0:
        # There are no Reference Analyses assigned to this instrument yet
        return True
    # Look for siblings. These are the QC Analyses that were created
    # together with this last ReferenceAnalysis and for the same Reference
    # Sample. If they were added through "Add Reference Analyses" in a
    # Worksheet, they typically appear in the same slot.
    group_id = brains[0].getReferenceAnalysesGroupID
    query = {"portal_type": "ReferenceAnalysis",
             "getInstrumentUID": self.UID(),
             "getReferenceAnalysesGroupID": group_id,}
    brains = api.search(query, CATALOG_ANALYSIS_LISTING)
    for brain in brains:
        results_range = brain.getResultsRange
        if not results_range:
            # no range defined -- nothing to validate for this analysis
            continue
        # Is out of range?
        out_of_range = is_out_of_range(brain)[0]
        if out_of_range:
            return False
    # By default, in range
    return True
|
python
|
{
"resource": ""
}
|
q20923
|
Instrument.addReferences
|
train
|
def addReferences(self, reference, service_uids):
    """Add reference analyses to the given reference sample and assign
    them to this instrument.

    :param reference: reference sample object the analyses belong to
    :param service_uids: UIDs of the Analysis Services to add
    :returns: list of the created reference analyses
    """
    # TODO Workflow - Analyses. Assignment of refanalysis to Instrument
    addedanalyses = []
    wf = getToolByName(self, 'portal_workflow')
    bsc = getToolByName(self, 'bika_setup_catalog')
    bac = getToolByName(self, 'bika_analysis_catalog')
    ref_type = reference.getBlank() and 'b' or 'c'
    ref_uid = reference.UID()
    # find the next free numeric postfix for the group ID by inspecting
    # the group IDs of the existing reference analyses
    postfix = 1
    for refa in reference.getReferenceAnalyses():
        grid = refa.getReferenceAnalysesGroupID()
        try:
            cand = int(grid.split('-')[2])
            if cand >= postfix:
                postfix = cand + 1
        except (AttributeError, IndexError, ValueError):
            # group ID missing or not in the expected "X-Y-NNN" form
            # (previously a bare `except:` which also hid real errors)
            pass
    # zero-pad to three digits (was `int(3)`, a redundant conversion)
    postfix = str(postfix).zfill(3)
    refgid = 'I%s-%s' % (reference.id, postfix)
    for service_uid in service_uids:
        # services with dependents don't belong in references
        service = bsc(portal_type='AnalysisService', UID=service_uid)[0].getObject()
        calc = service.getCalculation()
        if calc and calc.getDependentServices():
            continue
        ref_analysis = reference.addReferenceAnalysis(service)
        # Set ReferenceAnalysesGroupID (same id for the analyses from
        # the same Reference Sample and same Worksheet)
        # https://github.com/bikalabs/Bika-LIMS/issues/931
        ref_analysis.setReferenceAnalysesGroupID(refgid)
        ref_analysis.setInstrument(self)
        ref_analysis.reindexObject()
        addedanalyses.append(ref_analysis)
    # Set DisposeUntilNextCalibrationTest to False
    if addedanalyses:
        self.getField('DisposeUntilNextCalibrationTest').set(self, False)
    return addedanalyses
|
python
|
{
"resource": ""
}
|
q20924
|
Instrument.setImportDataInterface
|
train
|
def setImportDataInterface(self, values):
    """Set the list of import data interfaces for this instrument.

    Values that are not available in the importable interfaces list are
    silently dropped (with a warning in the log).
    """
    available = self.getImportDataInterfacesList()
    accepted = [value for value in values if value in available]
    if len(accepted) < len(values):
        logger.warn("Some Interfaces weren't added...")
    self.Schema().getField('ImportDataInterface').set(self, accepted)
|
python
|
{
"resource": ""
}
|
q20925
|
AttachmentsViewlet.show
|
train
|
def show(self):
    """Controls if the viewlet should be rendered
    """
    url = self.request.getURL()
    # XXX: Hack to show the viewlet only on the AR base_view
    if not url.endswith("base_view") and not url.endswith("manage_results"):
        return False
    # render only when the user may add or update attachments
    view = self.attachments_view
    return view.user_can_add_attachments() or \
        view.user_can_update_attachments()
|
python
|
{
"resource": ""
}
|
q20926
|
Import
|
train
|
def Import(context, request):
    """ Read analysis results from an XML string
    """
    # Generic XML import is currently not implemented -- report that in
    # the log and return an empty error list
    logs = ["Generic XML Import is not available"]
    return json.dumps({'errors': [], 'log': logs})
|
python
|
{
"resource": ""
}
|
q20927
|
get_storage_location
|
train
|
def get_storage_location():
    """Return the object the counters are stored on.

    This is the `bika_setup` folder when available, otherwise the portal
    root obtained via plone.api.
    """
    portal = api.portal.getSite()
    if portal.get('bika_setup', False):
        return portal['bika_setup']
    return portal
|
python
|
{
"resource": ""
}
|
q20928
|
NumberGenerator.storage
|
train
|
def storage(self):
    """Get (and lazily create) the counter storage
    """
    annotation = get_portal_annotation()
    tree = annotation.get(NUMBER_STORAGE)
    if tree is None:
        # first access: initialize an empty counter tree
        annotation[NUMBER_STORAGE] = OIBTree()
    return annotation[NUMBER_STORAGE]
|
python
|
{
"resource": ""
}
|
q20929
|
NumberGenerator.get_number
|
train
|
def get_number(self, key):
    """ get the next consecutive number

    :param key: name of the counter
    :returns: the incremented counter value (starts at 1 for a new key)
    """
    storage = self.storage
    logger.debug("NUMBER before => %s" % storage.get(key, '-'))
    try:
        logger.debug("*** consecutive number lock acquire ***")
        # serialize concurrent increments of the shared storage
        lock.acquire()
        try:
            counter = storage[key]
            storage[key] = counter + 1
        except KeyError:
            # first use of this key -- start counting at 1
            storage[key] = 1
    finally:
        logger.debug("*** consecutive number lock release ***")
        # flag the tree as changed so the new value gets persisted
        self.storage._p_changed = True
        lock.release()
    logger.debug("NUMBER after => %s" % storage.get(key, '-'))
    return storage[key]
|
python
|
{
"resource": ""
}
|
q20930
|
NumberGenerator.set_number
|
train
|
def set_number(self, key, value):
    """ set a key's value

    :param key: name of the counter
    :param value: integer value to store
    :returns: the stored value, or None when `value` is not an integer
    """
    storage = self.storage
    if not isinstance(value, int):
        # reject non-integer values without touching the storage
        logger.error("set_number: Value must be an integer")
        return
    try:
        lock.acquire()
        storage[key] = value
    finally:
        # flag the tree as changed so the new value gets persisted
        self.storage._p_changed = True
        lock.release()
    return storage[key]
|
python
|
{
"resource": ""
}
|
q20931
|
SampleType.getJSMinimumVolume
|
train
|
def getJSMinimumVolume(self, **kw):
    """Try convert the MinimumVolume to 'ml' or 'g' so that JS has an
    easier time working with it. If conversion fails, return raw value.
    """
    # raw stored value, presumably "<magnitude> <unit>" (e.g. "10 ml")
    # -- TODO confirm against the field definition
    default = self.Schema()['MinimumVolume'].get(self)
    try:
        # split into magnitude and unit, e.g. ["10", "ml"]
        mgdefault = default.split(' ', 1)
        mgdefault = mg(float(mgdefault[0]), mgdefault[1])
    except:
        # unparsable value -- fall back to a zero volume
        mgdefault = mg(0, 'ml')
    try:
        # prefer a volume representation
        return str(mgdefault.ounit('ml'))
    except:
        pass
    try:
        # not a volume -- try to express it as a mass instead
        return str(mgdefault.ounit('g'))
    except:
        pass
    # conversion failed entirely: return the raw value as a string
    return str(default)
|
python
|
{
"resource": ""
}
|
q20932
|
SampleType._sticker_templates_vocabularies
|
train
|
def _sticker_templates_vocabularies(self):
    """Vocabulary for AdmittedStickerTemplates.small_default

    Contains only the sticker templates selected in
    AdmittedStickerTemplates.admitted; empty when nothing is admitted.

    :return: A DisplayList
    """
    voc = DisplayList()
    admitted = self.getAdmittedStickers()
    if not admitted:
        return voc
    for sticker in getStickerTemplates():
        sticker_id = sticker.get('id')
        if sticker_id in admitted:
            voc.add(sticker_id, sticker.get('title'))
    return voc
|
python
|
{
"resource": ""
}
|
q20933
|
EmailView.fail
|
train
|
def fail(self, message, status=500, **kw):
    """Set a JSON error object and a status to the response

    :param message: error message(s) for the response payload
    :param status: HTTP status code to set on the response
    :returns: dict with `success`, `errors` and `status` keys, extended
        (and possibly overridden) by any additional keyword arguments
    """
    self.request.response.setStatus(status)
    # keyword arguments take precedence over the default payload keys
    return dict({"success": False, "errors": message, "status": status}, **kw)
|
python
|
{
"resource": ""
}
|
q20934
|
EmailView.handle_ajax_request
|
train
|
def handle_ajax_request(self):
    """Dispatch ajax routes to the matching `ajax_<name>` method.

    The first traversal path segment selects the method; any further
    segments are passed through as positional arguments.

    :returns: the return value of the called `ajax_` method, or a JSON
        error object when the route or the arguments are invalid
    """
    # check if the method exists
    func_arg = self.traverse_subpath[0]
    func_name = "ajax_{}".format(func_arg)
    func = getattr(self, func_name, None)
    if func is None:
        return self.fail("Invalid function", status=400)
    # Additional provided path segments after the function name are handled
    # as positional arguments
    args = self.traverse_subpath[1:]
    # check mandatory arguments
    # NOTE(review): inspect.getargspec was removed in Python 3.11 --
    # switch to inspect.getfullargspec when porting to Python 3
    func_sig = inspect.getargspec(func)
    # positional arguments after `self` argument
    required_args = func_sig.args[1:]
    if len(args) < len(required_args):
        return self.fail("Wrong signature, please use '{}/{}'"
                         .format(func_arg, "/".join(required_args)), 400)
    return func(*args)
|
python
|
{
"resource": ""
}
|
q20935
|
EmailView.parse_email
|
train
|
def parse_email(self, email):
    """Parse an email string into a (name, email) unicode tuple.

    Accepts either "email" or "name,email"; the name part is False when
    no name was given.
    """
    parts = safe_unicode(email).rsplit(",", 1)
    if len(parts) == 1:
        # bare email without a name part
        return (False, parts[0])
    if len(parts) == 2:
        return (parts[0], parts[1])
    raise ValueError("Could not parse email '{}'".format(email))
|
python
|
{
"resource": ""
}
|
q20936
|
EmailView.to_email_attachment
|
train
|
def to_email_attachment(self, filename, filedata, **kw):
    """Create a new MIME attachment for the given file.

    The Content-Type header is derived from the mimetype guessed from
    the filename, falling back to application/octet-stream. Additional
    parameters for the header are taken from the keyword arguments.
    """
    # guess the content type from the file name
    mime_type = mimetypes.guess_type(filename)[0]
    if mime_type is None:
        maintype, subtype = "application", "octet-stream"
    else:
        maintype, subtype = mime_type.split("/")
    attachment = MIMEBase(maintype, subtype, **kw)
    attachment.set_payload(filedata)
    # transfer-encode the payload for safe transport
    encoders.encode_base64(attachment)
    attachment.add_header("Content-Disposition",
                          "attachment; filename=%s" % filename)
    return attachment
|
python
|
{
"resource": ""
}
|
q20937
|
EmailView.send_email
|
train
|
def send_email(self, recipients, subject, body, attachments=None):
    """Prepare and send email to the recipients
    :param recipients: a list of email or name,email strings
    :param subject: the email subject
    :param body: the email body
    :param attachments: list of email attachments
    :returns: True if all emails were sent, else false
    """
    # N.B. fix: iterating over the `None` default raised a TypeError
    # whenever the caller omitted `attachments`
    if attachments is None:
        attachments = []
    # materialize as a list: the pairs are iterated twice below (once for
    # the template context and once in the send loop)
    recipient_pairs = [self.parse_email(recipient) for recipient in recipients]
    template_context = {
        "recipients": "\n".join(
            map(lambda p: formataddr(p), recipient_pairs))
    }
    # interpolate the recipient list into the body template
    body_template = Template(safe_unicode(body)).safe_substitute(
        **template_context)
    _preamble = "This is a multi-part message in MIME format.\n"
    _from = formataddr((self.email_from_name, self.email_from_address))
    _subject = Header(s=safe_unicode(subject), charset="utf8")
    _body = MIMEText(body_template, _subtype="plain", _charset="utf8")
    # Create the enclosing message
    mime_msg = MIMEMultipart()
    mime_msg.preamble = _preamble
    mime_msg["Subject"] = _subject
    mime_msg["From"] = _from
    mime_msg.attach(_body)
    # Attach attachments
    for attachment in attachments:
        mime_msg.attach(attachment)
    success = []
    # Send one email per recipient
    for pair in recipient_pairs:
        # N.B.: Headers are added additive, so we need to remove any
        #       existing "To" headers
        # No KeyError is raised if the key does not exist.
        # https://docs.python.org/2/library/email.message.html#email.message.Message.__delitem__
        del mime_msg["To"]
        # N.B. we use just the email here to prevent this Postfix Error:
        # Recipient address rejected: User unknown in local recipient table
        mime_msg["To"] = pair[1]
        msg_string = mime_msg.as_string()
        sent = self.send(msg_string)
        if not sent:
            logger.error("Could not send email to {}".format(pair))
        success.append(sent)
    # True only when every single send succeeded
    return all(success)
|
python
|
{
"resource": ""
}
|
q20938
|
EmailView.send
|
train
|
def send(self, msg_string, immediate=True):
    """Send the email via the MailHost tool

    :returns: True on success, False when the SMTP or socket layer failed
    """
    try:
        mailhost = api.get_tool("MailHost")
        mailhost.send(msg_string, immediate=immediate)
        return True
    except SMTPException as e:
        logger.error(e)
        return False
    except socket.error as e:
        logger.error(e)
        return False
|
python
|
{
"resource": ""
}
|
q20939
|
EmailView.add_status_message
|
train
|
def add_status_message(self, message, level="info"):
    """Set a portal status message

    :param message: the message text to display
    :param level: the message level, e.g. "info" or "warning"
    """
    return self.context.plone_utils.addPortalMessage(message, level)
|
python
|
{
"resource": ""
}
|
q20940
|
EmailView.get_report_data
|
train
|
def get_report_data(self, report):
    """Report data to be used in the template

    :param report: report object
    :returns: dict with the report, its AR, attachments and PDF info
    """
    ar = report.getAnalysisRequest()
    pdf = self.get_pdf(report)
    return {
        "ar": ar,
        "attachments": map(self.get_attachment_data, ar.getAttachment()),
        "pdf": pdf,
        "obj": report,
        "uid": api.get_uid(report),
        "filesize": "{} Kb".format(self.get_filesize(pdf)),
        "filename": "{}.pdf".format(ar.getId()),
    }
|
python
|
{
"resource": ""
}
|
q20941
|
EmailView.get_recipients_data
|
train
|
def get_recipients_data(self, reports):
    """Recipients data to be used in the template

    :param reports: list of report objects
    :returns: list of dicts with `name`, `email` and `valid` keys; a
        recipient is `valid` only when it appears in every report
    """
    if not reports:
        return []
    recipients = []
    recipient_names = []
    for num, report in enumerate(reports):
        # get the linked AR of this ARReport
        ar = report.getAnalysisRequest()
        # recipient names of this report
        report_recipient_names = []
        for recipient in self.get_recipients(ar):
            name = recipient.get("Fullname")
            email = recipient.get("EmailAddress")
            record = {
                "name": name,
                "email": email,
                "valid": True,
            }
            # deduplicate identical recipients across reports
            if record not in recipients:
                recipients.append(record)
            # remember the name of the recipient for this report
            report_recipient_names.append(name)
        recipient_names.append(report_recipient_names)
    # recipient names, which all of the reports have in common
    common_names = set(recipient_names[0]).intersection(*recipient_names)
    # mark recipients not in common
    for recipient in recipients:
        if recipient.get("name") not in common_names:
            recipient["valid"] = False
    return recipients
|
python
|
{
"resource": ""
}
|
q20942
|
EmailView.get_responsibles_data
|
train
|
def get_responsibles_data(self, reports):
    """Responsibles data to be used in the template

    :param reports: list of report objects
    :returns: list of dicts with `name`, `email` and `valid` keys; a
        responsible is `valid` only when it appears in every report
    """
    if not reports:
        return []
    recipients = []
    recipient_names = []
    for num, report in enumerate(reports):
        # get the linked AR of this ARReport
        ar = report.getAnalysisRequest()
        # recipient names of this report
        report_recipient_names = []
        responsibles = ar.getResponsible()
        for manager_id in responsibles.get("ids", []):
            responsible = responsibles["dict"][manager_id]
            name = responsible.get("name")
            email = responsible.get("email")
            record = {
                "name": name,
                "email": email,
                "valid": True,
            }
            # deduplicate identical responsibles across reports
            if record not in recipients:
                recipients.append(record)
            # remember the name of the recipient for this report
            report_recipient_names.append(name)
        recipient_names.append(report_recipient_names)
    # recipient names, which all of the reports have in common
    common_names = set(recipient_names[0]).intersection(*recipient_names)
    # mark recipients not in common
    for recipient in recipients:
        if recipient.get("name") not in common_names:
            recipient["valid"] = False
    return recipients
|
python
|
{
"resource": ""
}
|
q20943
|
EmailView.email_from_name
|
train
|
def email_from_name(self):
    """Sender name for outgoing emails.

    Prefers the laboratory name, falling back to the portal's
    email_from_name property.
    """
    lab_name = self.laboratory.getName()
    return lab_name or self.portal.email_from_name
|
python
|
{
"resource": ""
}
|
q20944
|
EmailView.get_total_size
|
train
|
def get_total_size(self, *files):
    """Calculate the total size of the given files.

    Nested lists/tuples of files are flattened recursively before
    summing; an empty call returns 0.
    """
    def flatten(item):
        # recursively unpack an eventual list of lists
        if isinstance(item, (list, tuple)):
            for sub in item:
                for leaf in flatten(sub):
                    yield leaf
        else:
            yield item
    # sum of all file sizes, starting from 0 for the empty case
    return sum(self.get_filesize(f) for f in flatten(files))
|
python
|
{
"resource": ""
}
|
q20945
|
EmailView.get_object_by_uid
|
train
|
def get_object_by_uid(self, uid):
    """Get the object by UID

    :param uid: UID of the object to fetch
    :returns: content object, or None when no object was found
    """
    logger.debug("get_object_by_uid::UID={}".format(uid))
    obj = api.get_object_by_uid(uid, None)
    if obj is None:
        # N.B. fix: the `{}` placeholder was never interpolated -- the
        # log line printed the literal braces instead of the UID
        logger.warn("!! No object found for UID #{} !!".format(uid))
    return obj
|
python
|
{
"resource": ""
}
|
q20946
|
EmailView.get_filesize
|
train
|
def get_filesize(self, f):
    """Return the file size of the given file in kB, rounded to 2 decimals

    :returns: size as float, 0.0 when the size cannot be determined
    """
    try:
        size_kb = float(f.get_size()) / 1024
        return float("%.2f" % size_kb)
    except (POSKeyError, TypeError, AttributeError):
        # broken blob or object without a usable get_size
        return 0.0
|
python
|
{
"resource": ""
}
|
q20947
|
EmailView.get_recipients
|
train
|
def get_recipients(self, ar):
    """Return the AR recipients in the same format like the AR Report
    expects in the records field `Recipients`
    """
    plone_utils = api.get_tool("plone_utils")

    def is_email(email):
        # delegate email validation to plone_utils
        if not plone_utils.validateSingleEmailAddress(email):
            return False
        return True

    def recipient_from_contact(contact):
        if not contact:
            return None
        return {
            "UID": api.get_uid(contact),
            "Username": contact.getUsername(),
            "Fullname": to_utf8(contact.Title()),
            "EmailAddress": contact.getEmailAddress(),
        }

    def recipient_from_email(email):
        if not is_email(email):
            return None
        return {
            "UID": "",
            "Username": "",
            "Fullname": email,
            "EmailAddress": email,
        }

    # primary contact, CC contacts and CC emails (skipping empties)
    to = [r for r in [recipient_from_contact(ar.getContact())] if r]
    cc = [r for r in map(recipient_from_contact, ar.getCCContact()) if r]
    raw_emails = [e.strip() for e in ar.getCCEmails().split(",")]
    cc_emails = [r for r in map(recipient_from_email, raw_emails) if r]
    return to + cc + cc_emails
|
python
|
{
"resource": ""
}
|
q20948
|
EmailView.ajax_recalculate_size
|
train
|
def ajax_recalculate_size(self):
    """Recalculate the total size of the selected attachments

    :returns: dict with file count, total size and size-limit info
    """
    reports = self.get_reports()
    attachments = self.get_attachments()
    total_size = self.get_total_size(reports, attachments)
    limit = self.max_email_size
    return {
        "files": len(reports) + len(attachments),
        "size": "%.2f" % total_size,
        "limit": limit,
        "limit_exceeded": total_size > limit,
    }
|
python
|
{
"resource": ""
}
|
q20949
|
is_worksheet_context
|
train
|
def is_worksheet_context():
    """Returns whether the current context from the request is a Worksheet
    """
    request = api.get_request()
    # check the traversal parents for a Worksheet
    for parent in request.get("PARENTS", []):
        if getattr(parent, "portal_type", None) == "Worksheet":
            return True
    # Check if the worksheet is declared in request explicitly
    ws_uid = request.get("ws_uid", "")
    obj = api.get_object_by_uid(ws_uid, None)
    if IWorksheet.providedBy(obj):
        return True
    return False
|
python
|
{
"resource": ""
}
|
q20950
|
guard_assign
|
train
|
def guard_assign(analysis):
    """Return whether the transition "assign" can be performed or not
    """
    # assignment requires a worksheet context, a received sample and no
    # worksheet assigned yet (evaluated in this order, short-circuiting)
    assignable = (is_worksheet_context()
                  and analysis.isSampleReceived()
                  and not analysis.getWorksheet())
    if not assignable:
        return False
    # finally, the user needs permission to manage worksheets
    return user_can_manage_worksheets()
|
python
|
{
"resource": ""
}
|
q20951
|
guard_submit
|
train
|
def guard_submit(analysis):
    """Return whether the transition "submit" can be performed or not

    :param analysis: the analysis object the transition is checked for
    :returns: bool
    """
    # Cannot submit without a result
    if not analysis.getResult():
        return False
    # Cannot submit with interims without value
    for interim in analysis.getInterimFields():
        if not interim.get("value", ""):
            return False
    # Cannot submit if attachment not set, but is required ('r')
    if not analysis.getAttachment():
        if analysis.getAttachmentOption() == 'r':
            return False
    # Check if can submit based on the Analysis Request state
    if IRequestAnalysis.providedBy(analysis):
        point_of_capture = analysis.getPointOfCapture()
        # Cannot submit if the Sample has not been received
        if point_of_capture == "lab" and not analysis.isSampleReceived():
            return False
        # Cannot submit if the Sample has not been sampled
        if point_of_capture == "field" and not analysis.isSampleSampled():
            return False
    # Check if the current user can submit if is not assigned
    if not analysis.bika_setup.getAllowToSubmitNotAssigned():
        # superusers (LabManager/Manager) bypass the assignment check
        if not user_has_super_roles():
            # Cannot submit if unassigned
            if not analysis.getAnalyst():
                return False
            # Cannot submit if assigned analyst is not the current user
            if analysis.getAnalyst() != api.get_current_user().getId():
                return False
    # Cannot submit unless all dependencies are submitted or can be submitted
    for dependency in analysis.getDependencies():
        if not is_submitted_or_submittable(dependency):
            return False
    return True
|
python
|
{
"resource": ""
}
|
q20952
|
guard_multi_verify
|
train
|
def guard_multi_verify(analysis):
    """Return whether the transition "multi_verify" can be performed or not
    The transition multi_verify will only take place if multi-verification of
    results is enabled.

    :param analysis: the analysis object the guard is evaluated against
    :returns: True when one more (non-final) verification can be performed
    """
    # Cannot multiverify if there is only one remaining verification
    # (in that case the regular "verify" transition applies instead)
    remaining_verifications = analysis.getNumberOfRemainingVerifications()
    if remaining_verifications <= 1:
        return False
    # Cannot verify if the user submitted and self-verification is not allowed
    if was_submitted_by_current_user(analysis):
        if not analysis.isSelfVerificationEnabled():
            return False
    # Cannot verify if the user verified and multi verification is not allowed
    if was_verified_by_current_user(analysis):
        if not is_multi_verification_allowed(analysis):
            return False
    # Cannot verify if the user was last verifier and consecutive verification
    # by same user is not allowed
    if current_user_was_last_verifier(analysis):
        if not is_consecutive_multi_verification_allowed(analysis):
            return False
    # Cannot verify unless all dependencies are verified or can be verified
    for dependency in analysis.getDependencies():
        if not is_verified_or_verifiable(dependency):
            return False
    return True
|
python
|
{
"resource": ""
}
|
q20953
|
guard_retract
|
train
|
def guard_retract(analysis):
    """Return whether the transition "retract" can be performed or not
    """
    # Every dependent analysis must be retractable as well
    if not is_transition_allowed(analysis.getDependents(), "retract"):
        return False
    dependencies = analysis.getDependencies()
    if not dependencies:
        return True
    # Retraction is not possible once every dependency has been verified
    return not all(IVerified.providedBy(dep) for dep in dependencies)
|
python
|
{
"resource": ""
}
|
q20954
|
user_has_super_roles
|
train
|
def user_has_super_roles():
    """Return whether the current belongs to superuser roles
    """
    super_roles = ("LabManager", "Manager")
    current_roles = api.get_current_user().getRoles()
    return any(role in super_roles for role in current_roles)
|
python
|
{
"resource": ""
}
|
q20955
|
current_user_was_last_verifier
|
train
|
def current_user_was_last_verifier(analysis):
    """Returns whether the current user was the last verifier or not

    :param analysis: the analysis whose verifiers are checked
    :returns: True if the last verifier is the current user
    """
    verifiers = analysis.getVerificators()
    if not verifiers:
        return False
    # Bugfix: the original compared `verifiers[:-1]` (a *list* slice) against
    # the current user's id string, so the check could never succeed. The
    # last verifier is `verifiers[-1]`.
    return verifiers[-1] == api.get_current_user().getId()
|
python
|
{
"resource": ""
}
|
q20956
|
is_transition_allowed
|
train
|
def is_transition_allowed(analyses, transition_id):
    """Returns whether all analyses can be transitioned or not
    """
    if not analyses:
        return True
    # Accept a single object as well as a list of objects
    if not isinstance(analyses, list):
        analyses = [analyses]
    for an in analyses:
        if not wf.isTransitionAllowed(an, transition_id):
            return False
    return True
|
python
|
{
"resource": ""
}
|
q20957
|
is_submitted_or_submittable
|
train
|
def is_submitted_or_submittable(analysis):
    """Returns whether the analysis is submittable or has already been submitted
    """
    submitted = ISubmitted.providedBy(analysis)
    submittable = wf.isTransitionAllowed(analysis, "submit")
    return bool(submitted or submittable)
|
python
|
{
"resource": ""
}
|
q20958
|
is_verified_or_verifiable
|
train
|
def is_verified_or_verifiable(analysis):
    """Returns whether the analysis is verifiable or has already been verified
    """
    if IVerified.providedBy(analysis):
        return True
    # Either a single or a multi verification transition qualifies
    for transition in ("verify", "multi_verify"):
        if wf.isTransitionAllowed(analysis, transition):
            return True
    return False
|
python
|
{
"resource": ""
}
|
q20959
|
InstrumentValidation.isValidationInProgress
|
train
|
def isValidationInProgress(self):
    """Returns whether today lies within the instrument's validation period
    """
    now = DateTime()
    return self.getDownFrom() <= now and now <= self.getDownTo()
|
python
|
{
"resource": ""
}
|
q20960
|
InstrumentValidation.getRemainingDaysInValidation
|
train
|
def getRemainingDaysInValidation(self):
    """Returns the days until the instrument returns from validation

    :returns: number of whole days (rounded up) until DownTo, or 0 when the
        validation window is unset or inconsistent
    """
    delta = 0
    today = DateTime()
    # NOTE(review): down_from is defaulted to today, so the
    # "not down_from" branch below can never trigger — kept as-is to
    # preserve behavior
    down_from = self.getDownFrom() or today
    down_to = self.getDownTo()
    # one of the fields is not set, return 0 days
    if not down_from or not down_to:
        return 0
    # down_from comes after down_to?
    if down_from > down_to:
        return 0
    # calculate the time between today and down_to, even if down_from
    # is in the future.
    else:
        # DateTime subtraction yields a float number of days
        delta = down_to - today
    return int(math.ceil(delta))
|
python
|
{
"resource": ""
}
|
q20961
|
LabContact.hasUser
|
train
|
def hasUser(self):
    """Check if contact has user
    """
    username = self.getUsername()
    if not username:
        return False
    return api.get_user(username) is not None
|
python
|
{
"resource": ""
}
|
q20962
|
LabContact._departmentsVoc
|
train
|
def _departmentsVoc(self):
    """Vocabulary of available departments

    :returns: DisplayList of (uid, title) pairs, sorted by title
    """
    # Only active departments from the setup catalog
    query = {
        "portal_type": "Department",
        "is_active": True
    }
    results = api.search(query, "bika_setup_catalog")
    # NOTE: map returns a list here (Python 2), appended to below
    items = map(lambda dept: (api.get_uid(dept), api.get_title(dept)),
                results)
    dept_uids = map(api.get_uid, results)
    # Currently assigned departments
    depts = self.getDepartments()
    # If one department assigned to the Lab Contact is disabled, it will
    # be shown in the list until the department has been unassigned.
    for dept in depts:
        uid = api.get_uid(dept)
        if uid in dept_uids:
            continue
        items.append((uid, api.get_title(dept)))
    return api.to_display_list(items, sort_by="value", allow_empty=False)
|
python
|
{
"resource": ""
}
|
q20963
|
LabContact._defaultDepsVoc
|
train
|
def _defaultDepsVoc(self):
    """Vocabulary of all departments
    """
    # Build (uid, title) pairs from the assigned departments
    items = [(api.get_uid(dept), api.get_title(dept))
             for dept in self.getDepartments()]
    return api.to_display_list(items, sort_by="value", allow_empty=True)
|
python
|
{
"resource": ""
}
|
q20964
|
LabContact.addDepartment
|
train
|
def addDepartment(self, dep):
    """Adds a department
    :param dep: UID or department object
    :returns: True when the department was added
    """
    if api.is_uid(dep):
        dep = api.get_object_by_uid(dep)
    deps = self.getDepartments()
    # Bugfix: the membership check was inverted (`if dep not in deps:
    # return False`), so new departments were rejected while already
    # assigned ones got appended again as duplicates. The sibling
    # removeDepartment shows the intended symmetric pattern.
    if dep in deps:
        return False
    deps.append(dep)
    self.setDepartments(deps)
    return True
|
python
|
{
"resource": ""
}
|
q20965
|
LabContact.removeDepartment
|
train
|
def removeDepartment(self, dep):
    """Removes a department
    :param dep: UID or department object
    :returns: True when the department was removed
    """
    # Resolve UIDs to the actual department object first
    if api.is_uid(dep):
        dep = api.get_object_by_uid(dep)
    current = self.getDepartments()
    if dep not in current:
        return False
    current.remove(dep)
    self.setDepartments(current)
    return True
|
python
|
{
"resource": ""
}
|
q20966
|
rename_bika_setup
|
train
|
def rename_bika_setup():
    """
    Rename Bika Setup to just Setup to avoid naming confusions for new users
    """
    logger.info("Renaming Bika Setup...")
    bika_setup = api.get_bika_setup()
    bika_setup.setTitle("Setup")
    # Reindex so the new title is reflected in catalog searches
    bika_setup.reindexObject()
    # Re-run the controlpanel import step so the control panel entry
    # picks up the new title as well
    setup = api.get_portal().portal_setup
    setup.runImportStepFromProfile('profile-bika.lims:default', 'controlpanel')
|
python
|
{
"resource": ""
}
|
q20967
|
guard_retract
|
train
|
def guard_retract(worksheet):
    """Return whether the transition retract can be performed or not to the
    worksheet passed in. Since the retract transition from worksheet is a
    shortcut to retract transitions from all analyses the worksheet contains,
    this guard only returns True if retract transition is allowed for all
    analyses the worksheet contains
    """
    analyses = worksheet.getAnalyses()
    # Analyses in these states no longer take part in the worksheet's
    # regular lifecycle, so they are skipped (not blocking)
    detached = ['rejected', 'retracted']
    num_detached = 0
    for analysis in analyses:
        if api.get_workflow_status_of(analysis) in detached:
            num_detached += 1
        elif not isTransitionAllowed(analysis, "retract"):
            return False
    # True only when there is at least one analysis and at least one of
    # them is not detached, i.e. there is something left to retract
    return analyses and num_detached < len(analyses) or False
|
python
|
{
"resource": ""
}
|
q20968
|
guard_rollback_to_open
|
train
|
def guard_rollback_to_open(worksheet):
    """Return whether 'rollback_to_receive' transition can be performed or not
    """
    # At least one analysis must still be in "assigned" state
    return any(api.get_review_status(analysis) == "assigned"
               for analysis in worksheet.getAnalyses())
|
python
|
{
"resource": ""
}
|
q20969
|
ContainerType.getContainers
|
train
|
def getContainers(self):
    """Return a list of all containers of this type
    """
    my_uid = self.UID()
    matches = []
    for container in self.bika_setup.bika_containers.objectValues():
        container_type = container.getContainerType()
        # Keep only containers whose type points back to this object
        if container_type and container_type.UID() == my_uid:
            matches.append(container)
    return matches
|
python
|
{
"resource": ""
}
|
q20970
|
Calculation.setFormula
|
train
|
def setFormula(self, Formula=None):
    """Set the Dependent Services from the text of the calculation Formula

    Service keywords referenced as ``[Keyword]`` in the formula text are
    resolved to Analysis Services and stored as DependentServices.
    :param Formula: formula text, or None to clear dependents
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    if Formula is None:
        # Clearing the formula also clears the dependent services
        self.setDependentServices(None)
        self.getField('Formula').set(self, Formula)
    else:
        # Extract keywords: any bracketed token without a dot or bracket,
        # e.g. "[Ca] + [Mg]" -> ["Ca", "Mg"]
        keywords = re.compile(r"\[([^.^\]]+)\]").findall(Formula)
        brains = bsc(portal_type='AnalysisService',
                     getKeyword=keywords)
        services = [brain.getObject() for brain in brains]
        self.getField('DependentServices').set(self, services)
        self.getField('Formula').set(self, Formula)
|
python
|
{
"resource": ""
}
|
q20971
|
Calculation.getCalculationDependants
|
train
|
def getCalculationDependants(self, deps=None):
    """Return a flat list of services who depend on this calculation.
    This refers only to services who's Calculation UIDReferenceField have
    the value set to point to this calculation.
    It has nothing to do with the services referenced in the calculation's
    Formula.

    :param deps: accumulator list, mutated in place across recursive calls
    :returns: flat list of dependant Analysis Service objects
    """
    if deps is None:
        deps = []
    # Services whose Calculation field points at this calculation
    backrefs = get_backreferences(self, 'AnalysisServiceCalculation')
    services = map(get_object_by_uid, backrefs)
    for service in services:
        calc = service.getCalculation()
        # Recurse into the service's *current* calculation when it differs
        # from this one, collecting its dependants into the same list
        if calc and calc.UID() != self.UID():
            calc.getCalculationDependants(deps)
        deps.append(service)
    return deps
|
python
|
{
"resource": ""
}
|
q20972
|
Calculation._getGlobals
|
train
|
def _getGlobals(self, **kwargs):
    """Return the globals dictionary for the formula calculation

    :param kwargs: extra names merged into the globals mapping
    :returns: dict usable as the globals of an eval'd formula
    :raises ImportError: when a configured Python import cannot be resolved
    """
    # Default globals
    # NOTE: __builtins__ is explicitly disabled so formulas cannot reach
    # arbitrary builtins; only the whitelisted callables below are exposed.
    # (cmp, long and xrange are Python 2 builtins.)
    globs = {
        "__builtins__": None,
        "all": all,
        "any": any,
        "bool": bool,
        "chr": chr,
        "cmp": cmp,
        "complex": complex,
        "divmod": divmod,
        "enumerate": enumerate,
        "float": float,
        "format": format,
        "frozenset": frozenset,
        "hex": hex,
        "int": int,
        "len": len,
        "list": list,
        "long": long,
        "math": math,
        "max": max,
        "min": min,
        "oct": oct,
        "ord": ord,
        "pow": pow,
        "range": range,
        "reversed": reversed,
        "round": round,
        "str": str,
        "sum": sum,
        "tuple": tuple,
        "xrange": xrange,
    }
    # Update with keyword arguments
    globs.update(kwargs)
    # Update with additional Python libraries
    for imp in self.getPythonImports():
        mod = imp["module"]
        func = imp["function"]
        member = self._getModuleMember(mod, func)
        if member is None:
            raise ImportError(
                "Could not find member {} of module {}".format(
                    func, mod))
        globs[func] = member
    return globs
|
python
|
{
"resource": ""
}
|
q20973
|
Calculation._getModuleMember
|
train
|
def _getModuleMember(self, dotted_name, member):
"""Get the member object of a module.
:param dotted_name: The dotted name of the module, e.g. 'scipy.special'
:type dotted_name: string
:param member: The name of the member function, e.g. 'gammaincinv'
:type member: string
:returns: member object or None
"""
try:
mod = importlib.import_module(dotted_name)
except ImportError:
return None
members = dict(inspect.getmembers(mod))
return members.get(member)
|
python
|
{
"resource": ""
}
|
q20974
|
after_unassign
|
train
|
def after_unassign(reference_analysis):
    """Removes the reference analysis from the system

    :param reference_analysis: the reference analysis that was unassigned
    """
    # Run the generic after_unassign handler first
    analysis_events.after_unassign(reference_analysis)
    # The parent (reference sample) deletes the analysis object itself
    ref_sample = reference_analysis.aq_parent
    ref_sample.manage_delObjects([reference_analysis.getId()])
|
python
|
{
"resource": ""
}
|
q20975
|
after_retract
|
train
|
def after_retract(reference_analysis):
    """Function triggered after a 'retract' transition for the reference
    analysis passed in is performed. The reference analysis transitions to
    "retracted" state and a new copy of the reference analysis is created

    :param reference_analysis: the reference analysis being retracted
    """
    reference = reference_analysis.getSample()
    service = reference_analysis.getAnalysisService()
    worksheet = reference_analysis.getWorksheet()
    instrument = reference_analysis.getInstrument()
    if worksheet:
        # This a reference analysis in a worksheet
        # The retest is placed in the same slot and keeps the same group id
        slot = worksheet.get_slot_position_for(reference_analysis)
        refgid = reference_analysis.getReferenceAnalysesGroupID()
        ref = worksheet.add_reference_analysis(reference, service, slot, refgid)
        if not ref:
            logger.warn("Cannot add a retest for reference analysis {} into {}"
                        .format(reference_analysis.getId(), worksheet.getId()))
            return
        # Link the retest to the retracted analysis and copy its result
        ref.setRetestOf(reference_analysis)
        ref.setResult(reference_analysis.getResult())
        if instrument:
            ref.setInstrument(instrument)
            instrument.reindexObject()
        # Try to rollback the worksheet to prevent inconsistencies
        wf.doActionFor(worksheet, "rollback_to_open")
    elif instrument:
        # This is an internal calibration test
        instrument.addReferences(reference, [api.get_uid(service)])
        instrument.reindexObject()
|
python
|
{
"resource": ""
}
|
q20976
|
Update.require
|
train
|
def require(self, fieldname, allow_blank=False):
    """Raise when *fieldname* is missing from the request form, or when its
    value is blank and blank values are not allowed.

    :param fieldname: name of the required form field
    :param allow_blank: when True, an empty value is accepted
    :raises Exception: when the field is missing or (disallowed) blank
    """
    form = self.request.form
    if form and fieldname not in form.keys():
        raise Exception("Required field not found in request: %s" % fieldname)
    # Bugfix: the original condition `(not form[fieldname] or allow_blank)`
    # raised precisely when allow_blank was True, and the error message was
    # missing its `% fieldname` interpolation.
    if form and not form[fieldname] and not allow_blank:
        raise Exception(
            "Required field %s may not have blank value" % fieldname)
|
python
|
{
"resource": ""
}
|
q20977
|
Update.used
|
train
|
def used(self, fieldname):
    """fieldname is used, remove from list of unused fields"""
    try:
        self.unused.remove(fieldname)
    except ValueError:
        # not (or no longer) in the unused list -- nothing to do
        pass
|
python
|
{
"resource": ""
}
|
q20978
|
ReferenceResultsView.get_reference_results
|
train
|
def get_reference_results(self):
    """Return a mapping of Analysis Service -> Reference Results
    """
    results = {}
    # Key each reference-result record by its service uid
    for record in self.context.getReferenceResults():
        results[record.get("uid")] = record
    return results
|
python
|
{
"resource": ""
}
|
q20979
|
is_import_interface
|
train
|
def is_import_interface(instrument_interface):
    """Returns whether the instrument interface passed in is for results import

    :param instrument_interface: interface object or classic interface module
    :returns: True when the interface supports results import
    """
    if IInstrumentImportInterface.providedBy(instrument_interface):
        return True
    # TODO Remove this once classic instrument interface migrated
    # Classic interfaces are plain modules: derive the interface id by
    # stripping this package's dotted name from the module's __name__
    if hasattr(instrument_interface, '__name__'):
        obj_name = instrument_interface.__name__.replace(__name__, "")
        # obj_name keeps a leading dot, hence the [1:] slice
        if obj_name[1:] in __all__ and hasattr(instrument_interface, "Import"):
            return True
    return False
|
python
|
{
"resource": ""
}
|
q20980
|
is_export_interface
|
train
|
def is_export_interface(instrument_interface):
    """Returns whether the instrument interface passed in is for results export

    :param instrument_interface: interface object or classic interface module
    :returns: True when the interface supports results export
    """
    if IInstrumentExportInterface.providedBy(instrument_interface):
        return True
    # TODO Remove this once classic instrument interface migrated
    # Classic interfaces are plain modules: derive the interface id by
    # stripping this package's dotted name from the module's __name__
    if hasattr(instrument_interface, '__name__'):
        obj_name = instrument_interface.__name__.replace(__name__, "")
        # obj_name keeps a leading dot, hence the [1:] slice
        if obj_name[1:] in __all__ and hasattr(instrument_interface, "Export"):
            return True
    return False
|
python
|
{
"resource": ""
}
|
q20981
|
getExim
|
train
|
def getExim(exim_id):
    """Returns the instrument interface for the exim_id passed in
    """
    matches = [iface for iface in get_instrument_interfaces()
               if iface[0] == exim_id]
    return matches and matches[0][1] or None
|
python
|
{
"resource": ""
}
|
q20982
|
get_automatic_parser
|
train
|
def get_automatic_parser(exim_id, infile):
    """Returns the parser to be used by default for the instrument id interface
    and results file passed in.

    :param exim_id: id of the instrument import/export interface
    :param infile: the results file to be parsed
    :returns: parser instance or None when no suitable parser exists
    """
    adapter = getExim(exim_id)
    # New-style interfaces know how to pick their own parser
    if IInstrumentAutoImportInterface.providedBy(adapter):
        return adapter.get_automatic_parser(infile)
    # TODO Remove this once classic instrument interface migrated
    # Classic interfaces: look up the parser factory name registered for
    # this exim_id and resolve it as an attribute of the adapter module
    parser_func = filter(lambda i: i[0] == exim_id, PARSERS)
    parser_func = parser_func and parser_func[0][1] or None
    if not parser_func or not hasattr(adapter, parser_func):
        return None
    parser_func = getattr(adapter, parser_func)
    return parser_func(infile)
|
python
|
{
"resource": ""
}
|
q20983
|
ReferenceSamplesView.is_manage_allowed
|
train
|
def is_manage_allowed(self):
    """Check if manage is allowed
    """
    membership = self.context.portal_membership
    return membership.checkPermission(ManageWorksheets, self.context)
|
python
|
{
"resource": ""
}
|
q20984
|
ReferenceSamplesView.get_assigned_services
|
train
|
def get_assigned_services(self):
    """Get the current assigned services of this Worksheet
    """
    services = []
    for analysis in self.context.getAnalyses():
        # Only routine analyses contribute their service
        if IRoutineAnalysis.providedBy(analysis):
            services.append(analysis.getAnalysisService())
    return services
|
python
|
{
"resource": ""
}
|
q20985
|
ReferenceSamplesView.get_assigned_services_uids
|
train
|
def get_assigned_services_uids(self):
    """Get the current assigned services UIDs of this Worksheet
    """
    # De-duplicate via a set before returning as a list
    unique_uids = set()
    for service in self.get_assigned_services():
        unique_uids.add(api.get_uid(service))
    return list(unique_uids)
|
python
|
{
"resource": ""
}
|
q20986
|
ReferenceSamplesView.get_supported_services_uids
|
train
|
def get_supported_services_uids(self, referencesample):
    """Get the supported services of the reference sample
    """
    supported = referencesample.getSupportedServices(only_uids=True)
    # De-duplicate via a set before returning as a list
    return list(set(supported))
|
python
|
{
"resource": ""
}
|
q20987
|
ReferenceSamplesView.make_supported_services_choices
|
train
|
def make_supported_services_choices(self, referencesample):
    """Create choices for supported services
    """
    assigned = self.get_assigned_services_uids()
    choices = []
    for uid in self.get_supported_services_uids(referencesample):
        service = api.get_object(uid)
        # Pre-select services that are already assigned to the worksheet
        choices.append({
            "ResultValue": uid,
            "ResultText": api.get_title(service),
            "selected": uid in assigned,
        })
    return choices
|
python
|
{
"resource": ""
}
|
q20988
|
ReferenceSamplesView.make_position_choices
|
train
|
def make_position_choices(self):
    """Create choices for available positions
    """
    # Value and text are both the position itself
    return [{"ResultValue": pos, "ResultText": pos}
            for pos in self.get_available_positions()]
|
python
|
{
"resource": ""
}
|
q20989
|
ReferenceSamplesView.get_available_positions
|
train
|
def get_available_positions(self):
    """Return a list of empty slot numbers
    """
    # "new" is always offered: append the analysis to a brand new slot
    positions = ["new"]
    occupied = [int(slot["position"]) for slot in self.context.getLayout()]
    if occupied:
        # Gaps between 1 and the highest occupied slot are reusable
        gaps = [pos for pos in range(1, max(occupied) + 1)
                if pos not in occupied]
        positions.extend(gaps)
    return positions
|
python
|
{
"resource": ""
}
|
q20990
|
AnalysesView.isItemAllowed
|
train
|
def isItemAllowed(self, obj):
    """Returns true if the current analysis to be rendered has a slot
    assigned for the current layout.
    :param obj: analysis to be rendered as a row in the list
    :type obj: ATContentType/DexterityContentType
    :return: True if the obj has an slot assigned. Otherwise, False.
    :rtype: bool
    """
    uid = api.get_uid(obj)
    # Analyses without a slot in the layout cannot be rendered in a row
    if not self.get_item_slot(uid):
        logger.warning("Slot not assigned to item %s" % uid)
        return False
    # Defer any further filtering to the base class
    return BaseView.isItemAllowed(self, obj)
|
python
|
{
"resource": ""
}
|
q20991
|
AnalysesView.folderitems
|
train
|
def folderitems(self):
    """Returns an array of dictionaries, each dictionary represents an
    analysis row to be rendered in the list. The array returned is sorted
    in accordance with the layout positions set for the analyses this
    worksheet contains when the analyses were added in the worksheet.
    :returns: list of dicts with the items to be rendered in the list
    :rtype: list
    """
    items = BaseView.folderitems(self)
    # Fill empty positions from the layout with fake rows. The worksheet
    # can be generated by making use of a WorksheetTemplate, so there is
    # the chance that some slots of this worksheet being empty. We need to
    # render a row still, at lest to display the slot number (Pos)
    self.fill_empty_slots(items)
    # Sort the items in accordance with the layout
    # (pos_sortkey is a zero-padded "slot:position" string)
    items = sorted(items, key=itemgetter("pos_sortkey"))
    # Fill the slot header cells (first cell of each row). Each slot
    # contains the analyses that belong to the same parent
    # (AnalysisRequest, ReferenceSample), so the information about the
    # parent must be displayed in the first cell of each slot.
    self.fill_slots_headers(items)
    return items
|
python
|
{
"resource": ""
}
|
q20992
|
AnalysesView.get_item_position
|
train
|
def get_item_position(self, analysis_uid):
    """Returns a list with the position for the analysis_uid passed in
    within the current worksheet in accordance with the current layout,
    where the first item from the list returned is the slot and the second
    is the position of the analysis within the slot.
    :param analysis_uid: uid of the analysis the position is requested
    :return: the position (slot + position within slot) of the analysis
    :rtype: list
    """
    # Positions are stored as "slot:position" strings
    encoded = self.uids_strpositions.get(analysis_uid, "")
    parts = encoded.split(":")
    if len(parts) != 2:
        return None
    slot, position = parts
    return [to_int(slot), to_int(position)]
|
python
|
{
"resource": ""
}
|
q20993
|
AnalysesView.fill_empty_slots
|
train
|
def fill_empty_slots(self, items):
    """Append dicts to the items passed in for those slots that don't have
    any analysis assigned but the row needs to be rendered still.
    :param items: dictionary with the items to be rendered in the list
    """
    for pos in self.get_empty_slots():
        # Fake row bound to the worksheet itself; disabled and flagged
        # as reassignable so the user can place analyses in this slot
        item = {
            "obj": self.context,
            "id": self.context.id,
            "uid": self.context.UID(),
            "title": self.context.Title(),
            "type_class": "blank-worksheet-row",
            "url": self.context.absolute_url(),
            "relative_url": self.context.absolute_url(),
            "view_url": self.context.absolute_url(),
            "path": "/".join(self.context.getPhysicalPath()),
            "before": {},
            "after": {},
            "replace": {
                "Pos": "<span class='badge'>{}</span> {}".format(
                    pos, _("Reassignable Slot"))
            },
            "choices": {},
            "class": {},
            "state_class": "state-empty",
            "allow_edit": [],
            "Pos": pos,
            # Zero-padded so lexicographic sort matches numeric order
            "pos_sortkey": "{:010}:{:010}".format(pos, 1),
            "Service": "",
            "Attachments": "",
            "state_title": "",
            "disabled": True,
        }
        items.append(item)
|
python
|
{
"resource": ""
}
|
q20994
|
AnalysesView.fill_slots_headers
|
train
|
def fill_slots_headers(self, items):
    """Generates the header cell for each slot. For each slot, the first
    cell displays information about the parent all analyses within that
    given slot have in common, such as the AR Id, SampleType, etc.
    :param items: dictionary with items to be rendered in the list

    NOTE: items must already be sorted by position -- the logic relies on
    rows of the same slot being consecutive.
    """
    prev_position = 0
    for item in items:
        item_position = item["Pos"]
        if item_position == prev_position:
            # Same slot as the previous row: hide its "Pos" cell, the
            # header cell spans over it via rowspan
            item = self.skip_item_key(item, "Pos")
            # head slot already filled
            continue
        if item.get("disabled", False):
            # empty slot
            continue
        # This is the first analysis found for the given position, add the
        # slot info in there and apply a rowspan accordingly.
        rowspan = self.items_rowspans.get(item_position, 1)
        prev_position = item_position
        item["rowspan"] = {"Pos": rowspan}
        item["replace"]["Pos"] = self.get_slot_header(item)
|
python
|
{
"resource": ""
}
|
q20995
|
AnalysesView.skip_item_key
|
train
|
def skip_item_key(self, item, key):
    """Add the key to the item's "skip" list
    """
    # Create the "skip" list on first use, then append
    item.setdefault("skip", []).append(key)
    return item
|
python
|
{
"resource": ""
}
|
q20996
|
AnalysesView.render_remarks_tag
|
train
|
def render_remarks_tag(self, ar):
    """Renders a remarks image icon

    :param ar: the Analysis Request whose remarks the icon points to
    :returns: HTML image tag, or "" when the AR has no remarks
    """
    if not ar.getRemarks():
        return ""
    uid = api.get_uid(ar)
    url = ar.absolute_url()
    title = ar.Title()
    tooltip = _("Remarks of {}").format(title)
    # Note: The 'href' is picked up by the overlay handler, see
    # bika.lims.worksheet.coffee
    attrs = {
        "css_class": "slot-remarks",
        "style": "cursor: pointer;",
        "title": tooltip,
        "uid": uid,
        "href": "{}/base_view".format(url),
    }
    return get_image("remarks_ico.png", **attrs)
|
python
|
{
"resource": ""
}
|
q20997
|
AnalysisRequest.Description
|
train
|
def Description(self):
    """Returns searchable data as Description"""
    # Combine own id with the parent's title for catalog searches
    text = "%s %s" % (self.getId(), self.aq_parent.Title())
    return safe_unicode(text).encode('utf-8')
|
python
|
{
"resource": ""
}
|
q20998
|
AnalysisRequest.getDefaultMemberDiscount
|
train
|
def getDefaultMemberDiscount(self):
    """Compute default member discount if it applies
    """
    # NOTE: returns None when the accessor is not available at all
    if not hasattr(self, 'getMemberDiscountApplies'):
        return None
    if self.getMemberDiscountApplies():
        return self.bika_setup.getMemberDiscount()
    return "0.00"
|
python
|
{
"resource": ""
}
|
q20999
|
AnalysisRequest.getResponsible
|
train
|
def getResponsible(self):
    """Return all manager info of responsible departments

    :returns: dict with 'ids' (list of manager ids) and 'dict' (manager id
        -> info mapping with salutation, name, email, phone, job_title,
        signature url and a comma-separated 'departments' string)
    """
    managers = {}
    for department in self.getDepartments():
        manager = department.getManager()
        # Departments without a manager are skipped
        if manager is None:
            continue
        manager_id = manager.getId()
        if manager_id not in managers:
            managers[manager_id] = {}
            managers[manager_id]['salutation'] = safe_unicode(
                manager.getSalutation())
            managers[manager_id]['name'] = safe_unicode(
                manager.getFullname())
            managers[manager_id]['email'] = safe_unicode(
                manager.getEmailAddress())
            managers[manager_id]['phone'] = safe_unicode(
                manager.getBusinessPhone())
            managers[manager_id]['job_title'] = safe_unicode(
                manager.getJobTitle())
            if manager.getSignature():
                managers[manager_id]['signature'] = \
                    '{}/Signature'.format(manager.absolute_url())
            else:
                managers[manager_id]['signature'] = False
            managers[manager_id]['departments'] = ''
        # Append this department to the manager's department string,
        # comma-separating when the manager already has some
        mngr_dept = managers[manager_id]['departments']
        if mngr_dept:
            mngr_dept += ', '
        mngr_dept += safe_unicode(department.Title())
        managers[manager_id]['departments'] = mngr_dept
    mngr_keys = managers.keys()
    mngr_info = {'ids': mngr_keys, 'dict': managers}
    return mngr_info
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.