| _id (string) | title (string) | partition (string) | text (string) | language (string) | meta_information (dict) |
|---|---|---|---|---|---|
q21200
|
AnalysesView.get_instruments_vocabulary
|
train
|
def get_instruments_vocabulary(self, analysis_brain):
"""Returns a vocabulary with the valid and active instruments available
for the analysis passed in.
If the option "Allow instrument entry of results" for the Analysis
is disabled, the function returns an empty vocabulary.
If the analysis passed in is a Reference Analysis (Blank or Control),
the invalid instruments will be included in the vocabulary too.
The vocabulary is a list of dictionaries. Each dictionary has the
following structure:
{'ResultValue': <instrument_UID>,
'ResultText': <instrument_Title>}
:param analysis_brain: A single Analysis or ReferenceAnalysis
:type analysis_brain: Analysis or ReferenceAnalysis
:return: A vocabulary with the instruments for the analysis
:rtype: A list of dicts: [{'ResultValue':UID, 'ResultText':Title}]
"""
if not analysis_brain.getInstrumentEntryOfResults:
# Instrument entry of results for this analysis is not allowed
return list()
# If the analysis is a QC analysis, display all instruments, including
# those uncalibrated or for which the last QC test failed.
meta_type = analysis_brain.meta_type
uncalibrated = meta_type == 'ReferenceAnalysis'
if meta_type == 'DuplicateAnalysis':
base_analysis_type = analysis_brain.getAnalysisPortalType
uncalibrated = base_analysis_type == 'ReferenceAnalysis'
uids = analysis_brain.getAllowedInstrumentUIDs
query = {'portal_type': 'Instrument',
'is_active': True,
'UID': uids}
brains = api.search(query, 'bika_setup_catalog')
vocab = [{'ResultValue': '', 'ResultText': _('None')}]
for brain in brains:
instrument = self.get_object(brain)
if uncalibrated and not instrument.isOutOfDate():
# Is a QC analysis, include the instrument even if it is not valid
vocab.append({'ResultValue': instrument.UID(),
'ResultText': instrument.Title()})
elif instrument.isValid():
# Only add the 'valid' instruments: certificate
# on-date and valid internal calibration tests
vocab.append({'ResultValue': instrument.UID(),
'ResultText': instrument.Title()})
return vocab
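# A minimal sketch (illustrative values only, not from the original source)
# of the vocabulary shape returned above; the UID and title are made up:
_example_vocab = [
    {'ResultValue': '', 'ResultText': 'None'},
    {'ResultValue': 'instrument-uid-1', 'ResultText': 'HPLC-01'},
]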
|
python
|
{
"resource": ""
}
|
q21201
|
AnalysesView._folder_item_category
|
train
|
def _folder_item_category(self, analysis_brain, item):
"""Sets the category to the item passed in
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
if not self.show_categories:
return
cat = analysis_brain.getCategoryTitle
item["category"] = cat
cat_order = self.analysis_categories_order.get(cat)
if (cat, cat_order) not in self.categories:
self.categories.append((cat, cat_order))
|
python
|
{
"resource": ""
}
|
q21202
|
AnalysesView._folder_item_duedate
|
train
|
def _folder_item_duedate(self, analysis_brain, item):
"""Set the analysis' due date to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
# Note that if the analysis is a Reference Analysis, `getDueDate`
# returns the date when the ReferenceSample expires. If the analysis is
# a duplicate, `getDueDate` returns the due date of the source analysis
due_date = analysis_brain.getDueDate
if not due_date:
return None
due_date_str = self.ulocalized_time(due_date, long_format=0)
item['DueDate'] = due_date_str
# If the Analysis is late/overdue, display an icon
capture_date = analysis_brain.getResultCaptureDate
capture_date = capture_date or DateTime()
if capture_date > due_date:
# The analysis is late or overdue
img = get_image('late.png', title=t(_("Late Analysis")),
width='16px', height='16px')
item['replace']['DueDate'] = '{} {}'.format(due_date_str, img)
|
python
|
{
"resource": ""
}
|
q21203
|
AnalysesView._folder_item_result
|
train
|
def _folder_item_result(self, analysis_brain, item):
"""Set the analysis' result to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
item["Result"] = ""
if not self.has_permission(ViewResults, analysis_brain):
# If user has no permissions, don't display the result but an icon
img = get_image("to_follow.png", width="16px", height="16px")
item["before"]["Result"] = img
return
result = analysis_brain.getResult
capture_date = analysis_brain.getResultCaptureDate
capture_date_str = self.ulocalized_time(capture_date, long_format=0)
item["Result"] = result
item["CaptureDate"] = capture_date_str
item["result_captured"] = capture_date_str
# Edit mode enabled of this Analysis
if self.is_analysis_edition_allowed(analysis_brain):
# Allow to set Remarks
item["allow_edit"].append("Remarks")
# Set the results field editable
if self.is_result_edition_allowed(analysis_brain):
item["allow_edit"].append("Result")
# Prepare result options
choices = analysis_brain.getResultOptions
if choices:
# N.B. we copy the list here to avoid persistent changes
choices = copy(choices)
# By default set empty as the default selected choice
choices.insert(0, dict(ResultValue="", ResultText=""))
item["choices"]["Result"] = choices
if not result:
return
obj = self.get_object(analysis_brain)
formatted_result = obj.getFormattedResult(
sciformat=int(self.scinot), decimalmark=self.dmk)
item["formatted_result"] = formatted_result
|
python
|
{
"resource": ""
}
|
q21204
|
AnalysesView._folder_item_calculation
|
train
|
def _folder_item_calculation(self, analysis_brain, item):
"""Set the analysis' calculation and interims to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
is_editable = self.is_analysis_edition_allowed(analysis_brain)
# Set interim fields. Note we add the key 'formatted_value' to the list
# of interims the analysis has already assigned.
interim_fields = analysis_brain.getInterimFields or list()
for interim_field in interim_fields:
interim_keyword = interim_field.get('keyword', '')
if not interim_keyword:
continue
interim_value = interim_field.get('value', '')
interim_formatted = formatDecimalMark(interim_value, self.dmk)
interim_field['formatted_value'] = interim_formatted
item[interim_keyword] = interim_field
item['class'][interim_keyword] = 'interim'
# Note: As soon as we have a separate content type for field
# analysis, we can solely rely on the field permission
# "senaite.core: Field: Edit Analysis Result"
if is_editable:
if self.has_permission(FieldEditAnalysisResult, analysis_brain):
item['allow_edit'].append(interim_keyword)
# Add this analysis' interim fields to the interim_columns list
interim_hidden = interim_field.get('hidden', False)
if not interim_hidden:
interim_title = interim_field.get('title')
self.interim_columns[interim_keyword] = interim_title
item['interimfields'] = interim_fields
self.interim_fields[analysis_brain.UID] = interim_fields
# Set calculation
calculation_uid = analysis_brain.getCalculationUID
has_calculation = calculation_uid and True or False
item['calculation'] = has_calculation
|
python
|
{
"resource": ""
}
|
q21205
|
AnalysesView._folder_item_method
|
train
|
def _folder_item_method(self, analysis_brain, item):
"""Fills the analysis' method to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
is_editable = self.is_analysis_edition_allowed(analysis_brain)
method_title = analysis_brain.getMethodTitle
item['Method'] = method_title or ''
if is_editable:
method_vocabulary = self.get_methods_vocabulary(analysis_brain)
if method_vocabulary:
item['Method'] = analysis_brain.getMethodUID
item['choices']['Method'] = method_vocabulary
item['allow_edit'].append('Method')
self.show_methodinstr_columns = True
elif method_title:
item['replace']['Method'] = get_link(analysis_brain.getMethodURL,
method_title)
self.show_methodinstr_columns = True
|
python
|
{
"resource": ""
}
|
q21206
|
AnalysesView._folder_item_instrument
|
train
|
def _folder_item_instrument(self, analysis_brain, item):
"""Fills the analysis' instrument to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
item['Instrument'] = ''
if not analysis_brain.getInstrumentEntryOfResults:
# Manual entry of results, no instrument is allowed
item['Instrument'] = _('Manual')
item['replace']['Instrument'] = \
'<a href="#">{}</a>'.format(t(_('Manual')))
return
# Instrument can be assigned to this analysis
is_editable = self.is_analysis_edition_allowed(analysis_brain)
self.show_methodinstr_columns = True
instrument = self.get_instrument(analysis_brain)
if is_editable:
# Edition allowed
voc = self.get_instruments_vocabulary(analysis_brain)
if voc:
# The service has at least one instrument available
item['Instrument'] = instrument.UID() if instrument else ''
item['choices']['Instrument'] = voc
item['allow_edit'].append('Instrument')
return
if instrument:
# Edition not allowed
instrument_title = instrument and instrument.Title() or ''
instrument_link = get_link(instrument.absolute_url(),
instrument_title)
item['Instrument'] = instrument_title
item['replace']['Instrument'] = instrument_link
return
|
python
|
{
"resource": ""
}
|
q21207
|
AnalysesView._folder_item_uncertainty
|
train
|
def _folder_item_uncertainty(self, analysis_brain, item):
"""Fills the analysis' uncertainty to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
item["Uncertainty"] = ""
if not self.has_permission(ViewResults, analysis_brain):
return
result = analysis_brain.getResult
obj = self.get_object(analysis_brain)
formatted = format_uncertainty(obj, result, decimalmark=self.dmk,
sciformat=int(self.scinot))
if formatted:
item["Uncertainty"] = formatted
else:
item["Uncertainty"] = obj.getUncertainty(result)
if self.is_uncertainty_edition_allowed(analysis_brain):
item["allow_edit"].append("Uncertainty")
|
python
|
{
"resource": ""
}
|
q21208
|
AnalysesView._folder_item_detection_limits
|
train
|
def _folder_item_detection_limits(self, analysis_brain, item):
"""Fills the analysis' detection limits to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
item["DetectionLimitOperand"] = ""
if not self.is_analysis_edition_allowed(analysis_brain):
# Return immediately if we are not in edit mode
return
# TODO: Performance, we wake-up the full object here
obj = self.get_object(analysis_brain)
# No Detection Limit Selection
if not obj.getDetectionLimitSelector():
return None
# Show Detection Limit Operand Selector
item["DetectionLimitOperand"] = obj.getDetectionLimitOperand()
item["allow_edit"].append("DetectionLimitOperand")
self.columns["DetectionLimitOperand"]["toggle"] = True
# Prepare selection list for LDL/UDL
choices = [
{"ResultValue": "", "ResultText": ""},
{"ResultValue": LDL, "ResultText": LDL},
{"ResultValue": UDL, "ResultText": UDL}
]
# Set the choices to the item
item["choices"]["DetectionLimitOperand"] = choices
|
python
|
{
"resource": ""
}
|
q21209
|
AnalysesView._folder_item_specifications
|
train
|
def _folder_item_specifications(self, analysis_brain, item):
"""Set the results range to the item passed in"""
# Everyone can see valid-ranges
item['Specification'] = ''
results_range = analysis_brain.getResultsRange
if not results_range:
return
# Display the specification interval
item["Specification"] = get_formatted_interval(results_range, "")
# Show an icon if out of range
out_range, out_shoulders = is_out_of_range(analysis_brain)
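# e.g. (True, True) -> fully out of range; (True, False) -> out of
# range but still within the shoulder (warning) ranges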
if not out_range:
return
# The result is out of range (maybe only within the shoulders)
img = get_image("exclamation.png", title=_("Result out of range"))
if not out_shoulders:
img = get_image("warning.png", title=_("Result in shoulder range"))
self._append_html_element(item, "Result", img)
|
python
|
{
"resource": ""
}
|
q21210
|
AnalysesView._folder_item_verify_icons
|
train
|
def _folder_item_verify_icons(self, analysis_brain, item):
"""Set the analysis' verification icons to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
submitter = analysis_brain.getSubmittedBy
if not submitter:
# This analysis hasn't yet been submitted, no verification yet
return
if analysis_brain.review_state == 'retracted':
# Don't display icons and additional info about verification
return
verifiers = analysis_brain.getVerificators
in_verifiers = submitter in verifiers
if in_verifiers:
# If analysis has been submitted and verified by the same person,
# display a warning icon
msg = t(_("Submitted and verified by the same user: {}"))
icon = get_image('warning.png', title=msg.format(submitter))
self._append_html_element(item, 'state_title', icon)
num_verifications = analysis_brain.getNumberOfRequiredVerifications
if num_verifications > 1:
# More than one verification required, place an icon and display
# the number of verifications done vs. total required
done = analysis_brain.getNumberOfVerifications
pending = num_verifications - done
ratio = float(done) / float(num_verifications) if done > 0 else 0
ratio = int(ratio * 100)
scale = ratio == 0 and 0 or (ratio / 25) * 25
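# e.g. 1 of 3 verifications done -> ratio = 33 -> scale = 25
# (buckets of 25 used by the css class)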
anchor = "<a href='#' title='{} {} {}' " \
"class='multi-verification scale-{}'>{}/{}</a>"
anchor = anchor.format(t(_("Multi-verification required")),
str(pending),
t(_("verification(s) pending")),
str(scale),
str(done),
str(num_verifications))
self._append_html_element(item, 'state_title', anchor)
if analysis_brain.review_state != 'to_be_verified':
# The analysis is not in 'to_be_verified' state: either it is not
# yet ready for verification or it has already been verified.
# Nothing to do
return
# Check if the user has "Bika: Verify" privileges
if not self.has_permission(TransitionVerify):
# User cannot verify, do nothing
return
username = api.get_current_user().id
if username not in verifiers:
# Current user has not verified this analysis
if submitter != username:
# Current user is neither a submitter nor a verifier
return
# Current user is the same who submitted the result
if analysis_brain.isSelfVerificationEnabled:
# Same user who submitted can verify
title = t(_("Can verify, but submitted by current user"))
html = get_image('warning.png', title=title)
self._append_html_element(item, 'state_title', html)
return
# User who submitted cannot verify
title = t(_("Cannot verify, submitted by current user"))
html = get_image('submitted-by-current-user.png', title=title)
self._append_html_element(item, 'state_title', html)
return
# This user verified this analysis before
multi_verif = self.context.bika_setup.getTypeOfmultiVerification()
if multi_verif != 'self_multi_not_cons':
# Multi verification by same user is not allowed
title = t(_("Cannot verify, was verified by current user"))
html = get_image('submitted-by-current-user.png', title=title)
self._append_html_element(item, 'state_title', html)
return
# Multi-verification by same user, but non-consecutively, is allowed
if analysis_brain.getLastVerificator != username:
# Current user was not the last user to verify
title = t(
_("Can verify, but was already verified by current user"))
html = get_image('warning.png', title=title)
self._append_html_element(item, 'state_title', html)
return
# Last user who verified is the same as current user
title = t(_("Cannot verify, last verified by current user"))
html = get_image('submitted-by-current-user.png', title=title)
self._append_html_element(item, 'state_title', html)
return
|
python
|
{
"resource": ""
}
|
q21211
|
AnalysesView._folder_item_assigned_worksheet
|
train
|
def _folder_item_assigned_worksheet(self, analysis_brain, item):
"""Adds an icon to the item dict if the analysis is assigned to a
worksheet and if the icon is suitable for the current context
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
if not IAnalysisRequest.providedBy(self.context):
# We want this icon to only appear if the context is an AR
return
analysis_obj = self.get_object(analysis_brain)
worksheet = analysis_obj.getWorksheet()
if not worksheet:
# No worksheet assigned. Do nothing
return
title = t(_("Assigned to: ${worksheet_id}",
mapping={'worksheet_id': safe_unicode(worksheet.id)}))
img = get_image('worksheet.png', title=title)
anchor = get_link(worksheet.absolute_url(), img)
self._append_html_element(item, 'state_title', anchor)
|
python
|
{
"resource": ""
}
|
q21212
|
AnalysesView._folder_item_reflex_icons
|
train
|
def _folder_item_reflex_icons(self, analysis_brain, item):
"""Adds an icon to the item dictionary if the analysis has been
automatically generated due to a reflex rule
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
if not analysis_brain.getIsReflexAnalysis:
# Do nothing
return
img = get_image('reflexrule.png',
title=t(_('It comes from a reflex rule')))
self._append_html_element(item, 'Service', img)
|
python
|
{
"resource": ""
}
|
q21213
|
AnalysesView._folder_item_fieldicons
|
train
|
def _folder_item_fieldicons(self, analysis_brain):
"""Resolves if field-specific icons must be displayed for the object
passed in.
:param analysis_brain: Brain that represents an analysis
"""
full_obj = self.get_object(analysis_brain)
uid = api.get_uid(full_obj)
for name, adapter in getAdapters((full_obj,), IFieldIcons):
alerts = adapter()
if not alerts or uid not in alerts:
continue
alerts = alerts[uid]
if uid not in self.field_icons:
self.field_icons[uid] = alerts
continue
self.field_icons[uid].extend(alerts)
|
python
|
{
"resource": ""
}
|
q21214
|
AnalysesView._folder_item_remarks
|
train
|
def _folder_item_remarks(self, analysis_brain, item):
"""Renders the Remarks field for the passed in analysis
If the edition of the analysis is permitted, adds the field into the
list of editable fields.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
if self.analysis_remarks_enabled():
item["Remarks"] = analysis_brain.getRemarks
if self.is_analysis_edition_allowed(analysis_brain):
item["allow_edit"].extend(["Remarks"])
|
python
|
{
"resource": ""
}
|
q21215
|
AnalysesView._append_html_element
|
train
|
def _append_html_element(self, item, element, html, glue=" ",
after=True):
"""Appends an html value after or before the element in the item dict
:param item: dictionary that represents an analysis row
:param element: id of the element after (or before) which the html is added
:param html: html snippet to append
:param glue: glue used to join consecutive snippets
:param after: whether the html is added after (True) or before (False)"""
position = after and 'after' or 'before'
item[position] = item.get(position, {})
original = item[position].get(element, '')
if not original:
item[position][element] = html
return
item[position][element] = glue.join([original, html])
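# A minimal, stdlib-only sketch (added for illustration; names are made up)
# of the merge behaviour above: a second snippet for the same element is
# joined to the first one with the glue.
_item = {'after': {'DueDate': '<span>2018-01-01</span>'}}
_item['after']['DueDate'] = ' '.join([_item['after']['DueDate'], '<img/>'])
assert _item['after']['DueDate'] == '<span>2018-01-01</span> <img/>'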
|
python
|
{
"resource": ""
}
|
q21216
|
sortable_title
|
train
|
def sortable_title(instance):
"""Uses the default Plone sortable_text index lower-case
"""
title = plone_sortable_title(instance)
if safe_callable(title):
title = title()
return title.lower()
|
python
|
{
"resource": ""
}
|
q21217
|
sortable_sortkey_title
|
train
|
def sortable_sortkey_title(instance):
"""Returns a sortable title as a mxin of sortkey + lowercase sortable_title
"""
title = sortable_title(instance)
if safe_callable(title):
title = title()
sort_key = instance.getSortKey()
if sort_key is None:
sort_key = 999999
return "{:010.3f}{}".format(sort_key, title)
|
python
|
{
"resource": ""
}
|
q21218
|
ARTemplateAnalysesView.get_settings
|
train
|
def get_settings(self):
"""Returns a mapping of UID -> setting
"""
settings = self.context.getAnalysisServicesSettings()
mapping = dict(map(lambda s: (s.get("uid"), s), settings))
return mapping
|
python
|
{
"resource": ""
}
|
q21219
|
WorkflowActionAssignAdapter.sorted_analyses
|
train
|
def sorted_analyses(self, analyses):
"""Sort the analyses by AR ID ascending and subsorted by priority
sortkey within the AR they belong to
"""
analyses = sorted(analyses, key=lambda an: an.getRequestID())
def sorted_by_sortkey(objs):
return sorted(objs, key=lambda an: an.getPrioritySortkey())
# Now, we need the analyses within a request ID to be sorted by
# sortkey (sortable_title index), so they appear in the same
# order as in the Analyses list of the AR view
current_sample_id = None
current_analyses = []
sorted_analyses = []
for analysis in analyses:
sample_id = analysis.getRequestID()
if sample_id and current_sample_id != sample_id:
# Sort the brains we've collected until now, that
# belong to the same Analysis Request
current_analyses = sorted_by_sortkey(current_analyses)
sorted_analyses.extend(current_analyses)
current_sample_id = sample_id
current_analyses = []
# Now we are inside the same AR
current_analyses.append(analysis)
continue
# Sort the last set of brains we've collected
current_analyses = sorted_by_sortkey(current_analyses)
sorted_analyses.extend(current_analyses)
return sorted_analyses
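# Stdlib-only sketch (illustrative, made-up values) of the two-level
# ordering above: group by request ID first, then order each group by its
# priority sort key.
_pairs = [('AR-2', '2'), ('AR-1', '9'), ('AR-1', '1')]
_by_request = sorted(_pairs, key=lambda p: p[0])
_within = sorted([p for p in _by_request if p[0] == 'AR-1'],
                 key=lambda p: p[1])
assert _within == [('AR-1', '1'), ('AR-1', '9')]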
|
python
|
{
"resource": ""
}
|
q21220
|
InstrumentMaintenanceTask.getMaintenanceTypes
|
train
|
def getMaintenanceTypes(self):
""" Return the current list of maintenance types
"""
types = [('Preventive', safe_unicode(_('Preventive')).encode('utf-8')),
('Repair', safe_unicode(_('Repair')).encode('utf-8')),
('Enhancement', safe_unicode(_('Enhancement')).encode('utf-8'))]
return DisplayList(types)
|
python
|
{
"resource": ""
}
|
q21221
|
after_unassign
|
train
|
def after_unassign(duplicate_analysis):
"""Removes the duplicate from the system
"""
analysis_events.after_unassign(duplicate_analysis)
parent = duplicate_analysis.aq_parent
logger.info("Removing duplicate '{}' from '{}'"
.format(duplicate_analysis.getId(), parent.getId()))
parent.manage_delObjects([duplicate_analysis.getId()])
|
python
|
{
"resource": ""
}
|
q21222
|
after_retract
|
train
|
def after_retract(duplicate_analysis):
"""Function triggered after a 'retract' transition for the duplicate passed
in is performed. The duplicate transitions to "retracted" state and a new
copy of the duplicate is created.
"""
# Rename the analysis to make way for its successor.
# Support multiple retractions by renaming to *-0, *-1, etc
parent = duplicate_analysis.aq_parent
keyword = duplicate_analysis.getKeyword()
analyses = filter(lambda an: an.getKeyword() == keyword,
parent.objectValues("DuplicateAnalysis"))
# Rename the retracted duplicate
# https://docs.plone.org/develop/plone/content/rename.html
# _verifyObjectPaste permission check must be cancelled
parent._verifyObjectPaste = str
retracted_id = '{}-{}'.format(keyword, len(analyses))
# Make sure all persistent objects have _p_jar attribute
transaction.savepoint(optimistic=True)
parent.manage_renameObject(duplicate_analysis.getId(), retracted_id)
delattr(parent, '_verifyObjectPaste')
# Find out the slot position of the duplicate in the worksheet
worksheet = duplicate_analysis.getWorksheet()
if not worksheet:
logger.warn("Duplicate {} has been retracted, but without worksheet"
.format(duplicate_analysis.getId()))
return
dest_slot = worksheet.get_slot_position_for(duplicate_analysis)
if not dest_slot:
logger.warn("Duplicate {} has been retracted, but not found in any"
"slot of worksheet {}"
.format(duplicate_analysis.getId(), worksheet.getId()))
return
# Create a copy (retest) of the duplicate and assign to worksheet
ref_gid = duplicate_analysis.getReferenceAnalysesGroupID()
retest = _createObjectByType("DuplicateAnalysis", worksheet, tmpID())
copy_analysis_field_values(duplicate_analysis, retest)
retest.setAnalysis(duplicate_analysis.getAnalysis())
retest.setRetestOf(duplicate_analysis)
retest.setReferenceAnalysesGroupID(ref_gid)
retest.setResult(duplicate_analysis.getResult())
worksheet.addToLayout(retest, dest_slot)
worksheet.setAnalyses(worksheet.getAnalyses() + [retest, ])
# Reindex
retest.reindexObject(idxs=["getAnalyst", "getWorksheetUID", "isRetest",
"getReferenceAnalysesGroupID"])
worksheet.reindexObject(idxs=["getAnalysesUIDs"])
# Try to rollback the worksheet to prevent inconsistencies
doActionFor(worksheet, "rollback_to_open")
|
python
|
{
"resource": ""
}
|
q21223
|
MasshunterQuantCSVParser.parse_sequencetableline
|
train
|
def parse_sequencetableline(self, line):
""" Parses sequence table lines
Sequence Table example:
Sequence Table,,,,,,,,,,,,,,,,,
Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,,
prerunrespchk.d,prerunrespchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
DSS_Nist_L1.d,DSS_Nist_L1,P1-A2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
DSS_Nist_L2.d,DSS_Nist_L2,P1-B2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
DSS_Nist_L3.d,DSS_Nist_L3,P1-C2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
UTAK_DS_L1.d,UTAK_DS_L1,P1-D2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
UTAK_DS_L2.d,UTAK_DS_L2,P1-E2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
mid_respchk.d,mid_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
UTAK_DS_low.d,UTAK_DS_Low,P1-F2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
FDBS_31.d,FDBS_31,P1-G2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
FDBS_32.d,FDBS_32,P1-H2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
LS_60-r001.d,LS_60,P1-G12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
LS_60-r002.d,LS_60,P1-G12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
LS_61-r001.d,LS_61,P1-H12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
LS_61-r002.d,LS_61,P1-H12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
post_respchk.d,post_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
,,,,,,,,,,,,,,,,,
"""
# Sequence Table,,,,,,,,,,,,,,,,,
# prerunrespchk.d,prerunrespchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
# mid_respchk.d,mid_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
# ,,,,,,,,,,,,,,,,,
if line.startswith(self.SEQUENCETABLE_KEY) \
or line.startswith(self.SEQUENCETABLE_PRERUN) \
or line.startswith(self.SEQUENCETABLE_MIDRUN) \
or self._end_sequencetable:
# Nothing to do, continue
return 0
# Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,,
if line.startswith(self.SEQUENCETABLE_HEADER_DATAFILE):
self._sequencesheader = [token.strip() for token in line.split(',') if token.strip()]
return 0
# post_respchk.d,post_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
# Quantitation Results,,,,,,,,,,,,,,,,,
if line.startswith(self.SEQUENCETABLE_POSTRUN) \
or line.startswith(self.QUANTITATIONRESULTS_KEY) \
or line.startswith(self.COMMAS):
self._end_sequencetable = True
if len(self._sequences) == 0:
self.err("No Sequence Table found", linenum=self._numline)
return -1
# Jumps 2 lines:
# Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,,
# prerunrespchk.d,prerunrespchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
return 2
# DSS_Nist_L1.d,DSS_Nist_L1,P1-A2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
splitted = [token.strip() for token in line.split(',')]
sequence = {}
for colname in self._sequencesheader:
sequence[colname] = ''
for i in range(len(splitted)):
token = splitted[i]
if i < len(self._sequencesheader):
colname = self._sequencesheader[i]
if token and colname in self.SEQUENCETABLE_NUMERICHEADERS:
try:
sequence[colname] = float(token)
except ValueError:
self.warn(
"No valid number ${token} in column ${index} (${column_name})",
mapping={"token": token,
"index": str(i + 1),
"column_name": colname},
numline=self._numline, line=line)
sequence[colname] = token
else:
sequence[colname] = token
elif token:
self.err("Orphan value in column ${index} (${token})",
mapping={"index": str(i+1),
"token": token},
numline=self._numline, line=line)
self._sequences.append(sequence)
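# Stdlib-only sketch (illustrative) of the header-to-value mapping performed
# above for a data line of the Sequence Table:
_header = ['Data File', 'Sample Name', 'Position']
_line = 'DSS_Nist_L1.d,DSS_Nist_L1,P1-A2'
_sequence = dict(zip(_header, [tok.strip() for tok in _line.split(',')]))
assert _sequence['Sample Name'] == 'DSS_Nist_L1'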
|
python
|
{
"resource": ""
}
|
q21224
|
JSONReadExtender.render_template_partitions
|
train
|
def render_template_partitions(self):
"""
Supplies a more detailed view of the Partitions for this
template. It's built to mimic the partitions that are stored in the
ar_add form state variable, so that when a partition is chosen, there
is no further translation necessary.
It combines the Analyses and Partitions AT schema field values.
For some fields (separate, minvol) there is no information when
partitions are specified in the AR Template.
:return: a list of dictionaries like this:
container
[]
container_titles
[]
preservation
[]
preservation_titles
[]
separate
false
minvol
"0.0000 m3 "
services
["2fdc040e05bb42ca8b52e41761fdb795", 6 more...]
service_titles
["Copper", "Iron", "Magnesium", 4 more...]
"""
Analyses = self.context.Schema()['Analyses'].get(self.context)
Parts = self.context.Schema()['Partitions'].get(self.context)
if not Parts:
# default value copied in from content/artemplate.py
Parts = [{'part_id': 'part-1',
'Container': '',
'Preservation': '',
'container_uid': '',
'preservation_uid': ''}]
parts = []
not_found = set()
for Part in Parts:
part = {
'part_id': Part.get("part_id", "part-1"),
'container_titles': Part.get("Container", ""),
'container': Part.get("container_uid", ""),
'preservation_titles': Part.get("Preservation", ""),
'preservation': Part.get("preservation_uid", ""),
'services': [],
'service_titles': [],
}
for analysis in Analyses:
uid = analysis['service_uid']
partition = analysis['partition']
if partition == part['part_id']:
part['services'].append(uid)
part['service_titles'].append(uid)
not_found.discard(analysis['service_uid'])
else:
if uid in part['services']:
part['services'].remove(uid)
if uid in part['service_titles']:
part['service_titles'].remove(uid)
not_found.add(analysis['service_uid'])
parts.append(part)
# all others go into the first part. Mostly this will be due to
# partition info not being defined?
for uid in not_found:
if uid not in part['services']:
parts[0]['services'].append(uid)
if uid not in part['service_titles']:
parts[0]['service_titles'].append(uid)
return parts
|
python
|
{
"resource": ""
}
|
q21225
|
ComboBoxWidget.process_form
|
train
|
def process_form(self, instance, field, form, empty_marker=None,
emptyReturnsMarker=False):
"""A typed in value takes precedence over a selected value.
"""
name = field.getName()
otherName = "%s_other" % name
value = form.get(otherName, empty_marker)
regex = field.widget.field_regex
# validate the custom value against the given regex
if value and not re.match(regex, value):
value = None
# If value is an empty string we check if the selection box
# has a usable value.
if value is empty_marker or not value:
value = form.get(name, empty_marker)
if value is empty_marker:
return empty_marker
if not value and emptyReturnsMarker:
return empty_marker
return value, {}
|
python
|
{
"resource": ""
}
|
q21226
|
searchResults
|
train
|
def searchResults(self, REQUEST=None, used=None, **kw):
"""Search the catalog
Search terms can be passed in the REQUEST or as keyword
arguments.
The used argument is now deprecated and ignored
"""
if REQUEST and REQUEST.get('getRequestUID') \
and self.id == CATALOG_ANALYSIS_LISTING:
# Fetch all analyses that have the request UID passed in as an
# ancestor, because we want Primary ARs to always display the
# analyses from their derived ARs too
request = REQUEST.copy()
orig_uid = request.get('getRequestUID')
# If a list of request uid, retrieve them sequentially to make the
# masking process easier
if isinstance(orig_uid, list):
results = list()
for uid in orig_uid:
request['getRequestUID'] = [uid]
results += self.searchResults(REQUEST=request, used=used, **kw)
return results
# Get all analyses, those from descendant ARs included
del request['getRequestUID']
request['getAncestorsUIDs'] = orig_uid
results = self.searchResults(REQUEST=request, used=used, **kw)
# Masking
primary = filter(lambda an: an.getParentUID == orig_uid, results)
derived = filter(lambda an: an.getParentUID != orig_uid, results)
derived_keys = map(lambda an: an.getKeyword, derived)
results = filter(lambda an: an.getKeyword not in derived_keys, primary)
return results + derived
# Normal search
return self._catalog.searchResults(REQUEST, used, **kw)
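# Stdlib-only sketch (illustrative, made-up keywords) of the masking rule
# above: a primary analysis is hidden when a derived AR already provides an
# analysis with the same keyword.
_primary_keywords = ['Cu', 'Fe']
_derived_keywords = ['Cu']
_visible = [kw for kw in _primary_keywords if kw not in _derived_keywords]
assert _visible == ['Fe']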
|
python
|
{
"resource": ""
}
|
q21227
|
barcode_entry.handle_Sample
|
train
|
def handle_Sample(self, instance):
"""If this sample has a single AR, go there.
If the sample has 0 or >1 ARs, go to the sample's view URL.
"""
ars = instance.getAnalysisRequests()
if len(ars) == 1:
return self.handle_AnalysisRequest(ars[0])
else:
return instance.absolute_url()
|
python
|
{
"resource": ""
}
|
q21228
|
set_sample_type_default_stickers
|
train
|
def set_sample_type_default_stickers(portal):
"""
Fills the admitted stickers and their default stickers to every sample
type.
"""
# Getting all sticker templates
stickers = getStickerTemplates()
sticker_ids = []
for sticker in stickers:
sticker_ids.append(sticker.get('id'))
def_small_template = portal.bika_setup.getSmallStickerTemplate()
def_large_template = portal.bika_setup.getLargeStickerTemplate()
# Getting all Sample Type objects
catalog = api.get_tool('bika_setup_catalog')
brains = catalog(portal_type='SampleType')
for brain in brains:
obj = api.get_object(brain)
if obj.getAdmittedStickers() is not None:
continue
obj.setAdmittedStickers(sticker_ids)
obj.setDefaultLargeSticker(def_large_template)
obj.setDefaultSmallSticker(def_small_template)
|
python
|
{
"resource": ""
}
|
q21229
|
ARAnalysesField.get
|
train
|
def get(self, instance, **kwargs):
"""Returns a list of Analyses assigned to this AR
Return a list of catalog brains unless `full_objects=True` is passed.
Other keyword arguments are passed to bika_analysis_catalog
:param instance: Analysis Request object
:param kwargs: Keyword arguments to inject in the search query
:returns: A list of Analysis Objects/Catalog Brains
"""
catalog = getToolByName(instance, CATALOG_ANALYSIS_LISTING)
query = dict(
[(k, v) for k, v in kwargs.items() if k in catalog.indexes()])
query["portal_type"] = "Analysis"
query["getRequestUID"] = api.get_uid(instance)
analyses = catalog(query)
if not kwargs.get("full_objects", False):
return analyses
return map(api.get_object, analyses)
|
python
|
{
"resource": ""
}
|
q21230
|
ARAnalysesField._get_services
|
train
|
def _get_services(self, full_objects=False):
"""Fetch and return analysis service objects
"""
bsc = api.get_tool("bika_setup_catalog")
brains = bsc(portal_type="AnalysisService")
if full_objects:
return map(api.get_object, brains)
return brains
|
python
|
{
"resource": ""
}
|
q21231
|
ARAnalysesField._to_service
|
train
|
def _to_service(self, thing):
"""Convert to Analysis Service
:param thing: UID/Catalog Brain/Object/Something
:returns: Analysis Service object or None
"""
# Convert UIDs to objects
if api.is_uid(thing):
thing = api.get_object_by_uid(thing, None)
# Bail out if the thing is not a valid object
if not api.is_object(thing):
logger.warn("'{}' is not a valid object!".format(repr(thing)))
return None
# Ensure we have an object here and not a brain
obj = api.get_object(thing)
if IAnalysisService.providedBy(obj):
return obj
if IAnalysis.providedBy(obj):
return obj.getAnalysisService()
# An object, but neither an Analysis nor AnalysisService?
# This should never happen.
portal_type = api.get_portal_type(obj)
logger.error("ARAnalysesField doesn't accept objects from {} type. "
"The object will be dismissed.".format(portal_type))
return None
|
python
|
{
"resource": ""
}
|
q21232
|
ARAnalysesField._update_specs
|
train
|
def _update_specs(self, instance, specs):
"""Update AR specifications
:param instance: Analysis Request
:param specs: List of Specification Records
"""
if specs is None:
return
# N.B. we copy the records here, otherwise the spec will be written to
# the attached specification of this AR
rr = {item["keyword"]: item.copy()
for item in instance.getResultsRange()}
for spec in specs:
keyword = spec.get("keyword")
if keyword in rr:
# overwrite the instance specification only if the specific
# analysis spec has min/max values set
if all([spec.get("min"), spec.get("max")]):
rr[keyword].update(spec)
else:
rr[keyword] = spec
else:
rr[keyword] = spec
return instance.setResultsRange(rr.values())
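# Stdlib-only sketch (illustrative values) of the merge rule above: an
# incoming spec only overrides an existing keyword when both its min and
# max are non-empty.
_rr = {'Cu': {'keyword': 'Cu', 'min': '1', 'max': '5'}}
_spec = {'keyword': 'Cu', 'min': '', 'max': '10'}
if all([_spec.get('min'), _spec.get('max')]):
    _rr['Cu'].update(_spec)
assert _rr['Cu']['max'] == '5'  # unchanged: the incoming min was empty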
|
python
|
{
"resource": ""
}
|
q21233
|
AnalysisProfile.getTotalPrice
|
train
|
def getTotalPrice(self):
"""
Computes the final price using the VATAmount and the subtotal price
"""
price, vat = self.getAnalysisProfilePrice(), self.getVATAmount()
return float(price) + float(vat)
|
python
|
{
"resource": ""
}
|
q21234
|
EasyQParser.xlsx_to_csv
|
train
|
def xlsx_to_csv(self, infile, worksheet=0, delimiter=","):
""" Convert xlsx to easier format first, since we want to use the
convenience of the CSV library
"""
wb = load_workbook(infile)
sheet = wb.worksheets[worksheet]
buffer = StringIO()
# extract all rows
for n, row in enumerate(sheet.rows):
line = []
for cell in row:
value = cell.value
if type(value) in types.StringTypes:
value = value.encode("utf8")
if value is None:
value = ""
line.append(str(value))
print >>buffer, delimiter.join(line)
buffer.seek(0)
return buffer
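# Hypothetical usage (parser/file names assumed, not from the original
# source): the returned StringIO can be fed straight to the csv module:
# import csv
# rows = list(csv.reader(parser.xlsx_to_csv(infile), delimiter=","))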
|
python
|
{
"resource": ""
}
|
q21235
|
SamplePoint.getSampleTypeTitles
|
train
|
def getSampleTypeTitles(self):
"""Returns a list of sample type titles
"""
sample_types = self.getSampleTypes()
sample_type_titles = map(lambda obj: obj.Title(), sample_types)
# N.B. This is used only for search purpose, because the catalog does
# not add an entry to the Keywordindex for an empty list.
#
# => This "empty" category allows to search for values with a certain
# sample type set OR with no sample type set.
# (see bika.lims.browser.analysisrequest.add2.get_sampletype_info)
if not sample_type_titles:
return [""]
return sample_type_titles
|
python
|
{
"resource": ""
}
|
q21236
|
contentmenu_factories_available
|
train
|
def contentmenu_factories_available(self):
"""These types will have their Add New... factories dropdown menu removed.
"""
if hasattr(self._addContext(), 'portal_type') \
and self._addContext().portal_type in [
'ARImport',
'Batch',
'Client',
'AnalysisRequest',
'Worksheet',
'AnalysisCategory',
'AnalysisProfile',
'ARTemplate',
'AnalysisService',
'AnalysisSpec',
'Attachment',
'Calculation',
'Instrument',
'LabContact',
'Laboratory',
'Method',
'Department',
'ReferenceDefinition',
'ReportFolder',
'SampleType',
'SamplePoint',
'StorageLocation',
'WorksheetTemplate',
'LabProduct',
'ReferenceSample',
'Preservation'
]:
return False
else:
itemsToAdd = self._itemsToAdd()
showConstrainOptions = self._showConstrainOptions()
if self._addingToParent() and not self.context_state.is_default_page():
return False
return (len(itemsToAdd) > 0 or showConstrainOptions)
|
python
|
{
"resource": ""
}
|
q21237
|
ReportsListingView.get_filesize
|
train
|
def get_filesize(self, pdf):
"""Compute the filesize of the PDF
"""
try:
filesize = float(pdf.get_size())
return filesize / 1024
except (POSKeyError, TypeError):
return 0
|
python
|
{
"resource": ""
}
|
q21238
|
TX1800iParser._submit_results
|
train
|
def _submit_results(self):
"""
Adding current values as a Raw Result and Resetting everything.
"""
if self._cur_res_id and self._cur_values:
# Setting DefaultResult just because it is obligatory.
self._addRawResult(self._cur_res_id, self._cur_values)
self._reset()
|
python
|
{
"resource": ""
}
|
q21239
|
ThermoScientificMultiskanCSVParser.parse_data
|
train
|
def parse_data(self, sline):
"""This function builds the addRawResults dictionary using the header values of the labels section
as sample Ids.
"""
if sline[0] == '':
return 0
for idx, label in enumerate(self._labels_values[sline[0]]):
if label != '':
self._addRawResult(label.split(' ')[0], {self.analysiskey: sline[1:][idx]}, False)
return 0
|
python
|
{
"resource": ""
}
|
q21240
|
ContactLoginDetailsView.get_users
|
train
|
def get_users(self):
"""Get all users of the portal
"""
# We make use of the existing controlpanel `@@usergroup-userprefs`
# view logic to make sure we get all users from all plugins (e.g. LDAP)
users_view = UsersOverviewControlPanel(self.context, self.request)
return users_view.doSearch("")
|
python
|
{
"resource": ""
}
|
q21241
|
ContactLoginDetailsView.get_user_properties
|
train
|
def get_user_properties(self):
"""Return the properties of the User
"""
user = self.context.getUser()
# No User linked, nothing to do
if user is None:
return {}
out = {}
plone_user = user.getUser()
userid = plone_user.getId()
for sheet in plone_user.listPropertysheets():
ps = plone_user.getPropertysheet(sheet)
out.update(dict(ps.propertyItems()))
portal = api.get_portal()
mtool = getToolByName(self.context, 'portal_membership')
out["id"] = userid
out["portrait"] = mtool.getPersonalPortrait(id=userid)
out["edit_url"] = "{}/@@user-information?userid={}".format(
portal.absolute_url(), userid)
return out
|
python
|
{
"resource": ""
}
|
q21242
|
ContactLoginDetailsView.linkable_users
|
train
|
def linkable_users(self):
"""Search Plone users which are not linked to a contact or lab contact
"""
# Only users with at most these roles are displayed
linkable_roles = {"Authenticated", "Member", "Client"}
out = []
for user in self.get_users():
userid = user.get("id", None)
if userid is None:
continue
# Skip users which are already linked to a Contact
contact = Contact.getContactByUsername(userid)
labcontact = LabContact.getContactByUsername(userid)
if contact or labcontact:
continue
if self.is_contact():
# Checking Plone user belongs to Client group only. Otherwise,
# weird things could happen (a client contact assigned to a
# user with labman privileges, different contacts from
# different clients assigned to the same user, etc.)
user_roles = security.get_roles(user=userid)
if not linkable_roles.issuperset(set(user_roles)):
continue
userdata = {
"userid": userid,
"email": user.get("email"),
"fullname": user.get("title"),
}
# filter out users which do not match the searchstring
if self.searchstring:
s = self.searchstring.lower()
if not any(
map(lambda v: re.search(s, str(v).lower()),
userdata.values())):
continue
# update data (maybe for later use)
userdata.update(user)
# Append the userdata for the results
out.append(userdata)
out.sort(lambda x, y: cmp(x["fullname"], y["fullname"]))
return out
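# Stdlib-only sketch (illustrative roles) of the guard above: a user is
# linkable only when all of its roles are contained in the linkable set.
_linkable = {"Authenticated", "Member", "Client"}
assert _linkable.issuperset({"Member", "Client"})
assert not _linkable.issuperset({"Member", "LabManager"})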
|
python
|
{
"resource": ""
}
|
q21243
|
ContactLoginDetailsView._link_user
|
train
|
def _link_user(self, userid):
"""Link an existing user to the current Contact
"""
# check if we have a selected user from the search-list
if userid:
try:
self.context.setUser(userid)
self.add_status_message(
_("User linked to this Contact"), "info")
except ValueError, e:
self.add_status_message(e, "error")
else:
self.add_status_message(
_("Please select a User from the list"), "info")
|
python
|
{
"resource": ""
}
|
q21244
|
ContactLoginDetailsView.add_status_message
|
train
|
def add_status_message(self, message, severity="info"):
"""Set a portal message
"""
self.context.plone_utils.addPortalMessage(message, severity)
|
python
|
{
"resource": ""
}
|
q21245
|
_cache_key_select_state
|
train
|
def _cache_key_select_state(method, self, workflow_id, field_id, field_title):
"""
This function returns the key used to decide if select_state has to be recomputed
"""
key = update_timer(), workflow_id, field_id, field_title
return key
|
python
|
{
"resource": ""
}
|
q21246
|
_cache_key_select_analysisservice
|
train
|
def _cache_key_select_analysisservice(method, self, allow_blank,
multiselect, style=None):
"""
This function returns the key used to decide if method select_analysisservice has to be recomputed
"""
key = update_timer(), allow_blank, multiselect, style
return key
|
python
|
{
"resource": ""
}
|
q21247
|
_cache_key_select_analyst
|
train
|
def _cache_key_select_analyst(method, self, allow_blank=False, style=None):
"""
This function returns the key used to decide if method select_analyst has to be recomputed
"""
key = update_timer(), allow_blank, style
return key
|
python
|
{
"resource": ""
}
|
q21248
|
_cache_key_select_user
|
train
|
def _cache_key_select_user(method, self, allow_blank=True, style=None):
"""
This function returns the key used to decide if method select_user has to be recomputed
"""
key = update_timer(), allow_blank, style
return key
|
python
|
{
"resource": ""
}
|
q21249
|
_cache_key_select_daterange
|
train
|
def _cache_key_select_daterange(method, self, field_id, field_title, style=None):
"""
This function returns the key used to decide if method select_daterange has to be recomputed
"""
key = update_timer(), field_id, field_title, style
return key
|
python
|
{
"resource": ""
}
|
q21250
|
_cache_key_select_sample_type
|
train
|
def _cache_key_select_sample_type(method, self, allow_blank=True, multiselect=False, style=None):
"""
This function returns the key used to decide if method select_sample_type has to be recomputed
"""
key = update_timer(), allow_blank, multiselect, style
return key
|
python
|
{
"resource": ""
}
|
q21251
|
ReferenceAnalysis.getResultsRange
|
train
|
def getResultsRange(self):
"""Returns the valid result range for this reference analysis based on
the results ranges defined in the Reference Sample from which this
analysis has been created.
A Reference Analysis (control or blank) will be considered out of range
if its result does not match the result defined on its parent
Reference Sample, with the % error as the margin of error used to
set the range's min and max values
:return: A dictionary with the keys min and max
:rtype: dict
"""
specs = ResultsRangeDict(result="")
sample = self.getSample()
if not sample:
return specs
service_uid = self.getServiceUID()
sample_range = sample.getResultsRangeDict()
return sample_range.get(service_uid, specs)
|
python
|
{
"resource": ""
}
|
q21252
|
ServicesWidget.process_form
|
train
|
def process_form(self, instance, field, form, empty_marker=None,
emptyReturnsMarker=False, validating=True):
"""Return UIDs of the selected services
"""
service_uids = form.get("uids", [])
return service_uids, {}
|
python
|
{
"resource": ""
}
|
q21253
|
ServicesWidget.Services
|
train
|
def Services(self, field, show_select_column=True):
"""Render Analyses Services Listing Table
"""
instance = getattr(self, "instance", field.aq_parent)
table = api.get_view("table_analyses_services",
context=instance,
request=self.REQUEST)
# Call listing hooks
table.update()
table.before_render()
return table.ajax_contents_table()
|
python
|
{
"resource": ""
}
|
q21254
|
get_user
|
train
|
def get_user(user=None):
"""Get the user object
:param user: A user id, memberdata object or None for the current user
:returns: Plone User (PlonePAS) / Propertied User (PluggableAuthService)
"""
if user is None:
# Return the current authenticated user
user = getSecurityManager().getUser()
elif isinstance(user, MemberData):
# MemberData wrapped user -> get the user object
user = user.getUser()
elif isinstance(user, basestring):
# User ID -> get the user
user = get_member_by_login_name(get_portal(), user, False)
if user:
user = user.getUser()
return user
|
python
|
{
"resource": ""
}
|
q21255
|
get_group
|
train
|
def get_group(group):
"""Return the group
:param group: The group name/id
:returns: Group
"""
portal_groups = get_tool("portal_groups")
if isinstance(group, basestring):
group = portal_groups.getGroupById(group)
elif isinstance(group, GroupData):
group = group
return group
|
python
|
{
"resource": ""
}
|
q21256
|
get_groups
|
train
|
def get_groups(user=None):
"""Return the groups of the user
:param user: A user id, memberdata object or None for the current user
:returns: List of groups
"""
portal_groups = get_tool("portal_groups")
user = get_user(user)
if user is None:
return []
return portal_groups.getGroupsForPrincipal(user)
|
python
|
{
"resource": ""
}
|
q21257
|
add_group
|
train
|
def add_group(group, user=None):
"""Add the user to the group
"""
user = get_user(user)
if user is None:
raise ValueError("User '{}' not found".format(repr(user)))
if isinstance(group, basestring):
group = [group]
elif isinstance(group, GroupData):
group = [group]
portal_groups = get_tool("portal_groups")
for group in map(get_group, group):
if group is None:
continue
portal_groups.addPrincipalToGroup(get_user_id(user), group.getId())
return get_groups(user)
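# Hypothetical usage (group id assumed, not from the original source):
# add the current user to the 'Clients' group and get the updated groups:
# groups = add_group("Clients")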
|
python
|
{
"resource": ""
}
|
q21258
|
fix_broken_calculations
|
train
|
def fix_broken_calculations():
"""Walks-through calculations associated to undergoing analyses and
resets the value for DependentServices field"""
logger.info("Fixing broken calculations (re-assignment of dependents)...")
# Fetch only the subset of analyses that are undergoing.
# Analyses that have been verified or published cannot be updated, so there
# is no sense to check their calculations
review_states = [
'attachment_due',
'not_requested',
'rejected',
'retracted',
'sample_due',
'sample_prep',
'sample_received',
'sample_registered',
'sampled',
'to_be_preserved',
'to_be_sampled',
]
uc = api.get_tool('uid_catalog')
catalog = get_tool(CATALOG_ANALYSIS_LISTING)
brains = catalog(portal_type='Analysis', review_state=review_states)
for brain in brains:
analysis = brain.getObject()
calculation = analysis.getCalculation()
if not calculation:
continue
dependents = calculation.getDependentServices()
# We don't want eventualities such as [None,]
dependents = filter(None, dependents)
if not dependents:
# Assign the formula again to the calculation. Note the function
# setFormula infers the dependent services (and stores them) by
# inspecting the keywords set in the formula itself. So, instead
# of doing this job here, we just let setFormula do the work
# for us.
formula = calculation.getFormula()
calculation.setFormula(formula)
deps = calculation.getDependentServices()
if not deps:
# Ok, this calculation does not depend on the result of other
# analyses, so we can skip this one, it is already ok
continue
deps = [dep.getKeyword() for dep in deps]
deps = ', '.join(deps)
arid = analysis.getRequestID()
logger.info("Dependents for {}.{}.{}: {}".format(arid,
analysis.getKeyword(),
calculation.Title(),
deps))
# Set the calculation to the analysis again (field Calculation is a
# HistoryAwareReferenceField in analyses that inherit from
# AbstractRoutineAnalysis)
analysis.setCalculation(calculation)
# Check if all is ok
an_deps = analysis.getCalculation().getDependentServices()
if not an_deps:
# Maybe the version of the calculation is an old one. If so, we
# need to use the last version, cause HistoryAwareReferenceField
# will always point to the version assigned to the calculation
# that was associated to the analysis.
uid = calculation.UID()
target_version = analysis.reference_versions[uid]
last_calc = uc(UID=uid)
if not last_calc:
# This should not happen
logger.warn("No calculation found for %s" % uid)
continue
last_calc = last_calc[0].getObject()
if last_calc.version_id != target_version:
# Ok, this is another version. We have no choice here... we
# need to assign the latest version...
analysis.reference_versions[uid] = last_calc.version_id
# Just in case
analysis.reindexObject()
|
python
|
{
"resource": ""
}
|
q21259
|
UpgradeReferenceFields
|
train
|
def UpgradeReferenceFields():
"""Convert all ReferenceField's values into UIDReferenceFields.
These are not touched (HistoryAware, to be removed):
- Analysis.Calculation: HistoryAwareReferenceField (rel=
AnalysisCalculation)
- DuplicateAnalysis.Calculation: HistoryAwareReferenceField (rel=
DuplicateAnalysisCalculation)
- RejectAnalysis.Calculation: HistoryAwareReferenceField (rel=
RejectAnalysisCalculation)
These are not touched: They are deprecated and will be removed:
- AnalysisRequest.Profile: ReferenceField (rel=AnalysisRequestProfile)
- LabContact.Department ReferenceField (rel=LabContactDepartment)
The remaining fields are listed below.
"""
# Change these carefully
# they were made slowly with love
# still they may be wrong.
for portal_type, fields in [
# portal_type
['ARReport', [
('AnalysisRequest', 'ARReportAnalysisRequest')
]],
['Analysis', [
# AbstractBaseAnalysis
('Category', 'AnalysisCategory'),
('Department', 'AnalysisDepartment'),
('Instrument', 'AnalysisInstrument'),
('Method', 'AnalysisMethod'),
# AbstractAnalysis
('AnalysisService', 'AnalysisAnalysisService'),
('Attachment', 'AnalysisAttachment'),
# AbstractRoutineAnalysis
('OriginalReflexedAnalysis', 'AnalysisOriginalReflexedAnalysis'),
('ReflexAnalysisOf', 'AnalysisReflexAnalysisOf'),
('SamplePartition', 'AnalysisSamplePartition')
]],
['ReferenceAnalysis', [
# AbstractBaseAnalysis
('Category', 'AnalysisCategory'),
('Department', 'AnalysisDepartment'),
('Instrument', 'AnalysisInstrument'),
('Method', 'AnalysisMethod'),
# AbstractAnalysis
('AnalysisService', 'AnalysisAnalysisService'),
('Attachment', 'AnalysisAttachment'),
]],
['DuplicateAnalysis', [
# AbstractBaseAnalysis
('Category', 'AnalysisCategory'),
('Department', 'AnalysisDepartment'),
('Instrument', 'AnalysisInstrument'),
('Method', 'AnalysisMethod'),
# AbstractAnalysis
('AnalysisService', 'AnalysisAnalysisService'),
('Attachment', 'AnalysisAttachment'),
# AbstractRoutineAnalysis
('OriginalReflexedAnalysis', 'AnalysisOriginalReflexedAnalysis'),
('ReflexAnalysisOf', 'AnalysisReflexAnalysisOf'),
('SamplePartition', 'AnalysisSamplePartition'),
# DuplicateAnalysis
('Analysis', 'DuplicateAnalysisAnalysis'),
]],
['AnalysisService', [
# AbstractBaseAnalysis
('Category', 'AnalysisCategory'),
('Department', 'AnalysisDepartment'),
('Instrument', 'AnalysisInstrument'),
('Method', 'AnalysisMethod'),
# AnalysisService
('Calculation', 'AnalysisServiceCalculation'),
('Container', 'AnalysisServiceContainer'),
('Instruments', 'AnalysisServiceInstruments'),
('Methods', 'AnalysisServiceMethods'),
('Preservation', 'AnalysisServicePreservation'),
# Backward compatibility with < 1.0.0
('Calculation', 'AnalysisServiceCalculation'),
('Category', 'AnalysisServiceAnalysisCategory'),
('Department', 'AnalysisServiceDepartment'),
('Instrument', 'AnalysisServiceInstrument'),
('Instruments', 'AnalysisServiceInstruments'),
('Method', 'AnalysisServiceMethod'),
('Methods', 'AnalysisServiceMethods'),
('Preservation','AnalysisServicePreservation'),
('Container', 'AnalysisServiceContainer'),
]],
['AnalysisRequest', [
('Contact', 'AnalysisRequestContact'),
('Sample', 'AnalysisRequestSample'),
]],
['AnalysisSpec', [
('SampleType', 'AnalysisSpecSampleType')
]],
['Calculation', [
('DependentServices', 'CalculationDependentServices'),
('DependentServices', 'CalculationAnalysisService'),
]],
['Instrument', [
('Analyses', 'InstrumentAnalyses'),
('Method', 'InstrumentMethod'),
]],
['Method', [
('Calculation', 'MethodCalculation'),
]],
['SamplePartition', [
('Analyses', 'SamplePartitionAnalysis'),
]],
['Worksheet', [
('WorksheetTemplate', 'WorksheetAnalysisTemplate')
]]]:
logger.info("Migrating references for portal_type: %s" % portal_type)
for fieldname, relation in fields:
if is_UIDReferenceField(portal_type, fieldname):
migrate_refs(portal_type, relation, fieldname)
# remove at refs
for remove in refs_to_remove:
del_at_refs(remove)
# reindex objects
for obj in objs_to_reindex:
obj.reindexObject()
|
python
|
{
"resource": ""
}
|
q21260
|
get_uid
|
train
|
def get_uid(value):
"""Takes a brain or object and returns a valid UID.
In this case, the object may come from portal_archivist, so we will
need to do a catalog query to get the UID of the current version
"""
if not value:
return ''
# Is value a brain?
if ICatalogBrain.providedBy(value):
value = value.getObject()
# validate UID
uid = value.UID()
uc = get_tool('uid_catalog')
if uc(UID=uid):
# The object is valid
return uid
# Otherwise the object is an old version
brains = uc(portal_type=value.portal_type, Title=value.Title())
if not brains:
# Cannot find UID
raise RuntimeError('The UID for %s/%s cannot be found!' %
(value.portal_type, value.Title()))
if len(brains) > 1:
# Found multiple objects, this is a failure
raise RuntimeError('Searching for %s/%s returned multiple objects.' %
(value.portal_type, value.Title()))
return brains[0].UID
|
python
|
{
"resource": ""
}
|
q21261
|
migrateFileFields
|
train
|
def migrateFileFields(portal):
"""
This function walks over all attachment types and migrates their FileField
fields.
"""
portal_types = [
"Attachment",
"ARImport",
"Instrument",
"InstrumentCertification",
"Method",
"Multifile",
"Report",
"ARReport",
"SamplePoint"]
for portal_type in portal_types:
# Do the migration
migrate_to_blob(
portal,
portal_type=portal_type,
remove_old_value=True)
|
python
|
{
"resource": ""
}
|
q21262
|
XXX_REMOVEME
|
train
|
def XXX_REMOVEME(func):
"""Decorator for dead code removal
"""
@wraps(func)
def decorator(self, *args, **kwargs):
msg = "~~~~~~~ XXX REMOVEME marked method called: {}.{}".format(
self.__class__.__name__, func.func_name)
        # the raise makes any remaining caller of the marked method fail
        # loudly; the original trailing `return func(...)` was unreachable
        # and has been removed
        raise RuntimeError(msg)
return decorator
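# Hedged usage sketch (assumption, not from the original module): marking a
# method as dead code so that any remaining caller fails loudly.
#
# class FooView(object):            # hypothetical class for illustration
#     @XXX_REMOVEME
#     def legacy_method(self):
#         pass
#
# FooView().legacy_method()         # -> RuntimeError naming class/method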
|
python
|
{
"resource": ""
}
|
q21263
|
returns_json
|
train
|
def returns_json(func):
"""Decorator for functions which return JSON
"""
    @wraps(func)
    def decorator(*args, **kwargs):
        instance = args[0]
        request = getattr(instance, 'request', None)
        # guard against instances without a request to avoid an
        # AttributeError on None
        if request is not None:
            request.response.setHeader("Content-Type", "application/json")
        result = func(*args, **kwargs)
        return json.dumps(result)
    return decorator
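# Hedged usage sketch (assumption, not from the original module): decorating
# a browser-view method so its return value is serialized to JSON and the
# response Content-Type is set accordingly. `MyView` is hypothetical.
#
# class MyView(BrowserView):
#     @returns_json
#     def __call__(self):
#         return {"items": [1, 2, 3]}   # -> '{"items": [1, 2, 3]}'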
|
python
|
{
"resource": ""
}
|
q21264
|
returns_super_model
|
train
|
def returns_super_model(func):
"""Decorator to return standard content objects as SuperModels
"""
def to_super_model(obj):
# avoid circular imports
from senaite.core.supermodel import SuperModel
# Object is already a SuperModel
if isinstance(obj, SuperModel):
return obj
# Only portal objects are supported
if not api.is_object(obj):
raise TypeError("Expected a portal object, got '{}'"
.format(type(obj)))
# Wrap the object into a specific Publication Object Adapter
uid = api.get_uid(obj)
portal_type = api.get_portal_type(obj)
adapter = queryAdapter(uid, ISuperModel, name=portal_type)
if adapter is None:
return SuperModel(uid)
return adapter
@wraps(func)
def wrapper(*args, **kwargs):
obj = func(*args, **kwargs)
if isinstance(obj, (list, tuple)):
return map(to_super_model, obj)
return to_super_model(obj)
return wrapper
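# Hedged usage sketch (assumption): wrapping an accessor so callers receive
# SuperModel instances instead of raw content objects. `get_sample` is a
# hypothetical function for illustration only.
#
# @returns_super_model
# def get_sample(uid):
#     return api.get_object_by_uid(uid)
#
# get_sample(uid)   # -> SuperModel; list/tuple results are mapped item-wise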
|
python
|
{
"resource": ""
}
|
q21265
|
profileit
|
train
|
def profileit(path=None):
"""cProfile decorator to profile a function
:param path: output file path
:type path: str
:return: Function
"""
def inner(func):
@wraps(func)
def wrapper(*args, **kwargs):
prof = cProfile.Profile()
retval = prof.runcall(func, *args, **kwargs)
            if path is not None:
                # print_stats() writes to stdout itself and returns None;
                # wrapping it in a `print` statement would emit a stray "None"
                prof.print_stats()
                prof.dump_stats(os.path.expanduser(path))
            else:
                prof.print_stats()
return retval
return wrapper
return inner
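# Hedged usage sketch (assumption): profiling a hot function and dumping the
# stats for later inspection with pstats. The path is illustrative only.
#
# @profileit(path="~/slow_function.prof")
# def slow_function():
#     return sum(range(10**6))
#
# slow_function()   # prints the stats and writes ~/slow_function.prof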
|
python
|
{
"resource": ""
}
|
q21266
|
timeit
|
train
|
def timeit(threshold=0):
"""Decorator to log the execution time of a function
"""
def inner(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
return_value = func(*args, **kwargs)
end = time.time()
duration = float(end-start)
if duration > threshold:
logger.info("Execution of '{}{}' took {:2f}s".format(
func.__name__, args, duration))
return return_value
return wrapper
return inner
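# Hedged usage sketch (assumption): logging only calls slower than 0.5s.
#
# @timeit(threshold=0.5)
# def reindex_all(objs):
#     for obj in objs:
#         obj.reindexObject()
#
# Calls finishing under the threshold stay silent; slower ones are logged
# with the function name, arguments and duration.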
|
python
|
{
"resource": ""
}
|
q21267
|
AttachmentsView.action_update
|
train
|
def action_update(self):
"""Form action enpoint to update the attachments
"""
order = []
form = self.request.form
attachments = form.get("attachments", [])
for attachment in attachments:
# attachment is a form mapping, not a dictionary -> convert
values = dict(attachment)
uid = values.pop("UID")
obj = api.get_object_by_uid(uid)
# delete the attachment if the delete flag is true
if values.pop("delete", False):
self.delete_attachment(obj)
continue
# remember the order
order.append(uid)
# update the attachment with the given data
obj.update(**values)
obj.reindexObject()
# set the attachments order to the annotation storage
self.set_attachments_order(order)
# redirect back to the default view
return self.request.response.redirect(self.context.absolute_url())
|
python
|
{
"resource": ""
}
|
q21268
|
AttachmentsView.action_add_to_ws
|
train
|
def action_add_to_ws(self):
"""Form action to add a new attachment in a worksheet
"""
ws = self.context
form = self.request.form
attachment_file = form.get('AttachmentFile_file', None)
analysis_uid = self.request.get('analysis_uid', None)
service_uid = self.request.get('Service', None)
AttachmentType = form.get('AttachmentType', '')
AttachmentKeys = form.get('AttachmentKeys', '')
ReportOption = form.get('ReportOption', 'r')
# nothing to do if the attachment file is missing
if attachment_file is None:
logger.warn("AttachmentView.action_add_attachment: Attachment file is missing")
return
if analysis_uid:
rc = api.get_tool("reference_catalog")
analysis = rc.lookupObject(analysis_uid)
# create attachment
attachment = self.create_attachment(
ws,
attachment_file,
AttachmentType=AttachmentType,
AttachmentKeys=AttachmentKeys,
ReportOption=ReportOption)
others = analysis.getAttachment()
attachments = []
for other in others:
attachments.append(other.UID())
attachments.append(attachment.UID())
analysis.setAttachment(attachments)
# The metadata for getAttachmentUIDs need to get updated,
# otherwise the attachments are not displayed
# https://github.com/senaite/bika.lims/issues/521
analysis.reindexObject()
if service_uid:
workflow = api.get_tool('portal_workflow')
# XXX: refactor out dependency to this view.
view = api.get_view("manage_results", context=self.context, request=self.request)
analyses = self.context.getAnalyses()
allowed_states = ["assigned", "unassigned", "to_be_verified"]
for analysis in analyses:
if analysis.portal_type not in ('Analysis', 'DuplicateAnalysis'):
continue
if not analysis.getServiceUID() == service_uid:
continue
review_state = workflow.getInfoFor(analysis, 'review_state', '')
if review_state not in allowed_states:
continue
# create attachment
attachment = self.create_attachment(
ws,
attachment_file,
AttachmentType=AttachmentType,
AttachmentKeys=AttachmentKeys,
ReportOption=ReportOption)
others = analysis.getAttachment()
attachments = []
for other in others:
attachments.append(other.UID())
attachments.append(attachment.UID())
analysis.setAttachment(attachments)
# The metadata for getAttachmentUIDs need to get updated,
# otherwise the attachments are not displayed
# https://github.com/senaite/bika.lims/issues/521
analysis.reindexObject()
if self.request['HTTP_REFERER'].endswith('manage_results'):
self.request.response.redirect('{}/manage_results'.format(
self.context.absolute_url()))
else:
self.request.response.redirect(self.context.absolute_url())
|
python
|
{
"resource": ""
}
|
q21269
|
AttachmentsView.action_add
|
train
|
def action_add(self):
"""Form action to add a new attachment
Code taken from bika.lims.content.addARAttachment.
"""
form = self.request.form
parent = api.get_parent(self.context)
attachment_file = form.get('AttachmentFile_file', None)
AttachmentType = form.get('AttachmentType', '')
AttachmentKeys = form.get('AttachmentKeys', '')
ReportOption = form.get('ReportOption', 'r')
# nothing to do if the attachment file is missing
if attachment_file is None:
logger.warn("AttachmentView.action_add_attachment: Attachment file is missing")
return
# create attachment
attachment = self.create_attachment(
parent,
attachment_file,
AttachmentType=AttachmentType,
AttachmentKeys=AttachmentKeys,
ReportOption=ReportOption)
# append the new UID to the end of the current order
self.set_attachments_order(api.get_uid(attachment))
# handle analysis attachment
analysis_uid = form.get("Analysis", None)
if analysis_uid:
analysis = api.get_object_by_uid(analysis_uid)
others = analysis.getAttachment()
attachments = []
for other in others:
attachments.append(other.UID())
attachments.append(attachment.UID())
analysis.setAttachment(attachments)
# The metadata for getAttachmentUIDs need to get updated,
# otherwise the attachments are not displayed
# https://github.com/senaite/bika.lims/issues/521
analysis.reindexObject()
else:
others = self.context.getAttachment()
attachments = []
for other in others:
attachments.append(other.UID())
attachments.append(attachment.UID())
self.context.setAttachment(attachments)
if self.request['HTTP_REFERER'].endswith('manage_results'):
self.request.response.redirect('{}/manage_results'.format(
self.context.absolute_url()))
else:
self.request.response.redirect(self.context.absolute_url())
|
python
|
{
"resource": ""
}
|
q21270
|
AttachmentsView.create_attachment
|
train
|
def create_attachment(self, container, attachment_file, **kw):
"""Create an Attachment object in the given container
"""
filename = getattr(attachment_file, "filename", "Attachment")
attachment = api.create(container, "Attachment", title=filename)
attachment.edit(AttachmentFile=attachment_file, **kw)
attachment.processForm()
attachment.reindexObject()
logger.info("Created new Attachment {} in {}".format(
repr(attachment), repr(container)))
return attachment
|
python
|
{
"resource": ""
}
|
q21271
|
AttachmentsView.delete_attachment
|
train
|
def delete_attachment(self, attachment):
"""Delete attachment from the AR or Analysis
        The attachment will only be deleted if it is no longer referenced by
        another AR/Analysis.
"""
# Get the holding parent of this attachment
parent = None
if attachment.getLinkedRequests():
# Holding parent is an AR
parent = attachment.getRequest()
elif attachment.getLinkedAnalyses():
# Holding parent is an Analysis
parent = attachment.getAnalysis()
if parent is None:
logger.warn(
"Attachment {} is nowhere assigned. This should never happen!"
.format(repr(attachment)))
return False
# Get the other attachments of the holding parent
attachments = parent.getAttachment()
# New attachments to set
if attachment in attachments:
attachments.remove(attachment)
# Set the attachments w/o the current attachments
parent.setAttachment(attachments)
retain = False
# Attachment is referenced by another Analysis
if attachment.getLinkedAnalyses():
holder = attachment.getAnalysis()
logger.info("Attachment {} referenced by {} -> RETAIN"
.format(repr(attachment), repr(holder)))
retain = True
# Attachment is referenced by another AR
if attachment.getLinkedRequests():
holder = attachment.getRequest()
logger.info("Attachment {} referenced by {} -> RETAIN"
.format(repr(attachment), repr(holder)))
retain = True
# Delete attachment finally
if retain is False:
client = api.get_parent(attachment)
client.manage_delObjects([attachment.getId(), ])
|
python
|
{
"resource": ""
}
|
q21272
|
AttachmentsView.get_attachment_size
|
train
|
def get_attachment_size(self, attachment):
"""Get the human readable size of the attachment
"""
fsize = 0
file = attachment.getAttachmentFile()
if file:
fsize = file.get_size()
        if fsize < 1024:
            fsize = '%s B' % fsize
        else:
            fsize = '%s kB' % (fsize / 1024)
return fsize
|
python
|
{
"resource": ""
}
|
q21273
|
AttachmentsView.get_attachment_info
|
train
|
def get_attachment_info(self, attachment):
"""Returns a dictionary of attachment information
"""
attachment_uid = api.get_uid(attachment)
attachment_file = attachment.getAttachmentFile()
attachment_type = attachment.getAttachmentType()
attachment_icon = attachment_file.icon
if callable(attachment_icon):
attachment_icon = attachment_icon()
return {
'keywords': attachment.getAttachmentKeys(),
'size': self.get_attachment_size(attachment),
'name': attachment_file.filename,
'Icon': attachment_icon,
'type': api.get_uid(attachment_type) if attachment_type else '',
'absolute_url': attachment.absolute_url(),
'UID': attachment_uid,
'report_option': attachment.getReportOption(),
'analysis': '',
}
|
python
|
{
"resource": ""
}
|
q21274
|
AttachmentsView.get_attachments
|
train
|
def get_attachments(self):
"""Returns a list of attachments info dictionaries
Original code taken from bika.lims.analysisrequest.view
"""
attachments = []
# process AR attachments
for attachment in self.context.getAttachment():
attachment_info = self.get_attachment_info(attachment)
attachments.append(attachment_info)
# process analyses attachments
for analysis in self.context.getAnalyses(full_objects=True):
for attachment in analysis.getAttachment():
attachment_info = self.get_attachment_info(attachment)
attachment_info["analysis"] = analysis.Title()
attachment_info["analysis_uid"] = api.get_uid(analysis)
attachments.append(attachment_info)
return attachments
|
python
|
{
"resource": ""
}
|
q21275
|
AttachmentsView.get_sorted_attachments
|
train
|
def get_sorted_attachments(self):
"""Returns a sorted list of analysis info dictionaries
"""
inf = float("inf")
order = self.get_attachments_order()
attachments = self.get_attachments()
def att_cmp(att1, att2):
_n1 = att1.get('UID')
_n2 = att2.get('UID')
_i1 = _n1 in order and order.index(_n1) + 1 or inf
_i2 = _n2 in order and order.index(_n2) + 1 or inf
return cmp(_i1, _i2)
sorted_attachments = sorted(attachments, cmp=att_cmp)
return sorted_attachments
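# Hedged sketch of the ordering rule (assumption; UIDs are illustrative):
# attachments whose UID appears in the stored order come first, in that
# order; UIDs not in the stored order compare as +inf and sink to the end.
#
#   order = ["uid-b", "uid-a"]
#   result -> [<uid-b>, <uid-a>, <any attachment not in order>, ...]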
|
python
|
{
"resource": ""
}
|
q21276
|
AttachmentsView.get_attachment_types
|
train
|
def get_attachment_types(self):
"""Returns a list of available attachment types
"""
bika_setup_catalog = api.get_tool("bika_setup_catalog")
attachment_types = bika_setup_catalog(portal_type='AttachmentType',
is_active=True,
sort_on="sortable_title",
sort_order="ascending")
return attachment_types
|
python
|
{
"resource": ""
}
|
q21277
|
AttachmentsView.get_analyses
|
train
|
def get_analyses(self):
"""Returns a list of analyses from the AR
"""
analyses = self.context.getAnalyses(full_objects=True)
return filter(self.is_analysis_attachment_allowed, analyses)
|
python
|
{
"resource": ""
}
|
q21278
|
AttachmentsView.is_analysis_attachment_allowed
|
train
|
def is_analysis_attachment_allowed(self, analysis):
"""Checks if the analysis
"""
if analysis.getAttachmentOption() not in ["p", "r"]:
return False
if api.get_workflow_status_of(analysis) in ["retracted"]:
return False
return True
|
python
|
{
"resource": ""
}
|
q21279
|
AttachmentsView.user_can_add_attachments
|
train
|
def user_can_add_attachments(self):
"""Checks if the current logged in user is allowed to add attachments
"""
if not self.global_attachments_allowed():
return False
context = self.context
pm = api.get_tool("portal_membership")
return pm.checkPermission(AddAttachment, context)
|
python
|
{
"resource": ""
}
|
q21280
|
AttachmentsView.user_can_update_attachments
|
train
|
def user_can_update_attachments(self):
"""Checks if the current logged in user is allowed to update attachments
"""
context = self.context
pm = api.get_tool("portal_membership")
return pm.checkPermission(EditResults, context) or \
pm.checkPermission(EditFieldResults, context)
|
python
|
{
"resource": ""
}
|
q21281
|
AttachmentsView.user_can_delete_attachments
|
train
|
def user_can_delete_attachments(self):
"""Checks if the current logged in user is allowed to delete attachments
"""
context = self.context
user = api.get_current_user()
if not self.is_ar_editable():
return False
return (self.user_can_add_attachments() and
not user.allowed(context, ["Client"])) or \
self.user_can_update_attachments()
|
python
|
{
"resource": ""
}
|
q21282
|
AttachmentsView.storage
|
train
|
def storage(self):
"""A storage which keeps configuration settings for attachments
"""
annotation = self.get_annotation()
if annotation.get(ATTACHMENTS_STORAGE) is None:
annotation[ATTACHMENTS_STORAGE] = OOBTree()
return annotation[ATTACHMENTS_STORAGE]
|
python
|
{
"resource": ""
}
|
q21283
|
AttachmentsView.flush
|
train
|
def flush(self):
"""Remove the whole storage
"""
annotation = self.get_annotation()
if annotation.get(ATTACHMENTS_STORAGE) is not None:
del annotation[ATTACHMENTS_STORAGE]
|
python
|
{
"resource": ""
}
|
q21284
|
AttachmentsView.set_attachments_order
|
train
|
def set_attachments_order(self, order):
"""Remember the attachments order
"""
# append single uids to the order
if isinstance(order, basestring):
new_order = self.storage.get("order", [])
new_order.append(order)
order = new_order
self.storage.update({"order": order})
|
python
|
{
"resource": ""
}
|
q21285
|
ajaxAttachmentsView.ajax_delete_analysis_attachment
|
train
|
def ajax_delete_analysis_attachment(self):
"""Endpoint for attachment delete in WS
"""
form = self.request.form
attachment_uid = form.get("attachment_uid", None)
if not attachment_uid:
return "error"
attachment = api.get_object_by_uid(attachment_uid, None)
if attachment is None:
return "Could not resolve attachment UID {}".format(attachment_uid)
# handle delete via the AttachmentsView
view = self.context.restrictedTraverse("@@attachments_view")
view.delete_attachment(attachment)
return "success"
|
python
|
{
"resource": ""
}
|
q21286
|
get_backreferences
|
train
|
def get_backreferences(context, relationship=None, as_brains=None):
"""Return all objects which use a UIDReferenceField to reference context.
:param context: The object which is the target of references.
:param relationship: The relationship name of the UIDReferenceField.
:param as_brains: Requests that this function returns only catalog brains.
as_brains can only be used if a relationship has been specified.
This function can be called with or without specifying a relationship.
    - If a relationship is provided, the return value is a list of UIDs of
      the items which reference the context via that relationship, or a
      list of catalog brains if as_brains is truthy.
    - If no relationship is provided, the entire mapping of backreferences
      to the context object is returned (by reference) as a dictionary.
      This value can then be modified in-place, to edit the stored
      backreferences.
"""
instance = context.aq_base
raw_backrefs = get_storage(instance)
if not relationship:
assert not as_brains, "You cannot use as_brains with no relationship"
return raw_backrefs
backrefs = list(raw_backrefs.get(relationship, []))
if not backrefs:
return []
if not as_brains:
return backrefs
cat = _get_catalog_for_uid(backrefs[0])
return cat(UID=backrefs)
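# Hedged usage sketch (assumption; `service` is a hypothetical
# AnalysisService object; the relationship name appears elsewhere in this
# codebase):
#
#   # raw UIDs referencing `service` via the given relationship
#   uids = get_backreferences(service, "AnalysisAnalysisService")
#   # the same backrefs as catalog brains
#   brains = get_backreferences(service, "AnalysisAnalysisService",
#                               as_brains=True)
#   # the whole persistent mapping, editable in-place
#   storage = get_backreferences(service)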
|
python
|
{
"resource": ""
}
|
q21287
|
UIDReferenceField.get_relationship_key
|
train
|
def get_relationship_key(self, context):
"""Return the configured relationship key or generate a new one
"""
if not self.relationship:
return context.portal_type + self.getName()
return self.relationship
|
python
|
{
"resource": ""
}
|
q21288
|
UIDReferenceField.link_reference
|
train
|
def link_reference(self, source, target):
"""Link the target to the source
"""
target_uid = api.get_uid(target)
# get the annotation storage key
key = self.get_relationship_key(target)
# get all backreferences from the source
# N.B. only like this we get the persistent mapping!
backrefs = get_backreferences(source, relationship=None)
if key not in backrefs:
backrefs[key] = PersistentList()
if target_uid not in backrefs[key]:
backrefs[key].append(target_uid)
return True
|
python
|
{
"resource": ""
}
|
q21289
|
UIDReferenceField.unlink_reference
|
train
|
def unlink_reference(self, source, target):
"""Unlink the target from the source
"""
target_uid = api.get_uid(target)
# get the storage key
key = self.get_relationship_key(target)
# get all backreferences from the source
# N.B. only like this we get the persistent mapping!
backrefs = get_backreferences(source, relationship=None)
if key not in backrefs:
logger.warn(
"Referenced object {} has no backreferences for the key {}"
.format(repr(source), key))
return False
if target_uid not in backrefs[key]:
logger.warn("Target {} was not linked by {}"
.format(repr(target), repr(source)))
return False
backrefs[key].remove(target_uid)
return True
|
python
|
{
"resource": ""
}
|
q21290
|
UIDReferenceField.getRaw
|
train
|
def getRaw(self, context, aslist=False, **kwargs):
"""Grab the stored value, and return it directly as UIDs.
        :param context: the object whose schema contains this field.
:type context: BaseContent
:param aslist: Forces a single-valued field to return a list type.
:type aslist: bool
:param kwargs: kwargs are passed directly to the underlying get.
:type kwargs: dict
:return: UID or list of UIDs for multiValued fields.
:rtype: string | list[string]
"""
value = StringField.get(self, context, **kwargs)
if not value:
return [] if self.multiValued else None
if self.multiValued:
ret = value
else:
ret = self.get_uid(context, value)
if aslist:
ret = [ret]
return ret
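# Hedged usage sketch (assumption; the field name is illustrative):
#
#   field = context.getField("Calculation")
#   field.getRaw(context)               # -> 'uid' or None (single-valued)
#   field.getRaw(context, aslist=True)  # -> ['uid'] (normalized to a list)
#
# Multi-valued fields always return the stored list of UIDs.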
|
python
|
{
"resource": ""
}
|
q21291
|
UIDReferenceField._set_backreferences
|
train
|
def _set_backreferences(self, context, items, **kwargs):
"""Set the back references on the linked items
This will set an annotation storage on the referenced items which point
to the current context.
"""
# Don't set any references during initialization.
# This might cause a recursion error when calling `getRaw` to fetch the
# current set UIDs!
initializing = kwargs.get('_initializing_', False)
if initializing:
return
# current set UIDs
raw = self.getRaw(context) or []
# handle single reference fields
if isinstance(raw, basestring):
raw = [raw, ]
cur = set(raw)
# UIDs to be set
new = set(map(api.get_uid, items))
# removed UIDs
removed = cur.difference(new)
# Unlink removed UIDs from the source
for uid in removed:
source = api.get_object_by_uid(uid, None)
if source is None:
logger.warn("UID {} does not exist anymore".format(uid))
continue
self.unlink_reference(source, context)
# Link backrefs
for item in items:
self.link_reference(item, context)
|
python
|
{
"resource": ""
}
|
q21292
|
DateReceivedFieldVisibility.isVisible
|
train
|
def isVisible(self, field, mode="view", default="visible"):
"""Returns whether the field is visible in a given mode
"""
if mode != "edit":
return default
# If this is a Secondary Analysis Request, this field is not editable
if IAnalysisRequestSecondary.providedBy(self.context):
return "invisible"
return self.context.isOpen() and "visible" or "invisible"
|
python
|
{
"resource": ""
}
|
q21293
|
format_keyword
|
train
|
def format_keyword(keyword):
"""
    Removes special characters from a keyword. Analysis Services must have
    this kind of keyword. E.g. if the assay name from the instrument is
    'HIV-1 2.0', an AS must be created in Bika with the keyword 'HIV120'
"""
import re
result = ''
if keyword:
result = re.sub(r"\W", "", keyword)
result = re.sub("_", "", result)
return result
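# Quick illustration of the cleanup rule (the second input is illustrative):
#
#   format_keyword('HIV-1 2.0')    # -> 'HIV120'
#   format_keyword('Ca_Mg ratio')  # -> 'CaMgratio' (underscores removed too)
#   format_keyword('')             # -> ''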
|
python
|
{
"resource": ""
}
|
q21294
|
guard_activate
|
train
|
def guard_activate(analysis_service):
"""Returns whether the transition activate can be performed for the
analysis service passed in
"""
calculation = analysis_service.getCalculation()
if not calculation:
return True
# If the calculation is inactive, we cannot activate the service
if not api.is_active(calculation):
return False
    # The service can only be activated if all the services its calculation
    # depends on are active as well
dependencies = calculation.getDependentServices()
for dependency in dependencies:
if not api.is_active(dependency):
return False
return True
|
python
|
{
"resource": ""
}
|
q21295
|
guard_deactivate
|
train
|
def guard_deactivate(analysis_service):
"""Returns whether the transition deactivate can be performed for the
analysis service passed in
"""
for dependant in analysis_service.getServiceDependants():
status = api.get_workflow_status_of(dependant)
if status == "active":
return False
return True
|
python
|
{
"resource": ""
}
|
q21296
|
AnalysisRequestsView.get_progress_percentage
|
train
|
def get_progress_percentage(self, ar_brain):
"""Returns the percentage of completeness of the Analysis Request
"""
review_state = ar_brain.review_state
if review_state == "published":
return 100
numbers = ar_brain.getAnalysesNum
num_analyses = numbers[1] or 0
if not num_analyses:
return 0
# [verified, total, not_submitted, to_be_verified]
num_to_be_verified = numbers[3] or 0
num_verified = numbers[0] or 0
# 2 steps per analysis (submit, verify) plus one step for publish
max_num_steps = (num_analyses * 2) + 1
num_steps = num_to_be_verified + (num_verified * 2)
if not num_steps:
return 0
if num_steps > max_num_steps:
return 100
return (num_steps * 100) / max_num_steps
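# Worked example of the step arithmetic (values are illustrative): with 4
# analyses, max_num_steps = 4*2 + 1 = 9. If 1 analysis is to_be_verified
# and 2 are verified, num_steps = 1 + 2*2 = 5, so the progress is
# (5 * 100) / 9 = 55 (integer division under Python 2).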
|
python
|
{
"resource": ""
}
|
q21297
|
RecordsWidget.process_form
|
train
|
def process_form(self, instance, field, form, empty_marker=None,
emptyReturnsMarker=False):
"""
Basic impl for form processing in a widget plus allowing empty
values to be saved
"""
# a poor workaround for Plone repeating itself.
# XXX this is important XXX
key = field.getName() + '_value'
if key in instance.REQUEST:
return instance.REQUEST[key], {}
value = form.get(field.getName(), empty_marker)
# When a recordswidget's visibility is defined as hidden
# '...visible={'view': 'hidden', 'edit': 'hidden'},...' the template
# displays it as an input field with the attribute 'value' as a string
# 'value="[{:},{:}]"'. This makes the system save the content of the
# widget as the string instead of a dictionary inside a list, so we
# need to check if the variable contains a python object as a string.
        if value and value is not empty_marker and isinstance(value, str):
            import ast
            try:
                value = ast.literal_eval(form.get(field.getName()))
            except (ValueError, SyntaxError):
                # cannot resolve the string as a python literal!
                return empty_marker
if not value:
return value, {}
if value is empty_marker:
return empty_marker
if emptyReturnsMarker and value == '':
return empty_marker
# we make sure that empty "value" inputs are saved as "" empty string.
for i in range(len(value)):
value[i] = dict(value[i])
if 'value' not in value[i]:
value[i]['value'] = ''
instance.REQUEST[key] = value
return value, {}
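# Hedged illustration of the hidden-widget workaround (assumption): when the
# template renders the records as a string, ast.literal_eval restores the
# python structure before it is saved.
#
#   import ast
#   ast.literal_eval("[{'value': '1'}, {'value': '2'}]")
#   # -> [{'value': '1'}, {'value': '2'}]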
|
python
|
{
"resource": ""
}
|
q21298
|
ProxyField.get
|
train
|
def get(self, instance, **kwargs):
"""retrieves the value of the same named field on the proxy object
"""
# The default value
default = self.getDefault(instance)
# Retrieve the proxy object
proxy_object = self.get_proxy(instance)
# Return None if we could not find a proxied object, e.g. through
# the proxy expression 'context.getSample()' on an AR
if proxy_object is None:
logger.debug("Expression '{}' did not return a valid Proxy Object on {}"
.format(self.proxy, instance))
return default
# Lookup the proxied field by name
field_name = self.getName()
field = proxy_object.getField(field_name)
# Bail out if the proxy object has no identical named field
if field is None:
raise KeyError("Object '{}' with id '{}' has no field named '{}'".format(
proxy_object.portal_type, proxy_object.getId(), field_name))
# return the value of the proxy field
return field.get(proxy_object)
|
python
|
{
"resource": ""
}
|
q21299
|
ProxyField.set
|
train
|
def set(self, instance, value, **kwargs):
"""writes the value to the same named field on the proxy object
"""
# Retrieve the proxy object
proxy_object = self.get_proxy(instance)
# Return None if we could not find a proxied object, e.g. through
# the proxy expression 'context.getSample()' on an AR
if not proxy_object:
logger.debug("Expression '{}' did not return a valid Proxy Object on {}"
.format(self.proxy, instance))
return None
# Lookup the proxied field by name
field_name = self.getName()
field = proxy_object.getField(field_name)
# Bail out if the proxy object has no identical named field.
if field is None:
raise KeyError("Object '{}' with id '{}' has no field named '{}'".format(
proxy_object.portal_type, proxy_object.getId(), field_name))
# set the value on the proxy object
field.set(proxy_object, value, **kwargs)
# get the current time
now = DateTime.DateTime()
# update the modification date of the proxied object
proxy_object.setModificationDate(now)
# update the modification date of the holding object
instance.setModificationDate(now)
|
python
|
{
"resource": ""
}
|