Dataset schema: _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict)
q11400
imprints2marc
train
def imprints2marc(self, key, value):
    """Populate the ``260`` MARC field."""
    return {
        'a': value.get('place'),
        'b': value.get('publisher'),
        'c': value.get('date'),
    }
python
{ "resource": "" }
q11401
thesis_info
train
def thesis_info(self, key, value):
    """Populate the ``thesis_info`` key."""
    def _get_degree_type(value):
        DEGREE_TYPES_MAP = {
            'RAPPORT DE STAGE': 'other',
            'INTERNSHIP REPORT': 'other',
            'DIPLOMA': 'diploma',
            'BACHELOR': 'bachelor',
            'LAUREA': 'laurea',
            'MASTER': 'master',
            'THESIS': 'other',
            'PHD': 'phd',
            'PDF': 'phd',
            'PH.D. THESIS': 'phd',
            'HABILITATION': 'habilitation',
        }

        b_value = force_single_element(value.get('b', ''))
        if b_value:
            return DEGREE_TYPES_MAP.get(b_value.upper(), 'other')

    def _get_institutions(value):
        c_values = force_list(value.get('c'))
        z_values = force_list(value.get('z'))

        # XXX: we zip only when they have the same length, otherwise
        #      we might match a value with the wrong recid.
        if len(c_values) != len(z_values):
            return [{'name': c_value} for c_value in c_values]
        else:
            return [{
                'curated_relation': True,
                'name': c_value,
                'record': get_record_ref(z_value, 'institutions'),
            } for c_value, z_value in zip(c_values, z_values)]

    thesis_info = self.get('thesis_info', {})
    thesis_info['date'] = normalize_date(force_single_element(value.get('d')))
    thesis_info['degree_type'] = _get_degree_type(value)
    thesis_info['institutions'] = _get_institutions(value)

    return thesis_info
python
{ "resource": "" }
q11402
thesis_info2marc
train
def thesis_info2marc(self, key, value):
    """Populate the ``502`` MARC field.

    Also populates the ``500`` MARC field through side effects.
    """
    def _get_b_value(value):
        DEGREE_TYPES_MAP = {
            'bachelor': 'Bachelor',
            'diploma': 'Diploma',
            'habilitation': 'Habilitation',
            'laurea': 'Laurea',
            'master': 'Master',
            'other': 'Thesis',
            'phd': 'PhD',
        }

        degree_type = value.get('degree_type')
        if degree_type:
            return DEGREE_TYPES_MAP.get(degree_type)

    result_500 = self.get('500', [])
    result_502 = self.get('502', {})

    if value.get('defense_date'):
        result_500.append({
            'a': u'Presented on {}'.format(value.get('defense_date')),
        })

    result_502 = {
        'b': _get_b_value(value),
        'c': [el['name'] for el in force_list(value.get('institutions'))],
        'd': value.get('date'),
    }

    self['500'] = result_500

    return result_502
python
{ "resource": "" }
q11403
abstracts
train
def abstracts(self, key, value):
    """Populate the ``abstracts`` key."""
    result = []

    source = force_single_element(value.get('9'))
    for a_value in force_list(value.get('a')):
        result.append({
            'source': source,
            'value': a_value,
        })

    return result
python
{ "resource": "" }
q11404
funding_info
train
def funding_info(self, key, value):
    """Populate the ``funding_info`` key."""
    return {
        'agency': value.get('a'),
        'grant_number': value.get('c'),
        'project_number': value.get('f'),
    }
python
{ "resource": "" }
q11405
funding_info2marc
train
def funding_info2marc(self, key, value):
    """Populate the ``536`` MARC field."""
    return {
        'a': value.get('agency'),
        'c': value.get('grant_number'),
        'f': value.get('project_number'),
    }
python
{ "resource": "" }
q11406
license
train
def license(self, key, value):
    """Populate the ``license`` key."""
    def _get_license(value):
        a_values = force_list(value.get('a'))
        oa_licenses = [el for el in a_values if el == 'OA' or el == 'Open Access']
        other_licenses = [el for el in a_values if el != 'OA' and el != 'Open Access']

        if not other_licenses:
            return force_single_element(oa_licenses)
        return force_single_element(other_licenses)

    def _get_material(value):
        material = value.get('3', '').lower()
        if material == 'article':
            return 'publication'
        return material

    return {
        'imposing': value.get('b'),
        'license': _get_license(value),
        'material': _get_material(value),
        'url': value.get('u'),
    }
python
{ "resource": "" }
q11407
license2marc
train
def license2marc(self, key, value):
    """Populate the ``540`` MARC field."""
    return {
        'a': value.get('license'),
        'b': value.get('imposing'),
        'u': value.get('url'),
        '3': value.get('material'),
    }
python
{ "resource": "" }
q11408
copyright
train
def copyright(self, key, value):
    """Populate the ``copyright`` key."""
    MATERIAL_MAP = {
        'Article': 'publication',
        'Published thesis as a book': 'publication',
    }

    material = value.get('e') or value.get('3')

    return {
        'holder': value.get('d'),
        'material': MATERIAL_MAP.get(material),
        'statement': value.get('f'),
        'url': value.get('u'),
        'year': maybe_int(value.get('g')),
    }
python
{ "resource": "" }
q11409
copyright2marc
train
def copyright2marc(self, key, value):
    """Populate the ``542`` MARC field."""
    E_MAP = {
        'publication': 'Article',
    }

    e_value = value.get('material')

    return {
        'd': value.get('holder'),
        'e': E_MAP.get(e_value),
        'f': value.get('statement'),
        'g': value.get('year'),
        'u': value.get('url'),
    }
python
{ "resource": "" }
q11410
_private_notes2marc
train
def _private_notes2marc(self, key, value):
    """Populate the ``595`` MARC field.

    Also populates the ``595_H`` MARC field through side effects.
    """
    def _is_from_hal(value):
        return value.get('source') == 'HAL'

    if not _is_from_hal(value):
        return {
            '9': value.get('source'),
            'a': value.get('value'),
        }

    self.setdefault('595_H', []).append({'a': value.get('value')})
python
{ "resource": "" }
q11411
_export_to2marc
train
def _export_to2marc(self, key, value):
    """Populate the ``595`` MARC field."""
    def _is_for_cds(value):
        return 'CDS' in value

    def _is_for_hal(value):
        return 'HAL' in value and value['HAL']

    def _is_not_for_hal(value):
        return 'HAL' in value and not value['HAL']

    result = []

    if _is_for_cds(value):
        result.append({'c': 'CDS'})

    if _is_for_hal(value):
        result.append({'c': 'HAL'})
    elif _is_not_for_hal(value):
        result.append({'c': 'not HAL'})

    return result
python
{ "resource": "" }
q11412
_desy_bookkeeping
train
def _desy_bookkeeping(self, key, value):
    """Populate the ``_desy_bookkeeping`` key."""
    return {
        'date': normalize_date(value.get('d')),
        'expert': force_single_element(value.get('a')),
        'status': value.get('s'),
    }
python
{ "resource": "" }
q11413
_desy_bookkeeping2marc
train
def _desy_bookkeeping2marc(self, key, value):
    """Populate the ``595_D`` MARC field.

    Also populates the ``035`` MARC field through side effects.
    """
    if 'identifier' not in value:
        return {
            'a': value.get('expert'),
            'd': value.get('date'),
            's': value.get('status'),
        }
    self.setdefault('035', []).append({
        '9': 'DESY',
        'z': value['identifier'],
    })
python
{ "resource": "" }
q11414
_dates
train
def _dates(self, key, value):
    """Don't populate any key through the return value.

    On the other hand, populates the ``date_proposed``, ``date_approved``,
    ``date_started``, ``date_cancelled``, and the ``date_completed`` keys
    through side effects.
    """
    if value.get('q'):
        self['date_proposed'] = normalize_date(value['q'])
    if value.get('r'):
        self['date_approved'] = normalize_date(value['r'])
    if value.get('s'):
        self['date_started'] = normalize_date(value['s'])
    if value.get('c'):
        self['date_cancelled'] = normalize_date(value['c'])
    if value.get('t'):
        self['date_completed'] = normalize_date(value['t'])

    raise IgnoreKey
python
{ "resource": "" }
q11415
experiment
train
def experiment(self, key, values):
    """Populate the ``experiment`` key.

    Also populates the ``legacy_name``, the ``accelerator``, and the
    ``institutions`` keys through side effects.
    """
    experiment = self.get('experiment', {})
    legacy_name = self.get('legacy_name', '')
    accelerator = self.get('accelerator', {})
    institutions = self.get('institutions', [])

    for value in force_list(values):
        if value.get('c'):
            experiment['value'] = value.get('c')
        if value.get('d'):
            experiment['short_name'] = value.get('d')

        if value.get('a'):
            legacy_name = value.get('a')

        if value.get('b'):
            accelerator['value'] = value.get('b')

        institution = {}
        if value.get('u'):
            institution['value'] = value.get('u')
        if value.get('z'):
            record = get_record_ref(maybe_int(value.get('z')), 'institutions')
            if record:
                institution['curated_relation'] = True
                institution['record'] = record
        institutions.append(institution)

    self['legacy_name'] = legacy_name
    self['accelerator'] = accelerator
    self['institutions'] = institutions

    return experiment
python
{ "resource": "" }
q11416
core
train
def core(self, key, value):
    """Populate the ``core`` key.

    Also populates the ``deleted`` and ``project_type`` keys through side
    effects.
    """
    core = self.get('core')
    deleted = self.get('deleted')
    project_type = self.get('project_type', [])

    if not core:
        normalized_a_values = [el.upper() for el in force_list(value.get('a'))]
        if 'CORE' in normalized_a_values:
            core = True

    if not deleted:
        normalized_c_values = [el.upper() for el in force_list(value.get('c'))]
        if 'DELETED' in normalized_c_values:
            deleted = True

    if not project_type:
        normalized_a_values = [el.upper() for el in force_list(value.get('a'))]
        if 'ACCELERATOR' in normalized_a_values:
            project_type.append('accelerator')

    self['project_type'] = project_type
    self['deleted'] = deleted

    return core
python
{ "resource": "" }
q11417
control_number
train
def control_number(endpoint):
    """Populate the ``control_number`` key.

    Also populates the ``self`` key through side effects.
    """
    def _control_number(self, key, value):
        self['self'] = get_record_ref(int(value), endpoint)
        return int(value)

    return _control_number
python
{ "resource": "" }
q11418
acquisition_source
train
def acquisition_source(self, key, value):
    """Populate the ``acquisition_source`` key."""
    def _get_datetime(value):
        d_value = force_single_element(value.get('d', ''))
        if d_value:
            try:
                date = PartialDate.loads(d_value)
            except ValueError:
                return d_value
            else:
                datetime_ = datetime(year=date.year, month=date.month, day=date.day)
                return datetime_.isoformat()

    internal_uid, orcid, source = None, None, None

    a_values = force_list(value.get('a'))
    for a_value in a_values:
        if IS_INTERNAL_UID.match(a_value):
            if a_value.startswith('inspire:uid:'):
                internal_uid = int(a_value[12:])
            else:
                internal_uid = int(a_value)
        elif IS_ORCID.match(a_value):
            if a_value.startswith('orcid:'):
                orcid = a_value[6:]
            else:
                orcid = a_value
        else:
            source = a_value

    c_value = force_single_element(value.get('c', ''))
    normalized_c_value = c_value.lower()

    if normalized_c_value == 'batchupload':
        method = 'batchuploader'
    elif normalized_c_value == 'submission':
        method = 'submitter'
    else:
        method = normalized_c_value

    return {
        'datetime': _get_datetime(value),
        'email': value.get('b'),
        'internal_uid': internal_uid,
        'method': method,
        'orcid': orcid,
        'source': source,
        'submission_number': value.get('e'),
    }
python
{ "resource": "" }
q11419
external_system_identifiers
train
def external_system_identifiers(endpoint):
    """Populate the ``external_system_identifiers`` key.

    Also populates the ``new_record`` key through side effects.
    """
    @utils.flatten
    @utils.for_each_value
    def _external_system_identifiers(self, key, value):
        new_recid = maybe_int(value.get('d'))
        if new_recid:
            self['new_record'] = get_record_ref(new_recid, endpoint)

        return [
            {
                'schema': 'SPIRES',
                'value': ext_sys_id,
            } for ext_sys_id in force_list(value.get('a'))
        ]

    return _external_system_identifiers
python
{ "resource": "" }
q11420
deleted_records
train
def deleted_records(endpoint):
    """Populate the ``deleted_records`` key."""
    @utils.for_each_value
    def _deleted_records(self, key, value):
        deleted_recid = maybe_int(value.get('a'))
        if deleted_recid:
            return get_record_ref(deleted_recid, endpoint)

    return _deleted_records
python
{ "resource": "" }
q11421
accelerator_experiments
train
def accelerator_experiments(self, key, value):
    """Populate the ``accelerator_experiments`` key."""
    result = []

    a_value = force_single_element(value.get('a'))
    e_values = [el for el in force_list(value.get('e')) if el != '-']
    zero_values = force_list(value.get('0'))

    if a_value and not e_values:
        result.append({'accelerator': a_value})

    # XXX: we zip only when they have the same length, otherwise
    #      we might match a value with the wrong recid.
    if len(e_values) == len(zero_values):
        for e_value, zero_value in zip(e_values, zero_values):
            result.append({
                'legacy_name': e_value,
                'record': get_record_ref(zero_value, 'experiments'),
            })
    else:
        for e_value in e_values:
            result.append({'legacy_name': e_value})

    return result
python
{ "resource": "" }
q11422
keywords
train
def keywords(self, key, values):
    """Populate the ``keywords`` key.

    Also populates the ``energy_ranges`` key through side effects.
    """
    keywords = self.get('keywords', [])
    energy_ranges = self.get('energy_ranges', [])

    for value in force_list(values):
        if value.get('a'):
            schema = force_single_element(value.get('2', '')).upper()
            sources = force_list(value.get('9'))
            a_values = force_list(value.get('a'))

            if 'conference' not in sources:
                for a_value in a_values:
                    keywords.append({
                        'schema': schema,
                        'source': force_single_element(sources),
                        'value': a_value,
                    })

        if value.get('e'):
            energy_ranges.append(ENERGY_RANGES_MAP.get(value.get('e')))

    self['energy_ranges'] = energy_ranges

    return keywords
python
{ "resource": "" }
q11423
keywords2marc
train
def keywords2marc(self, key, values):
    """Populate the ``695`` MARC field.

    Also populates the ``084`` and ``6531`` MARC fields through side effects.
    """
    result_695 = self.get('695', [])
    result_084 = self.get('084', [])
    result_6531 = self.get('6531', [])

    for value in values:
        schema = value.get('schema')
        source = value.get('source')
        keyword = value.get('value')

        if schema == 'PACS' or schema == 'PDG':
            result_084.append({
                '2': schema,
                '9': source,
                'a': keyword,
            })
        elif schema == 'JACOW':
            result_6531.append({
                '2': 'JACoW',
                '9': source,
                'a': keyword,
            })
        elif schema == 'INSPIRE':
            result_695.append({
                '2': 'INSPIRE',
                '9': source,
                'a': keyword,
            })
        elif schema == 'INIS':
            result_695.append({
                '2': 'INIS',
                '9': source,
                'a': keyword,
            })
        elif source != 'magpie':
            result_6531.append({
                '9': source,
                'a': keyword,
            })

    self['6531'] = result_6531
    self['084'] = result_084

    return result_695
python
{ "resource": "" }
q11424
collaborations
train
def collaborations(self, key, value):
    """Populate the ``collaborations`` key."""
    result = []

    for g_value in force_list(value.get('g')):
        collaborations = normalize_collaboration(g_value)
        if len(collaborations) == 1:
            result.append({
                'record': get_record_ref(maybe_int(value.get('0')), 'experiments'),
                'value': collaborations[0],
            })
        else:
            result.extend({'value': collaboration} for collaboration in collaborations)

    return result
python
{ "resource": "" }
q11425
publication_info
train
def publication_info(self, key, value):
    """Populate the ``publication_info`` key."""
    def _get_cnum(value):
        w_value = force_single_element(value.get('w', ''))
        normalized_w_value = w_value.replace('/', '-').upper()
        return normalized_w_value

    def _get_material(value):
        schema = load_schema('elements/material')
        valid_materials = schema['enum']

        m_value = force_single_element(value.get('m', ''))
        normalized_m_value = m_value.lower()
        if normalized_m_value in valid_materials:
            return normalized_m_value

    def _get_parent_isbn(value):
        z_value = force_single_element(value.get('z', ''))
        if z_value:
            return normalize_isbn(z_value)

    def _get_pubinfo_freetext(value):
        x_value = force_single_element(value.get('x', ''))
        if not x_value.startswith('#DONE'):
            return x_value

    page_start, page_end, artid = split_page_artid(value.get('c'))

    parent_recid = maybe_int(force_single_element(value.get('0')))
    parent_record = get_record_ref(parent_recid, 'literature')

    journal_recid = maybe_int(force_single_element(value.get('1')))
    journal_record = get_record_ref(journal_recid, 'journals')

    conference_recid = maybe_int(force_single_element(value.get('2')))
    conference_record = get_record_ref(conference_recid, 'conferences')

    return {
        'artid': artid,
        'cnum': _get_cnum(value),
        'conf_acronym': force_single_element(value.get('q')),
        'conference_record': conference_record,
        'hidden': key.startswith('7731') or None,
        'journal_issue': force_single_element(value.get('n')),
        'journal_record': journal_record,
        'journal_title': force_single_element(value.get('p')),
        'journal_volume': force_single_element(value.get('v')),
        'material': _get_material(value),
        'page_end': page_end,
        'page_start': page_start,
        'parent_isbn': _get_parent_isbn(value),
        'parent_record': parent_record,
        'parent_report_number': force_single_element(value.get('r')),
        'pubinfo_freetext': _get_pubinfo_freetext(value),
        'year': maybe_int(force_single_element(value.get('y'))),
    }
python
{ "resource": "" }
q11426
publication_info2marc
train
def publication_info2marc(self, key, values):
    """Populate the ``773`` MARC field.

    Also populates the ``7731`` MARC field through side effects.
    """
    result_773 = self.get('773', [])
    result_7731 = self.get('7731', [])

    for value in force_list(convert_new_publication_info_to_old(values)):
        page_artid = []
        if value.get('page_start') and value.get('page_end'):
            page_artid.append(u'{page_start}-{page_end}'.format(**value))
        elif value.get('page_start'):
            page_artid.append(u'{page_start}'.format(**value))
        elif value.get('artid'):
            page_artid.append(u'{artid}'.format(**value))

        result = {
            '0': get_recid_from_ref(value.get('parent_record')),
            'c': page_artid,
            'm': value.get('material'),
            'n': value.get('journal_issue'),
            'p': value.get('journal_title'),
            'q': value.get('conf_acronym'),
            'r': value.get('parent_report_number'),
            'v': value.get('journal_volume'),
            'w': value.get('cnum'),
            'x': value.get('pubinfo_freetext'),
            'y': value.get('year'),
            'z': value.get('parent_isbn'),
        }

        if value.get('hidden'):
            result_7731.append(result)
        else:
            result_773.append(result)

    self['7731'] = result_7731

    return result_773
python
{ "resource": "" }
q11427
related_records2marc
train
def related_records2marc(self, key, value):
    """Populate the ``78708`` MARC field.

    Also populates the ``78002`` and ``78502`` MARC fields through side
    effects.
    """
    if value.get('relation_freetext'):
        return {
            'i': value.get('relation_freetext'),
            'w': get_recid_from_ref(value.get('record')),
        }
    elif value.get('relation') == 'successor':
        self.setdefault('78502', []).append({
            'i': 'superseded by',
            'w': get_recid_from_ref(value.get('record')),
        })
    elif value.get('relation') == 'predecessor':
        self.setdefault('78002', []).append({
            'i': 'supersedes',
            'w': get_recid_from_ref(value.get('record')),
        })
    else:
        raise NotImplementedError(
            u"Unhandled relation in related_records: {}".format(value.get('relation')))
python
{ "resource": "" }
q11428
proceedings
train
def proceedings(self, key, value):
    """Populate the ``proceedings`` key.

    Also populates the ``refereed`` key through side effects.
    """
    proceedings = self.get('proceedings')
    refereed = self.get('refereed')

    if not proceedings:
        normalized_a_values = [el.upper() for el in force_list(value.get('a'))]
        if 'PROCEEDINGS' in normalized_a_values:
            proceedings = True

    if not refereed:
        normalized_a_values = [el.upper() for el in force_list(value.get('a'))]
        if 'PEER REVIEW' in normalized_a_values:
            refereed = True
        elif 'NON-PUBLISHED' in normalized_a_values:
            refereed = False

    self['refereed'] = refereed

    return proceedings
python
{ "resource": "" }
q11429
short_title
train
def short_title(self, key, value):
    """Populate the ``short_title`` key.

    Also populates the ``title_variants`` key through side effects.
    """
    short_title = value.get('a')
    title_variants = self.get('title_variants', [])

    if value.get('u'):
        short_title = value.get('u')
        title_variants.append(value.get('a'))

    self['title_variants'] = title_variants

    return short_title
python
{ "resource": "" }
q11430
ranks
train
def ranks(self, key, value):
    """Populate the ``ranks`` key."""
    return [normalize_rank(el) for el in force_list(value.get('a'))]
python
{ "resource": "" }
q11431
BaseProgram.new_parser
train
def new_parser(self):
    """ Create a command line argument parser

    Add a few default flags, such as --version
    for displaying the program version when invoked """

    parser = argparse.ArgumentParser(description=self.description)
    parser.add_argument(
        '--version', help='show version and exit',
        default=False, action='store_true')
    parser.add_argument(
        '--debug', help='enable debugging',
        default=False, action='store_true')
    return parser
python
{ "resource": "" }
q11432
Program.help_function
train
def help_function(self, command=None):
    """ Show help for all available commands or just a single one """
    if command:
        return self.registered[command].get(
            'description', 'No help available')
    return ', '.join(sorted(self.registered))
python
{ "resource": "" }
q11433
Program.add_command
train
def add_command(self, command, function, description=None):
    """ Register a new function for command """
    super(Program, self).add_command(command, function, description)
    self.service.register(command, function)
python
{ "resource": "" }
q11434
Response._show
train
def _show(self, res, err, prefix='', colored=False):
    """ Show result or error """
    # Note: string comparison uses ==, not the identity operator `is`,
    # which only works by accident of interning.
    if self.kind == 'local':
        what = res if not err else err
        print(what)
        return

    if self.kind == 'remote':
        if colored:
            red, green, reset = Fore.RED, Fore.GREEN, Fore.RESET
        else:
            red = green = reset = ''

        if err:
            what = prefix + red + 'remote err: {}'.format(err) + reset
        else:
            what = prefix + green + str(res) + reset
        print(what)
python
{ "resource": "" }
q11435
CtlProgram.call
train
def call(self, command, *args):
    """ Execute local OR remote command and show response """
    if not command:
        return

    # Look for local methods first
    try:
        res = self.registered[command]['function'](self, *args)
        return Response('local', res, None)

    # Method not found, try remote
    except KeyError:
        # Execute remote command
        res, err = self.client.call(command, *args)
        return Response('remote', res, err, self.client.is_multi())

    # Local exception
    except Exception as e:
        # `res` may never have been bound if the local function raised,
        # so return None as the result instead of a possibly-unbound name
        return Response('local', None, str(e))
python
{ "resource": "" }
q11436
CtlProgram.parse_input
train
def parse_input(self, text):
    """ Parse ctl user input. Double quotes are used to group
    together multi-word arguments. """
    parts = util.split(text)
    command = parts[0] if text and parts else None
    command = command.lower() if command else None
    args = parts[1:] if len(parts) > 1 else []
    return (command, args)
python
{ "resource": "" }
q11437
CtlProgram.loop
train
def loop(self):
    """ Enter loop, read user input then run command. Repeat """
    while True:
        text = compat.input('ctl > ')
        command, args = self.parse_input(text)
        if not command:
            continue
        response = self.call(command, *args)
        response.show()
python
{ "resource": "" }
q11438
split
train
def split(text):
    """ Split text into arguments, accounting for multi-word
    arguments which are double quoted """

    # Clean up text
    text = text.strip()
    text = re.sub(r'\s+', ' ', text)  # collapse multiple spaces

    space, quote, parts = ' ', '"', []
    part, quoted = '', False

    for char in text:

        # Encountered beginning double quote
        if char == quote and quoted is False:
            quoted = True
            continue

        # Encountered the ending double quote
        if char == quote and quoted is True:
            quoted = False
            parts.append(part.strip())
            part = ''
            continue

        # Found space while quoted
        if char == space and quoted is True:
            part += char
            continue

        # Found space but not quoted
        if char == space:
            if part:
                parts.append(part)
                part = ''
            continue

        # Found any other character
        part += char

    if part:
        parts.append(part.strip())

    return parts
python
{ "resource": "" }
q11439
read_version
train
def read_version():
    """ Read package version """
    with open('./oi/version.py') as fh:
        for line in fh:
            if line.startswith('VERSION'):
                return line.split('=')[1].strip().strip("'")
python
{ "resource": "" }
q11440
PrettyGraph.strip_prefixes
train
def strip_prefixes(g: Graph):
    """ Remove the prefixes from the graph for aesthetics """
    return re.sub(r'^@prefix .* .\n', '',
                  g.serialize(format="turtle").decode(),
                  flags=re.MULTILINE).strip()
python
{ "resource": "" }
q11441
_PickleJar.clear
train
def clear(self) -> None:
    """ Clear all cache entries for directory and, if it is a 'pure'
    directory, remove the directory itself """
    if self._cache_directory is not None:
        # Safety - if there isn't a cache directory file, this probably isn't a valid cache
        assert os.path.exists(self._cache_directory_index), "Attempt to clear a non-existent cache"
        self._load()        # Shouldn't have any impact but...
        for e in self._cache.values():
            if os.path.exists(e.loc):
                os.remove(e.loc)
        self._cache.clear()
        self._update()
    self._cache = {}
python
{ "resource": "" }
q11442
fhirtordf
train
def fhirtordf(argv: List[str], default_exit: bool = True) -> bool:
    """ Entry point for command line utility """
    dlp = dirlistproc.DirectoryListProcessor(argv, description="Convert FHIR JSON into RDF",
                                             infile_suffix=".json", outfile_suffix=".ttl",
                                             addargs=addargs, noexit=not default_exit)
    if not dlp.successful_parse:
        return False

    # Version
    if dlp.opts.version:
        print("FHIR to RDF Conversion Tool -- Version {}".format(__version__))

    # We either have to have an input file or an input directory
    if not dlp.opts.infile and not dlp.opts.indir:
        if not dlp.opts.version:
            dlp.parser.error("Either an input file or an input directory must be supplied")
        return dlp.opts.version

    # Create the output directory if needed
    if dlp.opts.outdir and not os.path.exists(dlp.opts.outdir):
        os.makedirs(dlp.opts.outdir)

    # If we are going to a single output file or stdout, gather all the input
    dlp.opts.graph = Graph() if (not dlp.opts.outfile and not dlp.opts.outdir) or \
        (dlp.opts.outfile and len(dlp.opts.outfile) == 1) else None

    dlp.opts.fhir_metavoc = load_fhir_ontology(dlp.opts)

    # If it looks like we're processing a URL as an input file, skip the suffix check
    if dlp.opts.infile and len(dlp.opts.infile) == 1 and not dlp.opts.indir \
            and "://" in dlp.opts.infile[0]:
        dlp.infile_suffix = ""
        dlp.outfile_suffix = '.' + suffix_for(dlp.opts.format)

    nfiles, nsuccess = dlp.run(proc=proc_file, file_filter_2=file_filter)
    if nfiles:
        if dlp.opts.graph:
            serialize_graph(dlp.opts.graph,
                            dlp.opts.outfile[0] if dlp.opts.outfile else None, dlp.opts)
        return nsuccess > 0
    return False
python
{ "resource": "" }
q11443
get_distutils_option
train
def get_distutils_option(option, commands):
    """ Returns the value of the given distutils option.

    Parameters
    ----------
    option : str
        The name of the option

    commands : list of str
        The list of commands on which this option is available

    Returns
    -------
    val : str or None
        the value of the given distutils option. If the option is not set,
        returns None.
    """
    dist = get_dummy_distribution()

    for cmd in commands:
        cmd_opts = dist.command_options.get(cmd)
        if cmd_opts is not None and option in cmd_opts:
            return cmd_opts[option][1]
    else:
        return None
python
{ "resource": "" }
q11444
add_command_option
train
def add_command_option(command, name, doc, is_bool=False):
    """
    Add a custom option to a setup command.

    Issues a warning if the option already exists on that command.

    Parameters
    ----------
    command : str
        The name of the command as given on the command line

    name : str
        The name of the build option

    doc : str
        A short description of the option, for the `--help` message

    is_bool : bool, optional
        When `True`, the option is a boolean option and doesn't
        require an associated value.
    """
    dist = get_dummy_distribution()
    cmdcls = dist.get_command_class(command)

    if (hasattr(cmdcls, '_astropy_helpers_options') and
            name in cmdcls._astropy_helpers_options):
        return

    attr = name.replace('-', '_')

    if hasattr(cmdcls, attr):
        raise RuntimeError(
            '{0!r} already has a {1!r} class attribute, barring {2!r} from '
            'being usable as a custom option name.'.format(cmdcls, attr, name))

    for idx, cmd in enumerate(cmdcls.user_options):
        if cmd[0] == name:
            log.warn('Overriding existing {0!r} option '
                     '{1!r}'.format(command, name))
            del cmdcls.user_options[idx]
            if name in cmdcls.boolean_options:
                cmdcls.boolean_options.remove(name)
            break

    cmdcls.user_options.append((name, None, doc))

    if is_bool:
        cmdcls.boolean_options.append(name)

    # Distutils' command parsing requires that a command object have an
    # attribute with the same name as the option (with '-' replaced with '_')
    # in order for that option to be recognized as valid
    setattr(cmdcls, attr, None)

    # This caches the options added through add_command_option so that if it is
    # run multiple times in the same interpreter repeated adds are ignored
    # (this way we can still raise a RuntimeError if a custom option overrides
    # a built-in option)
    if not hasattr(cmdcls, '_astropy_helpers_options'):
        cmdcls._astropy_helpers_options = set([name])
    else:
        cmdcls._astropy_helpers_options.add(name)
python
{ "resource": "" }
q11445
ensure_sphinx_astropy_installed
train
def ensure_sphinx_astropy_installed():
    """
    Make sure that sphinx-astropy is available, installing it temporarily if not.

    This returns the available version of sphinx-astropy as well as any
    paths that should be added to sys.path for sphinx-astropy to be available.
    """
    # We've split out the Sphinx part of astropy-helpers into sphinx-astropy
    # but we want it to be auto-installed seamlessly for anyone using
    # build_docs. We check if it's already installed, and if not, we install
    # it to a local .eggs directory and add the eggs to the path (these
    # have to each be added to the path, we can't add them by simply adding
    # .eggs to the path)
    sys_path_inserts = []
    sphinx_astropy_version = None
    try:
        from sphinx_astropy import __version__ as sphinx_astropy_version  # noqa
    except ImportError:

        from setuptools import Distribution
        dist = Distribution()

        # Numpydoc 0.9.0 requires sphinx 1.6+, so we can limit the version
        # here until we also make our minimum required version Sphinx 1.6
        if SPHINX_LT_16:
            dist.fetch_build_eggs('numpydoc<0.9')

        # This egg build doesn't respect python_requires and is not aware of
        # pre-releases. We know that mpl 3.1+ requires Python 3.6+, so this
        # ugly workaround takes care of it until there is a solution for
        # https://github.com/astropy/astropy-helpers/issues/462
        if LooseVersion(sys.version) < LooseVersion('3.6'):
            dist.fetch_build_eggs('matplotlib<3.1')

        eggs = dist.fetch_build_eggs('sphinx-astropy')

        # Find out the version of sphinx-astropy if possible. For some old
        # setuptools version, eggs will be None even if sphinx-astropy was
        # successfully installed.
        if eggs is not None:
            for egg in eggs:
                if egg.project_name == 'sphinx-astropy':
                    sphinx_astropy_version = egg.parsed_version.public
                    break

        eggs_path = os.path.abspath('.eggs')
        for egg in glob.glob(os.path.join(eggs_path, '*.egg')):
            sys_path_inserts.append(egg)

    return sphinx_astropy_version, sys_path_inserts
python
{ "resource": "" }
q11446
get_numpy_include_path
train
def get_numpy_include_path():
    """
    Gets the path to the numpy headers.
    """
    # We need to go through this nonsense in case setuptools
    # downloaded and installed Numpy for us as part of the build or
    # install, since Numpy may still think it's in "setup mode", when
    # in fact we're ready to use it to build astropy now.
    import builtins
    if hasattr(builtins, '__NUMPY_SETUP__'):
        del builtins.__NUMPY_SETUP__
    import imp
    import numpy
    imp.reload(numpy)

    try:
        numpy_include = numpy.get_include()
    except AttributeError:
        numpy_include = numpy.get_numpy_include()
    return numpy_include
python
{ "resource": "" }
q11447
is_path_hidden
train
def is_path_hidden(filepath):
    """
    Determines if a given file or directory is hidden.

    Parameters
    ----------
    filepath : str
        The path to a file or directory

    Returns
    -------
    hidden : bool
        Returns `True` if the file is hidden
    """
    name = os.path.basename(os.path.abspath(filepath))
    if isinstance(name, bytes):
        is_dotted = name.startswith(b'.')
    else:
        is_dotted = name.startswith('.')
    return is_dotted or _has_hidden_attribute(filepath)
python
{ "resource": "" }
q11448
walk_skip_hidden
train
def walk_skip_hidden(top, onerror=None, followlinks=False):
    """
    A wrapper for `os.walk` that skips hidden files and directories.

    This function does not have the parameter `topdown` from
    `os.walk`: the directories must always be recursed top-down when
    using this function.

    See also
    --------
    os.walk : For a description of the parameters
    """
    for root, dirs, files in os.walk(
            top, topdown=True, onerror=onerror,
            followlinks=followlinks):
        # These lists must be updated in-place so os.walk will skip
        # hidden directories
        dirs[:] = [d for d in dirs if not is_path_hidden(d)]
        files[:] = [f for f in files if not is_path_hidden(f)]
        yield root, dirs, files
python
{ "resource": "" }
q11449
write_if_different
train
def write_if_different(filename, data):
    """Write `data` to `filename`, if the content of the file is different.

    Parameters
    ----------
    filename : str
        The file name to be written to.

    data : bytes
        The data to be written to `filename`.
    """
    assert isinstance(data, bytes)

    if os.path.exists(filename):
        with open(filename, 'rb') as fd:
            original_data = fd.read()
    else:
        original_data = None

    if original_data != data:
        with open(filename, 'wb') as fd:
            fd.write(data)
python
{ "resource": "" }
q11450
import_file
train
def import_file(filename, name=None):
    """
    Imports a module from a single file as if it doesn't belong to a
    particular package.

    The returned module will have the optional ``name`` if given, or else
    a name generated from the filename.
    """
    # Specifying a traditional dot-separated fully qualified name here
    # results in a number of "Parent module 'astropy' not found while
    # handling absolute import" warnings.  Using the same name, the
    # namespaces of the modules get merged together.  So, this
    # generates an underscore-separated name which is more likely to
    # be unique, and it doesn't really matter because the name isn't
    # used directly here anyway.
    mode = 'r'

    if name is None:
        basename = os.path.splitext(filename)[0]
        name = '_'.join(os.path.relpath(basename).split(os.sep)[1:])

    if not os.path.exists(filename):
        raise ImportError('Could not import file {0}'.format(filename))

    if import_machinery:
        loader = import_machinery.SourceFileLoader(name, filename)
        mod = loader.load_module()
    else:
        with open(filename, mode) as fd:
            mod = imp.load_module(name, fd, filename, ('.py', mode, 1))

    return mod
python
{ "resource": "" }
q11451
resolve_name
train
def resolve_name(name):
    """Resolve a name like ``module.object`` to an object and return it.

    Raise `ImportError` if the module or name is not found.
    """
    parts = name.split('.')
    cursor = len(parts) - 1
    module_name = parts[:cursor]
    attr_name = parts[-1]

    while cursor > 0:
        try:
            ret = __import__('.'.join(module_name), fromlist=[attr_name])
            break
        except ImportError:
            if cursor == 0:
                raise
            cursor -= 1
            module_name = parts[:cursor]
            attr_name = parts[cursor]
            ret = ''

    for part in parts[cursor:]:
        try:
            ret = getattr(ret, part)
        except AttributeError:
            raise ImportError(name)

    return ret
python
{ "resource": "" }
q11452
find_data_files
train
def find_data_files(package, pattern):
    """
    Include files matching ``pattern`` inside ``package``.

    Parameters
    ----------
    package : str
        The package inside which to look for data files

    pattern : str
        Pattern (glob-style) to match for the data files (e.g. ``*.dat``).
        This supports the ``**`` recursive syntax. For example, ``**/*.fits``
        matches all files ending with ``.fits`` recursively. Only one
        instance of ``**`` can be included in the pattern.
    """
    return glob.glob(os.path.join(package, pattern), recursive=True)
python
{ "resource": "" }
q11453
get_pkg_version_module
train
def get_pkg_version_module(packagename, fromlist=None):
    """Returns the package's .version module generated by
    `astropy_helpers.version_helpers.generate_version_py`.  Raises an
    ImportError if the version module is not found.

    If ``fromlist`` is an iterable, return a tuple of the members of the
    version module corresponding to the member names given in ``fromlist``.
    Raises an `AttributeError` if any of these module members are not found.
    """
    version = import_file(os.path.join(packagename, 'version.py'), name='version')

    if fromlist:
        return tuple(getattr(version, member) for member in fromlist)
    else:
        return version
python
{ "resource": "" }
q11454
get_debug_option
train
def get_debug_option(packagename):
    """ Determines if the build is in debug mode.

    Returns
    -------
    debug : bool
        True if the current build was started with the debug option, False
        otherwise.
    """
    try:
        current_debug = get_pkg_version_module(packagename,
                                               fromlist=['debug'])[0]
    except (ImportError, AttributeError):
        current_debug = None

    # Only modify the debug flag if one of the build commands was explicitly
    # run (i.e. not as a sub-command of something else)
    dist = get_dummy_distribution()
    if any(cmd in dist.commands for cmd in ['build', 'build_ext']):
        debug = bool(get_distutils_build_option('debug'))
    else:
        debug = bool(current_debug)

    if current_debug is not None and current_debug != debug:
        build_ext_cmd = dist.get_command_class('build_ext')
        build_ext_cmd._force_rebuild = True

    return debug
python
{ "resource": "" }
q11455
generate_hooked_command
train
def generate_hooked_command(cmd_name, cmd_cls, hooks):
    """
    Returns a generated subclass of ``cmd_cls`` that runs the pre- and
    post-command hooks for that command before and after the ``cmd_cls.run``
    method.
    """
    def run(self, orig_run=cmd_cls.run):
        self.run_command_hooks('pre_hooks')
        orig_run(self)
        self.run_command_hooks('post_hooks')

    return type(cmd_name, (cmd_cls, object),
                {'run': run,
                 'run_command_hooks': run_command_hooks,
                 'pre_hooks': hooks.get('pre', []),
                 'post_hooks': hooks.get('post', [])})
python
{ "resource": "" }
q11456
run_command_hooks
train
def run_command_hooks(cmd_obj, hook_kind):
    """Run hooks registered for that command and phase.

    *cmd_obj* is a finalized command object; *hook_kind* is either
    'pre_hooks' or 'post_hooks'.
    """
    hooks = getattr(cmd_obj, hook_kind, None)

    if not hooks:
        return

    for modname, hook in hooks:
        if isinstance(hook, str):
            try:
                hook_obj = resolve_name(hook)
            except ImportError as exc:
                raise DistutilsModuleError(
                    'cannot find hook {0}: {1}'.format(hook, exc))
        else:
            hook_obj = hook

        if not callable(hook_obj):
            raise DistutilsOptionError('hook {0!r} is not callable'.format(hook))

        log.info('running {0} from {1} for {2} command'.format(
            hook_kind.rstrip('s'), modname, cmd_obj.get_command_name()))

        try:
            hook_obj(cmd_obj)
        except Exception:
            log.error('{0} command hook {1} raised an exception:\n'.format(
                hook_obj.__name__, cmd_obj.get_command_name()))
            log.error(traceback.format_exc())
            sys.exit(1)
python
{ "resource": "" }
q11457
update_package_files
train
def update_package_files(srcdir, extensions, package_data, packagenames,
                         package_dirs):
    """
    This function is deprecated and maintained for backward compatibility
    with affiliated packages.  Affiliated packages should update their
    setup.py to use `get_package_info` instead.
    """
    info = get_package_info(srcdir)
    extensions.extend(info['ext_modules'])
    package_data.update(info['package_data'])
    packagenames = list(set(packagenames + info['packages']))
    package_dirs.update(info['package_dir'])
python
{ "resource": "" }
q11458
get_package_info
train
def get_package_info(srcdir='.', exclude=()):
    """
    Collates all of the information for building all subpackages
    and returns a dictionary of keyword arguments that can
    be passed directly to `distutils.setup`.

    The purpose of this function is to allow subpackages to update the
    arguments to the package's ``setup()`` function in its setup.py
    script, rather than having to specify all extensions/package data
    directly in the ``setup.py``.  See Astropy's own ``setup.py`` for
    example usage and the Astropy development docs for more details.

    This function obtains that information by iterating through all
    packages in ``srcdir`` and locating a ``setup_package.py`` module.
    This module can contain the following functions:
    ``get_extensions()``, ``get_package_data()``,
    ``get_build_options()``, and ``get_external_libraries()``.

    Each of those functions take no arguments.

    - ``get_extensions`` returns a list of
      `distutils.extension.Extension` objects.

    - ``get_package_data()`` returns a dict formatted as required by
      the ``package_data`` argument to ``setup()``.

    - ``get_build_options()`` returns a list of tuples describing the
      extra build options to add.

    - ``get_external_libraries()`` returns a list of libraries that can
      optionally be built using external dependencies.
    """
    ext_modules = []
    packages = []
    package_dir = {}

    # Read in existing package data, and add to it below
    setup_cfg = os.path.join(srcdir, 'setup.cfg')
    if os.path.exists(setup_cfg):
        conf = read_configuration(setup_cfg)
        if 'options' in conf and 'package_data' in conf['options']:
            package_data = conf['options']['package_data']
        else:
            package_data = {}
    else:
        package_data = {}

    if exclude:
        warnings.warn(
            "Use of the exclude parameter is no longer supported since it does "
            "not work as expected. Use add_exclude_packages instead. Note that "
            "it must be called prior to any other calls from setup helpers.",
            AstropyDeprecationWarning)

    # Use the find_packages tool to locate all packages and modules
    packages = find_packages(srcdir, exclude=exclude)

    # Update package_dir if the package lies in a subdirectory
    if srcdir != '.':
        package_dir[''] = srcdir

    # For each of the setup_package.py modules, extract any
    # information that is needed to install them.  The build options
    # are extracted first, so that their values will be available in
    # subsequent calls to `get_extensions`, etc.
    for setuppkg in iter_setup_packages(srcdir, packages):
        if hasattr(setuppkg, 'get_build_options'):
            options = setuppkg.get_build_options()
            for option in options:
                add_command_option('build', *option)
        if hasattr(setuppkg, 'get_external_libraries'):
            libraries = setuppkg.get_external_libraries()
            for library in libraries:
                add_external_library(library)

    for setuppkg in iter_setup_packages(srcdir, packages):
        # get_extensions must include any Cython extensions by their .pyx
        # filename.
        if hasattr(setuppkg, 'get_extensions'):
            ext_modules.extend(setuppkg.get_extensions())
        if hasattr(setuppkg, 'get_package_data'):
            package_data.update(setuppkg.get_package_data())

    # Locate any .pyx files not already specified, and add their extensions in.
    # The default include dirs include numpy to facilitate numerical work.
    ext_modules.extend(get_cython_extensions(srcdir, packages, ext_modules,
                                             ['numpy']))

    # Now remove extensions that have the special name 'skip_cython', as they
    # exist only to indicate that the cython extensions shouldn't be built
    for i, ext in reversed(list(enumerate(ext_modules))):
        if ext.name == 'skip_cython':
            del ext_modules[i]

    # On Microsoft compilers, we need to pass the '/MANIFEST'
    # commandline argument.  This was the default on MSVC 9.0, but is
    # now required on MSVC 10.0, but it doesn't seem to hurt to add
    # it unconditionally.
    if get_compiler_option() == 'msvc':
        for ext in ext_modules:
            ext.extra_link_args.append('/MANIFEST')

    return {
        'ext_modules': ext_modules,
        'packages': packages,
        'package_dir': package_dir,
        'package_data': package_data,
    }
python
{ "resource": "" }
q11459
iter_setup_packages
train
def iter_setup_packages(srcdir, packages):
    """ A generator that finds and imports all of the ``setup_package.py``
    modules in the source packages.

    Returns
    -------
    modgen : generator
        A generator that yields (modname, mod), where `mod` is the module and
        `modname` is the module name for the ``setup_package.py`` modules.
    """
    for packagename in packages:
        package_parts = packagename.split('.')
        package_path = os.path.join(srcdir, *package_parts)
        setup_package = os.path.relpath(
            os.path.join(package_path, 'setup_package.py'))

        if os.path.isfile(setup_package):
            module = import_file(setup_package,
                                 name=packagename + '.setup_package')
            yield module
python
{ "resource": "" }
q11460
get_cython_extensions
train
def get_cython_extensions(srcdir, packages, prevextensions=tuple(),
                          extincludedirs=None):
    """
    Looks for Cython files and generates Extensions if needed.

    Parameters
    ----------
    srcdir : str
        Path to the root of the source directory to search.

    prevextensions : list of `~distutils.core.Extension` objects
        The extensions that are already defined.  Any .pyx files already here
        will be ignored.

    extincludedirs : list of str or None
        Directories to include as the `include_dirs` argument to the
        generated `~distutils.core.Extension` objects.

    Returns
    -------
    exts : list of `~distutils.core.Extension` objects
        The new extensions that are needed to compile all .pyx files
        (does not include any already in `prevextensions`).
    """
    # Vanilla setuptools and old versions of distribute include Cython files
    # as .c files in the sources, not .pyx, so we cannot simply look for
    # existing .pyx sources in the previous sources, but we should also check
    # for .c files with the same remaining filename. So we look for .pyx and
    # .c files, and we strip the extension.
    prevsourcepaths = []
    ext_modules = []

    for ext in prevextensions:
        for s in ext.sources:
            if s.endswith(('.pyx', '.c', '.cpp')):
                sourcepath = os.path.realpath(os.path.splitext(s)[0])
                prevsourcepaths.append(sourcepath)

    for package_name in packages:
        package_parts = package_name.split('.')
        package_path = os.path.join(srcdir, *package_parts)

        for extmod, pyxfn in iter_pyx_files(package_path, package_name):
            sourcepath = os.path.realpath(os.path.splitext(pyxfn)[0])
            if sourcepath not in prevsourcepaths:
                ext_modules.append(Extension(extmod, [pyxfn],
                                             include_dirs=extincludedirs))

    return ext_modules
python
{ "resource": "" }
q11461
pkg_config
train
def pkg_config(packages, default_libraries, executable='pkg-config'):
    """
    Uses pkg-config to update a set of distutils Extension arguments
    to include the flags necessary to link against the given packages.

    If the pkg-config lookup fails, default_libraries is applied to
    libraries.

    Parameters
    ----------
    packages : list of str
        A list of pkg-config packages to look up.

    default_libraries : list of str
        A list of library names to use if the pkg-config lookup fails.

    Returns
    -------
    config : dict
        A dictionary containing keyword arguments to
        `distutils.Extension`.  These entries include:

        - ``include_dirs``: A list of include directories
        - ``library_dirs``: A list of library directories
        - ``libraries``: A list of libraries
        - ``define_macros``: A list of macro defines
        - ``undef_macros``: A list of macros to undefine
        - ``extra_compile_args``: A list of extra arguments to pass to
          the compiler
    """
    flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries',
                '-D': 'define_macros', '-U': 'undef_macros'}
    # Note: no trailing comma here -- the command must be a string,
    # not a one-element tuple, when passed to Popen with shell=True.
    command = "{0} --libs --cflags {1}".format(executable, ' '.join(packages))

    result = DistutilsExtensionArgs()

    try:
        pipe = subprocess.Popen(command, shell=True,
                                stdout=subprocess.PIPE)
        output = pipe.communicate()[0].strip()
    except subprocess.CalledProcessError as e:
        lines = [
            ("{0} failed. This may cause the build to fail below."
             .format(executable)),
            "  command: {0}".format(e.cmd),
            "  returncode: {0}".format(e.returncode),
            "  output: {0}".format(e.output)
        ]
        log.warn('\n'.join(lines))
        result['libraries'].extend(default_libraries)
    else:
        if pipe.returncode != 0:
            lines = [
                "pkg-config could not look up package(s) {0}.".format(
                    ", ".join(packages)),
                "This may cause the build to fail below."
            ]
            log.warn('\n'.join(lines))
            result['libraries'].extend(default_libraries)
        else:
            for token in output.split():
                # It's not clear what encoding the output of
                # pkg-config will come to us in.  It will probably be
                # some combination of pure ASCII (for the compiler
                # flags) and the filesystem encoding (for any argument
                # that includes directories or filenames), but this is
                # just conjecture, as the pkg-config documentation
                # doesn't seem to address it.
                arg = token[:2].decode('ascii')
                value = token[2:].decode(sys.getfilesystemencoding())
                if arg in flag_map:
                    if arg == '-D':
                        value = tuple(value.split('=', 1))
                    result[flag_map[arg]].append(value)
                else:
                    result['extra_compile_args'].append(value)

    return result
python
{ "resource": "" }
q11462
add_external_library
train
def add_external_library(library):
    """
    Add a build option for selecting the internal or system copy of a library.

    Parameters
    ----------
    library : str
        The name of the library.  If the library is `foo`, the build
        option will be called `--use-system-foo`.
    """
    for command in ['build', 'build_ext', 'install']:
        add_command_option(command, str('use-system-' + library),
                           'Use the system {0} library'.format(library),
                           is_bool=True)
python
{ "resource": "" }
q11463
find_packages
train
def find_packages(where='.', exclude=(), invalidate_cache=False):
    """
    This version of ``find_packages`` caches previous results to speed up
    subsequent calls.  Use ``invalidate_cache=True`` to ignore cached results
    from previous ``find_packages`` calls, and repeat the package search.
    """
    if exclude:
        warnings.warn(
            "Use of the exclude parameter is no longer supported since it does "
            "not work as expected. Use add_exclude_packages instead. Note that "
            "it must be called prior to any other calls from setup helpers.",
            AstropyDeprecationWarning)

    # Calling add_exclude_packages after this point will have no effect
    _module_state['excludes_too_late'] = True

    if not invalidate_cache and _module_state['package_cache'] is not None:
        return _module_state['package_cache']

    packages = _find_packages(
        where=where, exclude=list(_module_state['exclude_packages']))
    _module_state['package_cache'] = packages

    return packages
python
{ "resource": "" }
q11464
_get_flag_value_from_var
train
def _get_flag_value_from_var(flag, var, delim=' '):
    """
    Extract flags from an environment variable.

    Parameters
    ----------
    flag : str
        The flag to extract, for example '-I' or '-L'
    var : str
        The environment variable to extract the flag from,
        e.g. CFLAGS or LDFLAGS.
    delim : str, optional
        The delimiter separating flags inside the environment variable

    Examples
    --------
    Let's assume the LDFLAGS is set to '-L/usr/local/include -customflag'.
    This function will then return the following:

        >>> _get_flag_value_from_var('-L', 'LDFLAGS')
        '/usr/local/include'

    Notes
    -----
    Environment variables are first checked in ``os.environ[var]``, then in
    ``distutils.sysconfig.get_config_var(var)``.

    This function is not supported on Windows.
    """
    if sys.platform.startswith('win'):
        return None

    # Simple input validation
    if not var or not flag:
        return None
    flag_length = len(flag)
    if not flag_length:
        return None

    # Look for var in os.environ then in get_config_var
    if var in os.environ:
        flags = os.environ[var]
    else:
        try:
            flags = get_config_var(var)
        except KeyError:
            return None

    # Extract flag from {var:value}
    if flags:
        for item in flags.split(delim):
            if item.startswith(flag):
                return item[flag_length:]
python
{ "resource": "" }
q11465
get_openmp_flags
train
def get_openmp_flags():
    """
    Utility for returning compiler and linker flags possibly needed for
    OpenMP support.

    Returns
    -------
    result : `{'compiler_flags':<flags>, 'linker_flags':<flags>}`

    Notes
    -----
    The flags returned are not tested for validity, use
    `check_openmp_support(openmp_flags=get_openmp_flags())` to do so.
    """
    compile_flags = []
    link_flags = []

    if get_compiler_option() == 'msvc':
        compile_flags.append('-openmp')
    else:
        include_path = _get_flag_value_from_var('-I', 'CFLAGS')
        if include_path:
            compile_flags.append('-I' + include_path)

        lib_path = _get_flag_value_from_var('-L', 'LDFLAGS')
        if lib_path:
            link_flags.append('-L' + lib_path)
            link_flags.append('-Wl,-rpath,' + lib_path)

        compile_flags.append('-fopenmp')
        link_flags.append('-fopenmp')

    return {'compiler_flags': compile_flags, 'linker_flags': link_flags}
python
{ "resource": "" }
q11466
check_openmp_support
train
def check_openmp_support(openmp_flags=None):
    """
    Check whether OpenMP test code can be compiled and run.

    Parameters
    ----------
    openmp_flags : dict, optional
        This should be a dictionary with keys ``compiler_flags`` and
        ``linker_flags`` giving the compilation and linking flags
        respectively.  These are passed as `extra_postargs` to `compile()`
        and `link_executable()` respectively.  If this is not set, the flags
        will be automatically determined using environment variables.

    Returns
    -------
    result : bool
        `True` if the test passed, `False` otherwise.
    """
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    if not openmp_flags:
        # customize_compiler() extracts info from os.environ. If certain keys
        # exist it uses these plus those from sysconfig.get_config_vars().
        # If the key is missing in os.environ it is not extracted from
        # sysconfig.get_config_var(). E.g. 'LDFLAGS' gets left out, preventing
        # clang from finding libomp.dylib because -L<path> is not passed to
        # the linker. Call get_openmp_flags() to get flags missed by
        # customize_compiler().
        openmp_flags = get_openmp_flags()

    compile_flags = openmp_flags.get('compiler_flags')
    link_flags = openmp_flags.get('linker_flags')

    # Pass -coverage flag to linker.
    # https://github.com/astropy/astropy-helpers/pull/374
    if '-coverage' in compile_flags and '-coverage' not in link_flags:
        link_flags.append('-coverage')

    tmp_dir = tempfile.mkdtemp()
    start_dir = os.path.abspath('.')

    try:
        os.chdir(tmp_dir)

        # Write the test program
        with open('test_openmp.c', 'w') as f:
            f.write(CCODE)

        os.mkdir('objects')

        # Compile the test program
        ccompiler.compile(['test_openmp.c'], output_dir='objects',
                          extra_postargs=compile_flags)

        # Link the test program
        objects = glob.glob(os.path.join('objects',
                                         '*' + ccompiler.obj_extension))
        ccompiler.link_executable(objects, 'test_openmp',
                                  extra_postargs=link_flags)

        # Run the test program
        output = subprocess.check_output('./test_openmp')
        output = output.decode(sys.stdout.encoding or 'utf-8').splitlines()

        if 'nthreads=' in output[0]:
            nthreads = int(output[0].strip().split('=')[1])
            if len(output) == nthreads:
                is_openmp_supported = True
            else:
                log.warn("Unexpected number of lines from output of test "
                         "OpenMP program (output was {0})".format(output))
                is_openmp_supported = False
        else:
            log.warn("Unexpected output from test OpenMP "
                     "program (output was {0})".format(output))
            is_openmp_supported = False
    except (CompileError, LinkError, subprocess.CalledProcessError):
        is_openmp_supported = False
    finally:
        os.chdir(start_dir)

    return is_openmp_supported
python
{ "resource": "" }
q11467
is_openmp_supported
train
def is_openmp_supported():
    """
    Determine whether the build compiler has OpenMP support.
    """
    log_threshold = log.set_threshold(log.FATAL)
    ret = check_openmp_support()
    log.set_threshold(log_threshold)
    return ret
python
{ "resource": "" }
q11468
generate_openmp_enabled_py
train
def generate_openmp_enabled_py(packagename, srcdir='.', disable_openmp=None):
    """
    Generate ``package.openmp_enabled.is_openmp_enabled``, which can then be
    used to determine, post build, whether the package was built with or
    without OpenMP support.
    """
    if packagename.lower() == 'astropy':
        packagetitle = 'Astropy'
    else:
        packagetitle = packagename

    epoch = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
    timestamp = datetime.datetime.utcfromtimestamp(epoch)

    if disable_openmp is not None:
        import builtins
        builtins._ASTROPY_DISABLE_SETUP_WITH_OPENMP_ = disable_openmp

    if _ASTROPY_DISABLE_SETUP_WITH_OPENMP_:
        log.info("OpenMP support has been explicitly disabled.")

    openmp_support = (False if _ASTROPY_DISABLE_SETUP_WITH_OPENMP_
                      else is_openmp_supported())

    src = _IS_OPENMP_ENABLED_SRC.format(packagetitle=packagetitle,
                                        timestamp=timestamp,
                                        return_bool=openmp_support)

    package_srcdir = os.path.join(srcdir, *packagename.split('.'))
    is_openmp_enabled_py = os.path.join(package_srcdir, 'openmp_enabled.py')
    with open(is_openmp_enabled_py, 'w') as f:
        f.write(src)
python
{ "resource": "" }
q11469
HugeTable.GetValue
train
def GetValue(self, row, col):
    """
    Find the matching value from the pandas DataFrame, return it.
    """
    if len(self.dataframe):
        return str(self.dataframe.iloc[row, col])
    return ''
python
{ "resource": "" }
q11470
HugeTable.SetValue
train
def SetValue(self, row, col, value):
    """
    Set value in the pandas DataFrame
    """
    self.dataframe.iloc[row, col] = value
python
{ "resource": "" }
q11471
HugeTable.SetColumnValues
train
def SetColumnValues(self, col, value):
    """
    Custom method to efficiently set all values in a column.

    Parameters
    ----------
    col : str or int
        name or index position of column
    value : list-like
        values to assign to all cells in the column
    """
    try:
        self.dataframe.iloc[:, col] = value
    except ValueError:
        self.dataframe.loc[:, col] = value
python
{ "resource": "" }
q11472
HugeTable.GetColLabelValue
train
def GetColLabelValue(self, col):
    """
    Get col label from dataframe
    """
    if len(self.dataframe):
        return self.dataframe.columns[col]
    return ''
python
{ "resource": "" }
q11473
HugeTable.SetColLabelValue
train
def SetColLabelValue(self, col, value):
    """
    Set col label value in dataframe
    """
    if len(self.dataframe):
        col_name = str(self.dataframe.columns[col])
        self.dataframe.rename(columns={col_name: str(value)}, inplace=True)
    return None
python
{ "resource": "" }
q11474
BaseMagicGrid.set_scrollbars
train
def set_scrollbars(self):
    """
    Set to always have vertical scrollbar.
    Have horizontal scrollbar unless grid has very few rows.
    Older versions of wxPython will choke on this,
    in which case nothing happens.
    """
    try:
        if len(self.row_labels) < 5:
            show_horizontal = wx.SHOW_SB_NEVER
        else:
            show_horizontal = wx.SHOW_SB_DEFAULT
        self.ShowScrollbars(show_horizontal, wx.SHOW_SB_DEFAULT)
    except AttributeError:
        pass
python
{ "resource": "" }
q11475
BaseMagicGrid.on_edit_grid
train
def on_edit_grid(self, event):
    """Sets self.changes to True when the user edits the grid.
    Provides down and up key functionality for exiting the editor."""
    if not self.changes:
        self.changes = {event.Row}
    else:
        self.changes.add(event.Row)
    #self.changes = True
    try:
        editor = event.GetControl()
        editor.Bind(wx.EVT_KEY_DOWN, self.onEditorKey)
    except AttributeError:
        # if it's an EVT_GRID_EDITOR_SHOWN, it doesn't have the GetControl method
        pass
python
{ "resource": "" }
q11476
BaseMagicGrid.do_paste
train
def do_paste(self, event):
    """
    Read clipboard into dataframe.
    Paste data into grid, adding extra rows if needed
    and ignoring extra columns.
    """
    # find where the user has clicked
    col_ind = self.GetGridCursorCol()
    row_ind = self.GetGridCursorRow()
    # read in clipboard text
    text_df = pd.read_clipboard(header=None, sep='\t').fillna('')
    # add extra rows if needed to accommodate clipboard text
    row_length_diff = len(text_df) - (len(self.row_labels) - row_ind)
    if row_length_diff > 0:
        for n in range(row_length_diff):
            self.add_row()
    # ignore excess columns if present
    col_length_diff = len(text_df.columns) - (len(self.col_labels) - col_ind)
    if col_length_diff > 0:
        text_df = text_df.iloc[:, :-col_length_diff].copy()
    # go through copied text and parse it into the grid rows
    for label, row_data in text_df.iterrows():
        col_range = list(range(col_ind, col_ind + len(row_data)))
        if len(row_data) > 1:
            cols = list(zip(col_range, row_data.index))
            for column in cols:
                value = row_data[column[1]]
                this_col = column[0]
                self.SetCellValue(row_ind, this_col, str(value))
        else:
            value = row_data[0]
            self.SetCellValue(row_ind, col_ind, str(value))
        row_ind += 1
    # could instead use the wxPython clipboard here;
    # see old git history for that
    self.size_grid()
    event.Skip()
python
{ "resource": "" }
q11477
BaseMagicGrid.update_changes_after_row_delete
train
def update_changes_after_row_delete(self, row_num):
    """
    Update self.changes so that row numbers for edited rows are still correct.
    I.e., if row 4 was edited and then row 2 was deleted, row 4 becomes row 3.
    This function updates self.changes to reflect that.
    """
    if row_num in self.changes.copy():
        self.changes.remove(row_num)
    updated_rows = []
    for changed_row in self.changes:
        if changed_row == -1:
            updated_rows.append(-1)
        if changed_row > row_num:
            updated_rows.append(changed_row - 1)
        if changed_row < row_num:
            updated_rows.append(changed_row)
    self.changes = set(updated_rows)
python
{ "resource": "" }
q11478
BaseMagicGrid.paint_invalid_cell
train
def paint_invalid_cell(self, row, col, color='MEDIUM VIOLET RED', skip_cell=False):
    """
    Take a row and column and color that cell (and its column label)
    to flag it as invalid.
    """
    self.SetColLabelRenderer(col, MyColLabelRenderer('#1101e0'))
    # SetCellRenderer doesn't work with a table-based grid (HugeGrid class)
    if not skip_cell:
        self.SetCellRenderer(row, col, MyCustomRenderer(color))
python
{ "resource": "" }
q11479
HugeMagicGrid.add_col
train
def add_col(self, label):
    """
    Update the table dataframe and append a new column.

    Parameters
    ----------
    label : str

    Returns
    -------
    last_col : int
        index number of the added column
    """
    self.table.dataframe[label] = ''
    self.AppendCols(1, updateLabels=False)
    last_col = self.table.GetNumberCols() - 1
    self.SetColLabelValue(last_col, label)
    self.col_labels.append(label)
    self.size_grid()
    return last_col
python
{ "resource": "" }
q11480
HugeMagicGrid.remove_col
train
def remove_col(self, col_num):
    """
    Update the table dataframe and remove a column.
    Resize grid to display correctly.
    """
    label_value = self.GetColLabelValue(col_num).strip('**').strip('^^')
    self.col_labels.remove(label_value)
    del self.table.dataframe[label_value]
    result = self.DeleteCols(pos=col_num, numCols=1, updateLabels=True)
    self.size_grid()
    return result
python
{ "resource": "" }
q11481
main
train
def main(): """ NAME plotdi_a.py DESCRIPTION plots equal area projection from dec inc data and fisher mean, cone of confidence INPUT FORMAT takes dec, inc, alpha95 as first three columns in space delimited file SYNTAX plotdi_a.py [-i][-f FILE] OPTIONS -f FILE to read file name from command line -fmt [png,jpg,eps,pdf,svg] set plot file format ['svg' is default] -sav save plot and quit """ fmt,plot='svg',0 if len(sys.argv) > 0: if '-h' in sys.argv: # check if help is needed print(main.__doc__) sys.exit() # graceful quit if '-fmt' in sys.argv: ind=sys.argv.index('-fmt') fmt=sys.argv[ind+1] if '-sav' in sys.argv:plot=1 if '-f' in sys.argv: ind=sys.argv.index('-f') file=sys.argv[ind+1] f=open(file,'r') data=f.readlines() else: data=sys.stdin.readlines() # read in data from standard input DIs,Pars=[],[] for line in data: # read in the data from standard input pars=[] rec=line.split() # split each line on space to get records DIs.append([float(rec[0]),float(rec[1])]) pars.append(float(rec[0])) pars.append(float(rec[1])) pars.append(float(rec[2])) pars.append(float(rec[0])) isign=abs(float(rec[1])) / float(rec[1]) pars.append(float(rec[1])-isign*90.) #Beta inc pars.append(float(rec[2])) # gamma pars.append(float(rec[0])+90.) # Beta dec pars.append(0.) #Beta inc Pars.append(pars) # EQ={'eq':1} # make plot dictionary pmagplotlib.plot_init(EQ['eq'],5,5) title='Equal area projection' pmagplotlib.plot_eq(EQ['eq'],DIs,title)# plot directions for k in range(len(Pars)): pmagplotlib.plot_ell(EQ['eq'],Pars[k],'b',0,1) # plot ellipses files={} for key in list(EQ.keys()): files[key]=key+'.'+fmt titles={} titles['eq']='Equal Area Plot' if pmagplotlib.isServer: black = '#000000' purple = '#800080' EQ = pmagplotlib.add_borders(EQ,titles,black,purple) pmagplotlib.save_plots(EQ,files) elif plot==0: pmagplotlib.draw_figs(EQ) ans=input(" S[a]ve to save plot, [q]uit, Return to continue: ") if ans=="q": sys.exit() if ans=="a": pmagplotlib.save_plots(EQ,files) else: pmagplotlib.save_plots(EQ,files)
python
{ "resource": "" }
q11482
main
train
def main(): """ NAME di_rot.py DESCRIPTION rotates set of directions to new coordinate system SYNTAX di_rot.py [command line options] OPTIONS -h prints help message and quits -f specify input file, default is standard input -F specify output file, default is standard output -D D specify Dec of new coordinate system, default is 0 -I I specify Inc of new coordinate system, default is 90 INTPUT/OUTPUT dec inc [space delimited] """ D,I=0.,90. outfile="" infile="" if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-f' in sys.argv: ind=sys.argv.index('-f') infile=sys.argv[ind+1] data=numpy.loadtxt(infile) else: data=numpy.loadtxt(sys.stdin,dtype=numpy.float) if '-F' in sys.argv: ind=sys.argv.index('-F') outfile=sys.argv[ind+1] out=open(outfile,'w') if '-D' in sys.argv: ind=sys.argv.index('-D') D=float(sys.argv[ind+1]) if '-I' in sys.argv: ind=sys.argv.index('-I') I=float(sys.argv[ind+1]) if len(data.shape)>1: # 2-D array N=data.shape[0] DipDir,Dip=numpy.ones(N,dtype=numpy.float).transpose()*(D-180.),numpy.ones(N,dtype=numpy.float).transpose()*(90.-I) data=data.transpose() data=numpy.array([data[0],data[1],DipDir ,Dip]).transpose() drot,irot=pmag.dotilt_V(data) drot=(drot-180.)%360. # for k in range(N): if outfile=="": print('%7.1f %7.1f ' % (drot[k],irot[k])) else: out.write('%7.1f %7.1f\n' % (drot[k],irot[k])) else: d,i=pmag.dotilt(data[0],data[1],(D-180.),90.-I) if outfile=="": print('%7.1f %7.1f ' % ((d-180.)%360.,i)) else: out.write('%7.1f %7.1f\n' % ((d-180.)%360.,i))
python
{ "resource": "" }
q11483
Fit.get
train
def get(self, coordinate_system):
    """
    Return the pmagpy parameters dictionary associated with this fit
    and the given coordinate system

    @param: coordinate_system -> the coordinate system whose parameters to return
    """
    if coordinate_system == 'DA-DIR' or coordinate_system == 'specimen':
        return self.pars
    elif coordinate_system == 'DA-DIR-GEO' or coordinate_system == 'geographic':
        return self.geopars
    elif coordinate_system == 'DA-DIR-TILT' or coordinate_system == 'tilt-corrected':
        return self.tiltpars
    else:
        print("-E- no such parameters to fetch for " + coordinate_system + " in fit: " + self.name)
        return None
python
{ "resource": "" }
q11484
Fit.has_values
train
def has_values(self, name, tmin, tmax):
    """
    A basic fit equality checker; compares name and bounds of 2 fits

    @param: name -> name of the other fit
    @param: tmin -> lower bound of the other fit
    @param: tmax -> upper bound of the other fit
    @return: boolean comparing 2 fits
    """
    return str(self.name) == str(name) and str(self.tmin) == str(tmin) and str(self.tmax) == str(tmax)
python
{ "resource": "" }
q11485
get_n_tail
train
def get_n_tail(tmax, tail_temps):
    """determines number of included tail checks in best fit segment"""
    #print("tail_temps: {0}, tmax: {1}".format(tail_temps, tmax))
    t_index = 0
    adj_tmax = 0
    if tmax < tail_temps[0]:
        return 0
    try:
        t_index = list(tail_temps).index(tmax)
    except ValueError:
        # finds correct tmax if there was no tail check performed at tmax
        for temp in tail_temps:
            if temp <= tmax:
                adj_tmax = temp
        t_index = list(tail_temps).index(adj_tmax)
    incl_temps = tail_temps[0:t_index+1]  # +1 b/c slice end is not inclusive
    return len(incl_temps)
python
{ "resource": "" }
q11486
main
train
def main(): """ NAME dir_redo.py DESCRIPTION converts the Cogne DIR format to PmagPy redo file SYNTAX dir_redo.py [-h] [command line options] OPTIONS -h: prints help message and quits -f FILE: specify input file -F FILE: specify output file, default is 'zeq_redo' """ dir_path='.' zfile='zeq_redo' if '-WD' in sys.argv: ind=sys.argv.index('-WD') dir_path=sys.argv[ind+1] if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-f' in sys.argv: ind=sys.argv.index('-f') inspec=sys.argv[ind+1] if '-F' in sys.argv: ind=sys.argv.index('-F') zfile=sys.argv[ind+1] inspec=dir_path+"/"+inspec zfile=dir_path+"/"+zfile zredo=open(zfile,"w") # # read in DIR file # specs=[] prior_spec_data=open(inspec,'r').readlines() for line in prior_spec_data: line=line.replace("Dir"," Dir") line=line.replace("OKir"," OKir") line=line.replace("Fish"," Fish") line=line.replace("Man"," Man") line=line.replace("GC"," GC") line=line.replace("-T"," - T") line=line.replace("-M"," - M") rec=line.split() if len(rec)<2: sys.exit() if rec[1]=='Dir' or rec[1]=='GC': # skip all the other stuff spec=rec[0] specs.append(spec) comp_name=string.uppercase[specs.count(spec)-1] # assign component names calculation_type="DE-FM" if rec[1]=='Dir' and rec[2]=="Kir": calculation_type="DE-BFL" # assume default calculation type is best-fit line if rec[1]=='Dir' and rec[2]=="OKir": calculation_type="DE-BFL-A" # anchored best-fit line if rec[1]=='Dir' and rec[2]=="Fish": calculation_type="DE-FM" # fisher mean if rec[1]=='GC' : calculation_type="DE-BFP" # best-fit plane min,max=rec[3],rec[5] beg,end="","" if min=="NRM": beg=0 if min[0]=='M': beg=float(min[1:])*1e-3 # convert to T from mT elif min[0]=='T': beg=float(min[1:])+273 # convert to C to kelvin if max[0]=='M': end=float(max[1:])*1e-3 # convert to T from mT elif max[0]=='T': end=float(max[1:])+273 # convert to C to kelvin if beg==0:beg=273 outstring='%s %s %s %s %s \n'%(spec,calculation_type,beg,end,comp_name) zredo.write(outstring)
python
{ "resource": "" }
q11487
MagMainFrame.set_dm
train
def set_dm(self, num):
    """
    Make GUI changes based on data model num.
    Get info from WD in appropriate format.
    """
    # enable or disable self.btn1a
    if self.data_model_num == 3:
        self.btn1a.Enable()
    else:
        self.btn1a.Disable()
    #
    # set pmag_gui_dialogs
    global pmag_gui_dialogs
    if self.data_model_num == 2:
        pmag_gui_dialogs = pgd2
        wx.CallAfter(self.get_wd_data2)
    elif self.data_model_num == 3:
        pmag_gui_dialogs = pgd3
        wx.CallAfter(self.get_wd_data)
    # do / re-do menubar
    menubar = pmag_gui_menu.MagICMenu(self, data_model_num=self.data_model_num)
    self.SetMenuBar(menubar)
    self.menubar = menubar
python
{ "resource": "" }
q11488
MagMainFrame.get_wd_data
train
def get_wd_data(self):
    """
    Read in any available data from the current working directory
    and create a Contribution object from it.
    Called by self.get_dm_and_wd
    """
    wait = wx.BusyInfo('Reading in data from current working directory, please wait...')
    #wx.Yield()
    print('-I- Read in any available data from working directory')
    self.contribution = cb.Contribution(self.WD, dmodel=self.data_model)
    del wait
python
{ "resource": "" }
q11489
MagMainFrame.get_wd_data2
train
def get_wd_data2(self):
    """
    Get 2.5 data from self.WD and put it into ErMagicBuilder object.
    Called by get_dm_and_wd
    """
    wait = wx.BusyInfo('Reading in data from current working directory, please wait...')
    #wx.Yield()
    print('-I- Read in any available data from working directory (data model 2)')
    self.er_magic = builder.ErMagicBuilder(self.WD, data_model=self.data_model)
    del wait
python
{ "resource": "" }
q11490
MagMainFrame.get_dir
train
def get_dir(self):
    """
    Choose a working directory dialog.
    Called by self.get_dm_and_wd.
    """
    if "-WD" in sys.argv and self.FIRST_RUN:
        ind = sys.argv.index('-WD')
        self.WD = os.path.abspath(sys.argv[ind+1])
        os.chdir(self.WD)
        self.WD = os.getcwd()
        self.dir_path.SetValue(self.WD)
    else:
        self.on_change_dir_button(None)
        #self.WD = os.getcwd()
    self.FIRST_RUN = False
python
{ "resource": "" }
q11491
MagMainFrame.on_btn_thellier_gui
train
def on_btn_thellier_gui(self, event):
    """
    Open Thellier GUI
    """
    if not self.check_for_meas_file():
        return
    if not self.check_for_uncombined_files():
        return

    outstring = "thellier_gui.py -WD %s" % self.WD
    print("-I- running python script:\n %s" % (outstring))
    if self.data_model_num == 2.5:
        thellier_gui.main(self.WD, standalone_app=False, parent=self, DM=self.data_model_num)
    else:
        # disable and hide Pmag GUI mainframe
        self.Disable()
        self.Hide()
        # show busyinfo
        wait = wx.BusyInfo('Compiling required data, please wait...')
        wx.SafeYield()
        # create custom Thellier GUI closing event and bind it
        ThellierGuiExitEvent, EVT_THELLIER_GUI_EXIT = newevent.NewCommandEvent()
        self.Bind(EVT_THELLIER_GUI_EXIT, self.on_analysis_gui_exit)
        # make and show the Thellier GUI frame
        thellier_gui_frame = thellier_gui.Arai_GUI(self.WD, self,
                                                   standalone=False,
                                                   DM=self.data_model_num,
                                                   evt_quit=ThellierGuiExitEvent)
        if not thellier_gui_frame:
            print("Thellier GUI failed to start; aborting")
            del wait
            return
        thellier_gui_frame.Centre()
        thellier_gui_frame.Show()
        del wait
python
{ "resource": "" }
q11492
MagMainFrame.on_btn_demag_gui
train
def on_btn_demag_gui(self, event):
    """
    Open Demag GUI
    """
    if not self.check_for_meas_file():
        return
    if not self.check_for_uncombined_files():
        return

    outstring = "demag_gui.py -WD %s" % self.WD
    print("-I- running python script:\n %s" % (outstring))
    if self.data_model_num == 2:
        demag_gui.start(self.WD, standalone_app=False, parent=self, DM=self.data_model_num)
    else:
        # disable and hide Pmag GUI mainframe
        self.Disable()
        self.Hide()
        # show busyinfo
        wait = wx.BusyInfo('Compiling required data, please wait...')
        wx.SafeYield()
        # create custom Demag GUI closing event and bind it
        DemagGuiExitEvent, EVT_DEMAG_GUI_EXIT = newevent.NewCommandEvent()
        self.Bind(EVT_DEMAG_GUI_EXIT, self.on_analysis_gui_exit)
        # make and show the Demag GUI frame
        demag_gui_frame = demag_gui.Demag_GUI(self.WD, self,
                                              write_to_log_file=False,
                                              data_model=self.data_model_num,
                                              evt_quit=DemagGuiExitEvent)
        demag_gui_frame.Centre()
        demag_gui_frame.Show()
        del wait
python
{ "resource": "" }
q11493
MagMainFrame.on_btn_convert_3
train
def on_btn_convert_3(self, event):
    """
    Open dialog for rough conversion of 2.5 files to 3.0 files.
    Offer link to earthref for proper upgrade.
    """
    dia = pw.UpgradeDialog(None)
    dia.Center()
    res = dia.ShowModal()
    if res == wx.ID_CANCEL:
        webbrowser.open("https://www2.earthref.org/MagIC/upgrade", new=2)
        return
    ## more nicely styled way, but doesn't link to earthref
    #msg = "This tool is meant for relatively simple upgrades (for instance, a measurement file, a sample file, and a criteria file).\nIf you have a more complex contribution to upgrade, and you want maximum accuracy, use the upgrade tool at https://www2.earthref.org/MagIC/upgrade.\n\nDo you want to continue?"
    #result = pw.warning_with_override(msg)
    #if result == wx.ID_NO:
    #    webbrowser.open("https://www2.earthref.org/MagIC/upgrade", new=2)
    #    return
    # turn files from 2.5 --> 3.0 (rough translation)
    meas, upgraded, no_upgrade = pmag.convert_directory_2_to_3('magic_measurements.txt',
                                                               input_dir=self.WD,
                                                               output_dir=self.WD,
                                                               data_model=self.contribution.data_model)
    if not meas:
        wx.MessageBox('2.5 --> 3.0 failed. Do you have a magic_measurements.txt file in your working directory?',
                      'Info', wx.OK | wx.ICON_INFORMATION)
        return

    # create a contribution
    self.contribution = cb.Contribution(self.WD)
    # make skeleton files with specimen, sample, site, location data
    self.contribution.propagate_measurement_info()

    # pop up
    upgraded_string = ", ".join(upgraded)
    if no_upgrade:
        no_upgrade_string = ", ".join(no_upgrade)
        msg = '2.5 --> 3.0 translation completed!\n\nThese 3.0 format files were created: {}.\n\nHowever, these 2.5 format files could not be upgraded: {}.\n\nTo convert all 2.5 files, use the MagIC upgrade tool: https://www2.earthref.org/MagIC/upgrade\n'.format(upgraded_string, no_upgrade_string)
        if 'criteria.txt' in upgraded:
            msg += '\nNote: Please check your criteria file for completeness and accuracy, as not all 2.5 files will be fully upgraded.'
        if 'pmag_criteria.txt' in no_upgrade:
            msg += '\nNote: Not all criteria files can be upgraded, even on the MagIC site. You may need to recreate an old pmag_criteria file from scratch in Thellier GUI or Demag GUI.'
        wx.MessageBox(msg, 'Warning', wx.OK | wx.ICON_INFORMATION)
    else:
        msg = '2.5 --> 3.0 translation completed!\nThese files were converted: {}'.format(upgraded_string)
        wx.MessageBox(msg, 'Info', wx.OK | wx.ICON_INFORMATION)
python
{ "resource": "" }
q11494
MagMainFrame.on_btn_metadata
train
def on_btn_metadata(self, event):
    """
    Initiate the series of windows to add metadata to the contribution.
    """
    # make sure we have a measurements file
    if not self.check_for_meas_file():
        return
    # make sure all files of the same type have been combined
    if not self.check_for_uncombined_files():
        return
    if self.data_model_num == 2:
        wait = wx.BusyInfo('Compiling required data, please wait...')
        wx.SafeYield()
        self.ErMagic_frame = ErMagicBuilder.MagIC_model_builder(self.WD, self, self.er_magic)
    elif self.data_model_num == 3:
        wait = wx.BusyInfo('Compiling required data, please wait...')
        wx.SafeYield()
        self.ErMagic_frame = ErMagicBuilder.MagIC_model_builder3(self.WD, self, self.contribution)
    #
    self.ErMagic_frame.Show()
    self.ErMagic_frame.Center()
    # gets total available screen space minus 30%
    size = wx.DisplaySize()
    size = (size[0] - 0.3 * size[0], size[1] - 0.3 * size[1])
    self.ErMagic_frame.Raise()
    del wait
python
{ "resource": "" }
q11495
MagMainFrame.on_btn_orientation
train
def on_btn_orientation(self, event):
    """
    Create and fill wxPython grid for entering orientation data.
    """
    wait = wx.BusyInfo('Compiling required data, please wait...')
    wx.SafeYield()
    #dw, dh = wx.DisplaySize()
    size = wx.DisplaySize()
    size = (size[0] - 0.1 * size[0], size[1] - 0.1 * size[1])
    if self.data_model_num == 3:
        frame = pmag_gui_dialogs.OrientFrameGrid3(self, -1, 'demag_orient.txt',
                                                  self.WD, self.contribution, size)
    else:
        frame = pmag_gui_dialogs.OrientFrameGrid(self, -1, 'demag_orient.txt',
                                                 self.WD, self.er_magic, size)
    frame.Show(True)
    frame.Centre()
    self.Hide()
    del wait
python
{ "resource": "" }
q11496
MagMainFrame.on_btn_unpack
train
def on_btn_unpack(self, event):
    """
    Create dialog to choose a file to unpack with download magic.
    Then run download_magic and create self.contribution.
    """
    dlg = wx.FileDialog(
        None, message="choose txt file to unpack",
        defaultDir=self.WD,
        defaultFile="",
        style=wx.FD_OPEN  #| wx.FD_CHANGE_DIR
    )
    if dlg.ShowModal() == wx.ID_OK:
        FILE = dlg.GetPath()
        input_dir, f = os.path.split(FILE)
    else:
        return False

    outstring = "download_magic.py -f {} -WD {} -ID {} -DM {}".format(f, self.WD, input_dir, self.data_model_num)

    # run as module:
    print("-I- running python script:\n %s" % (outstring))
    wait = wx.BusyInfo("Please wait, working...")
    wx.SafeYield()
    ex = None
    try:
        if ipmag.download_magic(f, self.WD, input_dir, overwrite=True,
                                data_model=self.data_model):
            text = "Successfully ran download_magic.py program.\nMagIC files were saved in your working directory.\nSee Terminal/message window for details."
        else:
            text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
    except Exception as exc:
        # keep a reference: Python 3 unbinds the 'as' name when the block ends
        ex = exc
        text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
    del wait
    dlg = wx.MessageDialog(self, caption="Saved", message=text, style=wx.OK)
    result = dlg.ShowModal()
    if result == wx.ID_OK:
        dlg.Destroy()
    if ex:
        raise ex
    self.contribution = cb.Contribution(self.WD)
python
{ "resource": "" }
q11497
MagMainFrame.on_end_validation
train
def on_end_validation(self, event):
    """
    Switch back from validation mode to main Pmag GUI mode.
    Hide validation frame and show main frame.
    """
    self.Enable()
    self.Show()
    self.magic_gui_frame.Destroy()
python
{ "resource": "" }
q11498
MagMainFrame.on_menu_exit
train
def on_menu_exit(self, event):
    """
    Exit the GUI
    """
    # also delete appropriate copy file
    try:
        self.help_window.Destroy()
    except:
        pass
    if '-i' in sys.argv:
        self.Destroy()
    try:
        sys.exit()  # can raise TypeError if wx inspector was used
    except Exception as ex:
        if isinstance(ex, TypeError):
            pass
        else:
            raise ex
python
{ "resource": "" }
q11499
MagMainFrame.check_for_meas_file
train
def check_for_meas_file(self):
    """
    Check the working directory for a measurement file.
    If not found, show a warning and return False.
    Otherwise return True.
    """
    if self.data_model_num == 2:
        meas_file_name = "magic_measurements.txt"
        dm = "2.5"
    else:
        meas_file_name = "measurements.txt"
        dm = "3.0"
    if not os.path.isfile(os.path.join(self.WD, meas_file_name)):
        pw.simple_warning("Your working directory must have a {} format {} file to run this step. Make sure you have fully completed step 1 (import magnetometer file) and also converted to 3.0, if necessary. Then try again.\n\nIf you are trying to look at data downloaded from MagIC, you must unpack the txt file first. Some contributions do not contain measurement data, in which case you won't be able to use this function.".format(dm, meas_file_name))
        return False
    return True
python
{ "resource": "" }