code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def search(term, exact=False, rows=1e6):
    '''Searches ChEBI via the EBI Ontology Lookup Service (OLS).

    :param term: search term
    :param exact: whether to require an exact match
    :param rows: maximum number of results to request
    :return: list of ChebiEntity objects, one per matching document
    '''
    # Let requests build the query string so the search term is properly
    # URL-encoded (plain string concatenation broke on terms containing
    # spaces, '&' or other reserved characters).
    url = 'https://www.ebi.ac.uk/ols/api/search'
    params = {'ontology': 'chebi',
              'exact': exact,
              'q': term,
              'rows': int(rows)}
    response = requests.get(url, params=params)
    data = response.json()
    return [ChebiEntity(doc['obo_id']) for doc in data['response']['docs']]
3.653198
3.015822
1.211344
def get(self, section, key):
    """Read and return the config value for the given section and key.

    :param section: config section to look in (e.g. 'oxd' or 'client')
    :param key: name of the required config value
    :return: the stored value, or None if the section or key could not
        be found (a warning is logged in that case)
    """
    try:
        return self.parser.get(section, key)
    except (NoOptionError, NoSectionError) as err:
        logger.warning("%s", err)
        return None
2.759536
4.248405
0.649546
def set(self, section, key, value):
    """Store a config value under the given section and key and persist
    the parser state to the config file.

    :param section: section under which the value should be saved
    :param key: name of the config value
    :param value: the value to store, as a string
    :return: True on success, False if the section does not exist
    """
    if not self.parser.has_section(section):
        logger.warning("Invalid config section: %s", section)
        return False

    self.parser.set(section, key, value)
    with open(self.config_file, 'wb') as config_fp:
        self.parser.write(config_fp)
    return True
2.532696
3.093745
0.818651
def get_all_formulae(chebi_ids):
    '''Returns the formulae of all given ChEBI ids as one flat list.'''
    return [formula
            for chebi_id in chebi_ids
            for formula in get_formulae(chebi_id)]
2.301386
2.539103
0.906378
def get_mass(chebi_id):
    '''Returns the mass for the given ChEBI id (NaN if unknown).'''
    # Lazily parse the chemical data file on first use.
    if not __MASSES:
        __parse_chemical_data()

    return __MASSES.get(chebi_id, float('NaN'))
4.018003
4.341086
0.925575
def get_charge(chebi_id):
    '''Returns the charge for the given ChEBI id (NaN if unknown).'''
    # Lazily parse the chemical data file on first use.
    if not __CHARGES:
        __parse_chemical_data()

    return __CHARGES.get(chebi_id, float('NaN'))
4.56146
5.072176
0.89931
def __parse_chemical_data():
    '''Downloads (if necessary) and parses chemical_data.tsv, populating
    the module-level __FORMULAE, __MASSES and __CHARGES caches, keyed by
    ChEBI id.'''
    filename = get_file('chemical_data.tsv')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        next(textfile)  # skip header row

        for line in textfile:
            tokens = line.strip().split('\t')

            if tokens[3] == 'FORMULA':
                # Many seemingly contradictory formulae exist,
                # depending upon the source database
                chebi_id = int(tokens[1])

                if chebi_id not in __FORMULAE:
                    __FORMULAE[chebi_id] = []

                # Append formula:
                form = Formula(tokens[4], tokens[2])
                __FORMULAE[chebi_id].append(form)
            elif tokens[3] == 'MASS':
                __MASSES[int(tokens[1])] = float(tokens[4])
            elif tokens[3] == 'CHARGE':
                # Charges may carry a trailing sign (e.g. '2-');
                # normalise to a signed int.
                __CHARGES[int(tokens[1])] = int(tokens[4]
                                                if tokens[4][-1] != '-'
                                                else '-' + tokens[4][:-1])
3.52196
3.454762
1.019451
def get_all_comments(chebi_ids):
    '''Returns the comments of all given ChEBI ids as one flat list.'''
    return [comment
            for chebi_id in chebi_ids
            for comment in get_comments(chebi_id)]
2.636415
2.905982
0.907237
def __parse_comments():
    '''Downloads (if necessary) and parses comments.tsv, populating the
    module-level __COMMENTS cache, keyed by ChEBI id.'''
    filename = get_file('comments.tsv')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        next(textfile)  # skip header row

        for line in textfile:
            tokens = line.strip().split('\t')
            chebi_id = int(tokens[1])

            if chebi_id not in __COMMENTS:
                __COMMENTS[chebi_id] = []

            # Append Comment. NOTE: the format must use %m (month);
            # the original '%Y-%M-%d' parsed the month column as
            # minutes, silently yielding January for every date.
            com = Comment(tokens[3],
                          tokens[4],
                          tokens[5],
                          datetime.datetime.strptime(tokens[2], '%Y-%m-%d'))

            __COMMENTS[chebi_id].append(com)
3.786868
3.626494
1.044223
def get_all_compound_origins(chebi_ids):
    '''Returns the compound origins of all given ChEBI ids as one flat
    list.'''
    return [origin
            for chebi_id in chebi_ids
            for origin in get_compound_origins(chebi_id)]
2.422929
2.541777
0.953242
def __parse_compound_origins():
    '''Downloads (if necessary) and parses compound_origins.tsv,
    populating the module-level __COMPOUND_ORIGINS cache, keyed by
    ChEBI id.'''
    filename = get_file('compound_origins.tsv')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        next(textfile)  # skip header row

        for line in textfile:
            tokens = line.strip().split('\t')

            # Rows with fewer than 11 columns are incomplete and skipped.
            if len(tokens) > 10:
                chebi_id = int(tokens[1])

                if chebi_id not in __COMPOUND_ORIGINS:
                    __COMPOUND_ORIGINS[chebi_id] = []

                # Append CompoundOrigin:
                comp_orig = CompoundOrigin(tokens[2], tokens[3],
                                           tokens[4], tokens[5],
                                           tokens[6], tokens[7],
                                           tokens[8], tokens[9],
                                           tokens[10])

                __COMPOUND_ORIGINS[chebi_id].append(comp_orig)
2.580425
2.470756
1.044387
def get_parent_id(chebi_id):
    '''Returns the parent id of the given ChEBI id (NaN if it has
    none).'''
    # Lazily parse the compounds file on first use.
    if not __PARENT_IDS:
        __parse_compounds()

    return __PARENT_IDS.get(chebi_id, float('NaN'))
4.78679
4.853532
0.986249
def get_all_modified_on(chebi_ids):
    '''Returns the most recent modified-on timestamp across all given
    ChEBI ids, or None if none of them has one.'''
    timestamps = [get_modified_on(chebi_id) for chebi_id in chebi_ids]
    timestamps = [stamp for stamp in timestamps if stamp is not None]
    return max(timestamps) if timestamps else None
2.163296
2.196897
0.984705
def get_star(chebi_id):
    '''Returns the star (curation) rating for the given ChEBI id (NaN if
    unknown).  The original docstring wrongly said "Returns created by".'''
    # Lazily parse the compounds file on first use.
    if len(__STARS) == 0:
        __parse_compounds()

    return __STARS[chebi_id] if chebi_id in __STARS else float('NaN')
6.773267
5.679543
1.192572
def __parse_compounds():
    '''Downloads (if necessary) and parses compounds.tsv.gz, populating
    the module-level status, source, parent-id, all-ids, name, definition,
    modified-on, created-by and star caches, all keyed by ChEBI id.'''
    filename = get_file('compounds.tsv.gz')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        next(textfile)  # skip header row

        for line in textfile:
            tokens = line.strip().split('\t')
            chebi_id = int(tokens[0])

            __STATUSES[chebi_id] = tokens[1]
            __SOURCES[chebi_id] = tokens[3]

            # 'null' means the compound has no parent.
            parent_id_token = tokens[4]
            __PARENT_IDS[chebi_id] = float('NaN') \
                if parent_id_token == 'null' \
                else int(parent_id_token)

            # Register the id under itself and, when a parent exists,
            # under the parent too, so __ALL_IDS maps an id to all of
            # its related ids.
            __put_all_ids(chebi_id, chebi_id)

            if parent_id_token != 'null':
                parent_id = int(parent_id_token)
                __put_all_ids(parent_id, chebi_id)

            __NAMES[chebi_id] = None if tokens[5] == 'null' else tokens[5]
            __DEFINITIONS[chebi_id] = None if tokens[6] == 'null' \
                else tokens[6]
            __MODIFIED_ONS[chebi_id] = None if tokens[7] == 'null' \
                else datetime.datetime.strptime(tokens[7], '%Y-%m-%d')
            # Some rows lack the created-by column; in that case the star
            # rating shifts one column to the left.
            __CREATED_BYS[chebi_id] = None if tokens[8] == 'null' \
                or len(tokens) == 9 else tokens[8]
            __STARS[chebi_id] = float('NaN') \
                if tokens[9 if len(tokens) > 9 else 8] == 'null' \
                else int(tokens[9 if len(tokens) > 9 else 8])
2.377506
2.329214
1.020734
def __put_all_ids(parent_id, child_id):
    '''Registers child_id under parent_id in the module-level __ALL_IDS
    index, creating the list on first use.  (The original docstring was
    the placeholder 'COMMENT'.)'''
    __ALL_IDS.setdefault(parent_id, []).append(child_id)
2.981661
2.604397
1.144857
def get_all_database_accessions(chebi_ids):
    '''Returns the database accessions of all given ChEBI ids as one flat
    list.'''
    return [accession
            for chebi_id in chebi_ids
            for accession in get_database_accessions(chebi_id)]
2.57025
2.710038
0.948418
def __parse_database_accessions():
    '''Downloads (if necessary) and parses database_accession.tsv,
    populating the module-level __DATABASE_ACCESSIONS cache, keyed by
    ChEBI id.'''
    filename = get_file('database_accession.tsv')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        next(textfile)  # skip header row

        for line in textfile:
            tokens = line.strip().split('\t')
            chebi_id = int(tokens[1])

            if chebi_id not in __DATABASE_ACCESSIONS:
                __DATABASE_ACCESSIONS[chebi_id] = []

            # Append DatabaseAccession:
            dat_acc = DatabaseAccession(tokens[3], tokens[4], tokens[2])

            __DATABASE_ACCESSIONS[chebi_id].append(dat_acc)
3.51464
3.42305
1.026757
def __parse_inchi():
    '''Downloads (if necessary) and parses chebiId_inchi.tsv, populating
    the module-level __INCHIS cache, keyed by ChEBI id.'''
    filename = get_file('chebiId_inchi.tsv')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        next(textfile)  # skip header row

        for record in textfile:
            fields = record.strip().split('\t')
            __INCHIS[int(fields[0])] = fields[1]
5.227216
4.751341
1.100156
def get_all_names(chebi_ids):
    '''Returns the names of all given ChEBI ids as one flat list.'''
    return [name
            for chebi_id in chebi_ids
            for name in get_names(chebi_id)]
2.660594
2.906978
0.915244
def __parse_names():
    '''Downloads (if necessary) and parses names.tsv.gz, populating the
    module-level __ALL_NAMES cache, keyed by ChEBI id.'''
    filename = get_file('names.tsv.gz')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        next(textfile)  # skip header row

        for line in textfile:
            tokens = line.strip().split('\t')
            chebi_id = int(tokens[1])

            if chebi_id not in __ALL_NAMES:
                __ALL_NAMES[chebi_id] = []

            # Append Name (column 5 'T' becomes a boolean flag):
            nme = Name(tokens[4],
                       tokens[2],
                       tokens[3],
                       tokens[5] == 'T',
                       tokens[6])

            __ALL_NAMES[chebi_id].append(nme)
3.977268
3.853838
1.032028
def get_references(chebi_ids):
    '''Returns all Reference objects citing any of the given ChEBI ids.

    :param chebi_ids: iterable of ChEBI ids
    :return: list of Reference objects
    '''
    references = []
    # Use a set for O(1) membership tests; a list lookup per scanned file
    # line made the original accidentally quadratic.
    chebi_ids = {str(chebi_id) for chebi_id in chebi_ids}
    filename = get_file('reference.tsv.gz')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        next(textfile)  # skip header row

        for line in textfile:
            tokens = line.strip().split('\t')

            if tokens[0] in chebi_ids:
                # Append Reference (the last two columns are optional).
                # NOTE(review): a row with exactly 4 columns would raise
                # IndexError here — confirm rows always have 3 or 5.
                if len(tokens) > 3:
                    ref = Reference(tokens[1], tokens[2], tokens[3],
                                    tokens[4])
                else:
                    ref = Reference(tokens[1], tokens[2])

                references.append(ref)

    return references
2.84806
2.86988
0.992397
def get_all_outgoings(chebi_ids):
    '''Returns the outgoing relations of all given ChEBI ids as one flat
    list.'''
    return [relation
            for chebi_id in chebi_ids
            for relation in get_outgoings(chebi_id)]
2.358227
2.540676
0.928189
def get_all_incomings(chebi_ids):
    '''Returns the incoming relations of all given ChEBI ids as one flat
    list.'''
    return [relation
            for chebi_id in chebi_ids
            for relation in get_incomings(chebi_id)]
2.369883
2.496808
0.949165
def __parse_relation():
    '''Downloads (if necessary) and parses relation.tsv and vertice.tsv,
    populating the module-level __OUTGOINGS and __INCOMINGS caches, keyed
    by ChEBI id.  (Fixed: the original opened both files without ever
    closing them.)'''
    relation_filename = get_file('relation.tsv')
    vertice_filename = get_file('vertice.tsv')

    # Parse vertice: maps internal vertex ids to ChEBI ids.
    vertices = {}

    with open(vertice_filename, 'r') as vertice_textfile:
        next(vertice_textfile)  # skip header row

        for line in vertice_textfile:
            tokens = line.strip().split('\t')
            vertices[tokens[0]] = tokens[1]

    with open(relation_filename, 'r') as relation_textfile:
        next(relation_textfile)  # skip header row

        for line in relation_textfile:
            tokens = line.strip().split('\t')

            source_chebi_id = int(vertices[tokens[3]])
            target_chebi_id = int(vertices[tokens[2]])
            typ = tokens[1]

            if source_chebi_id not in __OUTGOINGS:
                __OUTGOINGS[source_chebi_id] = []

            if target_chebi_id not in __INCOMINGS:
                __INCOMINGS[target_chebi_id] = []

            target_relation = Relation(typ, str(target_chebi_id), tokens[4])
            source_relation = Relation(typ, str(source_chebi_id), tokens[4])

            __OUTGOINGS[source_chebi_id].append(target_relation)
            __INCOMINGS[target_chebi_id].append(source_relation)
2.192417
2.153135
1.018244
def get_mol(chebi_id):
    '''Returns the default Molfile structure for the given ChEBI id.

    Scans structures.csv.gz for a record matching chebi_id whose structure
    id is among the default structure ids, accumulating the multi-line,
    quoted Molfile until the closing '",mol,<n>D' marker is reached.

    :return: a Structure of type Structure.mol, or None if not found
    '''
    chebi_id_regexp = '^\\d+\\,' + str(chebi_id) + '\\,.*'
    mol_file_end_regexp = '\",mol,\\dD'
    this_structure = []

    filename = get_file('structures.csv.gz')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        in_chebi_id = False
        next(textfile)  # skip header row

        for line in textfile:
            # Only record-start lines begin with a digit; other lines are
            # Molfile continuation lines of the current record.
            if in_chebi_id or line[0].isdigit():
                if re.match(chebi_id_regexp, line) \
                        and int(line.split(',')[0]) \
                        in __get_default_structure_ids():
                    # Start of the sought record: begin collecting.
                    tokens = line.strip().split(',')
                    in_chebi_id = True
                    this_structure = []
                    this_structure.append(','.join(tokens[2:])
                                          .replace('\"', ''))
                    this_structure.append('\n')
                elif in_chebi_id:
                    if re.match(mol_file_end_regexp, line):
                        # End marker, e.g. '",mol,2D': first char of
                        # tokens[2] is the dimension digit.
                        tokens = line.strip().split(',')
                        this_structure.append(tokens[0].replace('\"', ''))
                        return Structure(''.join(this_structure),
                                         Structure.mol,
                                         int(tokens[2][0]))
                    else:
                        # In Molfile:
                        this_structure.append(line)

    return None
4.253942
4.282882
0.993243
def get_mol_filename(chebi_id):
    '''Writes the Molfile for the given ChEBI id to a temporary file and
    returns its filename (None if no structure exists).

    Fixed: tempfile.mkstemp takes (suffix, prefix) — the original passed
    them swapped, producing names like ".molXXXX12345_"; the open file
    descriptor is now wrapped with os.fdopen so it is written and closed
    correctly.
    '''
    mol = get_mol(chebi_id)

    if mol is None:
        return None

    file_descriptor, mol_filename = tempfile.mkstemp(
        suffix='.mol', prefix=str(chebi_id) + '_')

    with os.fdopen(file_descriptor, 'w') as mol_file:
        mol_file.write(mol.get_structure())

    return mol_filename
3.126164
3.096102
1.00971
def __parse_structures():
    '''Parses structures.csv.gz, populating the module-level __INCHI_KEYS
    and __SMILES caches, keyed by ChEBI id.  (The original docstring was
    the placeholder 'COMMENT'.)'''
    filename = get_file('structures.csv.gz')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        next(textfile)  # skip header row

        for line in textfile:
            tokens = line.strip().split(',')

            # Only single-line (5-column) records hold InChIKey / SMILES;
            # multi-line Molfile records are handled elsewhere.
            if len(tokens) == 5:
                if tokens[3] == 'InChIKey':
                    __INCHI_KEYS[int(tokens[1])] = \
                        Structure(tokens[2],
                                  Structure.InChIKey,
                                  int(tokens[4][0]))
                elif tokens[3] == 'SMILES':
                    __SMILES[int(tokens[1])] = \
                        Structure(tokens[2],
                                  Structure.SMILES,
                                  int(tokens[4][0]))
3.051815
2.953263
1.03337
def __get_default_structure_ids():
    '''Returns the list of default structure ids, lazily loading it from
    default_structures.tsv on first call.  (The original docstring was
    the placeholder 'COMMENT'.)'''
    if len(__DEFAULT_STRUCTURE_IDS) == 0:
        filename = get_file('default_structures.tsv')

        with io.open(filename, 'r', encoding='cp1252') as textfile:
            next(textfile)  # skip header row

            for line in textfile:
                tokens = line.strip().split('\t')
                __DEFAULT_STRUCTURE_IDS.append(int(tokens[1]))

    return __DEFAULT_STRUCTURE_IDS
3.442257
3.12806
1.100445
def get_file(filename):
    '''Downloads filename from the ChEBI FTP site if no current local copy
    exists, unpacks .zip / .gz archives, and returns the local path of the
    usable (decompressed) file.

    :param filename: name of the flat file on the FTP site
    :return: local filesystem path
    '''
    destination = __DOWNLOAD_PARAMS['path']
    filepath = os.path.join(destination, filename)

    if not __is_current(filepath):
        if not os.path.exists(destination):
            os.makedirs(destination)

        url = 'ftp://ftp.ebi.ac.uk/pub/databases/chebi/' + \
            'Flat_file_tab_delimited/'
        urlretrieve(urlparse.urljoin(url, filename), filepath)
        urlcleanup()

    if filepath.endswith('.zip'):
        # Context manager ensures the archive handle is closed (the
        # original leaked the ZipFile object).
        with zipfile.ZipFile(filepath, 'r') as zfile:
            inner_name = zfile.namelist()[0]
            zfile.extractall(destination)

        filepath = os.path.join(destination, inner_name)
    elif filepath.endswith('.gz'):
        unzipped_filepath = filepath[:-len('.gz')]

        if os.path.exists(unzipped_filepath) \
                and __is_current(unzipped_filepath):
            filepath = unzipped_filepath
        else:
            # Decompress; the original re-joined input_file.name with the
            # destination (wrong for relative paths) and never closed the
            # handles — use the already-computed path and 'with' blocks.
            with gzip.open(filepath, 'rb') as input_file, \
                    open(unzipped_filepath, 'wb') as output_file:
                for line in input_file:
                    output_file.write(line)

            filepath = unzipped_filepath

    return filepath
2.548866
2.397093
1.063315
def __is_current(filepath):
    '''Checks whether the local copy of a file is up to date with the
    last FTP site update.'''
    # With auto-update disabled, any existing (or even missing) copy is
    # considered current — same behaviour as before.
    if not __DOWNLOAD_PARAMS['auto_update']:
        return True

    if not os.path.isfile(filepath):
        return False

    modified = datetime.datetime.utcfromtimestamp(os.path.getmtime(filepath))
    return modified > __get_last_update_time()
4.495999
4.791716
0.938286
def __get_last_update_time():
    '''Returns the most recent (past) FTP site update time.'''
    now = datetime.datetime.utcnow()

    # The site updates on the first Tuesday of the month; if this month's
    # first Tuesday is still in the future, use last month's instead.
    first_tuesday = __get_first_tuesday(now)

    if first_tuesday < now:
        return first_tuesday

    first_of_month = datetime.datetime(now.year, now.month, 1)
    last_month = first_of_month + datetime.timedelta(days=-1)
    return __get_first_tuesday(last_month)
3.328157
2.799711
1.18875
def __get_first_tuesday(this_date):
    '''Returns the first Tuesday of this_date's month as a datetime.'''
    # monthrange()[0] is the weekday (Mon=0) of the 1st of the month.
    first_weekday = calendar.monthrange(this_date.year, this_date.month)[0]
    offset_days = (calendar.TUESDAY - first_weekday) % 7
    month_start = datetime.datetime(this_date.year, this_date.month, 1)
    return month_start + datetime.timedelta(days=offset_days)
2.180048
2.243077
0.971901
def write_logger_id(self, manufacturer, logger_id, extension=None,
                    validate=True):
    """Write the manufacturer and logger id header line ('A' record),
    e.g. write_logger_id('XXX', 'ABC', extension='FLIGHT:1') writes
    'AXXXABCFLIGHT:1'.  Pass validate=False for older loggers with
    decimal logger ids.

    :param manufacturer: the three-letter-code of the manufacturer
    :param logger_id: the logger id, normally a three-letter-code
    :param extension: optional text appended to this header
    :param validate: whether to validate the two codes
    """
    if validate:
        if not patterns.MANUFACTURER_CODE.match(manufacturer):
            raise ValueError('Invalid manufacturer code')
        if not patterns.LOGGER_ID.match(logger_id):
            raise ValueError('Invalid logger id')

    record = '%s%s' % (manufacturer, logger_id)
    if extension:
        record += extension

    self.write_record('A', record)
2.952797
3.200201
0.922691
def write_fix_accuracy(self, accuracy=None):
    """Write the GPS fix accuracy header ('HFFXA'), e.g. HFFXA500 for
    the default of 500 m, or HFFXA025 for 25 m.

    :param accuracy: estimated GPS fix accuracy in meters (optional,
        defaults to 500; must be strictly between 0 and 1000)
    """
    accuracy = 500 if accuracy is None else accuracy
    accuracy = int(accuracy)

    if not 0 < accuracy < 1000:
        raise ValueError('Invalid fix accuracy')

    self.write_fr_header('FXA', '%03d' % accuracy)
5.521961
4.854019
1.137606
def write_gps_datum(self, code=None, gps_datum=None):
    """Write the mandatory GPS datum header ('HFDTM'), defaulting to
    code 100 / 'WGS-1984' (which should be used unless there are very
    good reasons against it), e.g. HFDTM100GPSDATUM:WGS-1984.

    :param code: GPS datum code per IGC file specification, section A8
    :param gps_datum: the GPS datum in written form
    """
    code = 100 if code is None else code
    gps_datum = 'WGS-1984' if gps_datum is None else gps_datum

    self.write_fr_header('DTM', '%03d' % code,
                         subtype_long='GPSDATUM', value_long=gps_datum)
5.120681
4.430873
1.155682
def write_headers(self, headers):
    """Write all the necessary headers in the correct order.

    Keys listed in self.REQUIRED_HEADERS must be present (ValueError
    otherwise).  Most optional string headers are filled with empty
    strings; copilot, competition_id, competition_class and club are
    written only if present in the dict.  Use of this method is
    encouraged over calling the individual header-writing methods
    manually, since it guarantees the required header order.

    :param headers: dict of all headers that should be written
    """
    for header in self.REQUIRED_HEADERS:
        if header not in headers:
            raise ValueError('%s header missing' % header)

    self.write_logger_id(
        headers['manufacturer_code'],
        headers['logger_id'],
        extension=headers.get('logger_id_extension')
    )

    self.write_date(headers['date'])
    self.write_fix_accuracy(headers.get('fix_accuracy'))

    self.write_pilot(headers.get('pilot', ''))
    # Copilot header is optional and only written when supplied.
    if 'copilot' in headers:
        self.write_copilot(headers['copilot'])

    self.write_glider_type(headers.get('glider_type', ''))
    self.write_glider_id(headers.get('glider_id', ''))

    self.write_gps_datum(
        code=headers.get('gps_datum_code'),
        gps_datum=headers.get('gps_datum'),
    )

    self.write_firmware_version(headers.get('firmware_version', ''))
    self.write_hardware_version(headers.get('hardware_version', ''))
    self.write_logger_type(headers['logger_type'])
    self.write_gps_receiver(headers['gps_receiver'])
    self.write_pressure_sensor(headers.get('pressure_sensor', ''))

    if 'competition_id' in headers:
        self.write_competition_id(headers['competition_id'])

    if 'competition_class' in headers:
        self.write_competition_class(headers['competition_class'])

    if 'club' in headers:
        self.write_club(headers['club'])
2.443269
1.746801
1.398711
def write_task_metadata(
        self,
        declaration_datetime=None,
        flight_date=None,
        task_number=None,
        turnpoints=None,
        text=None):
    """Write the task declaration metadata record ('C').

    Sensible defaults exist for everything except turnpoints, which must
    be an int (ValueError otherwise).

    :param declaration_datetime: datetime of the declaration in UTC, or
        an already-formatted datetime string (default: utcnow)
    :param flight_date: intended date of the flight (default: '000000',
        meaning "use declaration date")
    :param task_number: task number or integer-based identifier
        (default: 1)
    :param turnpoints: number of turnpoints, excluding start and finish
    :param text: optional text to append to the metadata record
    """
    if declaration_datetime is None:
        declaration_datetime = datetime.datetime.utcnow()

    # Accept either a datetime (formatted here) or a pre-formatted,
    # pattern-validated string.
    if isinstance(declaration_datetime, datetime.datetime):
        declaration_datetime = (
            self.format_date(declaration_datetime) +
            self.format_time(declaration_datetime)
        )
    elif not patterns.DATETIME.match(declaration_datetime):
        raise ValueError('Invalid declaration datetime: %s' %
                         declaration_datetime)

    if flight_date is None:
        flight_date = '000000'
    else:
        flight_date = self.format_date(flight_date)

    if task_number is None:
        task_number = 1
    elif not isinstance(task_number, int):
        raise ValueError('Invalid task number: %s' % task_number)

    if not isinstance(turnpoints, int):
        raise ValueError('Invalid turnpoints: %s' % turnpoints)

    record = '{0}{1}{2:04d}{3:02d}'.format(
        declaration_datetime,
        flight_date,
        task_number,
        turnpoints,
    )

    if text:
        record += text

    self.write_record('C', record)
2.028598
1.936089
1.047782
def write_task_point(self, latitude=None, longitude=None, text='',
                     distance_min=None, distance_max=None,
                     bearing1=None, bearing2=None):
    """Write a task declaration point ('C' record).

    Missing coordinates are formatted as zeros (appropriate only for
    TAKEOFF and LANDING points).  If all four area parameters are given,
    they are appended for area task points.

    :param latitude: latitude of the point (-90..90 degrees)
    :param longitude: longitude of the point (-180..180 degrees)
    :param text: type and/or name of the waypoint (e.g. 'TAKEOFF',
        'START', 'TURN 1', 'FINISH' or 'LANDING')
    :param distance_min: minimum distance of an area task point
    :param distance_max: maximum distance of an area task point
    :param bearing1: first bearing of an area task point
    :param bearing2: second bearing of an area task point
    """
    latitude = self.format_latitude(latitude)
    longitude = self.format_longitude(longitude)

    record = latitude + longitude

    if None not in [distance_min, distance_max, bearing1, bearing2]:
        # Each value is encoded as its zero-padded whole part followed
        # by the fractional part in thousandths.
        record += '%04d' % int(distance_min)
        record += '%03d' % int((distance_min - int(distance_min)) * 1000)
        record += '%04d' % int(distance_max)
        record += '%03d' % int((distance_max - int(distance_max)) * 1000)
        record += '%03d' % int(bearing1)
        record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
        record += '%03d' % int(bearing2)
        record += '%03d' % int((bearing2 - int(bearing2)) * 1000)

    if text:
        record += text

    self.write_record('C', record)
1.704927
1.773651
0.961253
def write_task_points(self, points):
    """Write several task declaration points with one call.

    Each entry is either a (latitude, longitude, text) tuple or, for
    area task points, a (latitude, longitude, text, distance_min,
    distance_max, bearing1, bearing2) tuple; any other length raises
    ValueError.  See write_task_point() for details.

    :param points: list of task point tuples
    """
    for point in points:
        if len(point) not in (3, 7):
            raise ValueError('Invalid number of task point tuple items')

        self.write_task_point(*point)
6.421734
6.778564
0.947359
def write_security(self, security, bytes_per_line=75):
    """Write the security signature ('G' records); signatures longer
    than bytes_per_line are split across multiple G records per the IGC
    file specification.

    :param security: the security signature
    :param bytes_per_line: maximum number of bytes per line (default 75)
    """
    chunks = (security[offset:offset + bytes_per_line]
              for offset in range(0, len(security), bytes_per_line))

    for chunk in chunks:
        self.write_record('G', chunk)
4.153735
3.47815
1.194237
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
              pressure_alt=None, gps_alt=None, extensions=None):
    """Write a fix record ('B'), e.g. B1234565124225N00624765EA0123401432.

    :param time: UTC time of the fix record (default: utcnow)
    :param latitude: latitude of the last GPS fix
    :param longitude: longitude of the last GPS fix
    :param valid: True if the current GPS fix is 3D ('A'), else 'V'
    :param pressure_alt: pressure altitude (defaults to 0)
    :param gps_alt: GPS altitude (defaults to 0)
    :param extensions: list of extension values matching a previous
        fix-extensions declaration
    """
    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)
    record += self.format_latitude(latitude)
    record += self.format_longitude(longitude)
    record += 'A' if valid else 'V'
    record += '%05d' % (pressure_alt or 0)
    record += '%05d' % (gps_alt or 0)

    if self.fix_extensions or extensions:
        # Every declared extension slot must be supplied, in order and
        # with the declared width.
        if not (isinstance(extensions, list) and
                isinstance(self.fix_extensions, list)):
            raise ValueError('Invalid extensions list')

        if len(extensions) != len(self.fix_extensions):
            raise ValueError(
                'Number of extensions does not match declaration')

        for type_length, value in zip(self.fix_extensions, extensions):
            length = type_length[1]

            # Numeric values are zero-padded to the declared width.
            if isinstance(value, (int, float)):
                value = ('%0' + str(length) + 'd') % value

            if len(value) != length:
                raise ValueError('Extension value has wrong length')

            record += value

    self.write_record('B', record)
2.313271
2.284847
1.01244
def write_event(self, *args):
    """Write an event record ('E')::

        writer.write_event(datetime.time(12, 34, 56), 'PEV')
        # -> E123456PEV

        writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
        # -> E123456PEVSome Text

        writer.write_event('PEV')  # uses utcnow()
        # -> E121503PEV

    :param time: UTC time of the event record (default: utcnow)
    :param code: event type as three-letter-code
    :param text: additional text describing the event (optional)
    """
    num_args = len(args)

    if not (1 <= num_args <= 3):
        raise ValueError('Invalid number of parameters received')

    # Dispatch on argument count; with two arguments the first may be
    # either a time or the event code.
    if num_args == 3:
        time, code, text = args
    elif num_args == 1:
        code = args[0]
        time = text = None
    elif isinstance(args[0], (datetime.time, datetime.datetime)):
        time, code = args
        text = None
    else:
        code, text = args
        time = None

    if time is None:
        time = datetime.datetime.utcnow()

    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid event code')

    record = self.format_time(time)
    record += code

    if text:
        record += text

    self.write_record('E', record)
3.111805
2.678299
1.161859
def write_satellites(self, *args):
    """Write a satellite constellation record ('F'), e.g.
    write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22]) writes
    'F12345601020522'.

    Accepts either (satellites) or (time, satellites); time defaults to
    the current UTC time.  Satellite IDs may be two-character strings or
    integers below 100.
    """
    if len(args) == 1:
        time, satellites = None, args[0]
    elif len(args) == 2:
        time, satellites = args
    else:
        raise ValueError('Invalid number of parameters received')

    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)

    for satellite in satellites:
        if isinstance(satellite, int):
            satellite = '%02d' % satellite

        if len(satellite) != 2:
            raise ValueError('Invalid satellite ID')

        record += satellite

    self.write_record('F', record)
2.620544
2.349189
1.11551
def write_k_record(self, *args):
    """Write a K record with extension values matching a previous
    k-record-extensions declaration, e.g.
    write_k_record(datetime.time(2, 3, 4), ['023', 13, 2]) writes
    'K02030402313002'.

    Accepts either (extensions) or (time, extensions).

    :param time: UTC time of the k record (default: utcnow)
    :param extensions: list of extension values matching the declaration
    """
    num_args = len(args)

    if num_args not in (1, 2):
        raise ValueError('Invalid number of parameters received')

    if num_args == 1:
        extensions = args[0]
        time = None
    else:
        time, extensions = args

    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)

    # Every declared extension slot must be supplied, in order and with
    # the declared width.
    if not (isinstance(extensions, list) and
            isinstance(self.k_record_extensions, list)):
        raise ValueError('Invalid extensions list')

    if len(extensions) != len(self.k_record_extensions):
        raise ValueError(
            'Number of extensions does not match declaration')

    for type_length, value in zip(self.k_record_extensions, extensions):
        length = type_length[1]

        # Numeric values are zero-padded to the declared width.
        if isinstance(value, (int, float)):
            value = ('%0' + str(length) + 'd') % value

        if len(value) != length:
            raise ValueError('Extension value has wrong length')

        record += value

    self.write_record('K', record)
2.766431
2.435484
1.135885
def write_comment(self, code, text):
    """Write a comment record ('L'), e.g.
    write_comment('PLT', 'Arrived at the first turnpoint') writes
    'LPLTArrived at the first turnpoint'.

    :param code: three-letter-code describing the comment source
        (e.g. 'PLT' for pilot)
    :param text: the text that should be added to the comment
    """
    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid source')

    self.write_record('L', '%s%s' % (code, text))
13.794737
9.255806
1.490387
def get_frontend_data_dict_for_cms_page(cms_page, cms_page_title, request,
                                        editable=False):
    """Returns the data dictionary of a CMS page that is used by the
    frontend.

    :param cms_page: the CMS page instance
    :param cms_page_title: the title object used for the meta block
    :param request: the current request
    :param editable: whether frontend-editing data should be included
    """
    placeholders = list(cms_page.placeholders.all())
    placeholder_frontend_data_dict = get_frontend_data_dict_for_placeholders(
        placeholders=placeholders,
        request=request,
        editable=editable
    )
    global_placeholder_data_dict = get_global_placeholder_data(
        placeholder_frontend_data_dict)

    data = {
        'containers': placeholder_frontend_data_dict,
        'meta': {
            # Prefer the dedicated page title, fall back to the title.
            'title': cms_page_title.page_title if cms_page_title.page_title
            else cms_page_title.title,
            'description': cms_page_title.meta_description or '',
        }
    }

    language_links = get_language_links(cms_page=cms_page, request=request)
    if language_links:
        data['meta']['languages'] = language_links

    # NOTE: the original re-assigned data['containers'] with the same
    # value here; that redundant reassignment has been removed.
    if global_placeholder_data_dict:
        data['global_placeholder_data'] = global_placeholder_data_dict

    # Allow projects to post-process the page data via a dotted-path
    # callback configured in the settings.
    post_processer = settings.DJANGOCMS_SPA_CMS_PAGE_DATA_POST_PROCESSOR
    if post_processer:
        func = get_function_by_path(post_processer)
        data = func(cms_page=cms_page, data=data, request=request)

    return data
2.335683
2.331236
1.001907
def get_frontend_data_dict_for_placeholders(placeholders, request,
                                            editable=False):
    """Takes a list of placeholder instances and returns the data that is
    used by the frontend to render all contents.  The returned dict is
    grouped by placeholder slots.

    :param placeholders: list of placeholder instances (may contain None)
    :param request: the current request (LANGUAGE_CODE is used to filter)
    :param editable: include CMS frontend-editing structures when True
    """
    data_dict = {}
    for placeholder in placeholders:
        if placeholder:
            plugins = []
            # We don't use the helper method `placeholder.get_plugins()` because of the wrong order by path.
            placeholder_plugins = placeholder.cmsplugin_set.filter(
                language=request.LANGUAGE_CODE).order_by(
                settings.DJANGOCMS_SPA_PLUGIN_ORDER_FIELD)

            for plugin in placeholder_plugins:
                # We need the complete cascading structure of the plugins in the frontend. This is why we ignore the
                # children here and add them later in the loop.
                if not plugin.parent:
                    plugins.append(get_frontend_data_dict_for_plugin(
                        request=request,
                        plugin=plugin,
                        editable=editable)
                    )

            if plugins or editable:
                data_dict[placeholder.slot] = {
                    'type': 'cmp-%s' % placeholder.slot,
                    'plugins': plugins,
                }

                if editable:
                    # This is the structure of the template `cms/toolbar/placeholder.html` that is used to register
                    # the frontend editing.
                    from cms.plugin_pool import plugin_pool
                    plugin_types = [cls.__name__ for cls in
                                    plugin_pool.get_all_plugins(
                                        placeholder.slot, placeholder.page)]
                    allowed_plugins = plugin_types + \
                        plugin_pool.get_system_plugins()
                    data_dict[placeholder.slot]['cms'] = [
                        'cms-placeholder-{}'.format(placeholder.pk),
                        {
                            'type': 'placeholder',
                            'name': str(placeholder.get_label()),
                            'page_language': request.LANGUAGE_CODE,
                            'placeholder_id': placeholder.pk,
                            'plugin_language': request.LANGUAGE_CODE,
                            'plugin_restriction': [
                                module for module in allowed_plugins],
                            'addPluginHelpTitle':
                                'Add plugin to placeholder {}'.format(
                                    placeholder.get_label()),
                            'urls': {
                                'add_plugin': placeholder.get_add_url(),
                                'copy_plugin': placeholder.get_copy_url()
                            }
                        }
                    ]
    return data_dict
4.083675
4.118442
0.991558
def get_frontend_data_dict_for_plugin(request, plugin, editable):
    """Return a serializable dict for one CMS plugin, recursing into children.

    Rendering is delegated to the renderer registered in `renderer_pool`;
    plugins without an instance serialize to an empty dict.  Children are
    only parsed when the plugin opts in via `parse_child_plugins`.
    """
    json_data = {}
    instance, plugin = plugin.get_plugin_instance()
    if not instance:
        return json_data
    renderer = renderer_pool.renderer_for_plugin(plugin)
    if renderer:
        json_data = renderer.render(request=request, plugin=plugin, instance=instance, editable=editable)
    if hasattr(plugin, 'parse_child_plugins') and plugin.parse_child_plugins:
        children = []
        for child_plugin in instance.get_children().order_by(settings.DJANGOCMS_SPA_PLUGIN_ORDER_FIELD):
            # Parse all children
            children.append(
                get_frontend_data_dict_for_plugin(
                    request=request,
                    plugin=child_plugin,
                    editable=editable
                )
            )
        if children:
            json_data['plugins'] = children
    return json_data
2.976731
2.754621
1.080632
def get_frontend_data_dict_for_partials(partials, request, editable=False, renderer=None):
    """Return frontend data for global page elements ('partials').

    Partials backed by static placeholders are rendered like placeholders;
    partials listed in DJANGOCMS_SPA_PARTIAL_CALLBACKS are produced by a
    custom callback (`renderer` is forwarded to avoid infinite recursion).
    """
    # Split static placeholders from partials that have a custom callback.
    static_placeholder_names = []
    custom_callback_partials = []
    for partial in partials:
        if partial in settings.DJANGOCMS_SPA_PARTIAL_CALLBACKS.keys():
            custom_callback_partials.append(partial)
        else:
            static_placeholder_names.append(partial)
    # Get the data of all static placeholders
    use_static_placeholder_draft = (hasattr(request, 'toolbar') and
                                    request.toolbar.edit_mode and
                                    request.user.has_perm('cms.edit_static_placeholder'))
    static_placeholders = []
    for static_placeholder_name in static_placeholder_names:
        static_placeholders.append(get_static_placeholder(static_placeholder_name, use_static_placeholder_draft))
    partial_data = get_frontend_data_dict_for_placeholders(
        placeholders=static_placeholders,
        request=request,
        editable=editable
    )
    # Get the data of all partials that have a custom callback.
    for partial_settings_key in custom_callback_partials:
        dotted_function_module_path = settings.DJANGOCMS_SPA_PARTIAL_CALLBACKS[partial_settings_key]
        callback_function = get_function_by_path(dotted_function_module_path)
        partial_data[partial_settings_key] = callback_function(request, renderer)
    return partial_data
2.684713
2.600736
1.03229
def get_global_placeholder_data(placeholder_frontend_data_dict):
    """Run the optional placeholder-data post-processor.

    Returns whatever the configured post-processor produces, or an empty
    dict when DJANGOCMS_SPA_PLACEHOLDER_DATA_POST_PROCESSOR is unset.
    """
    processor_path = settings.DJANGOCMS_SPA_PLACEHOLDER_DATA_POST_PROCESSOR
    if processor_path:
        processor = get_function_by_path(processor_path)
        return processor(placeholder_frontend_data_dict=placeholder_frontend_data_dict)
    return {}
4.310298
2.669518
1.614635
def write_waypoint(self, latitude=None, longitude=None, description=None):
    """Append one task waypoint via an ADDWP config sentence.

    The first and last waypoints declared are treated as takeoff and
    landing.  Missing coordinates are handled by the coordinate
    formatters; the description is truncated to 50 characters.
    """
    desc = description if description else ''
    lat = self.format_latitude(latitude)
    lon = self.format_longitude(longitude)
    self.write_config('ADDWP', '%s,%s,%s' % (lat, lon, desc[:50]))
3.911789
3.703044
1.056371
def write_waypoints(self, points):
    """Declare several task points at once.

    Each item must be a ``(latitude, longitude, description)`` tuple;
    anything else raises ValueError.  Delegates to ``write_waypoint``.
    """
    for point in points:
        if len(point) != 3:
            raise ValueError('Invalid number of task point tuple items')
        self.write_waypoint(*point)
9.175541
8.432825
1.088074
def set_model_pathnames(self):
    """Define the filesystem paths associated with this model.

    Starts from the experiment/lab paths and, for multi-model experiments,
    nests the control/work/codebase paths under the model name.  The work
    sub-paths default to work_path; individual models may override them.
    """
    self.control_path = self.expt.control_path
    self.input_basepath = self.expt.lab.input_basepath
    self.work_path = self.expt.work_path
    self.codebase_path = self.expt.lab.codebase_path
    # Multi-model experiments keep each model in its own subdirectory.
    if len(self.expt.models) > 1:
        self.control_path = os.path.join(self.control_path, self.name)
        self.work_path = os.path.join(self.work_path, self.name)
        self.codebase_path = os.path.join(self.codebase_path, self.name)
    # NOTE: Individual models may override the work subdirectories
    self.work_input_path = self.work_path
    self.work_restart_path = self.work_path
    self.work_output_path = self.work_path
    self.work_init_path = self.work_path
    self.exec_prefix = self.config.get('exe_prefix', '')
    self.exec_name = self.config.get('exe', self.default_exec)
    if self.exec_name:
        # By default os.path.join will not prepend the lab bin_path
        # to an absolute path
        self.exec_path = os.path.join(self.expt.lab.bin_path, self.exec_name)
    else:
        self.exec_path = None
    if self.exec_path:
        # Make exec_name consistent for models with fully qualified path.
        # In all cases it will just be the name of the executable without a
        # path
        self.exec_name = os.path.basename(self.exec_path)
2.946825
2.883849
1.021838
def archive(self):
    """Store model output to laboratory archive.

    Prunes the work tree bottom-up: symlinks and zero-byte files are
    deleted, and any directory left empty afterwards is removed.
    """
    for root, _dirs, names in os.walk(self.work_path, topdown=False):
        for name in names:
            target = os.path.join(root, name)
            # Links and empty files are not real output; drop them.
            if os.path.islink(target) or os.path.getsize(target) == 0:
                os.remove(target)
        if not os.listdir(root):
            os.rmdir(root)
2.761104
2.604948
1.059946
def parse():
    """Parse the command line inputs and execute the selected subcommand.

    Subcommands are discovered as `*_cmd` modules in payu.subcommands; each
    supplies `title`, `parameters`, `arguments` and `runcmd`.  With no CLI
    arguments, prints help instead of running anything.
    """
    # Build the list of subcommand modules
    modnames = [mod for (_, mod, _)
                in pkgutil.iter_modules(payu.subcommands.__path__,
                                        prefix=payu.subcommands.__name__ + '.')
                if mod.endswith('_cmd')]
    subcmds = [importlib.import_module(mod) for mod in modnames]
    # Construct the subcommand parser
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', action='version',
                        version='payu {0}'.format(payu.__version__))
    subparsers = parser.add_subparsers()
    for cmd in subcmds:
        cmd_parser = subparsers.add_parser(cmd.title, **cmd.parameters)
        cmd_parser.set_defaults(run_cmd=cmd.runcmd)
        for arg in cmd.arguments:
            cmd_parser.add_argument(*arg['flags'], **arg['parameters'])
    # Display help if no arguments are provided
    if len(sys.argv) == 1:
        parser.print_help()
    else:
        args = vars(parser.parse_args())
        run_cmd = args.pop('run_cmd')
        run_cmd(**args)
2.512746
2.422172
1.037394
def get_model_type(model_type, config):
    """Determine and validate the active model type.

    Resolution order: explicit argument, then the config file's 'model'
    key, then the parent directory name (with a warning).  Exits with an
    error for unsupported model types.

    Returns the resolved model type string.  (Previously the resolved
    value was computed but never returned, so callers always got None.)
    """
    # If no model type is given, then check the config file
    if not model_type:
        model_type = config.get('model')
    # If there is still no model type, try the parent directory
    if not model_type:
        model_type = os.path.basename(os.path.abspath(os.pardir))
        print('payu: warning: Assuming model is {0} based on parent directory '
              'name.'.format(model_type))
    if model_type not in supported_models:
        print('payu: error: Unknown model {0}'.format(model_type))
        sys.exit(-1)
    return model_type
3.19294
3.244404
0.984138
def set_env_vars(init_run=None, n_runs=None, lab_path=None, dir_path=None,
                 reproduce=None):
    """Construct the environment variables used by payu for resubmissions.

    Returns a dict of PAYU_* variables plus library/python paths for the
    current interpreter.
    """
    env = {}
    # Dynamic library path of the running Python installation.
    env['LD_LIBRARY_PATH'] = ':'.join(sysconfig.get_config_vars('LIBDIR'))
    if 'PYTHONPATH' in os.environ:
        env['PYTHONPATH'] = os.environ['PYTHONPATH']
    # Set (or import) the path to the PAYU scripts (PAYU_PATH)
    # NOTE: We may be able to use sys.path[0] here.
    bin_path = os.environ.get('PAYU_PATH')
    if not bin_path or not os.path.isdir(bin_path):
        bin_path = os.path.dirname(sys.argv[0])
    env['PAYU_PATH'] = bin_path
    # Run counters (validated as non-negative / positive integers).
    if init_run:
        init_run = int(init_run)
        assert init_run >= 0
        env['PAYU_CURRENT_RUN'] = init_run
    if n_runs:
        n_runs = int(n_runs)
        assert n_runs > 0
        env['PAYU_N_RUNS'] = n_runs
    # Explicit project paths.
    if lab_path:
        env['PAYU_LAB_PATH'] = os.path.normpath(lab_path)
    if dir_path:
        env['PAYU_DIR_PATH'] = os.path.normpath(dir_path)
    if reproduce:
        env['PAYU_REPRODUCE'] = reproduce
    return env
2.22781
2.151594
1.035423
def submit_job(pbs_script, pbs_config, pbs_vars=None):
    """Submit a userscript to the PBS scheduler.

    Builds the qsub flag list (queue, project, resources, jobname,
    priority, IO join, environment variables, extra flags) from
    `pbs_config` and `pbs_vars`, resolves a relative script path against
    PAYU_PATH, loads the pbs environment module, and calls qsub.
    """
    # Initialisation
    if pbs_vars is None:
        pbs_vars = {}
    pbs_flags = []
    pbs_queue = pbs_config.get('queue', 'normal')
    pbs_flags.append('-q {queue}'.format(queue=pbs_queue))
    # NOTE(review): raises KeyError if $PROJECT is unset and no project is
    # configured -- confirm this is the intended failure mode.
    pbs_project = pbs_config.get('project', os.environ['PROJECT'])
    pbs_flags.append('-P {project}'.format(project=pbs_project))
    pbs_resources = ['walltime', 'ncpus', 'mem', 'jobfs']
    for res_key in pbs_resources:
        res_flags = []
        res_val = pbs_config.get(res_key)
        if res_val:
            res_flags.append('{key}={val}'.format(key=res_key, val=res_val))
        if res_flags:
            pbs_flags.append('-l {res}'.format(res=','.join(res_flags)))
    # TODO: Need to pass lab.config_path somehow...
    pbs_jobname = pbs_config.get('jobname', os.path.basename(os.getcwd()))
    if pbs_jobname:
        # PBSPro has a 15-character jobname limit
        pbs_flags.append('-N {name}'.format(name=pbs_jobname[:15]))
    pbs_priority = pbs_config.get('priority')
    if pbs_priority:
        pbs_flags.append('-p {priority}'.format(priority=pbs_priority))
    pbs_flags.append('-l wd')
    pbs_join = pbs_config.get('join', 'n')
    if pbs_join not in ('oe', 'eo', 'n'):
        print('payu: error: unknown qsub IO stream join setting.')
        sys.exit(-1)
    else:
        pbs_flags.append('-j {join}'.format(join=pbs_join))
    # Append environment variables to qsub command
    # TODO: Support full export of environment variables: `qsub -V`
    pbs_vstring = ','.join('{0}={1}'.format(k, v)
                           for k, v in pbs_vars.items())
    pbs_flags.append('-v ' + pbs_vstring)
    # Append any additional qsub flags here
    pbs_flags_extend = pbs_config.get('qsub_flags')
    if pbs_flags_extend:
        pbs_flags.append(pbs_flags_extend)
    if not os.path.isabs(pbs_script):
        # NOTE: PAYU_PATH is always set if `set_env_vars` was always called.
        # This is currently always true, but is not explicitly enforced.
        # So this conditional check is a bit redundant.
        payu_bin = pbs_vars.get('PAYU_PATH', os.path.dirname(sys.argv[0]))
        pbs_script = os.path.join(payu_bin, pbs_script)
        assert os.path.isfile(pbs_script)
    # Set up environment modules here for PBS.
    envmod.setup()
    envmod.module('load', 'pbs')
    # Construct job submission command
    cmd = 'qsub {flags} -- {python} {script}'.format(
        flags=' '.join(pbs_flags),
        python=sys.executable,
        script=pbs_script
    )
    print(cmd)
    subprocess.check_call(shlex.split(cmd))
2.96799
2.975129
0.997601
def substitute_timestep(self, regex, timestep):
    """Substitute a new timestep value using regex.

    Replaces capture group 1 of each match of `regex` in self.str with
    `timestep`.  The text is rescanned after every single replacement
    because each substitution shifts subsequent match offsets.  Warns on
    stderr when nothing was updated.
    """
    # Make one change at a time, each change affects subsequent matches.
    timestep_changed = False
    while True:
        matches = re.finditer(regex, self.str, re.MULTILINE | re.DOTALL)
        none_updated = True
        for m in matches:
            if m.group(1) == timestep:
                # Already the target value; skip to the next match.
                continue
            else:
                self.str = (self.str[:m.start(1)] + timestep +
                            self.str[m.end(1):])
                none_updated = False
                timestep_changed = True
                # Restart scanning: offsets after this point are stale.
                break
        if none_updated:
            break
    if not timestep_changed:
        sys.stderr.write('WARNING: no update with {0}.\n'.format(regex))
3.523786
3.34627
1.053049
def int_to_date(date):
    """Convert an int of form yyyymmdd to a python date object."""
    yyyy, mmdd = divmod(date, 10000)
    mm, dd = divmod(mmdd, 100)
    return datetime.date(yyyy, mm, dd)
2.357388
1.920087
1.227751
def runtime_from_date(start_date, years, months, days, seconds, caltype):
    """Return the number of seconds in the given calendar span.

    Computes start_date + (years, months, days) and the elapsed seconds,
    skipping Feb 29 when caltype == NOLEAP; `seconds` is added on top.
    """
    end_date = start_date + relativedelta(years=years, months=months,
                                          days=days)
    span = end_date - start_date
    if caltype == NOLEAP:
        span -= get_leapdays(start_date, end_date)
    return int(span.total_seconds() + seconds)
3.38999
3.292768
1.029526
def date_plus_seconds(init_date, seconds, caltype):
    """Return init_date advanced by `seconds`, honouring the calendar.

    For NOLEAP calendars the skipped Feb 29 days are added back, and a
    result landing exactly on Feb 29 is bumped to Mar 1.
    """
    end_date = init_date + datetime.timedelta(seconds=seconds)
    if caltype == NOLEAP:
        end_date += get_leapdays(init_date, end_date)
        # A no-leap calendar can never land on Feb 29.
        if (end_date.month, end_date.day) == (2, 29):
            end_date += datetime.timedelta(days=1)
    return end_date
2.574601
2.616816
0.983868
def get_leapdays(init_date, final_date):
    """Count Feb 29 occurrences in [init_date, final_date) as a timedelta.

    FIXME: iterates day by day; this could be calculated arithmetically.
    """
    one_day = datetime.timedelta(days=1)
    count = 0
    cursor = init_date
    while cursor != final_date:
        if (cursor.month, cursor.day) == (2, 29):
            count += 1
        cursor += one_day
    return datetime.timedelta(days=count)
1.929558
1.739699
1.109134
def calculate_leapdays(init_date, final_date):
    """Count leap days between arbitrary dates, arithmetically.

    Returns the number of Feb 29 occurrences in [init_date, final_date)
    as a timedelta, matching ``get_leapdays`` without iterating day by
    day.  (The previous implementation only counted whole leap years and
    was documented as correct only for year-aligned differences; the
    internal date correction is now applied.)
    """
    def _leapdays_before(date):
        # Leap years strictly before date.year (Gregorian rule).
        y = date.year - 1
        count = y // 4 - y // 100 + y // 400
        # Include this year's Feb 29 only once it has passed.
        if calendar.isleap(date.year) and (date.month, date.day) > (2, 29):
            count += 1
        return count

    return datetime.timedelta(
        days=_leapdays_before(final_date) - _leapdays_before(init_date))
2.514468
2.380913
1.056094
def get_default_lab_path(self, config):
    """Generate a default laboratory path based on user environment.

    An absolute 'laboratory' config entry is returned as-is; otherwise
    the path is built as <shortpath>/<user>/<lab_name>, defaulting to the
    NCI-style /short/$PROJECT tree and the current login name.
    """
    lab_name = config.get('laboratory', self.model_type)
    if os.path.isabs(lab_name):
        return lab_name
    # Append project name if present (NCI-specific).
    project = os.environ.get('PROJECT', '')
    short_path = config.get('shortpath', os.path.join('/short', project))
    user_name = config.get('user', pwd.getpwuid(os.getuid()).pw_name)
    return os.path.join(short_path, user_name, lab_name)
3.577262
3.490297
1.024916
def initialize(self):
    """Create the laboratory directory tree (idempotent)."""
    for lab_dir in (self.archive_path, self.bin_path,
                    self.codebase_path, self.input_basepath):
        mkdir_p(lab_dir)
4.962448
3.801501
1.305392
def get_job_id(short=True):
    """Return the PBS job id ('' when not running under PBS).

    With ``short=True`` the server suffix is stripped
    (e.g. '1234.rman2' -> '1234').
    """
    jobid = os.environ.get('PBS_JOBID', '')
    return jobid.split('.')[0] if short else jobid
5.160103
4.362428
1.182851
def get_job_info():
    """Get information about the current job from the PBS server.

    Returns None outside of a PBS job; otherwise the qstat attribute dict
    for this job, augmented with a 'Job_ID' key.
    """
    jobid = get_job_id()
    if jobid == '':
        return None
    qstat = get_qstat_info('-ft {0}'.format(jobid), 'Job Id:')
    # There should be exactly one entry: this job's record.
    info = qstat['Job Id: {}'.format(jobid)]
    info['Job_ID'] = jobid
    return info
6.894554
6.513298
1.058535
def postprocess(self):
    """Submit the configured postprocessing script after collation."""
    assert self.postscript
    # qsub needs the PBS environment module loaded first.
    envmod.setup()
    envmod.module('load', 'pbs')
    qsub_cmd = shlex.split('qsub {script}'.format(script=self.postscript))
    rc = sp.call(qsub_cmd)
    assert rc == 0, 'Postprocessing script submission failed.'
8.434851
6.431413
1.311508
def date_to_um_dump_date(date):
    """Convert a date object to a UM dump date: <decade><year><month><day>0.

    Two-digit months/days are encoded as letters (e.g. 10 -> 'a').  The UM
    only handles 36 decades before wrapping back to the beginning.
    """
    assert date.month <= 12
    symbols = string.digits + string.ascii_letters[:26]
    decade = (date.year // 10) % 36
    return '{decade}{year}{month}{day}0'.format(
        decade=symbols[decade],
        year=symbols[date.year % 10],
        month=symbols[date.month],
        day=symbols[date.day],
    )
3.213266
2.926744
1.097898
def date_to_um_date(date):
    """Convert a datetime to '[year, month, day, hour, minute, second]'.

    Only midnight timestamps are accepted.
    """
    assert (date.hour, date.minute, date.second) == (0, 0, 0)
    return [date.year, date.month, date.day] + [0, 0, 0]
2.723794
2.332197
1.167909
def um_date_to_date(d):
    """Convert a '[year, month, day, hour, minute, second]' sequence to a
    datetime object."""
    year, month, day, hour, minute, second = d
    return datetime.datetime(year, month, day, hour, minute, second)
2.295211
2.039204
1.125543
def setup(basepath=DEFAULT_BASEPATH):
    """Set up the Environment Modules system by mutating os.environ.

    Resolves MODULESHOME from basepath + MODULE_VERSION, seeds MODULEPATH
    from the init/.modulespath file when unset, and patches a problematic
    exported bash function.  Disables modules (empty MODULESHOME) when the
    directory does not exist.
    """
    module_version = os.environ.get('MODULE_VERSION', DEFAULT_VERSION)
    moduleshome = os.path.join(basepath, module_version)
    # Abort if MODULESHOME does not exist
    if not os.path.isdir(moduleshome):
        print('payu: warning: MODULESHOME does not exist; disabling '
              'environment modules.')
        os.environ['MODULESHOME'] = ''
        return
    os.environ['MODULE_VERSION'] = module_version
    os.environ['MODULE_VERSION_STACK'] = module_version
    os.environ['MODULESHOME'] = moduleshome
    if 'MODULEPATH' not in os.environ:
        module_initpath = os.path.join(moduleshome, 'init', '.modulespath')
        with open(module_initpath) as initpaths:
            # Keep non-comment content; strip trailing '#' comments.
            modpaths = [
                line.partition('#')[0].strip()
                for line in initpaths.readlines()
                if not line.startswith('#')
            ]
        os.environ['MODULEPATH'] = ':'.join(modpaths)
    os.environ['LOADEDMODULES'] = os.environ.get('LOADEDMODULES', '')
    # Environment modules with certain characters will cause corruption
    # when MPI jobs get launched on other nodes (possibly a PBS issue).
    #
    # Bash processes obscure the issue on Raijin, since it occurs in an
    # environment module function, and bash moves those to the end of
    # the environment variable list.
    #
    # Raijin's mpirun wrapper is a bash script, and therefore "fixes" by doing
    # the shuffle and limiting the damage to other bash functions, but some
    # wrappers (e.g. OpenMPI 2.1.x) may not be present. So we manually patch
    # the problematic variable here. But a more general solution would be nice
    # someday.
    if 'BASH_FUNC_module()' in os.environ:
        bash_func_module = os.environ['BASH_FUNC_module()']
        os.environ['BASH_FUNC_module()'] = bash_func_module.replace('\n', ';')
5.912081
5.758064
1.026748
def module(command, *args):
    """Run the modulecmd tool and exec its Python-formatted output.

    The exec'd output applies the module command's environment changes to
    this process.  A warning is printed and nothing happens when the
    Environment Modules system is unavailable.
    """
    if 'MODULESHOME' not in os.environ:
        print('payu: warning: No Environment Modules found; skipping {0} call.'
              ''.format(command))
        return
    modulecmd = ('{0}/bin/modulecmd'.format(os.environ['MODULESHOME']))
    cmd = '{0} python {1} {2}'.format(modulecmd, command, ' '.join(args))
    proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
    envs, _ = proc.communicate()
    exec(envs)
5.677893
5.003112
1.134872
def init_config(self):
    """Patch input.nml as a new ('n') or restart ('r') run."""
    nml_path = os.path.join(self.work_path, 'input.nml')
    nml = f90nml.read(nml_path)
    cold_start = self.expt.counter == 0 or self.expt.repeat_run
    nml['MOM_input_nml']['input_filename'] = 'n' if cold_start else 'r'
    f90nml.write(nml, nml_path, force=True)
4.278423
3.607924
1.18584
def mkdir_p(path):
    """Create a new directory; silently ignore an already-existing one."""
    try:
        os.makedirs(path)
    except EnvironmentError as exc:
        if exc.errno == errno.EEXIST:
            return
        raise
3.038451
2.961169
1.026099
def read_config(config_fname=None):
    """Parse the input configuration file and return a config dict.

    A missing file yields an empty config with a warning.  Legacy collate
    options (a bare bool, or top-level 'collate_*' keys) are migrated into
    the 'collate' sub-dictionary with a deprecation notice.
    """
    if not config_fname:
        config_fname = DEFAULT_CONFIG_FNAME
    try:
        with open(config_fname, 'r') as config_file:
            config = yaml.load(config_file)
    except IOError as exc:
        if exc.errno == errno.ENOENT:
            print('payu: warning: Configuration file {0} not found!'
                  .format(config_fname))
            config = {}
        else:
            raise
    collate_config = config.pop('collate', {})
    # Transform legacy collate config options
    if type(collate_config) is bool:
        collate_config = {'enable': collate_config}
    collatestr = 'collate_'
    foundkeys = []
    # Cycle through old collate config and convert to newer dict format
    for key in list(config.keys()):
        if key.startswith(collatestr):
            foundkeys.append(key)
            collate_config[key[len(collatestr):]] = config.pop(key)
    if foundkeys:
        print("Use of these keys is deprecated: {}.".format(
            ", ".join(foundkeys)))
        print("Instead use collate dictionary and subkey "
              "without 'collate_' prefix")
    config['collate'] = collate_config
    return config
3.462003
3.409415
1.015424
def make_symlink(src_path, lnk_path):
    """Safely create a symbolic link to an input field.

    Applies the Lustre 60-character path workaround, skips non-existent
    sources, tolerates an identical pre-existing link, warns on a name
    collision with a regular file, and replaces a stale link in place.
    """
    # Check for Lustre 60-character symbolic link path bug
    if CHECK_LUSTRE_PATH_LEN:
        src_path = patch_lustre_path(src_path)
        lnk_path = patch_lustre_path(lnk_path)
    # os.symlink will happily make a symlink to a non-existent
    # file, but we don't want that behaviour
    # XXX: Do we want to be doing this?
    if not os.path.exists(src_path):
        return
    try:
        os.symlink(src_path, lnk_path)
    except EnvironmentError as exc:
        if exc.errno != errno.EEXIST:
            raise
        elif not os.path.islink(lnk_path):
            # Warn the user, but do not interrupt the job
            print("Warning: Cannot create symbolic link to {p}; a file named "
                  "{f} already exists.".format(p=src_path, f=lnk_path))
        else:
            # Overwrite any existing symbolic link
            if os.path.realpath(lnk_path) != src_path:
                os.remove(lnk_path)
                os.symlink(src_path, lnk_path)
3.310174
3.342851
0.990225
def splitpath(path):
    """Recursively split a filepath into a tuple of all its components."""
    head, tail = os.path.split(path)
    if not tail:
        # Reached the root (or a trailing separator).
        return (head,)
    if not head:
        return (tail,)
    return splitpath(head) + (tail,)
2.751589
2.599524
1.058497
def patch_lustre_path(f_path):
    """Pad exactly-60-character pathnames to dodge a current Lustre bug."""
    if CHECK_LUSTRE_PATH_LEN and len(f_path) == 60:
        # '/.' keeps absolute paths absolute; './' keeps relative relative.
        prefix = '/.' if os.path.isabs(f_path) else './'
        f_path = prefix + f_path
    return f_path
3.468307
2.771615
1.251367
def check_fast(self, reproduce=False, **args):
    """Check all filepaths with fast hashes, falling back to full hashes.

    When fast hashes disagree: in reproduce mode each failed file is
    re-verified with the full hashes (aborting the run if they differ);
    otherwise the full hashes are simply regenerated in bulk.  Sets
    self.needsync when the on-disk manifest must be rewritten.
    """
    hashvals = {}
    fast_check = self.check_file(
        filepaths=self.data.keys(),
        hashvals=hashvals,
        hashfn=fast_hashes,
        shortcircuit=True,
        **args
    )
    if not fast_check:
        # Save all the fast hashes for failed files that we've already
        # calculated
        for filepath in hashvals:
            for hash, val in hashvals[filepath].items():
                self.data[filepath]['hashes'][hash] = val
        if reproduce:
            for filepath in hashvals:
                print('Check failed for {0} {1}'
                      ''.format(filepath, hashvals[filepath]))
                tmphash = {}
                full_check = self.check_file(
                    filepaths=filepath,
                    hashfn=full_hashes,
                    hashvals=tmphash,
                    shortcircuit=False,
                    **args
                )
                if full_check:
                    # File is still ok, so replace fast hashes
                    print('Full hashes ({0}) checked ok'
                          ''.format(full_hashes))
                    print('Updating fast hashes for {0} in {1}'
                          ''.format(filepath, self.path))
                    self.add_fast(filepath, force=True)
                    print('Saving updated manifest')
                    self.needsync = True
                else:
                    sys.stderr.write(
                        'Run cannot reproduce: manifest {0} is not '
                        'correct\n'.format(self.path)
                    )
                    for path, hashdict in tmphash.items():
                        print(' {0}:'.format(path))
                        for hash, val in hashdict.items():
                            hash_table = self.data[path]['hashes']
                            hash_table_val = hash_table.get(hash, None)
                            print(' {0}: {1} != {2}'
                                  ''.format(hash, val, hash_table_val))
                    sys.exit(1)
        else:
            # Not relevant if full hashes are correct. Regenerate full
            # hashes for all filepaths that failed fast check.
            print('Updating full hashes for {0} files in {1}'
                  ''.format(len(hashvals), self.path))
            # Add all full hashes at once -- much faster. Definitely want
            # to force the full hash to be updated. In the specific case of
            # an empty hash the value will be None, without force it will
            # be written as null.
            self.add(
                filepaths=list(hashvals.keys()),
                hashfn=full_hashes,
                force=True,
                fullpaths=[self.fullpath(fpath) for fpath
                           in list(hashvals.keys())]
            )
            # Flag need to update version on disk
            self.needsync = True
4.540131
4.332084
1.048025
def add_filepath(self, filepath, fullpath, copy=False):
    """Register filepath -> fullpath in the manifest without hashing.

    Hashing is deferred so all files can be hashed in one batch later.
    Directories and ignore-pattern matches are rejected (returns False);
    returns True when the path was recorded.
    """
    # Directories are never tracked.
    if os.path.isdir(fullpath):
        return False
    # Skip anything matching the manifest's ignore patterns.
    basename = os.path.basename(fullpath)
    for pattern in self.ignore:
        if fnmatch.fnmatch(basename, pattern):
            return False
    entry = self.data.setdefault(filepath, {})
    entry['fullpath'] = fullpath
    entry.setdefault('hashes', {hashfn: None for hashfn in all_hashes})
    if copy:
        entry['copy'] = copy
    if hasattr(self, 'existing_filepaths') and filepath in self.existing_filepaths:
        self.existing_filepaths.remove(filepath)
    return True
2.509221
2.563476
0.978836
def add_fast(self, filepath, hashfn=None, force=False):
    """Add filepath with shortcircuit hashing.

    Only the first calculable hash is stored, so a single "fast" hash
    function is evaluated per file.  Defaults to the fast hash set.
    """
    chosen_fn = fast_hashes if hashfn is None else hashfn
    self.add(filepath, chosen_fn, force, shortcircuit=True)
5.474544
3.689701
1.483736
def copy_file(self, filepath):
    """Return the manifest's copy-instead-of-link flag for filepath.

    False when the file or its 'copy' entry is absent.
    """
    try:
        return self.data[filepath]['copy']
    except KeyError:
        return False
4.826125
3.495077
1.380835
def make_link(self, filepath):
    """Create the work-directory copy or symlink for a tracked file.

    Missing source files are dropped from the manifest.  Otherwise the
    file is copied (read-only for group/other) when the manifest's copy
    flag is set, or symlinked back to the original.
    """
    # Check file exists. It may have been deleted but still in manifest
    if not os.path.exists(self.fullpath(filepath)):
        print('File not found: {filepath}'.format(
            filepath=self.fullpath(filepath)))
        if self.contains(filepath):
            print('removing from manifest')
            self.delete(filepath)
            self.needsync = True
    else:
        try:
            destdir = os.path.dirname(filepath)
            # Make destination directory if not already exists
            # Necessary because sometimes this is called before
            # individual model setup
            if not os.path.exists(destdir):
                os.makedirs(destdir)
            if self.copy_file(filepath):
                shutil.copy(self.fullpath(filepath), filepath)
                perm = (stat.S_IRUSR | stat.S_IRGRP
                        | stat.S_IROTH | stat.S_IWUSR)
                os.chmod(filepath, perm)
            else:
                make_symlink(self.fullpath(filepath), filepath)
        except Exception:
            # BUG FIX: previously this tested the bound method
            # `self.copy_file` (always truthy) instead of calling it, so
            # the error always reported 'copying'.
            action = 'copying' if self.copy_file(filepath) else 'linking'
            print('payu: error: {action} orig: {orig} '
                  'local: {local}'.format(action=action,
                                          orig=self.fullpath(filepath),
                                          local=filepath))
            raise
3.949842
3.891165
1.01508
def add_filepath(self, manifest, filepath, fullpath, copy=False):
    """Wrapper around PayuManifest.add_filepath.

    Normalises the path, registers it with the named manifest, and only
    creates the work-directory link when the path was actually added.
    """
    normpath = os.path.normpath(filepath)
    target = self.manifests[manifest]
    if target.add_filepath(normpath, fullpath, copy):
        target.make_link(normpath)
4.56756
4.441004
1.028497
def commit_hash(dir='.'):
    """Return the HEAD commit hash of the checkout at `dir`, or None.

    None is returned when `git rev-parse HEAD` fails (e.g. not a repo).
    """
    cmd = ['git', 'rev-parse', 'HEAD']
    try:
        with open(os.devnull, 'w') as devnull:
            raw = subprocess.check_output(cmd, cwd=dir, stderr=devnull)
    except subprocess.CalledProcessError:
        return None
    if sys.version_info.major > 2:
        raw = raw.decode('ascii')
    return raw.strip()
2.362961
2.186433
1.080738
def create_manifest(self):
    """Construct the list of files tracked by the runlog.

    Includes the experiment config file (if present), each model's
    required and optional config files, and every file-manifest path.
    """
    self.manifest = []
    config_path = os.path.join(self.expt.control_path, DEFAULT_CONFIG_FNAME)
    if os.path.isfile(config_path):
        self.manifest.append(config_path)
    for model in self.expt.models:
        for fname in model.config_files + model.optional_config_files:
            self.manifest.append(os.path.join(model.control_path, fname))
    # Add file manifests to runlog manifest
    for mf in self.expt.manifest:
        self.manifest.append(mf.path)
4.080473
3.555129
1.147771
def push(self):
    """Push local runlog changes to the remote 'payu' repository.

    Uses the per-experiment SSH key under ~/.ssh/payu via an ephemeral
    ssh-agent; exits with an error if the key is missing.
    """
    expt_name = self.config.get('name', self.expt.name)
    ssh_key = self.config.get('sshid', 'id_rsa_payu_' + expt_name)
    key_path = os.path.join(os.path.expanduser('~'), '.ssh', 'payu', ssh_key)
    if not os.path.isfile(key_path):
        print('payu: error: Github SSH key {key} not found.'
              ''.format(key=key_path))
        print('payu: error: Run `payu ghsetup` to generate a new key.')
        sys.exit(-1)
    cmd = ('ssh-agent bash -c "ssh-add {key}; git push --all payu"'
           ''.format(key=key_path))
    subprocess.check_call(shlex.split(cmd), cwd=self.expt.control_path)
3.726795
2.986148
1.248028
def add_expiration_postfix(expiration):
    """Format the expiration version, appending a version postfix if needed.

    '5' -> '5.0a1', '5.0' -> '5.0a1'; anything else is returned unchanged.
    """
    bare_major = re.match(r'^[1-9][0-9]*$', expiration)
    if bare_major:
        return expiration + ".0a1"
    major_dot_zero = re.match(r'^[1-9][0-9]*\.0$', expiration)
    if major_dot_zero:
        return expiration + "a1"
    return expiration
3.198048
3.17634
1.006834
def load_yaml_file(filename):
    """Load a YAML file from disk, raising ParserError on failure.

    BUG FIX: Python 3 exceptions have no `.message` attribute, so the
    original handlers raised AttributeError instead of ParserError; use
    str(e).  Also catch yaml.YAMLError, which is what yaml.safe_load
    actually raises on malformed input, so parse failures honour the
    documented ParserError contract.
    """
    try:
        with open(filename, 'r') as f:
            return yaml.safe_load(f)
    except IOError as e:
        raise ParserError('Error opening ' + filename + ': ' + str(e))
    except (ValueError, yaml.YAMLError) as e:
        raise ParserError('Error parsing processes in {}: {}'
                          .format(filename, str(e)))
3.021659
2.66521
1.133741
def stringIndex(self, string):
    """Return the table offset of `string`, interning it if absent.

    New strings are placed at the current end of the table; the cursor
    advances by the C string length (including the NUL terminator).
    """
    try:
        return self.table[string]
    except KeyError:
        offset = self.current_index
        self.table[string] = offset
        self.current_index += self.c_strlen(string)
        return offset
3.562003
3.793287
0.939028
def writeDefinition(self, f, name):
    """Write the string table to a file as a C const char array.

    The table is emitted as one char array, with entries separated by
    '\\0', so code can index straight into it and avoid per-string
    pointers (and their relocations).

    :param f: the output stream.
    :param name: the name of the output array.

    BUG FIX: on Python 3 `dict.items()` returns a view with no `.sort()`,
    so the original raised AttributeError; use sorted() instead.  Also
    reuse the already-computed char expansion instead of exploding the
    string twice.
    """
    entries = sorted(self.table.items(), key=lambda x: x[1])

    # Avoid null-in-string warnings with GCC and potentially
    # overlong string constants; write everything out the long way.
    def explodeToCharArray(string):
        def toCChar(s):
            if s == "'":
                return "'\\''"
            else:
                return "'%s'" % s
        return ", ".join(map(toCChar, string))

    f.write("const char %s[] = {\n" % name)
    for (string, offset) in entries:
        if "*/" in string:
            raise ValueError("String in string table contains unexpected sequence '*/': %s" % string)
        e = explodeToCharArray(string)
        if e:
            f.write(" /* %5d - \"%s\" */ %s, '\\0',\n" % (offset, string, e))
        else:
            f.write(" /* %5d - \"%s\" */ '\\0',\n" % (offset, string))
    f.write("};\n\n")
5.133181
4.908601
1.045752
def comments(context, obj):
    """Render comments for obj.

    Collects admin-log entries flagged as comments that are attached
    to *obj* and returns the template context used to render them.
    """
    ctype = ContentType.objects.get_for_model(obj.__class__)
    entries = LogEntry.objects.filter(
        content_type=ctype,
        object_id=obj.pk,
        action_flag=COMMENT,
    )
    return {
        'obj': obj,
        'comment_list': entries,
        'is_admin': context['is_admin'],
    }
def comments(context, obj)
Render comments for obj.
2.167509
2.16326
1.001964
def get_disk_quota(username, machine_name=None):
    """Returns disk quota for username in KB.

    Returns the string 'Account not found' when no active (not
    deleted) account exists for *username*, False when the account has
    no quota set, otherwise the quota scaled by 1048576 (1024 * 1024;
    presumably GB -> KB — TODO confirm the stored unit).
    """
    try:
        account = Account.objects.get(
            username=username, date_deleted__isnull=True)
    except Account.DoesNotExist:
        return 'Account not found'

    quota = account.get_disk_quota()
    if quota is None:
        return False
    return quota * 1048576
def get_disk_quota(username, machine_name=None)
Returns disk quota for username in KB
4.578815
4.472534
1.023763
# Build the CSV/PNG output paths for this project's trend graph.
filename = graphs.get_project_trend_graph_filename(project, start, end)
csv_filename = os.path.join(GRAPH_ROOT, filename + '.csv')
png_filename = os.path.join(GRAPH_ROOT, filename + '.png')
_check_directory_exists(csv_filename)
_check_directory_exists(png_filename)

# Skip regeneration when both output files already exist.
# NOTE(review): `force_overwrite=True` makes this early return *more*
# likely instead of forcing regeneration — looks inverted; confirm the
# intended semantics against the other _gen_* helpers.
if not settings.GRAPH_DEBUG or force_overwrite:
    if os.path.exists(csv_filename):
        if os.path.exists(png_filename):
            return

# Per-account, per-day summed CPU usage for the project in the range.
query = CPUJob.objects.filter(
    project=project,
    date__range=(start, end)
)
query = query.values('account', 'account__username', 'date')
query = query.annotate(Sum('cpu_usage')).order_by('account', 'date')

# `start`/`end` are reused as loop cursors below, so preserve the
# original range boundaries first.
t_start = start
t_end = end
start_str = start.strftime('%Y-%m-%d')
end_str = end.strftime('%Y-%m-%d')

fig, ax = plt.subplots(figsize=(6, 4))
ax.set_xlim(start, end + datetime.timedelta(days=1))
ax.set_title('%s %s - %s' % (project.pid, start_str, end_str))
ax.set_ylabel("CPU Time (hours)")
ax.set_xlabel("Date")
locator = mdates.AutoDateLocator()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
ax.xaxis.set_minor_locator(mdates.DayLocator())

# data[account][date] -> summed cpu_usage; x_data/y_data become the
# per-account plot series.
data = {}
x_data = {}
y_data = {}
with open(csv_filename, 'w') as csv_file:
    csv_writer = csv.writer(csv_file)
    for row in query.iterator():
        # cpu_usage is divided by 3600 here and below, so it is
        # presumably stored in seconds — TODO confirm.
        csv_writer.writerow([
            row['account__username'],
            row['date'],
            row['cpu_usage__sum'] / 3600.00
        ])
        account = row['account']
        date = row['date']
        if account not in data:
            data[account] = {}
            x_data[account] = []
            y_data[account] = []
        data[account][date] = row['cpu_usage__sum']

# Expand each account's sparse per-day usage into a dense series
# covering every day in the range (missing days become zero hours).
for account, dates in six.iteritems(data):
    start = t_start
    end = t_end
    while start <= end:
        total = 0
        if start in dates:
            total = dates[start]
        x_data[account].append(start)
        y_data[account].append(total / 3600.00)
        start = start + datetime.timedelta(days=1)
del data

# Running per-day totals used as the `bottom` offsets for stacking.
totals = []
start = t_start
end = t_end
while start <= end:
    totals.append(0)
    start = start + datetime.timedelta(days=1)

count = 0
for account in x_data.keys():
    # Draw this account's bars stacked on top of those already drawn.
    ax.bar(
        x_data[account], y_data[account],
        bottom=totals,
        color=graphs.get_colour(count),
        edgecolor=graphs.get_colour(count),
        align='edge')
    count = count + 1
    # Fold this account's hours into the running totals so the next
    # account's bars sit above them.
    i = 0
    start = t_start
    end = t_end
    while start <= end:
        totals[i] += y_data[account][i]
        i = i + 1
        start = start + datetime.timedelta(days=1)
del x_data
del y_data
del totals

fig.autofmt_xdate()
plt.tight_layout()
plt.savefig(png_filename)
plt.close()
def _gen_project_trend_graph(project, start, end, force_overwrite=False)
Generates a bar graph for a project Keyword arguments: project -- Project start -- start date end -- end date
1.952854
2.002125
0.975391
def _gen_institute_graph(start, end, force_overwrite=False):
    """Pie chart comparing institutes usage."""
    base = graphs.get_institute_graph_filename(start, end)
    csv_path = os.path.join(GRAPH_ROOT, base + '.csv')
    png_path = os.path.join(GRAPH_ROOT, base + '.png')
    _check_directory_exists(csv_path)
    _check_directory_exists(png_path)

    # Reuse existing output files when regeneration is not required.
    if (not settings.GRAPH_DEBUG or force_overwrite) \
            and os.path.exists(csv_path) and os.path.exists(png_path):
        return

    plt.subplots(figsize=(4, 4))
    slices = []
    slice_labels = []
    used_total = 0
    with open(csv_path, 'w') as csv_file:
        writer = csv.writer(csv_file)
        for institute in Institute.active.all().iterator():
            hours, jobs = usage.get_institute_usage(institute, start, end)
            used_total = used_total + int(hours)
            if hours > 0:
                writer.writerow([institute.name, hours, jobs])
                slices.append(hours)
                slice_labels.append(institute.name)
        # Remaining available time is shown as an "Unused" slice.
        mcu = usage.get_machine_category_usage(start, end)
        unused_hours = int(mcu.available_time - used_total)
        writer.writerow(["unused", unused_hours])
        slices.append(unused_hours)
        slice_labels.append('Unused')

    plt.pie(slices, labels=slice_labels, autopct='%1.1f%%', shadow=True)
    plt.tight_layout()
    plt.savefig(png_path)
    plt.close()
def _gen_institute_graph(start, end, force_overwrite=False)
Pie chart comparing institutes usage.
2.628359
2.562345
1.025763
def _gen_machine_graph(start, end, force_overwrite=False):
    """Pie chart comparing machines usage."""
    base = graphs.get_machine_graph_filename(start, end)
    csv_path = os.path.join(GRAPH_ROOT, base + '.csv')
    png_path = os.path.join(GRAPH_ROOT, base + '.png')
    _check_directory_exists(csv_path)
    _check_directory_exists(png_path)

    # Reuse existing output files when regeneration is not required.
    if (not settings.GRAPH_DEBUG or force_overwrite) \
            and os.path.exists(csv_path) and os.path.exists(png_path):
        return

    plt.subplots(figsize=(4, 4))
    slices = []
    slice_labels = []
    with open(csv_path, 'w') as csv_file:
        writer = csv.writer(csv_file)
        for machine in Machine.objects.all().iterator():
            hours, jobs = usage.get_machine_usage(machine, start, end)
            if hours > 0:
                writer.writerow([machine.name, hours, jobs])
                slices.append(hours)
                slice_labels.append(machine.name)

    plt.pie(slices, labels=slice_labels, autopct='%1.1f%%', shadow=True)
    plt.tight_layout()
    plt.savefig(png_path)
    plt.close()
def _gen_machine_graph(start, end, force_overwrite=False)
Pie chart comparing machines usage.
2.321159
2.208687
1.050922
def _gen_trend_graph(start, end, force_overwrite=False):
    """Total trend graph for machine category."""
    base = graphs.get_trend_graph_filename(start, end)
    csv_path = os.path.join(GRAPH_ROOT, base + '.csv')
    png_path = os.path.join(GRAPH_ROOT, base + '.png')
    _check_directory_exists(csv_path)
    _check_directory_exists(png_path)

    # Reuse existing output files when regeneration is not required.
    if (not settings.GRAPH_DEBUG or force_overwrite) \
            and os.path.exists(csv_path) and os.path.exists(png_path):
        return

    # Per-day summed CPU usage across all jobs in the range.
    query = CPUJob.objects.filter(date__range=(start, end))
    query = query.values('date').annotate(Sum('cpu_usage')).order_by('date')

    fig, ax = plt.subplots(figsize=(6, 4))
    ax.set_xlim(start, end)
    ax.set_title('%s - %s' % (start.strftime('%Y-%m-%d'),
                              end.strftime('%Y-%m-%d')))
    ax.set_ylabel("CPU Time (hours)")
    ax.set_xlabel("Date")
    locator = mdates.AutoDateLocator()
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
    ax.xaxis.set_minor_locator(mdates.DayLocator())

    # Write each day's usage to CSV while collecting it for plotting.
    usage_by_date = {}
    with open(csv_path, 'w') as csv_file:
        writer = csv.writer(csv_file)
        for row in query.iterator():
            writer.writerow([
                row['date'],
                row['cpu_usage__sum'] / 3600.00
            ])
            usage_by_date[row['date']] = row['cpu_usage__sum']

    # Build a dense series covering every day in the range; days with
    # no recorded usage plot as zero hours.
    xs = []
    ys = []
    day = start
    while day <= end:
        xs.append(day)
        ys.append(usage_by_date.get(day, 0) / 3600.00)
        day = day + datetime.timedelta(days=1)

    ax.plot(xs, ys)
    fig.autofmt_xdate()
    plt.tight_layout()
    plt.savefig(png_path)
    plt.close()
def _gen_trend_graph(start, end, force_overwrite=False)
Total trend graph for machine category.
2.019798
2.004606
1.007579