Dataset columns: content (string, lengths 22 to 815k) and id (int64, values 0 to 4.91M).
def purchase_index(request):
    """Display the user's purchase history."""
    login_id = request.user.id
    # Get the user's purchase history, newest first.
    context = {
        'histories': Purchase_history.objects.filter(acc_id=login_id).order_by('-date')
    }
    return render(request, 'profile/histories/purchase_history.html', context)
16,100
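A hypothetical urls.py entry showing how the purchase_index view above could be wired to a route (the path and route name are illustrative, not taken from the original project):

from django.urls import path

urlpatterns = [
    # Route is an assumption; only the view name comes from the snippet above.
    path("profile/history/", purchase_index, name="purchase_history"),
]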
def solve_EEC(self):
    """Compute the parameters dict for the equivalent electrical circuit,
    cf. "Advanced Electrical Drives, analysis, modeling, control",
    Rik De Doncker, Duco W.J. Pulle, Andre Veltman, Springer edition

                 <---                           --->
         -----R-----wsLqIq----          -----R-----wsLdId----
        |                     |        |                     |
        |                     |        |                    BEMF
        |                     |        |                     |
         ---------Id----------          ---------Iq----------
                 --->                           --->
                  Ud                             Uq

    Parameters
    ----------
    self : EEC_PMSM
        an EEC_PMSM object

    Returns
    -------
    out_dict : dict
        Dict containing all magnetic quantities that have been calculated in EEC
    """
    felec = self.freq0
    ws = 2 * pi * felec

    out_dict = dict()
    if "Ud" in self.parameters:  # Voltage driven
        # Prepare linear system
        XR = array(
            [
                [self.parameters["R20"], -ws * self.parameters["Lq"]],
                [ws * self.parameters["Ld"], self.parameters["R20"]],
            ]
        )
        XE = array([0, ws * self.parameters["phi"]])
        XU = array([self.parameters["Ud"], self.parameters["Uq"]])
        # Solve system
        XI = solve(XR, XU - XE)
        out_dict["Id"] = XI[0]
        out_dict["Iq"] = XI[1]
        out_dict["Ud"] = self.parameters["Ud"]
        out_dict["Uq"] = self.parameters["Uq"]
    else:  # Current driven
        Ud = (
            self.parameters["R20"] * self.parameters["Id"]
            - ws * self.parameters["Phiq"]
        )
        Uq = (
            self.parameters["R20"] * self.parameters["Iq"]
            + ws * self.parameters["Phid"]
        )
        out_dict["Ud"] = Ud
        out_dict["Uq"] = Uq
        out_dict["Id"] = self.parameters["Id"]
        out_dict["Iq"] = self.parameters["Iq"]

    return out_dict
16,101
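A minimal numerical sketch of the voltage-driven branch above, using plain NumPy and made-up machine parameters (R20, Ld, Lq, phi, Ud, Uq and the 50 Hz frequency are all assumptions, not values from the original model):

import numpy as np

R20, Ld, Lq, phi = 0.05, 1.0e-3, 1.2e-3, 0.1   # hypothetical parameters
ws = 2 * np.pi * 50.0                          # electrical pulsation for felec = 50 Hz
XR = np.array([[R20, -ws * Lq], [ws * Ld, R20]])
XE = np.array([0.0, ws * phi])
XU = np.array([10.0, 20.0])                    # hypothetical Ud, Uq
Id, Iq = np.linalg.solve(XR, XU - XE)          # same linear system as solve_EEC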
def plot_figure_legend(results_dir):
    """
    Make a standalone legend
    :return:
    """
    from hips.plotting.layout import create_legend_figure
    labels = ["SBM-LDS (Gibbs)", "HMM (Gibbs)", "Raw LDS (Gibbs)", "LNM-LDS (pMCMC)"]
    fig = create_legend_figure(labels, colors[:4], size=(5.25, 0.5),
                               lineargs={"lw": 2},
                               legendargs={"columnspacing": 0.75,
                                           "handletextpad": 0.1})
    fig.savefig(os.path.join(results_dir, "legend.pdf"))
16,102
def submit(job):
    """Submit a job."""
    # Change into the working directory and submit the job.
    cmd = ["cd " + job["destdir"] + "\n", "sbatch " + job["subfile"]]

    # Process the submit
    try:
        shellout = shellwrappers.sendtossh(job, cmd)
    except exceptions.SSHError as inst:
        if "violates" in inst.stderr and "job submit limit" in inst.stderr:
            raise exceptions.QueuemaxError
        else:
            raise exceptions.JobsubmitError(
                "Something went wrong when submitting. The following output "
                "came back from the SSH call:\nstdout: {0}\nstderr {1}"
                .format(inst.stdout, inst.stderr))

    try:
        # Do the regex in Longbow rather than in the subprocess.
        jobid = re.search(r'\d+', shellout[0]).group()
    except AttributeError:
        raise exceptions.JobsubmitError(
            "Could not detect the job id during submission, this means that "
            "either the submission failed in an unexpected way, or that "
            "Longbow could not understand the returned information.")

    # Put jobid into the job dictionary.
    job["jobid"] = jobid
16,103
def ns_alarm_create(ctx, name, ns, vnf, vdu, metric, severity,
                    threshold_value, threshold_operator, statistic):
    """creates a new alarm for a NS instance"""
    # TODO: Check how to validate threshold_value.
    # Should it be an integer (1-100), percentage, or decimal (0.01-1.00)?
    try:
        ns_instance = ctx.obj.ns.get(ns)
        alarm = {}
        alarm['alarm_name'] = name
        alarm['ns_id'] = ns_instance['_id']
        alarm['correlation_id'] = ns_instance['_id']
        alarm['vnf_member_index'] = vnf
        alarm['vdu_name'] = vdu
        alarm['metric_name'] = metric
        alarm['severity'] = severity
        alarm['threshold_value'] = int(threshold_value)
        alarm['operation'] = threshold_operator
        alarm['statistic'] = statistic
        check_client_version(ctx.obj, ctx.command.name)
        ctx.obj.ns.create_alarm(alarm)
    except ClientException as inst:
        print(inst.message)
        exit(1)
16,104
def is_active(seat):
    """Return True if the seat is occupied ("#"); return False if it is
    empty or missing from seat_map.
    """
    active = seat_map.get(seat, ".")
    return active == "#"
16,105
def calibrate_intensity_to_powder(peak_intensity: dict, powder_peak_intensity: dict,
                                  powder_peak_label: List[str], image_numbers: List[int],
                                  powder_start: int = 1):
    """Calibrate peak intensity values to intensity measurements taken from a
    'random' powder sample."""
    corrected_peak_intensity = dict()
    first_iteration = True
    for image_number in tqdm(image_numbers):
        corrected_peak_intensity[image_number] = dict()
        for label in powder_peak_label:
            powder_average = np.average(powder_peak_intensity[powder_start][label])
            powder_error = np.std(powder_peak_intensity[powder_start][label], ddof=1)
            corrected_peak_intensity[image_number][label] = (
                peak_intensity[image_number][label] / powder_average
            )
            if first_iteration:
                print(f"Normalised {label} intensities by a value of {powder_average} "
                      f"+/- {powder_error} from average powder intensity.")
            else:
                continue
        first_iteration = False
    return corrected_peak_intensity
16,106
def obsrio_temperatures(
    observatory: str,
    input_factory: Optional[TimeseriesFactory] = None,
    output_factory: Optional[TimeseriesFactory] = None,
    realtime_interval: int = 600,
    update_limit: int = 10,
):
    """Filter temperatures 1Hz miniseed (LK1-4) to 1 minute legacy (UK1-4)."""
    starttime, endtime = get_realtime_interval(realtime_interval)
    controller = Controller(
        inputFactory=input_factory or get_miniseed_factory(),
        inputInterval="second",
        outputFactory=output_factory or get_edge_factory(),
        outputInterval="minute",
    )
    renames = {"LK1": "UK1", "LK2": "UK2", "LK3": "UK3", "LK4": "UK4"}
    for input_channel, output_channel in renames.items():
        controller.run_as_update(
            algorithm=FilterAlgorithm(
                input_sample_period=1,
                output_sample_period=60,
                inchannels=(input_channel,),
                outchannels=(output_channel,),
            ),
            observatory=(observatory,),
            output_observatory=(observatory,),
            starttime=starttime,
            endtime=endtime,
            input_channels=(input_channel,),
            output_channels=(output_channel,),
            realtime=realtime_interval,
            rename_output_channel=((input_channel, output_channel),),
            update_limit=update_limit,
        )
16,107
def examine(path):
    """Look for forbidden tasks in a job-output.json file path."""
    data = json.load(open(path))
    to_fix = False
    for playbook in data:
        if playbook['trusted']:
            continue
        for play in playbook['plays']:
            for task in play['tasks']:
                for hostname, host in task['hosts'].items():
                    if hostname != 'localhost':
                        continue
                    if host['action'] in ['command', 'shell']:
                        print("Found disallowed task:")
                        print(" Playbook: %s" % playbook['playbook'])
                        print(" Role: %s" % task.get('role', {}).get('name'))
                        print(" Task: %s" % task.get('task', {}).get('name'))
                        to_fix = True
    return to_fix
16,108
def determine_disjuct_modules_alternative(src_rep):
    """
    Potentially get rid of determine_added_modules and get_modules_lst()
    """
    findimports_output = subprocess.check_output(['findimports', src_rep])
    findimports_output = findimports_output.decode('utf-8').splitlines()
    custom_modules_lst = []
    for i, elem in enumerate(findimports_output):
        if ':' in elem:
            continue
        elem = elem.rstrip('\n').split('.', 1)[0].strip()
        # print(f" element : {elem}")
        custom_modules_lst.append(elem)
    custom_modules_lst = set(custom_modules_lst)  # beautify this

    disjunct_modules = []
    for i, elem in enumerate(custom_modules_lst):
        if elem in sys.modules:
            continue
        else:
            disjunct_modules.append(elem)
    return disjunct_modules
16,109
def test_start_notasks(event_loop):
    """If there are no tasks, the event is not started"""
    event = LoadLimitEvent()

    assert not event.started
    assert len(event.tasks) == 0

    with pytest.raises(NoEventTasksError):
        event.start(loop=event_loop)

    assert not event.started
16,110
def observe_simulation(star_error_model=None, progenitor_error_model=None, selection_expr=None, output_file=None, overwrite=False, seed=None, simulation_path=None, snapfile=None): """ Observe simulation data and write the output to an HDF5 file """ if os.path.exists(output_file) and overwrite: os.remove(output_file) if os.path.exists(output_file): raise IOError("File '{}' already exists! Did you " "want to use overwrite=True?".format(output_file)) # read the simulation data from the specified class if seed is None: seed = np.random.randint(100) logger.debug("Using seed: {}".format(seed)) np.random.seed(seed) random.seed(seed) scf = SCFReader(simulation_path) snap_data = scf.read_snap(snapfile, units=usys) # select out particles that meet these cuts idx = numexpr.evaluate("(tub!=0)", snap_data) star_data = snap_data[idx] logger.debug("Read in {} particles".format(len(star_data))) # coordinate transform star_gc = np.vstack([star_data[n] for n in galactocentric_names]).T star_hel = gal_to_hel(star_gc) # create table for star data star_tbl = at.Table(star_hel, names=heliocentric_names) star_tbl.add_column(star_data["tub"]) # add tub # select bound particles to median to get satellite position idx = numexpr.evaluate("(tub==0)", snap_data) prog_data = snap_data[idx] # coordinate transform prog_gc = np.vstack([prog_data[n] for n in galactocentric_names]).T prog_gc = np.median(prog_gc, axis=0).reshape(1,6) logger.debug("Used {} particles to estimate progenitor position.".format(len(prog_data))) prog_hel = gal_to_hel(prog_gc) # create table for progenitor data prog_tbl = at.Table(prog_hel, names=heliocentric_names) prog_tbl.add_column(at.Column([snap_data["m"].sum()], name="m0")) # add mass # determine tail assignment for stars by relative energy dE = energy(star_gc) - energy(prog_gc) tail = np.zeros(len(star_tbl)) lead = dE <= 0. trail = dE > 0. tail[lead] = -1. # leading tail tail[trail] = 1. 
# trailing star_tbl.add_column(at.Column(tail, name="tail")) # add tail # observe the data observed_star_tbl,star_err_tbl = observe_table(star_tbl, star_error_model) observed_prog_tbl,prog_err_tbl = observe_table(prog_tbl, progenitor_error_model) # make a plot of true and observed positions obs_hel = np.vstack([observed_star_tbl[n] for n in heliocentric_names]).T obs_gc = hel_to_gal(obs_hel) fig,axes = plt.subplots(2,2,figsize=(16,16)) mpl = dict(markersize=3., marker='o', linestyle='none', alpha=0.5) axes[0,0].plot(star_gc[:,0], star_gc[:,1], **mpl) axes[0,1].plot(star_gc[:,0], star_gc[:,2], **mpl) axes[0,0].plot(obs_gc[trail,0], obs_gc[trail,1], label='trailing', c='#ca0020', **mpl) axes[0,1].plot(obs_gc[trail,0], obs_gc[trail,2], c='#ca0020', **mpl) axes[0,0].plot(obs_gc[lead,0], obs_gc[lead,1], label='leading', **mpl) axes[0,1].plot(obs_gc[lead,0], obs_gc[lead,2], **mpl) axes[0,0].legend() axes[1,0].plot(star_gc[:,3], star_gc[:,4], **mpl) axes[1,1].plot(star_gc[:,3], star_gc[:,5], **mpl) axes[1,0].plot(obs_gc[trail,3], obs_gc[trail,4], c='#ca0020', **mpl) axes[1,1].plot(obs_gc[trail,3], obs_gc[trail,5], c='#ca0020', **mpl) axes[1,0].plot(obs_gc[lead,3], obs_gc[lead,4], **mpl) axes[1,1].plot(obs_gc[lead,3], obs_gc[lead,5], **mpl) fname = os.path.splitext(os.path.basename(output_file))[0] fig.savefig(os.path.join(os.path.split(output_file)[0], "{}.{}".format(fname, 'png'))) # write tables to output_file observed_star_tbl.write(output_file, format="hdf5", path="stars", overwrite=overwrite) observed_prog_tbl.write(output_file, format="hdf5", path="progenitor", append=True) star_err_tbl.write(output_file, format="hdf5", path="error_stars", append=True) prog_err_tbl.write(output_file, format="hdf5", path="error_progenitor", append=True) star_tbl.write(output_file, format="hdf5", path="true_stars", append=True) prog_tbl.write(output_file, format="hdf5", path="true_progenitor", append=True) integ_tbl = at.Table(np.array([[np.nan]])) integ_tbl.meta['t1'] = snap_data.meta['time'] integ_tbl.meta['t2'] = 0. integ_tbl.write(output_file, format="hdf5", path="integration", append=True)
16,111
def config_ask(default_message=True, config_args=config_variables):
    """Formats user command line input for configuration details"""
    if default_message:
        print("Enter configuration parameters for the following variables... ")
        config_dictionary = dict()
        for v in config_args:
            config_dictionary.update({v: input("{}: ".format(v))})
        return config_dictionary
    else:
        print(default_message)
        config_dictionary = dict()
        for v in config_args:
            config_dictionary.update({v: input("{}: ".format(v))})
        return config_dictionary
16,112
def parseAndRun(args): """interface used by Main program and py.test (arelle_test.py) """ try: from arelle import webserver hasWebServer = True except ImportError: hasWebServer = False cntlr = CntlrCmdLine() # need controller for plug ins to be loaded usage = "usage: %prog [options]" parser = OptionParser(usage, version="Arelle(r) {0} ({1}bit)".format(Version.__version__, cntlr.systemWordSize), conflict_handler="resolve") # allow reloading plug-in options without errors parser.add_option("-f", "--file", dest="entrypointFile", help=_("FILENAME is an entry point, which may be " "an XBRL instance, schema, linkbase file, " "inline XBRL instance, testcase file, " "testcase index file. FILENAME may be " "a local file or a URI to a web located file. " "For multiple instance filings may be | separated file names or JSON list " "of file/parameter dicts [{\"file\":\"filepath\"}, {\"file\":\"file2path\"} ...].")) parser.add_option("--username", dest="username", help=_("user name if needed (with password) for web file retrieval")) parser.add_option("--password", dest="password", help=_("password if needed (with user name) for web retrieval")) # special option for web interfaces to suppress closing an opened modelXbrl parser.add_option("--keepOpen", dest="keepOpen", action="store_true", help=SUPPRESS_HELP) parser.add_option("-i", "--import", dest="importFiles", help=_("FILENAME is a list of files to import to the DTS, such as " "additional formula or label linkbases. " "Multiple file names are separated by a '|' character. ")) parser.add_option("-d", "--diff", dest="diffFile", help=_("FILENAME is a second entry point when " "comparing (diffing) two DTSes producing a versioning report.")) parser.add_option("-r", "--report", dest="versReportFile", help=_("FILENAME is the filename to save as the versioning report.")) parser.add_option("-v", "--validate", action="store_true", dest="validate", help=_("Validate the file according to the entry " "file type. If an XBRL file, it is validated " "according to XBRL validation 2.1, calculation linkbase validation " "if either --calcDecimals or --calcPrecision are specified, and " "SEC EDGAR Filing Manual (if --efm selected) or Global Filer Manual " "disclosure system validation (if --gfm=XXX selected). " "If a test suite or testcase, the test case variations " "are individually so validated. " "If formulae are present they will be validated and run unless --formula=none is specified. " )) parser.add_option("--calcDecimals", action="store_true", dest="calcDecimals", help=_("Specify calculation linkbase validation inferring decimals.")) parser.add_option("--calcdecimals", action="store_true", dest="calcDecimals", help=SUPPRESS_HELP) parser.add_option("--calcPrecision", action="store_true", dest="calcPrecision", help=_("Specify calculation linkbase validation inferring precision.")) parser.add_option("--calcprecision", action="store_true", dest="calcPrecision", help=SUPPRESS_HELP) parser.add_option("--calcDeduplicate", action="store_true", dest="calcDeduplicate", help=_("Specify de-duplication of consistent facts when performing calculation validation, chooses most accurate fact.")) parser.add_option("--calcdeduplicate", action="store_true", dest="calcDeduplicate", help=SUPPRESS_HELP) parser.add_option("--efm", action="store_true", dest="validateEFM", help=_("Select Edgar Filer Manual (U.S. 
SEC) disclosure system validation (strict).")) parser.add_option("--gfm", action="store", dest="disclosureSystemName", help=SUPPRESS_HELP) parser.add_option("--disclosureSystem", action="store", dest="disclosureSystemName", help=_("Specify a disclosure system name and" " select disclosure system validation. " "Enter --disclosureSystem=help for list of names or help-verbose for list of names and descriptions. ")) parser.add_option("--disclosuresystem", action="store", dest="disclosureSystemName", help=SUPPRESS_HELP) parser.add_option("--hmrc", action="store_true", dest="validateHMRC", help=_("Select U.K. HMRC disclosure system validation.")) parser.add_option("--utr", action="store_true", dest="utrValidate", help=_("Select validation with respect to Unit Type Registry.")) parser.add_option("--utrUrl", action="store", dest="utrUrl", help=_("Override disclosure systems Unit Type Registry location (URL or file path).")) parser.add_option("--utrurl", action="store", dest="utrUrl", help=SUPPRESS_HELP) parser.add_option("--infoset", action="store_true", dest="infosetValidate", help=_("Select validation with respect testcase infosets.")) parser.add_option("--labelLang", action="store", dest="labelLang", help=_("Language for labels in following file options (override system settings)")) parser.add_option("--labellang", action="store", dest="labelLang", help=SUPPRESS_HELP) parser.add_option("--labelRole", action="store", dest="labelRole", help=_("Label role for labels in following file options (instead of standard label)")) parser.add_option("--labelrole", action="store", dest="labelRole", help=SUPPRESS_HELP) parser.add_option("--DTS", "--csvDTS", action="store", dest="DTSFile", help=_("Write DTS tree into FILE (may be .csv or .html)")) parser.add_option("--facts", "--csvFacts", action="store", dest="factsFile", help=_("Write fact list into FILE")) parser.add_option("--factListCols", action="store", dest="factListCols", help=_("Columns for fact list file")) parser.add_option("--factTable", "--csvFactTable", action="store", dest="factTableFile", help=_("Write fact table into FILE")) parser.add_option("--concepts", "--csvConcepts", action="store", dest="conceptsFile", help=_("Write concepts into FILE")) parser.add_option("--pre", "--csvPre", action="store", dest="preFile", help=_("Write presentation linkbase into FILE")) parser.add_option("--table", "--csvTable", action="store", dest="tableFile", help=_("Write table linkbase into FILE")) parser.add_option("--cal", "--csvCal", action="store", dest="calFile", help=_("Write calculation linkbase into FILE")) parser.add_option("--dim", "--csvDim", action="store", dest="dimFile", help=_("Write dimensions (of definition) linkbase into FILE")) parser.add_option("--anch", action="store", dest="anchFile", help=_("Write anchoring relationships (of definition) linkbase into FILE")) parser.add_option("--formulae", "--htmlFormulae", action="store", dest="formulaeFile", help=_("Write formulae linkbase into FILE")) parser.add_option("--viewArcrole", action="store", dest="viewArcrole", help=_("Write linkbase relationships for viewArcrole into viewFile")) parser.add_option("--viewarcrole", action="store", dest="viewArcrole", help=SUPPRESS_HELP) parser.add_option("--viewFile", action="store", dest="viewFile", help=_("Write linkbase relationships for viewArcrole into viewFile")) parser.add_option("--viewfile", action="store", dest="viewFile", help=SUPPRESS_HELP) parser.add_option("--roleTypes", action="store", dest="roleTypesFile", help=_("Write defined role types into 
FILE")) parser.add_option("--roletypes", action="store", dest="roleTypesFile", help=SUPPRESS_HELP) parser.add_option("--arcroleTypes", action="store", dest="arcroleTypesFile", help=_("Write defined arcrole types into FILE")) parser.add_option("--arcroletypes", action="store", dest="arcroleTypesFile", help=SUPPRESS_HELP) parser.add_option("--testReport", "--csvTestReport", action="store", dest="testReport", help=_("Write test report of validation (of test cases) into FILE")) parser.add_option("--testreport", "--csvtestreport", action="store", dest="testReport", help=SUPPRESS_HELP) parser.add_option("--testReportCols", action="store", dest="testReportCols", help=_("Columns for test report file")) parser.add_option("--testreportcols", action="store", dest="testReportCols", help=SUPPRESS_HELP) parser.add_option("--rssReport", action="store", dest="rssReport", help=_("Write RSS report into FILE")) parser.add_option("--rssreport", action="store", dest="rssReport", help=SUPPRESS_HELP) parser.add_option("--rssReportCols", action="store", dest="rssReportCols", help=_("Columns for RSS report file")) parser.add_option("--rssreportcols", action="store", dest="rssReportCols", help=SUPPRESS_HELP) parser.add_option("--skipDTS", action="store_true", dest="skipDTS", help=_("Skip DTS activities (loading, discovery, validation), useful when an instance needs only to be parsed.")) parser.add_option("--skipdts", action="store_true", dest="skipDTS", help=SUPPRESS_HELP) parser.add_option("--skipLoading", action="store", dest="skipLoading", help=_("Skip loading discovered or schemaLocated files matching pattern (unix-style file name patterns separated by '|'), useful when not all linkbases are needed.")) parser.add_option("--skiploading", action="store", dest="skipLoading", help=SUPPRESS_HELP) parser.add_option("--logFile", action="store", dest="logFile", help=_("Write log messages into file, otherwise they go to standard output. " "If file ends in .xml it is xml-formatted, otherwise it is text. ")) parser.add_option("--logfile", action="store", dest="logFile", help=SUPPRESS_HELP) parser.add_option("--logFormat", action="store", dest="logFormat", help=_("Logging format for messages capture, otherwise default is \"[%(messageCode)s] %(message)s - %(file)s\".")) parser.add_option("--logformat", action="store", dest="logFormat", help=SUPPRESS_HELP) parser.add_option("--logLevel", action="store", dest="logLevel", help=_("Minimum level for messages capture, otherwise the message is ignored. " "Current order of levels are debug, info, info-semantic, warning, warning-semantic, warning, assertion-satisfied, inconsistency, error-semantic, assertion-not-satisfied, and error. ")) parser.add_option("--loglevel", action="store", dest="logLevel", help=SUPPRESS_HELP) parser.add_option("--logLevelFilter", action="store", dest="logLevelFilter", help=_("Regular expression filter for logLevel. " "(E.g., to not match *-semantic levels, logLevelFilter=(?!^.*-semantic$)(.+). 
")) parser.add_option("--loglevelfilter", action="store", dest="logLevelFilter", help=SUPPRESS_HELP) parser.add_option("--logCodeFilter", action="store", dest="logCodeFilter", help=_("Regular expression filter for log message code.")) parser.add_option("--logcodefilter", action="store", dest="logCodeFilter", help=SUPPRESS_HELP) parser.add_option("--logTextMaxLength", action="store", dest="logTextMaxLength", type="int", help=_("Log file text field max length override.")) parser.add_option("--logtextmaxlength", action="store", dest="logTextMaxLength", type="int", help=SUPPRESS_HELP) parser.add_option("--logRefObjectProperties", action="store_true", dest="logRefObjectProperties", help=_("Log reference object properties (default)."), default=True) parser.add_option("--logrefobjectproperties", action="store_true", dest="logRefObjectProperties", help=SUPPRESS_HELP) parser.add_option("--logNoRefObjectProperties", action="store_false", dest="logRefObjectProperties", help=_("Do not log reference object properties.")) parser.add_option("--lognorefobjectproperties", action="store_false", dest="logRefObjectProperties", help=SUPPRESS_HELP) parser.add_option("--statusPipe", action="store", dest="statusPipe", help=SUPPRESS_HELP) parser.add_option("--monitorParentProcess", action="store", dest="monitorParentProcess", help=SUPPRESS_HELP) parser.add_option("--outputAttribution", action="store", dest="outputAttribution", help=SUPPRESS_HELP) parser.add_option("--outputattribution", action="store", dest="outputAttribution", help=SUPPRESS_HELP) parser.add_option("--showOptions", action="store_true", dest="showOptions", help=SUPPRESS_HELP) parser.add_option("--parameters", action="store", dest="parameters", help=_("Specify parameters for formula and validation (name=value[,name=value]).")) parser.add_option("--parameterSeparator", action="store", dest="parameterSeparator", help=_("Specify parameters separator string (if other than comma).")) parser.add_option("--parameterseparator", action="store", dest="parameterSeparator", help=SUPPRESS_HELP) parser.add_option("--formula", choices=("validate", "run", "none"), dest="formulaAction", help=_("Specify formula action: " "validate - validate only, without running, " "run - validate and run, or " "none - prevent formula validation or running when also specifying -v or --validate. 
" "if this option is not specified, -v or --validate will validate and run formulas if present")) parser.add_option("--formulaParamExprResult", action="store_true", dest="formulaParamExprResult", help=_("Specify formula tracing.")) parser.add_option("--formulaparamexprresult", action="store_true", dest="formulaParamExprResult", help=SUPPRESS_HELP) parser.add_option("--formulaParamInputValue", action="store_true", dest="formulaParamInputValue", help=_("Specify formula tracing.")) parser.add_option("--formulaparaminputvalue", action="store_true", dest="formulaParamInputValue", help=SUPPRESS_HELP) parser.add_option("--formulaCallExprSource", action="store_true", dest="formulaCallExprSource", help=_("Specify formula tracing.")) parser.add_option("--formulacallexprsource", action="store_true", dest="formulaCallExprSource", help=SUPPRESS_HELP) parser.add_option("--formulaCallExprCode", action="store_true", dest="formulaCallExprCode", help=_("Specify formula tracing.")) parser.add_option("--formulacallexprcode", action="store_true", dest="formulaCallExprCode", help=SUPPRESS_HELP) parser.add_option("--formulaCallExprEval", action="store_true", dest="formulaCallExprEval", help=_("Specify formula tracing.")) parser.add_option("--formulacallexpreval", action="store_true", dest="formulaCallExprEval", help=SUPPRESS_HELP) parser.add_option("--formulaCallExprResult", action="store_true", dest="formulaCallExprResult", help=_("Specify formula tracing.")) parser.add_option("--formulacallexprtesult", action="store_true", dest="formulaCallExprResult", help=SUPPRESS_HELP) parser.add_option("--formulaVarSetExprEval", action="store_true", dest="formulaVarSetExprEval", help=_("Specify formula tracing.")) parser.add_option("--formulavarsetexpreval", action="store_true", dest="formulaVarSetExprEval", help=SUPPRESS_HELP) parser.add_option("--formulaVarSetExprResult", action="store_true", dest="formulaVarSetExprResult", help=_("Specify formula tracing.")) parser.add_option("--formulavarsetexprresult", action="store_true", dest="formulaVarSetExprResult", help=SUPPRESS_HELP) parser.add_option("--formulaVarSetTiming", action="store_true", dest="timeVariableSetEvaluation", help=_("Specify showing times of variable set evaluation.")) parser.add_option("--formulavarsettiming", action="store_true", dest="timeVariableSetEvaluation", help=SUPPRESS_HELP) parser.add_option("--formulaAsserResultCounts", action="store_true", dest="formulaAsserResultCounts", help=_("Specify formula tracing.")) parser.add_option("--formulaasserresultcounts", action="store_true", dest="formulaAsserResultCounts", help=SUPPRESS_HELP) parser.add_option("--formulaSatisfiedAsser", action="store_true", dest="formulaSatisfiedAsser", help=_("Specify formula tracing.")) parser.add_option("--formulasatisfiedasser", action="store_true", dest="formulaSatisfiedAsser", help=SUPPRESS_HELP) parser.add_option("--formulaUnsatisfiedAsser", action="store_true", dest="formulaUnsatisfiedAsser", help=_("Specify formula tracing.")) parser.add_option("--formulaunsatisfiedasser", action="store_true", dest="formulaUnsatisfiedAsser", help=SUPPRESS_HELP) parser.add_option("--formulaUnsatisfiedAsserError", action="store_true", dest="formulaUnsatisfiedAsserError", help=_("Specify formula tracing.")) parser.add_option("--formulaunsatisfiedassererror", action="store_true", dest="formulaUnsatisfiedAsserError", help=SUPPRESS_HELP) parser.add_option("--formulaFormulaRules", action="store_true", dest="formulaFormulaRules", help=_("Specify formula tracing.")) 
parser.add_option("--formulaformularules", action="store_true", dest="formulaFormulaRules", help=SUPPRESS_HELP) parser.add_option("--formulaVarsOrder", action="store_true", dest="formulaVarsOrder", help=_("Specify formula tracing.")) parser.add_option("--formulavarsorder", action="store_true", dest="formulaVarsOrder", help=SUPPRESS_HELP) parser.add_option("--formulaVarExpressionSource", action="store_true", dest="formulaVarExpressionSource", help=_("Specify formula tracing.")) parser.add_option("--formulavarexpressionsource", action="store_true", dest="formulaVarExpressionSource", help=SUPPRESS_HELP) parser.add_option("--formulaVarExpressionCode", action="store_true", dest="formulaVarExpressionCode", help=_("Specify formula tracing.")) parser.add_option("--formulavarexpressioncode", action="store_true", dest="formulaVarExpressionCode", help=SUPPRESS_HELP) parser.add_option("--formulaVarExpressionEvaluation", action="store_true", dest="formulaVarExpressionEvaluation", help=_("Specify formula tracing.")) parser.add_option("--formulavarexpressionevaluation", action="store_true", dest="formulaVarExpressionEvaluation", help=SUPPRESS_HELP) parser.add_option("--formulaVarExpressionResult", action="store_true", dest="formulaVarExpressionResult", help=_("Specify formula tracing.")) parser.add_option("--formulavarexpressionresult", action="store_true", dest="formulaVarExpressionResult", help=SUPPRESS_HELP) parser.add_option("--formulaVarFilterWinnowing", action="store_true", dest="formulaVarFilterWinnowing", help=_("Specify formula tracing.")) parser.add_option("--formulavarfilterwinnowing", action="store_true", dest="formulaVarFilterWinnowing", help=SUPPRESS_HELP) parser.add_option("--formulaVarFiltersResult", action="store_true", dest="formulaVarFiltersResult", help=_("Specify formula tracing.")) parser.add_option("--formulavarfiltersresult", action="store_true", dest="formulaVarFiltersResult", help=SUPPRESS_HELP) parser.add_option("--testcaseResultsCaptureWarnings", action="store_true", dest="testcaseResultsCaptureWarnings", help=_("For testcase variations capture warning results, default is inconsistency or warning if there is any warning expected result. ")) parser.add_option("--testcaseresultscapturewarnings", action="store_true", dest="testcaseResultsCaptureWarnings", help=SUPPRESS_HELP) parser.add_option("--formulaRunIDs", action="store", dest="formulaRunIDs", help=_("Specify formula/assertion IDs to run, separated by a '|' character.")) parser.add_option("--formularunids", action="store", dest="formulaRunIDs", help=SUPPRESS_HELP) parser.add_option("--formulaCompileOnly", action="store_true", dest="formulaCompileOnly", help=_("Specify formula are to be compiled but not executed.")) parser.add_option("--formulacompileonly", action="store_true", dest="formulaCompileOnly", help=SUPPRESS_HELP) parser.add_option("--uiLang", action="store", dest="uiLang", help=_("Language for user interface (override system settings, such as program messages). Does not save setting.")) parser.add_option("--uilang", action="store", dest="uiLang", help=SUPPRESS_HELP) parser.add_option("--proxy", action="store", dest="proxy", help=_("Modify and re-save proxy settings configuration. " "Enter 'system' to use system proxy setting, 'none' to use no proxy, " "'http://[user[:password]@]host[:port]' " " (e.g., http://192.168.1.253, http://example.com:8080, http://joe:secret@example.com:8080), " " or 'show' to show current setting, ." 
)) parser.add_option("--internetConnectivity", choices=("online", "offline"), dest="internetConnectivity", help=_("Specify internet connectivity: online or offline")) parser.add_option("--internetconnectivity", action="store", dest="internetConnectivity", help=SUPPRESS_HELP) parser.add_option("--internetTimeout", type="int", dest="internetTimeout", help=_("Specify internet connection timeout in seconds (0 means unlimited).")) parser.add_option("--internettimeout", type="int", action="store", dest="internetTimeout", help=SUPPRESS_HELP) parser.add_option("--internetRecheck", choices=("weekly", "daily", "never"), dest="internetRecheck", help=_("Specify rechecking cache files (weekly is default)")) parser.add_option("--internetrecheck", choices=("weekly", "daily", "never"), action="store", dest="internetRecheck", help=SUPPRESS_HELP) parser.add_option("--internetLogDownloads", action="store_true", dest="internetLogDownloads", help=_("Log info message for downloads to web cache.")) parser.add_option("--internetlogdownloads", action="store_true", dest="internetLogDownloads", help=SUPPRESS_HELP) parser.add_option("--noCertificateCheck", action="store_true", dest="noCertificateCheck", help=_("Specify no checking of internet secure connection certificate")) parser.add_option("--nocertificatecheck", action="store_true", dest="noCertificateCheck", help=SUPPRESS_HELP) parser.add_option("--xdgConfigHome", action="store", dest="xdgConfigHome", help=_("Specify non-standard location for configuration and cache files (overrides environment parameter XDG_CONFIG_HOME).")) parser.add_option("--plugins", action="store", dest="plugins", help=_("Specify plug-in configuration for this invocation. " "Enter 'show' to confirm plug-in configuration. " "Commands show, and module urls are '|' separated: " "url specifies a plug-in by its url or filename, " "relative URLs are relative to installation plug-in directory, " " (e.g., 'http://arelle.org/files/hello_web.py', 'C:\Program Files\Arelle\examples\plugin\hello_dolly.py' to load, " "or ../examples/plugin/hello_dolly.py for relative use of examples directory) " "Local python files do not require .py suffix, e.g., hello_dolly without .py is sufficient, " "Packaged plug-in urls are their directory's url (e.g., --plugins EdgarRenderer or --plugins xbrlDB). " )) parser.add_option("--packages", action="store", dest="packages", help=_("Specify taxonomy packages configuration. " "Enter 'show' to show current packages configuration. " "Commands show, and module urls are '|' separated: " "url specifies a package by its url or filename, please use full paths. " "(Package settings from GUI are no longer shared with cmd line operation. " "Cmd line package settings are not persistent.) " )) parser.add_option("--package", action="store", dest="packages", help=SUPPRESS_HELP) parser.add_option("--packageManifestName", action="store", dest="packageManifestName", help=_("Provide non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). " "Uses unix file name pattern matching. " "Multiple manifest files are supported in archive (such as oasis catalogs). " "(Replaces search for either .taxonomyPackage.xml or catalog.xml). 
" )) parser.add_option("--abortOnMajorError", action="store_true", dest="abortOnMajorError", help=_("Abort process on major error, such as when load is unable to find an entry or discovered file.")) parser.add_option("--showEnvironment", action="store_true", dest="showEnvironment", help=_("Show Arelle's config and cache directory and host OS environment parameters.")) parser.add_option("--showenvironment", action="store_true", dest="showEnvironment", help=SUPPRESS_HELP) parser.add_option("--collectProfileStats", action="store_true", dest="collectProfileStats", help=_("Collect profile statistics, such as timing of validation activities and formulae.")) if hasWebServer: parser.add_option("--webserver", action="store", dest="webserver", help=_("start web server on host:port[:server] for REST and web access, e.g., --webserver locahost:8080, " "or specify nondefault a server name, such as cherrypy, --webserver locahost:8080:cherrypy. " "(It is possible to specify options to be defaults for the web server, such as disclosureSystem and validations, but not including file names.) ")) pluginOptionsIndex = len(parser.option_list) # install any dynamic plugins so their command line options can be parsed if present for i, arg in enumerate(args): if arg.startswith('--plugin'): # allow singular or plural (option must simply be non-ambiguous if len(arg) > 9 and arg[9] == '=': preloadPlugins = arg[10:] elif i < len(args) - 1: preloadPlugins = args[i+1] else: preloadPlugins = "" for pluginCmd in preloadPlugins.split('|'): cmd = pluginCmd.strip() if cmd not in ("show", "temp") and len(cmd) > 0 and cmd[0] not in ('-', '~', '+'): moduleInfo = PluginManager.addPluginModule(cmd) if moduleInfo: cntlr.preloadedPlugins[cmd] = moduleInfo PluginManager.reset() break # add plug-in options for optionsExtender in pluginClassMethods("CntlrCmdLine.Options"): optionsExtender(parser) pluginLastOptionIndex = len(parser.option_list) parser.add_option("-a", "--about", action="store_true", dest="about", help=_("Show product version, copyright, and license.")) if not args and cntlr.isGAE: args = ["--webserver=::gae"] elif cntlr.isCGI: args = ["--webserver=::cgi"] elif cntlr.isMSW: # if called from java on Windows any empty-string arguments are lost, see: # http://bugs.java.com/view_bug.do?bug_id=6518827 # insert needed arguments sourceArgs = args args = [] namedOptions = set() optionsWithArg = set() for option in parser.option_list: names = str(option).split('/') namedOptions.update(names) if option.action == "store": optionsWithArg.update(names) priorArg = None for arg in sourceArgs: if priorArg in optionsWithArg and arg in namedOptions: # probable java/MSFT interface bug 6518827 args.append('') # add empty string argument # remove quoting if arguments quoted according to http://bugs.java.com/view_bug.do?bug_id=6518827 if r'\"' in arg: # e.g., [{\"foo\":\"bar\"}] -> [{"foo":"bar"}] arg = arg.replace(r'\"', '"') args.append(arg) priorArg = arg (options, leftoverArgs) = parser.parse_args(args) if options.about: print(_("\narelle(r) {0} ({1}bit)\n\n" "An open source XBRL platform\n" "(c) 2010-{2} Mark V Systems Limited\n" "All rights reserved\nhttp://www.arelle.org\nsupport@arelle.org\n\n" "Licensed under the Apache License, Version 2.0 (the \"License\"); " "you may not \nuse this file except in compliance with the License. 
" "You may obtain a copy \nof the License at " "'http://www.apache.org/licenses/LICENSE-2.0'\n\n" "Unless required by applicable law or agreed to in writing, software \n" "distributed under the License is distributed on an \"AS IS\" BASIS, \n" "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n" "See the License for the specific language governing permissions and \n" "limitations under the License." "\n\nIncludes:" "\n Python(r) {4[0]}.{4[1]}.{4[2]} (c) 2001-2013 Python Software Foundation" "\n PyParsing (c) 2003-2013 Paul T. McGuire" "\n lxml {5[0]}.{5[1]}.{5[2]} (c) 2004 Infrae, ElementTree (c) 1999-2004 by Fredrik Lundh" "{3}" "\n May include installable plug-in modules with author-specific license terms" ).format(Version.__version__, cntlr.systemWordSize, Version.copyrightLatestYear, _("\n Bottle (c) 2011-2013 Marcel Hellkamp") if hasWebServer else "", sys.version_info, etree.LXML_VERSION)) elif options.disclosureSystemName in ("help", "help-verbose"): text = _("Disclosure system choices: \n{0}").format(' \n'.join(cntlr.modelManager.disclosureSystem.dirlist(options.disclosureSystemName))) try: print(text) except UnicodeEncodeError: print(text.encode("ascii", "replace").decode("ascii")) elif len(leftoverArgs) != 0 and (not hasWebServer or options.webserver is None): parser.error(_("unrecognized arguments: {}").format(', '.join(leftoverArgs))) elif (options.entrypointFile is None and ((not options.proxy) and (not options.plugins) and (not any(pluginOption for pluginOption in parser.option_list[pluginOptionsIndex:pluginLastOptionIndex])) and (not hasWebServer or options.webserver is None))): parser.error(_("incorrect arguments, please try\n python CntlrCmdLine.py --help")) elif hasWebServer and options.webserver: # webserver incompatible with file operations if any((options.entrypointFile, options.importFiles, options.diffFile, options.versReportFile, options.factsFile, options.factListCols, options.factTableFile, options.conceptsFile, options.preFile, options.tableFile, options.calFile, options.dimFile, options.anchFile, options.formulaeFile, options.viewArcrole, options.viewFile, options.roleTypesFile, options.arcroleTypesFile )): parser.error(_("incorrect arguments with --webserver, please try\n python CntlrCmdLine.py --help")) else: # note that web server logging does not strip time stamp, use logFormat if that is desired cntlr.startLogging(logFileName='logToBuffer', logTextMaxLength=options.logTextMaxLength, logRefObjectProperties=options.logRefObjectProperties) from arelle import CntlrWebMain app = CntlrWebMain.startWebserver(cntlr, options) if options.webserver == '::wsgi': return app else: # parse and run the FILENAME cntlr.startLogging(logFileName=(options.logFile or "logToPrint"), logFormat=(options.logFormat or "[%(messageCode)s] %(message)s - %(file)s"), logLevel=(options.logLevel or "DEBUG"), logToBuffer=getattr(options, "logToBuffer", False), logTextMaxLength=options.logTextMaxLength, # e.g., used by EdgarRenderer to require buffered logging logRefObjectProperties=options.logRefObjectProperties) cntlr.run(options) return cntlr
16,113
def build_A(N):
    """
    Build A based on the defined problem.

    Args:
        N -- (int) as defined above

    Returns:
        NumPy ndarray - A
    """
    A = np.hstack((np.eye(N), np.negative(np.eye(N))))
    A = np.vstack((A, np.negative(np.hstack((np.eye(N), np.eye(N))))))
    A = np.vstack((A, np.hstack((np.ones(N), np.zeros(N)))))
    return A
16,114
def test_multiple_genbanks_multiple_cazymes(db_session, monkeypatch):
    """test adding protein to db when finding multiple identical CAZymes and GenBank accessions."""

    def mock_add_protein_to_db(*args, **kwargs):
        return

    monkeypatch.setattr(sql_interface, "add_data_to_protein_record", mock_add_protein_to_db)

    identical_genbank_accession = "identical_accession"
    args = {'args': Namespace(streamline=None)}

    sql_interface.add_protein_to_db(
        "test_cazyme_name",
        "cazy_family",
        "source_genus organism",
        "kingdom",
        identical_genbank_accession,
        db_session,
        args['args'],
    )
16,115
def gms_change_est2(T_cont, T_pert, q_cont, precip, level, lat,
                    lev_sfc=925., gamma=1.):
    """
    Gross moist stability change estimate.

    Near surface MSE difference between ITCZ and local latitude, neglecting
    geopotential term and applying a thermodynamic scaling for the moisture
    term, and multiplying the ITCZ terms by cos(lat) and a fixed fraction
    gamma to account for deviation of upper level MSE from the near surface
    ITCZ value.
    """
    # ITCZ defined as latitude with maximum zonal mean precip.
    itcz_ind = np.argmax(precip.mean(axis=-1))
    # Need temperature change at
    T_pert = np.squeeze(T_pert[np.where(level == lev_sfc)].mean(axis=-1))
    T_cont = np.squeeze(T_cont[np.where(level == lev_sfc)].mean(axis=-1))
    dT = T_pert - T_cont
    dT_itcz = T_pert[itcz_ind] - T_cont[itcz_ind]
    q_cont = np.squeeze(q_cont[np.where(level == lev_sfc)].mean(axis=-1))
    # GMS is difference between surface
    alpha = 0.07
    return (np.cos(np.deg2rad(lat))**2 * gamma *
            (c_p + L_v*alpha*q_cont[itcz_ind]) * dT_itcz -
            (c_p + L_v*alpha*q_cont) * dT) / c_p
16,116
def solid_polygon_info_(base_sides, printed=False):
    """Get information about a solid polygon from its side count."""
    # Example: A rectangular solid (Each base has four sides) is made up of
    # 12 edges, 8 vertices, 6 faces, and 12 triangles.
    edges = base_sides * 3
    vertices = base_sides * 2
    faces = base_sides + 2
    triangles = (base_sides - 2) * 2 + vertices
    if printed:
        print(f"Edges: {edges}\nVertices: {vertices}\nFaces: {faces}\nTriangles: {triangles}")
    else:
        return {"edges": edges, "vertices": vertices, "faces": faces, "triangles": triangles}
16,117
def read(id=None):
    """
    This function responds to a request for /api/people
    with the complete list of people

    :return: sorted list of people
    """
    # Create the list of people from our data
    with client() as mcl:
        # Database
        ppldb = mcl.ppldb
        # collection (kind of like a table)
        pplclxn = ppldb.people
        log.debug(pplclxn)
        if id is None:
            ppl = [Person(p) for p in pplclxn.find()]
            log.debug(ppl)
        else:
            p = pplclxn.find_one({'lname': id})
            return Person(p)
    return ppl
    # return [PEOPLE[key] for key in sorted(PEOPLE.keys())]
16,118
def load_prism_theme():
    """Loads a PrismJS theme from settings."""
    theme = get_theme()
    if theme:
        script = (
            f"""<link href="{PRISM_PREFIX}{PRISM_VERSION}/themes/prism-{theme}"""
            """.min.css" rel="stylesheet">"""
        )
        return mark_safe(script)
    return ""
16,119
def get_root_name(depth):
    """
    Returns the Rootname.
    """
    return Alphabet.get_null_character() * depth
16,120
def md5(fname):
    """
    Calculate the MD5 hash of the file given as input.

    Returns the hash value of the input file.
    """
    hash_md5 = hashlib.md5()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
16,121
def date2num(date_axis, units, calendar):
    """
    A wrapper from ``netCDF4.date2num`` able to handle "years since" and
    "months since" units.
    If time units are not "years since" or "months since" calls usual
    ``netcdftime.date2num``.

    :param numpy.array date_axis: The date axis following units
    :param str units: The proper time units
    :param str calendar: The NetCDF calendar attribute
    :returns: The corresponding numerical time axis
    :rtype: *array*
    """
    # date_axis is the date time axis incremented following units (i.e., by years, months, etc).
    if not units.split(' ')[0] in ['years', 'months']:
        # If units are not 'years' or 'months since', call usual netcdftime.date2num:
        return nc.date2num(date_axis, units=units, calendar=calendar)
    else:
        # Return to time reference with 'days since'
        units_as_days = 'days ' + ' '.join(units.split(' ')[1:])
        # Convert date axis as number of days since time reference
        days_axis = nc.date2num(date_axis, units=units_as_days, calendar=calendar)
        # Convert the time reference 'units_as_days' as datetime object
        start_date = nc.num2date(0.0, units=units_as_days, calendar=calendar)
        # Create years axis from input date axis
        years = np.array([date.year for date in np.atleast_1d(np.array(date_axis))])
        if units.split(' ')[0] == 'years':
            # If units are 'years since'
            # Define the number of maximum and minimum years to build a date axis covering
            # the whole 'num_axis' period
            max_years = np.max(years - start_date.year + 1)
            min_years = np.min(years - start_date.year - 1)
            # Create a date axis with one year that spans the entire period by year
            years_axis = np.array([add_year(start_date, yid)
                                   for yid in np.arange(min_years, max_years + 2)])
            # Convert years axis as number of days since time reference
            cdftime = netcdftime.utime(units_as_days, calendar=calendar)
            years_axis_as_days = cdftime.date2num(years_axis)
            # Find closest index for years_axis_as_days in days_axis
            closest_index = np.searchsorted(years_axis_as_days, days_axis)
            # Compute the difference between closest value of year axis and start date, in number of days
            num = days_axis - years_axis_as_days[closest_index]
            # Number of days of the corresponding closest year
            den = np.diff(years_axis_as_days)[closest_index]
            return min_years + closest_index + num / den
        elif units.split(' ')[0] == 'months':
            # If units are 'months since'
            # Define the number of maximum and minimum months to build a date axis covering
            # the whole 'num_axis' period
            max_months = np.max(12 * (years - start_date.year + 12))
            min_months = np.min(12 * (years - start_date.year - 12))
            # Create a date axis with one month that spans the entire period by month
            months_axis = np.array([add_month(start_date, mid)
                                    for mid in np.arange(min_months, max_months)])
            # Convert months axis as number of days since time reference
            cdftime = netcdftime.utime(units_as_days, calendar=calendar)
            months_axis_as_days = cdftime.date2num(months_axis)
            # Find closest index for months_axis_as_days in days_axis
            closest_index = np.searchsorted(months_axis_as_days, days_axis)
            # Compute the difference between closest value of months axis and start date, in number of days
            num = days_axis - months_axis_as_days[closest_index]
            # Number of days of the corresponding closest month
            den = np.diff(months_axis_as_days)[closest_index]
            return min_months + closest_index + num / den
16,122
def generate_int_file_from_fit(
        fitfn_zbt, fitfn_sp, fitfn_mp,
        exp_list, mass_range,
        std_io_map=STANDARD_IO_MAP,
        metafitter_zbt=single_particle_firstp_zbt_metafit,
        metafitter_sp=single_particle_firstp_metafit,
        metafitter_mp=multi_particle_firstp_metafit,
        dpath_source=DPATH_FILES_INT,
        **kwargs
):
    """Given fit functions for zbt, sp, and mp, as well as a set of
    e_hw_pairs, a range of mass numbers, and specific metafitter algorithms,
    generates fake interaction files_INT based on the fit functions

    :param fitfn_zbt: fit function for zero body term
    :param fitfn_sp: fit function for single particle
    :param fitfn_mp: fit function for interaction
    :param exp_list: e, hw, ... pairs used for the metafitters
    :param mass_range: range of masses for which to produce files_INT
    :param std_io_map: io_map for generating index-orbital keys
    :param metafitter_zbt: (Optional) zero body term fitter
    :param metafitter_sp: (Optional) single particle fitter
    :param metafitter_mp: (Optional) interactions fitter
    :param dpath_source: directory housing data files
    :param kwargs: (Optional) Additional keyword arguments to pass to the
        helper function
    """
    imsrg_data_map = DataMapInt(
        dpath_source, exp_list=exp_list, standard_indices=std_io_map)
    results_zbt = metafitter_zbt(
        fitfn_zbt, exp_list, imsrg_data_map=imsrg_data_map)
    results_sp = metafitter_sp(
        fitfn_sp, exp_list, imsrg_data_map=imsrg_data_map)
    results_mp = metafitter_mp(
        fitfn_mp, exp_list, imsrg_data_map=imsrg_data_map)
    generate_int_file_from_fit_results(
        results_zbt=results_zbt, results_sp=results_sp, results_mp=results_mp,
        exp_list=exp_list, io_map=std_io_map, mass_range=mass_range, **kwargs
    )
16,123
def _(txt):
    """Custom gettext translation function that uses the CurlyTx domain"""
    t = gettext.dgettext("CurlyTx", txt)
    if t == txt:
        # print "[CurlyTx] fallback to default translation for", txt
        t = gettext.gettext(txt)
    return t
16,124
def bell(num=1, delay=100):
    """Rings the bell num times using tk's bell command.

    Inputs:
    - num     number of times to ring the bell
    - delay   delay (ms) between each ring

    Note: always rings at least once, even if num < 1
    """
    global _TkWdg
    if not _TkWdg:
        _TkWdg = tkinter.Frame()
    _TkWdg.bell()
    if num > 1:
        _TkWdg.after(int(delay), bell, int(num)-1, int(delay))
16,125
def output_node(ctx, difference, path, indentstr, indentnum):
    """Returns a tuple (parent, continuation) where

    - parent is a PartialString representing the body of the node, including
      its comments, visuals, unified_diff and headers for its children - but
      not the bodies of the children
    - continuation is either None or (only in html-dir mode) a function which
      when called with a single integer arg, the maximum size to print, will
      print any remaining "split" pages for unified_diff up to the given size.
    """
    indent = tuple(indentstr * (indentnum + x) for x in range(3))
    t, cont = PartialString.cont()

    comments = u""
    if difference.comments:
        comments = u'{0[1]}<div class="comment">\n{1}{0[1]}</div>\n'.format(
            indent,
            "".join(
                u"{0[2]}{1}<br/>\n".format(indent, html.escape(x))
                for x in difference.comments
            ),
        )

    visuals = u""
    for visual in difference.visuals:
        visuals += output_visual(visual, path, indentstr, indentnum + 1)

    udiff = u""
    ud_cont = None
    if difference.unified_diff:
        ud_cont = HTMLSideBySidePresenter().output_unified_diff(
            ctx, difference.unified_diff, difference.has_internal_linenos
        )
        udiff = next(ud_cont)
        if isinstance(udiff, PartialString):
            ud_cont = ud_cont.send
            udiff = udiff.pformatl(PartialString.of(ud_cont))
        else:
            for _ in ud_cont:
                pass  # exhaust the iterator, avoids GeneratorExit
            ud_cont = None

    # PartialString for this node
    body = PartialString.numl(u"{0}{1}{2}{-1}", 3, cont).pformatl(
        comments, visuals, udiff
    )
    if len(path) == 1:
        # root node, frame it
        body = output_node_frame(difference, path, indentstr, indentnum, body)
    t = cont(t, body)

    # Add holes for child nodes
    for d in difference.details:
        child = output_node_frame(
            d, path + [d], indentstr, indentnum + 1, PartialString.of(d)
        )
        child = PartialString.numl(
            u"""{0[1]}<div class="difference">
{1}{0[1]}</div>
{-1}""",
            2,
            cont,
        ).pformatl(indent, child)
        t = cont(t, child)

    # there might be extra holes for the unified diff continuation
    assert len(t.holes) >= len(difference.details) + 1

    return cont(t, u""), ud_cont
16,126
def split_tree_into_feature_groups(tree: TreeObsForRailEnv.Node, max_tree_depth: int) -> (
        np.ndarray, np.ndarray, np.ndarray):
    """
    This function splits the tree into three different arrays of values
    """
    data, distance, agent_data = _split_node_into_feature_groups(tree)

    for direction in TreeObsForRailEnv.tree_explored_actions_char:
        sub_data, sub_distance, sub_agent_data = _split_subtree_into_feature_groups(
            tree.childs[direction], 1, max_tree_depth)
        data = np.concatenate((data, sub_data))
        distance = np.concatenate((distance, sub_distance))
        agent_data = np.concatenate((agent_data, sub_agent_data))

    return data, distance, agent_data
16,127
def _generate_training_batch(ground_truth_data, representation_function,
                             batch_size, num_points, random_state):
    """Sample a set of training samples based on a batch of ground-truth data.

    Args:
        ground_truth_data: GroundTruthData to be sampled from.
        representation_function: Function that takes observations as input and
            outputs a dim_representation sized representation for each observation.
        batch_size: Number of points to be used to compute the training_sample.
        num_points: Number of points to be sampled for training set.
        random_state: Numpy random state used for randomness.

    Returns:
        points: (num_points, dim_representation)-sized numpy array with training set features.
        labels: (num_points)-sized numpy array with training set labels.
    """
    points = None  # Dimensionality depends on the representation function.
    labels = np.zeros(num_points, dtype=np.int64)
    for i in range(num_points):
        labels[i], feature_vector = _generate_training_sample(
            ground_truth_data, representation_function, batch_size, random_state)
        if points is None:
            points = np.zeros((num_points, feature_vector.shape[0]))
        points[i, :] = feature_vector
    return points, labels
16,128
def get_mnist_loaders(data_dir, b_sz, shuffle=True):
    """Helper function that deserializes MNIST data
    and returns the relevant data loaders.

    params:
        data_dir: string  - root directory where the data will be saved
        b_sz:     integer - the batch size
        shuffle:  boolean - whether to shuffle the training set or not
    """
    train_loader = DataLoader(
        MNIST(data_dir, train=True, transform=ToTensor(), download=True),
        shuffle=shuffle, batch_size=b_sz)
    test_loader = DataLoader(
        MNIST(data_dir, train=False, transform=ToTensor(), download=True),
        shuffle=False, batch_size=b_sz)
    return train_loader, test_loader
16,129
def run_example_interactive():
    """Example function

    Running the exact same Example QuEST provides in the QuEST git repository
    with the interactive python interface of PyQuEST-cffi
    """
    print('PyQuEST-cffi tutorial based on QuEST tutorial')
    print(' Basic 3 qubit circuit')
    # creating environment
    env = utils.createQuestEnv()()
    # allocating qubit register
    qureg = utils.createQureg()(3, env=env)
    cheat.initZeroState()(qureg=qureg)

    # Using the report function to print system status
    print('This is the environment:')
    reporting.reportQuESTEnv()(env=env)
    print('This is the qubit register:')
    reporting.reportQuregParams()(qureg=qureg)
    print('This we can easily do in interactive python:')
    print('Result of qureg.isDensityMatrix: ', qureg.isDensityMatrix)

    # Apply circuit
    ops.hadamard()(qureg=qureg, qubit=0)
    ops.controlledNot()(qureg=qureg, control=0, qubit=1)
    ops.rotateY()(qureg=qureg, qubit=2, theta=0.1)
    ops.multiControlledPhaseFlip()(qureg=qureg, controls=[0, 1, 2], number_controls=3)

    u = np.zeros((2, 2), dtype=complex)
    u[0, 0] = 0.5 * (1 + 1j)
    u[0, 1] = 0.5 * (1 - 1j)
    u[1, 0] = 0.5 * (1 - 1j)
    u[1, 1] = 0.5 * (1 + 1j)
    ops.unitary()(qureg=qureg, qubit=0, matrix=u)

    a = 0.5 + 0.5 * 1j
    b = 0.5 - 0.5 * 1j
    ops.compactUnitary()(qureg=qureg, qubit=1, alpha=a, beta=b)

    v = np.array([1, 0, 0])
    ops.rotateAroundAxis()(qureg=qureg, qubit=2, theta=np.pi / 2, vector=v)
    ops.controlledCompactUnitary()(qureg=qureg, control=0, qubit=1, alpha=a, beta=b)
    ops.multiControlledUnitary()(qureg=qureg, controls=[0, 1],
                                 number_controls=2, qubit=2, matrix=u)

    # cheated results
    print('Circuit output')
    print('Probability amplitude of |111> by knowing the index: ',
          cheat.getProbAmp()(qureg=qureg, index=7))
    print('Probability amplitude of |111> by referencing basis state: ',
          cheat.getProbAmp()(qureg=qureg, index=[1, 1, 1]))

    # measuring:
    measurement = ops.measure()(qureg=qureg, qubit=0)
    print('Qubit 0 was measured as: ', measurement)
16,130
def atomic_coordinates_as_json(pk):
    """Get atomic coordinates from database."""
    subset = models.Subset.objects.get(pk=pk)
    vectors = models.NumericalValue.objects.filter(
        datapoint__subset=subset).filter(
        datapoint__symbols__isnull=True).order_by(
        'datapoint_id', 'counter')
    data = {'vectors': [[x.formatted('.10g') for x in vectors[:3]],
                        [x.formatted('.10g') for x in vectors[3:6]],
                        [x.formatted('.10g') for x in vectors[6:9]]]}
    # Here counter=1 filters out the first six entries
    symbols = models.Symbol.objects.filter(
        datapoint__subset=subset).filter(counter=1).order_by(
        'datapoint_id').values_list('value', flat=True)
    coords = models.NumericalValue.objects.filter(
        datapoint__subset=subset).filter(
        datapoint__symbols__counter=1).select_related('error').order_by(
        'counter', 'datapoint_id')
    tmp = models.Symbol.objects.filter(
        datapoint__subset=subset).annotate(
        num=models.models.Count('datapoint__symbols')).filter(
        num=2).first()
    if tmp:
        data['coord-type'] = tmp.value
    data['coordinates'] = []
    N = int(len(coords)/3)
    for symbol, coord_x, coord_y, coord_z in zip(
            symbols, coords[:N], coords[N:2*N], coords[2*N:3*N]):
        data['coordinates'].append((symbol,
                                    coord_x.formatted('.9g'),
                                    coord_y.formatted('.9g'),
                                    coord_z.formatted('.9g')))
    return data
16,131
def additional_bases():
    """Manually added bases that cannot be retrieved from the REST API"""
    return [
        {
            "facility_name": "Koltyr Northern Warpgate",
            "facility_id": 400014,
            "facility_type_id": 7,
            "facility_type": "Warpgate"
        },
        {
            "facility_name": "Koltyr Eastern Warpgate",
            "facility_id": 400015,
            "facility_type_id": 7,
            "facility_type": "Warpgate"
        },
        {
            "facility_name": "Koltyr Southern Warpgate",
            "facility_id": 400016,
            "facility_type_id": 7,
            "facility_type": "Warpgate"
        },
        {
            "facility_name": "Zorja",
            "facility_id": 400017,
            "facility_type_id": 2,
            "facility_type": "Amp Station"
        },
        {
            "facility_name": "Xander",
            "facility_id": 400018,
            "facility_type_id": 3,
            "facility_type": "Bio Lab"
        },
        {
            "facility_name": "Svarog",
            "facility_id": 400019,
            "facility_type_id": 4,
            "facility_type": "Tech Plant"
        },
        {
            "facility_name": "Koltyr Tech Plant Outpost",
            "facility_id": 400020,
            "facility_type_id": 5,
            "facility_type": "Large Outpost"
        },
        {
            "facility_name": "Koltyr Biolab Outpost",
            "facility_id": 400021,
            "facility_type_id": 5,
            "facility_type": "Large Outpost"
        },
        {
            "facility_name": "Koltyr Amp Station Outpost",
            "facility_id": 400022,
            "facility_type_id": 5,
            "facility_type": "Large Outpost"
        }
    ]
16,132
async def test_flow_non_encrypted_already_configured_abort(opp): """Test flow without encryption and existing config entry abortion.""" MockConfigEntry( domain=DOMAIN, unique_id="0.0.0.0", data=MOCK_CONFIG_DATA, ).add_to_opp(opp) result = await opp.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_BASIC_DATA, ) assert result["type"] == "abort" assert result["reason"] == "already_configured"
16,133
def write_junit_xml(name, message=None): """ Write a JUnit results XML file describing the outcome of a quality check. """ if message: failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message)) else: failure_element = '' data = { 'failure_count': 1 if message else 0, 'failure_element': failure_element, 'name': name, 'seconds': (datetime.utcnow() - START_TIME).total_seconds(), } Env.QUALITY_DIR.makedirs_p() filename = Env.QUALITY_DIR / f'{name}.xml' with open(filename, 'w') as f: f.write(JUNIT_XML_TEMPLATE.format(**data))
16,134
def get_all_label_values(dataset_info): """Retrieves possible values for modeled labels from a `Seq2LabelDatasetInfo`. Args: dataset_info: a `Seq2LabelDatasetInfo` message. Returns: A dictionary mapping each label name to a tuple of its permissible values. """ return { label_info.name: tuple(label_info.values) for label_info in dataset_info.labels }
16,135
def load_input(file: str) -> ArrayLike: """Load the puzzle input and duplicate 5 times in each direction, adding 1 to the array for each copy. """ input = puzzle_1.load_input(file) input_1x5 = np.copy(input) for _ in range(4): input = np.clip(np.mod(input + 1, 10), a_min=1, a_max=None) input_1x5 = np.concatenate([input_1x5, input], axis=1) input_5x5 = np.copy(input_1x5) for _ in range(4): input_1x5 = np.clip(np.mod(input_1x5 + 1, 10), a_min=1, a_max=None) input_5x5 = np.concatenate([input_5x5, input_1x5], axis=0) return input_5x5
16,136
def _get_xvals(end, dx):
    """Returns an integer numpy array of x-values incrementing by "dx" and
    ending with "end".

    Args:
        end (int)
        dx (int)
    """
    arange = np.arange(0, end-1+dx, dx, dtype=int)
    xvals = arange[1:]
    return xvals
16,137
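# Minimal check of _get_xvals above; the values are arbitrary examples.
# With end=10 and dx=2 the helper drops the leading 0 and keeps the end point.
print(_get_xvals(10, 2))  # expected: [ 2  4  6  8 10]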
def check_destroy_image_view(test, device, image_view, device_properties):
    """Checks the next vkDestroyImageView command call atom, including the
    device handle value and the image view handle value.
    """
    destroy_image_view = require(test.next_call_of("vkDestroyImageView"))
    require_equal(device, destroy_image_view.int_device)
    require_equal(image_view, destroy_image_view.int_imageView)
16,138
def top_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')): """ Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering Args: logits: logits distribution shape (vocabulary size) top_k: <=0: no filtering, >0: keep only top k tokens with highest probability. top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset whose total probability mass is greater than or equal to the threshold top_p. In practice, we select the highest probability tokens whose cumulative probability mass exceeds the threshold top_p. """ # batch support! if top_k > 0: values, _ = torch.topk(logits, top_k) min_values = values[:, -1].unsqueeze(1).repeat(1, logits.shape[-1]) logits = torch.where(logits < min_values, torch.ones_like(logits, dtype=logits.dtype) * -float('Inf'), logits) if top_p > 0.0: # Compute cumulative probabilities of sorted tokens sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold sorted_indices_to_remove = cumulative_probabilities > top_p # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 sorted_logits = sorted_logits.masked_fill_(sorted_indices_to_remove, filter_value) logits = torch.zeros_like(logits).scatter(1, sorted_indices, sorted_logits) return logits
16,139
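# Hedged usage sketch for top_filtering above. The logits below are made up;
# it assumes torch and torch.nn.functional as F are already imported, as the
# function itself requires.
logits = torch.tensor([[2.0, 1.0, 0.5, -1.0, -3.0]])
filtered = top_filtering(logits.clone(), top_k=2)                 # keep only the 2 largest logits
probs = F.softmax(top_filtering(logits.clone(), top_p=0.9), dim=-1)
next_token = torch.multinomial(probs, 1)                          # sample from the filtered distribution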
def _write_batch_lmdb(db, batch): """ Write a batch to an LMDB database """ try: with db.begin(write=True) as lmdb_txn: for i, temp in enumerate(batch): datum, _id = temp key = str(_id) lmdb_txn.put(key, datum.SerializeToString()) except lmdb.MapFullError: # double the map_size curr_limit = db.info()['map_size'] new_limit = curr_limit * 2 print('Doubling LMDB map size to %sMB ...' % (new_limit >> 20,)) try: db.set_mapsize(new_limit) # double it except AttributeError as e: version = tuple(int(x) for x in lmdb.__version__.split('.')) if version < (0, 87): raise Error('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__) else: raise e # try again _write_batch_lmdb(db, batch)
16,140
def get_reference_shift( self, seqID ): """Get a ``reference_shift`` attached to a particular ``seqID``. If none was provided, it will return **1** as default. :param str seqID: |seqID_param|. :type shift: Union[:class:`int`, :class:`list`] :raises: :TypeError: |indf_error|. .. rubric:: Example .. ipython:: In [1]: from rstoolbox.io import parse_rosetta_file ...: import pandas as pd ...: pd.set_option('display.width', 1000) ...: pd.set_option('display.max_columns', 500) ...: df = parse_rosetta_file("../rstoolbox/tests/data/input_ssebig.minisilent.gz", ...: {'sequence': 'C', 'structure': 'C'}) ...: df.add_reference_structure('C', df.iloc[0].get_structure('C')) ...: df.add_reference_shift('C', 3) ...: df.get_reference_shift('C') """ if not isinstance(self, (pd.DataFrame, pd.Series)): raise TypeError("Data container has to be a DataFrame/Series or a derived class.") if self._subtyp != "sequence_frame" and (seqID not in self.get_available_structures() and seqID not in self.get_available_sequences()): raise KeyError("Data container does not have data for structure {}".format(seqID)) if seqID in self._reference: return self._reference[seqID]["sft"] else: return 1
16,141
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.
    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.
    """
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `file` is not a directory, as on Windows
    # directories pass the os.access check.
    def _access_check(fn, mode):
        return (os.path.exists(fn) and os.access(fn, mode) and
                not os.path.isdir(fn))

    # Short circuit. If we're given a full path which matches the mode
    # and it exists, we're done here.
    if _access_check(cmd, mode):
        return cmd

    path = (path or os.environ.get("PATH", os.defpath)).split(os.pathsep)

    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if os.curdir not in path:
            path.insert(0, os.curdir)

        # PATHEXT is necessary to check on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # See if the given file matches any of the expected path extensions.
        # This will allow us to short circuit when given "python.exe".
        matches = [cmd for ext in pathext if cmd.lower().endswith(ext.lower())]
        # If it does match, only test that one, otherwise we have to try
        # others.
        files = [cmd] if matches else [cmd + ext.lower() for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]

    seen = set()
    for dir in path:
        dir = os.path.normcase(dir)
        if dir not in seen:
            seen.add(dir)
            for thefile in files:
                name = os.path.join(dir, thefile)
                if _access_check(name, mode):
                    return name
    return None
16,142
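# Illustrative call of the which() helper above; the command name "python3"
# is just an example and the result depends on the local PATH.
interpreter = which("python3")
if interpreter is None:
    print("python3 not found on PATH")
else:
    print("python3 resolved to", interpreter)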
def test_splitinfo_throws(): """make sure bad behavior is caught""" short_profile = dict(DEMO_SPLIT) short_profile.pop('split_rate', None) with pytest.raises(exceptions.InvalidSplitConfig): split_obj = split_utils.SplitInfo(short_profile) bad_split = dict(DEMO_SPLIT) bad_split['split_rate'] = 'bacon' with pytest.raises(exceptions.InvalidSplitConfig): split_obj = split_utils.SplitInfo(bad_split) bad_date = dict(DEMO_SPLIT) bad_date['split_date'] = 'Tomorrow' with pytest.raises(exceptions.InvalidSplitConfig): split_obj = split_utils.SplitInfo(bad_date) bad_bool = dict(DEMO_SPLIT) bad_bool['bool_mult_div'] = 'bacon' with pytest.raises(exceptions.InvalidSplitConfig): split_obj = split_utils.SplitInfo(bad_bool)
16,143
def load_and_resolve_feature_metadata(eval_saved_model_path: Text, graph: tf.Graph): """Get feature data (feature columns, feature) from EvalSavedModel metadata. Like load_feature_metadata, but additionally resolves the Tensors in the given graph. Args: eval_saved_model_path: Path to EvalSavedModel, for the purposes of loading the feature_metadata file. graph: tf.Graph to resolve the Tensors in. Returns: Same as load_feature_metadata, except associated_tensors and features contain the Tensors resolved in the graph instead of TensorInfos. """ result = load_feature_metadata(eval_saved_model_path=eval_saved_model_path) # Resolve Tensors in graph result['associated_tensors'] = [ tf.compat.v1.saved_model.get_tensor_from_tensor_info(tensor_info, graph) for tensor_info in result['associated_tensors'] ] result['features'] = { k: tf.compat.v1.saved_model.get_tensor_from_tensor_info(v, graph) for k, v in result['features'].items() } return result
16,144
def greater_than_or_eq(quant1, quant2): """Binary function to call the operator""" return quant1 >= quant2
16,145
def _download_smeagol_PWMset():
    """Download the curated set of motifs (PWMs) used in the SMEAGOL paper
    into `motifs/smeagol_datasets`. Nothing is returned; the files are only
    fetched if the folder does not already exist.
    """
    download_dir = 'motifs/smeagol_datasets'
    remote_paths = ['https://github.com/gruber-sciencelab/VirusHostInteractionAtlas/tree/master/DATA/PWMs/attract_rbpdb_encode_filtered_human_pwms.h5',
                    'https://github.com/gruber-sciencelab/VirusHostInteractionAtlas/tree/master/DATA/PWMs/attract_rbpdb_encode_representative_matrices.txt']

    print(f"Downloading custom PWM set into {download_dir}")
    if not os.path.exists(download_dir):
        os.mkdir(download_dir)
        for remote_path in remote_paths:
            os.system('wget -P ' + download_dir + ' ' + remote_path)
    else:
        print(f"Folder {download_dir} already exists.")
16,146
def test_peekleft_after_appendleft(deque_fixture): """Test peekleft after appending to the left of deque.""" deque_fixture.appendleft(7) assert deque_fixture.peekleft() == 7
16,147
def pr_define_role(pe_id, role=None, role_type=None, entity_type=None, sub_type=None): """ Back-end method to define a new affiliates-role for a person entity @param pe_id: the person entity ID @param role: the role name @param role_type: the role type (from pr_role_types), default 9 @param entity_type: limit selection in CRUD forms to this entity type @param sub_type: limit selection in CRUD forms to this entity sub-type @return: the role ID """ if not pe_id: return None s3db = current.s3db if role_type not in s3db.pr_role_types: role_type = 9 # Other data = {"pe_id": pe_id, "role": role, "role_type": role_type, "entity_type": entity_type, "sub_type": sub_type} rtable = s3db.pr_role if role: query = (rtable.pe_id == pe_id) & \ (rtable.role == role) duplicate = current.db(query).select(rtable.id, rtable.role_type, limitby=(0, 1)).first() else: duplicate = None if duplicate: if duplicate.role_type != role_type: # Clear paths if this changes the role type if str(role_type) != str(OU): data["path"] = None s3db.pr_role_rebuild_path(duplicate.id, clear=True) duplicate.update_record(**data) record_id = duplicate.id else: record_id = rtable.insert(**data) return record_id
16,148
def inherently_superior(df):
    """
    Find rows in a dataframe with all values 'inherently superior', meaning that
    all values for certain metrics are as high or higher than for all other rows.

    Parameters
    ----------
    df : DataFrame
        Pandas dataframe containing the columns to be compared. The columns
        should be in a format in which higher values are superior.

    Returns
    -------
    DataFrame with index of best values and values compared.
    """
    # Copy dataframe to prevent altering the columns.
    df_copy = df.copy()
    # Reset index to reference location of values. Also, convert to numpy.
    df_copy.reset_index(inplace=True)
    arr = df_copy.values
    # Repeat and tile the array for comparison. Given indices [1, 2], arr1 is
    # in format [1, 1, 2, 2], and arr2 is in format [1, 2, 1, 2].
    arr1 = np.repeat(arr, arr.shape[0], axis=0)
    arr2 = np.tile(arr, (arr.shape[0], 1))
    # Check, for each pairing, whether the row's values are all >= the other row's.
    any_arr = np.all(arr1[:, 1:] >= arr2[:, 1:], axis=1)
    # Adjust array so that all points at which a row is being compared to itself
    # are labeled as superior.
    same_idx = np.array(range(0, len(any_arr), arr.shape[0])) + np.array(range(arr.shape[0]))
    any_arr[same_idx] = 1
    # Concatenate arr1 and array with superior labels.
    arr1_any = np.concatenate([arr1, any_arr.reshape(-1, 1)], axis=1)
    # Split data at unique indices. Used to check if greater than all other rows.
    splits = np.array(np.split(arr1_any, np.unique(arr1[:, 0], return_index=True)[1][1:]))
    perc_sup = np.mean(splits[:, :, -1], axis=1)
    idx = np.all(splits[:, :, -1], axis=1)
    # Choose superior data idx and create dataframe.
    columns = df_copy.columns.tolist() + ['perc_sup', 'fully_sup']
    data = np.concatenate([arr, perc_sup.reshape(-1, 1), idx.reshape(-1, 1)], axis=1)
    arr_df = pd.DataFrame(data, columns=columns)
    arr_df.drop('index', axis=1, inplace=True)
    arr_df['fully_sup'] = arr_df['fully_sup'].astype(bool)
    return arr_df
16,149
def LineColourArray(): """Line colour options array""" Colour = [ 'Black', 'dimgrey', 'darkgrey', 'silver', 'lightgrey', 'maroon', 'darkred', 'firebrick', 'red', 'orangered', 'darkorange', 'orange', 'saddlebrown', 'darkgoldenrod', 'goldenrod', 'gold', 'darkolivegreen', 'olivedrab', 'olive', 'y', 'darkkhaki', 'khaki', 'darkgreen', 'Green', 'limegreen', 'lime', 'mediumspringgreen', 'palegreen', 'greenyellow', 'midnightblue', 'navy', 'darkblue', 'mediumblue', 'blue', 'slateblue', 'indigo', 'purple', 'darkmagenta', 'darkorchid', 'mediumorchid', 'orchid', 'plum', 'crimson', 'deeppink', 'magenta', 'hotpink', 'pink' ] return Colour
16,150
def os_to_maestral_error(exc, dbx_path=None, local_path=None):
    """
    Gets the OSError and tries to add a reasonably informative error message.

    .. note::
        The following exception types should not typically be raised during syncing:

        InterruptedError: Python will automatically retry on interrupted connections.
        NotADirectoryError: If raised, this likely is a Maestral bug.
        IsADirectoryError: If raised, this likely is a Maestral bug.

    :param OSError exc: Python Exception.
    :param str dbx_path: Dropbox path of file which triggered the error.
    :param str local_path: Local path of file which triggered the error.
    :returns: :class:`MaestralApiError` instance or :class:`OSError` instance.
    """
    title = 'Cannot upload or download file'

    if isinstance(exc, PermissionError):
        err_cls = InsufficientPermissionsError  # subclass of SyncError
        text = 'Insufficient read or write permissions for this location.'
    elif isinstance(exc, FileNotFoundError):
        err_cls = NotFoundError  # subclass of SyncError
        text = 'The given path does not exist.'
    elif isinstance(exc, FileExistsError):
        err_cls = ExistsError  # subclass of SyncError
        title = 'Could not download file'
        text = 'There already is an item at the given path.'
    elif isinstance(exc, IsADirectoryError):
        err_cls = IsAFolderError  # subclass of SyncError
        text = 'The given path refers to a folder.'
    elif isinstance(exc, NotADirectoryError):
        err_cls = NotAFolderError  # subclass of SyncError
        text = 'The given path refers to a file.'
    elif exc.errno == errno.ENAMETOOLONG:
        err_cls = PathError  # subclass of SyncError
        title = 'Could not create local file'
        text = 'The file name (including path) is too long.'
    elif exc.errno == errno.EFBIG:
        err_cls = FileSizeError  # subclass of SyncError
        title = 'Could not download file'
        text = 'The file size is too large.'
    elif exc.errno == errno.ENOSPC:
        err_cls = InsufficientSpaceError  # subclass of SyncError
        title = 'Could not download file'
        text = 'There is not enough space left on the selected drive.'
    elif exc.errno == errno.ENOMEM:
        err_cls = OutOfMemoryError  # subclass of MaestralApiError
        text = 'Out of memory. Please reduce the number of memory consuming processes.'
    else:
        return exc

    return err_cls(title, text, dbx_path=dbx_path, local_path=local_path)
16,151
def parse_ccu_sys_var(data: dict[str, Any]) -> tuple[str, Any]: """Helper to parse type of system variables of CCU.""" # pylint: disable=no-else-return if data[ATTR_TYPE] == ATTR_HM_LOGIC: return data[ATTR_NAME], data[ATTR_VALUE] == "true" if data[ATTR_TYPE] == ATTR_HM_ALARM: return data[ATTR_NAME], data[ATTR_VALUE] == "true" elif data[ATTR_TYPE] == ATTR_HM_NUMBER: return data[ATTR_NAME], float(data[ATTR_VALUE]) elif data[ATTR_TYPE] == ATTR_HM_LIST: return data[ATTR_NAME], int(data[ATTR_VALUE]) return data[ATTR_NAME], data[ATTR_VALUE]
16,152
def one_time_log_fixture(request, workspace) -> Single_Use_Log: """ Pytest Fixture for setting up a single use log file At test conclusion, runs the cleanup to delete the single use text file :return: Single_Use_Log class """ log_class = Single_Use_Log(workspace) request.addfinalizer(log_class.cleanup) return log_class
16,153
def details(request, path): """ Returns detailed information on the entity at path. :param path: Path to the entity (namespaceName/.../.../.../) :return: JSON Struct: {property1: value, property2: value, ...} """ item = CACHE.get(ENTITIES_DETAIL_CACHE_KEY) # ENTITIES_DETAIL : {"namespaceName": {"name":"", "description": "", "stream":{}, "artifact":"", "dataset":"", # "application":""}, {}...} Each part in path.split('/') matches the key name in ENTITIES_DETAIL # The detailed information of entity at path stores in the last dict for k in path.strip('/').split('/'): item = item[k] item["privileges"] = _get_privileges_for_path(request.user, path) return HttpResponse(json.dumps(item), content_type='application/json')
16,154
def ca_get_container_capability_set(slot, h_container):
    """
    Get the container capabilities of the given slot.

    :param int slot: target slot number
    :param int h_container: target container handle
    :return: result code, {id: val} dict of capabilities (None if command failed)
    """
    slot_id = CK_SLOT_ID(slot)
    cont_id = CK_ULONG(h_container)
    cap_ids = AutoCArray()
    cap_vals = AutoCArray()

    @refresh_c_arrays(1)
    def _get_container_caps():
        """Closure for retries to work w/ properties"""
        return CA_GetContainerCapabilitySet(
            slot_id, cont_id, cap_ids.array, cap_ids.size, cap_vals.array, cap_vals.size
        )

    ret = _get_container_caps()

    return ret, dict(list(zip(cap_ids, cap_vals)))
16,155
def load_pyfunc(model_file): """ Loads a Keras model as a PyFunc from the passed-in persisted Keras model file. :param model_file: Path to Keras model file. :return: PyFunc model. """ return _KerasModelWrapper(_load_model(model_file))
16,156
def business_days(start, stop): """ Return business days between two datetimes (inclusive). """ return dt_business_days(start.date(), stop.date())
16,157
def empty_nzb_document(): """ Creates xmldoc XML document for a NZB file. """ # http://stackoverflow.com/questions/1980380/how-to-render-a-doctype-with-pythons-xml-dom-minidom imp = minidom.getDOMImplementation() dt = imp.createDocumentType("nzb", "-//newzBin//DTD NZB 1.1//EN", "http://www.newzbin.com/DTD/nzb/nzb-1.1.dtd") doc = imp.createDocument("http://www.newzbin.com/DTD/2003/nzb", "nzb", dt) # http://stackoverflow.com/questions/2306149/how-to-write-xml-elements-with-namespaces-in-python doc.documentElement.setAttribute('xmlns', 'http://www.newzbin.com/DTD/2003/nzb') return doc
16,158
def get_output_directory(create_statistics=None, undersample=None, oversample=None): """ Determines the output directory given the balance of the dataset as well as columns. Parameters ---------- create_statistics: bool Whether the std, min and max columns have been created undersample: bool Whether the data has been undersampled oversample: bool Whether the data has been oversampled Returns ------- Output directory """ if create_statistics is None: create_statistics = AppConfig.create_statistics if undersample is None: undersample = AppConfig.balance_data if oversample is None: oversample = AppConfig.oversample stat = 'st' if create_statistics else 'ns' bal = 'us' if undersample else 'ub' bal = 'os' if oversample else bal return f'./output/{stat}_{bal}/'
16,159
def removePrefixes(word, prefixes): """ Attempts to remove the given prefixes from the given word. Args: word (string): Word to remove prefixes from. prefixes (collections.Iterable or string): Prefixes to remove from given word. Returns: (string): Word with prefixes removed. """ if isinstance(prefixes, str): return word.split(prefixes)[-1] for prefix in prefixes: word = word.split(prefix)[-1] return word
16,160
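# Small illustration of removePrefixes above (values are arbitrary).
# Note that str.split(prefix)[-1] keeps the text after the *last* occurrence
# of the prefix, which is the behaviour the helper relies on.
print(removePrefixes("pre_value", "pre_"))               # value
print(removePrefixes("foo_bar_name", ["foo_", "bar_"]))  # name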
def isSol(res):
    """ Check if the string is of the form a^i b^j c^k (with i, j, k >= 1) """
    if not res or res[0] != 'a' or res[-1] != 'c':
        return False

    l = 0
    r = len(res)-1

    while res[l] == "a":
        l+=1
    while res[r] == "c":
        r-=1

    if r-l+1 <= 0:
        return False

    for x in res[l:r+1]:
        if x != 'b':
            return False

    return True
16,161
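# Quick sanity checks for isSol above (strings chosen arbitrarily); the helper
# accepts strings of the form a^i b^j c^k with at least one of each letter.
print(isSol("aabbbcc"))  # True
print(isSol("aabxcc"))   # False  (non-'b' character in the middle)
print(isSol("abc"))      # True
print(isSol("ab"))       # False  (no trailing 'c')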
def test_fleurinpgen_with_parameters(aiida_profile, fixture_sandbox, generate_calc_job, fixture_code, generate_structure): # file_regression """Test a default `FleurinputgenCalculation`.""" # Todo add (more) tests with full parameter possibilities, i.e econfig, los, .... entry_point_name = 'fleur.inpgen' parameters = { 'atom': { 'element': 'Si', 'rmt': 2.1, 'jri': 981, 'lmax': 12, 'lnonsph': 6 }, #'econfig': '[He] 2s2 2p6 | 3s2 3p2', 'lo': ''}, 'comp': { 'kmax': 5.0, 'gmaxxc': 12.5, 'gmax': 15.0 }, 'kpt': { 'div1': 17, 'div2': 17, 'div3': 17, 'tkb': 0.0005 } } inputs = { 'code': fixture_code(entry_point_name), 'structure': generate_structure(), 'parameters': orm.Dict(dict=parameters), 'metadata': { 'options': { 'resources': { 'num_machines': 1 }, 'max_wallclock_seconds': int(100), 'withmpi': False } } } calc_info = generate_calc_job(fixture_sandbox, entry_point_name, inputs) with fixture_sandbox.open('aiida.in') as handle: input_written = handle.read() aiida_in_text = """A Fleur input generator calculation with aiida\n&input cartesian=F / 0.000000000 5.130606429 5.130606429 5.130606429 0.000000000 5.130606429 5.130606429 5.130606429 0.000000000 1.0000000000 1.000000000 1.000000000 1.000000000 2\n 14 0.0000000000 0.0000000000 0.0000000000 14 0.2500000000 0.2500000000 0.2500000000 &atom element="Si" jri=981 lmax=12 lnonsph=6 rmt=2.1 / &comp gmax=15.0 gmaxxc=12.5 kmax=5.0 / &kpt div1=17 div2=17 div3=17 tkb=0.0005 / """ # Checks on the files written to the sandbox folder as raw input assert sorted(fixture_sandbox.get_content_list()) == sorted(['aiida.in']) assert input_written == aiida_in_text # file_regression.check(input_written, encoding='utf-8', extension='.in')
16,162
def new_trip(direction, day, driver, time): """ Adds a new trip to the system. :param direction: "Salita" or "Discesa". :param day: A day spanning the whole work week ("Lunedì"-"Venerdì"). :param driver: The chat_id of the driver. :param time: The time of departure. :return: """ dt.groups[direction][day][driver] = {"Time": time, "Permanent": [], "Temporary": [], "SuspendedUsers": [], "Suspended": False}
16,163
def nixpkgs_python_configure( name = "nixpkgs_python_toolchain", python2_attribute_path = None, python2_bin_path = "bin/python", python3_attribute_path = "python3", python3_bin_path = "bin/python", repository = None, repositories = {}, nix_file_deps = None, nixopts = [], fail_not_supported = True): """Define and register a Python toolchain provided by nixpkgs. Creates `nixpkgs_package`s for Python 2 or 3 `py_runtime` instances and a corresponding `py_runtime_pair` and `toolchain`. The toolchain is automatically registered and uses the constraint: "@io_tweag_rules_nixpkgs//nixpkgs/constraints:nixpkgs" Attrs: name: The name-prefix for the created external repositories. python2_attribute_path: The nixpkgs attribute path for python2. python2_bin_path: The path to the interpreter within the package. python3_attribute_path: The nixpkgs attribute path for python3. python3_bin_path: The path to the interpreter within the package. ...: See `nixpkgs_package` for the remaining attributes. """ python2_specified = python2_attribute_path and python2_bin_path python3_specified = python3_attribute_path and python3_bin_path if not python2_specified and not python3_specified: fail("At least one of python2 or python3 has to be specified.") kwargs = dict( repository = repository, repositories = repositories, nix_file_deps = nix_file_deps, nixopts = nixopts, fail_not_supported = fail_not_supported, ) python2_runtime = None if python2_attribute_path: python2_runtime = "@%s_python2//:runtime" % name nixpkgs_package( name = name + "_python2", nix_file_content = _python_nix_file_content.format( attribute_path = python2_attribute_path, bin_path = python2_bin_path, version = "PY2", ), **kwargs ) python3_runtime = None if python3_attribute_path: python3_runtime = "@%s_python3//:runtime" % name nixpkgs_package( name = name + "_python3", nix_file_content = _python_nix_file_content.format( attribute_path = python3_attribute_path, bin_path = python3_bin_path, version = "PY3", ), **kwargs ) _nixpkgs_python_toolchain( name = name, python2_runtime = python2_runtime, python3_runtime = python3_runtime, ) native.register_toolchains("@%s//:toolchain" % name)
16,164
def _h1_to_dataframe(h1: Histogram1D) -> pandas.DataFrame: """Convert histogram to pandas DataFrame.""" return pandas.DataFrame( {"frequency": h1.frequencies, "error": h1.errors}, index=binning_to_index(h1.binning, name=h1.name), )
16,165
def fit_one_grain( gr, flt, pars): """ Uses scipy.optimize to fit a single grain """ args = flt, pars, gr ub = np.linalg.inv(gr.ubi) x0 = ub.ravel().copy() xf, cov_v, info, mesg, ier = leastsq( calc_teo_fit, x0, args, full_output=True) ub = xf.copy() ub.shape = 3, 3 ubi = np.linalg.inv(ub) gr.set_ubi(ubi)
16,166
def wgt_area_sum(data, lat_wgt, lon_wgt):
    """wgt_area_sum() performs weighted area summation over a geographical area.
    data: data of which last 2 dimensions are lat and lon. Strictly needs to be a masked array
    lat_wgt: weights over latitude of area (usually cos(lat * pi/180))
    lon_wgt: weights over longitude of area (usually 1)
    Returns a Numpy array with 2 fewer dimensions (Masked array. Mask is False if no mask was
    supplied with the input data. Else mask is derived from the input data)"""
    # Get data shape
    shp = data.shape
    ndims = data.ndim

    if(isinstance(lat_wgt, float)):
        lat_wgt = [lat_wgt] * shp[ndims - 2]
    if(isinstance(lon_wgt, float)):
        lon_wgt = [lon_wgt] * shp[ndims - 1]

    lat_wgt = np.array(lat_wgt).reshape(len(lat_wgt), 1)
    lon_wgt = np.array(lon_wgt)

    # Make grid of lon_wgt, lat_wgt with lat and lon coordinates (last 2 axis of data)
    wy = np.broadcast_to(lon_wgt, data.shape[ndims - 2:ndims])
    wx = np.broadcast_to(lat_wgt, data.shape[ndims - 2:ndims])

    # Mask the array
    # Get 2D mask from the array
    ds = data[0]
    for el in shp[1:ndims-2]:
        ds = ds[0]

    if(isinstance(ds, np.ma.masked_array)):
        msk = ds.mask
    else:
        msk = False

    wy = np.ma.masked_array(wy, msk)
    wx = np.ma.masked_array(wx, msk)

    data_wgt = data * wy * wx
    sm_wgt = data_wgt.sum(axis = (ndims - 2, ndims - 1))
    # sm_wgt = sm_wgt/np.sum(wy * wx)

    return sm_wgt
16,167
def test_complain_about_missing_fields(tmp_path: Path, l1_ls8_folder: Path):
    """
    It should complain immediately if I add a file without enough metadata to write the filename.

    (and with a friendly error message)
    """

    out = tmp_path / "out"
    out.mkdir()

    [blue_geotiff_path] = l1_ls8_folder.rglob("L*_B2.TIF")

    # Default simple naming conventions need at least a date and family...
    with pytest.raises(
        ValueError, match="Need more properties to fulfill naming conventions."
    ):
        with DatasetAssembler(out) as p:
            p.write_measurement("blue", blue_geotiff_path)

    # It should mention the field that's missing (we added a date, so product_family is needed)
    with DatasetAssembler(out) as p:
        with pytest.raises(ValueError, match="odc:product_family"):
            p.datetime = datetime(2019, 7, 4, 13, 7, 5)
            p.write_measurement("blue", blue_geotiff_path)

    # DEA naming conventions have stricter standards, and will tell you which fields you need to add.
    with DatasetAssembler(out, naming_conventions="dea") as p:
        # We set all the fields that work in default naming conventions.
        p.datetime = datetime(2019, 7, 4, 13, 7, 5)
        p.product_family = "quaternarius"
        p.processed_now()

        # These fields are mandatory for DEA, and so should be complained about.
        expected_extra_fields_needed = (
            "eo:platform",
            "eo:instrument",
            "odc:dataset_version",
            "odc:producer",
            "odc:region_code",
        )
        with pytest.raises(ValueError) as got_error:
            p.write_measurement("blue", blue_geotiff_path)

        # All needed fields should have been in the error message.
        for needed_field_name in expected_extra_fields_needed:
            assert needed_field_name in got_error.value.args[0], (
                f"Expected field {needed_field_name} to "
                f"be listed as mandatory in the error message"
            )
16,168
def get_files_from_path(path, recurse=False, full_path=True):
    """
    Get file paths from the input path
    :param full_path: Whether to return full (joined) paths
    :param path: Input path
    :param recurse: Whether to recurse into subdirectories
    :return: List of file paths
    """
    files_path_list = []
    if not os.path.exists(path):
        return []
    dir_list = SimpleProgressBar(os.listdir(path))
    dir_list.show_title("Processing")
    for file_path in dir_list:
        if full_path:
            file_path = os.path.join(path, file_path)
        if os.path.isdir(file_path):
            if recurse:
                files_path_list += get_files_from_path(file_path, recurse=True)
            else:
                pass
        else:
            files_path_list.append(file_path)
    return files_path_list
16,169
def load(filename): """Load the labels and scores for Hits at K evaluation. Loads labels and model predictions from files of the format: Query \t Example \t Label \t Score :param filename: Filename to load. :return: list_of_list_of_labels, list_of_list_of_scores """ result_labels = [] result_scores = [] current_block_name = "" current_block_scores = [] current_block_labels = [] with open(filename,'r') as fin: for line in fin: splt = line.strip().split("\t") block_name = splt[0] block_example = splt[1] example_label = int(splt[2]) example_score = float(splt[3]) if block_name != current_block_name and current_block_name != "": result_labels.append(current_block_labels) result_scores.append(current_block_scores) current_block_labels = [] current_block_scores = [] current_block_labels.append(example_label) current_block_scores.append(example_score) current_block_name = block_name result_labels.append(current_block_labels) result_scores.append(current_block_scores) return result_labels,result_scores
16,170
def test_send_message_two_chat_ids(get_token: str, get_chat_id: int):
    """Send a basic message to two chats."""
    test_name = inspect.currentframe().f_code.co_name
    msg = f"test two chat_ids (2 msgs to one chat id) send message. {test_name}"
    two_chat_ids = [get_chat_id, get_chat_id]
    client = Telegram(token=get_token, chat_id=two_chat_ids)
    client.send_message(msg)
16,171
def expanding_sum(a, axis = 0, data = None, state = None): """ equivalent to pandas a.expanding().sum(). - works with np.arrays - handles nan without forward filling. - supports state parameters :Parameters: ------------ a : array, pd.Series, pd.DataFrame or list/dict of these timeseries axis : int, optional 0/1/-1. The default is 0. data: None unused at the moment. Allow code such as func(live, **func_(history)) to work state: dict, optional state parameters used to instantiate the internal calculations, based on history prior to 'a' provided. :Example: agreement with pandas -------------------------------- >>> from pyg import *; import pandas as pd; import numpy as np >>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999)) >>> panda = a.expanding().sum(); ts = expanding_sum(a) >>> assert eq(ts,panda) :Example: nan handling ---------------------- Unlike pandas, timeseries does not forward fill the nans. >>> a[a<0.1] = np.nan >>> panda = a.expanding().sum(); ts = expanding_sum(a) >>> pd.concat([panda,ts], axis=1) >>> 0 1 >>> 1993-09-23 NaN NaN >>> 1993-09-24 NaN NaN >>> 1993-09-25 0.645944 0.645944 >>> 1993-09-26 2.816321 2.816321 >>> 1993-09-27 2.816321 NaN >>> ... ... >>> 2021-02-03 3976.911348 3976.911348 >>> 2021-02-04 3976.911348 NaN >>> 2021-02-05 3976.911348 NaN >>> 2021-02-06 3976.911348 NaN >>> 2021-02-07 3976.911348 NaN :Example: state management -------------------------- One can split the calculation and run old and new data separately. >>> old = a.iloc[:5000] >>> new = a.iloc[5000:] >>> ts = expanding_sum(a) >>> old_ts = expanding_sum_(old) >>> new_ts = expanding_sum(new, **old_ts) >>> assert eq(new_ts, ts.iloc[5000:]) :Example: dict/list inputs --------------------------- >>> assert eq(expanding_sum(dict(x = a, y = a**2)), dict(x = expanding_sum(a), y = expanding_sum(a**2))) >>> assert eq(expanding_sum([a,a**2]), [expanding_sum(a), expanding_sum(a**2)]) """ state = state or {} return first_(_expanding_sum(a, axis = axis, **state))
16,172
def accuracy(output, target, top_k=(1,)):
    """Calculate classification accuracy between output and target.

    :param output: output of classification network
    :type output: pytorch tensor
    :param target: ground truth from dataset
    :type target: pytorch tensor
    :param top_k: top k of metric, where each k is an integer
    :type top_k: tuple of integers
    :return: results of top k
    :rtype: list
    """
    max_k = max(top_k)
    batch_size = target.size(0)
    _, pred = output.topk(max_k, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in top_k:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
16,173
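# Hedged usage sketch for accuracy above; the tensors are tiny made-up examples
# and assume torch is imported (the metric itself is torch-based).
output = torch.tensor([[0.1, 0.7, 0.2],
                       [0.8, 0.1, 0.1],
                       [0.3, 0.3, 0.4]])
target = torch.tensor([1, 0, 2])
top1, top2 = accuracy(output, target, top_k=(1, 2))
print(top1.item(), top2.item())  # expected: 100.0 100.0 (every top-1 prediction is correct)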
def read_configuration_from_file(path: str) -> Dict[str, Any]: """ Read the JSON file and return a dict. :param path: path on file system :return: raw, unchanged dict """ if os.path.isfile(path): with open(path) as json_file: return json.load(json_file) else: raise ConfigNotFoundException
16,174
def logwrap( func: typing.Optional[typing.Callable] = None, *, log: logging.Logger = _log_wrap_shared.logger, log_level: int = logging.DEBUG, exc_level: int = logging.ERROR, max_indent: int = 20, spec: typing.Optional[typing.Callable] = None, blacklisted_names: typing.Optional[typing.List[str]] = None, blacklisted_exceptions: typing.Optional[typing.List[typing.Type[Exception]]] = None, log_call_args: bool = True, log_call_args_on_exc: bool = True, log_result_obj: bool = True ) -> typing.Union[LogWrap, typing.Callable]: """Log function calls and return values. Python 3.4+ version. :param func: function to wrap :type func: typing.Optional[typing.Callable] :param log: logger object for decorator, by default used 'logwrap' :type log: logging.Logger :param log_level: log level for successful calls :type log_level: int :param exc_level: log level for exception cases :type exc_level: int :param max_indent: maximum indent before classic `repr()` call. :type max_indent: int :param spec: callable object used as spec for arguments bind. This is designed for the special cases only, when impossible to change signature of target object, but processed/redirected signature is accessible. Note: this object should provide fully compatible signature with decorated function, or arguments bind will be failed! :type spec: typing.Optional[typing.Callable] :param blacklisted_names: Blacklisted argument names. Arguments with this names will be skipped in log. :type blacklisted_names: typing.Optional[typing.Iterable[str]] :param blacklisted_exceptions: list of exceptions, which should be re-raised without producing log record. :type blacklisted_exceptions: typing.Optional[typing.Iterable[typing.Type[Exception]]] :param log_call_args: log call arguments before executing wrapped function. :type log_call_args: bool :param log_call_args_on_exc: log call arguments if exception raised. :type log_call_args_on_exc: bool :param log_result_obj: log result of function call. :type log_result_obj: bool :return: built real decorator. :rtype: _log_wrap_shared.BaseLogWrap .. versionchanged:: 3.3.0 Extract func from log and do not use Union. .. versionchanged:: 3.3.0 Deprecation of *args .. versionchanged:: 4.0.0 Drop of *args """ wrapper = LogWrap( log=log, log_level=log_level, exc_level=exc_level, max_indent=max_indent, spec=spec, blacklisted_names=blacklisted_names, blacklisted_exceptions=blacklisted_exceptions, log_call_args=log_call_args, log_call_args_on_exc=log_call_args_on_exc, log_result_obj=log_result_obj ) if func is not None: return wrapper(func) return wrapper
16,175
def sum_digits(number): """ Write a function named sum_digits which takes a number as input and returns the sum of the absolute value of each of the number's decimal digits. """ return sum(int(n) for n in str(number) if n.isdigit())
16,176
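# Tiny examples for sum_digits above (arbitrary inputs); the str()-based loop
# skips the minus sign, so negative inputs behave like their absolute value.
print(sum_digits(1234))  # 10
print(sum_digits(-905))  # 14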
def label(input, structure=None, output=None): """Labels features in an array. Args: input (cupy.ndarray): The input array. structure (array_like or None): A structuring element that defines feature connections. ```structure``` must be centersymmetric. If None, structure is automatically generated with a squared connectivity equal to one. output (cupy.ndarray, dtype or None): The array in which to place the output. Returns: label (cupy.ndarray): An integer array where each unique feature in ```input``` has a unique label in the array. num_features (int): Number of features found. .. warning:: This function may synchronize the device. .. seealso:: :func:`scipy.ndimage.label` """ if not isinstance(input, cupy.ndarray): raise TypeError('input must be cupy.ndarray') if input.dtype.char in 'FD': raise TypeError('Complex type not supported') if structure is None: structure = _generate_binary_structure(input.ndim, 1) elif isinstance(structure, cupy.ndarray): structure = cupy.asnumpy(structure) structure = numpy.array(structure, dtype=bool) if structure.ndim != input.ndim: raise RuntimeError('structure and input must have equal rank') for i in structure.shape: if i != 3: raise ValueError('structure dimensions must be equal to 3') if isinstance(output, cupy.ndarray): if output.shape != input.shape: raise ValueError("output shape not correct") caller_provided_output = True else: caller_provided_output = False if output is None: output = cupy.empty(input.shape, numpy.int32) else: output = cupy.empty(input.shape, output) if input.size == 0: # empty maxlabel = 0 elif input.ndim == 0: # 0-dim array maxlabel = 0 if input.item() == 0 else 1 output[...] = maxlabel else: if output.dtype != numpy.int32: y = cupy.empty(input.shape, numpy.int32) else: y = output maxlabel = _label(input, structure, y) if output.dtype != numpy.int32: output[...] = y[...] if caller_provided_output: return maxlabel else: return output, maxlabel
16,177
def test_two_related_w_a_wout_c(clean_db, family_with_trials, capsys): """Test two related experiments with --all.""" orion.core.cli.main(["status", "--all"]) captured = capsys.readouterr().out expected = """\ test_double_exp-v1 ================== id status -------------------------------- ----------- c2187f4954884c801e423d851aec9a0b broken e42cc22a15188d72df315b9eac79c9c0 completed b849f69cc3a77f39382d7435d0d41b14 interrupted 7fbbd152f7ca2c064bf00441e311609d new 667513aa2cb2244bee9c4f41c7ff1cea reserved 557b9fdb9f96569dff7eb2de10d3946f suspended test_double_exp_child-v1 ======================== id status -------------------------------- ----------- 9bd1ebc475bcb9e077a9e81a7c954a65 broken 3c1af2af2c8dc9862df2cef0a65d6e1f completed 614ec3fc127d52129bc9d66d9aeec36c interrupted 4487e7fc87c288d254f94dfa82cd79cc new 7877287c718d7844570003fd654f66ba reserved ff997e666e20c5a8c1a816dde0b5e2e9 suspended """ assert captured == expected
16,178
def get_experiment_fn(nnObj,data_dir, num_gpus,variable_strategy,use_distortion_for_training=True): """Returns an Experiment function. Experiments perform training on several workers in parallel, in other words experiments know how to invoke train and eval in a sensible fashion for distributed training. Arguments passed directly to this function are not tunable, all other arguments should be passed within tf.HParams, passed to the enclosed function. Args: data_dir: str. Location of the data for input_fns. num_gpus: int. Number of GPUs on each worker. variable_strategy: String. CPU to use CPU as the parameter server and GPU to use the GPUs as the parameter server. use_distortion_for_training: bool. See cifar10.Cifar10DataSet. Returns: A function (tf.estimator.RunConfig, tf.contrib.training.HParams) -> tf.contrib.learn.Experiment. Suitable for use by tf.contrib.learn.learn_runner, which will run various methods on Experiment (train, evaluate) based on information about the current runner in `run_config`. """ def _experiment_fn(run_config, hparams): """Returns an Experiment.""" # Create estimator. train_input_fn = functools.partial( cifar_main.input_fn, data_dir, subset='train', num_shards=num_gpus, batch_size=hparams.train_batch_size, use_distortion_for_training=use_distortion_for_training) eval_input_fn = functools.partial( cifar_main.input_fn, data_dir, subset='eval', batch_size=hparams.eval_batch_size, num_shards=num_gpus) num_eval_examples = cifar10.Cifar10DataSet.num_examples_per_epoch('eval') if num_eval_examples % hparams.eval_batch_size != 0: raise ValueError( 'validation set size must be multiple of eval_batch_size') train_steps = hparams.train_steps eval_steps = num_eval_examples // hparams.eval_batch_size classifier = tf.estimator.Estimator( model_fn=cifar_main.get_model_fn(nnObj,num_gpus, variable_strategy, run_config.num_worker_replicas or 1), config=run_config, params=hparams) vail_accuracy=[] for loop in range(20): classifier.train(train_input_fn,steps=train_steps) vail_accuracy.append(classifier.evaluate(eval_input_fn,steps=eval_steps)) print("finished iter:"+str((loop+1)*train_steps)) print("accuracy:") print(vail_accuracy) # Create experiment. return tf.contrib.learn.Experiment( classifier, train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, train_steps=train_steps, eval_steps=eval_steps) return _experiment_fn
16,179
def calc_psnr(tar_img, ref_img): """ Compute the peak signal to noise ratio (PSNR) for an image. Parameters ---------- tar_img : sitk Test image. ref_img : sitk Ground-truth image. Returns ------- psnr : float The PSNR metric. References ---------- .. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio """ tar_vol = tar_img ref_vol = ref_img ref_vol, tar_vol = _as_floats(ref_vol, tar_vol) err = calc_mse(ref_img, tar_img) return 10 * np.log10((256 ** 2) / err)
16,180
def celegans(path): """Load the neural network of the worm C. Elegans [@watts1998collective]. The neural network consists of around 300 neurons. Each connection between neurons is associated with a weight (positive integer) capturing the strength of the connection. Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `celegansneural.gml`. Returns: Adjacency matrix as a np.darray `x_train` with 297 rows and 297 columns. """ import networkx as nx path = os.path.expanduser(path) filename = 'celegansneural.gml' if not os.path.exists(os.path.join(path, filename)): url = 'http://www-personal.umich.edu/~mejn/netdata/celegansneural.zip' maybe_download_and_extract(path, url) graph = nx.read_gml(os.path.join(path, filename)) x_train = np.zeros([graph.number_of_nodes(), graph.number_of_nodes()], dtype=np.int) for i, j in graph.edges(): x_train[i, j] = int(graph[i][j][0]['value']) return x_train
16,181
def glacier_wrap(
        f: Callable[..., None],
        enum_map: Dict[str, Dict[str, Any]],
) -> Callable[..., None]:
    """
    Return a new click-compatible function (with no enum arguments in its
    signature) wrapping the given glacier-compatible function.
    """
    # Implements the argument conversion logic
    @functools.wraps(f)
    def wrapped(*args: Any, **kwargs: Any) -> None:
        # convert args and kwargs
        converted_kwargs = {}
        for name, value in kwargs.items():
            if name in enum_map:
                converted_kwargs[name] = enum_map[name][value]
            else:
                converted_kwargs[name] = value
        return f(*args, **converted_kwargs)

    return wrapped
16,182
def _cluster_spec_to_device_list(cluster_spec, num_gpus_per_worker): """Returns a device list given a cluster spec.""" cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec) devices = [] for task_type in ("chief", "worker"): for task_id in range(len(cluster_spec.as_dict().get(task_type, []))): if num_gpus_per_worker == 0: devices.append("/job:%s/task:%d" % (task_type, task_id)) else: devices.extend([ "/job:%s/task:%d/device:GPU:%i" % (task_type, task_id, gpu_id) for gpu_id in range(num_gpus_per_worker) ]) return devices
16,183
def group_by_time(df, col, by='day', fun='max', args=(), kwargs={}, index='categories'):
    """
    See <https://pandas.pydata.org/pandas-docs/stable/api.html#groupby>_ for the set
    of `fun` parameters available. Examples are: 'count', 'max', 'min', 'median', etc

    .. Tip:: Since Access inherits from TimeIntervalTable, the underlying data format is a
             `pandas.DataFrame`, not a `pandas.Series`. Consequently, only the groupby functions
             of a generic GroupBy or DataFrameGroupBy are valid. Functions of SeriesGroupBy
             are not allowed.
    """
    if col == 'index':
        t = df.index
    else:
        t = df.loc[:, col].dt

    if by.lower() in ['y', 'year']:
        group = df.groupby([t.year])
        group = getattr(group, fun)(*args, **kwargs)
        group.index.names = ['year']
    elif by.lower() in ['m', 'month']:
        group = df.groupby([t.year, t.month])
        group = getattr(group, fun)(*args, **kwargs)
        group.index.names = ['year', 'month']
    elif by.lower() in ['d', 'day']:
        group = df.groupby([t.year, t.month, t.day])
        group = getattr(group, fun)(*args, **kwargs)
        group.index.names = ['year', 'month', 'day']
    elif by.lower() in ['h', 'hour']:
        group = df.groupby([t.year, t.month, t.day, t.hour])
        group = getattr(group, fun)(*args, **kwargs)
        group.index.names = ['year', 'month', 'day', 'hour']
    elif by.lower() in ['m', 'min', 'minute']:
        group = df.groupby([t.year, t.month, t.day, t.hour, t.minute])
        group = getattr(group, fun)(*args, **kwargs)
        group.index.names = ['year', 'month', 'day', 'hour', 'min']
    elif by.lower() in ['s', 'sec', 'second']:
        group = df.groupby([t.year, t.month, t.day, t.hour, t.minute, t.second])
        group = getattr(group, fun)(*args, **kwargs)
        group.index.names = ['year', 'month', 'day', 'hour', 'min', 'sec']
    else:
        raise KeyError('Grouping can be by "year", "month", "day", "hour", "min" and "sec" only')

    # Choose index
    if index == 'categories':
        return group
    elif index == 'times':
        group.index = pd.DatetimeIndex([pd.Timestamp(*i) for i, _ in group.iterrows()])
        return group
    else:
        raise KeyError('Argument "index={}" is not valid. Options are "categories" or "times"'.format(index))
16,184
def fetch(url, params=None, keepalive=False, requireValidCert=False, debug=False): """ Fetches the desired @url using an HTTP GET request and appending and @params provided in a dictionary. If @keepalive is False, a fresh connection will be made for this request. If @requireValidCert is True, then an exception is thrown if the remote server cannot provide a valid TLS certificate. If @keepalive is False, connections are closed and so subsequent connections must make fresh (cold) HTTPS connections. @returns the result as a dictionary, decoded from server-provided JSON. @raises an exception if there are any problems connecting to the remote server, receiving a valiud HTTP status 200 response, or decoding the resulting JSON response. """ # Set the certificate verification flag httpClient.disable_ssl_certificate_validation = not requireValidCert # Assemble the URL url = getUrl(url, params) if debug: print "Fetching " + url # Fetch the URL with a GET request. response, content = httpClient.request(url, "GET") # Check the status code. if response.status != 200: m = "Remote service reported an error (status:{} {}) for "\ "URL {}".format(response.status, response.reason, url) raise Exception(m) # Close the connection if requested. if not keepalive: map(lambda (k,v): v.close(), httpClient.connections.iteritems()) # Parse the response return json.loads(content)
16,185
def param_rischDE(fa, fd, G, DE): """ Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)). Given a derivation D in k(t), f in k(t), and G = [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and a matrix A with m + r columns and entries in Const(k) such that Dy + f*y = Sum(ci*Gi, (i, 1, m)) has a solution y in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj, (j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a solution of Ax == 0. Elements of k(t) are tuples (a, d) with a and d in k[t]. """ m = len(G) q, (fa, fd) = weak_normalizer(fa, fd, DE) # Solutions of the weakly normalized equation Dz + f*z = q*Sum(ci*Gi) # correspond to solutions y = z/q of the original equation. gamma = q G = [(q*ga).cancel(gd, include=True) for ga, gd in G] a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE) # Solutions q in k<t> of a*Dq + b*q = Sum(ci*Gi) correspond # to solutions z = q/hn of the weakly normalized equation. gamma *= hn A, B, G, hs = prde_special_denom(a, ba, bd, G, DE) # Solutions p in k[t] of A*Dp + B*p = Sum(ci*Gi) correspond # to solutions q = p/hs of the previous equation. gamma *= hs g = A.gcd(B) a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for gia, gid in G] # a*Dp + b*p = Sum(ci*gi) may have a polynomial solution # only if the sum is in k[t]. q, M = prde_linear_constraints(a, b, g, DE) # q = [q1, ..., qm] where qi in k[t] is the polynomial component # of the partial fraction expansion of gi. # M is a matrix with m columns and entries in k. # Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k, # is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0, # in which case the sum is equal to Sum(fi*qi). M, _ = constant_system(M, zeros(M.rows, 1), DE) # M is a matrix with m columns and entries in Const(k). # Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k) # if and only if M*Matrix([c1, ..., cm]) == 0, # in which case the sum is Sum(ci*qi). ## Reduce number of constants at this point V = M.nullspace() # V = [v1, ..., vu] where each vj is a column matrix with # entries aj1, ..., ajm in Const(k). # Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u). # Sum(ci*gi) is in k[t] if and only is ci = Sum(dj*aji) # (i = 1, ..., m) for some d1, ..., du in Const(k). # In that case, # Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj) # where rj = Sum(aji*qi) (j = 1, ..., u) in k[t]. if not V: # No non-trivial solution return [], eye(m) Mq = Matrix([q]) # A single row. r = [(Mq*vj)[0] for vj in V] # [r1, ..., ru] # Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions # y = p/gamma of the initial equation with ci = Sum(dj*aji). try: # We try n=5. At least for prde_spde, it will always # terminate no matter what n is. n = bound_degree(a, b, r, DE, parametric=True) except NotImplementedError: # A temporary bound is set. Eventually, it will be removed. # the currently added test case takes large time # even with n=5, and much longer with large n's. n = 5 h, B = param_poly_rischDE(a, b, r, n, DE) # h = [h1, ..., hv] in k[t]^v and and B is a matrix with u + v # columns and entries in Const(k) such that # a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n # in k[t] if and only if p = Sum(ek*hk) where e1, ..., ev are in # Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0. # The solutions of the original equation for ci = Sum(dj*aji) # (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma. ## Build combined relation matrix with m + u + v columns. 
A = -eye(m) for vj in V: A = A.row_join(vj) A = A.row_join(zeros(m, len(h))) A = A.col_join(zeros(B.rows, m).row_join(B)) ## Eliminate d1, ..., du. W = A.nullspace() # W = [w1, ..., wt] where each wl is a column matrix with # entries blk (k = 1, ..., m + u + v) in Const(k). # The vectors (bl1, ..., blm) generate the space of those # constant families (c1, ..., cm) for which a solution of # the equation Dy + f*y == Sum(ci*Gi) exists. They generate # the space and form a basis except possibly when Dy + f*y == 0 # is solvable in k(t}. The corresponding solutions are # y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u. v = len(h) M = Matrix([wl[:m] + wl[-v:] for wl in W]) # excise dj's. N = M.nullspace() # N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column # vectors generating the space of linear relations between # c1, ..., cm, e1, ..., ev. C = Matrix([ni[:] for ni in N]) # rows n1, ..., ns. return [hk.cancel(gamma, include=True) for hk in h], C
16,186
def main(df: pyam.IamDataFrame) -> pyam.IamDataFrame: """Main function for validation and processing (for the ARIADNE-intern instance)""" # load list of allowed scenario names with open(path / "scenarios.yml", "r") as stream: scenario_list = yaml.load(stream, Loader=yaml.FullLoader) # validate list of submitted scenarios illegal_scens = [s for s in df.scenario if s not in scenario_list] if illegal_scens: raise_error("scenarios", illegal_scens) # call validation function for variables, regions and subannual time resolution df = _validate(df) # call validation function for meta indicators df = _validate_meta(df, ALLOWED_META_ARIADNE) return df
16,187
def test_upgrade_tz_noop(tz): """Tests that non-shim, non-pytz zones are unaffected by upgrade_tzinfo.""" actual = pds_helpers.upgrade_tzinfo(tz) assert actual is tz
16,188
def test_acq_func_set_acq_func_fails_wrong_acqfunc_name(ref_model_and_training_data): """ test that set_acq_func does not set acquisition function if wrong name chosen """ # load data and model train_X = ref_model_and_training_data[0] train_Y = ref_model_and_training_data[1] # load pretrained model model_obj = ref_model_and_training_data[2] lh = ref_model_and_training_data[3] ll = ref_model_and_training_data[4] # the acq func cls = AcqFunction() cls.acq_func = { "type": "WRONG", # define the type of acquisition function "object": None } # set attributes needed for test: train_Y to not None, model to None cls.train_Y = train_Y cls.model = {"model": model_obj, "likelihood": lh, "loglikelihood": ll, } with pytest.raises(Exception) as e: assert cls.set_acq_func() assert str(e.value) == "greattunes.greattunes._acq_func.AcqFunction.set_acq_func: unsupported acquisition " \ "function name provided. '" + cls.acq_func["type"] + "' not in list of supported " \ "acquisition functions [ConstrainedExpectedImprovement, ExpectedImprovement, " \ "NoisyExpectedImprovement, PosteriorMean, ProbabilityOfImprovement, UpperConfidenceBound, "\ "qExpectedImprovement, qKnowledgeGradient, qMaxValueEntropy, " \ "qMultiFidelityMaxValueEntropy, qNoisyExpectedImprovement, qProbabilityOfImprovement, " \ "qSimpleRegret, qUpperConfidenceBound]."
16,189
def huber_loss(x, delta=1.): """ Standard Huber loss of parameter delta https://en.wikipedia.org/wiki/Huber_loss returns 0.5 * x^2 if |a| <= \delta \delta * (|a| - 0.5 * \delta) o.w. """ if torch.abs(x) <= delta: return 0.5 * (x ** 2) else: return delta * (torch.abs(x) - 0.5 * delta)
16,190
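# Small numeric illustration of huber_loss above; the inputs are arbitrary
# scalar tensors and assume torch is imported. Below the default delta=1 the
# loss is quadratic; above it, it grows linearly.
print(huber_loss(torch.tensor(0.5)))  # 0.5 * 0.25 = tensor(0.1250)
print(huber_loss(torch.tensor(3.0)))  # 1 * (3 - 0.5) = tensor(2.5000)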
def licenses_mapper(license, licenses, package): # NOQA """ Update package licensing and return package based on the `license` and `licenses` values found in a package. Licensing data structure has evolved over time and is a tad messy. https://docs.npmjs.com/files/package.json#license license(s) is either: - a string with: - an SPDX id or expression { "license" : "(ISC OR GPL-3.0)" } - some license name or id - "SEE LICENSE IN <filename>" - (Deprecated) an array or a list of arrays of type, url. - "license": "UNLICENSED" means this is proprietary """ declared_license = get_declared_licenses(license) or [] declared_license.extend(get_declared_licenses(licenses) or []) package.declared_license = declared_license return package
16,191
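An illustrative sketch of the license shapes the docstring mentions. `get_declared_licenses` and the package object are not shown above, so minimal hypothetical stand-ins are defined here purely to make the data flow concrete; they are not the real scancode implementations.

class FakePackage:
    declared_license = None

def get_declared_licenses(value):
    # hypothetical stand-in: normalise the historical shapes into a list
    if not value:
        return []
    if isinstance(value, (str, dict)):
        return [value]
    if isinstance(value, list):
        return list(value)
    return []

def licenses_mapper(license, licenses, package):  # NOQA
    declared_license = get_declared_licenses(license) or []
    declared_license.extend(get_declared_licenses(licenses) or [])
    package.declared_license = declared_license
    return package

pkg = licenses_mapper("(ISC OR GPL-3.0)",
                      [{"type": "MIT", "url": "https://opensource.org/licenses/MIT"}],
                      FakePackage())
print(pkg.declared_license)  # a flat list combining the string and the dict entry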
def send_command(target, data): """sends a nudge api command""" url = urljoin(settings.NUDGE_REMOTE_ADDRESS, target) req = urllib2.Request(url, urllib.urlencode(data)) try: return urllib2.urlopen(req) except urllib2.HTTPError, e: raise CommandException( 'An exception occurred while contacting %s: %s' % (url, e), e)
16,192
def logistic_log_partial_ij(x_i, y_i, beta, j):
    """Return the j-th partial derivative of the logistic log-likelihood
    for the single data point (x_i, y_i) at parameters beta."""
    return (y_i - logistic(dot(x_i, beta))) * x_i[j]
16,193
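A self-contained sketch of how this partial derivative might be used in a gradient-ascent step for logistic regression. `logistic` and `dot` are not shown in the snippet above, so plausible stand-ins are defined here, and the toy data set and learning rate are invented.

import math

def logistic(z):
    return 1.0 / (1.0 + math.exp(-z))

def dot(u, v):
    return sum(ui * vi for ui, vi in zip(u, v))

def logistic_log_partial_ij(x_i, y_i, beta, j):
    return (y_i - logistic(dot(x_i, beta))) * x_i[j]

# one gradient-ascent step on a toy two-feature data set
data = [([1.0, 2.0], 1), ([1.0, -1.0], 0)]
beta = [0.0, 0.0]
lr = 0.1
beta = [b + lr * sum(logistic_log_partial_ij(x, y, beta, j) for x, y in data)
        for j, b in enumerate(beta)]
print(beta)  # roughly [0.0, 0.15] after a single step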
def expected_win(theirs, mine):
    """Compute the expected score of my strategy against theirs
    (win probability minus loss probability)"""
    assert abs(theirs.r + theirs.p + theirs.s - 1) < 0.001
    assert abs(mine.r + mine.p + mine.s - 1) < 0.001
    wins = theirs.r * mine.p + theirs.p * mine.s + theirs.s * mine.r
    losses = theirs.r * mine.s + theirs.p * mine.r + theirs.s * mine.p
    return wins - losses
16,194
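A small usage sketch. The Strategy namedtuple is only an assumption about the input shape (any object with r, p, s attributes summing to 1 would do), and the function is restated, without the asserts, so the example runs on its own.

from collections import namedtuple

Strategy = namedtuple("Strategy", ["r", "p", "s"])

def expected_win(theirs, mine):
    # restatement of the snippet above
    wins = theirs.r * mine.p + theirs.p * mine.s + theirs.s * mine.r
    losses = theirs.r * mine.s + theirs.p * mine.r + theirs.s * mine.p
    return wins - losses

rock_heavy = Strategy(0.5, 0.25, 0.25)
uniform = Strategy(1 / 3, 1 / 3, 1 / 3)
all_paper = Strategy(0.0, 1.0, 0.0)
print(expected_win(rock_heavy, uniform))    # 0.0: the uniform strategy is unexploitable
print(expected_win(rock_heavy, all_paper))  # 0.25: all-paper exploits a rock-heavy opponent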
def rock_paper_scissors():
    """Handles the main loop of Rock, Paper, Scissors: the user plays
    against the computer until one side reaches three points."""
    player_points = 0
    comp_points = 0
    lst = ['rock', 'paper', 'scissors']
    while player_points < 3 and comp_points < 3:
        computer = choice(lst)
        while player := input('HEY! what is your choice: '):
            if player not in lst:
                print('Please choose rock, paper or scissors.\n')
                continue
            if player == computer:
                print(f'{computer} \nDraw!\n')
                break
            elif ((player == 'rock' and computer == 'scissors')
                    or (player == 'paper' and computer == 'rock')
                    or (player == 'scissors' and computer == 'paper')):
                print(f'{computer} \nYou Win!\n')
                player_points += 1
                break
            else:
                print(f'{computer} \nYou Lose!\n')
                comp_points += 1
                break
        print(f'You: {player_points}')
        print(f'Comp: {comp_points}')
        if player_points == 3:
            print('Congrats! You Won the Game')
            break
        elif comp_points == 3:
            print('Sorry, You Lost the Game')
            break
16,195
def get_first_where(data, compare):
    """
    Gets the first dictionary in the list that matches the compare-dictionary.

    :param data: List with dictionaries
    :param compare: Dictionary with keys for comparison {'key': 'expected value'}
    :return: first dictionary that matches compare
    """
    l = get_all_where(data, compare)
    if len(l) < 1:
        raise Exception('Data not found! (' + str(compare) + ')')
    return l[0]
16,196
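An illustrative sketch. `get_all_where` is not shown in the snippet above, so a plausible stand-in is defined here purely so the example runs.

def get_all_where(data, compare):
    # plausible stand-in: keep dicts whose items include everything in `compare`
    return [d for d in data if all(d.get(k) == v for k, v in compare.items())]

def get_first_where(data, compare):
    l = get_all_where(data, compare)
    if len(l) < 1:
        raise Exception('Data not found! (' + str(compare) + ')')
    return l[0]

rows = [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}]
print(get_first_where(rows, {"name": "b"}))  # {'id': 2, 'name': 'b'}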
def parse_megam_weights(s, features_count, explicit=True): """ Given the stdout output generated by ``megam`` when training a model, return a ``numpy`` array containing the corresponding weight vector. This function does not currently handle bias features. """ if numpy is None: raise ValueError("This function requires that numpy be installed") assert explicit, "non-explicit not supported yet" lines = s.strip().split("\n") weights = numpy.zeros(features_count, "d") for line in lines: if line.strip(): fid, weight = line.split() weights[int(fid)] = float(weight) return weights
16,197
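A small usage sketch with a fabricated megam-style output string (whitespace-separated feature-id / weight pairs per line, which is what the parser above expects); numpy is required, and the function is restated so the example is self-contained.

import numpy

def parse_megam_weights(s, features_count, explicit=True):
    # mirrors the snippet above, minus the numpy-availability guard
    assert explicit, "non-explicit not supported yet"
    weights = numpy.zeros(features_count, "d")
    for line in s.strip().split("\n"):
        if line.strip():
            fid, weight = line.split()
            weights[int(fid)] = float(weight)
    return weights

fake_stdout = "0 0.75\n2 -1.5\n"
print(parse_megam_weights(fake_stdout, 4))  # indices 0 and 2 get the parsed weights, the rest stay 0.0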
def syntheticModeOn(): """Sets the global syntheticMode flag to True.""" setGlobalVariable('syntheticModeFlag', True)
16,198
def project_statistics(contributions): """Returns a dictionary containing statistics about all projects.""" projects = {} for contribution in contributions: # Don't count unreviewed contributions if contribution["status"] == "unreviewed": continue project = contribution["repository"] utopian_vote = contribution["utopian_vote"] # Set default in case category doesn't exist projects.setdefault( project, { "project": project, "average_score": [], "average_without_0": [], "voted": 0, "not_voted": 0, "unvoted": 0, "task-requests": 0, "moderators": [], "average_payout": [], "total_payout": 0, "utopian_total": [] } ) # Check if contribution was voted on or unvoted if contribution["status"] == "unvoted": projects[project]["unvoted"] += 1 projects[project]["not_voted"] += 1 elif contribution["voted_on"]: projects[project]["voted"] += 1 else: projects[project]["not_voted"] += 1 # If contribution was a task request count this if "task" in contribution["category"]: projects[project]["task-requests"] += 1 # Add moderator and score projects[project]["moderators"].append(contribution["moderator"]) projects[project]["average_score"].append(contribution["score"]) projects[project]["total_payout"] += contribution["total_payout"] projects[project]["utopian_total"].append(utopian_vote) if contribution["score"] > 0: projects[project]["average_without_0"].append( contribution["score"]) project_list = [] for project, value in projects.items(): # Set new keys and append value to list value["reviewed"] = value["voted"] + value["not_voted"] value["average_score"] = average(value["average_score"]) value["average_without_0"] = average(value["average_without_0"]) value["average_payout"] = value["total_payout"] / value["reviewed"] value["moderators"] = Counter(value["moderators"]).most_common() value["pct_voted"] = percentage(value["reviewed"], value["voted"]) # Add Utopian.io's vote statistics value["utopian_total"] = [vote for vote in value["utopian_total"] if vote != 0] value["average_utopian_vote"] = average(value["utopian_total"]) value["utopian_total"] = sum(value["utopian_total"]) project_list.append(value) return {"projects": project_list}
16,199
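A hedged usage sketch for the aggregation above: `average` and `percentage` are helpers not shown in the snippet, so simple stand-ins are defined here; the contribution dicts are fabricated to match the keys the function reads; and `project_statistics` itself is assumed to be in scope from the code above.

from collections import Counter  # needed by project_statistics if pasted into the same file

def average(values):
    # stand-in for the helper the snippet relies on
    return sum(values) / len(values) if values else 0

def percentage(total, part):
    # stand-in: share of `part` in `total`, as a percentage
    return 100 * part / total if total else 0

contributions = [
    {"status": "voted", "repository": "repo-a", "utopian_vote": 12.5,
     "category": "development", "moderator": "alice", "score": 70,
     "total_payout": 25.0, "voted_on": True},
    {"status": "reviewed", "repository": "repo-a", "utopian_vote": 0,
     "category": "task-development", "moderator": "bob", "score": 0,
     "total_payout": 0.0, "voted_on": False},
]

stats = project_statistics(contributions)["projects"][0]
print(stats["reviewed"], stats["pct_voted"], stats["average_without_0"])  # 2 50.0 70.0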