Dataset schema: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def compute_win_state_str_row(n_rows, n_cols, n_connects):
    """Each win state will be a string of 0s and 1s which can then be
    converted into an integer in base 2. I assume that at the maximum
    n_rows = n_cols = 5, which means that a 31 bit integer (since in Python
    it's always signed) should be more than enough for a 25 bit string.
    """
    n_cells = n_rows * n_cols
    win_states = list()
    # each iteration in the for loop computes the possible
    # winning states for a particular row, e.g.,
    # - if n_connects == n_cols, there's just one winning state for row
    for row_ind in range(n_rows):
        prefix = '0' * (row_ind * n_cols)
        row_end = (row_ind * n_cols) + n_cols
        win_start_ind = row_ind * n_cols
        win_end_ind = win_start_ind + n_connects
        while win_end_ind <= row_end:
            # save the winning state
            suffix = '0' * (n_cells - win_end_ind)
            win_state = prefix + '1' * n_connects + suffix
            win_states.append(win_state)
            # update for the next possible win state of the row
            win_start_ind = win_start_ind + 1
            win_end_ind = win_start_ind + n_connects
            prefix += '0'
    return win_states
5,331,000
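A quick sanity check of the function above (not part of the dataset row): on a 3x3 board with 3-in-a-row, each row has exactly one winning state.

    states = compute_win_state_str_row(3, 3, 3)
    # ['111000000', '000111000', '000000111']
    masks = [int(s, 2) for s in states]  # [448, 56, 7]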
def test_xi_transform_vgp_vs_gpr(gpr_and_vgp, xi_transform):
    """
    With other transforms the solution is not given in a single step, but it
    should still give the same answer after a number of smaller steps.
    """
    gpr, vgp = gpr_and_vgp
    assert_gpr_vs_vgp(gpr, vgp, gamma=0.01, xi_transform=xi_transform, maxiter=500)
5,331,001
def hmac_sha512(key: bytes, data: bytes) -> bytes:
    """
    Return the SHA512 HMAC for the byte sequence ``data`` generated with the
    secret key ``key``. Corresponds directly to the
    "HMAC-SHA512(Key = ..., Data = ...)" function in BIP32
    (https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).

    :param key: The secret key used for HMAC calculation.
    :param data: The data for which an HMAC should be calculated.
    :return: A byte sequence containing the HMAC of ``data`` generated with
        the secret key ``key``.
    """
    h = hmac.new(key, data, hashlib.sha512)
    return h.digest()
5,331,002
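A minimal usage sketch of the function above, assuming only the standard-library imports it relies on:

    import hashlib
    import hmac

    mac = hmac_sha512(b"secret", b"payload")
    assert len(mac) == 64  # SHA-512 digests are 64 bytes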
def __scheduler_trigger(cron_time_now, now_sec_tuple, crontask, deltasec=2):
    """
    SchedulerCore logic
    actual time: cron_time_now format: (WD, H, M, S)
    actual time in sec: now_sec_tuple: (H sec, M sec, S)
    crontask: ("WD:H:M:S", "LM FUNC")
    deltasec: sample time window: +/- sec: -sec--|event|--sec-
    """
    # Resolve "normal" time
    check_time = tuple(int(t.strip()) if t.isdigit() else t.strip()
                       for t in crontask[0].split(':'))
    # Resolve "time tag" to "normal" time
    if len(check_time) < 3:
        tag = crontask[0].strip()
        value = Sun.TIME.get(tag, None)
        if value is None or len(value) < 3:
            errlog_add('cron syntax error: {}:{}'.format(tag, value))
            return False
        check_time = ('*', value[0], value[1], value[2])
    # Cron actual time (now) parts summary in sec
    check_time_now_sec = now_sec_tuple[0] + now_sec_tuple[1] + now_sec_tuple[2]
    # Cron overall requested time in sec - hour in sec, minute in sec, sec
    check_time_scheduler_sec = int(now_sec_tuple[0] if check_time[1] == '*' else check_time[1] * 3600) \
                               + int(now_sec_tuple[1] if check_time[2] == '*' else check_time[2] * 60) \
                               + int(now_sec_tuple[2] if check_time[3] == '*' else check_time[3])
    # Time frame +/- corrections
    tolerance_min_sec = 0 if check_time_now_sec - deltasec < 0 else check_time_now_sec - deltasec
    tolerance_max_sec = check_time_now_sec + deltasec
    task_id = "{}:{}|{}".format(check_time[0], check_time_scheduler_sec,
                                str(crontask[1]).replace(' ', ''))

    # Check WD - WEEK DAY
    if check_time[0] == '*' or check_time[0] == cron_time_now[0]:
        # Check H, M, S in sec format between tolerance range
        if tolerance_min_sec <= check_time_scheduler_sec <= tolerance_max_sec:
            __cron_task_cache_manager(check_time_now_sec, deltasec)
            if check_time[3] == '*' or task_id not in LAST_CRON_TASKS:
                lm_state = False
                if isinstance(crontask[1], str):
                    # [1] Execute Load Module as a string (user LMs)
                    lm_state = exec_lm_core_schedule(crontask[1].split())
                else:
                    try:
                        # [2] Execute function reference (built-in functions)
                        console_write("[builtin cron] {}".format(crontask[1]()))
                        lm_state = True
                    except Exception as e:
                        errlog_add("[cron] function exec error: {}".format(e))
                if not lm_state:
                    console_write("[cron]now[{}] {} <-> {} conf[{}] exec[{}] LM: {}"
                                  .format(cron_time_now,
                                          __convert_sec_to_time(tolerance_min_sec),
                                          __convert_sec_to_time(tolerance_max_sec),
                                          crontask[0], lm_state, crontask[1]))
                # SAVE TASK TO CACHE
                if check_time[3] != '*':
                    # SAVE WHEN SEC not *
                    LAST_CRON_TASKS.append(task_id)
                return True
    return False
5,331,003
def gen_canvas_config() -> Dict:
    """Generates yaml config from user input for canvas interface

    Returns
    -------
    Dict
        canvas config dict
    """
    # check for existing canvas config
    if (
        os.path.exists(CANVAS_CONF_PATH)
        and (input("Delete existing canvas config and restart [y/N] ?: ").lower() != "y")
    ):
        with open(CANVAS_CONF_PATH, "r") as canvas_conf_in:
            return yaml.load(canvas_conf_in, Loader=yaml.Loader)
    # init canvas conf
    canvas_conf = {}
    # get API url, username, key
    canvas_conf["api_url"] = input("Canvas URL: ")
    canvas_conf["api_username"] = input("Canvas Username: ")
    keyring.set_password('canvas-token', canvas_conf["api_username"],
                         getpass("Canvas Key: "))
    # dump config
    with open(CANVAS_CONF_PATH, "w") as canvas_conf_out:
        yaml.dump(canvas_conf, canvas_conf_out)
    return canvas_conf
5,331,004
def retweet(tweet):
    """Attempts to retweet a tweet."""
    t.statuses.retweet._id(_id=tweet["id"])
5,331,005
def get13FAmendmentType(accNo, formType=None):
    """
    Gets the amendment type for a 13F-HR/A filing - may be RESTATEMENT or
    NEW HOLDINGS. This turned out to be unreliable (often missing or wrong),
    so I don't use it to get the combined holdings for an investor. Instead
    I just look at the number of holdings in an amendment compared to the
    previous filing, and treat it as a restatement if the new number of
    holdings is more than half the old number.
    """
    info = basicInfo.getSecFormInfo(accNo, formType)
    xmlUrls = [l[-1] for l in info['links'] if l[0].lower().endswith('xml')]
    xmlSummTab = utils.downloadSecUrl(xmlUrls[0], toFormat='xml')
    coverPage = findChildSeries(xmlSummTab, ['formdata', 'coverpage'])
    isAmendment = findChildEndingWith(coverPage, 'isamendment')
    if isAmendment is None or isAmendment.text.strip().lower() not in ['true', 'yes']:
        return None
    return findChildSeries(coverPage, ['amendmentinfo', 'amendmenttype']).text.strip()
5,331,006
def tabs_to_cover_string(string):
    """
    Get the number of tabs required to be at least the same length as a
    given string.

    :param string: The string
    :return: The number of tabs to cover it
    :rtype: int
    """
    num_tabs = int(np.floor(len(string) / 8) + 1)
    return num_tabs
5,331,007
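A quick illustration, assuming the 8-column tab stops the formula implies:

    tabs_to_cover_string("hello")   # 1 (5 chars fits within one tab stop)
    tabs_to_cover_string("a" * 12)  # 2 (12 chars needs two 8-column stops)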
def kl(p, q):
    """
    Kullback-Leibler divergence for discrete probability distributions.

    Parameters
    ----------
    p: ndarray
        probability mass function
    q: ndarray
        probability mass function

    Returns
    -------
    float : D(P || Q) = sum(p(i) * log(p(i)/q(i)))
    """
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
5,331,008
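A short worked example of the formula above (natural-log KL, so the result is in nats):

    import numpy as np

    p = np.array([0.5, 0.5])
    q = np.array([0.9, 0.1])
    kl(p, q)  # ~0.511: 0.5*ln(0.5/0.9) + 0.5*ln(0.5/0.1)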
def ProcessDirectory(dirname, output_lines):
    """Processes a directory of batch sim files.

    If directory contents are sufficiently old, appends to outfile comments
    that describe the directory and a command to remove it.

    Args:
        dirname: Full Cloud Storage path of the directory, with trailing slash.
        output_lines: Cumulative list of lines for the GC script.
    """
    contents = subprocess.check_output(['gsutil', 'ls', '-lh', dirname + '*'])
    last_update = None
    for line in contents.splitlines():
        dates = DATE_REGEX.findall(line)
        if dates:
            assert len(dates) == 1
            t_updated = datetime.datetime.strptime(dates[0], '%Y-%m-%dT%H:%M:%SZ')
            if last_update is None or t_updated > last_update:
                last_update = t_updated
    assert last_update is not None
    # Compare against the most recent update, not the last line parsed.
    if (datetime.datetime.today() - last_update).days <= DAYS_TO_KEEP:
        return
    output_lines += ['# Directory %s' % dirname,
                     '# Last update: %s' % last_update,
                     'gsutil -m rm -rf %s' % dirname,
                     '']
5,331,009
def map_uris(uris):
    """Map URIs from external URI to HDFS

    :return:
    """
    pkgs_path = __pillar__['hdfs']['pkgs_path']
    ns = nameservice_names()
    return map(lambda x: 'hdfs://{0}{1}/{2}'.format(
        ns[0], pkgs_path, __salt__['system.basename'](x)), uris)
5,331,010
def delete_task(id: str, db: Session = Depends(get_db)) -> Any:
    """Delete a task"""
    try:
        todo_interactor = ToDoInteractor(db=db)
        todo_interactor.remove(id=id)
        return {"success": f"removed task: {id}"}
    except HTTPException as e:
        logger.exception(e)
        raise  # re-raise the original HTTPException unchanged
    except Exception as e:
        logger.exception(e)
        raise ToDoInteractorError(e)
5,331,011
def get_body(name):
    """Retrieve the Body structure of a JPL .bsp file object

    Args:
        name (str)

    Return:
        :py:class:`~beyond.constants.Body`
    """
    return Pck()[name]
5,331,012
def is_available():
    """
    Convenience function to check if the current platform is supported by
    this module.
    """
    return ProcessMemoryInfo().update()
5,331,013
def test_dequeue(dq):
    """Test append to tail."""
    dq.append(2)
    assert dq.tail.val == 2
5,331,014
def render_path_spiral(c2w, up, rads, focal, zrate, rots, N):
    """
    Enumerate a list of poses around a spiral, used for test set
    visualization.
    """
    render_poses = []
    rads = np.array(list(rads) + [1.])
    for theta in np.linspace(0., 2. * np.pi * rots, N + 1)[:-1]:
        c = np.dot(c2w[:3, :4],
                   np.array([np.cos(theta), -np.sin(theta),
                             -np.sin(theta * zrate), 1.]) * rads)
        z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.])))
        render_poses.append(viewmatrix(z, up, c))
    return render_poses
5,331,015
def placeAnchorSourceToLagunaTX(
    common_anchor_connections: Dict[str, List[Dict[str, Any]]]
) -> List[str]:
    """
    The anchors are placed on the Laguna RX registers.
    We move the source cell of the anchor onto the corresponding TX registers.
    """
    anchor_to_source_cell = _getAnchorToSourceCell(common_anchor_connections)

    slr_to_source_cell_to_loc = defaultdict(dict)
    for anchor, loc in anchor_2_loc.items():
        assert 'LAGUNA' in loc and 'RX_REG' in loc
        source_cell = anchor_to_source_cell[anchor]
        # if two anchor registers are connected
        if 'q0_reg' in source_cell:
            assert False, source_cell
        target_tx = getPairingLagunaTXOfRX(loc)
        slr_index = getSLRIndexOfLaguna(target_tx)
        slr_to_source_cell_to_loc[slr_index][source_cell] = target_tx

    script = []
    for slr_index, source_cell_to_loc in slr_to_source_cell_to_loc.items():
        script.append('catch { place_cell { \\')
        for source_cell, loc in source_cell_to_loc.items():
            script.append(f' {source_cell} {loc} \\')
        script.append('} }')
    # if both the TX and the RX lagunas are in the FIXED state, the router
    # will not perform hold violation fix
    # ("-hierarchical" fixed from the original's misspelled "-hierachical")
    script.append('catch { set_property IS_LOC_FIXED 0 '
                  '[get_cells -hierarchical -filter { BEL =~ *LAGUNA*TX* }] }')
    open('place_laguna_anchor_source_cells.tcl', 'w').write('\n'.join(script))
    return script
5,331,016
def sendEmailWithAttachment(subject, message, from_email, to_email=[], attachment=[]):
    """
    :param subject: email subject
    :param message: Body content of the email (string), can be HTML/CSS or plain text
    :param from_email: Email address from where the email is sent
    :param to_email: List of email recipients, example: ["a@a.com", "b@b.com"]
    :param attachment: List of attachments, example: ["file1.txt", "file2.txt"]
    """
    msg = MIMEMultipart()
    msg['Subject'] = subject
    msg['From'] = from_email
    msg['To'] = ", ".join(to_email)
    msg.attach(MIMEText(message, 'html'))
    for f in attachment:
        with open(f, 'rb') as a_file:
            basename = os.path.basename(f)
            part = MIMEApplication(a_file.read(), Name=basename)
        part['Content-Disposition'] = 'attachment; filename="%s"' % basename
        msg.attach(part)
    # email = smtplib.SMTP('your-smtp-host-name.com')
    # email.sendmail(from_email, to_email, msg.as_string())
    s = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        s.starttls()
        s.login("hoangson0409@gmail.com", "methambeo1997")
        s.sendmail(from_email, to_email, msg.as_string())
    except Exception as err:
        print("Error while sending email: ", err)
    finally:
        s.quit()
5,331,017
def post_example_form():
    """Example of a post form."""
    return render_template("post-form.html")
5,331,018
def validate(data):
    """Validates incoming data

    Args:
        data(dict): the incoming data

    Returns:
        True if the data is valid

    Raises:
        ValueError: the data is not valid
    """
    if not isinstance(data, dict):
        raise ValueError("data should be dict")
    if "text" not in data or not isinstance(data["text"], str) or len(data["text"]) < 1:
        raise ValueError("text field is required and should not be empty")
    if "markdown" in data and not isinstance(data["markdown"], bool):
        raise ValueError("markdown field should be bool")
    if "attachments" in data:
        if not isinstance(data["attachments"], list):
            raise ValueError("attachments field should be list")
        for attachment in data["attachments"]:
            if "text" not in attachment and "title" not in attachment:
                raise ValueError("text or title is required in attachment")
    return True
5,331,019
def stripExtra(name):
    """This function removes a parenthesized suffix from a string.

    *Can later be extended for other uses like removing other characters
    from a string.

    Args:
        name (string): character's name

    Returns:
        string: character's name without the parenthesized part
    """
    startIndexPer = name.find('(')
    start = 0
    if startIndexPer != -1:
        start = startIndexPer
    if start == 0:
        return name
    else:
        return name[0:start - 1]
5,331,020
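For illustration (note the slice also drops the space before the parenthesis):

    stripExtra("Walter White (Heisenberg)")  # 'Walter White'
    stripExtra("Skyler")                     # unchanged: 'Skyler'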
def google_maps(maiden: str) -> str:
    """
    Generate Google Maps URL from Maidenhead grid

    Parameters
    ----------
    maiden : str
        Maidenhead grid

    Returns
    -------
    url : str
        Google Maps URL
    """
    latlon = toLoc(maiden)
    url = ("https://www.google.com/maps/@?api=1&map_action=map"
           "&center={},{}".format(latlon[0], latlon[1]))
    return url
5,331,021
def test_lin_norm_1():
    """Check that negative entries become 0."""
    x = torch.ones((5, 5))
    x[:, 0] = -1
    x_normed = ei.lin_norm(x)
    assert all(x_normed[:, 0] == 0)
    for row in x_normed[:, 1:]:
        assert all(row == 0.25)
5,331,022
def format_signed(feature,        # type: Dict[str, Any]
                  formatter=None, # type: Callable[..., str]
                  **kwargs
                  ):
    # type: (...) -> str
    """
    Format unhashed feature with sign.

    >>> format_signed({'name': 'foo', 'sign': 1})
    'foo'
    >>> format_signed({'name': 'foo', 'sign': -1})
    '(-)foo'
    >>> format_signed({'name': ' foo', 'sign': -1}, lambda x: '"{}"'.format(x))
    '(-)" foo"'
    """
    txt = '' if feature['sign'] > 0 else '(-)'
    name = feature['name']  # type: str
    if formatter is not None:
        name = formatter(name, **kwargs)
    return '{}{}'.format(txt, name)
5,331,023
def load_ligand(sdf):
    """Loads a ligand from an sdf file and fragments it.

    Args:
        sdf: Path to sdf file containing a ligand.
    """
    lig = next(Chem.SDMolSupplier(sdf, sanitize=False))
    frags = generate_fragments(lig)
    return lig, frags
5,331,024
def CMDpending(parser, args):
    """Lists pending jobs."""
    parser.add_option('-b', '--builder', dest='builders', action='append',
                      default=[], help='Builders to filter on')
    options, args, buildbot = parser.parse_args(args)
    if args:
        parser.error('Unrecognized parameters: %s' % ' '.join(args))
    if not options.builders:
        options.builders = buildbot.builders.keys
    for builder in options.builders:
        builder = buildbot.builders[builder]
        pending_builds = builder.data.get('pendingBuilds', 0)
        if not pending_builds:
            continue
        print('Builder %s: %d' % (builder.name, pending_builds))
        if not options.quiet:
            for pending in builder.pending_builds.data:
                if 'revision' in pending['source']:
                    print(' revision: %s' % pending['source']['revision'])
                for change in pending['source']['changes']:
                    print(' change:')
                    print('   comment: %r' % unicode(change['comments'][:50]))
                    print('   who:     %s' % change['who'])
    return 0
5,331,025
def numpy_translation(xyz):
    """Returns the dual quaternion for a pure translation."""
    res = np.zeros(8)
    res[3] = 1.0
    res[4] = xyz[0] / 2.0
    res[5] = xyz[1] / 2.0
    res[6] = xyz[2] / 2.0
    return res
5,331,026
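A quick check of the layout above (scalar part at index 3, half-translation in the dual part):

    import numpy as np

    numpy_translation([2.0, 4.0, 6.0])
    # array([0., 0., 0., 1., 1., 2., 3., 0.])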
def make_crops(bids_folder, metadata_file, out_dir, out_file, new_size):
    """
    Create a new dataset of crops from an existing dataset.

    Given a folder of images, and a csv file containing the info about the
    dataset, this function makes random crops of the images and saves them
    to disk.

    bids_folder: folder where the images are stored (in BIDS format)
    metadata_file: path to csv file where the info about images is stored.
    out_dir: directory where to save the cropped images
    out_file: file where to store the info about the crops
    new_size: size to resample the images.
    """
    df_metadata = pd.read_csv(metadata_file)
    layout = bids.layout.BIDSLayout([(bids_folder, 'bids')])
    # For each entry in the metadata file
    rows_list = []
    for subj in df_metadata.itertuples():
        # locate the corresponding MRI scan
        ptid = subj.PTID
        ptid_bids = 'ADNI' + ptid[0:3] + 'S' + ptid[6:]
        # Hardcoded baselines
        try:
            file = layout.get(subject=ptid_bids, extensions='.nii.gz',
                              modality='anat', session='M00',
                              return_type='file')[0]
        except IndexError:
            print('Ignoring subject ' + ptid)
            continue  # skip to the next subject; `file` is unbound here
        # Actually perform the cropping
        new_crops = slice_generator(file, out_dir, new_size)
        # Iterate over all the new crops
        for crop in new_crops:
            row = {"path": crop, "PTID": subj.PTID, "DX": subj.DX}
            rows_list.append(row)
    # Save the new info about the image in df_crop
    df_crop = pd.DataFrame(rows_list)
    df_crop.to_csv(out_file)
5,331,027
def sort_car_models(car_db):
    """Return a copy of the cars dict with the car models (values) sorted
    alphabetically."""
    sorted_db = {}
    for make in car_db:
        sorted_db[make] = sorted(car_db[make])
    return sorted_db
5,331,028
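For example:

    cars = {"Ford": ["Focus", "Fiesta", "Escort"]}
    sort_car_models(cars)  # {'Ford': ['Escort', 'Fiesta', 'Focus']}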
def test_module(params) -> str:
    """Tests API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed
    to and connection to the service is successful. Raises exceptions if
    something goes wrong.

    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
    try:
        url = params.get('url')[:-1] if str(params.get('url')).endswith('/') \
            else params.get('url')
        credentials = params.get('apikey')
        creds = "Bearer " + credentials
        headers = {"Authorization": creds}
        url = urljoin(url, '/customer/getSubmission/7bf5ba92-30e1-4d42-821f-6d4ac94c3be1')
        response = requests.request("GET", url, headers=headers)
        status = response.status_code
        if status != 200:
            if 'UnauthorizedError' in str(response.content):
                return 'Authorization Error: make sure API Key is correctly set'
            else:
                return str(status)
    except Exception as e:
        raise e
    return 'ok'
5,331,029
def create(auto_remove: bool = False) -> Tuple[str, str]:
    """
    Creates a database inside a docker container

    :return: container name, database name
    :rtype: Tuple[str, str]
    """
    piccolo_docker_repository = PiccoloDockerRepository(auto_remove=auto_remove)
    piccolo_docker_repository.create_container()
    container: Container = piccolo_docker_repository.container
    database_name: str = ""
    if container:
        loop = asyncio.get_event_loop()
        database_name = loop.run_until_complete(
            piccolo_docker_repository.create_database()
        )
    return container.name, database_name
5,331,030
def get_by_id(db: Session, work_item_id):
    """Get a specified WorkItem and return it."""
    workitem = db.get(WorkItem, work_item_id)
    if workitem:
        return workitem
    logger.debug("Item not found")
    raise HTTPException(status_code=404, detail="Item not found")
5,331,031
def evaluate(args, model, eval_dataset, tokenizer, step, prefix=""):
    """
    Evaluation of model

    :param args: input arguments from parser
    :param model: pytorch model to be evaluated
    :param eval_dataset: dataset used for evaluation
    :param tokenizer: tokenizer used by the model
    :param step: the current step in training
    :param prefix: prescript to be added to the beginning of save file
    :return: results of evaluation
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    print('')
    eval_output_dir = args.output_dir
    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)
    eval_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm=True, mlm_probability=0.15
    )
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset,
                                 sampler=eval_sampler,
                                 batch_size=eval_batch_size,
                                 collate_fn=data_collator)

    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataset))
    # log the batch size actually used for evaluation
    logger.info(" Batch size = %d", eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating", position=0, leave=True):
        with torch.no_grad():
            outputs = model(input_ids=batch['input_ids'].to(args.device),
                            labels=batch['labels'].to(args.device))
            loss = outputs['loss']
            eval_loss += loss.mean().item()
        nb_eval_steps += 1

    eval_loss /= nb_eval_steps
    perplexity = torch.exp(torch.tensor(eval_loss))
    result = {
        "perplexity": perplexity,
        'loss': eval_loss,
        "Iteration": str(step)
    }

    output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
    with open(output_eval_file, "a") as writer:
        logger.info("***** Eval results {} *****".format(prefix))
        writer.write('\n')
        for key in sorted(result.keys()):
            logger.info(" %s = %s", key, str(result[key]))
            writer.write("%s = %s, " % (key, str(result[key])))
    return result
5,331,032
def main():
    """
    Existing commands: stop, start, cancel, show, set_work, set_password,
    set_user, year, day, list, overtime (ot), public-holiday (public),
    projects
    """
    # Parsing the data
    parser = argparse.ArgumentParser(description=main.__doc__)
    parser.add_argument('command', type=str)
    parser.add_argument('option', type=str, nargs='?')
    parser.add_argument('note', type=str, nargs='?')
    parser.add_argument('-s', '--start', metavar='starting_time',
                        help='Set the starting time of the action',
                        type=str, default=None)
    args = parser.parse_args()
    command = args.command
    note = args.note
    option = None if not args.option else args.option
    config = load_config()
    data = config.get('data', None)
    start = args.start
    if start is None:
        start = NOW

    # command stop
    if command == "stop":
        if data is None:
            exit("You are not currently working")
        add_to_tt(config, data, start)
        remove_data(config)
    # command start
    elif command == "start":
        if data is not None:
            add_to_tt(config, data, start)
        project = get_project(config, option, start, note)
        config['data'] = project.__dict__
        save_config(config)
    elif command == "cancel":
        remove_data(config)
    elif command == "show":
        if data is not None:
            project = Task.from_dict(data)
            print("Current work: {}".format(project.to_str()))
        else:
            print("No current work.")
        project = Task.from_dict(config.get("saved_work", {}))
        if project is not None:
            print("Current default work: {}".format(project.to_str()))
        else:
            print("No current default work.")
        print("Current user: {}".format(config.get("user", "None")))
    elif command == "set_work":
        project = get_project(config, option, start, note)
        config["saved_work"] = project.__dict__
        save_config(config)
    elif command == "set_password":
        config["password"] = option
        save_config(config)
    elif command == "set_user":
        config["user"] = option
        save_config(config)
    elif command == "year":
        if not option:
            option = YEAR
        from_ = "01.01.{}".format(str(option))
        to_ = "31.12.{}".format(str(option))
        tot = get_hour(config, from_, to_)
        tot += remaining(data)
        print(tot)
    elif command == "day":
        if not option:
            option = TODAY_FOR_REPORT
        tot = get_hour(config, option, option)
        tot += remaining(data)
        print(tot)
    elif command == "overtime" or command == "ot":
        working_day = np.busday_count(YEAR, TODAY, weekmask='1111100',
                                      holidays=PUBLIC_HOLIDAY)
        working_hours = (working_day * HOURS_PER_DAY) * int(config.get("percentage", 100)) / 100
        from_ = "01.01.{}".format(str(YEAR))
        yesterday = (datetime.now() - timedelta(1)).strftime(DATE_FMT_FOR_REPORT)
        worked_hours = get_hour(config, from_, yesterday)
        worked_hours_today = get_hour(config, TODAY_FOR_REPORT, TODAY_FOR_REPORT) + remaining(data)
        worked_hours += worked_hours_today
        if worked_hours_today < HOURS_PER_DAY:
            working_hours += worked_hours_today
            should_work_until = (datetime.strptime(NOW, FMT)
                                 + timedelta(hours=(HOURS_PER_DAY - worked_hours_today)))
            print("Your day is not done, you should work until",
                  datetime.strftime(should_work_until, FMT))
        else:
            working_hours += HOURS_PER_DAY
            overtime_hours, overtime_minutes = \
                hours_to_hours_mn((worked_hours_today - HOURS_PER_DAY))
            print(f"Well, imma head out ! {overtime_hours}h {overtime_minutes}mn"
                  f" of overtime today !")
        print("You worked {} hours in {}:".format(round(worked_hours, 2), YEAR))
        print("You should have worked at least:", working_hours)
        overtime = round(worked_hours - working_hours, 2)
        if overtime >= 0:
            overtime_h, overtime_mn = hours_to_hours_mn(overtime)
            print("You have {}h {}mn overtime hours. GG! 🍺🥳".format(overtime_h, overtime_mn))
        else:
            print("Uh oh, you are {} late! Gotta catch up! 🤡".format(-overtime))
    elif command == "public-holiday" or command == "public":
        print("Here are the public holidays for {}:".format(YEAR))
        print(PUBLIC_HOLIDAY)
    elif command == "projects":
        for name, value in TASK_MAP.items():
            print(value.to_str())
    else:
        exit("Unknown command")
5,331,033
def test(path, shell, indent=2):
    """Run test at path and return input, output, and diff.

    This returns a 3-tuple containing the following:

        (list of lines in test, same list with actual output, diff)

    diff is a generator that yields the diff between the two lists. If a
    test exits with return code 80, the actual output is set to None and
    diff is set to [].
    """
    indent = ' ' * indent
    cmdline = '%s$ ' % indent
    conline = '%s> ' % indent

    f = open(path)
    abspath = os.path.abspath(path)
    env = os.environ.copy()
    env['TESTDIR'] = os.path.dirname(abspath)
    env['TESTFILE'] = os.path.basename(abspath)
    p = subprocess.Popen([shell, '-'], bufsize=-1, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                         universal_newlines=True, env=env,
                         preexec_fn=makeresetsigpipe(),
                         close_fds=os.name == 'posix')
    salt = 'CRAM%s' % time.time()

    after = {}
    refout, postout = [], []
    i = pos = prepos = -1
    stdin = []
    for i, line in enumerate(f):
        refout.append(line)
        if line.startswith(cmdline):
            after.setdefault(pos, []).append(line)
            prepos = pos
            pos = i
            stdin.append('echo "\n%s %s $?"\n' % (salt, i))
            stdin.append(line[len(cmdline):])
        elif line.startswith(conline):
            after.setdefault(prepos, []).append(line)
            stdin.append(line[len(conline):])
        elif not line.startswith(indent):
            after.setdefault(pos, []).append(line)
    stdin.append('echo "\n%s %s $?"\n' % (salt, i + 1))

    output = p.communicate(input=''.join(stdin))[0]
    if p.returncode == 80:
        return (refout, None, [])

    # Add a trailing newline to the input script if it's missing.
    if refout and not refout[-1].endswith('\n'):
        refout[-1] += '\n'

    # We use str.split instead of splitlines to get consistent
    # behavior between Python 2 and 3. In 3, we use unicode strings,
    # which has more line breaks than \n and \r.
    pos = -1
    ret = 0
    for i, line in enumerate(output[:-1].split('\n')):
        line += '\n'
        if line.startswith(salt):
            presalt = postout.pop()
            if presalt != '%s\n' % indent:
                postout.append(presalt[:-1] + ' (no-eol)\n')
            ret = int(line.split()[2])
            if ret != 0:
                postout.append('%s[%s]\n' % (indent, ret))
            postout += after.pop(pos, [])
            pos = int(line.split()[1])
        else:
            if needescape(line):
                line = escape(line)
            postout.append(indent + line)
    postout += after.pop(pos, [])

    diffpath = os.path.basename(abspath)
    diff = unified_diff(refout, postout, diffpath, diffpath + '.err')
    for firstline in diff:
        return refout, postout, itertools.chain([firstline], diff)
    return refout, postout, []
5,331,034
def boot(configfile=None, use_argv=False):
    """Boot the environment containing the classes and configurations."""
    setupdir = dirname(dirname(__file__))
    curdir = getcwd()

    if configfile:
        pass  # Already a string
    elif use_argv and len(sys.argv) > 1:
        configfile = sys.argv[1]
    else:
        alternatives = [
            join(setupdir, "local.cfg"),
            join(setupdir, "dev.cfg"),
            join(setupdir, "prod.cfg")
        ]
        for alternative in alternatives:
            if exists(alternative):
                configfile = alternative
                break
    if not configfile:
        try:
            configfile = pkg_resources.resource_filename(
                pkg_resources.Requirement.parse("buzzbot"),
                "config/default.cfg")
        except pkg_resources.DistributionNotFound:
            raise ConfigurationError("Could not find default configuration.")

    global configuration
    configuration = configfile

    print "** Booting configuration: %s" % configfile
    turbogears.update_config(configfile=configfile, modulename="buzzbot.config")
5,331,035
def load_expected_results(file, pattern):
    """Reads the file, named file, which contains test results separated by
    the regular expression pattern.

    The test results are returned as a dictionary.
    """
    expected = {}
    compiled_pattern = re.compile(pattern)
    with open(file) as f:
        test = None
        for line in f:
            line = line.rstrip().decode('utf-8')
            match = compiled_pattern.search(line)
            if match:
                test = match.groups()[0]
                expected[test] = ''
            else:
                expected[test] += line + '\n'
    return expected
5,331,036
def play(index):
    """Software tone playback via wiringpi."""
    melody_list = [
        ((262, 0.5), (294, 0.5), (330, 0.5), (349, 0.5),
         (392, 0.5), (440, 0.5), (494, 0.5), (525, 0.5)),
        ((525, 0.5), (494, 0.5), (440, 0.5), (392, 0.5),
         (349, 0.5), (330, 0.5), (294, 0.5), (262, 0.5)),
        ((262, 1), (294, 1), (330, 1), (349, 1),
         (392, 1), (440, 1), (494, 1), (525, 1)),
    ]
    # Play the specified melody
    for v, play_time in melody_list[index]:
        wiringpi.softToneWrite(SPK_PIN, v)  # generate the tone
        time.sleep(play_time)  # wait so the same tone keeps sounding
    play_stop()
5,331,037
def following(request):
    """View all posts from followed users"""
    if request.method == "GET":
        user = User.objects.get(pk=request.user.id)
        following = user.follow_list.following.all()
        # Post pagination: https://docs.djangoproject.com/en/3.1/topics/pagination/
        posts = Post.objects.filter(user__in=following).order_by("-date")
        following_paginator = Paginator(posts, 10)
        following_page = request.GET.get('page')
        page_obj = following_paginator.get_page(following_page)
    else:
        return redirect("index")
    context = {"page_obj": page_obj}
    return render(request, "network/following.html", context)
5,331,038
def create_random_polygon(min_x, min_y, max_x, max_y, vertex_num):
    """Create a random polygon with the passed x and y bounds and the passed
    number of vertices; code adapted from:
    https://stackoverflow.com/a/45841790"""
    # generate the point coordinates within the bounds
    x = np.random.uniform(min_x, max_x, vertex_num)
    y = np.random.uniform(min_y, max_y, vertex_num)
    # determine the center of all points
    center = (sum(x) / vertex_num, sum(y) / vertex_num)
    # find the angle of each point from the center
    angles = np.arctan2(x - center[0], y - center[1])
    # sort points by their angle from the center to avoid self-intersections
    points_sorted_by_angle = sorted([(i, j, k) for i, j, k in zip(x, y, angles)],
                                    key=lambda t: t[2])
    # the process fails if there are duplicate points
    if len(points_sorted_by_angle) != len(set(points_sorted_by_angle)):
        return None
    # structure points as x-y tuples
    points = [(x, y) for (x, y, a) in points_sorted_by_angle]
    # create the polygon
    return Polygon(points)
5,331,039
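A usage sketch, assuming Polygon comes from shapely (the library the linked Stack Overflow answer targets):

    import numpy as np
    from shapely.geometry import Polygon  # assumed import

    np.random.seed(0)
    poly = create_random_polygon(0, 0, 10, 10, vertex_num=6)
    if poly is not None:  # None signals a duplicate-point failure
        print(poly.area, poly.is_valid)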
def document_hidden(session):
    """Polls for the document to become hidden."""
    def hidden(session):
        return session.execute_script("return document.hidden")
    return Poll(session, timeout=3, raises=None).until(hidden)
5,331,040
def delete_directory_files(directory_path):
    """
    Method for deleting temporary html files created by the show-in-browser
    process.
    """
    for file_object in os.listdir(directory_path):
        file_object_path = os.path.join(directory_path, file_object)
        if os.path.isfile(file_object_path):
            os.unlink(file_object_path)
        else:
            shutil.rmtree(file_object_path)
5,331,041
def cut_sounds(sound_directory, time):
    """Split file into specific time intervals.

    Parameters
    ----------
    sound_directory: str
        path to the directory for the audio file
    time: str
        duration of the splits

    Returns
    -------
    None
    """
    target_path = f'{sound_directory}/split_{time}'
    subprocess.run(f'mkdir {target_path}', shell=True)
    print(f'Creating the {target_path} folder')
    audio_files = [file for file in os.listdir(sound_directory)
                   if not file.startswith('split_')]
    time_cmd = ("ffprobe -v error -show_entries format=duration "
                "-of default=noprint_wrappers=1:nokey=1 ")
    for element in audio_files:
        full = f'{time_cmd}{sound_directory}/{element}'
        output = subprocess.check_output(full, shell=True)
        if float(output) < int(time) + 1:
            subprocess.run(f'cp {sound_directory}/{element} {target_path}/', shell=True)
        else:
            base, extension = element.split('.')
            # use the requested split duration (the original hardcoded 5 here,
            # contradicting the docstring)
            split_cmd = (f'ffmpeg -i {sound_directory}/{element} -f segment '
                         f'-segment_time {time} -c copy '
                         f'"{target_path}/{base}_%3d.{extension}"')
            print(split_cmd)
            subprocess.run(split_cmd, shell=True)
    return
5,331,042
def create_blueprint():
    """Creates a Blueprint"""
    blueprint = Blueprint('Tasks Blueprint', __name__, url_prefix='/tasks')
    blueprint.route('/', methods=['POST'])(tasks.create)
    blueprint.route('/', methods=['PATCH'])(tasks.patch)
    blueprint.route('/', methods=['GET'])(tasks.list)
    return blueprint
5,331,043
def gauss2d(sigma, fsize):
    """ Create a 2D Gaussian filter

    Args:
        sigma: width of the Gaussian filter
        fsize: (W, H) dimensions of the filter

    Returns:
        *normalized* Gaussian filter as (H, W) np.array
    """
    # A straightforward separable implementation, filling in the original
    # "You code here" stub: sample the Gaussian on a centered grid and
    # normalize so the coefficients sum to 1.
    w, h = fsize
    x = np.arange(w) - (w - 1) / 2.0
    y = np.arange(h) - (h - 1) / 2.0
    gx = np.exp(-x ** 2 / (2.0 * sigma ** 2))
    gy = np.exp(-y ** 2 / (2.0 * sigma ** 2))
    g = np.outer(gy, gx)  # (H, W)
    return g / g.sum()
5,331,044
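A quick check of the sketch above (the kernel should be (H, W)-shaped and sum to 1):

    g = gauss2d(1.0, (5, 5))
    print(g.shape)            # (5, 5)
    print(round(g.sum(), 6))  # 1.0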
def estimate_null_variance_gs(gs_lists, statslist, Wsq, single_gs_hpo=False, n_or_bins=1):
    """
    Estimates null variance from the average of a list of known causal windows
    """
    statspaths = {h: p for h, p in [x.rstrip().split('\t')[:2]
                                    for x in open(statslist).readlines()]}
    with gzip.open(list(statspaths.values())[0], 'rt') as ex_statfile:
        statscols = ex_statfile.readline().rstrip().split('\t')

    # Estimate null variance for each entry in gs_lists
    for gspath in gs_lists:
        for hpo, statspath in statspaths.items():
            # Intersect sumstats for phenotype with GS regions
            gsdf = pbt.BedTool(statspath).\
                intersect(pbt.BedTool(gspath), u=True, f=1.0).\
                to_dataframe(names=statscols)
            gsdf['window'] = gsdf[['#chr', 'start', 'end']].astype(str).\
                aggregate('_'.join, axis=1)

            # Read effect sizes per window and convert to mean variance
            stats = gsdf.loc[:, 'window meta_lnOR'.split()].\
                rename(columns={'meta_lnOR': 'lnOR'})
            gs_var = np.nanmean((stats.lnOR.astype(float) / 1.96) ** 2)

            # Update Wsq estimates for all sig. and effect size quantiles
            if single_gs_hpo:
                for hpo in Wsq.keys():
                    for sig in 'gw fdr'.split():
                        for i in range(n_or_bins):
                            Wsq[hpo][sig][i].append(gs_var)
                break
            else:
                for sig in 'gw fdr'.split():
                    for i in range(n_or_bins):
                        Wsq[hpo][sig][i].append(gs_var)

    return Wsq
5,331,045
def create_model(config):
    """Create the score model."""
    model_name = config.model.name
    score_model = get_model(model_name)(config)
    score_model = score_model.to(config.device)
    score_model = torch.nn.DataParallel(score_model)
    return score_model
5,331,046
def at(addr):
    """Look up an object by its id."""
    import gc
    for o in gc.get_objects():
        if id(o) == addr:
            return o
    return None
5,331,047
def money_flow_index(close_data, high_data, low_data, volume, period):
    """
    Money Flow Index.

    Formula:
    MFI = 100 - (100 / (1 + PMF / NMF))
    """
    catch_errors.check_for_input_len_diff(
        close_data, high_data, low_data, volume
    )
    catch_errors.check_for_period_error(close_data, period)
    mf = money_flow(close_data, high_data, low_data, volume)
    tp = typical_price(close_data, high_data, low_data)

    flow = [tp[idx] > tp[idx - 1] for idx in range(1, len(tp))]
    pf = [mf[idx] if flow[idx] else 0 for idx in range(0, len(flow))]
    nf = [mf[idx] if not flow[idx] else 0 for idx in range(0, len(flow))]

    pmf = [sum(pf[idx + 1 - period:idx + 1]) for idx in range(period - 1, len(pf))]
    nmf = [sum(nf[idx + 1 - period:idx + 1]) for idx in range(period - 1, len(nf))]

    # Dividing by 0 is not an issue, it turns the value into NaN which we
    # would want in that case
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        money_ratio = np.array(pmf) / np.array(nmf)

    mfi = 100 - (100 / (1 + money_ratio))
    mfi = fill_for_noncomputable_vals(close_data, mfi)
    return mfi
5,331,048
def get_variable_name(ds, args):
    """Tries to automatically set the variable to plot."""
    if args.varn is None:
        varns = set(ds.variables.keys()).difference(ds.coords)
        if len(varns) == 1:
            args.varn = varns.pop()
        else:
            errmsg = 'More than one variable in data set! Specify varn'
            raise IOError(errmsg)
5,331,049
def copytree(src, dst, symlinks=False, ignore=None):
    """
    Copy all the contents of directory src to directory dst.

    :param src: String Source directory.
    :param dst: String Destination directory.
    :param symlinks: default False
    :param ignore: default None
    :return: None
    """
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            shutil.copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)
5,331,050
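Unlike shutil.copytree, this variant copies into a destination directory that may already exist; the paths below are illustrative:

    import os
    import shutil

    os.makedirs("backup", exist_ok=True)  # dst may already exist
    copytree("project", "backup")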
def Get_Weights(dict_rank):
    """Converts rankings into weights."""
    Weights = adapt.create_Weightings(dict_rank)
    return Weights
5,331,051
def load_client_model(models_path, config):
    """
    Returns Pytorch client model loaded given the client model's path
    """
    device = load_device()
    client_hparams = config.get("client_hparams")
    for needed_param in client_hparams.get("needed", []):
        client_hparams[needed_param] = config.get(needed_param)
    model_file = os.path.join(models_path, "client_model.pt")
    model = models.ResNet18Client(client_hparams)
    try:
        model.load_state_dict(torch.load(model_file, map_location=device))
    except Exception:
        # retry after cleansing the state dict (e.g. mismatched key prefixes)
        model = models.ResNet18Client(client_hparams)
        state_dict = cleanse_state_dict(torch.load(model_file, map_location=device))
        model.load_state_dict(state_dict)
    if torch.cuda.is_available():
        model.cuda()
    return model
5,331,052
def psisloo(log_likelihood):
    """
    Summarize the model fit using Pareto-smoothed importance sampling (PSIS)
    and approximate Leave-One-Out cross-validation (LOO).

    Takes as input an ndarray of posterior log likelihood terms
    [ p( y_i | theta^s ) ] per observation unit.

    e.g. if using pystan:
        loosummary = stanity.psisloo(stan_fit.extract()['log_lik'])

    Returns a Psisloo object, with useful methods such as print_summary()
    and plot().

    References
    ----------
    Aki Vehtari, Andrew Gelman and Jonah Gabry (2015). Efficient
    implementation of leave-one-out cross-validation and WAIC for
    evaluating fitted Bayesian models. arXiv preprint arXiv:1507.04544.

    Aki Vehtari and Andrew Gelman (2015). Pareto smoothed importance
    sampling. arXiv preprint arXiv:1507.02646.
    """
    return Psisloo(log_likelihood)
5,331,053
def test_pamap2_varyeps(dataset):
    """
    This routine performs a series of tests on the PAMAP2 dataset,
    varying epsilon.
    """
    eps = [500, 1000, 2000]
    num_samples = 100000
    minpts = 50
    grid = (4, 4, 4, 4)

    timing_actual_results = []
    timing_theoretical_results = []

    Xfull = dataset

    # Create smaller dataset
    X = the_rng.choice(Xfull, size=num_samples, replace=False)

    mins = []
    maxs = []
    for i in range(len(Xfull[0])):
        mins.append(min(Xfull[:, i]))
        maxs.append(max(Xfull[:, i]))
    mins = np.array(mins)
    maxs = np.array(maxs)
    dimrange = [themax - themin for themax, themin in zip(maxs, mins)]
    dimrange = np.array(dimrange)
    print(dimrange)

    for e in eps:
        # Calculate baseline DBSCAN result
        start_time = time.time()
        dbscan = DBSCAN(eps=e, min_samples=minpts).fit(X)
        end_time = time.time()
        dbscan_elapsed = end_time - start_time
        print(f'baseline: {dbscan_elapsed}')

        # Calculate results for GriDBSCAN
        start_time = time.time()
        gridbscan = GriDBSCAN(eps=e, min_samples=minpts, grid=grid).fit(X)
        end_time = time.time()
        gridbscan_elapsed = end_time - start_time

        theoretical_improvement = dbscan_elapsed / gridbscan.dbscan_total_time_
        actual_improvement = dbscan_elapsed / gridbscan_elapsed
        # label each result with its epsilon (the original printed a stale
        # loop variable here)
        print(f'{e}: {gridbscan_elapsed:1.4f} {gridbscan.dbscan_total_time_:1.4f}')
        print(f'\t\t theoretical improvement: {theoretical_improvement:02.4f}')
        print(f'\t\t actual improvement: {actual_improvement:02.4f}')
        timing_theoretical_results.append(theoretical_improvement)
        timing_actual_results.append(actual_improvement)

    x_coord = [e for e in eps]
    tmp, = plt.plot(x_coord, timing_theoretical_results, 'bo-')
    stylelines = [tmp]
    plt.title('Theoretical Improvement')
    plt.show()

    x_coord = [e for e in eps]
    tmp, = plt.plot(x_coord, timing_actual_results, 'bo-')
    stylelines = [tmp]
    plt.title('Actual Improvement')
    plt.show()
    return
5,331,054
def rxdelim(content: str) -> Tuple[Optional[Pattern], Optional[Pattern]]:
    """
    Return suitable begin and end delimiters for the content `content`.
    If no matching delimiters are found, return `None, None`.
    """
    tp = magic.from_buffer(content).lower()
    for rxtp, rxbegin, rxend in DELIMITERS:
        if rxtp.match(tp):
            return rxbegin, rxend
    return None, None
5,331,055
def main():
    """loxberry plugin for the solaredge PV API; sends the actual power
    production value to the miniserver every 5 minutes"""
    # create file strings from os environment variables
    lbplog = os.environ['LBPLOG'] + "/solaredge/solaredge.log"
    lbpconfig = os.environ['LBPCONFIG'] + "/solaredge/plugin.cfg"
    lbsconfig = os.environ['LBSCONFIG'] + "/general.cfg"

    # creating log file and set log format
    logging.basicConfig(filename=lbplog, level=logging.INFO,
                        format='%(asctime)s: %(message)s ')
    # logging.info("<INFO> initialise logging...")

    # open config file and read options
    try:
        from solaredge_interface.api.SolarEdgeAPI import SolarEdgeAPI
    except ImportError:
        logging.error("<ERROR> Error loading SolarEdgeAPI module... exit script")
        return
    try:
        cfg = ConfigParser()
        global_cfg = ConfigParser()
        cfg.read(lbpconfig)
        global_cfg.read(lbsconfig)
    except Exception:
        logging.error("<ERROR> Error parsing config files...")

    # define variables with values from config files
    apiKey = cfg.get("SOLAREDGE", "API_KEY")
    location = cfg.get("SOLAREDGE", "LOCATION")
    # comment for local debugging
    miniserver = global_cfg.get("MINISERVER1", "IPADDRESS")
    udp_port = int(cfg.get("MINISERVER", "PORT"))
    # uncomment for local debugging
    # miniserver = "127.0.0.1"
    # udp_port = 15555

    try:
        api = SolarEdgeAPI(api_key=apiKey, datetime_response=True,
                           pandas_response=False)
        response = api.get_site_current_power_flow(location)
        y = json.loads(response.text)
        curPwr = y['siteCurrentPowerFlow']['PV']['currentPower']
        consPwr = y['siteCurrentPowerFlow']['LOAD']['currentPower']
        actPwr = float(consPwr) - float(curPwr)
        unit = y['siteCurrentPowerFlow']['unit']
        msg = str(actPwr.__round__(2))
    except Exception:
        logging.error("<ERROR> Failed to execute API call...")
        msg = None

    if msg is not None:
        send_udp(miniserver, udp_port, msg)
        logging.info("<INFO> Message sent to Miniserver IP: %s" % miniserver)
    else:
        logging.error("<ERROR> Nothing sent to Miniserver IP: %s" % miniserver)
5,331,056
def GenKeyOrderAttrs(soappy_service, ns, type_name):
    """Generates the order and attributes of keys in a complex type.

    Args:
        soappy_service: SOAPpy.WSDL.Proxy The SOAPpy service object
            encapsulating the information stored in the WSDL.
        ns: string The namespace the given WSDL-defined type belongs to.
        type_name: string The name of the WSDL-defined type to search for.

    Returns:
        list A list of dictionaries containing the attributes of keys within
        a complex type, in order.
    """
    complex_type = soappy_service.wsdl.types[ns].types[type_name]
    if IsASubType(type_name, ns, soappy_service):
        # This is an extension of another type.
        key_order = GenKeyOrderAttrs(
            soappy_service,
            complex_type.content.derivation.attributes['base'].getTargetNamespace(),
            complex_type.content.derivation.attributes['base'].getName())
        if hasattr(complex_type.content.derivation.content, 'content'):
            key_order.extend([element.attributes for element in
                              complex_type.content.derivation.content.content])
        return key_order
    else:
        # This is a base type.
        return [element.attributes for element in complex_type.content.content]
5,331,057
def read_binary_stl(filename):
    """Reads a 3D triangular mesh from an STL file (binary format).

    :param filename: path of the stl file
    :type filename: str
    :return: The vertices, normals and index array of the mesh
    :rtype: Mesh
    :raises: ValueError
    """
    with open(filename, 'rb') as stl_file:
        stl_file.seek(80)
        face_count = np.frombuffer(stl_file.read(4), dtype=np.int32)[0]
        record_dtype = np.dtype([
            ('normals', np.float32, (3,)),
            ('vertices', np.float32, (3, 3)),
            ('attr', '<i2', (1,)),
        ])
        data = np.fromfile(stl_file, dtype=record_dtype)

    if face_count != data.size:
        raise ValueError('stl data has incorrect size')

    vertices = data['vertices'].reshape(-1, 3)
    indices = np.arange(face_count * 3).astype(np.uint32)
    normals = np.repeat(data['normals'], 3, axis=0)

    return Mesh(vertices, indices, normals, clean=True)
5,331,058
def exit_if_missing(token: str, org: str, assignment_title: str):
    """
    Exit CLI program if missing any required values.
    """
    if token is None:
        typer.echo(
            f'Must specify token key in .stoplightrc or with {TOKEN_OPTION_NAME} option',
            err=True)
        raise typer.Exit(code=1)
    if org is None:
        typer.echo(
            f'Must specify org key in .stoplightrc or with {ORG_OPTION_NAME} option',
            err=True)
        raise typer.Exit(code=1)
    if assignment_title is None:
        typer.echo(
            f'Must specify assignment_title key in .stoplightrc or with '
            f'{ASSIGNMENT_TITLE_OPTION_NAME} option',
            err=True)
        raise typer.Exit(code=1)
5,331,059
def validate_currency(currency, locale=None):
    """Check the currency code is recognized by Babel.

    Accepts a ``locale`` parameter for fine-grained validation, working as
    the one defined above in the ``list_currencies()`` method.

    Raises an `UnknownCurrencyError` exception if the currency is unknown
    to Babel.
    """
    if currency not in list_currencies(locale):
        raise UnknownCurrencyError(currency)
5,331,060
def test__rules__std_L003_process_raw_stack(generate_test_segments):
    """Test the _process_raw_stack function.

    Note: This test probably needs expanding. It doesn't really check
    enough of the full functionality.
    """
    cfg = FluffConfig()
    r = get_rule_from_set('L003', config=cfg)
    test_stack = generate_test_segments(['bar', '\n', ' ', 'foo', 'baar', ' \t '])
    res = r._process_raw_stack(test_stack)
    print(res)
    assert sorted(res.keys()) == [1, 2]
    assert res[2]['indent_size'] == 5
5,331,061
def create_plan(session, bucket, s3_object_key, job_identifier, parameters, template_url):
    """Shows a plan of what CloudFormation might create and how much it
    might cost"""
    cfn_client = session.client('cloudformation')
    template_summary = cfn_client.get_template_summary(TemplateURL=template_url)
    stack_parameters = gather(
        session=session,
        key_object=s3_object_key,
        parameters=parameters,
        bucket=bucket,
        job_identifier=job_identifier)
    try:
        cost_url = cfn_client.estimate_template_cost(
            TemplateURL=template_url,
            Parameters=stack_parameters)['Url']
    except (botocore.exceptions.ClientError, botocore.exceptions.ParamValidationError):
        cost_url = None
    click.echo("Estimated template cost URL: {}".format(cost_url))
    display_changes(
        changes=template_summary['ResourceTypes'],
        change_set=False)
5,331,062
def raise302(url):
    """
    Return a temporary redirect status and content to the user.
    """
    raise web.HTTPStatus('302 Found', [('Location', url)], [''])
5,331,063
def testing(model, t):
    """Run the model on one batch of the test set and print the
    predictions alongside the labels."""
    model.eval()  # the original wrote `model.eval` without calling it
    test_set = trainSet2(t)
    test_data = DataLoader(test_set, batch_size)
    for x, y in test_data:
        result = model(x)
        print(result)
        print(y)
        break
5,331,064
def monotonise_tree(tree, n_feats, incr_feats, decr_feats):
    """Helper to turn a tree into a set of rules"""
    PLUS = 0
    MINUS = 1
    mt_feats = np.asarray(list(incr_feats) + list(decr_feats))

    def traverse_nodes(node_id=0, operator=None, threshold=None,
                       feature=None, path=None):
        if path is None:
            path = np.zeros([n_feats, 2])
        else:
            path[feature, PLUS if operator[0] == '>' else MINUS] = 1
        if not node_is_leaf(tree, node_id):
            feature = tree.feature[node_id]
            threshold = tree.threshold[node_id]
            left_node_id = tree.children_left[node_id]
            traverse_nodes(left_node_id, "<=", threshold, feature, path.copy())
            right_node_id = tree.children_right[node_id]
            traverse_nodes(right_node_id, ">", threshold, feature, path.copy())
        else:  # a leaf node
            if np.sum(path) > 0:
                # check if all increasing
                all_increasing = np.sum(np.asarray(
                    [path[i_feat, MINUS] if i_feat + 1 in incr_feats
                     else path[i_feat, PLUS]
                     for i_feat in mt_feats - 1])) == 0
                all_decreasing = np.sum(np.asarray(
                    [path[i_feat, MINUS] if i_feat + 1 in decr_feats
                     else path[i_feat, PLUS]
                     for i_feat in mt_feats - 1])) == 0
                counts = np.asarray(tree.value[node_id][0])
                probs = counts / np.sum(counts)
                predicted_value = np.sign(probs[1] - 0.5)
                if predicted_value >= 0 and all_increasing:
                    pass  # ok
                elif predicted_value <= 0 and all_decreasing:
                    pass  # ok
                else:
                    # not a valid rule
                    tree.value[node_id][0] = [0., 0.]
            else:
                print('Tree has only one node (i.e. the root node!)')
                return None

    if len(mt_feats) > 0:
        traverse_nodes()
    return tree
5,331,065
def main():
    """Main function."""
    syn = login()
    args = get_args()
    # In order to make >3 Entrez requests/sec, 'email' and 'api_key'
    # params need to be set.
    Entrez.email = os.getenv('ENTREZ_EMAIL')
    Entrez.api_key = os.getenv('ENTREZ_API_KEY')
    find_publications(syn, args)
5,331,066
def pythonify_and_pickle(file, out_filename):
    """Convert all the data in the XML file and save as pickled files for
    nodes, ways, relations and tags separately.

    :param file: Filename (the file will be opened 4 times, so passing a
        file object will not work). Can be anything which :module:`digest`
        can parse.
    :param out_filename: If it is `test` then writes files
        `test_nodes.pic.xz` through `test_tags.pic.xz`
    :return: A tuple of the 4 output filenames for nodes, ways, relations
        and tags.
    """
    obj = NodesPacked(file)
    out = [out_filename + "_nodes.pic.xz"]
    pickle(obj, out[0])
    for typpe, name in [(Ways, "ways"), (Relations, "relations"), (Tags, "tags")]:
        obj = None
        obj = typpe(file)
        name = "{}_{}.pic.xz".format(out_filename, name)
        pickle(obj, name)
        out.append(name)
    return out
5,331,067
def compute_inverse_volatility_weights(df: pd.DataFrame) -> pd.Series:
    """
    Calculate inverse volatility relative weights.

    :param df: cols contain log returns
    :return: series of weights
    """
    dbg.dassert_isinstance(df, pd.DataFrame)
    dbg.dassert(not df.columns.has_duplicates)
    # Compute inverse volatility weights.
    # The result of `compute_volatility_normalization_factor()`
    # is independent of the `target_volatility`.
    weights = df.apply(
        lambda x: compute_volatility_normalization_factor(
            x, target_volatility=0.1
        )
    )
    # Replace inf's with NaN's in weights.
    weights.replace([np.inf, -np.inf], np.nan, inplace=True)
    # Rescale weights to percentages.
    weights /= weights.sum()
    weights.name = "weights"
    # Replace NaN with zero for weights.
    weights = hdataf.apply_nan_mode(weights, mode="fill_with_zero")
    return weights
5,331,068
def log(msg):
    """
    Since this script is used in cron and cron sends mails if there is
    stdout, we print ordinarily.
    """
    print(msg)
5,331,069
def resattnet164(**kwargs):
    """
    ResAttNet-164 model from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resattnet(blocks=164, model_name="resattnet164", **kwargs)
5,331,070
def write_oldmos(fname, Cas, Cbs=None, mode='w'):
    """
    Deprecated. May be deleted at some point.

    Write MOs in Cfour OLDMOS format. That means packages of four MOs:

        C[0,0] C[0,1] C[0,2] C[0,3]
        C[1,0] C[1,1] C[1,2] C[1,3]
        C[2,0] C[2,1] C[2,2] C[2,3]
        ...    ...    ...    ...

    Format for each individual coefficient: 30.20E

    Write alpha and beta orbitals, or the alpha set twice. Writing the
    alpha set (RHF) twice doesn't affect a RHF guess and can serve as a
    UHF guess.

    Parameters
    ----------
    fname : str
        filename
    Cas : np.array with MO coefficients Cas[ao,mo]

    Returns
    -------
    None.
    """
    f = open(fname, mode)
    nbf = Cas.shape[0]
    for j in range(0, nbf, 4):
        # Normally we write groups of 4 MOs. The last group may be smaller.
        ngr = 4
        if j + ngr >= nbf:
            ngr = nbf - j
        for ao in range(nbf):
            line = ''
            for igr in range(ngr):
                line += f"{Cas[ao, j + igr]:30.20E}"
            line += "\n"
            f.write(line)
    if Cbs is None:
        f.close()  # close before the early return (the original leaked the handle)
        return
    for j in range(0, nbf, 4):
        # Normally we write groups of 4 MOs. The last group may be smaller.
        ngr = 4
        if j + ngr >= nbf:
            ngr = nbf - j
        for ao in range(nbf):
            line = ''
            for igr in range(ngr):
                line += f"{Cbs[ao, j + igr]:30.20E}"
            line += "\n"
            f.write(line)
    f.close()
    return
5,331,071
def test_retired_locale_redirects(base_url, slug, retired_locale):
    """Ensure that requests for retired locales properly redirect."""
    resp = request("get", f"{base_url}/{retired_locale}{slug}")
    assert resp.status_code == 302
    slug_parts = slug.split("?")
    expected_slug = slug_parts[0].lstrip("/")
    expected_qs = f"?retiredLocale={retired_locale}"
    if len(slug_parts) > 1:
        expected_qs += f"&{slug_parts[1]}"
    assert (
        resp.headers["Location"] == f"/en-US/{expected_slug}{expected_qs}"
    ), f"{resp.headers['Location']} is not /en-US/{expected_slug}{expected_qs}"
5,331,072
def remove_ntp_system_peer(device, system_peer, vrf=None):
    """ Remove ntp system peer config

    Args:
        device (`obj`): Device object
        system_peer ('str'): System peer IP address

    Returns:
        None

    Raises:
        SubCommandFailure
    """
    remove_config = ''
    log.info("Getting ntp server information")
    out = device.execute('show running-config | include ntp server')
    for line in out.splitlines():
        line = line.strip()
        if vrf and vrf != 'default':
            if system_peer in line and vrf in line:
                remove_config = line
                break
        else:
            if system_peer in line:
                remove_config = line
                break
    log.info("Removing ntp server: {}".format(remove_config))
    try:
        device.configure("no {}".format(remove_config))
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed in removing ntp system "
            "peer {system_peer} on device {device}, "
            "Error: {e}".format(
                system_peer=system_peer, device=device.name, e=str(e)
            )
        )
5,331,073
def stype(obj):
    """
    Return string shape representation of structured objects.

    >>> import numpy as np
    >>> a = np.zeros((3,4), dtype='uint8')
    >>> b = np.zeros((1,2), dtype='float32')

    >>> stype(a)
    '<ndarray> 3x4:uint8'
    >>> stype(b)
    '<ndarray> 1x2:float32'

    >>> stype([a, (b, b)])
    '[<ndarray> 3x4:uint8, (<ndarray> 1x2:float32, <ndarray> 1x2:float32)]'
    >>> stype([1, 2.0, [a], [b]])
    '[<int> 1, <float> 2.0, [<ndarray> 3x4:uint8], [<ndarray> 1x2:float32]]'
    >>> stype({'a':a, 'b':b, 'c':True})
    '{a:<ndarray> 3x4:uint8, b:<ndarray> 1x2:float32, c:<bool> True}'

    :param object obj: Any object
    :return: String representation of object where arrays are replaced by
        their shape and dtype descriptions
    :rtype: str
    """
    typestr = lambda obj: '<' + type(obj).__name__ + '> '
    mklist = lambda obj: ', '.join(stype(o) for o in obj)
    mkset = lambda obj: ', '.join(stype(o) for o in sorted(obj))
    mkdict = lambda obj: ', '.join(
        str(k) + ':' + stype(v) for k, v in sorted(obj.items()))
    if istensor(obj, ['shape', 'dtype']):
        return typestr(obj) + shapestr(obj, True)
    if isinstance(obj, list):
        return '[' + mklist(obj) + ']'
    if isinstance(obj, tuple):
        return '(' + mklist(obj) + ')'
    if isinstance(obj, set):
        return '{' + mkset(obj) + '}'
    if isinstance(obj, dict):
        return '{' + mkdict(obj) + '}'
    return typestr(obj) + str(obj)
5,331,074
def board2key(Z):
    """Turn a "Game of Life" board into a key."""
    return bin2hex(array2string(Z[1:-1, 1:-1].reshape((1, 512 * 512 * 4))[0]))
5,331,075
def CipherArray(Array=[[" "], " "], Random=1):
    """
    Array - array to encode
    Random - key number used for encoding

    A function that encodes elements.
    Returns an array consisting of encoded elements.
    """
    if type(Array) != list:
        raise TypeError("Invalid array format")
    if type(Random) != int:
        raise TypeError("Invalid key format")
    for i in range(len(Array)):
        for j in range(len(Array[i])):
            Array[i][j] = chr(ord(Array[i][j]) * Random)
    return Mover(Array)
5,331,076
def voltage(raw_value, v_min=0, v_max=10, res=32760, gain=1):
    """Converts a raw value to a voltage measurement.

    ``V = raw_value / res * (v_max - v_min) * gain``
    """
    return (float(raw_value) / res * (v_max - v_min) * gain, "V")
5,331,077
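For example, at the default 10 V range and resolution of 32760:

    voltage(16380)  # (5.0, 'V') - half scale
    voltage(32760)  # (10.0, 'V') - full scale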
def extract_characteristics_from_string(species_string):
    """
    Species are named for the SBML as
    species_name_dot_characteristic1_dot_characteristic2,
    so this transforms them into a set.

    Parameters:
        species_string (str) = species string in MobsPy for SBML format
            (with _dot_ instead of .)
    """
    return set(species_string.split('_dot_'))
5,331,078
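For instance:

    extract_characteristics_from_string('Virus_dot_young_dot_infected')
    # {'Virus', 'young', 'infected'}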
def _get_bzr_version():
    """Looks up bzr version by calling bzr --version.

    :raises: VcsError if bzr is not installed"""
    try:
        value, output, _ = run_shell_command('bzr --version',
                                             shell=True,
                                             us_env=True)
        if value == 0 and output is not None and len(output.splitlines()) > 0:
            version = output.splitlines()[0]
        else:
            # the original applied % only to the second string literal,
            # which would raise a TypeError instead of this VcsError
            raise VcsError("bzr --version returned %s,"
                           " maybe bzr is not installed" % value)
    except VcsError as e:
        raise VcsError("Could not determine whether bzr is installed: %s" % e)
    return version
5,331,079
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Old way of setting up Terncy curtain.

    Can only be called when a user accidentally mentions the Terncy platform
    in their config. But even in that case it would have been ignored.
    """
    _LOGGER.info(" terncy curtain async_setup_platform")
5,331,080
def register_blueprints(app: Flask):
    """Register the required blueprint packages on the Flask app instance."""
    app.register_blueprint(auth_bp)
    app.register_blueprint(oauth_bp)
    app.register_blueprint(chat_bp)
    app.register_blueprint(admin_bp)
5,331,081
def C(source):
    """Compile at runtime and run code in-line"""
    return _embed_or_inline_c(source, True)
5,331,082
def normalize(x):
    """Min-max scale each column of the original data set to [0, 1]."""
    max_x = np.max(x, axis=0)
    min_x = np.min(x, axis=0)
    x = (x - min_x) / (max_x - min_x)
    return x
5,331,083
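# Worked example for normalize() above: scaling is column-wise (axis=0), so
# each feature lands in [0, 1] independently. Assumes numpy imported as np.
import numpy as np
x = np.array([[1.0, 10.0],
              [2.0, 20.0],
              [3.0, 30.0]])
print(normalize(x))
# [[0.  0. ]
#  [0.5 0.5]
#  [1.  1. ]]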
def test_indefinite_freeze_attack(using_tuf=False): """ <Arguments> using_tuf: If set to 'False' all directories that start with 'tuf_' are ignored, indicating that tuf is not implemented. The idea here is to expire timestamp metadata so that the attacker can keep serving stale (frozen) metadata; a client using tuf should detect the expiration and reject the download. """ ERROR_MSG = '\tIndefinite Freeze Attack was Successful!\n\n' try: # Setup. root_repo, url, server_proc, keyids = util_test_tools.init_repo(using_tuf) reg_repo = os.path.join(root_repo, 'reg_repo') tuf_repo = os.path.join(root_repo, 'tuf_repo') metadata_dir = os.path.join(tuf_repo, 'metadata') downloads = os.path.join(root_repo, 'downloads') # Add file to 'repo' directory: {root_repo} filepath = util_test_tools.add_file_to_repository(reg_repo, 'Test A') file_basename = os.path.basename(filepath) url_to_repo = url+'reg_repo/'+file_basename downloaded_file = os.path.join(downloads, file_basename) if using_tuf: print('TUF ...') # Update TUF metadata before attacker modifies anything. util_test_tools.tuf_refresh_repo(root_repo, keyids) # Modify the url. Remember that the interposition will intercept # urls that have 'localhost:9999' hostname, which was specified in # the json interposition configuration file. Look for 'hostname' # in 'util_test_tools.py'. Further, the 'file_basename' is the target # path relative to 'targets_dir'. url_to_repo = 'http://localhost:9999/'+file_basename # Make timestamp metadata with close expiration date (2s). _remake_timestamp(metadata_dir, keyids) # Client performs initial download. If the computer is slow, it may # take longer than the expiration time. In this case you will see # an ExpiredMetadataError. try: _download(url_to_repo, downloaded_file, using_tuf) except Exception: print('Initial download failed! It may be because your machine is '+ \ 'busy. Try again later.') else: # Expire timestamp. time.sleep(EXPIRATION) # Try downloading again; this should raise an error. try: _download(url_to_repo, downloaded_file, using_tuf) except tuf.ExpiredMetadataError: print('Caught an expiration error!') else: raise IndefiniteFreezeAttackAlert(ERROR_MSG) finally: util_test_tools.cleanup(root_repo, server_proc)
5,331,084
def _diff_bearings(bearings, bearing_thresh=40): """ Identify kinked nodes (nodes that change direction of an edge) by diffing consecutive edge bearings. Args: bearings (list(tuple)): containing (start_node, end_node, bearing) bearing_thresh (int): threshold for identifying kinked nodes (range 0, 360) Returns: list[str] of kinked nodes """ kinked_nodes = [] # diff bearings nodes = [b[0] for b in bearings] bearings_comp = [b[2] for b in bearings] bearing_diff = [y - x for x, y in zip(bearings_comp, bearings_comp[1:])] node2bearing_diff = list(zip(nodes[1:-1], bearing_diff)) # id nodes to remove for n in node2bearing_diff: # smallest angular difference, controlling for wraparound at 0/360 # (the diff can fall anywhere in (-360, 360)) d = abs(n[1]) % 360 if min(d, 360 - d) > bearing_thresh: kinked_nodes.append(n[0]) return kinked_nodes
5,331,085
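# Sketch for _diff_bearings() above on made-up (start_node, end_node, bearing)
# tuples. Node 'b' turns ~90 degrees between its two edges and is flagged; a
# 355 -> 5 bearing change wraps across north and is only a 10-degree turn.
print(_diff_bearings([('a', 'b', 0), ('b', 'c', 90), ('c', 'd', 90)]))   # ['b']
print(_diff_bearings([('a', 'b', 355), ('b', 'c', 5), ('c', 'd', 5)]))   # []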
def update_holidays(cal: TextIOWrapper): """Import holidays from an ics calendar file, e.g. one from https://www.calendarlabs.com/ical-calendar/holidays/norway-holidays-62/""" import_calendar(cal)
5,331,086
async def request(method: str, url: str, params: dict = None, data: Any = None, credential: Credential = None, no_csrf: bool = False, json_body: bool = False, **kwargs): """ Send a request to the API. Args: method (str) : HTTP method. url (str) : Request URL. params (dict, optional) : Query parameters. data (Any, optional) : Request payload. credential (Credential, optional): Credential object. no_csrf (bool, optional) : Do not add the CSRF token automatically. json_body (bool, optional) : Whether the payload is JSON. Returns: None when the API returns no data; otherwise the contents of the `data` or `result` field of the response. """ if credential is None: credential = Credential() method = method.upper() # Any non-GET request without no_csrf requires bili_jct if method != 'GET' and not no_csrf: credential.raise_for_no_bili_jct() # Referer and UA headers to get past anti-crawler checks DEFAULT_HEADERS = { "Referer": "https://www.bilibili.com", "User-Agent": "Mozilla/5.0" } headers = DEFAULT_HEADERS if params is None: params = {} # Add the CSRF token automatically if not no_csrf and method in ['POST', 'DELETE', 'PATCH']: if data is None: data = {} data['csrf'] = credential.bili_jct data['csrf_token'] = credential.bili_jct # jsonp if params.get("jsonp", "") == "jsonp": params["callback"] = "callback" config = { "method": method, "url": url, "params": params, "data": data, "headers": headers, "cookies": credential.get_cookies() } config.update(kwargs) if json_body: config["headers"]["Content-Type"] = "application/json" config["data"] = json.dumps(config["data"]) # Use a proxy if the user configured one if settings.proxy: config["proxy"] = settings.proxy session = get_session() async with session.request(**config) as resp: # Check the status code try: resp.raise_for_status() except aiohttp.ClientResponseError as e: raise NetworkException(e.status, e.message) # Check the Content-Length response header content_length = resp.headers.get("content-length") if content_length and int(content_length) == 0: return None # Check the Content-Type response header; str.index would raise # ValueError instead of returning -1, so use a substring test content_type = resp.headers.get("content-type") if content_type is None or "application/json" not in content_type.lower(): raise ResponseException("Response is not of type application/json") raw_data = await resp.text() resp_data: dict if 'callback' in params: # JSONP request resp_data = json.loads( re.match("^.*?({.*}).*$", raw_data, re.S).group(1)) else: # JSON resp_data = json.loads(raw_data) # Check the code field code = resp_data.get("code", None) if code is None: raise ResponseCodeException(-1, "API response contains no code field", resp_data) if code != 0: msg = resp_data.get('msg', None) if msg is None: msg = resp_data.get('message', None) if msg is None: msg = "API returned no error message" raise ResponseCodeException(code, msg, resp_data) real_data = resp_data.get("data", None) if real_data is None: real_data = resp_data.get("result", None) return real_data
5,331,087
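# Minimal call sketch for request() above. The URL is a placeholder, not a
# documented endpoint; a GET with no Credential skips all CSRF handling and
# returns the parsed 'data'/'result' payload (or None for an empty body).
import asyncio

async def fetch_example():
    return await request('GET', 'https://example.com/api/endpoint')

# data = asyncio.run(fetch_example())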
def InflRate(): """Inflation rate""" return asmp.InflRate()
5,331,088
def evaluate(FLAGS, y_test, y_score, n_class): """Evaluate the quality of the model Args: FLAGS (argument parser): input information y_test (2D array): true labels of test data y_score (2D array): predicted labels of test data n_class (int): number of classes Returns: [None] """ if os.path.exists(FLAGS.json_label_decode): label_decode = get_target_names(FLAGS.json_label_decode) target_names = [] for i in label_decode: target_names.append(label_decode[i]) else: raise ValueError('[ERROR]: {} is not found'.format(FLAGS.json_label_decode)) #statistics print("[INFOR]: Plot Statistics\n") statistics(FLAGS, y_test, target_names) #plot roc print("[INFOR]: Plot Receiver Operating Characteristic\n") roc_plot(FLAGS, y_test, y_score, target_names) #plot history print("[INFOR]: Plot History.\n") if os.path.exists(FLAGS.json_history): history_plot(FLAGS, n_class) else: warnings.warn('[WARNING]: {} is not found, plot history is ignored'.format(FLAGS.json_history)) #convert to 1D array y_test = np.argmax(y_test, axis=1) y_score = np.argmax(y_score, axis=1) print("\n\n[INFOR]: Plot confusion matrix\n") cm = confusion_matrix(y_test, y_score) plot_confusion_matrix(FLAGS, cm, target_names, normalize=False) print("\n\n[INFOR]: Report test data\n") print(classification_report(y_test, y_score, target_names=target_names)) print("\n\n[INFOR]: Report accuracy\n") print(accuracy_score(y_test, y_score), '\n') print("\n\n[INFOR]: See more result in {} folder\n".format(FLAGS.output_dir)) print()
5,331,089
def author_single_view(request, slug): """ Render a single author's profile page. :param request: HTTP request :param slug: author profile slug :return: rendered author_single template """ author = get_object_or_404(Profile, slug=slug) author_forum_list = Forum.objects.filter(forum_author=author.id).order_by("-is_created")[:10] author_comments = Comment.objects.filter(comment_author=author.id).order_by("-is_created")[:10] total_forums = Forum.objects.filter(forum_author=author.id).annotate(num_comments=Count('forum_author')) total_comments = Comment.objects.filter(comment_author=author.id).annotate(num_comments=Count('comment_author')) template = 'app_author/author_single.html' context = { 'author': author, 'author_forum_list': author_forum_list, 'author_comments': author_comments, 'total_forums': total_forums, 'total_comments': total_comments } return render(request, template, context)
5,331,090
def get_predefined(schedule): """ Predefined learn rate changes at specified epochs :param schedule: dictionary that maps epochs to learn rate values. """ def update(lr, epoch): if epoch in schedule: return floatX(schedule[epoch]) else: return floatX(lr) return update
5,331,091
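# Usage sketch for get_predefined() above. The returned closure is meant to be
# called once per epoch; floatX is assumed to be the same dtype-casting helper
# the surrounding code uses (e.g. numpy.float32).
update = get_predefined({0: 0.1, 10: 0.01, 20: 0.001})
lr = 0.1
for epoch in range(25):
    lr = update(lr, epoch)  # drops to 0.01 at epoch 10 and 0.001 at epoch 20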
def launch_ec2_instances(config, nb=1): """ Launch new ec2 instance(s) """ conf = config[AWS_CONFIG_SECTION] ami_image_id = conf.get(AMI_IMAGE_ID_FIELD) ami_name = conf.get(AMI_IMAGE_NAME_FIELD) if ami_image_id and ami_name: raise ValueError('The fields ami_image_id and ami_image_name cannot both be ' 'specified at the same time. Please specify either ami_image_id ' 'or ami_image_name') if ami_name: ami_image_id = _get_image_id(config, ami_name) instance_type = conf[INSTANCE_TYPE_FIELD] key_name = conf[KEY_NAME_FIELD] security_group = conf[SECURITY_GROUP_FIELD] logger.info('Launching {} new ec2 instance(s)...'.format(nb)) # tag all instances using RAMP_AWS_BACKEND_TAG to be able # to list all instances later tags = [{ 'ResourceType': 'instance', 'Tags': [ {'Key': RAMP_AWS_BACKEND_TAG, 'Value': '1'}, ] }] sess = _get_boto_session(config) resource = sess.resource('ec2') instances = resource.create_instances( ImageId=ami_image_id, MinCount=nb, MaxCount=nb, InstanceType=instance_type, KeyName=key_name, TagSpecifications=tags, SecurityGroups=[security_group], ) return instances
5,331,092
def residual_error(X_train, X_test, y_train, y_test, reg="linear"): """ Plot the residual error of the Regression model for the input data, and return the fitted Regression model. ------------------------------------------------------------------- # Parameters # X_train, X_test, y_train, y_test (np.arrays): Given X, a 2-D array of data, and y, an array of target data, use: sklearn.model_selection.train_test_split(X, y) to obtain X_train, X_test, y_train, and y_test. # reg (string): Whether the regression model is linear or logistic (default="linear"). """ if reg.lower() == "linear": reg = LinearRegression() reg.fit(X_train, y_train) elif reg.lower() == "logistic": reg = LogisticRegression() reg.fit(X_train, y_train) else: raise ValueError('reg must be either "linear" or "logistic"') ## setting plot style plt.style.use('fivethirtyeight') ## plotting residual errors in training data plt.scatter(reg.predict(X_train), reg.predict(X_train) - y_train, color="green", s=10, label='Train data') ## plotting residual errors in test data plt.scatter(reg.predict(X_test), reg.predict(X_test) - y_test, color="blue", s=10, label='Test data') ## plotting line for zero residual error plt.hlines(y=0, xmin=0, xmax=50, linewidth=2) ## plotting legend plt.legend(loc='upper right') ## plot title plt.title("Residual errors") return reg
5,331,093
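# Hypothetical end-to-end use of residual_error() above on synthetic data,
# following the train_test_split recipe from its docstring. plt.show() is left
# to the caller since the function only draws onto the current figure.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=100)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
model = residual_error(X_train, X_test, y_train, y_test, reg="linear")
plt.show()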
def next_row(update, context): """Increase the row, append one if necessary.""" cid = update.message.chat_id user = ClassDB.get_user(cid) row = context.chat_data["row"] + 1 context.chat_data["col"] = 0 # append row when index exceeded if row == len(user.temp_question.keyboard): user.temp_question.keyboard.append([]) context.chat_data["row"] = row
5,331,094
def ListMethods(dev): """List user-callable methods for the device. Example: >>> ListMethods(phi) """ dev = session.getDevice(dev, Device) items = [] listed = builtins.set() def _list(cls): if cls in listed: return listed.add(cls) for name, (args, doc, mcls, is_user) in sorted(cls.methods.items()): if cls is mcls and is_user: items.append((dev.name + '.' + name + args, cls.__name__, doc)) for base in cls.__bases__: if issubclass(base, (Device, DeviceMixinBase)): _list(base) _list(dev.__class__) dev.log.info('Device methods:') printTable(('method', 'from class', 'description'), items, session.log.info)
5,331,095
def centerSquare(pil_img: Image.Image): """Adds padding on both sides to make an image square. (Centered)""" pil_img = pil_img.convert('RGBA') # ensure transparency background_color = (0, 0, 0, 0) width, height = pil_img.size if width == height: return pil_img elif width > height: result = Image.new(pil_img.mode, (width, width), background_color) result.paste(pil_img, (0, (width - height) // 2)) return result else: result = Image.new(pil_img.mode, (height, height), background_color) result.paste(pil_img, ((height - width) // 2, 0)) return result
5,331,096
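# Quick check for centerSquare() above: a 200x100 image gains 50 px of
# transparent padding top and bottom, yielding a 200x200 RGBA square.
from PIL import Image
img = Image.new('RGB', (200, 100), (255, 0, 0))
square = centerSquare(img)
print(square.size, square.mode)  # (200, 200) RGBA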
def _convert_steplist_to_string(step_data): """Converts a list of step data into a single string. Parameters ---------- step_data : list List of step data Returns ------- str A space-delimited string where the first value is on its own line and a newline follows every sixth value thereafter. """ text = '' for i, datum in enumerate(step_data): if i == 0: text += f'\n{datum}\n' elif i % 6 == 0: text += f'{datum}\n' else: text += f'{datum} ' return text
5,331,097
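# Worked example for _convert_steplist_to_string() above: the first value gets
# its own line (after a leading newline), then six values per line thereafter.
print(_convert_steplist_to_string(list(range(8))))
#
# 0
# 1 2 3 4 5 6
# 7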
def get_global_public_delegated_prefix(project: Optional[str] = None, public_delegated_prefix: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGlobalPublicDelegatedPrefixResult: """ Returns the specified global PublicDelegatedPrefix resource. """ __args__ = dict() __args__['project'] = project __args__['publicDelegatedPrefix'] = public_delegated_prefix if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('google-native:compute/beta:getGlobalPublicDelegatedPrefix', __args__, opts=opts, typ=GetGlobalPublicDelegatedPrefixResult).value return AwaitableGetGlobalPublicDelegatedPrefixResult( creation_timestamp=__ret__.creation_timestamp, description=__ret__.description, fingerprint=__ret__.fingerprint, ip_cidr_range=__ret__.ip_cidr_range, is_live_migration=__ret__.is_live_migration, kind=__ret__.kind, name=__ret__.name, parent_prefix=__ret__.parent_prefix, public_delegated_sub_prefixs=__ret__.public_delegated_sub_prefixs, region=__ret__.region, self_link=__ret__.self_link, status=__ret__.status)
5,331,098
def evol_indices( out, msa_data_folder, msa_list, protein_index, theta_reweighting, model_parameters_location, computation_mode, num_samples_compute_evol_indices, batch_size, device, ): """ Compute evolutionary indices """ # Generate paths to output folders and make new folders if necessary MSA_weights_location = os.path.join(out, "msa_weights") VAE_checkpoint_location = os.path.join(out, "model") all_singles_mutations_folder = os.path.join(out, "mutations") output_evol_indices_location = os.path.join(out, "evol_indices") os.makedirs(all_singles_mutations_folder, exist_ok=True) os.makedirs(output_evol_indices_location, exist_ok=True) compute_evol_indices( MSA_data_folder=msa_data_folder, MSA_list=msa_list, protein_index=protein_index, MSA_weights_location=MSA_weights_location, theta_reweighting=theta_reweighting, VAE_checkpoint_location=VAE_checkpoint_location, model_parameters_location=model_parameters_location, computation_mode=computation_mode, all_singles_mutations_folder=all_singles_mutations_folder, mutations_location=all_singles_mutations_folder, output_evol_indices_location=output_evol_indices_location, num_samples_compute_evol_indices=num_samples_compute_evol_indices, batch_size=batch_size, device=device, )
5,331,099