Dataset columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def test_setup_dirs():
    """Test that all GOATOOLS package dirs are in the setup.py file"""
    pkgs_setup = set(m for m in PACKAGES if 'test_' not in m)
    pkgs_dirs = _get_pkgmods()
    assert pkgs_dirs.issubset(pkgs_setup), _errmsg(pkgs_setup, pkgs_dirs)
    print('**NOTE: TEST PASSED')
21,800
def config_context(**new_config):
    """** EXPERIMENTAL FEATURE ** Inspired by sklearn's ``config_context``"""
    old_config = asdict(Config)
    Config.update(new=new_config)
    try:
        yield
    finally:
        Config.update(new=old_config)
21,801
def split_on_first_brace(input, begin_brace="{", end_brace="}", error_replacement="brace_error"):
    """
    input: string with {Something1} Something2
    output: tuple (Something1,Something2)
    """
    if error_replacement == "chapter_error":
        print(input[:20])
    input = remove_empty_at_begin(input)
    if len(input) == 0:
        #raise RuntimeError("hi")
        print("first brace empty string ERROR")
        return error_replacement, input
    if input[0] != begin_brace:
        print("first brace NOT Brace ERROR")
        return error_replacement, input
    brace_count = 0
    out1 = ""
    for elem in input:
        out1 += elem
        if elem == begin_brace:
            brace_count = brace_count + 1
        if elem == end_brace:
            brace_count = brace_count - 1
            if brace_count == 0:
                break
    out2 = input[len(out1):]
    out1 = out1[1:-1]
    return out1, out2
21,802
def find_files(folder_path: str, pattern: str, maxdepth: int = 1):
    """
    Read the absolute path of files under a folder
    TODO: make it recursive
    """
    assert isinstance(folder_path, str), 'folder path must be a string'
    assert maxdepth >= 0
    if maxdepth == 0:
        return []
    res = []
    for file_name in os.listdir(folder_path):
        if file_name.startswith('__') or file_name.startswith('.'):
            continue
        abs_path = osp.join(folder_path, file_name)
        if osp.isfile(abs_path):
            if re.search(pattern, file_name):
                res.append(abs_path)
        elif osp.isdir(abs_path):
            sub_list = find_files(abs_path, pattern, maxdepth - 1)
            res += sub_list
    return res
21,803
def test_encryption_materials_cache_in_grace_period_acquire_lock(): """Test encryption grace period behavior. When the TTL is GRACE_PERIOD and we successfully acquire the lock for retrieving new materials, we call to the provider store for new materials. """ store = MockProviderStore() name = "material" provider = CachingMostRecentProvider(provider_store=store, material_name=name, version_ttl=0.0) provider._grace_period = 10.0 test1 = provider.encryption_materials(sentinel.encryption_context_1) assert test1 is sentinel.material_0_encryption assert provider._version == 0 assert len(provider._cache._cache) == 1 # On the first call, we expect calls to each of the provider's APIs expected_calls = [ ("max_version", name), ("get_or_create_provider", name, 0), ("version_from_material_description", 0), ] assert store.provider_calls == expected_calls provider._lock = MagicMock() provider._lock.acquire.return_value = True test2 = provider.encryption_materials(sentinel.encryption_context_1) assert test2 is sentinel.material_0_encryption assert provider._version == 0 assert len(provider._cache._cache) == 1 # On the second call, we acquired the lock so we should have tried to retrieve new materials (note no extra call # to get_or_create_provider, because the version has not changed) expected_calls.append(("max_version", name)) expected_calls.append(("version_from_material_description", 0)) assert store.provider_calls == expected_calls
21,804
def check_count(value, total_count, dimension_type):
    """check the value for count."""
    value = validate(value, "count", int)
    if value > total_count:
        raise ValueError(
            f"Cannot set the count, {value}, more than the number of coordinates, "
            f"{total_count}, for the {dimension_type} dimensions."
        )
    if value < total_count:
        warnings.warn(f"The number of labels, {total_count}, are truncated to {value}.")
    return value
21,805
def test_get_all_pairs_indices():
    """check i < j < n"""
    ns = onp.random.randint(5, 50, 10)
    for n in ns:
        inds_i, inds_j = get_all_pairs_indices(n)
        assert (inds_i < inds_j).all()
        assert (inds_j < n).all()
21,806
def request_certificate(request): """Request the on-demand creation of a certificate for some user, course. A request doesn't imply a guarantee that such a creation will take place. We intentionally use the same machinery as is used for doing certification at the end of a course run, so that we can be sure users get graded and then if and only if they pass, do they get a certificate issued. """ if request.method == "POST": if request.user.is_authenticated(): # Enter your api key here xqci = CertificateGeneration( api_key=settings.APPSEMBLER_FEATURES['ACCREDIBLE_API_KEY'] ) username = request.user.username student = User.objects.get(username=username) course_key = CourseKey.from_string( request.POST.get('course_id') ) course = get_course(course_key) status = certificate_status_for_student( student, course_key)['status'] if status in [CertificateStatuses.unavailable, CertificateStatuses.notpassing, CertificateStatuses.error]: logger.info( 'Grading and certification requested for user {} in course {} via /request_certificate call'.format(username, course_key)) status = xqci.add_cert(student, course_key, course=course) return HttpResponse( json.dumps( {'add_status': status} ), content_type='application/json') return HttpResponse( json.dumps( {'add_status': 'ERRORANONYMOUSUSER'} ), content_type='application/json')
21,807
def get_useable_checkers():
    """
    List the available checker plugins
    :return:
    """
    useable_checkers = list()
    for (checker_name, checker_instance) in CHECKER_INSTANCE_DICT.items():
        if checker_instance.useable:
            useable_checkers.append(checker_instance)
    return useable_checkers
21,808
def genBubbleChart(df: pd.DataFrame, years, title: str, quartile: str, name: str, mlTitle: str): """Generates bubble scatter plot for avg pdsi color - precipAvg mean of all counties present for the year size - pdsiAvg mean of all counties present for the year y - number of counties in lower pdsi quartile x - year """ x = [] y = [] size = [] color = [] for year in years: currentYearData = df[df['year'] == year] x.append(year) y.append(currentYearData['pdsiAvg'].count()) preSize = currentYearData['pdsiAvg'].sum() * -1 if (preSize < 0): size.append(1) else: size.append(abs(currentYearData['pdsiAvg'].mean())) color.append(currentYearData['precipAvg'].mean()) d = {'year': x, 'countyAmt': y, 'size': size, 'color': color} df = pd.DataFrame(data=d) knNeighbor(df, title, name, mlTitle, quartile) fig = px.scatter( df, x='year', y='countyAmt', trendline='lowess', color='color', size='size', opacity=0.7, title=title, color_continuous_scale="Viridis_r" ) fig.update_layout(coloraxis_colorbar=dict( title='Precipitation Avg', thicknessmode='pixels', thickness=20, lenmode='pixels', len=200, yanchor='top', y=1, ticks='inside', ticksuffix=' inches', dtick=0.5 )) # plotly figure layout fig.update_layout(xaxis_title='Year', yaxis_title='Number of Counties {0} PDSI Avg'.format(quartile)) exportPlotlyPNG(fig, name, 'visualizations/bubbleCharts')
21,809
def get_training_roidb(imdb):
    """Returns a roidb (Region of Interest database) for use in training."""
    if cfg.TRAIN.USE_FLIPPED:
        print 'Appending horizontally-flipped training examples...'
        imdb.append_flipped_images()
        print 'done'
    print 'Preparing training data...'
    wrdl_roidb.prepare_roidb(imdb)
    print 'done'
    return imdb.roidb
21,810
def _used_in_calls(schedule_name: str, schedule: ScheduleBlock) -> bool:
    """Recursively find if the schedule calls a schedule with name ``schedule_name``.

    Args:
        schedule_name: The name of the callee to identify.
        schedule: The schedule to parse.

    Returns:
        True if ``schedule`` calls a ``ScheduleBlock`` with name ``schedule_name``.
    """
    blocks_have_schedule = False

    for block in schedule.blocks:
        if isinstance(block, Call):
            if block.subroutine.name == schedule_name:
                return True
            else:
                blocks_have_schedule = blocks_have_schedule or _used_in_calls(
                    schedule_name, block.subroutine
                )

        if isinstance(block, ScheduleBlock):
            blocks_have_schedule = blocks_have_schedule or _used_in_calls(schedule_name, block)

    return blocks_have_schedule
21,811
def font_size_splitter(font_map): """ Split fonts to 4 category (small,medium,large,xlarge) by maximum length of letter in each font. :param font_map: input fontmap :type font_map : dict :return: splitted fonts as dict """ small_font = [] medium_font = [] large_font = [] xlarge_font = [] fonts = set(font_map.keys()) - set(RANDOM_FILTERED_FONTS) for font in fonts: length = max(map(len, font_map[font][0].values())) if length <= FONT_SMALL_THRESHOLD: small_font.append(font) elif length > FONT_SMALL_THRESHOLD and length <= FONT_MEDIUM_THRESHOLD: medium_font.append(font) elif length > FONT_MEDIUM_THRESHOLD and length <= FONT_LARGE_THRESHOLD: large_font.append(font) else: xlarge_font.append(font) return { "small_list": small_font, "medium_list": medium_font, "large_list": large_font, "xlarge_list": xlarge_font}
21,812
def get_candidate_set_size( mapped_triples: MappedTriples, restrict_entities_to: Optional[Collection[int]] = None, restrict_relations_to: Optional[Collection[int]] = None, additional_filter_triples: Union[None, MappedTriples, List[MappedTriples]] = None, num_entities: Optional[int] = None, ) -> pandas.DataFrame: """ Calculate the candidate set sizes for head/tail prediction for the given triples. :param mapped_triples: shape: (n, 3) the evaluation triples :param restrict_entities_to: The entity IDs of interest. If None, defaults to all entities. cf. :func:`restrict_triples`. :param restrict_relations_to: The relations IDs of interest. If None, defaults to all relations. cf. :func:`restrict_triples`. :param additional_filter_triples: shape: (n, 3) additional filter triples besides the evaluation triples themselves. cf. `_prepare_filter_triples`. :param num_entities: the number of entities. If not given, this number is inferred from all triples :return: columns: "index" | "head" | "relation" | "tail" | "head_candidates" | "tail_candidates" a dataframe of all evaluation triples, with the number of head and tail candidates """ # optionally restrict triples (nop if no restriction) mapped_triples = restrict_triples( mapped_triples=mapped_triples, entities=restrict_entities_to, relations=restrict_relations_to, ) # evaluation triples as dataframe columns = [LABEL_HEAD, LABEL_RELATION, LABEL_TAIL] df_eval = pandas.DataFrame( data=mapped_triples.numpy(), columns=columns, ).reset_index() # determine filter triples filter_triples = prepare_filter_triples( mapped_triples=mapped_triples, additional_filter_triples=additional_filter_triples, ) # infer num_entities if not given if restrict_entities_to: num_entities = len(restrict_entities_to) else: # TODO: unique, or max ID + 1? num_entities = num_entities or filter_triples[:, [0, 2]].view(-1).unique().numel() # optionally restrict triples filter_triples = restrict_triples( mapped_triples=filter_triples, entities=restrict_entities_to, relations=restrict_relations_to, ) df_filter = pandas.DataFrame( data=filter_triples.numpy(), columns=columns, ) # compute candidate set sizes for different targets # TODO: extend to relations? for target in [LABEL_HEAD, LABEL_TAIL]: total = num_entities group_keys = [c for c in columns if c != target] df_count = df_filter.groupby(by=group_keys).agg({target: "count"}) column = f"{target}_candidates" df_count[column] = total - df_count[target] df_count = df_count.drop(columns=target) df_eval = pandas.merge(df_eval, df_count, on=group_keys, how="left") df_eval[column] = df_eval[column].fillna(value=total) return df_eval
21,813
def add_new_user(user_info: dict): """ Add a new user to the database from first oidc login. First check if user with the same email exists. If so, add the auth_id to the user. Args: user_info (dict): Information about the user """ db_user = flask.g.db["users"].find_one({"email": user_info["email"]}) if db_user: db_user["auth_ids"].append(user_info["auth_id"]) result = flask.g.db["users"].update_one( {"email": user_info["email"]}, {"$set": {"auth_ids": db_user["auth_ids"]}} ) if not result.acknowledged: flask.current_app.logger.error( "Failed to add new auth_id to user with email %s", user_info["email"] ) flask.Response(status=500) else: utils.make_log("user", "edit", "Add OIDC entry to auth_ids", db_user, no_user=True) else: new_user = structure.user() new_user["email"] = user_info["email"] new_user["name"] = user_info["name"] new_user["auth_ids"] = [user_info["auth_id"]] result = flask.g.db["users"].insert_one(new_user) if not result.acknowledged: flask.current_app.logger.error( "Failed to add user with email %s via oidc", user_info["email"] ) flask.Response(status=500) else: utils.make_log("user", "add", "Creating new user from OAuth", new_user, no_user=True)
21,814
def dm2skin_normalizeWeightsConstraint(x):
    """Constraint used in optimization that ensures the weights in the solution sum to 1"""
    return sum(x) - 1.0
21,815
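A minimal sketch of how a sum-to-one constraint like the one above is typically handed to scipy.optimize.minimize as an equality constraint; the objective and starting point here are hypothetical.

import numpy as np
from scipy.optimize import minimize

def normalize_weights_constraint(x):
    # same idea as dm2skin_normalizeWeightsConstraint: zero when the weights sum to 1
    return sum(x) - 1.0

# hypothetical objective: keep the weights close to a target vector
target = np.array([0.7, 0.2, 0.1])
objective = lambda x: np.sum((x - target) ** 2)

result = minimize(
    objective,
    x0=np.full(3, 1.0 / 3.0),
    constraints=[{"type": "eq", "fun": normalize_weights_constraint}],
)
print(result.x, result.x.sum())  # optimized weights summing to ~1.0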
def _load_bitmap(filename):
    """
    Load a bitmap file from the backends/images subdirectory in which the
    matplotlib library is installed. The filename parameter should not
    contain any path information as this is determined automatically.

    Returns a wx.Bitmap object
    """
    basedir = os.path.join(rcParams['datapath'], 'images')
    bmpFilename = os.path.normpath(os.path.join(basedir, filename))
    if not os.path.exists(bmpFilename):
        raise IOError('Could not find bitmap file "%s"; dying' % bmpFilename)
    bmp = wx.Bitmap(bmpFilename)
    return bmp
21,816
def figure_9(): """ Figure 9: Beach width versus storm duration for natural dune simulations colored by the change in dune volume (red: erosion, blue: accretion), similar to Figure 7 but with the y-axis re-scaled based on the initial beach width in each simulation. Each row of plots represents a different storm intensity (increasing from top to bottom) and each column of plots represents a different dune configuration. Because simulations for profiles having a fenced dune are synthesized using the toes-aligned configuration and therefore with a narrower beach with the toes-aligned and simulations with a fenced dune do not have any variations in the beach width, they are not included in this analysis. Highlighted regions (cyan line) indicate where the dune was inundated. """ # List of experiments in the order they will be plotted in experiments = ['Crests Joined Half Surge', 'Heels Joined Half Surge', 'Crests Joined Normal Surge', 'Heels Joined Normal Surge', 'Crests Joined One Half Surge', 'Heels Joined One Half Surge'] # Setup the figure fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(nrows=3, ncols=2, sharex='all', sharey='all', figsize=(figure_inches * 2, figure_inches * 1.5), dpi=figure_dpi) axes = [ax1, ax2, ax3, ax4, ax5, ax6] # Loop through the experiments and axes and plot for ax, experiment in zip(axes, experiments): # Load the specific data for the current experiment _, dune_ratio, use_stretches, _ = load_plot_data(experiment) d_volume, _, _, beach_width, _, _ = load_volume_loss(experiment) # Plot the phase diagrams plot = volume_loss_phases(ax=ax, x=use_stretches, y=beach_width, z=d_volume) # Add a colorbar volume_loss_colorbar(fig=fig, plot=plot) # Set the x-axes for ax in [axes[-2], axes[-1]]: ax.set_xlim(left=0, right=50) ax.set_xticks([0, 25, 50]) ax.set_xlabel('+Duration (hr)', **font) # Set the y-axes for ax, modifier in zip([axes[0], axes[2], axes[4]], [0.5, 1.0, 1.5]): ax.set_ylim(bottom=0, top=60) ax.set_ylabel(r'$\bf{' + str(modifier) + 'x\ Surge}$\n' + 'Width$_{Beach}$ (m)', **font) # Set the titles for ax, title in zip([axes[0], axes[1]], ['Crests Aligned', 'Heels Aligned']): ax.set_title(title, **font) # Save and close the figure save_and_close(fig=fig, title='Figure 9')
21,817
def get_auth(): """ POST request to users/login, returns auth token """ try: url_user_login = f"https://{url_core_data}/users/login" json = { "username": creds_name, "password": creds_pw } headers = { "Accept": "application/json" } r = requests.post(url_user_login, headers=headers, json=json, verify=False) response = r.json() code = r.status_code token = response["token"] # print(f"RESPONSE: {response}") # print(f"STATUS_CODE: {code}") # print(f"TOKEN: {token}") return token except Exception as e: auth_err_msg = f"Error authenticating with the DIVA API: \n\ {e}" logger.error(auth_err_msg)
21,818
def vector_between_points(P, Q):
    """ vector between initial point P and terminal point Q """
    return vector_subtract(Q, P)
21,819
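A small worked example of the same idea with NumPy, assuming vector_subtract above is plain element-wise subtraction.

import numpy as np

P = np.array([1.0, 2.0, 3.0])
Q = np.array([4.0, 6.0, 3.0])

# vector from initial point P to terminal point Q, i.e. Q - P
v = Q - P
print(v)  # [3. 4. 0.]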
def execve(path, args, env):
    """Execute a new program, replacing the current process.

    :type path: bytes | unicode
    :type args: collections.Iterable
    :type env: collections.Mapping
    :rtype: None
    """
    pass
21,820
def before_train(loaded_train_model, train_model, train_sess, global_step,
                 hparams, log_f):
    """Misc tasks to do before training."""
    stats = init_stats()
    info = {"train_ppl": 0.0, "speed": 0.0, "avg_step_time": 0.0,
            "avg_grad_norm": 0.0, "avg_train_sel": 0.0,
            "learning_rate": loaded_train_model.learning_rate.eval(
                session=train_sess)}
    start_train_time = time.time()
    print_out("# Start step %d, lr %g, %s" %
              (global_step, info["learning_rate"], time.ctime()), log_f)

    # Initialize all of the iterators
    skip_count = hparams.qe_batch_size * hparams.epoch_step
    print_out("# Init train iterator, skipping %d elements" % skip_count)
    train_sess.run(
        train_model.iterator.initializer,
        feed_dict={train_model.skip_count_placeholder: skip_count})
    return stats, info, start_train_time
21,821
def reg_file_comp(ref_file, comp_file): """Compare the reference file 'ref_file' with 'comp_file'. The order of these two files matter. The ref_file MUST be given first. Only values specified by reg_write() are compared. All other lines are ignored. Floating point values are compared based on rel_tol and abs_tol""" all_ref_lines = [] ref_values = [] comp_values = [] try: f = open(ref_file, "r") except IOError: print("File %s was not found. Cannot do comparison." % (ref_file)) return REG_ERROR for line in f.readlines(): all_ref_lines.append(line) if line[0:6] == "@value": ref_values.append(line) # end for f.close() try: f = open(comp_file, "r") except IOError: print("File %s was not found. Cannot do comparison." % (comp_file)) return REG_ERROR for line in f.readlines(): if line[0:6] == "@value": comp_values.append(line) # end for f.close() # Copy the comp_file to compe_file.orig os.system("cp %s %s.orig" % (comp_file, comp_file)) # We must check that we have the same number of @value's to compare: if len(ref_values) != len(comp_values): print("Error: number of @value lines in file not the same!") return REG_FILES_DO_NOT_MATCH # end if # Open the (new) comp_file: f = open(comp_file, "w") # Loop over all the ref_lines, for value lines, do the # comparison. If comparison is ok, write the ref line, otherwise # write orig line. j = 0 res = REG_FILES_MATCH for i in range(len(all_ref_lines)): line = all_ref_lines[i] if line[0:6] == "@value": if _reg_str_comp(line, comp_values[j]) is False: f.write(comp_values[j]) res = REG_FILES_DO_NOT_MATCH else: f.write(line) # end if j += 1 else: f.write(line) # end if # end for f.close() return res
21,822
def evaluate(indir, split, langs, truncate, cutoff, sort_lang, print_cm, fasttext_model): """ Evaluate language prediction performance. """ if langs is not None: langs = {l.strip() for l in langs.split(',')} in_langs = sorted([l for l in os.listdir(indir) if langs is None or l in langs]) if fasttext_model: try: import fasttext except ModuleNotFoundError: click.echo('FastText needs to be installed if --fasttext-model is set.', err=True) click.echo('Run "pip install fasttext" to install it.', err=True) return # FastText prints useless warnings after loading a model, so silence its print method to make it shut up # See: https://github.com/facebookresearch/fastText/issues/909 fasttext.FastText.eprint = lambda x: None fasttext_model = fasttext.load_model(fasttext_model) recall_matrix = defaultdict(list) precision_matrix = defaultdict(list) confusion_matrix = defaultdict(lambda: defaultdict(int)) for lang in tqdm(in_langs, desc='Evaluating languages', unit='language', leave=False): for line in tqdm(open(os.path.join(indir, lang, split + '.txt'), 'r'), desc=f'Predicting examples for {lang}', unit=' examples', leave=False): if truncate: line = line[:truncate] plang = rlang.detect_fast(line, cutoff=cutoff, langs=langs)[0] if plang == 'unknown': if fasttext_model: ft_pred = fasttext_model.predict(line.replace('\n', ' ')) plang = ft_pred[0][0].replace('__label__', '') if ft_pred else '-' else: plang = '-' recall_matrix[lang].append(plang == lang) precision_matrix[plang].append(plang == lang) confusion_matrix[lang][plang] += 1 acc = [] results = [] sum_examples = 0 for lang in in_langs: precision = sum(precision_matrix[lang]) / max(1, len(precision_matrix[lang])) recall = sum(recall_matrix[lang]) / max(1, len(recall_matrix[lang])) if precision + recall == 0: f1 = 0.0 else: f1 = 2.0 * (precision * recall) / (precision + recall) n_ex = len(recall_matrix[lang]) results.append((lang, precision, recall, f1, n_ex)) acc.append(recall * n_ex) sum_examples += n_ex click.echo('Lang, Precision, Recall, F1, Top Confusions, Num Examples') if not sort_lang: results.sort(key=lambda x: x[3], reverse=True) for r in results: click.echo(f'{r[0]}, {r[1]:.2f}, {r[2]:.2f}, {r[3]:.2f}, {r[4]}') acc = fsum(acc) / sum_examples click.echo(f'\nAccuracy: {acc:.2f}') if print_cm: label_width = max(len(l) for l in in_langs) col_width = max(max(max(len(str(l2)) for l2 in l1.values()) for l1 in confusion_matrix.values()), label_width) + 2 click.echo(f'\nConfusion matrix:\n' + (' ' * label_width), nl=False) for l in in_langs: click.echo(f'{l:>{col_width}}', nl=False) click.echo() for l1 in in_langs: click.echo(l1, nl=False) for l2 in in_langs: click.echo(f'{confusion_matrix[l1][l2]:>{col_width}}', nl=False) click.echo()
21,823
def test_egg_re():
    """Make sure egg_info_re matches."""
    egg_names_path = os.path.join(os.path.dirname(__file__), "eggnames.txt")
    with open(egg_names_path) as egg_names:
        for line in egg_names:
            line = line.strip()
            if line:
                assert egg_info_re.match(line), line
21,824
def methodInDB(method_name, dict_link, interface_db_cursor): #checks the database to see if the method exists already """ Method used to check the database to see if a method exists in the database returns a list [Boolean True/False of if the method exists in the db, dictionary link/ID] """ crsr = interface_db_cursor #splitting method into parts if "::" in method_name: method = method_name.split('::') cn = method[0].strip() mn = '::'.join(method[1:]).strip() else: cn = "Unknown" mn = method_name if dict_link == '': #dict link should only be empty on the initial call # search for any method with the same name and class crsr.execute("SELECT class_name, method_name, method_text, dict_link FROM methods WHERE class_name = ? AND method_name = ?", (cn, mn)) res = crsr.fetchall() if len(res) == 0: #method not in table return [False, ''] else: # found something, verify it is right if len(res) == 1: print('Method found in database.') if res[0][0] == 'Unknown': print(res[0][1]) else: print('::'.join(res[0][0:2])) print(res[0][2]) print('Is this the correct method? (Y/N)') #prompt the user to confirm that this is the right method k = input() k = k.strip() while( k not in ['N', 'n', 'Y', 'y' ] ): print('Invalid input, try again') k = input() if k == 'Y' or k == 'y': return [True, res[0][3]] elif k == 'N' or k == 'n': return [False, ''] elif len(res) > 1: print("\nMethod found in database") count = 1 for r in res: tmp = str(count) + ': ' print(tmp) if r[0] == 'Unknown': print(r[1]) else: print('::'.join(r[0:2])) print(r[2],'\n') count += 1 print('Which one of these is the correct method?\nPut 0 for none of them.') #if there are multiple versions of the method in the db # prompt the user to select which method is the right method, prints the method text k = input() try: k = int(k) except: k = -1 while( int(k) > len(res) or int(k) < 0 ): print("Invalid input: try again please") k = input() try: k = int(k) except: k = -1 if k == 0: return [False, ''] elif k > 0 and k <= len(res): return [True, res[k-1][3]] else: #there is a dict_link, can check for exact, usually what happens crsr.execute("SELECT class_name, method_name FROM methods WHERE class_name = ? AND method_name = ? AND dict_link = ?", (cn, mn, dict_link)) #simple sql select res = crsr.fetchall() if len(res) == 0: #method not in table return [False, dict_link] elif len(res) > 0: # we found something return [True, dict_link]
21,825
def dict_to_image(screen):
    """
    Takes a dict of room locations and their block type output by RunGame.
    Renders the current state of the game screen.
    """
    picture = np.zeros((51, 51))
    # Color tiles according to what they represent on screen:
    for tile in screen:
        pos_x, pos_y = tile
        if pos_x < 51 and pos_y < 51:
            if screen[tile] == 46:
                picture[pos_y][pos_x] = 0
            elif screen[tile] == 35:
                picture[pos_y][pos_x] = 240
            else:
                picture[pos_y][pos_x] = 150
    return picture
21,826
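A usage sketch for the dict_to_image function above with a tiny hand-made screen dict (assuming the definition above is in scope); the tile codes follow the function's convention (46 maps to 0, 35 to 240, anything else to 150).

import numpy as np

# hypothetical screen dict keyed by (x, y) positions
screen = {
    (0, 0): 46,   # '.' floor tile -> 0
    (1, 0): 35,   # '#' wall tile  -> 240
    (2, 0): 64,   # anything else  -> 150
}
picture = dict_to_image(screen)
print(picture[0, 0], picture[0, 1], picture[0, 2])  # 0.0 240.0 150.0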
def get_mobilenet(version, width_scale, model_name=None, pretrained=False, root=os.path.join('~', '.keras', 'models'), **kwargs): """ Create MobileNet or FD-MobileNet model with specific parameters. Parameters: ---------- version : str Version of SqueezeNet ('orig' or 'fd'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ if version == 'orig': channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 512], [1024, 1024]] first_stage_stride = False elif version == 'fd': channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 1024]] first_stage_stride = True else: raise ValueError("Unsupported MobileNet version {}".format(version)) if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] net = mobilenet( channels=channels, first_stage_stride=first_stage_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net
21,827
def channels(context):
    """
    *musicpd.org, client to client section:*

    ``channels``

    Obtain a list of all channels. The response is a list of "channel:" lines.
    """
    raise MpdNotImplemented
21,828
def test_svd_space_res(file_prefix='F_s'):
    """
    test SVD decomposition of spatial residuals by generating SVD,
    saving to file and reloading
    """
    from proteus.deim_utils import read_snapshots, generate_svd_decomposition
    ns = get_burgers_ns("test_svd_space_res", T=0.1, nDTout=10, archive_pod_res=True)
    failed = ns.calculateSolution("run_svd_space_res")
    assert not failed
    from proteus import Archiver
    archive = Archiver.XdmfArchive(".", "test_svd_space_res", readOnly=True)
    U, s, V = generate_svd_decomposition(archive, len(ns.tnList), 'spatial_residual0', file_prefix)
    S_svd = np.dot(U, np.dot(np.diag(s), V))
    #now load back in and test
    S = read_snapshots(archive, len(ns.tnList), 'spatial_residual0')
    npt.assert_almost_equal(S, S_svd)
21,829
def can_fuse_to(wallet):
    """We can only fuse to wallets that are p2pkh with HD generation.
    We do *not* need the private keys."""
    return isinstance(wallet, Standard_Wallet)
21,830
def _build_context(hps, encoder_outputs):
    """Compute feature representations for attention/copy.

    Args:
        hps: hyperparameters.
        encoder_outputs: outputs by the encoder RNN.

    Returns:
        Feature representation of [batch_size, seq_len, decoder_dim]
    """
    with tf.variable_scope("memory_context"):
        context = tf.layers.dense(
            encoder_outputs,
            units=hps.decoder_dim,
            activation=None,
            use_bias=False,
            kernel_initializer=tf.contrib.layers.xavier_initializer(),
            name="memory_projector")
    return context
21,831
def rf_local_divide(left_tile_col: Column_type, rhs: Union[float, int, Column_type]) -> Column:
    """Divide two Tiles cell-wise, or divide a Tile's cell values by a scalar"""
    if isinstance(rhs, (float, int)):
        rhs = lit(rhs)
    return _apply_column_function('rf_local_divide', left_tile_col, rhs)
21,832
def test_init_with_params(color, mode):
    """Tests the initialization of a Printer with custom parameters."""
    printer = Printer(Mode[mode], color)
    assert printer.mode == Mode[mode]
    assert printer.colored == color
21,833
def check_dependencies_ready(dependencies, start_date, dependencies_to_ignore): """Checks if every dependent pipeline has completed Args: dependencies(dict): dict from id to name of pipelines it depends on start_date(str): string representing the start date of the pipeline dependencies_to_ignore(list of str): dependencies to ignore if failed """ print 'Checking dependency at ', str(datetime.now()) dependency_ready = True # Convert date string to datetime object start_date = datetime.strptime(start_date, '%Y-%m-%d') for pipeline in dependencies.keys(): # Get instances of each pipeline instances = list_pipeline_instances(pipeline) failures = [] # Collect all pipeline instances that are scheduled for today instances_today = [] for instance in instances: date = datetime.strptime(instance[START_TIME], '%Y-%m-%dT%H:%M:%S') if date.date() == start_date.date(): instances_today.append(instance) # Dependency pipeline has not started from today if not instances_today: dependency_ready = False for instance in instances_today: # One of the dependency failed/cancelled if instance[STATUS] in FAILED_STATUSES: if dependencies[pipeline] not in dependencies_to_ignore: raise Exception( 'Pipeline %s (ID: %s) has bad status: %s' % (dependencies[pipeline], pipeline, instance[STATUS]) ) else: failures.append(dependencies[pipeline]) # Dependency is still running elif instance[STATUS] != FINISHED: dependency_ready = False return dependency_ready, failures
21,834
def factor_returns(factor_data, demeaned=True, group_adjust=False):
    """
    Compute the returns of a portfolio weighted by factor values.

    Weights are the demeaned factor values divided by the sum of their
    absolute values (so that total leverage is 1).

    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        A DataFrame with a MultiIndex of date (level 0) and asset (level 1),
        whose values include the factor value, the forward returns for each
        period, the factor quantile, the factor group (optional) and the
        factor weight (optional).
    demeaned : bool
        Is the factor analysis based on a long/short portfolio?
        If True, factor values are demeaned when computing the weights.
    group_adjust : bool
        Is the factor analysis based on a group (industry) neutral portfolio?
        If True, factor values are demeaned by group and date when computing
        the weights.

    Returns
    -------
    returns : pd.DataFrame
        Returns of the zero-exposure long/short portfolio for each period.
    """

    def to_weights(group, is_long_short):
        if is_long_short:
            demeaned_vals = group - group.mean()
            return demeaned_vals / demeaned_vals.abs().sum()
        else:
            return group / group.abs().sum()

    grouper = [factor_data.index.get_level_values('date')]
    if group_adjust:
        grouper.append('group')

    weights = factor_data.groupby(grouper)['factor'] \
        .apply(to_weights, demeaned)

    if group_adjust:
        weights = weights.groupby(level='date').apply(to_weights, False)

    weighted_returns = \
        factor_data[get_forward_returns_columns(factor_data.columns)] \
        .multiply(weights, axis=0)

    returns = weighted_returns.groupby(level='date').sum()

    return returns
21,835
def create_input_lambda(i):
    """Extracts off an object tensor from an input tensor"""
    return Lambda(lambda x: x[:, i])
21,836
def create_model_talos(params, time_steps, num_features, input_loss='mae', input_optimizer='adam', patience=3, monitor='val_loss', mode='min', epochs=100, validation_split=0.1): """Uses sequential model class from keras. Adds LSTM layer. Input samples, timesteps, features. Hyperparameters include number of cells, dropout rate. Output is encoded feature vector of the input data. Uses autoencoder by mirroring/reversing encoder to be a decoder.""" model = Sequential() model.add(LSTM(params['cells'], input_shape=(time_steps, num_features))) # one LSTM layer model.add(Dropout(params['dropout'])) model.add(RepeatVector(time_steps)) model.add(LSTM(params['cells'], return_sequences=True)) # mirror the encoder in the reverse fashion to create the decoder model.add(Dropout(params['dropout'])) model.add(TimeDistributed(Dense(num_features))) print(model.optimizer) model.compile(loss=input_loss, optimizer=input_optimizer) es = tf.keras.callbacks.EarlyStopping(monitor=monitor, patience=patience, mode=mode) history = model.fit( X_train, y_train, epochs=epochs, # just set to something high, early stopping will monitor. batch_size=params['batch_size'], # this can be optimized later validation_split=validation_split, # use 10% of data for validation, use 90% for training. callbacks=[es], # early stopping similar to earlier shuffle=False # because order matters ) return history, model
21,837
def parseAnswers(args, data, question_length = 0): """ parseAnswers(args, data): Parse all answers given to a query """ retval = [] # # Skip the headers and question # index = 12 + question_length logger.debug("question_length=%d total_length=%d" % (question_length, len(data))) if index >= len(data): logger.debug("parseAnswers(): index %d >= data length(%d), so no answers were received. Aborting." % ( index, len(data))) return(retval) # # Now loop through our answers. # while True: answer = {} logger.debug("parseAnswers(): Index is currently %d" % index) # # If we're doing a fake TTL, we also have to fudge the response header and overwrite # the original TTL. In this case, we're doing to overwrite it with the 4 byte string # of 0xDEADBEEF, so that it will be obvious upon inspection that this string was human-made. # if args.fake_ttl: ttl_index = index + 6 # # If the leading byte of the Answer Headers is zero (the question), then # the question was for a bad TLD, and the "pointer" is really just a single # byte, so go forward one byte less. # if data[index] == 0: ttl_index -= 1 data_new = bytes() logger.debug("parseAnswers(): --fake-ttl specified, forcing TTL to be -2") data_new = data[0:ttl_index] + struct.pack(">i", -2) + data[ttl_index + 4:] data = data_new answer["headers"] = parseAnswerHeaders(args, data[index:]) # # Advance our index to the start of the next answer, then put this entire # answer into answer["rddata_raw"] # index_old = index index_next = index + 12 + answer["headers"]["rdlength"] answer["rddata_raw"] = data[index:index_next] (answer["rddata"], answer["rddata_text"]) = parse_answer_body.parseAnswerBody(answer, index, data) index = index_next # # This is a bit of hack, but we want to grab the sanity data from the rddata # dictonary and put it into its own dictionary member so that the sanity # module can later extract it. # answer["sanity"] = {} if "sanity" in answer: answer["sanity"] = answer["rddata"]["sanity"] del answer["rddata"]["sanity"] # # Deleting the raw data because it will choke when convered to JSON # answer["rddata_hex"] = {} if "rddata_hex" in answer: answer["rddata_hex"] = output.formatHex(answer["rddata_raw"]) del answer["rddata_raw"] retval.append(answer) # # If we've run off the end of the packet, then break out of this loop # if index >= len(data): logger.debug("parseAnswer(): index %d >= data length (%d), stopping loop!" % (index, len(data))) break return(retval)
21,838
def ortho_init(scale=1.0): """ Orthogonal initialization for the policy weights :param scale: (float) Scaling factor for the weights. :return: (function) an initialization function for the weights """ # _ortho_init(shape, dtype, partition_info=None) def _ortho_init(shape, *_, **_kwargs): """Intialize weights as Orthogonal matrix. Orthogonal matrix initialization [1]_. For n-dimensional shapes where n > 2, the n-1 trailing axes are flattened. For convolutional layers, this corresponds to the fan-in, so this makes the initialization usable for both dense and convolutional layers. References ---------- .. [1] Saxe, Andrew M., James L. McClelland, and Surya Ganguli. "Exact solutions to the nonlinear dynamics of learning in deep linear """ # lasagne ortho init for tf shape = tuple(shape) if len(shape) == 2: flat_shape = shape elif len(shape) == 4: # assumes NHWC flat_shape = (np.prod(shape[:-1]), shape[-1]) # Added by Ronja elif len(shape) == 3: # assumes NWC flat_shape = (np.prod(shape[:-1]), shape[-1]) else: raise NotImplementedError gaussian_noise = np.random.normal(0.0, 1.0, flat_shape) u, _, v = np.linalg.svd(gaussian_noise, full_matrices=False) weights = u if u.shape == flat_shape else v # pick the one with the correct shape weights = weights.reshape(shape) return (scale * weights[:shape[0], :shape[1]]).astype(np.float32) return _ortho_init
21,839
def setup(bot: commands.Bot) -> None:
    """Load the Ping cog."""
    bot.add_cog(Ping(bot))
21,840
def get_rde_model(rde_version):
    """Get the model class of the specified rde_version.

    Factory method to return the model class based on the specified RDE version

    :param rde_version (str)

    :rtype model: NativeEntity
    """
    rde_version: semantic_version.Version = semantic_version.Version(rde_version)  # noqa: E501
    if rde_version.major == 1:
        return NativeEntity1X
    elif rde_version.major == 2:
        return NativeEntity2X
21,841
def pnorm(x, mu, sd):
    """
    Normal distribution PDF

    Args:
        * scalar: variable
        * scalar: mean
        * scalar: standard deviation

    Return type: scalar (probability density)
    """
    return math.exp(- ((x - mu) / sd) ** 2 / 2) / (sd * 2.5)
21,842
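A quick check on the approximation in pnorm above: the 2.5 in the denominator stands in for sqrt(2*pi) ≈ 2.5066, so the result is close to, but not exactly, the true normal density.

import math

def exact_normal_pdf(x, mu, sd):
    # textbook Gaussian density with the exact normalizing constant
    return math.exp(-((x - mu) / sd) ** 2 / 2) / (sd * math.sqrt(2 * math.pi))

# compare at the mean of a standard normal
approx = math.exp(-((0 - 0) / 1) ** 2 / 2) / (1 * 2.5)  # 0.4000 (pnorm's approximation)
exact = exact_normal_pdf(0, 0, 1)                        # 0.3989...
print(approx, exact)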
def getTransformToPlane(planePosition, planeNormal, xDirection=None): """Returns transform matrix from World to Plane coordinate systems. Plane is defined in the World coordinate system by planePosition and planeNormal. Plane coordinate system: origin is planePosition, z axis is planeNormal, x and y axes are orthogonal to z. """ import numpy as np import math # Determine the plane coordinate system axes. planeZ_World = planeNormal/np.linalg.norm(planeNormal) # Generate a plane Y axis by generating an orthogonal vector to # plane Z axis vector by cross product plane Z axis vector with # an arbitrarily chosen vector (that is not parallel to the plane Z axis). if xDirection: unitX_World = np.array(xDirection) unitX_World = unitX_World/np.linalg.norm(unitX_World) else: unitX_World = np.array([0,0,1]) angle = math.acos(np.dot(planeZ_World,unitX_World)) # Normalize between -pi/2 .. +pi/2 if angle>math.pi/2: angle -= math.pi elif angle<-math.pi/2: angle += math.pi if abs(angle)*180.0/math.pi>20.0: # unitX is not parallel to planeZ, we can use it planeY_World = np.cross(planeZ_World, unitX_World) else: # unitX is parallel to planeZ, use unitY instead unitY_World = np.array([0,1,0]) planeY_World = np.cross(planeZ_World, unitY_World) planeY_World = planeY_World/np.linalg.norm(planeY_World) # X axis: orthogonal to tool's Y axis and Z axis planeX_World = np.cross(planeY_World, planeZ_World) planeX_World = planeX_World/np.linalg.norm(planeX_World) transformPlaneToWorld = np.row_stack((np.column_stack((planeX_World, planeY_World, planeZ_World, planePosition)), (0, 0, 0, 1))) transformWorldToPlane = np.linalg.inv(transformPlaneToWorld) return transformWorldToPlane
21,843
def jp_runtime_dir(tmp_path):
    """Provides a temporary Jupyter runtime directory value."""
    return mkdir(tmp_path, "runtime")
21,844
def _softmax(X, n_samples, n_classes):
    """Derive the softmax of a 2D-array."""
    maximum = np.empty((n_samples, 1))
    for i in prange(n_samples):
        maximum[i, 0] = np.max(X[i])
    exp = np.exp(X - maximum)
    sum_ = np.empty((n_samples, 1))
    for i in prange(n_samples):
        sum_[i, 0] = np.sum(exp[i])
    return exp / sum_
21,845
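A plain-NumPy sanity check of the same computation; the prange calls above suggest a Numba-compiled context (an assumption), so this sketch uses vectorized NumPy instead of explicit loops.

import numpy as np

def softmax_numpy(X):
    # subtract the row-wise max for numerical stability, then normalize each row
    shifted = X - X.max(axis=1, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=1, keepdims=True)

X = np.array([[1.0, 2.0, 3.0],
              [0.0, 0.0, 0.0]])
probs = softmax_numpy(X)
print(probs.sum(axis=1))  # each row sums to 1.0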
def merge_dicts(dict1, dict2, dict_class=OrderedDict):
    """Merge dictionary ``dict2`` into ``dict1``"""

    def _merge_inner(dict1, dict2):
        for k in set(dict1.keys()).union(dict2.keys()):
            if k in dict1 and k in dict2:
                if isinstance(dict1[k], (dict, MutableMapping)) and isinstance(
                    dict2[k], (dict, MutableMapping)
                ):
                    yield k, dict_class(_merge_inner(dict1[k], dict2[k]))
                else:
                    # If one of the values is not a dict, you can't continue
                    # merging it. Value from second dict overrides one in
                    # first and we move on.
                    yield k, dict2[k]
            elif k in dict1:
                yield k, dict1[k]
            else:
                yield k, dict2[k]

    return dict_class(_merge_inner(dict1, dict2))
21,846
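A usage sketch for merge_dicts above (assuming its definition is in scope): nested mappings are merged recursively, and values from the second dict win on conflicts.

from collections import OrderedDict

# hypothetical configuration dicts
defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 6543}, "debug": True}

merged = merge_dicts(defaults, overrides)
print(merged["db"]["host"])  # 'localhost' (kept from defaults)
print(merged["db"]["port"])  # 6543 (overridden)
print(merged["debug"])       # True (overridden)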
def jaccard_overlap_numpy(box_a: numpy.ndarray, box_b: numpy.ndarray) -> numpy.ndarray:
    """Compute the jaccard overlap of two sets of boxes.

    The jaccard overlap is simply the intersection over union of two boxes.
    E.g.:
        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)

    Args:
        box_a: Multiple bounding boxes, Shape: [num_boxes,4]
        box_b: Single bounding box, Shape: [4]

    Return:
        jaccard overlap: Shape: [box_a.shape[0], box_a.shape[1]]
    """
    inter = intersect_numpy(box_a, box_b)
    area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])  # [A,B]
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])  # [A,B]
    union = area_a + area_b - inter
    return inter / union
21,847
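A small worked IoU example using the same formula as jaccard_overlap_numpy above, written standalone because intersect_numpy is defined elsewhere.

import numpy as np

def iou_single(box_a, box_b):
    # boxes are [x1, y1, x2, y2]; compute the intersection rectangle first
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)

print(iou_single(np.array([0, 0, 2, 2]), np.array([1, 1, 3, 3])))  # 1 / 7 ≈ 0.143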
def find_pkg(pkg):
    """ Find the package file in the repository """
    candidates = glob.glob('/repo/' + pkg + '*.rpm')
    if len(candidates) == 0:
        print("No candidates for: '{0}'".format(pkg))
    assert len(candidates) == 1
    return candidates[0]
21,848
def random_choice(lhs, ctx):
    """Element ℅
    (lst) -> random element of a
    (num) -> Random integer from 0 to a
    """
    if vy_type(lhs) == NUMBER_TYPE:
        return random.randint(0, lhs)
    return random.choice(iterable(lhs, ctx=ctx))
21,849
def ask(choices, message="Choose one from [{choices}]{default}{cancelmessage}: ",
        errormessage="Invalid input", default=None, cancel=False, cancelkey='c',
        cancelmessage='press {cancelkey} to cancel'):
    """
    ask is a shortcut to instantiate PickOne and use its .ask method
    """
    return PickOne(choices, message, errormessage, default, cancel, cancelkey,
                   cancelmessage).ask()
21,850
def cmc(ctx, scores, evaluation, **kargs):
    """Plot CMC (cumulative match characteristic curve).

    Graphical presentation of results of an identification task eval,
    plotting rank values on the x-axis and the probability of correct
    identification at or below that rank on the y-axis. The values for the
    axis will be computed using :py:func:`bob.measure.cmc`.

    You need to provide one or more development score file(s) for each
    experiment. You can also provide eval files along with dev files. If
    eval-scores are used, the flag `--eval` is required in that case.

    Files must be 4- or 5- columns format, see
    :py:func:`bob.bio.base.score.load.four_column` and
    :py:func:`bob.bio.base.score.load.five_column` for details.

    Examples:

        $ bob bio cmc -v dev-scores

        $ bob bio cmc -v dev-scores1 eval-scores1 dev-scores2 eval-scores2

        $ bob bio cmc -v -o my_roc.pdf dev-scores1 eval-scores1
    """
    process = bio_figure.Cmc(ctx, scores, evaluation, load.cmc)
    process.run()
21,851
def error_frame(msg=''):
    """Print debug info

    The correct way to do this is to use the logging framework which exposes
    all of these things to you, but you can access them using the Python
    inspect module
    """
    caller_frame = inspect.stack()[1]
    frame = caller_frame[0]
    frame_info = inspect.getframeinfo(frame)
    stderr.write('{}::{}: {}\n'.format(frame_info.function, frame_info.lineno, msg))
21,852
def test_delete_from(db_results): """Tests the delete_from function of the database """ for remote in db_results['remote']: DbManager.use_remote_db = remote DbManager.query_from_string( """DROP TABLE IF EXISTS temp;""", """CREATE TABLE temp( id int NOT NULL, PRIMARY KEY (id) );""") for i in range(5): DbManager.insert_into(table_name="temp", values=(i, )) count = DbManager.count_from(table_name="temp", where="id >= %s", where_args=(-1, )) assert count == 5 DbManager.delete_from(table_name="temp", where=" id >= %s", where_args=(3, )) count = DbManager.count_from(table_name="temp", where="id >= %s", where_args=(-1, )) assert count == 3 DbManager.delete_from(table_name="temp") count = DbManager.count_from(table_name="temp", where="id >= %s", where_args=(-1, )) assert count == 0 DbManager.query_from_string("""DROP TABLE temp;""")
21,853
def get_shares(depth): """ this is pretty janky, again, but simply grab the list of directories under /mnt/user0, an an unraid-specific shortcut to access shares """ rootdir = "/mnt/user0/" shares = [] pattern = "('\w+')" with os.scandir(rootdir) as p: depth -= 1 for entry in p: #yield entry.path if entry.is_dir() and depth > 0: sharematch = re.search(pattern, str(entry)) if sharematch: # extract share name utilizing the grouping regex and remove single quotes share_name = sharematch.group(1) share_name = str(share_name.replace("'","")) shares.append(share_name) shares.sort() return(shares)
21,854
def parse_cli_args(): """Function to parse the command-line arguments for PETRARCH2.""" __description__ = """ PETRARCH2 (https://openeventdata.github.io/) (v. 1.0.0) """ aparse = argparse.ArgumentParser(prog='petrarch2', description=__description__) sub_parse = aparse.add_subparsers(dest='command_name') parse_command = sub_parse.add_parser('parse', help=""" DEPRECATED Command to run the PETRARCH parser. Do not use unless you've used it before. If you need to process unparsed text, see the README""", description="""DEPRECATED Command to run the PETRARCH parser. Do not use unless you've used it before.If you need to process unparsed text, see the README""") parse_command.add_argument('-i', '--inputs', help='File, or directory of files, to parse.', required=True) parse_command.add_argument('-P', '--parsed', action='store_true', default=False, help="""Whether the input document contains StanfordNLP-parsed text.""") parse_command.add_argument('-o', '--output', help='File to write parsed events.', required=True) parse_command.add_argument('-c', '--config', help="""Filepath for the PETRARCH configuration file. Defaults to PETR_config.ini""", required=False) batch_command = sub_parse.add_parser('batch', help="""Command to run a batch process from parsed files specified by an optional config file.""", description="""Command to run a batch process from parsed files specified by an optional config file.""") batch_command.add_argument('-c', '--config', help="""Filepath for the PETRARCH configuration file. Defaults to PETR_config.ini""", required=False) batch_command.add_argument('-i', '--inputs', help="""Filepath for the input XML file. Defaults to data/text/Gigaword.sample.PETR.xml""", required=False) batch_command.add_argument('-o', '--outputs', help="""Filepath for the input XML file. Defaults to data/text/Gigaword.sample.PETR.xml""", required=False) nulloptions = aparse.add_mutually_exclusive_group() nulloptions.add_argument( '-na', '--nullactors', action='store_true', default=False, help="""Find noun phrases which are associated with a verb generating an event but are not in the dictionary; an integer giving the maximum number of words follows the command. Does not generate events. """, required=False) nulloptions.add_argument('-nv', '--nullverbs', help="""Find verb phrases which have source and targets but are not in the dictionary. Does not generate events. """, required=False, action="store_true", default=False) args = aparse.parse_args() return args
21,855
def register_webapi_capabilities(capabilities_id, caps): """Register a set of web API capabilities. These capabilities will appear in the dictionary of available capabilities with the ID as their key. A capabilities_id attribute passed in, and can only be registerd once. A KeyError will be thrown if attempting to register a second time. Args: capabilities_id (unicode): A unique ID representing this collection of capabilities. This can only be used once until unregistered. caps (dict): The dictionary of capabilities to register. Each key msut be a string, and each value should be a boolean or a dictionary of string keys to booleans. Raises: KeyError: The capabilities ID has already been used. """ if not capabilities_id: raise ValueError('The capabilities_id attribute must not be None') if capabilities_id in _registered_capabilities: raise KeyError('"%s" is already a registered set of capabilities' % capabilities_id) if capabilities_id in _capabilities_defaults: raise KeyError('"%s" is reserved for the default set of capabilities' % capabilities_id) _registered_capabilities[capabilities_id] = caps
21,856
def _check_eq(value):
    """Returns a function that checks whether the value equals a particular
    integer.
    """
    return lambda x: int(x) == int(value)
21,857
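A brief usage sketch of the closure returned by _check_eq above (assuming the definition is in scope); both sides are coerced with int(), so string digits compare equal.

is_five = _check_eq(5)
print(is_five(5))    # True
print(is_five("5"))  # True  (both sides go through int())
print(is_five(6))    # False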
def query_data(session, agency_code, start, end, page_start, page_stop):
    """ Request D2 file data

        Args:
            session - DB session
            agency_code - FREC or CGAC code for generation
            start - Beginning of period for D file
            end - End of period for D file
            page_start - Beginning of pagination
            page_stop - End of pagination
    """
    rows = initial_query(session).\
        filter(file_model.is_active.is_(True)).\
        filter(file_model.awarding_agency_code == agency_code).\
        filter(func.cast_as_date(file_model.action_date) >= start).\
        filter(func.cast_as_date(file_model.action_date) <= end).\
        slice(page_start, page_stop)
    return rows
21,858
def test_settings_files():
    """Should load settings from these test files."""

    def _callback(action: kuber.CommandAction):
        s = action.bundle.settings
        assert s.foo and s.foo == s.spam
        assert s.bar and s.bar == s.ham
        assert s.baz and s.baz == s.eggs

    cb = MagicMock()
    cb.side_effect = _callback
    kuber.cli(
        cb,
        arguments=[
            "render",
            f'--settings={os.path.join(MY_DIRECTORY, "settings.yaml")}',
            f'--settings={os.path.join(MY_DIRECTORY, "settings.json")}',
        ],
    )
    cb.assert_called_once()
21,859
def register_store(store_module, schemes):
    """
    Registers a store module and a set of schemes
    for which a particular URI request should be routed.

    :param store_module: String representing the store module
    :param schemes: List of strings representing schemes for
                    which this store should be used in routing
    """
    try:
        utils.import_class(store_module + '.Store')
    except exception.NotFound:
        raise BackendException('Unable to register store. Could not find '
                               'a class named Store in module %s.'
                               % store_module)
    REGISTERED_STORE_MODULES.append(store_module)
    scheme_map = {}
    for scheme in schemes:
        scheme_map[scheme] = store_module
    location.register_scheme_map(scheme_map)
21,860
def ProfitBefTax(t):
    """Profit before Tax"""
    return (PremIncome(t)
            + InvstIncome(t)
            - BenefitTotal(t)
            - ExpsTotal(t)
            - ChangeRsrv(t))
21,861
def scroll(amount_x=0, amount_y=0):
    """Scroll the buffer

    Will scroll by 1 pixel horizontally if no arguments are supplied.

    :param amount_x: Amount to scroll along x axis (default 0)
    :param amount_y: Amount to scroll along y axis (default 0)

    :Examples:

    Scroll vertically::

        microdotphat.scroll(amount_y=1)

    Scroll diagonally::

        microdotphat.scroll(amount_x=1,amount_y=1)
    """
    global _scroll_x, _scroll_y

    if amount_x == 0 and amount_y == 0:
        amount_x = 1

    _scroll_x += amount_x
    _scroll_y += amount_y
    _scroll_x %= _buf.shape[1]
    _scroll_y %= _buf.shape[0]
21,862
def myCommand(): """ listens to commands spoken through microphone (audio) :returns text extracted from the speech which is our command """ r = sr.Recognizer() with sr.Microphone() as source: print('Say something...') r.pause_threshold = 1 r.adjust_for_ambient_noise(source, duration=1) # removed "duration=1" argument to reduce wait time audio = r.listen(source) try: command = r.recognize_google(audio).lower() print('You said: ' + command + '\n') #loop back to continue to listen for commands if unrecognizable speech is received except sr.UnknownValueError: print('....') command = myCommand() except sr.RequestError as e: print("????") return command
21,863
def home():
    """Display the home screen."""
    with open("front/home.md") as file:
        text = file.read()
    st.markdown(text, unsafe_allow_html=True)
21,864
def main():
    """ Runs the gamefix, with splash if zenity or cefpython3 is available """
    if 'iscriptevaluator.exe' in sys.argv[2]:
        log.debug('Not running protonfixes for iscriptevaluator.exe')
        return
    if 'getcompatpath' in sys.argv[1]:
        log.debug('Not running protonfixes for getcompatpath')
        return
    if 'getnativepath' in sys.argv[1]:
        log.debug('Not running protonfixes for getnativepath')
        return

    log.info('Running protonfixes')
    with splash():
        run_fix(game_id())
21,865
def hammer(ohlc_df):
    """returns dataframe with hammer candle column"""
    df = ohlc_df.copy()
    df["hammer"] = (((df["high"] - df["low"]) > 3 * (df["open"] - df["close"])) & \
                    ((df["close"] - df["low"]) / (.001 + df["high"] - df["low"]) > 0.6) & \
                    ((df["open"] - df["low"]) / (.001 + df["high"] - df["low"]) > 0.6)) & \
                   (abs(df["close"] - df["open"]) > 0.1 * (df["high"] - df["low"]))
    return df
21,866
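A usage sketch for the hammer function above on a tiny hand-made OHLC frame (assuming the definition is in scope); the numbers are invented purely so that the first candle has the long lower shadow and small body the test looks for.

import pandas as pd

ohlc = pd.DataFrame({
    "open":  [10.10, 10.90],
    "high":  [10.50, 11.00],
    "low":   [ 9.00, 10.50],
    "close": [10.35, 10.60],
})
print(hammer(ohlc)["hammer"])  # first candle True (hammer shape), second False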
def longest_dimension_first(vector, start=(0, 0), width=None, height=None): """Generate the (x, y) steps on a longest-dimension first route. Note that when multiple dimensions are the same magnitude, one will be chosen at random with uniform probability. Parameters ---------- vector : (x, y, z) The vector which the path should cover. start : (x, y) The coordinates from which the path should start (note this is a 2D coordinate). width : int or None The width of the topology beyond which we wrap around (0 <= x < width). If None, no wrapping on the X axis will occur. height : int or None The height of the topology beyond which we wrap around (0 <= y < height). If None, no wrapping on the Y axis will occur. Generates --------- (:py:class:`~rig.links.Links`, (x, y)) Produces (in order) a (direction, (x, y)) pair for every hop along the longest dimension first route. The direction gives the direction to travel in from the previous step to reach the current step. Ties are broken randomly. The first generated value is that of the first hop after the starting position, the last generated value is the destination position. """ x, y = start for dimension, magnitude in sorted(enumerate(vector), key=(lambda x: abs(x[1]) + random.random()), reverse=True): if magnitude == 0: break # Advance in the specified direction sign = 1 if magnitude > 0 else -1 for _ in range(abs(magnitude)): if dimension == 0: dx, dy = sign, 0 elif dimension == 1: dx, dy = 0, sign elif dimension == 2: # pragma: no branch dx, dy = -sign, -sign x += dx y += dy # Wrap-around if required if width is not None: x %= width if height is not None: y %= height direction = Links.from_vector((dx, dy)) yield (direction, (x, y))
21,867
def calcPhase(star, time):
    """
    Calculate the phase of an orbit, very simple calculation but used quite a lot
    """
    period = star.period
    phase = time / period
    return phase
21,868
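A minimal usage sketch for calcPhase above (assuming the definition is in scope), where `star` is taken to be any object exposing a `period` attribute.

from types import SimpleNamespace

# hypothetical star with a 3.5-day orbital period
star = SimpleNamespace(period=3.5)
print(calcPhase(star, 7.0))   # 2.0 -> two full orbits
print(calcPhase(star, 1.75))  # 0.5 -> half way through an orbit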
def advanced_search(): """ Get a json dictionary of search filter values suitable for use with the javascript queryBuilder plugin """ filters = [ dict( id='name', label='Name', type='string', operators=['equal', 'not_equal', 'begins_with', 'ends_with', 'contains'] ), dict( id='old_name', label='Old Name', type='string', operators=['equal', 'not_equal', 'begins_with', 'ends_with', 'contains'] ), dict( id='label', label='Label', type='string', operators=['contains'] ), dict( id='qtext', label='Question Text', type='string', operators=['contains'] ), dict( id='probe', label='Probe', type='string', operators=['contains'] ), dict( id='data_source', label='Data Source', type='string', input='select', values=valid_filters['data_source'], operators=['equal', 'not_equal', 'in', 'not_in'], multiple=True, plugin='selectpicker' ), dict( id='survey', label='Survey', type='string', input='select', values=valid_filters['survey'], operators=['equal', 'not_equal', 'in', 'not_in'], multiple=True, plugin='selectpicker' ), dict( id='wave', label='Wave', type='string', input='select', values=valid_filters['wave'], operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'], multiple=True, plugin='selectpicker' ), dict( id='respondent', label='Respondent', type='string', input='select', values=valid_filters['respondent'], operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'], multiple=True, plugin='selectpicker' ), dict( id='focal_person', label='Focal Person', type='string', input='select', values={'Focal Child': 'Focal Child', 'Mother': 'Mother', 'Father': 'Father', 'Primary Caregiver': 'Primary Caregiver', 'Partner': 'Partner', 'Other': 'Other'}, operators=['contains', 'is_null', 'is_not_null'] ), dict( id='topics', label='Topics', type='string', input='select', values=valid_filters['topic'], operators=['contains'], multiple=True, plugin='selectpicker' ), dict( id='subtopics', label='Sub-Topics', type='string', input='select', values=valid_filters['subtopic'], operators=['contains'], multiple=True, plugin='selectpicker' ), dict( id='scale', label='Scale', type='string', input='select', values=valid_filters['scale'], operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'], multiple=True, plugin='selectpicker' ), dict( id='n_cities_asked', label='Asked in (N) cities', type='integer', operators=['equal', 'not_equal', 'less', 'less_or_equal', 'greater', 'greater_or_equal', 'in', 'not_in'], input='select', values=valid_filters['n_cities_asked'], multiple=True, plugin='selectpicker' ), dict( id='data_type', label='Data Type', type='string', input='select', values=valid_filters['data_type'], operators=['equal', 'not_equal', 'in', 'not_in'], multiple=True, plugin='selectpicker' ), dict( id='in_FFC_file', label='FFC variable', type='string', input='select', operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'], values={'yes': 'Yes', 'no': 'No'}, multiple=True, plugin='selectpicker' ) ] return jsonify({"filters": filters})
21,869
def rdp_rec(M, epsilon, dist=pldist): """ Simplifies a given array of points. Recursive version. :param M: an array :type M: numpy array :param epsilon: epsilon in the rdp algorithm :type epsilon: float :param dist: distance function :type dist: function with signature ``f(point, start, end)`` -- see :func:`rdp.pldist` """ dmax = 0.0 index = -1 for i in range(1, M.shape[0]): d = dist(M[i], M[0], M[-1]) if d > dmax: index = i dmax = d if dmax > epsilon: r1 = rdp_rec(M[:index + 1], epsilon, dist) r2 = rdp_rec(M[index:], epsilon, dist) return np.vstack((r1[:-1], r2)) else: return np.vstack((M[0], M[-1]))
21,870
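A small self-contained sketch of rdp_rec above. Instead of the rdp package's default pldist, a basic point-to-line distance with the same (point, start, end) signature is passed in explicitly, so the example only needs numpy; the points and epsilon are made up.

import numpy as np

def point_line_dist(point, start, end):
    # Perpendicular distance from `point` to the infinite line through start-end.
    if np.all(start == end):
        return np.linalg.norm(point - start)
    d = end - start
    return abs(d[0] * (start[1] - point[1]) - d[1] * (start[0] - point[0])) / np.linalg.norm(d)

points = np.array([[0.0, 0.0], [1.0, 0.1], [2.0, -0.1], [3.0, 5.0], [4.0, 6.0]])
print(rdp_rec(points, epsilon=0.5, dist=point_line_dist))
# Points lying within epsilon of the simplified polyline are dropped.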
def secBetweenDates(dateTime0, dateTime1):
    """
    :param dateTime0: start date/time string in '%Y/%m/%d %H:%M:%S' format
    :param dateTime1: end date/time string in '%Y/%m/%d %H:%M:%S' format
    :return: The number of seconds between the two dates.
    """
    dt0 = datetime.strptime(dateTime0, '%Y/%m/%d %H:%M:%S')
    dt1 = datetime.strptime(dateTime1, '%Y/%m/%d %H:%M:%S')
    timeDiff = dt1.timestamp() - dt0.timestamp()
    return timeDiff
21,871
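A quick sanity check for secBetweenDates above, assuming the same module context (datetime already imported); the timestamps are made up.

print(secBetweenDates('2021/03/01 12:00:00', '2021/03/01 12:30:30'))  # 1830.0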
def clear_annotation(doc, annotation): """Remove an annotation file if it exists.""" annotation_path = util.get_annotation_path(doc, annotation) if os.path.exists(annotation_path): os.remove(annotation_path)
21,872
def iframe_home(request):
    """ Home page rendered inside the iframe """
    # Info about manufacturing orders
    pedidosFabricacao = models.Pedidofabricacao.objects.filter(
        hide=False
    ).exclude(
        fkid_statusfabricacao__order=3
    ).order_by(
        '-fkid_statusfabricacao',
        'dt_fim_maturacao'
    )
    context = {
        "fabricacaoPiece": "iframe/pieces/fabricacaoDetail.html",
        "pedidosFabricacao": pedidosFabricacao
    }
    return render(request, "iframe/home.html", context)
21,873
def get_raw_KEGG(kegg_comp_ids=None, kegg_rxn_ids=None, krest="http://rest.kegg.jp", n_threads=128, test_limit=0):
    """
    Downloads all KEGG compound (C) and reaction (R) records and formats
    them as MINE database compound or reaction entries. The final output is
    a tuple containing a compound dictionary and a reaction dictionary.

    Alternatively, downloads only a supplied list of compounds and reactions.
    """
    # Use None defaults to avoid sharing mutable default lists between calls
    if kegg_comp_ids is None:
        kegg_comp_ids = []
    if kegg_rxn_ids is None:
        kegg_rxn_ids = []

    s_out("\nDownloading KEGG data via %s/...\n" % krest)

    # Acquire list of KEGG compound IDs
    if not len(kegg_comp_ids):
        s_out("Downloading KEGG compound list...")
        r = rget("/".join([krest,"list","compound"]))
        if r.status_code == 200:
            for line in r.text.split("\n"):
                if line == "": break # The end
                kegg_comp_id = line.split()[0].split(":")[1]
                kegg_comp_ids.append(kegg_comp_id)
        else:
            msg = "Error: Unable to download KEGG rest compound list.\n"
            sys.exit(msg)
        s_out(" Done.\n")

    # Acquire list of KEGG reaction IDs
    if not len(kegg_rxn_ids):
        s_out("Downloading KEGG reaction list...")
        r = rget("/".join([krest,"list","reaction"]))
        if r.status_code == 200:
            for line in r.text.split("\n"):
                if line == "": break # The end
                kegg_rxn_id = line.split()[0].split(":")[1]
                kegg_rxn_ids.append(kegg_rxn_id)
        else:
            msg = "Error: Unable to download KEGG rest reaction list.\n"
            sys.exit(msg)
        s_out(" Done.\n")

    # Limit download length, for testing only
    if test_limit:
        kegg_comp_ids = kegg_comp_ids[0:test_limit]
        kegg_rxn_ids = kegg_rxn_ids[0:test_limit]

    # Download compounds (threaded)
    kegg_comp_dict = {}
    print("Downloading KEGG compounds...")
    for comp in get_KEGG_comps(kegg_comp_ids):
        if comp is None:
            continue
        try:
            kegg_comp_dict[comp['_id']] = comp
        except KeyError:
            s_err("Warning: '" + str(comp) + \
            "' lacks an ID and will be discarded.\n")
            continue
    print("")

    # Download reactions (threaded)
    kegg_rxn_dict = {}
    print("Downloading KEGG reactions...")
    for rxn in get_KEGG_rxns(kegg_rxn_ids):
        if rxn is None:
            continue
        try:
            kegg_rxn_dict[rxn['_id']] = rxn
        except KeyError:
            s_err("Warning: '" + str(rxn) + \
            "' lacks an ID and will be discarded.\n")
            continue
    print("")

    # Re-organize compound reaction listing, taking cofactor role into account
    s_out("Organizing reaction lists...")
    sort_KEGG_reactions(kegg_comp_dict, kegg_rxn_dict)
    s_out(" Done.\n")

    s_out("KEGG download completed.\n")

    return (kegg_comp_dict, kegg_rxn_dict)
21,874
def inv(n: int, n_bits: int) -> int: """Compute the bitwise inverse. Args: n: An integer. n_bits: The bit-width of the integers used. Returns: The binary inverse of the input. """ # We should only invert the bits that are within the bit-width of the # integers we use. We set this mask to set the other bits to zero. bit_mask = (1 << n_bits) - 1 # e.g. 0b111 for n_bits = 3 return ~n & bit_mask
21,875
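A quick check of inv above: only bits inside the stated width are flipped.

assert inv(0b101, 3) == 0b010   # 5 -> 2 for 3-bit integers
assert inv(0, 3) == 0b111       # 0 -> 7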
def _render_flight_addition_page(error): """ Helper to render the flight addition page :param error: Error message to display on the page or None :return: The rendered flight addition template """ return render_template("flights/add.html", airlines=list_airlines(), airports=list_airports(), error=error)
21,876
def get_node_index(glTF, name):
    """
    Return the index of the named node in the glTF 'nodes' array, or -1 if
    it is not present.
    """
    if glTF.get('nodes') is None:
        return -1
    for index, node in enumerate(glTF['nodes']):
        if node['name'] == name:
            return index
    return -1
21,877
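A tiny illustrative call to get_node_index above, using a made-up glTF-like dict (the node names are hypothetical).

gltf = {'nodes': [{'name': 'root'}, {'name': 'camera'}, {'name': 'mesh0'}]}
print(get_node_index(gltf, 'camera'))   # 1
print(get_node_index(gltf, 'missing'))  # -1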
def online_user_count(filter_user=None): """ Returns the number of users online """ return len(_online_users())
21,878
def get_latest_version_url(start=29, template="http://unicode.org/Public/cldr/{}/core.zip"): """Discover the most recent version of the CLDR dataset. Effort has been made to make this function reusable for other URL numeric URL schemes, just override `start` and `template` to iteratively search for the latest version of any other URL. """ latest = None with Session() as http: # We perform several requests iteratively, so let's be nice and re-use the connection. for current in count(start): result = http.head(template.format(current)) # We only care if it exists or not, thus HEAD use here. if result.status_code != 200: return current - 1, latest # Propagate the version found and the URL for that version. latest = result.url
21,879
def test_field_validator__init_invalid_value_doesnt_crash(): """Don't crash if an invalid value is set for a field in the constructor.""" class Class(binobj.Struct): text = fields.StringZ(validate=alnum_validator) struct = Class(text="!") with pytest.raises(errors.ValidationError): struct.to_bytes()
21,880
def delete_file(filename): """Remove a file""" filename = os.path.basename(filename) # FIXME: possible race condition if os.path.exists(secure_path(cagibi_folder, filename)) and filename in files_info: os.remove(secure_path(cagibi_folder, filename)) del files_info[filename] save_config(files_info, filename="files.json") return "Ok." else: abort(500, "File doesn't exist or is not in database.")
21,881
def import_module(name, package=None): """Import a module. The 'package' argument is required when performing a relative import. It specifies the package to use as the anchor point from which to resolve the relative import to an absolute import. """ level = 0 if name.startswith('.'): if not package: msg = f"the 'package' argument is required to perform a relative import for {name!r}" raise TypeError(msg) for character in name: if character != '.': break level += 1 return _gcd_import(name[level:], package, level)
21,882
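A usage sketch for import_module above, which mirrors importlib.import_module (the snippet delegates to the internal _gcd_import helper, assumed here to be importlib's); the package and module names in the relative example are hypothetical.

json_mod = import_module('json')   # absolute import
print(json_mod.dumps({'ok': True}))
# Relative imports need the anchor package, e.g. inside a package 'mypkg':
# helpers = import_module('.helpers', package='mypkg')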
def publish(): """ The main render script. """ num_w = 56 black_list = Blacklist() valid_data = construct_data_dirs(black_list) print(valid_data) print("Publishing segments: ") num_segments = [] if E('errors.txt'): os.remove('errors.txt') try: multiprocessing.freeze_support() with multiprocessing.Pool(num_w, initializer=tqdm.tqdm.set_lock, initargs=(multiprocessing.RLock(),)) as pool: manager = ThreadManager(multiprocessing.Manager(), num_w, 1, 1) func = functools.partial(_render_data, DATA_DIR, manager) num_segments = list( tqdm.tqdm(pool.imap_unordered(func, valid_data), total=len(valid_data), desc='Files', miniters=1, position=0, maxinterval=1)) # for recording_name, render_path in tqdm.tqdm(valid_renders, desc='Files'): # num_segments_rendered += gen_sarsa_pairs(render_path, recording_name, DATA_DIR) except Exception as e: if isinstance(e, KeyboardInterrupt): pool.terminate() pool.join() raise e print('\n' * num_w) print("Exception in pool: ", type(e), e) print('Vectorized {} files in total!'.format(sum(num_segments))) raise e num_segments_rendered = sum(num_segments) print('\n' * num_w) print('Vectorized {} files in total!'.format(num_segments_rendered)) if E('errors.txt'): print('Errors:') print(open('errors.txt', 'r').read())
21,883
def sanitize_filename(filename, replacement='_', max_length=200):
    """Compute the basename of filename and replace all non-whitelisted
       characters. The returned filename is always a basename of the file."""
    basepath = os.path.basename(filename).strip()
    sane_fname = re.sub(r'[^\w\.\- ]', replacement, basepath)
    while ".." in sane_fname:
        sane_fname = sane_fname.replace('..', '.')
    # collapse runs of whitespace into a single space
    while "  " in sane_fname:
        sane_fname = sane_fname.replace('  ', ' ')
    if not len(sane_fname):
        sane_fname = 'NONAME'
    # limit filename length
    if max_length:
        sane_fname = sane_fname[:max_length]
    return sane_fname
21,884
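An example of the kind of cleanup sanitize_filename above performs, assuming os and re are imported in the same module; the input path is made up.

print(sanitize_filename('../reports/2021: summary?.txt'))
# -> '2021_ summary_.txt'  (basename only, unsafe characters replaced)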
def get_image_links_from_imgur(imgur_url): """ Given an imgur URL, return a list of image URLs from it. """ if 'imgur.com' not in imgur_url: raise ValueError('given URL does not appear to be an imgur URL') urls = [] response = requests.get(imgur_url) if response.status_code != 200: raise ValueError('there was something wrong with the given URL') soup = BeautifulSoup(response.text, 'html5lib') # this is an album if '/a/' in imgur_url: matches = soup.select('.album-view-image-link a') urls += [x['href'] for x in matches] # directly linked image elif 'i.imgur.com' in imgur_url: urls.append(imgur_url) # single-image page else: try: urls.append(soup.select('.image a')[0]['href']) except IndexError: pass # clean up image URLs urls = [url.strip('/') for url in urls] urls = ['http://{}'.format(url) if not url.startswith('http') else url for url in urls] return urls
21,885
def test_get_versions_for_npm_package_deprecated_package(): """Test basic behavior of the function get_versions_for_npm_package.""" package_versions = get_versions_for_npm_package("nsp") assert package_versions is not None
21,886
def filter_ignored_images(y_true, y_pred, classification=False): """ Filter those images which are not meaningful. Args: y_true: Target tensor from the dataset generator. y_pred: Predicted tensor from the network. classification: To filter for classification or regression. Returns: Filtered tensors. """ states = y_true[:, :, -1] if classification: indexes = tf.where(tf.math.not_equal(states, -1)) else: indexes = tf.where(tf.math.equal(states, 1)) pred = y_pred true = y_true[:, :, :-1] true_filtered = tf.gather_nd(true, indexes) pred_filtered = tf.gather_nd(pred, indexes) return true_filtered, pred_filtered, indexes, states
21,887
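A hedged sketch of the shapes filter_ignored_images above expects: a batch of one image with three anchors, two regression values per anchor, plus a trailing state column in y_true (1 = positive, 0 = negative, -1 = ignore). All values are made up; TensorFlow is assumed to be installed.

import tensorflow as tf

y_true = tf.constant([[[0.1, 0.2,  1.0],
                       [0.3, 0.4, -1.0],
                       [0.5, 0.6,  0.0]]])          # shape (1, 3, 2 + 1)
y_pred = tf.constant([[[0.1, 0.1],
                       [0.2, 0.2],
                       [0.3, 0.3]]])                # shape (1, 3, 2)

true_f, pred_f, idx, states = filter_ignored_images(y_true, y_pred,
                                                    classification=True)
print(true_f.shape)  # (2, 2): the anchor with state == -1 was dropped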
def context_data_from_metadata(metadata): """ Utility function transforming `metadata` into a context data dictionary. Metadata may have been encoded at the client by `metadata_from_context_data`, or it may be "normal" GRPC metadata. In this case, duplicate values are allowed; they become a list in the context data. """ data = {} for name, value in metadata: if name.startswith(METADATA_PREFIX): _, key = name.split(METADATA_PREFIX, 1) data[key] = decode_value(value) else: if name in data: try: data[name].append(value) except AttributeError: data[name] = [data[name], value] else: data[name] = value return data
21,888
def open_debug_and_training_data(t, ids, training_data_path):
    """Open and concatenate the debugging and training data"""
    debug_files = {
        tag: glob.glob(os.path.join(path, '*.pkl'))
        for tag, path in ids.items()
    }
    # open the training dataset
    training_ds = xr.open_dataset(training_data_path)
    train_ds_init_time = training_ds.isel(time=0)
    args = [('Train', train_ds_init_time)]
    for tag in debug_files:
        dbg = open_debug_state_like_ds(debug_files[tag][t], train_ds_init_time)
        args.append((tag, dbg))
    return concat_datasets(args, name='tag')
21,889
def plot_results_fit(
    xs,
    ys,
    covs,
    line_ax,
    lh_ax=None,
    outliers=None,
    auto_outliers=False,
    fit_includes_outliers=False,
    report_rho=False,
):
    """Do the fit and plot the result.

    Parameters
    ----------
    line_ax : axes to plot the best fit line

    lh_ax : axes to plot the likelihood function

    xs, ys, covs: the data to use (see return value of plot_results_scatter)

    outliers : list of int
        list of indices for which data will be ignored in the fitting. If
        auto_outliers is True, then this data will only be ignored for the
        first iteration. The manual outlier choice positions the fit where
        we want it. Then, these points are added back in, and ideally, the
        automatic outlier rejection will reject them in an objective way.
        This is to make sure that we are not guilty of cherry picking.

    auto_outliers : bool
        Use auto outlier detection in linear_ortho_maxlh, and mark outliers
        on plot (line ax). See outlier detection function for criterion.

    fit_includes_outliers : bool
        Use the detected outliers in the fitting, despite them being
        outliers.

    report_rho : bool
        draw a box with the correlation coefficient AFTER outlier removal

    Returns
    -------
    outlier_idxs : array of int
        Indices of points treated as outliers

    """
    # fix ranges before plotting the fit
    line_ax.set_xlim(line_ax.get_xlim())
    line_ax.set_ylim(line_ax.get_ylim())

    r = linear_ortho_fit.linear_ortho_maxlh(
        xs,
        ys,
        covs,
        line_ax,
        sigma_hess=True,
        manual_outliers=outliers,
        auto_outliers=auto_outliers,
        fit_includes_outliers=fit_includes_outliers,
    )
    m = r["m"]
    b_perp = r["b_perp"]
    sm = r["m_unc"]
    sb_perp = r["b_perp_unc"]
    outlier_idxs = r["outlier_idxs"]

    b = linear_ortho_fit.b_perp_to_b(m, b_perp)

    # The fitting process also indicated some outliers. Do the rest without them.
    if fit_includes_outliers:
        xs_used = xs
        ys_used = ys
        covs_used = covs
    else:
        xs_used = np.delete(xs, outlier_idxs, axis=0)
        ys_used = np.delete(ys, outlier_idxs, axis=0)
        covs_used = np.delete(covs, outlier_idxs, axis=0)

    # Looking at bootstrap with and without outliers might be interesting.
    # boot_cov_mb = linear_ortho_fit.bootstrap_fit_errors(xs_no_out, ys_no_out, covs_no_out)
    # boot_sm, boot_sb = np.sqrt(np.diag(boot_cov_mb))

    # sample the likelihood function to determine statistical properties
    # of m and b
    a = 2
    m_grid, b_perp_grid, logL_grid = linear_ortho_fit.calc_logL_grid(
        m - a * sm,
        m + a * sm,
        b_perp - a * sb_perp,
        b_perp + a * sb_perp,
        xs_used,
        ys_used,
        covs_used,
    )

    # Sample the likelihood of (m, b_perp) and convert to (m, b), so we
    # can properly determine the covariance.
sampled_m, sampled_b_perp = linear_ortho_fit.sample_likelihood( m, b_perp, m_grid, b_perp_grid, logL_grid, N=2000 ) sampled_b = linear_ortho_fit.b_perp_to_b(sampled_m, sampled_b_perp) sample_cov_mb = np.cov(sampled_m, sampled_b) m_unc = np.sqrt(sample_cov_mb[0, 0]) b_unc = np.sqrt(sample_cov_mb[1, 1]) mb_corr = sample_cov_mb[0, 1] / (m_unc * b_unc) # print out results here print("*** FIT RESULT ***") print(f"m = {m:.2e} pm {m_unc:.2e}") print(f"b = {b:.2e} pm {b_unc:.2e}") print(f"correlation = {mb_corr:.2f}") if lh_ax is not None: linear_ortho_fit.plot_solution_neighborhood( lh_ax, logL_grid, [min(b_perp_grid), max(b_perp_grid), min(m_grid), max(m_grid)], m, b_perp, cov_mb=sample_cov_mb, what="L", extra_points=zip(sampled_b_perp, sampled_m), ) # pearson coefficient without outliers (gives us an idea of how # reasonable the trend is) print("VVV-auto outlier removal-VVV") if report_rho: plot_rho_box( line_ax, xs_used, ys_used, covs_used, ) # plot the fitted line xlim = line_ax.get_xlim() xp = np.linspace(xlim[0], xlim[1], 3) yp = m * xp + b line_ax.plot(xp, yp, color=FIT_COLOR, linewidth=2) # plot sampled lines linear_ortho_fit.plot_solution_linescatter( line_ax, sampled_m, sampled_b_perp, color=FIT_COLOR, alpha=5 / len(sampled_m) ) # if outliers, mark them if len(outlier_idxs) > 0: line_ax.scatter( xs[outlier_idxs], ys[outlier_idxs], marker="x", color="y", label="outlier", zorder=10, ) # return as dict, in case we want to do more specific things in # post. Example: gathering numbers and putting them into a table, in # the main plotting script (paper_scatter.py). # Also return covariance and samples, useful for determining error on y = mx + b. results = { "m": m, "m_unc": m_unc, "b": b, "b_unc": b_unc, "mb_cov": sample_cov_mb[0, 1], "outlier_idxs": outlier_idxs, "m_samples": sampled_m, "b_samples": sampled_b, } return results
21,890
def check_for_features(cmph5_file, feature_list): """Check that all required features present in the cmph5_file. Return a list of features that are missing. """ aln_group_path = cmph5_file['AlnGroup/Path'][0] missing_features = [] for feature in feature_list: if feature not in cmph5_file[aln_group_path].keys(): missing_features.append(feature) return missing_features
21,891
def output_gtif(bandarr, cols, rows, outfilename, geotransform, projection,
                no_data_value=-99, driver_name='GTiff', dtype=GDT_Float32):
    """
    Create a geotiff with gdal that will contain all the bands represented
    by arrays within bandarr, which is itself an array of arrays. Expecting
    bandarr to be of shape (Bands, Rows, Columns).

    Parameters
    ----------
    bandarr : np.array
        Image array of shape (Bands, Rows, Cols)
    cols : int
        The number of columns measured in pixels. I may be able to do away
        with this parameter by just using the shape of `bandarr` to
        determine this value.
    rows : int
        The number of rows measured in pixels. I may be able to do away
        with this parameter by just using the shape of `bandarr` to
        determine this value.
    outfilename : string
        The path to the output file.
    geotransform : tuple or list
        The geotransform will determine how the elements of `bandarr` are
        spatially distributed. The elements of the geotransform are as
        follows::

            adfGeoTransform[0] /* top left x */
            adfGeoTransform[1] /* w-e pixel resolution */
            adfGeoTransform[2] /* rotation, 0 if image is "north up" */
            adfGeoTransform[3] /* top left y */
            adfGeoTransform[4] /* rotation, 0 if image is "north up" */
            adfGeoTransform[5] /* n-s pixel resolution */

    projection : string
        The string should be a projection in OGC WKT or PROJ.4 format.
    no_data_value : int or float, optional
        The `no_data_value` to set on every output band. Defaults to the
        arbitrary value of -99.
    driver_name : string, optional
        The name of the GDAL driver to use. This will determine the format
        of the output. For GeoTiff output, use the default value ('GTiff').
    dtype : int, optional
        The GDAL data type of the output bands. Defaults to GDT_Float32.
        These are the GDAL data types::

            GDT_Unknown = 0, GDT_Byte = 1, GDT_UInt16 = 2, GDT_Int16 = 3,
            GDT_UInt32 = 4, GDT_Int32 = 5, GDT_Float32 = 6, GDT_Float64 = 7,
            GDT_CInt16 = 8, GDT_CInt32 = 9, GDT_CFloat32 = 10,
            GDT_CFloat64 = 11, GDT_TypeCount = 12

    Returns
    -------
    Nothing
        This method just writes a file. It has no return.
    """
    # make sure bandarr is a proper band array
    if bandarr.ndim == 2:
        bandarr = np.array([bandarr])
    driver = gdal.GetDriverByName(driver_name)
    # The compress and predictor options below just reduced a geotiff
    # from 216MB to 87MB. Predictor 2 is horizontal differencing.
    outDs = driver.Create(outfilename, cols, rows, len(bandarr), dtype,
                          options=['COMPRESS=LZW', 'PREDICTOR=2'])
    if outDs is None:
        print("Could not create %s" % outfilename)
        sys.exit(1)
    for bandnum in range(1, len(bandarr) + 1):
        # bandarr is zero based index while GetRasterBand is 1 based index
        outBand = outDs.GetRasterBand(bandnum)
        outBand.WriteArray(bandarr[bandnum - 1])
        outBand.FlushCache()
        outBand.SetNoDataValue(no_data_value)
    # georeference the image and set the projection
    outDs.SetGeoTransform(geotransform)
    outDs.SetProjection(projection)
    # build pyramids
    gdal.SetConfigOption('HFA_USE_RRD', 'YES')
    outDs.BuildOverviews(overviewlist=[2, 4, 8, 16, 32, 64, 128])
21,892
def main(argv=None): """script main. parses command line options in sys.argv, unless *argv* is given. """ if not argv: argv = sys.argv # setup command line parser parser = E.OptionParser(version="%prog version: $Id: psl2chain.py 2901 2010-04-13 14:38:07Z andreas $", usage=globals()["__doc__"]) # add common options (-h/--help, ...) and parse command line (options, args) = E.Start(parser, argv=argv) # do sth ninput, nskipped, noutput = 0, 0, 0 for psl in Blat.iterator(options.stdin): ninput += 1 if psl.strand == "-": qstart, qend = psl.mQueryLength - \ psl.mQueryTo, psl.mQueryLength - psl.mQueryFrom else: qstart, qend = psl.mQueryFrom, psl.mQueryTo options.stdout.write("chain %i %s %i %s %i %i %s %i %s %i %i %i\n" % (psl.mNMatches, psl.mSbjctId, psl.mSbjctLength, "+", psl.mSbjctFrom, psl.mSbjctTo, psl.mQueryId, psl.mQueryLength, psl.strand, qstart, qend, ninput)) size, tend, qend = 0, None, None for qstart, tstart, size in psl.getBlocks(): if tend is not None: options.stdout.write( "\t%i\t%i\n" % (tstart - tend, qstart - qend)) qend, tend = qstart + size, tstart + size options.stdout.write("%i" % (size,)) options.stdout.write("\n") noutput += 1 E.info("ninput=%i, noutput=%i, nskipped=%i" % (ninput, noutput, nskipped)) # write footer and output benchmark information. E.Stop()
21,893
def inverse(a: int, b: int) -> int:
    """
    Calculates the modular inverse of a modulo b.

    :param a: the value to invert
    :param b: the modulus
    :return: the integer x such that (a * x) % b == 1, assuming gcd(a, b) == 1
    """
    _, inv, _ = gcd_extended(a, b)
    return inv % b
21,894
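A quick check of inverse above, assuming gcd_extended is the usual extended-Euclid helper returning (g, x, y) with a*x + b*y == g.

print(inverse(3, 11))              # 4, because (3 * 4) % 11 == 1
print((3 * inverse(3, 11)) % 11)   # 1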
def build_md2po_events(mkdocs_build_config):
    """Dynamically build the mdpo events that are executed at certain
    moments of the Markdown file parsing, extracting messages from pages.
    Which events are built depends on the active extensions and plugins.
    """
    _md_extensions = mkdocs_build_config['markdown_extensions']
    md_extensions = []
    for ext in _md_extensions:
        if not isinstance(ext, str):
            if isinstance(ext, MkdocstringsExtension):
                md_extensions.append('mkdocstrings')
            else:
                md_extensions.append(ext)
        else:
            md_extensions.append(ext)

    def build_event(event_type):
        parameters = {
            'text': 'md2po_instance, block, text',
            'msgid': 'md2po_instance, msgid, *args',
            'link_reference': 'md2po_instance, target, *args',
        }[event_type]

        if event_type == 'text':
            req_extension_conditions = {
                'admonition': 're.match(AdmonitionProcessor.RE, text)',
                'pymdownx.details': 're.match(DetailsProcessor.START, text)',
                'pymdownx.snippets': (
                    're.match(SnippetPreprocessor.RE_ALL_SNIPPETS, text)'
                ),
                'pymdownx.tabbed': 're.match(TabbedProcessor.START, text)',
                'mkdocstrings': 're.match(MkDocsStringsProcessor.regex, text)',
            }

            body = ''
            for req_extension, condition in req_extension_conditions.items():
                if req_extension in md_extensions:
                    # indentation matters here: these strings become the body
                    # of the generated event function
                    body += (
                        f'    if {condition}:\n'
                        '        md2po_instance.disabled_entries.append(text)\n'
                        '        return False\n'
                    )

            if not body:
                return None
        elif event_type == 'msgid':
            body = (
                "    if msgid.startswith(': '):"
                'md2po_instance._disable_next_line = True\n'
            )
        else:  # link_reference
            body = "    if target.startswith('^'):return False;\n"

        function_definition = f'def {event_type}_event({parameters}):\n{body}'

        code = compile(function_definition, 'test', 'exec')
        exec(code)

        return locals()[f'{event_type}_event']

    # load only those events required for the extensions
    events_functions = {
        event: build_event(event)
        for event in ['text', 'msgid', 'link_reference']
    }

    events = {}
    for event_name, event_function in events_functions.items():
        if event_function is not None:
            events[event_name] = event_function

    return events
21,895
def base_build(order, group, dry_run): """Builds base (dependence) packages. This command builds dependence packages (packages that are not Bob/BEAT packages) in the CI infrastructure. It is **not** meant to be used outside this context. """ condarc = select_user_condarc( paths=[os.curdir], branch=os.environ.get("CI_COMMIT_REF_NAME") ) condarc = condarc or os.path.join(os.environ["CONDA_ROOT"], "condarc") if os.path.exists(condarc): logger.info("Loading (this build's) CONDARC file from %s...", condarc) with open(condarc, "rb") as f: condarc_options = yaml.load(f, Loader=yaml.FullLoader) else: # not building on the CI? - use defaults # use default condarc_options = yaml.load(BASE_CONDARC, Loader=yaml.FullLoader) # dump packages at conda_root condarc_options["croot"] = os.path.join( os.environ["CONDA_ROOT"], "conda-bld" ) recipes = load_order_file(order) from .. import bootstrap from ..build import base_build as _build for k, recipe in enumerate(recipes): echo_normal("\n" + (80 * "=")) echo_normal('Building "%s" (%d/%d)' % (recipe, k + 1, len(recipes))) echo_normal((80 * "=") + "\n") if not os.path.exists(os.path.join(recipe, "meta.yaml")): logger.info('Ignoring directory "%s" - no meta.yaml found' % recipe) continue variants_file = select_conda_build_config( paths=[recipe, os.curdir], branch=os.environ.get("CI_COMMIT_REF_NAME"), ) logger.info("Conda build configuration file: %s", variants_file) _build( bootstrap=bootstrap, server=SERVER, intranet=True, group=group, recipe_dir=recipe, conda_build_config=variants_file, condarc_options=condarc_options, )
21,896
def printExternalClusters(newClusters, extClusterFile, outPrefix, oldNames, printRef = True): """Prints cluster assignments with respect to previously defined clusters or labels. Args: newClusters (set iterable) The components from the graph G, defining the PopPUNK clusters extClusterFile (str) A CSV file containing definitions of the external clusters for each sample (does not need to contain all samples) outPrefix (str) Prefix for output CSV (_external_clusters.csv) oldNames (list) A list of the reference sequences printRef (bool) If false, print only query sequences in the output Default = True """ # Object to store output csv datatable d = defaultdict(list) # Read in external clusters extClusters = \ readIsolateTypeFromCsv(extClusterFile, mode = 'external', return_dict = True) # Go through each cluster (as defined by poppunk) and find the external # clusters that had previously been assigned to any sample in the cluster for ppCluster in newClusters: # Store clusters as a set to avoid duplicates prevClusters = defaultdict(set) for sample in ppCluster: for extCluster in extClusters: if sample in extClusters[extCluster]: prevClusters[extCluster].add(extClusters[extCluster][sample]) # Go back through and print the samples that were found for sample in ppCluster: if printRef or sample not in oldNames: d['sample'].append(sample) for extCluster in extClusters: if extCluster in prevClusters: d[extCluster].append(";".join(prevClusters[extCluster])) else: d[extCluster].append("NA") if "sample" not in d: sys.stderr.write("WARNING: No new samples found, cannot write external clusters\n") else: pd.DataFrame(data=d).to_csv(outPrefix + "_external_clusters.csv", columns = ["sample"] + list(extClusters.keys()), index = False)
21,897
def is_three(x): """Return whether x is three. >>> search(is_three) 3 """ return x == 3
21,898
def get_task_id(prefix, path):
    """Generate a unique task id based on the path.

    :param prefix: prefix string
    :type prefix: str

    :param path: file path.
    :type path: str

    """
    task_id = "{}_{}".format(prefix, path.rsplit("/", 1)[-1].replace(".", "_"))
    return get_unique_task_id(task_id)
21,899
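A sketch of the formatting step performed by get_task_id above; get_unique_task_id is an external helper not shown here (assumed to deduplicate the generated id), so only the string construction is illustrated with made-up values.

prefix, path = 'ingest', '/data/raw/sample.file.csv'
print("{}_{}".format(prefix, path.rsplit("/", 1)[-1].replace(".", "_")))
# -> ingest_sample_file_csv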