content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def bytes_to_bytesio(bytestream): """Convert a bytestring to a BytesIO ready to be decoded.""" from io import BytesIO fp = BytesIO() fp.write(bytestream) fp.seek(0) return fp
5,341,800
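A minimal usage sketch for bytes_to_bytesio above; illustrative only and relying on nothing beyond the function as shown:

# Wrap raw bytes in a seekable stream and read them back.
payload = b"hello world"
fp = bytes_to_bytesio(payload)
assert fp.read() == payload  # the stream was rewound to position 0 before returning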
def image_scatter_channels(im: Image, subimages=None) -> List[Image]: """Scatter an image into a list of subimages using the channels :param im: Image :param subimages: Number of channels :return: list of subimages """ image_list = list() if subimages is None: subimages = im.shape[0] for slab in image_channel_iter(im, subimages=subimages): image_list.append(slab) assert len(image_list) == subimages, "Too many subimages scattered" return image_list
5,341,801
def unauthorized_handler(): """ If an unauthorized request arrives, redirect to the sign-in URL. :return: Redirect to the sign-in page """ current_app.logger.info("Unauthorized user needs to sign in") return redirect(url_for('userView.signin'))
5,341,802
def main(exit_event): """ ladder Lights one channel at a time in order Then backs down to the first Then repeat everything 20 times """ # this is a list of all the channels you have access to lights = hc._GPIO_PINS # start with all the lights off hc.turn_off_lights() # pause for 1 second time.sleep(1) # working loop for _ in range(20): # here we just loop over the gpio pins and do something with them # except the last one for light in range(len(lights)-1): # turn off all the lights hc.turn_off_lights() # then turn on one hc.turn_on_light(light) # wait a little bit time.sleep(.04) # to make the transition back smoother we handle the last pin here hc.turn_off_lights() hc.turn_on_light(light + 1) # this loop walks it back the other way for light in range(len(lights)-1, 0, -1): # turn off all the lights hc.turn_off_lights() # then turn on one hc.turn_on_light(light) # wait a little bit time.sleep(.04) # again to make it smoother handle the first pin like the last pin hc.turn_off_lights() hc.turn_on_light(light - 1) # this is required so that an sms play now command will # end your script and any subprocess you have statred if exit_event.is_set(): break # lets make sure we turn off the lights before we go back to the show hc.turn_off_lights()
5,341,803
def pre_order_next(path, children): """Returns the next dir for pre-order traversal.""" assert path.startswith('/'), path # First subdir is next for subdir in children(path): return posixpath.join(path, subdir) while path != '/': # Next sibling is next name = posixpath.basename(path) parent = posixpath.dirname(path) siblings = list(children(parent)) assert name in siblings if name != siblings[-1]: return posixpath.join(parent, siblings[siblings.index(name) + 1]) # Go up, find a sibling of the parent. path = parent # This was the last one return None
5,341,804
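An illustrative walk-through of pre_order_next using a small in-memory tree; the tree and the children callback below are hypothetical, introduced only to show the traversal order:

tree = {
    '/': ['a', 'b'],
    '/a': ['x'],
    '/a/x': [],
    '/b': [],
}
children = lambda path: tree.get(path, [])

assert pre_order_next('/', children) == '/a'     # first child comes next
assert pre_order_next('/a', children) == '/a/x'  # descend into the subtree
assert pre_order_next('/a/x', children) == '/b'  # climb up to the parent's next sibling
assert pre_order_next('/b', children) is None    # last directory in pre-order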
def gettiming(process_list, typetiming): """ Return a sorted list of the distinct durations needed to convert to Morse code. """ timing = [] for x in process_list: if(x[0] == typetiming): timing.append(x[3]) timing = set(timing) return sorted(timing)
5,341,805
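A hedged example of gettiming; the tuple layout below is an assumption (the function only reads indices 0 and 3):

# Hypothetical process_list rows: (event type, ..., ..., duration).
process_list = [
    ('dot',  None, None, 0.12),
    ('dash', None, None, 0.31),
    ('dot',  None, None, 0.10),
    ('dot',  None, None, 0.12),  # duplicate duration, collapsed by the set()
]
print(gettiming(process_list, 'dot'))  # [0.1, 0.12]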
def new_begin(self): """Runs when the game begins.""" org_begin(self) night_mode()
5,341,806
def init(): """Top level command handler.""" @click.command() @click.option('--approot', type=click.Path(exists=True), envvar='TREADMILL_APPROOT', required=True) @click.argument('eventfile', type=click.Path(exists=True)) def configure(approot, eventfile): """Configure local manifest and schedule app to run.""" tm_env = appenv.AppEnvironment(root=approot) container_dir = app_cfg.configure(tm_env, eventfile) _LOGGER.info('Configured %r', container_dir) return configure
5,341,807
def launch_subprocess(command): """ Process launch helper :param command Command to execute :type command list[str]|str :return Popen object """ is_shell = not isinstance(command, (list, tuple)) return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=is_shell, close_fds=True)
5,341,808
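Two illustrative calls to launch_subprocess; a list argument runs without a shell, while a plain string is executed through the shell (a POSIX shell is assumed here):

proc = launch_subprocess(["echo", "hello"])     # list -> shell=False
out, _ = proc.communicate()
print(out.decode().strip())                     # hello

proc = launch_subprocess("echo $HOME")          # str -> shell=True, so variable expansion works
print(proc.communicate()[0].decode().strip())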
def tobooks(f: '(toks, int) -> DataFrame', bks=bktksall) -> DataFrame: """Apply a function `f` to all the tokens in each book, putting the results into a DataFrame column, and adding a column to indicate each book. """ return pd.concat([f(v, i) for i, v in bks.items()])
5,341,809
def _identity_error_message(msg_type, message, status_code, request): """ Set the response code on the request, and return a JSON blob representing an Identity error body, in the format Identity returns error messages. :param str msg_type: What type of error this is - something like "badRequest" or "itemNotFound" for Identity. :param str message: The message to include in the body. :param int status_code: The status code to set :param request: the request to set the status code on :return: dictionary representing the error body """ request.setResponseCode(status_code) return { msg_type: { "message": message, "code": status_code } }
5,341,810
def dummySvgCall(): """Code which is here just so pyinstaller can discover we need SVG support""" dummy = QtSvg.QGraphicsSvgItem("some_svg.svg")
5,341,811
def _string_to_list(s, dtype='str'): """ Converts a string to a list. Args: s: input string dtype: type of the elements in the list; one of `str` or `int` """ if ' <SENT/> ' in s: return s.split(' <SENT/> ') if dtype == 'int': return [int(e) for e in s.split(LIST_SEPARATOR) if e] return s.split(LIST_SEPARATOR)
5,341,812
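A sketch of how _string_to_list behaves; LIST_SEPARATOR is referenced but not defined in the row, so the value below is an assumption:

LIST_SEPARATOR = ';'  # assumed module-level constant

print(_string_to_list('a;b;c'))               # ['a', 'b', 'c']
print(_string_to_list('1;2;3', dtype='int'))  # [1, 2, 3]
print(_string_to_list('one <SENT/> two'))     # ['one', 'two'] -- the sentence separator wins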
def internal_solve_pounders( criterion, x0, lower_bounds, upper_bounds, gtol_abs, gtol_rel, gtol_scaled, maxinterp, maxiter, delta, delta_min, delta_max, gamma0, gamma1, theta1, theta2, eta0, eta1, c1, c2, solver_sub, maxiter_sub, maxiter_gradient_descent_sub, gtol_abs_sub, gtol_rel_sub, gtol_scaled_sub, gtol_abs_conjugate_gradient_sub, gtol_rel_conjugate_gradient_sub, k_easy_sub, k_hard_sub, batch_evaluator, n_cores, ): """Find the local minimum to a non-linear least-squares problem using POUNDERS. Args: criterion_and_derivative (callable): Function that returns criterion and derivative as a tuple. x0 (np.ndarray): Initial guess for the parameter vector (starting points). lower_bounds (np.ndarray): Lower bounds. Must have same length as the initial guess of the parameter vector. Equal to -1 if not provided by the user. upper_bounds (np.ndarray): Upper bounds. Must have same length as the initial guess of the parameter vector. Equal to 1 if not provided by the user. gtol_abs (float): Convergence tolerance for the absolute gradient norm. Stop if norm of the gradient is less than this. gtol_rel (float): Convergence tolerance for the relative gradient norm. Stop if norm of the gradient relative to the criterion value is less than this. gtol_scaled (float): Convergence tolerance for the scaled gradient norm. Stop if norm of the gradient divided by norm of the gradient at the initial parameters is less than this. maxinterp (int): Maximum number of interpolation points. Default is `2 * n + 1`, where `n` is the length of the parameter vector. maxiter (int): Maximum number of iterations. If reached, terminate. delta (float): Delta, initial trust-region radius. delta_min (float): Minimal trust-region radius. delta_max (float): Maximal trust-region radius. gamma0 (float): Shrinking factor of the trust-region radius in case the solution vector of the suproblem is not accepted, but the model is fully linar (i.e. "valid"). gamma1 (float): Expansion factor of the trust-region radius in case the solution vector of the suproblem is accepted. theta1 (float): Threshold for adding the current candidate vector to the model. Function argument to find_affine_points(). theta2 (float): Threshold for adding the current candidate vector to the model. Argument to get_interpolation_matrices_residual_model(). eta0 (float): Threshold for accepting the solution vector of the trust-region subproblem as the best candidate. eta1 (float): Threshold for successfully accepting the solution vector of the trust-region subproblem as the best candidate. c1 (float): Treshold for accepting the norm of our current x candidate. Equal to sqrt(n) by default. Argument to find_affine_points() in case the input array *model_improving_points* is zero. c2 (int)): Treshold for accepting the norm of our current candidate vector. Equal to 10 by default. Argument to find_affine_points() in case the input array *model_improving_points* is not zero. solver_sub (str): Solver to use for the trust-region subproblem. Two internal solvers are supported: - "bntr": Bounded Newton Trust-Region (default, supports bound constraints) - "gqtpar": (does not support bound constraints) maxiter_sub (int): Maximum number of iterations in the trust-region subproblem. maxiter_gradient_descent_sub (int): Maximum number of gradient descent iterations to perform when the trust-region subsolver BNTR is used. gtol_abs_sub (float): Convergence tolerance for the absolute gradient norm in the trust-region subproblem ("BNTR"). 
gtol_rel_sub (float): Convergence tolerance for the relative gradient norm in the trust-region subproblem ("BNTR"). gtol_scaled_sub (float): Convergence tolerance for the scaled gradient norm in the trust-region subproblem ("BNTR"). gtol_abs_conjugate_gradient_sub (float): Convergence tolerance for the absolute gradient norm in the conjugate gradient step of the trust-region subproblem ("BNTR"). gtol_rel_conjugate_gradient_sub (float): Convergence tolerance for the relative gradient norm in the conjugate gradient step of the trust-region subproblem ("BNTR"). k_easy_sub (float): topping criterion for the "easy" case in the trust-region subproblem ("GQTPAR"). k_hard_sub (float): Stopping criterion for the "hard" case in the trust-region subproblem ("GQTPAR"). batch_evaluator (str or callable): Name of a pre-implemented batch evaluator (currently 'joblib' and 'pathos_mp') or callable with the same interface as the estimagic batch_evaluators. n_cores (int): Number of processes used to parallelize the function evaluations. Default is 1. Returns: (dict) Result dictionary containing: - solution_x (np.ndarray): Solution vector of shape (n,). - solution_criterion (np.ndarray): Values of the criterion function at the solution vector. Shape (n_obs,). - history_x (np.ndarray): Entire history of x. Shape (history.get_n_fun(), n). - history_criterion (np.ndarray): Entire history of the criterion function evaluations. Shape (history.get_n_fun(), n_obs) - n_iterations (int): Number of iterations the algorithm ran before finding a solution vector or reaching maxiter. - "success" (bool): Boolean indicating whether a solution has been found before reaching maxiter. """ history = LeastSquaresHistory() n = len(x0) model_indices = np.zeros(maxinterp, dtype=int) n_last_modelpoints = 0 if lower_bounds is not None and upper_bounds is not None: if np.max(x0 + delta - upper_bounds) > 1e-10: raise ValueError("Starting points + delta > upper bounds.") xs = [x0] for i in range(n): x1 = x0.copy() x1[i] += delta xs.append(x1) residuals = batch_evaluator(criterion, arguments=xs, n_cores=n_cores) history.add_entries(xs, residuals) accepted_index = history.get_best_index() residual_model = create_initial_residual_model( history=history, accepted_index=accepted_index, delta=delta ) main_model = create_main_from_residual_model( residual_model=residual_model, multiply_square_terms_with_residuals=False ) x_accepted = history.get_best_x() gradient_norm_initial = np.linalg.norm(main_model.linear_terms) gradient_norm_initial *= delta valid = True n_modelpoints = n + 1 last_model_indices = np.zeros(maxinterp, dtype=int) converged = False convergence_reason = "Continue iterating." 
for niter in range(maxiter + 1): result_sub = solve_subproblem( x_accepted=x_accepted, main_model=main_model, lower_bounds=lower_bounds, upper_bounds=upper_bounds, delta=delta, solver=solver_sub, maxiter=maxiter_sub, maxiter_gradient_descent=maxiter_gradient_descent_sub, gtol_abs=gtol_abs_sub, gtol_rel=gtol_rel_sub, gtol_scaled=gtol_scaled_sub, gtol_abs_conjugate_gradient=gtol_abs_conjugate_gradient_sub, gtol_rel_conjugate_gradient=gtol_rel_conjugate_gradient_sub, k_easy=k_easy_sub, k_hard=k_hard_sub, ) x_candidate = x_accepted + result_sub["x"] * delta residuals_candidate = criterion(x_candidate) history.add_entries(x_candidate, residuals_candidate) predicted_reduction = history.get_critvals( accepted_index ) - history.get_critvals(-1) actual_reduction = -result_sub["criterion"] with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) rho = np.divide(predicted_reduction, actual_reduction) if (rho >= eta1) or (rho > eta0 and valid is True): residual_model = residual_model._replace( intercepts=history.get_residuals(index=accepted_index) ) center_info = {"x": history.get_best_x(), "radius": delta} x_candidate = history.get_centered_xs(center_info, index=-1) residual_model = update_residual_model_with_new_accepted_x( residual_model=residual_model, x_candidate=x_candidate ) main_model = update_main_model_with_new_accepted_x( main_model=main_model, x_candidate=x_candidate ) x_accepted = history.get_best_x() accepted_index = history.get_best_index() critval_accepted = history.get_critvals(index=accepted_index) # The model is deemend "not valid" if it has less than n model points. # Otherwise, if the model has n points, it is considered "valid" or # "fully linear". # Note: valid is True in the first iteration if not valid: ( model_improving_points, model_indices, n_modelpoints, project_x_onto_null, ) = find_affine_points( history=history, x_accepted=x_accepted, model_improving_points=np.zeros((n, n)), project_x_onto_null=False, delta=delta, theta1=theta1, c=c1, model_indices=model_indices, n_modelpoints=0, ) if n_modelpoints < n: ( history, model_indices, ) = add_geomtery_points_to_make_main_model_fully_linear( history=history, main_model=main_model, model_improving_points=model_improving_points, model_indices=model_indices, x_accepted=x_accepted, n_modelpoints=n_modelpoints, delta=delta, criterion=criterion, lower_bounds=lower_bounds, upper_bounds=upper_bounds, batch_evaluator=batch_evaluator, n_cores=n_cores, ) n_modelpoints = n delta_old = delta delta = update_trustregion_radius( result_subproblem=result_sub, rho=rho, model_is_valid=valid, delta=delta, delta_min=delta_min, delta_max=delta_max, eta1=eta1, gamma0=gamma0, gamma1=gamma1, ) ( model_improving_points, model_indices, n_modelpoints, project_x_onto_null, ) = find_affine_points( history=history, x_accepted=x_accepted, model_improving_points=np.zeros((n, n)), project_x_onto_null=False, delta=delta, theta1=theta1, c=c1, model_indices=model_indices, n_modelpoints=0, ) if n_modelpoints == n: valid = True else: valid = False ( model_improving_points, model_indices, n_modelpoints, project_x_onto_null, ) = find_affine_points( history=history, x_accepted=x_accepted, model_improving_points=model_improving_points, project_x_onto_null=project_x_onto_null, delta=delta, theta1=theta1, c=c2, model_indices=model_indices, n_modelpoints=n_modelpoints, ) if n_modelpoints < n: ( history, model_indices, ) = add_geomtery_points_to_make_main_model_fully_linear( history=history, main_model=main_model, 
model_improving_points=model_improving_points, model_indices=model_indices, x_accepted=x_accepted, n_modelpoints=n_modelpoints, delta=delta, criterion=criterion, lower_bounds=lower_bounds, upper_bounds=upper_bounds, batch_evaluator=batch_evaluator, n_cores=n_cores, ) model_indices, n_model_points = update_model_indices_residual_model( model_indices, accepted_index, n_modelpoints ) ( x_sample_monomial_basis, monomial_basis, basis_null_space, lower_triangular, n_modelpoints, ) = get_interpolation_matrices_residual_model( history=history, x_accepted=x_accepted, model_indices=model_indices, delta=delta, c2=c2, theta2=theta2, n_maxinterp=maxinterp, n_modelpoints=n_modelpoints, ) center_info = {"x": x_accepted, "radius": delta_old} interpolation_set = history.get_centered_xs( center_info, index=model_indices[:n_modelpoints] ) residual_model_interpolated = interpolate_residual_model( history=history, interpolation_set=interpolation_set, residual_model=residual_model, model_indices=model_indices, n_modelpoints=n_modelpoints, n_maxinterp=maxinterp, ) coefficients_residual_model = get_coefficients_residual_model( x_sample_monomial_basis=x_sample_monomial_basis, monomial_basis=monomial_basis, basis_null_space=basis_null_space, lower_triangular=lower_triangular, residual_model_interpolated=residual_model_interpolated, n_modelpoints=n_modelpoints, ) residual_model = residual_model._replace( intercepts=history.get_residuals(index=accepted_index) ) residual_model = update_residual_model( residual_model=residual_model, coefficients_to_add=coefficients_residual_model, delta=delta, delta_old=delta_old, ) main_model = create_main_from_residual_model(residual_model) gradient_norm = np.linalg.norm(main_model.linear_terms) gradient_norm *= delta ( last_model_indices, n_last_modelpoints, same_model_used, ) = get_last_model_indices_and_check_for_repeated_model( model_indices=model_indices, last_model_indices=last_model_indices, n_modelpoints=n_modelpoints, n_last_modelpoints=n_last_modelpoints, ) converged, convergence_reason = _check_for_convergence( gradient_norm=gradient_norm, gradient_norm_initial=gradient_norm_initial, critval=critval_accepted, delta=delta, delta_old=delta_old, same_model_used=same_model_used, converged=converged, reason=convergence_reason, niter=niter, gtol_abs=gtol_abs, gtol_rel=gtol_rel, gtol_scaled=gtol_scaled, maxiter=maxiter, ) if converged: break result_dict = { "solution_x": history.get_xs(index=accepted_index), "solution_criterion": history.get_best_residuals(), "history_x": history.get_xs(), "history_criterion": history.get_residuals(), "n_iterations": niter, "success": converged, "message": convergence_reason, } return result_dict
5,341,813
def get_overlap_info(bbox): """ input: box_priors: [batch_size, number_obj, 4] output: [number_object, 6] number of overlapped obj (self not included) sum of all intersection area (self not included) sum of IoU (Intersection over Union) average of all intersection area (self not included) average of IoU (Intersection over Union) roi area """ batch_size, num_obj, bsize = bbox.shape # generate input feat overlap_info = Variable(torch.FloatTensor(batch_size, num_obj, 6).zero_().cuda()) # each obj has how many overlaped objects reverse_eye = Variable(1.0 - torch.eye(num_obj).float().cuda()) # removed diagonal elements for i in range(batch_size): sliced_bbox = bbox[i].view(num_obj, bsize) sliced_intersection = bbox_intersections(sliced_bbox, sliced_bbox) sliced_overlap = bbox_overlaps(sliced_bbox, sliced_bbox, sliced_intersection) sliced_area = bbox_area(sliced_bbox) # removed diagonal elements sliced_intersection = sliced_intersection * reverse_eye sliced_overlap = sliced_overlap * reverse_eye # assign value overlap_info[i, :, 0] = (sliced_intersection > 0.0).float().sum(1) overlap_info[i, :, 1] = sliced_intersection.sum(1) overlap_info[i, :, 2] = sliced_overlap.sum(1) overlap_info[i, :, 3] = overlap_info[i, :, 1] / (overlap_info[i, :, 0] + 1e-9) overlap_info[i, :, 4] = overlap_info[i, :, 2] / (overlap_info[i, :, 0] + 1e-9) overlap_info[i, :, 5] = sliced_area return overlap_info.view(batch_size * num_obj, 6)
5,341,814
def comvideo(inputvideo, endvideo): """ Composite two videos. :param inputvideo: first clip of the composite :param endvideo: second clip of the composite :return: None; writes the composited video to disk """ path, _ = os.path.splitext(inputvideo) a = '视频空间合成{}.mp4'.format(endvideo) outname = path + a video1 = VideoFileClip(inputvideo) video2 = VideoFileClip(endvideo) video3 = CompositeVideoClip([video1, video2]) video3.write_videofile(outname)
5,341,815
def get_current_language(request, set_default=True, default_id=1): """ Description: Returns the current active language. Will set a default language if none is found. Args: request (HttpRequest): HttpRequest from Django set_default (Boolean): Indicates if a default language must be activated (if none currently is). Default to True. default_id (Integer): The PK for the default Language instance. Default to 1 Returns: Language: The currently used language from our app's Language model """ # Base variables language = None language_name = request.session.get(LANGUAGE_SESSION_KEY, False) # Get the language if language_name: try: language = Language.objects.get(django_language_name=language_name) except Language.DoesNotExist: pass # Set a default language if necessary if language is None and set_default: language = set_default_language(request, default_id) # Always return the active language return language
5,341,816
def _update_user_proficiency(user_proficiency): """Updates the user_proficiency. Args: user_proficiency: UserContributionProficiency. The user proficiency to be updated. """ user_proficiency_model = user_models.UserContributionProficiencyModel.get( user_proficiency.user_id, user_proficiency.score_category ) if user_proficiency_model is not None: user_proficiency_model.user_id = user_proficiency.user_id user_proficiency_model.score_category = user_proficiency.score_category user_proficiency_model.score = user_proficiency.score user_proficiency_model.onboarding_email_sent = ( user_proficiency.onboarding_email_sent ) user_proficiency_model.update_timestamps() user_proficiency_model.put() else: user_models.UserContributionProficiencyModel.create( user_proficiency.user_id, user_proficiency.score_category, user_proficiency.score, user_proficiency.onboarding_email_sent)
5,341,817
def get_children(): """ Get children of a given instance. """ yield lambda selector, key: base.find_children(metadata.CRAIGSLIST, selector, key)
5,341,818
def is_leap_year(year): """ returns True for leap year and False otherwise :param int year: calendar year :return bool: """ # return (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0) return year % 100 != 0 or year % 400 == 0 if year % 4 == 0 else False
5,341,819
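A few sanity checks for is_leap_year covering the century rules:

assert is_leap_year(2000)      # divisible by 400
assert not is_leap_year(1900)  # divisible by 100 but not by 400
assert is_leap_year(2024)      # divisible by 4, not by 100
assert not is_leap_year(2023)  # not divisible by 4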
def update_search_params(context, **kwargs): """Update the set parameters of the current request""" params = context["request"].GET.copy() for k, v in kwargs.items(): params[k] = v return params.urlencode()
5,341,820
def dynamic2message(dynamic_dict: dict) -> Message: """ Convert a raw Bilibili dynamic (feed item) fetched from the API into a Message. """ author_name = dynamic_dict['desc']['user_profile']['info']['uname'] dynamic_id = dynamic_dict['desc']['dynamic_id'] if dynamic_dict['desc']['type'] == 1: # repost or vote text = f"用户[{author_name}]转发了动态:\n" + dynamic_dict['card']['item']['content'] + "\n---------------------\n" origin_dynamic = dynamic.get_info(dynamic_dict['card']['item']['orig_dy_id']) ori_message = dynamic2message(origin_dynamic) msg = MessageSegment.text(text) + ori_message + MessageSegment.text('\n---------------------') elif dynamic_dict['desc']['type'] == 2: # image-and-text dynamic text = f"用户[{author_name}]发布了动态:\n" + dynamic_dict['card']['item']['description'] msg = MessageSegment.text(text) for i in range(dynamic_dict['card']['item']['pictures_count']): msg = msg + MessageSegment.image(dynamic_dict['card']['item']['pictures'][i]['img_src']) elif dynamic_dict['desc']['type'] == 4: # text-only dynamic msg = MessageSegment.text(f"用户[{author_name}]发布了动态:\n" + dynamic_dict['card']['item']['content']) elif dynamic_dict['desc']['type'] == 8: # video upload msg = MessageSegment.text( f"用户[{author_name}]发布了视频:\n" + dynamic_dict['card']['dynamic'] + "\n视频标题:" + dynamic_dict['card'][ 'title'] + "\n视频链接:" + dynamic_dict['card']['short_link']) elif dynamic_dict['desc']['type'] == 64: # article/column post msg = MessageSegment.text(f"用户[{author_name}]发布了专栏:\n" + dynamic_dict['card']['title']) else: msg = MessageSegment.text(f'用户[{author_name}]发布了动态,但无法判断类型') msg = msg + MessageSegment.text(f'\n\n原动态链接:https://t.bilibili.com/{dynamic_id}') return msg
5,341,821
def main(include_diagnoses=(1,3)): """Training and validation using all modalities Parameters: ---------- include_diagnoses: tuple or list Which groups/diagnoses included in training and validation, such as (1,3) """ # Instantiate model model = Model() # Concatenate all modalities, demo and mean_num_na for i, metrics_ in enumerate(METRICS): if i == 0: data_train_all_mod = data_train[metrics_] else: data_train_all_mod = np.hstack([data_train_all_mod, data_train[metrics_]]) # Get included diagnoses idx_train = np.in1d(label_train, include_diagnoses) label_train_ = label_train[idx_train] data_train_ = data_train_all_mod[idx_train] # Concatenate all features # data_train_ = np.hstack([data_train_, mean_num_na_train_, demo_train_]) # data_validation_ = np.hstack([data_validation_, mean_num_na_validation_, demo_validation_]) # Denan data_train_, label_train_, value = model.denan(data_train_, label_train_, fill=True) # Preprocessing scaler, data_train_ = model.preprocess_(data_train_) # Re-sample ros = RandomOverSampler(random_state=0) print(Counter(label_train_.reshape([-1,]))) data_train_, label_train_ = ros.fit_sample(data_train_, label_train_) print(Counter(label_train_)) # Fit clf1 = model.make_linearSVC() clf2 = model.make_SVC() clf3 = model.make_logistic_regression() clf4 = model.make_ridge_regression() clf5 = model.make_xgboost() clf6 = model.make_mlp() clf7 = model.make_SVC_rbf() clf8 = model.make_gnb() clfs = [clf1, clf2, clf3, clf4, clf5, clf6, clf7, clf8] # Merge models merged_model = model.train_ensemble_classifier(data_train_, label_train_, *clfs) # Dict models and scaler model_and_param = {"merged_model": merged_model, "fill_value": value, "scaler": scaler} # Save original models, merged model and scaler groups = ["nc","mci","ad"] save_name = [groups[include_diagnoses_-1] for include_diagnoses_ in include_diagnoses] save_name = "ensemble_model_" + "VS".join(save_name) + ".pickle.dat" save_file = os.path.join(r"D:\My_Codes\lc_private_codes\AD分类比赛", save_name) pickle.dump(model_and_param, open(save_file, "wb")) # Predict predict_proba_train, prediction_train = model.predict(merged_model, data_train_) # Evaluation acc_train, auc_train, f1_train, confmat_train, report_train = model.evaluate(label_train_, predict_proba_train, prediction_train) print(f"Traing dataset:\nacc = {acc_train}\nf1score = {f1_train}\nauc = {auc_train}\n")
5,341,822
def dem_to_roughness(src_raster, band=0): """Calculate the roughness for the DEM. Parameters ---------- src_raster : Raster The dem used to calculate the roughness. band : int, optional, default: 0 source band number to use. Returns ------- dst_raster: Raster roughness calculated from the DEM. """ options = dict(band=band+1, format='MEM') ds_src = src_raster.to_gdal_ds() ds = gdal.DEMProcessing('', ds_src, 'Roughness', **options) dst_raster = tgp.read_gdal_ds(ds) return dst_raster
5,341,823
def conv_filter_image_summary(tag, kernel, padding=1): """Creates an image summary of the convolutional filters of the first layer. Parameters ---------- tag: str or Tensor of type string A scalar Tensor of type string. Used to build the tag of the summary values. A placeholder could be used to feed in the tag name to generate multiple images, because using a fixed string causes to overwrite the previous one. kernel: 4D Tensor of shape [kh, kw, channels_in, filters_out] The convolutional filters to write to summary. Note that this is only supported for the frst conv-layer, which has to have an 1 or 3 as channels_in. padding: int, optional The padding between each patch of the grid. Example ---------- conv = light.network.conv2d("Conv1", ...) # Get kernel by reusing the same variable-scope with tf.variable_scope("Conv1", reuse=True): kernel = tf.get_variable("W") light.board.conv_filter_image_summary("conv1_filters", kernel); """ with tf.name_scope("filter_summary"): # X and Y dimensions, w.r.t. padding static_shape = kernel.get_shape().as_list() ky = static_shape[0] + padding * 2 kx = static_shape[1] + padding * 2 channels_in = static_shape[2] filters_out = static_shape[3] grid_length = int(math.ceil(math.sqrt(filters_out))) grid_y = grid_x = grid_length # add padding to input kernel k = tf.pad(kernel, tf.constant([[padding,padding],[padding,padding],[0,0],[0,0]])) # add placeholder filters to be able to build a square placeholders_to_add = grid_y * grid_x - filters_out if (placeholders_to_add > 0): placeholders = tf.zeros((ky, kx, channels_in, placeholders_to_add)) k = tf.concat([k, placeholders], axis=3) # put filters_out to the 1st dimension k = tf.transpose(k, (3, 0, 1, 2)) # organize grid on Y axis k = tf.reshape(k, tf.stack([grid_x, ky * grid_y, kx, channels_in])) # switch X and Y axes k = tf.transpose(k, (0, 2, 1, 3)) # organize grid on X axis k = tf.reshape(k, tf.stack([1, kx * grid_x, ky * grid_y, channels_in])) # back to normal order (not combining with the next step for clarity) k = tf.transpose(k, (2, 1, 3, 0)) # to tf.image_summary order [batch_size, height, width, channels] k = tf.transpose(k, (3, 0, 1, 2)) # scale to [0, 1] x_min = tf.reduce_min(k) x_max = tf.reduce_max(k) grid = (k - x_min) / (x_max - x_min) # write filter image to summary with tf.device('/cpu:0'): tf.summary.image(tag, grid, max_outputs=1)
5,341,824
def import_file(path, name=None): """Import modules from file.""" spec = importlib.util.spec_from_file_location(name or '', path) module = importlib.util.module_from_spec(spec) if name: sys.modules[name] = module spec.loader.exec_module(module) return module
5,341,825
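An illustrative call to import_file; the path and module name below are hypothetical:

mod = import_file("/tmp/my_helpers.py", name="my_helpers")
print(mod.__name__)                      # my_helpers

import sys
assert sys.modules["my_helpers"] is mod  # registered because a name was passed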
def batch_matmul_checker( attrs: Any, args: List[relay.expr.Expr], op_name: str ) -> bool: # pylint: disable=unused-variable """Check if batch_matmul is supported by TensorRT.""" if get_tensorrt_use_implicit_batch_mode() and len(args[0].checked_type.shape) != len( args[1].checked_type.shape ): logger.info(f"{op_name}: requires use_implicit_batch=False.") return False return True
5,341,826
def func_run_dynamic(input_file, dynamic_dic, exclude, pprint): """ Execute one dynamic template :param input_file: (string) The template file name :param dynamic_dic: (dict) The dictionary of the dynamic variables :return: """ new_template_filename = create_dynamic_template(input_file, dynamic_dic) t = Template.Template() t.file_path = new_template_filename t.load_sections() t.set_execute_order() t.start_driver() report = t.run() if pprint: t.pprint(exclude_none=exclude) return t
5,341,827
def find(store_config, shardid): # FIXME require config instead """Find the path of a shard. Args: store_config: Dict of storage paths to optional attributes. limit: The dir size limit in bytes, 0 for no limit. use_folder_tree: Files organized in a folder tree (always on for fat partitions). shardid: Id of the shard to find. Returns: Path to the shard or None if not found. Raises: AssertionError: If input not valid. Example: import storjlib id = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae" store_config = {"path/alpha": None, "path/beta": None} shard_path = storjlib.store.manager.remove(store_config, id) print("shard located at %s" % shard_path) """ assert(storjlib.store.shard.valid_id(shardid)) store_config = setup(store_config) # setup if needed for store_path, attributes in store_config.items(): use_folder_tree = attributes["use_folder_tree"] shard_path = _get_shard_path(store_path, shardid, use_folder_tree) if os.path.isfile(shard_path): return shard_path return None
5,341,828
def octoquote(): """prints a small gift""" print(np.random.choice(all_quotes))
5,341,829
def CppCallStaticMethod(scope, type_defn, method, param_exprs): """Gets the representation of a static function call. Args: scope: a Definition for the scope in which the expression will be written. type_defn: a Definition, representing the type of the object being called. method: a Function, representing the function to call. param_exprs: a list of strings, each being the expression for the value of each parameter. Returns: a string, which is the expression for the function call. Raises: InvalidArrayUsage: always. This function should not be called on an array. """ raise InvalidArrayUsage
5,341,830
def xclGetDeviceInfo2 (handle, info): """ xclGetDeviceInfo2() - Obtain various bits of information from the device :param handle: (xclDeviceHandle) device handle :param info: (xclDeviceInfo pointer) Information record :return: 0 on success or appropriate error number """ libc.xclGetDeviceInfo2.restype = ctypes.c_int libc.xclGetDeviceInfo2.argtypes = [xclDeviceHandle, ctypes.POINTER(xclDeviceInfo2)] return libc.xclGetDeviceInfo2(handle, info)
5,341,831
def template_failure(request, status=403, **kwargs): """ Renders a SAML-specific template with general authentication error description. """ return render(request, 'djangosaml2/login_error.html', status=status)
5,341,832
def zflatten2xyz(z, x=None, y=None): """ flatten an nxm 2D array to [x, y, z] of shape=(n*m, 3)""" if x is None: x = np.arange(0, z.shape[0], step=1) if y is None: y = np.arange(0, z.shape[1], step=1) xlen = len(x) ylen = len(y) assert z.shape[0] == xlen and z.shape[1] == ylen, 'check dimensions!!!' xx, yy = np.meshgrid(x, y) xx = xx.T yy = yy.T # meshgrid takes the second dimension as x xylen = xlen*ylen return np.concatenate((xx.reshape((xylen, 1)), yy.reshape((xylen, 1)), z.reshape((xylen, 1))), axis=1)
5,341,833
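A tiny worked example for zflatten2xyz: a 2x3 grid flattens into six (x, y, z) rows, with the default x and y generated from the array shape:

z = np.arange(6.0).reshape(2, 3)
xyz = zflatten2xyz(z)
print(xyz.shape)  # (6, 3)
print(xyz[0])     # [0. 0. 0.]  -> x=0, y=0, z=z[0, 0]
print(xyz[-1])    # [1. 2. 5.]  -> x=1, y=2, z=z[1, 2]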
def test_update_site_logo(admin_client, settings): """ We can add a site logo, and it renders out """ url = reverse("admin:index") settings.JAZZMIN_SETTINGS["site_logo"] = "books/img/logo.png" response = admin_client.get(url) soup = BeautifulSoup(response.content, "html.parser") assert soup.find("a", class_="brand-link").find("img")["src"] == "/static/books/img/logo.png"
5,341,834
def penalized_log_likelihood(curve,t,pairwise_contact_matrix,a,b,term_weights,square_root_speed=None,pairwise_distance_matrix=None): """ penalized log likelihood """ if pairwise_distance_matrix is None: #if the do not already have the pairwise distance matrix computed, then compute it pairwise_distance_matrix=compute_pairwise_distance(curve) L=0 # initialize log likelihood term R1=0 # initialize first order term R2=0 # initialize second order term Q=0 # initialize parametrization penalty term S=0 # initialize shape prior term if term_weights[0]!=0: L=term_weights[0]*loglikelihood_Varoquaux_with_missing(pairwise_distance_matrix,a,b,pairwise_contact_matrix) if (term_weights[1]!=0)&(term_weights[2]==0): R1=term_weights[1]*srvf.length(curve,t) if (term_weights[2]!=0): R1,R2=srvf.roughness2(curve,t) R1=term_weights[1]*R1 R2=term_weights[2]*R2 if (term_weights[3]!=0): Q=term_weights[3]*parametrization_error(curve,square_root_speed,t) if (term_weights[4]!=0): S=term_weights[4]*0 # not implemented yet return L-R1-R2-Q-S
5,341,835
def listening_ports(): """ Reads listening ports from /proc/net/tcp """ ports = [] if not os.path.exists(PROC_TCP): return ports with open(PROC_TCP) as fh: for line in fh: if '00000000:0000' not in line: continue parts = line.lstrip(' ').split(' ') if parts[2] != '00000000:0000': continue local_port = parts[1].split(':')[1] local_port = int('0x' + local_port, base=16) ports.append(local_port) return ports
5,341,836
def dict2array(X): """ Returns a Numpy array from dictionary Parameters ---------- X: dict """ all_var = [] for k in X.keys(): all_var.append(X[k]) return np.array(all_var)
5,341,837
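A quick illustration of dict2array; the values are stacked in the dict's insertion order:

X = {"a": [1, 2, 3], "b": [4, 5, 6]}
print(dict2array(X))        # [[1 2 3]
                            #  [4 5 6]]
print(dict2array(X).shape)  # (2, 3)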
def preprocess_field_data(subdelimiter, field_value, path_to_script): """Executes a field preprocessor script and returns its output and exit status code. The script is passed the field subdelimiter as defined in the config YAML and the field's value, and prints a modified version of the value (result) back to this function. """ cmd = subprocess.Popen([path_to_script, subdelimiter, field_value], stdout=subprocess.PIPE) result, stderrdata = cmd.communicate() return result, cmd.returncode
5,341,838
def dfs_level_details(): """This function traverses all levels in a DFS style. It gets the child directories and recursively calls the same function on child directories to extract its level details Returns: Dictionary: Key is the level name, value is a list with first element as url and the second element as the bounding box of that url """ level_details = {} local_server_name = app.config['HOST_NAME'] if 'HOST_NAME' in app.config else "Unknown" try: bounding_box_level = GeographyHelper.GetCoordinatesForLevel(local_server_name) except: print("An error has occured while retrieveing bounding box") bounding_box_level = None level_details[local_server_name] = [request.url_root, bounding_box_level] locations_to_urls = DirectoryNameToURL.objects(relationship='child').all() if locations_to_urls == None: return None for location_to_url in locations_to_urls: request_url = urljoin(location_to_url.url, url_for('api.dfs_level_details')) try: response = requests.get(request_url) if response.status_code != 200: return jsonify(response.json()), response.status_code results = response.json() if results == None or len(results) == 0: continue for result in results: level_details[result] = results[result] except: return jsonify(ERROR_JSON), 400 return jsonify(level_details), 200
5,341,839
def set_ansible_envar() -> None: """Set an envar if not set, runner will need this""" ansible_config_path, msgs = get_conf_path("ansible.cfg") for msg in msgs: logger.debug(msg) # set as env var, since we hand env vars over to runner if ansible_config_path and not os.getenv("ANSIBLE_CONFIG"): os.environ.setdefault("ANSIBLE_CONFIG", ansible_config_path) logger.debug("ANSIBLE_CONFIG set to %s", ansible_config_path)
5,341,840
def makeNonParameterized(p): """Return a new Pointset stripped of its parameterization. """ if isinstance(p, Pointset) and p._isparameterized: return Pointset({'coordarray': copy(p.coordarray), 'coordnames': copy(p.coordnames), 'norm': p._normord, 'labels': copy(p.labels)}) else: raise TypeError("Must provide a parameterized Pointset")
5,341,841
def interface_getattr(*v): """Behaves like `getattr` but for zope Interface objects which hide the attributes. .. note:: Originally I simply tried to override :meth:`InterfaceDocumenter.special_attrgetter` to deal with the special access needs of :class:`Interface` objects, but found that this is not intended to be overwritten. Instead one should register the special accessor using :func:`app.add_autodoc_attrgetter`. """ obj, name = v[:2] if "__dict__" == name: # Interface objects do not list their members through # __dict__. return dict((n, obj.get(n)) for n in obj.names()) if name in obj.names(all=True): return obj.get(name) else: return getattr(*v)
5,341,842
def round_time(t, to=timedelta(seconds=1)): """ cftime will introduces noise when decoding values into date objects. This rounds time in the date object to the nearest second, assuming the init time is at most 1 sec away from a round minute. This is used when merging datasets so their time dims match up. Args: t: datetime or cftime object to: size of increment to round off to. By default round to closest integer second. Returns: datetime or cftime object rounded to nearest minute """ midnight = t.replace(hour=0, minute=0, second=0, microsecond=0) time_since_midnight = exact_cftime_datetime_difference(midnight, t) remainder = time_since_midnight % to quotient = time_since_midnight // to if remainder <= to / 2: closest_multiple_of_to = quotient else: closest_multiple_of_to = quotient + 1 rounded_time_since_midnight = closest_multiple_of_to * to return midnight + rounded_time_since_midnight
5,341,843
def decide_end(match_list, return_whole_match_object = False): """ Among all the match objects, return the match string closest to the end of the text Return : a string. If return_whole_match_object is True, return a match object """ if len(match_list) == 0: return pd.NA ends = np.array(list(map(lambda match_object : match_object.span()[1], match_list))) closest_index = np.argmax(ends) if return_whole_match_object: return match_list[closest_index] else: return match_list[closest_index].group()
5,341,844
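An illustrative use of decide_end with matches produced by re.finditer; the text is made up:

import re

text = "cat and dog and cat"
matches = list(re.finditer(r"cat", text))
print(decide_end(matches))                                          # 'cat' (the last occurrence)
print(decide_end(matches, return_whole_match_object=True).span())   # (16, 19)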
def ensure_tag(tag_id): """ Check if a tag with id `tag_id` exists. If not, create it. """ if not tag_id: raise ValueError("Tag id must not be empty") with get_db().transaction() as t: try: t.query(_Tag).filter(_Tag.id == tag_id).one() except NoResultFound: tag = _Tag() tag.id = tag_id tag.color = random.randrange(0, len(colors)) t.add(tag)
5,341,845
def get_level_refactorings_count(level: int, dataset: str = "") -> str: """ Get the count of all refactorings for the given level Parameter: level (int): get the refactoring instances for this level dataset (str) (optional): filter for these specific projects """ return f"SELECT refactoring, count(*) FROM (" + \ get_instance_fields(refactoringCommits, [(refactoringCommits, ["refactoring"])], f"{refactoringCommits}.level = {str(level)}", dataset) + \ f" AND {valid_refactorings_filter(refactoringCommits)} AND {file_type_filter(refactoringCommits)}) t group by refactoring order by count(*) desc"
5,341,846
def repeat_batch(t, K, dim=0): """Repeat a tensor while keeping the concept of a batch. :param t: `torch.Tensor`: The tensor to repeat. :param K: `int`: The number of times to repeat the tensor. :param dim: `int`: The dimension to repeat in. This should be the batch dimension. :returns: `torch.Tensor`: The repeated tensor. The new shape will be batch size * K at dim, the rest of the shapes will be the same. Example:: >>> a = torch.arange(10).view(2, -1) >>> a tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) >>> a.repeat(2, 1) tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) >>> repeat_batch(a, 2) tensor([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [5, 6, 7, 8, 9]]) """ shape = t.shape tiling = [1] * (len(shape) + 1) tiling[dim + 1] = K tiled = t.unsqueeze(dim + 1).repeat(tiling) old_bsz = shape[dim] new_bsz = old_bsz * K new_shape = list(shape[:dim]) + [new_bsz] + list(shape[dim + 1 :]) return tiled.view(new_shape)
5,341,847
def ifte(s, g_cond, g_true, g_false): """goal that succeeds if g_cond and g_true succeed or g_cond fails and g_false succeeds""" def loop(s_inf=g_cond(s)): try: first_cond = next(s_inf) except StopIteration: yield from g_false(s) return except SuspendIteration as suspension: raise SuspendIteration(loop(suspension.stream)) yield from append_inf(g_true(first_cond), append_map_inf(g_true, s_inf)) return loop()
5,341,848
def filter_by_continue_threshold_variance_threshold(peak_info, acc, cont_win_size=3, cont_thres=4, var_thres=0.001): """ Calculate the continuity over a given window length, then compute the variance and filter the data by the given threshold :param peak_info: a matrix with 5 columns of peak information :param acc: accelerometer data :param cont_win_size: continuity window length :param cont_thres: continuity threshold :param var_thres: variance threshold :return: first column (indices) of the peaks that pass the filter """ end_for = len(peak_info[:,2])-1 for i in np.arange(cont_thres-1, end_for): v_count = 0 for x in np.arange(1, cont_thres+1): if np.var(acc[int(peak_info[i-x+1, 0]):int(peak_info[i-x+2, 0]+1)], ddof=1) > var_thres: v_count = v_count + 1 if v_count >= cont_win_size: peak_info[i, 4] = 1 else: peak_info[i, 4] = 0 peak_info = peak_info[peak_info[:, 4] == 1, 0] return peak_info
5,341,849
def configure(config: Configuration) -> None: """Configure the Origin Request handler.""" global _config _config = config
5,341,850
def send_update(*args: str) -> bool: """ Updates the path endpoint to contain the current UTC timestamp """ assert args, "Firebase path cannot be empty" endpoint = args[-1] value = {endpoint: datetime.utcnow().isoformat()} return send_message(value, *args[:-1])
5,341,851
def test_sophos_firewall_web_filter_update_command(requests_mock): """ Scenario: Update an existing web filter. Given: - User has provided valid credentials. When: - sophos_firewall_web_filter_update is called. Then: - Ensure outputs prefix is correct. - Ensure a sample value from the API matches what is generated in the context. """ from sophos_firewall import Client, sophos_firewall_web_filter_update_command mock_response = load_mock_response('web_filter_set.xml') requests_mock.post(REQUEST_URL, text=mock_response) mock_response = load_mock_response('web_filter_get.xml') requests_mock.get(REQUEST_URL, text=mock_response) client = Client(base_url=BASE_URL, verify=False, auth=('uname', 'passwd'), proxy=False) result = sophos_firewall_web_filter_update_command(client, {'name': 'b', 'default_action': 'Allow'}) assert result.outputs_prefix == 'SophosFirewall.WebFilterPolicy' assert result.outputs.get('Name') == 'unitest'
5,341,852
def run_quad_extraction(cart3d_triq_filename): """tests getting the normal groups""" cart3d = Cart3D(log=None, debug=False) result_names = [] # read the mesh only cart3d.read_cart3d(cart3d_triq_filename, result_names=result_names) points = cart3d.points elements = cart3d.elements celements = elements.copy() normals, groups = get_normal_groups(points, celements) tris, quads = normal_groups_to_quads(celements, normals, groups) write_nastran_quads_tris(points, tris, quads, bdf_filename='tris_quads.bdf')
5,341,853
def execute_custom(datatype, runtype, driver, data_repository, step_list): """ Execute a custom testcase """ print_info("{0} {1}".format(datatype, runtype)) tc_status = False if "suite_exectype" in data_repository and data_repository["suite_exectype"].upper() == "ITERATIVE": print_info("Testsuite execute type=iterative but the testcase datatype=custom. " "All testcases in an iterative testsuite should have datatype=iterative, " "Hence this testcase will be marked as failure.") elif runtype.upper() == 'SEQUENTIAL_KEYWORDS' or runtype.upper() == 'PARALLEL_KEYWORDS': tc_status = driver.main(step_list, data_repository, tc_status, system_name=None) else: print_error("Unsupported runtype found, please check testcase file") return tc_status
5,341,854
def setup(bot): """ Setup the cog """ bot.add_cog(Admin(bot))
5,341,855
def _gather_function(properties, funcs): """generate functions of all components as sequence""" for name, fun in funcs.iteritems(): if len(fun) == 1: properties[name] = fun[0] else: properties[name] = _gen_sequence_function(fun)
5,341,856
def parseSolFile(filename): """Parses a SOL file and extracts soil profiles.""" data = {} profile = None lat = None lon = None with open(filename) as fin: for line in fin: if line.startswith("*"): if profile is not None: data[(lat, lon)] = "{0}\r\n".format(profile) profile = line[1:].strip() elif not line.startswith("@") and len(line.strip()) > 0: toks = line.split() if len(toks) == 5: lat = float(toks[2]) lon = float(toks[3]) profile += "\r\n{0}".format(line.strip()) else: try: float(toks[0]) line = line.replace(toks[1], "".join([" "]*len(toks[1]))) profile += "\r\n{0}".format(line.rstrip()) except (ValueError, IndexError): profile += "\r\n {0}".format(line.rstrip()) # store the final profile, which is not followed by another "*" header if profile is not None: data[(lat, lon)] = "{0}\r\n".format(profile) return data
5,341,857
def test_validate_params(): """Testing validation of params attribute.""" # four different ways in which params can fail invalid_params_0 = (1.0, 1.0, 0.02, 0.02, 0.15, 0.33, 0.03) invalid_params_1 = {'A0': 1.0, 'g': -0.02, 'L0': 1.0, 'n': -0.02, 's': 0.15, 'alpha': 0.33, 'delta': 0.03} invalid_params_2 = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15, 'alpha': 0.33, 'delta': -0.03} invalid_params_3 = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': -0.15, 'alpha': 0.33, 'delta': 0.03} invalid_params_4 = {'A0': -1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15, 'alpha': 0.33, 'delta': 0.03} invalid_params_3 = {'A0': 1.0, 'g': 0.02, 'L0': -1.0, 'n': 0.02, 's': 0.15, 'alpha': 0.33, 'delta': 0.03} # params must be a dict with nose.tools.assert_raises(AttributeError): solow.Model(output=valid_output, params=invalid_params_0) # effective depreciation rate must be positive with nose.tools.assert_raises(AttributeError): solow.Model(output=valid_output, params=invalid_params_1) # physical depreciation rate must be positive with nose.tools.assert_raises(AttributeError): solow.Model(output=valid_output, params=invalid_params_2) # savings rate must be positive with nose.tools.assert_raises(AttributeError): solow.Model(output=valid_output, params=invalid_params_3) # initial condition for A must be positive with nose.tools.assert_raises(AttributeError): solow.Model(output=valid_output, params=invalid_params_4)
5,341,858
def test_environment(): """Test loading form environment variables""" for var, value in env_vars: os.environ[var] = value config = yaconfig.Config(metaconfig) config.load_environment(prefix="YACONFIG_TEST_") for variable, value in config.items(): assert value == env_expected[variable]
5,341,859
def get_deps_info(projects, configs): """Calculates dependency information (forward and backwards) given configs.""" deps = {p: configs[p].get('deps', {}) for p in projects} # Figure out the backwards version of the deps graph. This allows us to figure # out which projects we need to test given a project. So, given # # A # / \ # B C # # We want to test B and C, if A changes. Recipe projects only know about the # B-> A and C-> A dependencies, so we have to reverse this to get the # information we want. downstream_projects = collections.defaultdict(set) for proj, targets in deps.items(): for target in targets: downstream_projects[target].add(proj) return deps, downstream_projects
5,341,860
def write_cflags(): """Adds C-Flags. C++ version is defined at the beginning of this file""" text = f"""CFLAGS = ${{TF_CFLAGS}} ${{OMP_CFLAGS}} -fPIC -O2 -std={CPPVERSION} LDFLAGS = -shared ${{TF_LFLAGS}} """ text += write_cflags_cuda() return text
5,341,861
def ins_to_sem_compatibility(sem_labels_for_instances, num_sem_labels, stuff_sem_cls_ids, stuff_penalisation=1.0): """ Returns the compatibility matrix for the instance_labels -> semantic_labels bipartite potentials in BCRF. Args: sem_labels_for_instances: Semantic labels of the instances. 0th instance must be 'no_instance' with label -1 num_sem_labels: The total number of semantic labels used stuff_sem_cls_ids: List of semantic labels for the 'stuff' classes (no instances for these classes) stuff_penalisation: Relative strength of the association between 'stuff' classes and 'no_instance' object instance (instance label 0) Returns: A matrix of shape (num_instances, num_sem_labels), where the entry (i, j) contains the connection strength between the i th instance and the j the semantic label. """ mat = np.zeros((len(sem_labels_for_instances), num_sem_labels), dtype=np.float32) assert sem_labels_for_instances[0] == -1 # First instance must be 'no_instance' # Attraction between an instance and its semantic class for inst_lbl, sem_lbl in islice(enumerate(sem_labels_for_instances), 1, None): mat[inst_lbl, sem_lbl] = 1.0 # TODO(sadeep) Learn this as a vector of size len(thing_sem_cls_ids) # Attraction between `no_instance` and stuff classes if stuff_sem_cls_ids is not None: for stuff_id in stuff_sem_cls_ids: mat[0, stuff_id] = stuff_penalisation # TODO(sadeep) Learn this as a vector of size len(stuff_sem_cls_ids) return mat
5,341,862
def display_matches(BL_im, FU_im, BL_points, FU_points, inliers=[]): """ A function that displays the two given images and plots the matching points in each of the corresponding images. """ fig = plt.figure() fig.add_subplot(1, 2, 1) plt.imshow(BL_im) plt.title('BL01') for point in range(len(BL_points)): if len(inliers) > 0: if point in inliers: plt.scatter(BL_points[point, 0], BL_points[point, 1], marker='o', c='r') else: plt.scatter(BL_points[point, 0], BL_points[point, 1], marker='o', c='b') plt.annotate(str(point + 1), (BL_points[point, 0], BL_points[point, 1])) continue plt.scatter(BL_points[point, 0], BL_points[point, 1], marker='o') plt.annotate(str(point + 1), (BL_points[point, 0], BL_points[point, 1])) fig.add_subplot(1, 2, 2) plt.imshow(FU_im) plt.title('FU01') for point in range(len(FU_points)): if len(inliers) > 0: if point in inliers: plt.scatter(FU_points[point, 0], FU_points[point, 1], marker='o', c='r') else: plt.scatter(FU_points[point, 0], FU_points[point, 1], marker='o', c='b') plt.annotate(str(point + 1), (FU_points[point, 0], FU_points[point, 1])) continue plt.scatter(FU_points[point, 0], FU_points[point, 1], marker='o') plt.annotate(str(point + 1), (FU_points[point, 0], FU_points[point, 1])) plt.show()
5,341,863
def test_update_package_dry_run(monkeypatch): """Test generating an update command for a package.""" monkeypatch.setattr(manage, "call", Mock()) pkg = PkgFile("mypkg", "1.0", replaces=PkgFile("mypkg", "0.9")) update_package(pkg, ".", dry_run=True) assert not manage.call.mock_calls
5,341,864
def get_coverage(inputs): """Get edge coverage. Returns: A dictionary of inputs and corresponding coverage """ cov_dict = dict() for test_input in inputs: # Get coverage by running the program cov = coverage(test_input) # Update coverage dictionary of test input cov_dict[test_input] = cov return cov_dict
5,341,865
def test_inchi_key(input, output): """Check that inchi key is the same""" rd_mol = Chem.MolFromSmiles(input) rd_inchi_key = cmiles.generator.get_inchi_and_key(rd_mol)[1] assert rd_inchi_key == output
5,341,866
def compress_as(filename, fmt, target=None, keep=True): """Compress an existing file. Supported compression formats are: gzip, bzip2, zip, and lzma (Python 3.3 or newer only). Args: filename: The path and name of the uncompressed file. fmt: Decides to which format the file will be compressed. * *zip*: Uses the standard zip library. * *bz2*: Uses the bz2 library. * *gz*: Uses the GNU zip library. * *xz*: Uses the lzma format. target: The default output filename is *filename.fmt*. If you do not like it, you can set another filename here. keep: If true, keep the original file after compressing. Otherwise it will be deleted. Default is keeping. Returns: The filename of the newly created file. """ if not is_compression_format(fmt): raise ValueError("Unknown compression format '%s'!" % fmt) if target is None: target = ".".join([filename, fmt]) # The filename inside the zipped archive target_filename = os.path.basename(target) target_basename, extension = os.path.splitext(target_filename) if extension.endswith(fmt): target_filename = target_basename # Read datafile in 100 MiB chunks for good performance/memory usage chunksize = 100 * 1024 * 1024 compfile = get_compressor(fmt) try: if fmt == "zip": with compfile(target, 'w') as f_out: f_out.write( filename, arcname=target_filename, compress_type=zipfile.ZIP_DEFLATED ) else: with open(filename, 'rb') as f_in: if fmt == "gz": # Coming from https://stackoverflow.com/a/38020236 with open(target, 'wb') as f_out: with compfile(filename, 'wb', fileobj=f_out) as f_out: shutil.copyfileobj(f_in, f_out, length=chunksize) elif fmt == "bz2" or fmt == "xz": with compfile(target, 'wb') as f_out: shutil.copyfileobj(f_in, f_out, length=chunksize) except Exception as e: raise e else: if not keep: os.unlink(filename) return target
5,341,867
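Hypothetical calls to compress_as; the is_compression_format/get_compressor helpers it relies on are referenced by the row but not shown here:

archive = compress_as("data.txt", "gz")                           # writes data.txt.gz, keeps data.txt
print(archive)                                                    # data.txt.gz
compress_as("data.txt", "zip", target="backup.zip", keep=False)   # custom name, delete the original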
def average(arr, mode = "mixed"): """ average(arr, mode) takes the average of a given array Once again, the modes of add() can be used here to denote what the type of the array is The function below, determine_mode(arr) can be used to determine the correct mode for your array """ if len(arr) == 0: return 0.0 return add(arr, mode)/len(arr)
5,341,868
def createTag(tag): """ Method that creates a tag on the ExtraHop system. Parameters: tag (str): The name of the tag """ url = urlunparse(("https", HOST, "/api/v1/tags", "", "", "")) headers = {"Authorization": "ExtraHop apikey=%s" % API_KEY} data = {"name": tag} r = requests.post(url, headers=headers, json=data) if r.status_code == 201: print(f"Created tag {tag}") else: print("Failed to create tag") print(r.status_code) print(r.text) sys.exit()
5,341,869
def save_result_to_csv(source, results, dst="."): """Save ingestion results to CSV file.""" with open(f"{dst}/ingestion-{source}.csv", "w") as f: writer = csv.writer(f) for initial_size, result in results.items(): for entities, time in result.items(): writer.writerow([initial_size, entities, time])
5,341,870
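A sketch of the nested results mapping save_result_to_csv expects; the numbers and the source name below are made up:

results = {
    1000:  {10: 0.4, 100: 3.2},   # initial_size -> {entity_count: seconds}
    10000: {10: 0.5, 100: 3.9},
}
save_result_to_csv("feast", results, dst="/tmp")  # writes /tmp/ingestion-feast.csv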
def non_halting(p): """Return a non-halting part of parser `p` or `None`.""" return left_recursive(p) or non_halting_many(p)
5,341,871
def set_cfg(config_name:str): """ Sets the active config. Works even if cfg is already imported! """ global cfg # Note this is not just an eval because I'm lazy, but also because it can # be used like ssd300_config.copy({'max_size': 400}) for extreme fine-tuning cfg.replace(eval(config_name))
5,341,872
def remove_products_by_search_number(user_id: str, search_number: str): """Remove products of search.""" db = get_database_connection() search_url = db.hget(f'{DB_SEARCH_PREFIX}{user_id}', search_number) remove_launched_search(user_id, search_url.decode('utf-8')) products_pattern = f'{DB_PRODUCT_PREFIX}{user_id}:*' user_product_keys = db.keys(pattern=products_pattern) keys_for_deletion = [] for key in user_product_keys: if db.hget(key, 'search_url') == search_url: keys_for_deletion.append(key) if not keys_for_deletion: db_logger.debug(f'Removed 0 products for search {search_number} of user {user_id}') return db.delete(*keys_for_deletion) db_logger.debug(f'Removed products for search {search_number} of user {user_id}')
5,341,873
def _isValidWord(word): """Determine whether a word is valid. A valid word is a valid english non-stop word.""" if word in _englishStopWords: return False elif word in _englishWords: return True elif wordnet.synsets(word): return True else: return False
5,341,874
def compare_floats(value1: float, value2: float):
    """Compare two floats for equality within a tolerance of 1e-6."""
    return abs(value1 - value2) <= 10**-6
5,341,875
def carrega_dataset(caminho_diretorio: str, divisao: Tuple[int, int], embaralhar=True) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Load the dataset from the directory containing the `noisy.npy` and `original.npy` files.

    Args:
        caminho_diretorio (str): path of the directory.
        divisao (Tuple[int, int]): how to split the data between training and test.
        embaralhar (bool, optional): whether to shuffle the dataset. Defaults to True.

    Returns:
        Tuple: returns (x_train, y_train, x_test, y_test)
    """
    if caminho_diretorio != '':
        x = np.load(os.path.join(caminho_diretorio, 'noisy.npy'))
        y = np.load(os.path.join(caminho_diretorio, 'original.npy'))
    else:
        x = np.load('noisy.npy')
        y = np.load('original.npy')

    if embaralhar:
        # Use the same seed for both arrays so noisy/original pairs stay aligned.
        np.random.seed(42)
        np.random.shuffle(x)

        np.random.seed(42)
        np.random.shuffle(y)

    x_train, x_test = dividir_dataset_em_treinamento_e_teste(x, divisao=divisao)
    y_train, y_test = dividir_dataset_em_treinamento_e_teste(y, divisao=divisao)
    return (x_train, y_train, x_test, y_test)
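A usage sketch, assuming a directory containing the two .npy files and the dividir_dataset_em_treinamento_e_teste helper from the same module (the meaning of divisao=(80, 20) as an 80/20 split is an assumption, since that helper is not shown):

# Hypothetical denoising dataset stored in ./data
x_train, y_train, x_test, y_test = carrega_dataset('./data', divisao=(80, 20))
print(x_train.shape, x_test.shape)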
5,341,876
def _build_treelite_classifier(m, data, arg={}): """Setup function for treelite classification benchmarking""" from cuml.utils.import_utils import has_treelite, has_xgboost if has_treelite(): import treelite import treelite.runtime else: raise ImportError("No treelite package found") if has_xgboost(): import xgboost as xgb else: raise ImportError("No XGBoost package found") # use maximum 1e5 rows to train the model train_size = min(data[0].shape[0], 100000) dtrain = xgb.DMatrix(data[0][:train_size, :], label=data[1][:train_size]) params = { "silent": 1, "eval_metric": "error", "objective": "binary:logistic" } params.update(arg) max_depth = arg["max_depth"] num_rounds = arg["num_rounds"] n_feature = data[0].shape[1] tmpdir = tempfile.mkdtemp() model_name = f"xgb_{max_depth}_{num_rounds}_{n_feature}_{train_size}.model" model_path = os.path.join(tmpdir, model_name) bst = xgb.train(params, dtrain, num_rounds) tl_model = treelite.Model.from_xgboost(bst) tl_model.export_lib( toolchain="gcc", libpath=model_path+"treelite.so", params={'parallel_comp': 40}, verbose=False ) return treelite.runtime.Predictor(model_path+"treelite.so", verbose=False)
5,341,877
def insertion_sort(start, end): """Sort an array of pairs of addresses. This is an insertion sort, so it's slowish unless the array is mostly sorted already (which is what I expect, but XXX check this). """ next = start while next < end: # assuming the interval from start (included) to next (excluded) # to be already sorted, move the next element back into the array # until it reaches its proper place. addr1 = next.address[0] addr2 = next.address[1] scan = next while scan > start and addr1 < scan.address[-2]: scan.address[0] = scan.address[-2] scan.address[1] = scan.address[-1] scan -= arrayitemsize scan.address[0] = addr1 scan.address[1] = addr2 next += arrayitemsize
5,341,878
def _RenameChartsAndPointsWithSuffix(charts, suffix):
    """Append |suffix| to all chart names (except 'trace') and point names (except 'summary').

    Args:
        charts: A dictionary of charts.
        suffix: A string suffix, e.g. '_control'.
    """
    # First rename all points except 'summary'.
    for chart_name in charts:
        chart = charts[chart_name]
        # Copy the keys so the dict can be mutated while iterating.
        old_point_names = list(chart.keys())
        for point_name in old_point_names:
            if point_name == 'summary':
                continue
            chart[point_name + suffix] = chart[point_name]
            chart.pop(point_name, None)

    # Then rename all charts except 'trace'.
    old_chart_names = list(charts.keys())
    for chart_name in old_chart_names:
        if chart_name == 'trace':
            continue
        chart = charts[chart_name]
        new_chart_name = chart_name + suffix
        for point_name in chart:
            chart[point_name]['name'] = new_chart_name
        charts[new_chart_name] = chart
        charts.pop(chart_name, None)
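A small illustration of the in-place renaming (chart contents abbreviated; real charts normally carry more fields per point):

charts = {
    'load_time': {
        'summary': {'name': 'load_time', 'value': 1.2},
        'page_a':  {'name': 'load_time', 'value': 1.0},
    },
    'trace': {
        'run_1': {'name': 'trace', 'value': 'trace.json'},
    },
}
_RenameChartsAndPointsWithSuffix(charts, '_control')
# charts now has keys 'load_time_control' and 'trace'.
# In 'load_time_control': point 'page_a' became 'page_a_control', 'summary'
# kept its key, and every point's 'name' field is now 'load_time_control'.
# In 'trace': the chart key is preserved, but its point 'run_1' was still
# renamed to 'run_1_control'.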
5,341,879
def _pkq(pk):
    """
    Returns a query based on pk. Note that these are designed to integrate with cells and how they are saved in the database

    :Parameters:
    ----------------
    pk : list
        list of primary keys

    :Returns:
    -------
    dict
        mongo query filtering for table

    :Examples:
    ----------
    >>> import datetime
    >>> assert _pkq(None) == {}
    >>> assert dict(_pkq(['world', 'hello'])) == {"_pk": {"$eq": ["hello", "world"]}}
    """
    if pk is None or len(pk) == 0:
        return {}
    else:
        # Primary keys are stored sorted, so sort before building the query
        # (this matches the doctest above).
        return q[_pk] == sorted(pk)
5,341,880
def inspect_decode_labels(pred, num_images=1, num_classes=NUM_CLASSES,
                   inspect_split=[0.9, 0.8, 0.7, 0.5, 0.0], inspect_ratio=[1.0, 0.8, 0.6, 0.3]):
    """Decode batch of segmentation masks according to the prediction probability.

    Args:
      pred: result of inference.
      num_images: number of images to decode from the batch.
      num_classes: number of classes to predict (including background).
      inspect_split: probability thresholds; each split is rendered with a different brightness.
      inspect_ratio: brightness ratio applied to each split.

    Returns:
      A batch with num_images RGB images of the same size as the input.
    """
    if isinstance(pred, torch.Tensor):
        pred = pred.data.cpu().numpy()
    n, c, h, w = pred.shape
    pred = pred.transpose([0, 2, 3, 1])
    if n < num_images:
        num_images = n
    outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
    for i in range(num_images):
        img = Image.new('RGB', (w, h))
        pixels = img.load()
        for j_, j in enumerate(pred[i, :, :, :]):
            for k_, k in enumerate(j):
                assert k.shape[0] == num_classes
                k_value = np.max(softmax(k))
                k_class = np.argmax(k)
                for it, iv in enumerate(inspect_split):
                    if k_value > iv:
                        break
                if iv > 0:
                    pixels[k_, j_] = tuple(map(lambda x: int(inspect_ratio[it]*x), label_colours[k_class]))
        outputs[i] = np.array(img)
    return torch.from_numpy(outputs.transpose([0, 3, 1, 2]).astype('float32')).div_(255.0)
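A hedged usage sketch with random logits (NUM_CLASSES, label_colours and softmax come from the surrounding module; the batch shape and class count below are assumptions, and label_colours must cover at least 19 classes):

# Fake network output: batch of 2, 19 classes, 64x128 resolution.
fake_pred = torch.randn(2, 19, 64, 128)
vis = inspect_decode_labels(fake_pred, num_images=1, num_classes=19)
print(vis.shape)  # expected: torch.Size([1, 3, 64, 128])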
5,341,881
def set_default_values(
    **attributes: Union[float, int, str],
) -> Dict[str, Union[float, int, str]]:
    """Set the default value of various parameters.

    :param attributes: the attribute dict for the electronic filter being
        calculated.
    :return: attributes; the updated attribute dict.
    :rtype: dict
    """
    if attributes["quality_id"] <= 0:
        attributes["quality_id"] = 1

    return attributes
5,341,882
def validate_function(fn: FunctionType, config: Configuration, module_type: ModuleType) -> FunctionValidationResult:
    """Validates the docstring of a function against its signature.

    Args:
        fn (FunctionType): The function to validate.
        config (Configuration): The configuration to use while validating.
        module_type (ModuleType): The module from which the function was extracted.

    Returns:
        FunctionValidationResult: The result of validating this function.
    """
    log(f"Validating function: {fn}")
    result = FunctionValidationResult(fn)

    doc = inspect.getdoc(fn)
    if not doc:
        if config.fail_on_missing_docstring:
            result.result = ResultType.FAILED
            result.fail_reason = "Function does not have a docstring"
            _, line_number = inspect.getsourcelines(fn)
            result.range = Range(line_number, line_number, 0, 0)
        else:
            result.result = ResultType.NO_DOC
        return result

    parser = config.get_parser()

    summary = parser.get_summary(doc, module_type)
    if not summary and config.fail_on_missing_summary:
        result.result = ResultType.FAILED
        result.fail_reason = "Function does not have a summary"
        result.range = __get_docstring_range(fn, module_type, doc)
        return result

    sig = inspect.signature(fn)
    sig_parameters = [Parameter(name, proxy.annotation) for name, proxy in sig.parameters.items() if name != "self"]
    sig_return_type = type(None) if sig.return_annotation is None else sig.return_annotation

    try:
        doc_parameters = parser.get_parameters(doc, module_type)
        doc_return_type = parser.get_return_type(doc, module_type)
    except ParseException as e:
        result.result = ResultType.FAILED
        result.fail_reason = f"Unable to parse docstring: {str(e)}"
        result.range = __get_docstring_range(fn, module_type, doc)
        return result

    # Validate return type
    if sig_return_type != doc_return_type:
        result.result = ResultType.FAILED
        result.fail_reason = f"Return types differ. Expected (from signature) {sig_return_type}, but got (in docs) {doc_return_type}."
        result.range = __get_docstring_range(fn, module_type, doc)
        return result

    # Validate equal number of parameters
    if len(sig_parameters) != len(doc_parameters):
        result.result = ResultType.FAILED
        result.fail_reason = f"Number of arguments differ. Expected (from signature) {len(sig_parameters)} arguments, but found (in docs) {len(doc_parameters)}."
        result.range = __get_docstring_range(fn, module_type, doc)
        return result

    # Validate name and type of function parameters
    for sigparam, docparam in zip(sig_parameters, doc_parameters):
        if sigparam.name != docparam.name:
            result.result = ResultType.FAILED
            result.fail_reason = f"Argument names differ. Expected (from signature) '{sigparam.name}', but got (in docs) '{docparam.name}'"
            result.range = __get_docstring_range(fn, module_type, doc)
            return result

        # NOTE: Optional[str] == Union[str, None] is True
        if sigparam.type != docparam.type:
            result.result = ResultType.FAILED
            result.fail_reason = f"Argument types differ. Argument '{sigparam.name}' was expected (from signature) to have type '{sigparam.type}', but has (in docs) type '{docparam.type}'"
            result.range = __get_docstring_range(fn, module_type, doc)
            return result

    # Validate exceptions raised
    if config.fail_on_raises_section:
        sig_exceptions = get_exceptions_raised(fn, module_type)
        doc_exceptions = parser.get_exceptions_raised(doc)
        if len(sig_exceptions) != len(doc_exceptions):
            result.result = ResultType.FAILED
            result.fail_reason = f"Number of listed raised exceptions does not match actual. Doc: {doc_exceptions}, expected: {sig_exceptions}"
            result.range = __get_docstring_range(fn, module_type, doc)
            return result

        # Exceptions raised by the function but missing from the docstring
        undocumented = set(sig_exceptions) - set(doc_exceptions)
        if len(undocumented) > 0:
            result.result = ResultType.FAILED
            result.fail_reason = f"Listed raised exceptions do not match actual. Docstring: {doc_exceptions}, expected: {sig_exceptions}"
            result.range = __get_docstring_range(fn, module_type, doc)
            return result

    result.result = ResultType.OK
    return result
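A sketch of how this validator might be driven. How Configuration is constructed is an assumption; only the attributes used above (fail_on_missing_docstring, get_parser(), fail_on_raises_section, ...) are known, and the module under test is hypothetical:

import inspect
import my_package.my_module as mod   # hypothetical module to validate

config = Configuration()             # assumed default constructor
for name, fn in inspect.getmembers(mod, inspect.isfunction):
    res = validate_function(fn, config, mod)
    print(name, res.result)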
5,341,883
def find_existing_installation(package_name: str, display_name: str, test=True):
    """
    Finds an existing installation of a package in the windows registry given the package name and display name

    #### Arguments
        package_name (str): Name of the package
        display_name (str): Display name of the package
        test (bool, optional): If the command is being run to test successful installation / uninstallation. Defaults to True.

    Returns:
        bool: True if an existing installation of the package is found, False otherwise.
    """
    import registry
    key = registry.get_uninstall_key(package_name, display_name)
    installed_packages = [''.join(f.replace('.json', '').split(
        '@')[:1]) for f in os.listdir(PathManager.get_appdata_directory() + r'\Current')]

    if key:
        if not test:
            return package_name in installed_packages
        return True

    return False
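A hedged usage sketch (the package identifiers are made up):

# Check whether 7zip appears to be installed before reinstalling it.
if find_existing_installation('7zip', '7-Zip', test=False):
    print('7zip is already installed')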
5,341,884
def say(l, b, i): """ !d Repeat a word or phrase !a <message...> !r moderator """ try: print 'Saying the phrase:', ' '.join(i.args) b.l_say(' '.join(i.args), i, 1) return True except TypeError: return False
5,341,885
def run_benchmark(args): """Runs the benchmark.""" try: dtest = xgb.DMatrix('dtest.dm') dtrain = xgb.DMatrix('dtrain.dm') if not (dtest.num_col() == args.columns and dtrain.num_col() == args.columns): raise ValueError("Wrong cols") if not (dtest.num_row() == args.rows * args.test_size and dtrain.num_row() == args.rows * (1 - args.test_size)): raise ValueError("Wrong rows") except xgb.core.XGBoostError: print("Generating dataset: {} rows * {} columns".format(args.rows, args.columns)) print("{}/{} test/train split".format(args.test_size, 1.0 - args.test_size)) tmp = time.time() X, y = make_classification(args.rows, n_features=args.columns, n_redundant=0, n_informative=args.columns, n_repeated=0, random_state=7) if args.sparsity < 1.0: X = np.array([[np.nan if RNG.uniform(0, 1) < args.sparsity else x for x in x_row] for x_row in X]) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size, random_state=7) print("Generate Time: %s seconds" % (str(time.time() - tmp))) tmp = time.time() print("DMatrix Start") dtrain = xgb.DMatrix(X_train, y_train) dtest = xgb.DMatrix(X_test, y_test, nthread=-1) print("DMatrix Time: %s seconds" % (str(time.time() - tmp))) dtest.save_binary('dtest.dm') dtrain.save_binary('dtrain.dm') param = {'objective': 'binary:logistic'} if args.params != '': param.update(ast.literal_eval(args.params)) param['tree_method'] = args.tree_method print("Training with '%s'" % param['tree_method']) tmp = time.time() xgb.train(param, dtrain, args.iterations, evals=[(dtest, "test")]) print("Train Time: %s seconds" % (str(time.time() - tmp)))
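A sketch of the argument object this benchmark expects. The field names are taken from the attribute accesses above; the values are purely illustrative:

import argparse

args = argparse.Namespace(
    rows=100000,         # total rows to generate
    columns=50,          # feature columns
    test_size=0.25,      # fraction held out for the test DMatrix
    sparsity=0.0,        # fraction of values replaced with NaN
    params='',           # optional dict literal merged into the XGBoost params
    tree_method='hist',  # e.g. 'hist', 'approx' or 'gpu_hist'
    iterations=100,      # boosting rounds
)
run_benchmark(args)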
5,341,886
def rSanderSelect(dbItem, index=0, interactive=False):
    """
    rSanderSelect(dbItem,index=0,interactive=False)
    Select which rSander Henry data to use in dbItem.

    Parameters:
        dbItem, db[key] dictionary object with keys = ['hbpSIP','hbpSIPL',
            'hbpSI_index']
        index, positive integer index for list item in hbpSIPL to move into
            hbpSIP. Use interactive=True to display choices and ask for
            user input for an index.
        interactive, True to display choices and ask user input for an index,
            False to make the change silently

    Returns:
        Nothing on success (dbItem is successfully changed) or
        error messages if there is an issue
    """
    keys = ['hbpSIP', 'hbpSIPL', 'hbpSI_index']
    for key in keys:  # test if dbItem has valid dictionary keys
        if key not in dbItem.keys():
            return print("Henry data (%s) not found in dbItem[%s]\n" % (key, dbItem['name']))
    nHbpSIPL = len(dbItem['hbpSIPL'])
    if not interactive:
        invalIndex = "Invalid index: %s\n0 <= index <= %s\n" % (index, nHbpSIPL-1)
        if re.match(r'^[0-9]+$', str(index)):  # make sure index is a positive integer
            if index > nHbpSIPL-1:  # check for valid index
                return print(invalIndex)
            dbItem['hbpSI_index'] = index
            dbItem['hbpSIP'] = [float(dbItem['hbpSIPL'][index][0]),
                                float(dbItem['hbpSIPL'][index][1])]
        else:
            return print(invalIndex)
    else:
        header = ['Index', 'Ho /mol/kg/Pa', 'dln(H)/d(1/T) /K', 'Code', 'Ref.']
        inStr = "Select an index (%s to %s) or e(x)it: " % (0, nHbpSIPL-1)
        choice = ''
        while choice != 'x':
            table = []
            for idx in range(nHbpSIPL):
                table.append([idx])
                table[idx].extend(dbItem['hbpSIPL'][idx])
            print('\n' + tabulate(table, headers=header, numalign='center',
                                  stralign='center') + '\n')
            choice = input(inStr)
            inStr = "Select an index (%s to %s) or e(x)it: " % (0, nHbpSIPL-1)
            invalStr = "Invalid input: %s\n0 <= index <= %s or \'x\' to exit\n" % (choice, nHbpSIPL-1)
            if re.match(r'^[0-9]+$', choice):
                index = int(choice)
                if index > nHbpSIPL-1:  # check for valid index
                    inStr = invalStr + inStr
                else:
                    dbItem['hbpSI_index'] = index
                    dbItem['hbpSIP'] = [float(dbItem['hbpSIPL'][index][0]),
                                        float(dbItem['hbpSIPL'][index][1])]
            else:
                inStr = invalStr + inStr
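A minimal illustration of the expected dbItem shape (the species name, Henry constants and reference codes are invented):

dbItem = {
    'name': 'acetone',
    'hbpSIPL': [['3.0e-3', '3800', 'M', 'ref1'],
                ['2.7e-3', '3700', 'V', 'ref2']],
    'hbpSIP': [3.0e-3, 3800.0],
    'hbpSI_index': 0,
}
rSanderSelect(dbItem, index=1)   # silently switch to the second entry
print(dbItem['hbpSIP'])          # [0.0027, 3700.0]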
5,341,887
def dataset_w_pedigree_field(): """ :return: Return model Dataset example with `pedigree_field` defined. """ search_pattern = SearchPattern(left="*/*/*_R1.fastq.gz", right="*/*/*_R2.fastq.gz") dataset = DataSet( sheet_file="sheet.tsv", sheet_type="germline_variants", search_paths=("/path",), search_patterns=(search_pattern,), naming_scheme="only_secondary_id", sodar_uuid="99999999-aaaa-bbbb-cccc-999999999999", pedigree_field="familyId", ) return dataset
5,341,888
def build_index(site_data, posts): """Build index. Builds an index page from the given site data and list of posts. Parameters ---------- site_data : dict Site- and blog-level data. posts : list A list of dictionaries of page data. Returns ------- None """ template = os.path.join(site_data["template_path"], site_data["blog"]["template"]) ext = get_ext(template) page = render_template(template, site_data, posts) if not site_data["blog"]["continue"] or site_data["blog"]["continue"] == 0: path = os.path.join( site_data["output_dir"], site_data["blog_dir"], f"index{ext}" ) else: path = os.path.join( site_data["output_dir"], site_data["blog_dir"], f"index{site_data['blog']['continue']}{ext}", ) save_page(page, path)
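A sketch of the site_data fields this function reads (all values invented; render_template, get_ext and save_page are the module's own helpers, and the fields of each post depend on the template):

site_data = {
    "template_path": "templates",
    "output_dir": "public",
    "blog_dir": "blog",
    "blog": {"template": "index.html", "continue": 0},
}
posts = [{"title": "Hello", "date": "2021-01-01", "content": "..."}]
build_index(site_data, posts)   # writes public/blog/index.html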
5,341,889
def main(): """Make a jazz noise here""" args = get_args() file = '../data/metagenomes-4.1.csv' njobs = 10 #interpro_dir = os.path.join(os.getcwd(), 'interpro') interpro_dir = '../data/interpro' if not os.path.isdir(interpro_dir): os.makedirs(interpro_dir) tmpl = 'https://www.ebi.ac.uk/metagenomics/api/v1/analyses/{}/downloads' interpro_re = re.compile('interpro\.tsv', re.IGNORECASE) jobs_file = os.path.join(os.getcwd(), 'jobs.txt') jobs_fh = open(jobs_file, 'wt') with open(file) as fh: reader = csv.DictReader(fh) for i, row in enumerate(reader): analysis = row['Analysis'] print('{:3}: {}'.format(i+1, analysis)) url = tmpl.format(analysis) r = requests.get(url) dat = json.loads(r.text) files = dat['data'] if not files: continue for file in files: match = interpro_re.search(file['id']) if match: file_url = file['links']['self'] basename = os.path.basename(file_url) fpath = os.path.join(interpro_dir, basename) jobs_fh.write('wget -nv -O {} --no-clobber {}\n'.format( fpath, file_url)) jobs_fh.close() cmd = 'parallel -j {} < {}'.format(njobs, jobs_file) subprocess.run(cmd, shell=True) print('Done')
5,341,890
def get_new_access_token(client_id, client_secret, refresh_token): """Use long-lived refresh token to get short-lived access token.""" response = requests.post( 'https://www.googleapis.com/oauth2/v4/token', data={ 'client_id': client_id, 'client_secret': client_secret, 'refresh_token': refresh_token, 'grant_type': 'refresh_token', }, timeout=TIMEOUT, ) response.raise_for_status() access_token = response.json()['access_token'] return access_token
5,341,891
def prettify_seconds(seconds):
    """
    Prettifies seconds.
    Takes number of seconds (int) as input and returns a prettified string.

    Example:
    >>> prettify_seconds(342543)
    '3 days, 23 hours, 9 minutes and 3 seconds'
    """
    if seconds < 0:
        raise ValueError("negative input not allowed")
    signs = {"s": {"singular": "second", "plural": "seconds"},
             "h": {"singular": "hour", "plural": "hours"},
             "min": {"singular": "minute", "plural": "minutes"},
             "d": {"singular": "day", "plural": "days"}
             }

    separator = ", "
    last_separator = " and "

    def get_sign(unit, value):
        if value == 1 or value == -1:
            return signs[unit]["singular"]
        else:
            return signs[unit]["plural"]

    days, remainder = divmod(seconds, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)

    daystext = "{} {}".format(days, get_sign("d", days)) if days else ""
    hourstext = "{} {}".format(hours, get_sign("h", hours)) if hours else ""
    minutestext = "{} {}".format(minutes, get_sign("min", minutes)) if minutes else ""

    if (not seconds) and (days or hours or minutes):
        secondstext = ""
    else:
        secondstext = "{} {}".format(seconds, get_sign("s", seconds))

    output_list = [daystext, hourstext, minutestext, secondstext]
    filtered = [item for item in output_list if item]
    if len(filtered) <= 2:
        output = last_separator.join(filtered)
    else:
        output = separator.join(filtered[:-1]) + last_separator + filtered[-1]
    return output
5,341,892
def _PrepareServer(vm): """Installs Tensorflow Serving on a single server vm. Args: vm: server vm to operate on """ logging.info('Installing Tensorflow Serving on server %s', vm) vm.Install('tensorflow_serving') vm.InstallPreprovisionedBenchmarkData( BENCHMARK_NAME, [RESNET_NHWC_SAVEDMODEL_TGZ], TF_SERVING_BASE_DIRECTORY) extract_dir = posixpath.join( TF_SERVING_BASE_DIRECTORY, "resnet") vm.RemoteCommand('mkdir {0}'.format(extract_dir)) vm.RemoteCommand('cd {0} && tar --strip-components=2 --directory {1} -xvzf ' '{2}'.format(TF_SERVING_BASE_DIRECTORY, extract_dir, RESNET_NHWC_SAVEDMODEL_TGZ))
5,341,893
def get_regions_prodigal(fn):
    """Parse Prodigal output into regions keyed by sequence id and strand."""
    regions = {}
    with open(fn, 'r') as f:
        for line in f:
            if line[:12] == '# Model Data':
                continue
            if line[:15] == '# Sequence Data':
                m = re.search(r'seqhdr="(\S+)"', line)
                if m:
                    id = m.group(1)
                    regions[id] = {}
                    regions[id]['+'] = []
                    regions[id]['-'] = []
            else:
                r = line[1:].rstrip().split('_')
                # Also store the index of the fragment - prodigal uses these
                # (rather than coords) to identify sequences in the fasta output.
                n = int(r[0])
                s = int(r[1])
                e = int(r[2])
                regions[id][r[3]].append(NumberedRegion(s, e, n))
    return regions
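A hedged usage sketch. The expected line format of the Prodigal coordinate file is inferred from the parsing above, and the input path is hypothetical:

# Header lines carry seqhdr="<sequence id>"; each gene line is assumed to
# look like ">1_337_2799_+" (index_start_end_strand).
regions = get_regions_prodigal('contigs.genes')
for seq_id, by_strand in regions.items():
    print(seq_id, 'forward genes:', len(by_strand['+']),
          'reverse genes:', len(by_strand['-']))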
5,341,894
def add_processor(name, processor_object): """Adds a Processor to the autopkglib namespace""" globals()[name] = processor_object if name not in _PROCESSOR_NAMES: _PROCESSOR_NAMES.append(name)
5,341,895
def get_transform(account_name: Optional[str] = None, resource_group_name: Optional[str] = None, transform_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTransformResult: """ Use this data source to access information about an existing resource. :param str account_name: The Media Services account name. :param str resource_group_name: The name of the resource group within the Azure subscription. :param str transform_name: The Transform name. """ __args__ = dict() __args__['accountName'] = account_name __args__['resourceGroupName'] = resource_group_name __args__['transformName'] = transform_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:media/v20180701:getTransform', __args__, opts=opts, typ=GetTransformResult).value return AwaitableGetTransformResult( created=__ret__.created, description=__ret__.description, last_modified=__ret__.last_modified, name=__ret__.name, outputs=__ret__.outputs, type=__ret__.type)
5,341,896
def do_ptp_modify(cc, args): """Modify PTP attributes.""" ptps = cc.ptp.list() ptp = ptps[0] op = "replace" attributes = [] if args.enabled is not None: attributes.append('enabled=%s' % args.enabled) if args.mode is not None: attributes.append('mode=%s' % args.mode) if args.transport is not None: attributes.append('transport=%s' % args.transport) if args.mechanism is not None: attributes.append('mechanism=%s' % args.mechanism) if len(attributes) == 0: print("No options provided.") return patch = utils.args_array_to_patch("replace", attributes) try: ptp = cc.ptp.update(ptp.uuid, patch) except exc.HTTPNotFound: raise exc.CommandError('PTP not found: %s' % ptp.uuid) _print_ptp_show(ptp)
5,341,897
def default_preprocessing(df): """Perform the same preprocessing as the original analysis: https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb """ return df[(df.days_b_screening_arrest <= 30) & (df.days_b_screening_arrest >= -30) & (df.is_recid != -1) & (df.c_charge_degree != 'O') & (df.score_text != 'N/A')]
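A hedged usage sketch (assumes the ProPublica compas-scores-two-years.csv has been downloaded locally; the column names match those filtered above):

import pandas as pd

df = pd.read_csv('compas-scores-two-years.csv')   # hypothetical local path
filtered = default_preprocessing(df)
print(len(df), '->', len(filtered), 'rows after ProPublica filtering')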
5,341,898
def get_step_type_udfs( step_type: str, workflow: str, adapter: ArnoldAdapter = Depends(get_arnold_adapter), ): """Get available artifact udfs for a step type""" artifact_udfs = find_step_type_artifact_udfs( adapter=adapter, step_type=step_type, workflow=workflow ) process_udfs = find_step_type_process_udfs( adapter=adapter, step_type=step_type, workflow=workflow ) return artifact_udfs + process_udfs
5,341,899