content
stringlengths
22
815k
id
int64
0
4.91M
def extract_and_coadd(ra, dec, pm_ra, pm_dec, match_radius=4./3600., search_radius=25./60, sigma_clip=None, query_timeout=60., upper_limits=True, return_exps=False):
    """
    Find GALEX archive sources matching the target (accounting for proper
    motion between observing visits) and coadd the fluxes from each visit.

    Parameters
    ----------
    ra, dec : float
        Target coordinates in decimal degrees.
    pm_ra, pm_dec : float
        Proper motion in mas/yr.
    match_radius : float
        Match radius in degrees (GALEX 1-sigma astrometric error ~0.4").
    search_radius : float
        MCAT query radius in degrees; must be large enough for upper limits.
    sigma_clip : float or None
        Optional sigma clipping of fluxes; usually unwise for variable stars.
    query_timeout : float
        Seconds to wait for the server before giving up.
    upper_limits : bool
        Estimate upper limits for exposures with no source match.
    return_exps : bool
        If True, also return the per-exposure data from extract_source.

    Returns
    -------
    nuv_coadd, fuv_coadd : tuple
        Coadded flux and error in counts s-1 (flux is -999 with a positive
        error for upper limits), optionally followed by the exposure data.
    """
    nuv_data, fuv_data = extract_source(ra, dec, pm_ra, pm_dec, match_radius,
                                        search_radius, query_timeout, upper_limits)
    coadds = []
    for band_data in (nuv_data, fuv_data):
        band = list(coadd_fluxes(*band_data[:3], sigma_clip=sigma_clip))
        if return_exps:
            band.append(band_data)
        coadds.append(tuple(band))
    return tuple(coadds)
27,400
async def create_payout(
    session: ClientSession, data: CreatePayoutRequest
) -> CreatePayoutResponse:
    """
    Create a payout via the Razorpay API.

    Posts the request payload to the /payouts endpoint using HTTP basic
    auth and parses the JSON response into a CreatePayoutResponse.
    """
    endpoint = RAZORPAY_BASE_URL + "/payouts"
    credentials = aiohttp.BasicAuth(RAZORPAY_KEY_ID, RAZORPAY_KEY_SECRET)
    async with session.post(endpoint, json=data.__dict__, auth=credentials) as resp:
        payload = await resp.json()
        print(payload, resp.status)
        return from_dict(data_class=CreatePayoutResponse, data=payload)
27,401
def eval_ocr_metric(pred_texts, gt_texts):
    """Evaluate text recognition performance: word accuracy and 1-N.E.D.

    See https://rrc.cvc.uab.es/?ch=14&com=tasks for details.

    Args:
        pred_texts (list[str]): Text strings of prediction.
        gt_texts (list[str]): Text strings of ground truth.

    Returns:
        dict[str, float]: Metric dict for text recognition, including
            word_acc, word_acc_ignore_case, word_acc_ignore_case_symbol,
            char_recall, char_precision, 1-N.E.D and BLEU, each rounded
            to four decimal places.
    """
    assert isinstance(pred_texts, list)
    assert isinstance(gt_texts, list)
    assert len(pred_texts) == len(gt_texts)

    counts = count_matches(pred_texts, gt_texts)
    eps = 1e-8

    def _ratio(num_key, den_key):
        # Tiny epsilon guards against a zero denominator.
        return 1.0 * counts[num_key] / (eps + counts[den_key])

    eval_res = {
        'word_acc': _ratio('match_word_num', 'gt_word_num'),
        'word_acc_ignore_case': _ratio('match_word_ignore_case', 'gt_word_num'),
        'word_acc_ignore_case_symbol':
            _ratio('match_word_ignore_case_symbol', 'gt_word_num'),
        'char_recall': _ratio('true_positive_char_num', 'gt_char_num'),
        'char_precision': _ratio('true_positive_char_num', 'pred_char_num'),
        '1-N.E.D': 1.0 - counts['ned'],
        'BLEU': counts['bleu'],
    }
    return {key: float('{:.4f}'.format(value)) for key, value in eval_res.items()}
27,402
def get_all_zones():
    """Return a list of all available zones, paging through the API."""
    cf = CloudFlare.CloudFlare(raw=True)
    all_zones = []
    page = 1
    total_pages = None
    # total_pages is unknown until the first response arrives.
    while total_pages is None or page <= total_pages:
        raw_results = cf.zones.get(params={'per_page': 100, 'page': page})
        all_zones.extend(raw_results['result'])
        total_pages = raw_results['result_info']['total_pages']
        page += 1
    return all_zones
27,403
def entry_id(e):
    """Return an entry identifier which is not the bibtex key.

    The id is a (lowercased DOI, author+title string) pair.
    """
    return (e.get('doi', '').lower(), author_id(e) + title_id(e))
27,404
def AskNumber(text="unknown task"):
    """
    Ask the user to interactively input a number (float or int) at any point
    in the script, and return the input number.

    | __option__ | __description__
    | --- | ---
    | *text | an optional string to identify for what purpose the chosen number will be used.

    BUGFIX: the original relied on Python-2 `input()` evaluating the entry to
    a number; under Python 3 `input()` returns a string, so the
    `isinstance(innumber, (float, int))` check always failed and the function
    re-prompted forever (via unbounded recursion). The entry is now parsed
    explicitly and the loop is iterative.
    """
    prompt = ("\n\nwrite a comma or integer number to use for " + str(text)
              + " (example: 15 or 15.83)\nnumber = ")
    while True:
        raw = input(prompt)
        # Try int first so "15" comes back as an int, not 15.0.
        try:
            return int(raw)
        except ValueError:
            pass
        try:
            return float(raw)
        except ValueError:
            print("""\n---> error: the number must be either a floating point comma or integer number""")
27,405
def TokenEmphasis(character="_"):
    """
    Build the token for italic (`<i>`, `<em>`) text, which is rendered
    with one asterisk or underscore.
    """
    assert character in ("_", "*")
    token = {
        "type": "Characters",
        "data": character,
        "_md_type": mdTokenTypes["TokenEmphasis"],
    }
    return token
27,406
def get_data(filename, **kwargs):
    """Load the selected file from the bundled data directory.

    Extra keyword arguments are forwarded to np.genfromtxt.
    """
    filepath = os.path.join(data.__path__[0], filename)
    return np.genfromtxt(filepath, **kwargs)
27,407
def generate_plan(suite, node):
    """Randomly generate a plan, completely ignoring norms.

    This is mainly for testing the norm driven algorithm.

    BUGFIX: the original assigned the result of ``next_actions()`` to a local
    variable also named ``next_actions``, shadowing the function; the call at
    the bottom of the loop then raised ``TypeError: 'list' object is not
    callable`` on the second iteration. The local is renamed ``actions``.
    """
    plan = [node]
    actions = next_actions(suite, node)
    while actions != []:
        chosen = random.sample(actions, 1)[0]
        # chosen.path may hold a sequence of actions; skip its first element.
        steps = chosen.path[1:]
        plan.extend(steps)
        node = steps[-1]
        actions = next_actions(suite, node)
    return plan
27,408
def update_y(pred_coords, ypart_tracker, history=1500):
    """
    Append the latest ankle/shoulder keypoint averages to the tracker,
    keeping only the most recent ``history`` entries per series.

    :param pred_coords: y coordinates (indexable by keypoint id)
    :param ypart_tracker: dict of numpy arrays keyed 'anks'/'shdr'/'anks-shdr'
    :param history: number of most recent detections to retain
    :return: updated y-tracker
    """
    # Keypoints 15/16 are the ankles, 5/6 the shoulders (averaged pairwise).
    ankles = (pred_coords[15] + pred_coords[16]) * 0.5
    shoulders = (pred_coords[5] + pred_coords[6]) * 0.5
    new_values = {
        'anks': ankles,
        'shdr': shoulders,
        'anks-shdr': ankles - shoulders,
    }
    for part, value in new_values.items():
        ypart_tracker[part] = np.append(ypart_tracker[part], [value], axis=0)
    # Trim every series to the last `history` entries.
    return {k: v[-history:] for k, v in ypart_tracker.items()}
27,409
def augmentData(features, labels):
    """
    Augment the dataset by mirroring: every feature map is appended flipped
    along its last axis, and every label is appended negated.

    :param features: array of shape (n, h, w)
    :param labels: array of shape (n, ...)
    :return: (augmented features, augmented labels)
    """
    mirrored = features[:, :, ::-1]
    augmented_features = np.append(features, mirrored, axis=0)
    augmented_labels = np.append(labels, -labels, axis=0)
    return augmented_features, augmented_labels
27,410
def generate_election_spec_contest_groups(e, synpar):
    """
    Create synpar.n_cids-1 'random' contest groups.

    They get ids like 'gid2-6', meaning they cover cids 2 to 6 inclusive.
    """
    e.gids = []
    ordered_cids = sorted(list(e.cids))
    for low, high in syn.generate_segments(e, synpar, 1, synpar.n_cids):
        e.cgids_g["gid{}-{}".format(low, high)] = ordered_cids[low:high + 1]
27,411
def train_adversary():
    """ Trains an adversary on data from the data censoring process.

    Trains two classifiers against the module-level dataloaders for 5 epochs:
    `tmp_secret_classifier` on the raw images and `adv_secret_classifier` on
    the filtered/generated images, then evaluates both on the validation set.

    Returns the adversary's mean validation accuracy on the filtered images.

    NOTE(review): relies on many module-level objects (opt, filter, generator,
    optimizers, dataloaders, loss, utils, artifacts_path) and on CUDA being
    available -- confirm those are initialised before calling.
    """
    def accuracy(pred, true):
        # Fraction of argmax predictions matching the integer labels.
        u = true.cpu().numpy().flatten()
        p = np.argmax(pred.cpu().detach().numpy(), axis=1)
        acc = np.sum(u == p)/len(u)
        return acc
    tmp_secret_classifier.train()
    adv_secret_classifier.train()
    for i_epoch in range(5):
        for i_batch, batch in tqdm.tqdm(enumerate(train_dataloader, 0)):
            imgs = batch['image'].cuda()
            utility = batch['utility'].float().cuda()
            secret = batch['secret'].float().cuda()
            secret = secret.view(secret.size(0))
            utility = utility.view(utility.size(0))
            batch_size = imgs.shape[0]
            z1 = torch.randn(batch_size, opt.latent_dim).cuda()
            # Optionally pass images through the censoring filter.
            if opt.use_filter:
                filter_imgs = filter(imgs, z1, secret.long())
            else:
                filter_imgs = imgs
            # Optionally replace with generator output conditioned on a
            # randomly drawn fake secret.
            if opt.use_real_fake:
                z2 = torch.randn(batch_size, opt.latent_dim).cuda()
                gen_secret = Variable(LongTensor(np.random.choice([0.0, 1.0], batch_size)))
                filter_imgs = generator(filter_imgs, z2, gen_secret)
            # train tmp (baseline classifier on the raw images)
            optimizer_tmp.zero_grad()
            secret_pred = tmp_secret_classifier(imgs)
            loss = adversarial_loss(secret_pred, secret.long())
            loss.backward()
            optimizer_tmp.step()
            if i_batch % 50 == 0:
                acc = accuracy(secret_pred, secret)
                print("secret_tmp_acc: ", acc)
            # train adversary (on the censored images; detach so gradients
            # do not flow back into the filter/generator)
            optimizer_adv.zero_grad()
            secret_pred = adv_secret_classifier(filter_imgs.detach())
            loss = adversarial_loss(secret_pred, secret.long())
            loss.backward()
            optimizer_adv.step()
            if i_batch % 50 == 0:
                acc = accuracy(secret_pred, secret)
                print("secret_adv_acc: ", acc)
    utils.save_model(adv_secret_classifier, os.path.join(artifacts_path, "adv_secret_classifier.hdf5"))
    # Validation pass: accs1 tracks the adversary, accs2 the baseline.
    accs1 = []
    accs2 = []
    tmp_secret_classifier.eval()
    adv_secret_classifier.eval()
    for i_batch, batch in tqdm.tqdm(enumerate(valid_dataloader, 0)):
        imgs = batch['image'].cuda()
        secret = batch['secret'].float().cuda()
        secret = secret.view(secret.size(0))
        batch_size = imgs.shape[0]
        z1 = torch.randn(batch_size, opt.latent_dim).cuda()
        if opt.use_filter:
            filter_imgs = filter(imgs, z1, secret.long())
        else:
            filter_imgs = imgs
        if opt.use_real_fake:
            z2 = torch.randn(batch_size, opt.latent_dim).cuda()
            gen_secret = Variable(LongTensor(np.random.choice([0.0, 1.0], batch_size)))
            filter_imgs = generator(filter_imgs, z2, gen_secret)
        secret_pred = adv_secret_classifier(filter_imgs.detach())
        acc = accuracy(secret_pred, secret)
        accs1.append(acc)
        secret_pred = tmp_secret_classifier(imgs)
        acc = accuracy(secret_pred, secret)
        accs2.append(acc)
    acc1 = np.mean(accs1)
    acc2 = np.mean(accs2)
    print("test_secret_adv_acc: ", acc1)
    print("test_secret_tmp_acc: ", acc2)
    return acc1
27,412
def sigmoid(x):
    """Compute the logistic function 1 / (1 + exp(-x)) via tanh.

    Using tanh avoids overflow in exp(-x) for large negative x.
    """
    return (np.tanh(x / 2.) + 1) * 0.5
27,413
def get_unique_output_values(signals):
    """
    Count output digits whose segment length uniquely identifies them.

    Segment lengths 2, 3, 4 and 7 correspond to the digits 1, 7, 4 and 8,
    the only four digits identifiable by length alone.
    """
    return sum(
        1
        for signal in signals
        for digit in signal["output"]
        if len(digit) in (2, 3, 4, 7)
    )
27,414
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
    """
    Create scatter plot between each feature and the response.
    - Plot title specifies feature name
    - Plot title specifies Pearson Correlation between feature and response
    - Plot saved under given folder with file name including feature name

    Parameters
    ----------
    X : DataFrame of shape (n_samples, n_features)
        Design matrix of regression problem

    y : array-like of shape (n_samples, )
        Response vector to evaluate against

    output_path: str (default ".")
        Path to folder in which plots are saved
    """
    # Dummy/one-hot columns that are not informative as individual scatter
    # plots. NOTE(review): mutates the caller's DataFrame (inplace=True),
    # preserved from the original -- confirm callers rely on this.
    X.drop(columns=['yr_renovated_20', 'yr_renovated_19', 'yr_renovated_0.',
                    'yr_built_20', 'yr_built_19', 'date_2015', 'date_2014',
                    'zipcode_area_981', 'zipcode_area_980'], inplace=True)
    for column in X:
        # BUGFIX: the original divided np.cov (which defaults to ddof=1) by a
        # product of np.std values (ddof=0), so the reported value was not the
        # Pearson correlation. np.corrcoef computes it directly. The unused
        # least-squares fit (df/w/y_predict) has also been removed.
        pearson = np.corrcoef(X[column], y)[0, 1]
        fig = px.scatter(x=X[column], y=y)
        fig.update_layout(
            title=f"feature values of {column} column against the response values\n"
                  f"Pearson Correlation {pearson}",
            xaxis_title=f'{column}',
            yaxis_title='price')
        fig.show()
        fig.write_image(os.path.join(output_path, column + ".png"))
27,415
def split_and_pad(s, sep, nsplit, pad=None):
    """
    Split string ``s`` on ``sep``, up to ``nsplit`` times.

    Returns an iterator over the results of the split, padded with ``pad``
    items up to a total of ``nsplit + 1`` items.

    BUGFIX: the original ignored the ``pad`` parameter and always padded with
    ``None``; the default behaviour is unchanged.
    """
    parts = s.split(sep, nsplit)
    return itertools.chain(parts, itertools.repeat(pad, nsplit + 1 - len(parts)))
27,416
def transpose_report(report):
    """Transpose the report so that columns become rows."""
    return [list(row) for row in zip(*report)]
27,417
def _shape_from_resolution(resolution): """ Calculate the shape of the global Earth relief grid given a resolution. Parameters ---------- resolution : str Same as the input for load_earth_relief Returns ------- shape : (nlat, nlon) The calculated shape. Examples -------- >>> _shape_from_resolution('60m') (181, 361) >>> _shape_from_resolution('30m') (361, 721) >>> _shape_from_resolution('10m') (1081, 2161) """ minutes = int(resolution[:2]) nlat = 180*60//minutes + 1 nlon = 360*60//minutes + 1 return (nlat, nlon)
27,418
def test_base_reader_rosbag_accel():
    """Run the base reader test against the sample rosbag."""
    import numpy as np
    from pydtk.io import BaseFileReader

    bag_path = 'test/records/rosbag_model_test/data/records.bag'
    reader = BaseFileReader()
    timestamps, payload, columns = reader.read(
        path=bag_path, contents='/vehicle/acceleration')
    assert isinstance(payload, np.ndarray)
27,419
def __virtual__():
    """
    Load module only if cx_Oracle installed
    """
    if not HAS_CX_ORACLE:
        return (
            False,
            "The oracle execution module not loaded: python oracle library not found.",
        )
    return __virtualname__
27,420
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the Zigbee Home Automation switches."""
    discovery_info = zha.get_discovery_info(hass, discovery_info)
    if discovery_info is None:
        return

    from zigpy.zcl.clusters.general import OnOff

    cluster = discovery_info['in_clusters'][OnOff.cluster_id]
    await cluster.bind()
    # Presumably: report attribute 0 with min interval 0, max interval 600,
    # minimum change 1 -- confirm against the zigpy API.
    await cluster.configure_reporting(0, 0, 600, 1,)
    async_add_devices([Switch(**discovery_info)], update_before_add=True)
27,421
def find_plugins():
    """
    Finds all Python packages inside the port.plugins directory
    """
    found = []
    for importer, name, ispkg in pkgutil.iter_modules(plugins.__path__):
        if ispkg:
            found.append(os.path.join(importer.path, name))
    return found
27,422
def saturation(rgb_img, threshold=255, channel="any"):
    """Return a mask filtering out saturated pixels.

    Inputs:
    rgb_img   = RGB image
    threshold = value for threshold, above which is considered saturated
    channel   = how many channels must be saturated for the pixel to be masked out ("any", "all")

    Returns:
    masked_img = A binary image with the saturated regions blacked out.

    :param rgb_img: np.ndarray
    :param threshold: int
    :param channel: str
    :return masked_img: np.ndarray
    """
    # Threshold the red, green, and blue channels separately.
    blue, green, red = cv2.split(rgb_img)
    masks = [cv2.inRange(ch, threshold, 255) for ch in (blue, green, red)]

    mode = channel.lower()
    if mode == "any":
        # A pixel is saturated if any channel is saturated.
        combine = cv2.bitwise_or
    elif mode == "all":
        # A pixel is saturated only if all channels are saturated.
        combine = cv2.bitwise_and
    else:
        fatal_error(str(channel) + " is not a valid option. Channel must be either 'any', or 'all'.")
    saturated = combine(combine(masks[0], masks[1]), masks[2])

    # Invert "saturated" before returning, so saturated = black.
    bin_img = cv2.bitwise_not(saturated)
    _debug(visual=bin_img,
           filename=os.path.join(params.debug_outdir, str(params.device), '_saturation_threshold.png'))
    return bin_img
27,423
def get_fuel_from(mass: int) -> int:
    """Gets fuel from mass.

    Args:
        mass (int): mass for the fuel

    Returns:
        int: fuel necessary for the mass
    """
    # Integer-divide by three, then subtract the fixed overhead of two.
    thirds = mass // 3
    return thirds - 2
27,424
def test_energy_density_function():
    """
    Compute the Zeeman energy density over the entire mesh, integrate it,
    and compare it to the expected result.
    """
    # 100 nm x 100 nm mesh (unit_length scales coordinates to metres below).
    mesh = df.RectangleMesh(df.Point(-50, -50), df.Point(50, 50), 10, 10)
    unit_length = 1e-9
    H = 1e6  # applied field magnitude, A/m

    # Create simulation object.
    sim = finmag.Simulation(mesh, 1e5, unit_length=unit_length)

    # Set uniform magnetisation.
    def m_ferromagnetic(pos):
        return np.array([0., 0., 1.])

    sim.set_m(m_ferromagnetic)

    # Assign zeeman object to simulation
    sim.add(Zeeman(H * np.array([0., 0., 1.])))

    # Get energy density function
    edf = sim.get_interaction('Zeeman').energy_density_function()

    # Integrate it over the mesh and compare to expected result.
    total_energy = df.assemble(edf * df.dx) * unit_length
    expected_energy = -mu0 * H
    # NOTE(review): this comparison is one-sided (no abs()); it presumably
    # relies on total_energy being close to -expected_energy from below --
    # confirm the intended tolerance check.
    assert (total_energy + expected_energy) < 1e-6
27,425
def my_Bayes_model_mse(params):
    """
    Function fits the Bayesian model from Tutorial 4

    Args :
      params (list of positive floats): parameters used by the model
      (params[0] = posterior scaling)

    Returns :
      (scalar) negative log-likelihood :sum of log probabilities

    NOTE(review): depends on module-level globals (x, true_stim, behaviour,
    hypothetical_stim, likelihood_matrix, my_gaussian, moments_myfunc) --
    confirm they are defined before calling.
    """
    trial_ll = np.zeros_like(true_stim)

    ## Create the prior Matrix outside of trial loop
    # The prior is a mixture of two zero-mean Gaussians weighted by alpha.
    alpha=params[0]
    prior_mean = 0
    prior_sigma1 = 0.5
    prior_sigma2 = 3
    prior1 = my_gaussian(x, prior_mean, prior_sigma1)
    prior2 = my_gaussian(x, prior_mean, prior_sigma2)
    prior_combined = (1-alpha) * prior1 + (alpha * prior2)
    prior_combined = prior_combined / np.sum(prior_combined)
    # Replicate the prior for every hypothetical stimulus (one row each).
    prior_matrix = np.tile(prior_combined, hypothetical_stim.shape[0]).reshape((hypothetical_stim.shape[0],-1))

    ## Create posterior matrix outside of trial loop
    posterior_matrix = np.zeros_like(likelihood_matrix)
    for i_posterior in np.arange(posterior_matrix.shape[0]):
        # Row-wise prior x likelihood, renormalised to sum to one.
        posterior_matrix[i_posterior,:] = np.multiply(prior_matrix[i_posterior,:], likelihood_matrix[i_posterior,:])
        posterior_matrix[i_posterior,:] = posterior_matrix[i_posterior,:] / np.sum(posterior_matrix[i_posterior,:])

    ## Create Binary decision matrix outside of trial loop
    binary_decision_matrix = np.zeros_like(posterior_matrix)
    for i_posterior in np.arange(posterior_matrix.shape[0]):
        # The decision is the grid point closest to the posterior mean.
        mean, _, _ = moments_myfunc(x, posterior_matrix[i_posterior,:])
        idx = np.argmin(np.abs(x - mean))
        binary_decision_matrix[i_posterior,idx] = 1

    # Loop over stimuli
    for i_stim in np.arange(true_stim.shape[0]):
        # NOTE(review): every column gets the same (column-independent)
        # Gaussian over hypothetical stimuli -- looks intentional for the
        # marginalisation below, but confirm against the tutorial.
        input_matrix = np.zeros_like(posterior_matrix)
        for i in np.arange(x.shape[0]):
            input_matrix[:, i] = my_gaussian(hypothetical_stim, true_stim[i_stim], 1)
            input_matrix[:, i] = input_matrix[:, i] / np.sum(input_matrix[:, i])
        # Marginalise the decisions over hypothetical stimuli.
        marginalization_matrix = input_matrix * binary_decision_matrix
        marginal = np.sum(marginalization_matrix, axis=0)
        marginal = marginal / np.sum(marginal)
        action = behaviour[i_stim]
        idx = np.argmin(np.abs(x - action))
        # eps keeps the log finite when the marginal probability is zero.
        trial_ll[i_stim] = np.log(marginal[idx] + np.finfo(float).eps)

    neg_ll = -np.sum(trial_ll)
    return neg_ll
27,426
def validate_request_tween_factory(handler, registry):
    """
    Updates request.environ's REQUEST_METHOD to be X_REQUEST_METHOD if present.
    Asserts that if a POST (or similar) request is in application/json format,
    with exception for /metadata/* endpoints.

    Apache config:
        SetEnvIf Request_Method HEAD X_REQUEST_METHOD=HEAD
    """
    def validate_request_tween(request):
        # Fix Request method changed by mod_wsgi.
        # See: https://github.com/GrahamDumpleton/mod_wsgi/issues/2
        environ = request.environ
        if 'X_REQUEST_METHOD' in environ:
            environ['REQUEST_METHOD'] = environ['X_REQUEST_METHOD']

        # GET/HEAD requests (including page text/html requests) need no
        # content-type check.
        if request.method in ('GET', 'HEAD'):
            return handler(request)

        if request.content_type != 'application/json':
            # Special case to allow us to POST to metadata TSV requests via
            # form submission.
            if (request.content_type == 'application/x-www-form-urlencoded'
                    and request.path[0:10] == '/metadata/'):
                return handler(request)
            detail = "Request content type %s is not 'application/json'" % request.content_type
            raise HTTPUnsupportedMediaType(detail)

        return handler(request)

    return validate_request_tween
27,427
def gen(camera):
    """Video streaming generator function.

    Captures frames from `camera` into the module-level `output` buffer, runs
    face recognition on every other frame, draws bounding boxes and names on
    the image, and yields multipart PNG frames for an MJPEG-style stream.
    """
    # initialise some variables
    frame_count = 0
    bytes_io = BytesIO()
    skip_frame = 1
    face_names = []
    face_locations = []
    while True:
        print("Capturing image.")
        camera.capture(output, format="rgb")
        im = Image.fromarray(output, 'RGB')
        # process every other frame to speed up
        if frame_count > skip_frame:
            # reset
            frame_count = 0
            # find face locations on captured image
            face_locations = face_recognition.face_locations(output)
            print("Found {} faces in image.".format(len(face_locations)))
            # find faces on captured image
            face_encodings = face_recognition.face_encodings(output, face_locations)
            # match up with saved faces
            face_names = []
            for face_encoding in face_encodings:
                # see of face is a match
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"
                # match found add to know list of face names
                if True in matches:
                    first_match_index = matches.index(True)
                    name = known_face_names[first_match_index]
                    print("I see someone names {}!".format(name))
                face_names.append(name)
        # blat out results on image with bounding box and name
        draw = ImageDraw.Draw(im)
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            text_width, text_height = draw.textsize(name)
            draw.rectangle(((left, bottom), (right, top)), outline=(0, 0, 255))
            draw.rectangle(((left, bottom + text_height + 10), (right, bottom)),
                           fill=(0, 0, 255), outline=(0, 0, 255))
            draw.text((left + 6, bottom + text_height - 5), name, fill=(255, 255, 255, 255))
        frame_count = frame_count + 1
        # convert to PNG string object.
        # BUGFIX: truncate the reused buffer before writing -- seek(0) alone
        # left stale trailing bytes from a previous, larger frame in
        # getvalue(), corrupting the streamed PNG.
        bytes_io.seek(0)
        bytes_io.truncate()
        im.save(bytes_io, 'PNG')
        im.close()
        # send image to browser
        yield (b'--frame\r\n'
               b'Content-Type: image/png\r\n\r\n' + bytes_io.getvalue() + b'\r\n')
27,428
async def get_rank(display_number: int, minimal_msg_number: int,
                   display_total_number: int, group_id: int) -> str:
    """Build the repeat ranking message for the given group.

    Falls back to a friendly placeholder when no data qualifies.
    """
    repeats = recorder_obj.repeat_list(group_id)
    msg_numbers = recorder_obj.msg_number_list(group_id)
    ranking = Ranking(group_id, display_number, minimal_msg_number,
                      display_total_number, repeats, msg_numbers)
    str_data = await ranking.ranking()
    return str_data if str_data else '暂时还没有满足条件的数据~>_<~'
27,429
def feedback(request):
    """Render and process the feedback form (FeedbackForm).

    On valid POST: saves the feedback, emails the site inbox, and redirects
    home; otherwise re-renders the form.
    """
    if request.method == 'POST':
        form = forms.FeedbackForm(request.POST)
        if form.is_valid():
            form.save()
            feedback_type = form.cleaned_data['type']
            feedback_type = dict(form.fields['type'].choices)[feedback_type]
            # BUGFIX: the original did `settings.EMAIL_HOST_USER += ...`,
            # permanently mutating the global setting on every request, and
            # omitted send_mail's required from_email argument (a TypeError).
            # Pass the sender explicitly and leave settings untouched.
            send_mail(
                '[' + feedback_type + '] ' + form.cleaned_data['title'],
                'A new feedback was posted on JobPort' + '\n\n' +
                form.cleaned_data['body'],
                settings.EMAIL_HOST_USER,
                ['jobportiiitd@gmail.com'],
            )
            messages.success(
                request, 'Thanks for filling your precious feedback! :) ')
            return HttpResponseRedirect('/')
        context = {'form': form}
        return render(request, 'jobport/feedback.html', context)
    form = forms.FeedbackForm()
    context = {'form': form}
    return render(request, 'jobport/feedback.html', context)
27,430
def get_convolutional_args(call, include_buffers=False, remove_constants=False):
    """A method to extract the arguments from conv2d or depthwise_conv2d extern call."""
    skip_indices = {0}
    if remove_constants:
        skip_indices.update((41, 42, 44, 45))

    conv_args = []
    for index, arg in enumerate(call.args):
        if index in skip_indices:
            continue
        if isinstance(arg, (tvm.tir.expr.IntImm, tvm.tir.expr.FloatImm)):
            conv_args.append(arg.value)
        elif isinstance(arg, tvm.tir.expr.Load) and not include_buffers:
            conv_args.append(arg.index)
        else:
            conv_args.append(arg)
    return conv_args
27,431
def view_party(party_id):
    """View dashboard for that party."""
    party = party_service.find_party(party_id)
    if party is None:
        abort(404)

    return {
        'party': party,
        'days': party_service.get_party_days(party),
        'days_until_party': (party.starts_at.date() - date.today()).days,
        'orga_count': orga_team_service.count_memberships_for_party(party.id),
        'orga_team_count': orga_team_service.count_teams_for_party(party.id),
        'seating_area_count':
            seating_area_service.count_areas_for_party(party.id),
        'seat_count': seat_service.count_seats_for_party(party.id),
        'ticket_sale_stats': ticket_service.get_ticket_sale_stats(party.id),
        'tickets_checked_in':
            ticket_service.count_tickets_checked_in_for_party(party.id),
        'seat_utilization': seat_service.get_seat_utilization(party.id),
        'guest_servers':
            guest_server_service.get_all_servers_for_party(party.id),
    }
27,432
def _sequence_like(instance, args):
  """Converts the sequence `args` to the same type as `instance`.

  Args:
    instance: an instance of `tuple`, `list`, `namedtuple`, `dict`,
      `collections.OrderedDict`, or `composite_tensor.Composite_Tensor`
      or `type_spec.TypeSpec`.
    args: elements to be converted to the `instance` type.

  Returns:
    `args` with the type of `instance`.
  """
  if _is_mutable_mapping(instance):
    # Pack dictionaries in a deterministic order by sorting the keys.
    # Notice this means that we ignore the original order of `OrderedDict`
    # instances. This is intentional, to avoid potential bugs caused by mixing
    # ordered and plain dicts (e.g., flattening a dict but using a
    # corresponding `OrderedDict` to pack it back).
    result = dict(zip(_sorted(instance), args))
    instance_type = type(instance)
    if instance_type == _collections.defaultdict:
      # defaultdict needs its factory passed to the constructor.
      d = _collections.defaultdict(instance.default_factory)
    else:
      d = instance_type()
    for key in instance:
      d[key] = result[key]
    return d
  elif _is_mapping(instance):
    result = dict(zip(_sorted(instance), args))
    instance_type = type(instance)
    tf_logging.log_first_n(
        tf_logging.WARN, "Mapping types may not work well with tf.nest. Prefer"
        " using MutableMapping for {}".format(instance_type), 1)
    try:
      return instance_type((key, result[key]) for key in instance)
    except TypeError as err:
      raise TypeError("Error creating an object of type {} like {}. Note that "
                      "it must accept a single positional argument "
                      "representing an iterable of key-value pairs, in "
                      "addition to self. Cause: {}".format(
                          type(instance), instance, err))
  elif _is_mapping_view(instance):
    # We can't directly construct mapping views, so we create a list instead
    return list(args)
  elif _is_namedtuple(instance) or _is_attrs(instance):
    if isinstance(instance, _wrapt.ObjectProxy):
      instance_type = type(instance.__wrapped__)
    else:
      instance_type = type(instance)
    return instance_type(*args)
  elif _is_composite_tensor(instance):
    assert len(args) == 1
    spec = instance._type_spec  # pylint: disable=protected-access
    return spec._from_components(args[0])  # pylint: disable=protected-access
  elif _is_type_spec(instance):
    # Pack a CompositeTensor's components according to a TypeSpec.
    assert len(args) == 1
    return instance._from_components(args[0])  # pylint: disable=protected-access
  elif isinstance(instance, _six.moves.range):
    # Ranges are rebuilt as lists of the same elements.
    return _sequence_like(list(instance), args)
  elif isinstance(instance, _wrapt.ObjectProxy):
    # For object proxies, first create the underlying type and then re-wrap it
    # in the proxy type.
    return type(instance)(_sequence_like(instance.__wrapped__, args))
  else:
    # Not a namedtuple
    return type(instance)(args)
27,433
def patch_violinplot():
    """Patch seaborn's violinplot in current axis to workaround matplotlib's bug ##5423."""
    from matplotlib.collections import PolyCollection
    axis = plt.gca()
    violin_bodies = [child for child in axis.get_children()
                     if isinstance(child, PolyCollection)]
    for body in violin_bodies:
        body.set_edgecolor((0.3, 0.3, 0.3))
27,434
def subcat_add():
    """
    Add a sub-category.
    """
    if request.method == 'POST':
        cat_name = request.form['cat_name']
        super_cat_id = request.form['super_cat_id']
        # Reject duplicate sub-category names.
        if SubCat.query.filter_by(cat_name=cat_name).count():
            return "<script>alert('该小分类已经存在');history.go(-1);</script>"
        db.session.add(SubCat(
            super_cat_id=super_cat_id,
            cat_name=cat_name,
        ))
        db.session.commit()
        return redirect(url_for("admin.subcat_list"))
    # GET: render the form with the list of top-level categories.
    supercat = SuperCat.query.all()
    return render_template("admin/subcat_add.html", supercat=supercat)
27,435
def handle_candidate_results(
    data: Union[
        structs.NationalSummaryPresident,
        structs.StateSummaryPresident,
        structs.StateSummaryCongressionalResult,
        structs.CountyCongressionalResult,
        structs.CountyPresidentialResult,
    ],
    named_candidate_factory: Any,
    record: SQLRecord,
    historical_counts: HistoricalResults,
) -> None:
    """
    Helper function that adds a result to dem/gop/other. This function:

    - Populates the GOP/Dem candidate if there isn't already one
    - If there's already a GOP/Dem candidate, or if this record is for a
      third-party candidate, adds the results to the "other" bucket
    - Gets the historical vote counts and populates those as well
    """
    party = structs.Party.from_ap(record.party)
    if party == structs.Party.GOP:
        if data.gop:
            # There is already a GOP candidate. We process in order of number
            # of votes, so this is a secondary GOP candidate
            if hasattr(data, "multiple_gop"):
                data.multiple_gop = True
            # Falls through so the secondary candidate's votes are added to
            # the "other" bucket below.
        else:
            # This is the leading GOP candidate
            data.gop = named_candidate_factory(
                first_name=record.first,
                last_name=record.last,
                pop_vote=record.votecount,
                pop_pct=record.votepct,
                pop_vote_history=historical_counts.get(record.elex_id, {}),
            )
            return
    elif party == structs.Party.DEM:
        if data.dem:
            # There is already a Dem candidate. We process in order of number
            # of votes, so this is a secondary Dem candidate
            if hasattr(data, "multiple_dem"):
                data.multiple_dem = True
        else:
            # This is the leading Dem candidate
            data.dem = named_candidate_factory(
                first_name=record.first,
                last_name=record.last,
                pop_vote=record.votecount,
                pop_pct=record.votepct,
                pop_vote_history=historical_counts.get(record.elex_id, {}),
            )
            return
    # Third-party candidate or non-leading gop/dem
    data.oth.pop_vote += record.votecount
    data.oth.pop_pct += record.votepct
    # Merge the candidate's historical counts into the overall historical
    # counts
    for datetime_str, count in historical_counts.get(record.elex_id, {}).items():
        if datetime_str in data.oth.pop_vote_history:
            data.oth.pop_vote_history[datetime_str] += count
        else:
            data.oth.pop_vote_history[datetime_str] = count
27,436
async def async_setup_entry(hass, config_entry, async_add_devices):
    """Set up the Alexa sensor platform by config_entry.

    Delegates to the regular platform setup using the entry's stored data.
    """
    return await async_setup_platform(
        hass, config_entry.data, async_add_devices, discovery_info=None)
27,437
def parse_duration(dur: str) -> int:
    """Generates seconds from a human readable duration.

    Accepts ``MM:SS`` or ``HH:MM:SS`` (validated by DURATION_REGEX);
    raises ValueError otherwise.
    """
    if not DURATION_REGEX.match(dur):
        raise ValueError('Time passed does not match required format: `XX:XX` or `XX:XX:XX`')
    fields = [int(part) for part in dur.split(':')]
    if len(fields) == 3:
        hours, minutes, secs = fields
        return hours * 60 * 60 + minutes * 60 + secs
    minutes, secs = fields
    return minutes * 60 + secs
27,438
def shorter_uuid(length=7, starter=None, with_original=False):
    """
    Generate an even shorter short UUID generated by the shortuuid library.

    :param length: Length of trimmed ID.
    :param starter: Whether to begin with an already-created ShortUUID.
        Useful when using recursively.
    :param with_original: Also return initially-generated ShortUUID
    :return: trimmed id, or [trimmed id, original id] when with_original
    """
    original_id = str(shortuuid.uuid()) if starter is None else starter
    n = len(original_id)
    dx = min(length, n)  # ID length
    # A too-short starter is replaced with a freshly generated id.
    # NOTE(review): `n` is not recomputed after regeneration (preserved from
    # the original) -- confirm the slice bounds are intended.
    if starter is not None and len(starter) < dx * 2:
        original_id = str(shortuuid.uuid())
    start = random.randint(0, n - dx)
    shorter_id = original_id[start:start + dx]
    if with_original:
        return [shorter_id, original_id]
    return shorter_id
27,439
async def connect_web_socket(uri: str,
                             serializer: Union[aiowamp.SerializerABC, Sequence[aiowamp.SerializerABC]] = None,
                             *,
                             ssl_context: ssl.SSLContext = None) -> WebSocketTransport:
    """Connect to a router using web socket transport.

    Args:
        uri: URI to connect_raw_socket to.
        serializer: Serializer to use.
            Accepts a sequence of serializers in order of preference which
            will be used during negotiation. `None` or an empty sequence will
            accept all known serializers.
        ssl_context: Enforce custom SSL context options.
            If set, the uri must use a scheme supporting TLS.

    Returns:
        An open websocket transport.
    """
    # NOTE(review): the body is a bare `...` — this reads as a stub/overload
    # declaration; confirm the real implementation lives elsewhere.
    ...
27,440
def ok_git_config_not_empty(ar): """Helper to verify that nothing rewritten the config file""" # TODO: we don't support bare -- do we? assert_true(os.stat(opj(ar.path, '.git', 'config')).st_size)
27,441
def group_by(s: Iterable[_ElementType], key: Callable[[_ElementType], _GroupType],
             gfunc: Optional[Callable[[List[_ElementType]], _ResultType]] = None) \
        -> Dict[_GroupType, _ResultType]:
    """
    Overview:
        Divide the elements into groups.

    :param s: Elements.
    :param key: Group key, should be a callable object.
    :param gfunc: Post-process function for groups, should be a callable object. Default is ``None`` which means \
        no post-processing will be performed.
    :return: Grouping result.

    Examples::
        >>> from hbutils.collection import group_by
        >>>
        >>> foods = [
        ...     'apple', 'orange', 'pear',
        ...     'banana', 'fish', 'pork', 'milk',
        ... ]
        >>> group_by(foods, len)  # group by length
        {5: ['apple'], 6: ['orange', 'banana'], 4: ['pear', 'fish', 'pork', 'milk']}
        >>> group_by(foods, len, len)  # group and get length
        {5: 1, 6: 2, 4: 4}
    """
    # Identity post-processing when no gfunc is given.
    post = gfunc or (lambda members: members)
    buckets = {}
    for element in s:
        buckets.setdefault(key(element), []).append(element)
    return {group_key: post(members) for group_key, members in buckets.items()}
27,442
def update_key(context, alias, new_url, new_api_key, fetch_api_key):
    """
    Update stored Galaxy API key.

    Update the Galaxy URL and/or API key stored against ALIAS.
    """
    instances = Credentials()
    # Unknown alias: report and bail out with a non-zero exit code.
    if alias not in instances.list_keys():
        logger.error("'%s': not found" % alias)
        sys.exit(1)
    # Prefer the explicitly supplied URL; otherwise keep the stored one.
    if new_url:
        galaxy_url = new_url
    else:
        galaxy_url = instances.fetch_key(alias)[0]
    click.echo("galaxy_url: %s" % galaxy_url)
    click.echo("username : %s" % context.username)
    if fetch_api_key:
        # Attempt to connect to Galaxy and fetch API key
        gi = context.galaxy_instance(alias)
        if gi is None:
            logger.critical("%s: failed to connect" % alias)
            sys.exit(1)
        # Fetched key overrides any key passed on the command line.
        new_api_key = gi.key
    # Persist; a falsy return from update_key signals failure.
    if not instances.update_key(alias, new_url=new_url, new_api_key=new_api_key):
        sys.exit(1)
27,443
def init_templateflow_wf(
    bids_dir,
    output_dir,
    participant_label,
    mov_template,
    ref_template='MNI152NLin2009cAsym',
    use_float=True,
    omp_nthreads=None,
    mem_gb=3.0,
    modality='T1w',
    normalization_quality='precise',
    name='templateflow_wf',
    fs_subjects_dir=None,
):
    """
    A Nipype workflow to perform image registration between two templates
    *R* and *M*. *R* is the *reference template*, selected by a templateflow
    identifier such as ``MNI152NLin2009cAsym``, and *M* is the *moving
    template* (e.g., ``MNI152Lin``). This workflows maps data defined on
    template-*M* space onto template-*R* space.

    1. Run the subrogate images through ``antsBrainExtraction``.
    2. Recompute :abbr:`INU (intensity non-uniformity)` correction using the
       mask obtained in 1).
    3. Independently, run spatial normalization of every
       :abbr:`INU (intensity non-uniformity)` corrected image
       (supplied via ``in_files``) to both templates.
    4. Calculate an initialization between both templates, using them directly.
    5. Run multi-channel image registration of the images resulting from 3).
       Both sets of images (one registered to *R* and another to *M*) are then
       used as reference and moving images in the registration framework.

    **Parameters**

        in_files: list of files
            a list of paths pointing to the images that will be used as surrogates
        mov_template: str
            a templateflow identifier for template-*M*
        ref_template: str
            a templateflow identifier for template-*R* (default:
            ``MNI152NLin2009cAsym``).

    """
    # number of participants
    ninputs = len(participant_label)
    # Environment propagated to every ANTs node to pin thread counts.
    ants_env = {
        'NSLOTS': '%d' % omp_nthreads,
        'ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS': '%d' % omp_nthreads,
        'OMP_NUM_THREADS': '%d' % omp_nthreads,
    }
    # NOTE(review): `use_float` is accepted but not referenced in this body —
    # confirm whether it should be forwarded to the registration nodes.

    # Get path to templates
    tpl_ref = str(get_template(ref_template, suffix=modality, desc=None, resolution=1))
    tpl_ref_mask = str(get_template(ref_template, suffix='mask', desc='brain', resolution=1))
    tpl_mov = str(get_template(mov_template, suffix=modality, desc=None, resolution=1))
    tpl_mov_mask = str(get_template(mov_template, suffix='mask', desc='brain', resolution=1))

    wf = pe.Workflow(name)
    # One iteration of the graph per participant label.
    inputnode = pe.Node(niu.IdentityInterface(fields=['participant_label']),
                        name='inputnode')
    inputnode.iterables = ('participant_label', sorted(list(participant_label)))

    pick_file = pe.Node(niu.Function(function=_bids_pick),
                        name='pick_file', run_without_submitting=True)
    pick_file.inputs.bids_root = bids_dir

    # Brain extraction against each template (steps 1-2 of the docstring).
    ref_bex = init_brain_extraction_wf(
        in_template=ref_template,
        omp_nthreads=omp_nthreads,
        mem_gb=mem_gb,
        bids_suffix=modality,
        name='reference_bex',
    )
    mov_bex = init_brain_extraction_wf(
        in_template=mov_template,
        omp_nthreads=omp_nthreads,
        mem_gb=mem_gb,
        bids_suffix=modality,
        name='moving_bex',
    )

    # Spatial normalization of each subject image to template-R (step 3).
    ref_norm = pe.Node(
        Registration(from_file=pkgr.resource_filename(
            'niworkflows.data',
            't1w-mni_registration_%s_000.json' % normalization_quality)),
        name='ref_norm', n_procs=omp_nthreads)
    ref_norm.inputs.fixed_image = tpl_ref
    ref_norm.inputs.fixed_image_masks = tpl_ref_mask
    ref_norm.inputs.environ = ants_env

    # Register the INU-corrected image to the other template
    mov_norm = pe.Node(
        Registration(from_file=pkgr.resource_filename(
            'niworkflows.data',
            't1w-mni_registration_%s_000.json' % normalization_quality)),
        name='mov_norm', n_procs=omp_nthreads)
    mov_norm.inputs.fixed_image = tpl_mov
    mov_norm.inputs.fixed_image_masks = tpl_mov_mask
    mov_norm.inputs.environ = ants_env

    # Initialize between-templates transform with antsAI
    init_aff = pe.Node(AI(
        metric=('Mattes', 32, 'Regular', 0.2),
        transform=('Affine', 0.1),
        search_factor=(20, 0.12),
        principal_axes=False,
        convergence=(10, 1e-6, 10),
        verbose=True,
        fixed_image=tpl_ref,
        fixed_image_mask=tpl_ref_mask,
        moving_image=tpl_mov,
        moving_image_mask=tpl_mov_mask,
        environ=ants_env,
    ), name='init_aff', n_procs=omp_nthreads)

    # JoinNodes collect the per-participant warped images into one list each.
    ref_buffer = pe.JoinNode(niu.IdentityInterface(
        fields=['fixed_image']), joinsource='inputnode',
        joinfield='fixed_image', name='ref_buffer')
    mov_buffer = pe.JoinNode(niu.IdentityInterface(
        fields=['moving_image']), joinsource='inputnode',
        joinfield='moving_image', name='mov_buffer')

    # Multi-channel template-to-template registration (step 5): one channel
    # per participant, hence the per-input replication of metric settings.
    flow = pe.Node(
        Registration(from_file=pkgr.resource_filename(
            'niworkflows.data',
            't1w-mni_registration_%s_000.json' % normalization_quality)),
        name='flow_norm', n_procs=omp_nthreads,
    )
    flow.inputs.fixed_image_masks = tpl_ref_mask
    flow.inputs.moving_image_masks = tpl_mov_mask
    flow.inputs.metric = [[v] * ninputs for v in flow.inputs.metric]
    flow.inputs.metric_weight = [[1 / ninputs] * ninputs
                                 for _ in flow.inputs.metric_weight]
    flow.inputs.radius_or_number_of_bins = [
        [v] * ninputs for v in flow.inputs.radius_or_number_of_bins]
    flow.inputs.sampling_percentage = [
        [v] * ninputs for v in flow.inputs.sampling_percentage]
    flow.inputs.sampling_strategy = [
        [v] * ninputs for v in flow.inputs.sampling_strategy]
    flow.inputs.environ = ants_env

    # Datasinking
    ref_norm_ds = pe.Node(
        DerivativesDataSink(base_directory=str(output_dir.parent),
                            out_path_base=output_dir.name, space=ref_template,
                            desc='preproc', keep_dtype=True),
        name='ref_norm_ds', run_without_submitting=True
    )
    mov_norm_ds = pe.Node(
        DerivativesDataSink(base_directory=str(output_dir.parent),
                            out_path_base=output_dir.name, space=mov_template,
                            desc='preproc', keep_dtype=True),
        name='mov_norm_ds', run_without_submitting=True
    )
    # 'from' is a reserved word, hence the **{'from': ...} spelling.
    xfm_ds = pe.Node(DerivativesDataSink(
        base_directory=str(output_dir.parent), out_path_base=output_dir.name,
        allowed_entities=['from', 'mode'], mode='image', suffix='xfm',
        source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template),
        **{'from': mov_template}),
        name='xfm_ds', run_without_submitting=True)

    wf.connect([
        (inputnode, pick_file, [('participant_label', 'participant_label')]),
        (pick_file, ref_bex, [('out', 'inputnode.in_files')]),
        (pick_file, mov_bex, [('out', 'inputnode.in_files')]),
        (ref_bex, ref_norm, [('outputnode.bias_corrected', 'moving_image'),
                             ('outputnode.out_mask', 'moving_image_masks'),
                             ('norm.forward_transforms', 'initial_moving_transform')]),
        (ref_bex, mov_norm, [('outputnode.bias_corrected', 'moving_image')]),
        (mov_bex, mov_norm, [('outputnode.out_mask', 'moving_image_masks'),
                             ('norm.forward_transforms', 'initial_moving_transform')]),
        (init_aff, flow, [('output_transform', 'initial_moving_transform')]),
        (ref_norm, ref_buffer, [('warped_image', 'fixed_image')]),
        (mov_norm, mov_buffer, [('warped_image', 'moving_image')]),
        (ref_buffer, flow, [('fixed_image', 'fixed_image')]),
        (mov_buffer, flow, [('moving_image', 'moving_image')]),
        (pick_file, ref_norm_ds, [('out', 'source_file')]),
        (ref_norm, ref_norm_ds, [('warped_image', 'in_file')]),
        (pick_file, mov_norm_ds, [('out', 'source_file')]),
        (mov_norm, mov_norm_ds, [('warped_image', 'in_file')]),
        (flow, xfm_ds, [('composite_transform', 'in_file')]),
    ])

    # Optional FreeSurfer-based branch: project aparc segmentations and
    # surfaces into both template spaces and fuse them across participants.
    if fs_subjects_dir:
        fssource = pe.Node(
            FreeSurferSource(subjects_dir=str(fs_subjects_dir)),
            name='fssource', run_without_submitting=True)
        tonative = pe.Node(fs.Label2Vol(subjects_dir=str(fs_subjects_dir)),
                           name='tonative')
        tonii = pe.Node(
            fs.MRIConvert(out_type='niigz', resample_type='nearest'),
            name='tonii')

        ref_aparc = pe.Node(
            ApplyTransforms(interpolation='MultiLabel', float=True,
                            reference_image=tpl_ref, environ=ants_env),
            name='ref_aparc', mem_gb=1, n_procs=omp_nthreads
        )
        mov_aparc = pe.Node(
            ApplyTransforms(interpolation='MultiLabel', float=True,
                            reference_image=tpl_mov, environ=ants_env),
            name='mov_aparc', mem_gb=1, n_procs=omp_nthreads
        )
        ref_aparc_buffer = pe.JoinNode(
            niu.IdentityInterface(fields=['aparc']),
            joinsource='inputnode', joinfield='aparc',
            name='ref_aparc_buffer')
        ref_join_labels = pe.Node(
            AntsJointFusion(
                target_image=[tpl_ref],
                out_label_fusion='merged_aparc.nii.gz',
                out_intensity_fusion_name_format='merged_aparc_intensity_%d.nii.gz',
                out_label_post_prob_name_format='merged_aparc_posterior_%d.nii.gz',
                out_atlas_voting_weight_name_format='merged_aparc_weight_%d.nii.gz',
                environ=ants_env,
            ),
            name='ref_join_labels', n_procs=omp_nthreads)
        ref_join_labels_ds = pe.Node(
            DerivativesDataSink(
                base_directory=str(output_dir.parent),
                out_path_base=output_dir.name, suffix='dtissue',
                desc='aparc', keep_dtype=False,
                source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
            name='ref_join_labels_ds', run_without_submitting=True)
        ref_join_probs_ds = pe.Node(
            DerivativesDataSink(
                base_directory=str(output_dir.parent),
                out_path_base=output_dir.name, suffix='probtissue',
                desc='aparc', keep_dtype=False,
                source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
            name='ref_join_probs_ds', run_without_submitting=True)
        # ref_join_voting_ds = pe.Node(
        #     DerivativesDataSink(
        #         base_directory=str(output_dir.parent),
        #         out_path_base=output_dir.name, space=ref_template,
        #         suffix='probtissue', desc='aparcvoting', keep_dtype=False,
        #         source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
        #     name='ref_join_voting_ds', run_without_submitting=True)
        mov_aparc_buffer = pe.JoinNode(
            niu.IdentityInterface(fields=['aparc']),
            joinsource='inputnode', joinfield='aparc',
            name='mov_aparc_buffer')
        mov_join_labels = pe.Node(
            AntsJointFusion(
                target_image=[tpl_mov],
                out_label_fusion='merged_aparc.nii.gz',
                out_intensity_fusion_name_format='merged_aparc_intensity_%d.nii.gz',
                out_label_post_prob_name_format='merged_aparc_posterior_%d.nii.gz',
                out_atlas_voting_weight_name_format='merged_aparc_weight_%d.nii.gz',
                environ=ants_env,
            ),
            name='mov_join_labels', n_procs=omp_nthreads)
        mov_join_labels_ds = pe.Node(
            DerivativesDataSink(
                base_directory=str(output_dir.parent),
                out_path_base=output_dir.name, suffix='dtissue',
                desc='aparc', keep_dtype=False,
                source_file='group/tpl-{0}_T1w.nii.gz'.format(mov_template)),
            name='mov_join_labels_ds', run_without_submitting=True)
        mov_join_probs_ds = pe.Node(
            DerivativesDataSink(
                base_directory=str(output_dir.parent),
                out_path_base=output_dir.name, suffix='probtissue',
                desc='aparc', keep_dtype=False,
                source_file='group/tpl-{0}_T1w.nii.gz'.format(mov_template)),
            name='mov_join_probs_ds', run_without_submitting=True)
        ref_aparc_ds = pe.Node(
            DerivativesDataSink(base_directory=str(output_dir.parent),
                                out_path_base=output_dir.name,
                                space=ref_template, suffix='dtissue',
                                desc='aparc', keep_dtype=False),
            name='ref_aparc_ds', run_without_submitting=True
        )
        mov_aparc_ds = pe.Node(
            DerivativesDataSink(base_directory=str(output_dir.parent),
                                out_path_base=output_dir.name,
                                space=mov_template, suffix='dtissue',
                                desc='aparc', keep_dtype=False),
            name='mov_aparc_ds', run_without_submitting=True
        )
        # Extract surfaces
        cifti_wf = init_gifti_surface_wf(
            name='cifti_surfaces', subjects_dir=str(fs_subjects_dir))

        # Move surfaces to template spaces
        gii2csv = pe.MapNode(GiftiToCSV(itk_lps=True),
                             iterfield=['in_file'], name='gii2csv')
        ref_map_surf = pe.MapNode(
            ApplyTransformsToPoints(dimension=3, environ=ants_env),
            n_procs=omp_nthreads, name='ref_map_surf', iterfield=['input_file'])
        ref_csv2gii = pe.MapNode(
            CSVToGifti(itk_lps=True),
            name='ref_csv2gii', iterfield=['in_file', 'gii_file'])

        ref_surfs_buffer = pe.JoinNode(
            niu.IdentityInterface(fields=['surfaces']),
            joinsource='inputnode', joinfield='surfaces',
            name='ref_surfs_buffer')
        ref_surfs_unzip = pe.Node(UnzipJoinedSurfaces(), name='ref_surfs_unzip',
                                  run_without_submitting=True)
        ref_ply = pe.MapNode(SurfacesToPointCloud(), name='ref_ply',
                             iterfield=['in_files'])
        ref_recon = pe.MapNode(PoissonRecon(), name='ref_recon',
                               iterfield=['in_file'])
        ref_avggii = pe.MapNode(PLYtoGifti(), name='ref_avggii',
                                iterfield=['in_file', 'surf_key'])
        ref_smooth = pe.MapNode(fs.SmoothTessellation(), name='ref_smooth',
                                iterfield=['in_file'])

        ref_surfs_ds = pe.Node(
            DerivativesDataSink(
                base_directory=str(output_dir.parent),
                out_path_base=output_dir.name, space=ref_template,
                keep_dtype=False, compress=False),
            name='ref_surfs_ds', run_without_submitting=True)
        ref_avg_ds = pe.Node(
            DerivativesDataSink(
                base_directory=str(output_dir.parent),
                out_path_base=output_dir.name, space=ref_template,
                keep_dtype=False, compress=False,
                source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
            name='ref_avg_ds', run_without_submitting=True)

        mov_map_surf = pe.MapNode(
            ApplyTransformsToPoints(dimension=3, environ=ants_env),
            n_procs=omp_nthreads, name='mov_map_surf', iterfield=['input_file'])
        mov_csv2gii = pe.MapNode(
            CSVToGifti(itk_lps=True),
            name='mov_csv2gii', iterfield=['in_file', 'gii_file'])
        mov_surfs_buffer = pe.JoinNode(
            niu.IdentityInterface(fields=['surfaces']),
            joinsource='inputnode', joinfield='surfaces',
            name='mov_surfs_buffer')
        mov_surfs_unzip = pe.Node(UnzipJoinedSurfaces(), name='mov_surfs_unzip',
                                  run_without_submitting=True)
        mov_ply = pe.MapNode(SurfacesToPointCloud(), name='mov_ply',
                             iterfield=['in_files'])
        mov_recon = pe.MapNode(PoissonRecon(), name='mov_recon',
                               iterfield=['in_file'])
        mov_avggii = pe.MapNode(PLYtoGifti(), name='mov_avggii',
                                iterfield=['in_file', 'surf_key'])
        mov_smooth = pe.MapNode(fs.SmoothTessellation(), name='mov_smooth',
                                iterfield=['in_file'])
        mov_surfs_ds = pe.Node(
            DerivativesDataSink(
                base_directory=str(output_dir.parent),
                out_path_base=output_dir.name, space=mov_template,
                keep_dtype=False, compress=False),
            name='mov_surfs_ds', run_without_submitting=True)
        mov_avg_ds = pe.Node(
            DerivativesDataSink(
                base_directory=str(output_dir.parent),
                out_path_base=output_dir.name, space=mov_template,
                keep_dtype=False, compress=False,
                source_file='group/tpl-{0}_T1w.nii.gz'.format(mov_template)),
            name='mov_avg_ds', run_without_submitting=True)

        wf.connect([
            (inputnode, fssource, [(('participant_label', _sub_decorate), 'subject_id')]),
            (inputnode, cifti_wf, [(('participant_label', _sub_decorate), 'inputnode.subject_id')]),
            (pick_file, cifti_wf, [('out', 'inputnode.in_t1w')]),
            (pick_file, tonii, [('out', 'reslice_like')]),
            # Select DKT aparc
            (fssource, tonative, [(('aparc_aseg', _last), 'seg_file'),
                                  ('rawavg', 'template_file'),
                                  ('aseg', 'reg_header')]),
            (tonative, tonii, [('vol_label_file', 'in_file')]),
            (tonii, ref_aparc, [('out_file', 'input_image')]),
            (tonii, mov_aparc, [('out_file', 'input_image')]),
            (ref_norm, ref_aparc, [('composite_transform', 'transforms')]),
            (mov_norm, mov_aparc, [('composite_transform', 'transforms')]),
            (ref_buffer, ref_join_labels, [('fixed_image', 'atlas_image')]),
            (ref_aparc, ref_aparc_buffer, [('output_image', 'aparc')]),
            (ref_aparc_buffer, ref_join_labels, [('aparc', 'atlas_segmentation_image')]),
            (mov_buffer, mov_join_labels, [('moving_image', 'atlas_image')]),
            (mov_aparc, mov_aparc_buffer, [('output_image', 'aparc')]),
            (mov_aparc_buffer, mov_join_labels, [('aparc', 'atlas_segmentation_image')]),
            # Datasinks
            (ref_join_labels, ref_join_labels_ds, [('out_label_fusion', 'in_file')]),
            (ref_join_labels, ref_join_probs_ds, [
                ('out_label_post_prob', 'in_file'),
                (('out_label_post_prob', _get_extra), 'extra_values')]),
            # (ref_join_labels, ref_join_voting_ds, [
            #     ('out_atlas_voting_weight_name_format', 'in_file')]),
            (mov_join_labels, mov_join_labels_ds, [('out_label_fusion', 'in_file')]),
            (mov_join_labels, mov_join_probs_ds, [
                ('out_label_post_prob', 'in_file'),
                (('out_label_post_prob', _get_extra), 'extra_values')]),
            (pick_file, ref_aparc_ds, [('out', 'source_file')]),
            (ref_aparc, ref_aparc_ds, [('output_image', 'in_file')]),
            (pick_file, mov_aparc_ds, [('out', 'source_file')]),
            (mov_aparc, mov_aparc_ds, [('output_image', 'in_file')]),
            # Mapping ref surfaces
            (cifti_wf, gii2csv, [(('outputnode.surf_norm', _discard_inflated), 'in_file')]),
            (gii2csv, ref_map_surf, [('out_file', 'input_file')]),
            (ref_norm, ref_map_surf, [(('inverse_composite_transform', _ensure_list), 'transforms')]),
            (ref_map_surf, ref_csv2gii, [('output_file', 'in_file')]),
            (cifti_wf, ref_csv2gii, [(('outputnode.surf_norm', _discard_inflated), 'gii_file')]),
            (pick_file, ref_surfs_ds, [('out', 'source_file')]),
            (ref_csv2gii, ref_surfs_ds, [('out_file', 'in_file'),
                                         (('out_file', _get_surf_extra), 'extra_values')]),
            (ref_csv2gii, ref_surfs_buffer, [('out_file', 'surfaces')]),
            (ref_surfs_buffer, ref_surfs_unzip, [('surfaces', 'in_files')]),
            (ref_surfs_unzip, ref_ply, [('out_files', 'in_files')]),
            (ref_ply, ref_recon, [('out_file', 'in_file')]),
            (ref_recon, ref_avggii, [('out_file', 'in_file')]),
            (ref_surfs_unzip, ref_avggii, [('surf_keys', 'surf_key')]),
            (ref_avggii, ref_smooth, [('out_file', 'in_file')]),
            (ref_smooth, ref_avg_ds, [('surface', 'in_file'),
                                      (('surface', _get_surf_extra), 'extra_values')]),
            # Mapping mov surfaces
            (gii2csv, mov_map_surf, [('out_file', 'input_file')]),
            (mov_norm, mov_map_surf, [(('inverse_composite_transform', _ensure_list), 'transforms')]),
            (mov_map_surf, mov_csv2gii, [('output_file', 'in_file')]),
            (cifti_wf, mov_csv2gii, [(('outputnode.surf_norm', _discard_inflated), 'gii_file')]),
            (pick_file, mov_surfs_ds, [('out', 'source_file')]),
            (mov_csv2gii, mov_surfs_ds, [('out_file', 'in_file'),
                                         (('out_file', _get_surf_extra), 'extra_values')]),
            (mov_csv2gii, mov_surfs_buffer, [('out_file', 'surfaces')]),
            (mov_surfs_buffer, mov_surfs_unzip, [('surfaces', 'in_files')]),
            (mov_surfs_unzip, mov_ply, [('out_files', 'in_files')]),
            (mov_ply, mov_recon, [('out_file', 'in_file')]),
            (mov_recon, mov_avggii, [('out_file', 'in_file')]),
            (mov_surfs_unzip, mov_avggii, [('surf_keys', 'surf_key')]),
            (mov_avggii, mov_smooth, [('out_file', 'in_file')]),
            (mov_smooth, mov_avg_ds, [('surface', 'in_file'),
                                      (('surface', _get_surf_extra), 'extra_values')]),
        ])
    return wf
27,444
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up iDiamant from a config entry.

    Resolves the OAuth2 implementation, validates/refreshes the stored token,
    checks the granted scopes, stores an authenticated API client in
    ``hass.data`` and forwards the entry to the platforms.

    Raises:
        ConfigEntryAuthFailed: token rejected (4xx auth errors) or scopes
            don't match, triggering a reauth flow.
        ConfigEntryNotReady: any other API error; Home Assistant will retry.
    """
    implementation = (
        await config_entry_oauth2_flow.async_get_config_entry_implementation(
            hass, entry
        )
    )

    # Set unique id if non was set.
    if not entry.unique_id:
        hass.config_entries.async_update_entry(entry, unique_id=DOMAIN)

    session = config_entry_oauth2_flow.OAuth2Session(hass, entry, implementation)

    try:
        await session.async_ensure_token_valid()
    except aiohttp.ClientResponseError as ex:
        _LOGGER.debug("API error: %s (%s)", ex.code, ex.message)
        # Auth-related status codes mean the token itself is bad -> reauth.
        if ex.code in (
            HTTPStatus.BAD_REQUEST,
            HTTPStatus.UNAUTHORIZED,
            HTTPStatus.FORBIDDEN,
        ):
            raise ConfigEntryAuthFailed("Token not valid, trigger renewal") from ex
        raise ConfigEntryNotReady from ex

    if sorted(session.token["scope"]) != sorted(SCOPES):
        _LOGGER.debug("Scopes are invalids: %s != %s", session.token["scope"], SCOPES)
        raise ConfigEntryAuthFailed("Token scopes not valid, trigger renewal")

    hass.data[DOMAIN][entry.entry_id] = {
        AUTH: api.AsyncConfigEntryNetatmoAuth(
            aiohttp_client.async_get_clientsession(hass), session
        )
    }

    # data_handler = NetatmoDataHandler(hass, entry)
    # await data_handler.async_setup()
    # hass.data[DOMAIN][entry.entry_id][DATA_HANDLER] = data_handler

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    # BUGFIX: the function is annotated `-> bool` and Home Assistant treats a
    # non-True return as a failed setup; the original fell off the end
    # (implicitly returning None).
    return True
27,445
def test_apply_conversion(qtbot, value, precision, unit, show_unit, expected_format_string):
    """
    Test the unit conversion by examining the resulted format string.

    Expectations:
    Provided with the value, precision, unit, and the show unit Boolean flag by
    the user, this function must provide the correct format string to format
    the displayed value for the widget.

    Parameters
    ----------
    qtbot : fixture
        Window for widget testing
    value : int, float, bin, hex, numpy.array
        The value to be converted
    precision : int
        The precision applied to the displayed value (original docstring was
        truncated here -- presumably the number of digits; confirm)
    unit : str
        The unit of the new value
    show_unit : bool
        True if the value unit is to be displayed. False otherwise
    expected_format_string : str
        The expected format string that will produce the correct displayed
        value after the conversion
    """
    pydm_lineedit = PyDMLineEdit()
    qtbot.addWidget(pydm_lineedit)

    # Prime the widget's internal state before triggering the conversion.
    pydm_lineedit.value = value
    pydm_lineedit._unit = unit
    pydm_lineedit._prec = precision
    pydm_lineedit.showUnits = show_unit

    pydm_lineedit.apply_conversion(unit)
    assert pydm_lineedit.format_string == expected_format_string
27,446
def Qest(ICobj, r=None):
    """
    Estimate Toomre Q at r (optional) for ICs, assuming omega=epicyclic
    frequency.  Ignores disk self-gravity.

    Parameters
    ----------
    ICobj : object
        Initial-conditions object exposing ``sigma`` (surface density
        profile), ``T`` (temperature profile) and ``settings.physical``.
    r : array-like, optional
        Radii at which to evaluate Q; defaults to ``ICobj.sigma.r_bins``.

    Returns
    -------
    Q : SimArray
        Dimensionless Toomre Q evaluated at ``r``.

    Raises
    ------
    ValueError
        If ``ICobj`` has no surface density profile.
    """
    if not hasattr(ICobj, 'sigma'):
        # BUGFIX: original used the Python-2-only `raise ValueError, msg`
        # syntax; the call form works on both Python 2 and 3.
        raise ValueError('Could not find surface density profile (sigma)')

    # Unit-carrying constants: gravitational constant and Boltzmann constant.
    G = SimArray(1.0, 'G')
    kB = SimArray(1.0, 'k')

    if r is None:
        r = ICobj.sigma.r_bins

    sigma = ICobj.sigma(r)
    T = ICobj.T(r)
    M = ICobj.settings.physical.M
    m = ICobj.settings.physical.m
    M = match_units(M, 'Msol')[0]
    m = match_units(m, 'm_p')[0]
    gamma = ICobj.settings.physical.gamma_cs()

    Q = np.sqrt(M * kB * T * gamma / (G * m * r**3)) / (np.pi * sigma)
    Q.convert_units('1')
    return Q
27,447
def get_projector_csr_file(config_name: str) -> str:
    """Returns full path to the projector server CSR (certificate signing
    request) file — note: the original docstring said "crt", but the path
    built below ends in ``.csr``."""
    return join(get_run_configs_dir(), config_name, f'{PROJECTOR_JKS_NAME}.csr')
27,448
def test_xiaomi():
    """KITTI-style disparity map -> depth map -> point cloud.

    Reads a disparity image and the matching left BGR image, converts
    disparity to metric depth, back-projects pixels into a colored 3D point
    cloud, and writes PCD/PLY point-cloud files plus a 16-bit depth image.
    """
    def disp2depth(b, f, disp):
        """depth = baseline * focal / disparity; zero-disparity pixels keep depth 0."""
        disp = disp.astype(np.float32)
        non_zero_inds = np.where(disp)
        depth = np.zeros_like(disp, dtype=np.float32)
        depth[non_zero_inds] = b * f / disp[non_zero_inds]
        return depth

    disp_f_path = './disp_2.png'  # TestDisparity2DepthAndPC
    img_f_path = './left_2.png'
    # BUGFIX: original used `or`, which only reported an error when *both*
    # files were missing; both inputs are required, so test each one.
    if not (os.path.isfile(disp_f_path) and os.path.isfile(img_f_path)):
        print('[Err]: invalid disparity/image file path.')
        return

    # # KITTI dataset parameters
    # f = 721  # pixel
    # b = 0.54  # m

    # xiaomi camera parameters
    # fx = 998.72290039062500
    # fy = 1000.0239868164063
    f = (998.72290039062500 + 1000.0239868164063) * 0.5  # 1000.0
    cx = 671.15643310546875
    cy = 384.32458496093750
    b = 0.12  # m (stereo baseline)

    # Read the disparity map
    disp = cv2.imread(disp_f_path, cv2.IMREAD_ANYDEPTH)
    print('Disparity image data type: ', disp.dtype)

    # Read the BGR image
    bgr = cv2.imread(img_f_path, cv2.IMREAD_COLOR)
    print('BGR image data type: ', bgr.dtype)
    assert (bgr.shape[:2] == disp.shape[:2])

    H, W = disp.shape[:2]
    print('W×H: {:d}×{:d}'.format(W, H))
    c, r = np.meshgrid(np.arange(W), np.arange(H))
    # print(c, '\n', r)
    # x, y = np.arange(W), np.arange(H)

    # NOTE(review): this overrides the calibrated principal point above with
    # the image center — confirm which is intended.
    cx, cy = W * 0.5, H * 0.5

    # ---------- disparity map (uint16) --> depth map (float32)
    depth = disp2depth(b, f, disp)

    # --------- depth map --> point cloud x, y, z
    points = np.zeros((H, W, 3), dtype=np.float32)
    colors = np.zeros((H, W, 3), dtype=np.uint8)
    points[r, c, 0] = (c - cx) * depth / f  # x
    points[r, c, 1] = (r - cy) * depth / f  # y
    points[r, c, 2] = depth  # z

    # bgr --> rgb
    colors = bgr[:, :, ::-1]

    # ----- drop points whose x, y and z are all zero
    inds = np.where(
        (points[:, :, 0] != 0.0) | (points[:, :, 1] != 0.0) | (points[:, :, 2] != 0.0)
    )
    points = points[inds]
    colors = colors[inds]

    # # ----- filtering
    # inds = np.where(
    #     (points[:, 1] > -1.0)
    #     & (points[:, 1] < 1.0)
    # )
    # points = points[inds]
    # colors = colors[inds]
    # print('{:d} 3D points left.'.format(inds[0].size))
    # view_points_cloud(points)

    # Save the PCD point-cloud file
    points2pcd(points, './pc_2.pcd')
    print('PCD poind cloud saved.')

    # Save the PLY point-cloud file
    points2ply(points, colors, './ply_2.ply')
    print('Ply poind cloud saved.')

    # ---------- save the depth map
    depth *= 1000.0  # m --> mm
    depth = depth.astype(np.uint16)
    cv2.imwrite('./depth_2.png', depth)
    print('Depth image written.')
27,449
def get_img(shape, path, dtype, should_scale=True):
    """Get image as input.

    Args:
        shape: target array shape; ``shape[1:3]`` is used as the resize target
            and ``shape[0]`` as the batch size.
        path: image path, resolved relative to this file's directory.
        dtype: numpy dtype for the returned array.
        should_scale: when True, scale pixel values from [0, 255] into [0, 1].

    Returns:
        numpy array of the requested ``shape`` with the image repeated along
        the batch dimension.
    """
    # NOTE(review): PIL's resize takes (width, height) while shape[1:3] is
    # usually (height, width) — harmless for square inputs; confirm otherwise.
    resize_to = shape[1:3]
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), path)
    img = PIL.Image.open(path)
    # BUGFIX: PIL.Image.ANTIALIAS was deprecated and removed in Pillow 10;
    # LANCZOS is the same filter under its current name.
    img = img.resize(resize_to, PIL.Image.LANCZOS)
    img_np = np.array(img).astype(dtype)
    img_np = np.stack([img_np] * shape[0], axis=0).reshape(shape)
    if should_scale:
        img_np = img_np / 255
    return img_np
27,450
def render_author(**kwargs):
    """
    Unstrict template block for rendering authors:

    <div class="user">
        <img class="user-avatar" src="{author_avatar}">
        <p class="user-name">
            <a href="{author_link}">{author_name}</a>
        </p>
        <p class="user-handle">{author_handle}</p>
    </div>

    Every part is optional: avatar, name (with or without link) and handle are
    emitted only when the corresponding keyword argument is provided.

    :return: the assembled HTML snippet as a string.
    """
    html = '<div class="user">'

    author_avatar = kwargs.get('author_avatar', None)
    if author_avatar:
        html += '<img class="user-avatar" src="{}">'.format(author_avatar)

    author_name = kwargs.get('author_name', None)
    if author_name:
        html += '<p class="user-name">'
        author_link = kwargs.get('author_link', None)
        if author_link:
            html += '<a href="{author_link}">{author_name}</a>'.format(
                author_link=author_link, author_name=author_name
            )
        else:
            html += author_name
        html += '</p>'

    author_handle = kwargs.get('author_handle', None)
    if author_handle:
        html += '<p class="user-handle">{}</p>'.format(author_handle)

    html += '</div>'
    # BUGFIX: the original built the markup but never returned it (implicit
    # None); also the docstring showed "author" CSS classes while the code
    # emits "user" classes — docstring corrected to match the output.
    return html
27,451
def save_pattern(seq, end):
    """
    Saves a polar graph of the GCS sequence, up to a specified stopping point
    in the sequence

    @param seq (list[int]) - the generalized chopstick sequence
    @param end (int) - only GCS numbers less than or equal to `end` are
        included in the graph
    """
    # Scale the figure with the log of the range so dense plots stay readable.
    size = round(log(end)) * 2
    fig = plt.figure(figsize=(size, size))
    trunc_seq = [n for n in seq if n <= end]
    for n in trunc_seq:
        # Each value n is plotted at angle n (radians) and radius n.
        plt.polar(n, n, 'b.')
    fig.savefig('./img/polar/gcs_polar_{0}.png'.format(end))
    # BUGFIX: close the figure after saving so repeated calls don't leak open
    # matplotlib figures (memory growth + "too many open figures" warning).
    plt.close(fig)
27,452
def make_epsilon_greedy_policy(Q: defaultdict, epsilon: float, nA: int) -> callable:
    """
    Build an epsilon-greedy action-probability function from a Q-function.

    :param Q: tabular state-action lookup function
    :param epsilon: exploration factor
    :param nA: size of action space to consider for this policy
    :return: callable mapping an observation to a length-``nA`` weight vector
    """
    def policy_fn(observation):
        # Baseline exploration mass, spread uniformly over all actions.
        weights = np.full(nA, epsilon / nA)
        q_values = Q[observation]
        # Random choice among maximizers for tie-breaking only.
        greedy_action = np.random.choice(np.flatnonzero(q_values == q_values.max()))
        weights[greedy_action] += 1 - epsilon
        return weights

    return policy_fn
27,453
def write_init(proxy_parameters=None, exception=None):
    """Encodes and returns an MPI ('Metadata Init') response.

    :param proxy_parameters: optional proxy parameters forwarded to the encoder.
    :param exception: optional exception to encode into the response.
    """
    # Thin wrapper: delegates to _write_init with the MPI method tag and the
    # error type used for metadata-provider failures.
    return _write_init(Method.MPI, MetadataProviderError, proxy_parameters, exception)
27,454
def run(loaded_sns_message, context):
    """Send an Alert to its described outputs.

    Args:
        alerts [dict]: SNS message dictionary with the following structure:

        {
            'default': alert
        }

        The alert is another dict with the following structure:

        {
            'record': record,
            'metadata': {
                'rule_name': rule.rule_name,
                'rule_description': rule.rule_function.__doc__,
                'log': str(payload.log_source),
                'outputs': rule.outputs,
                'type': payload.type,
                'source': {
                    'service': payload.service,
                    'entity': payload.entity
                }
            }
        }
    """
    LOGGER.debug(loaded_sns_message)
    alert = loaded_sns_message['default']
    rule_name = alert['metadata']['rule_name']

    # strip out unnecessary keys and sort
    alert = sort_dict(alert)

    config = load_output_config()
    outputs = alert['metadata']['outputs']
    # Get the output configuration for this rule and send the alert to each.
    # set() deduplicates outputs declared more than once on a rule.
    for output in set(outputs):
        try:
            # Outputs are declared as 'service:descriptor' pairs.
            service, descriptor = output.split(':')
        except ValueError:
            LOGGER.error('Outputs for rules must be declared with both a service and a '
                         'descriptor for the integration (ie: \'slack:my_channel\')')
            continue

        if not service in config or not descriptor in config[service]:
            LOGGER.error('The output %s does not exist!', output)
            continue

        # ARN format: arn:aws:lambda:<region>:... — index 3 is the region.
        region = context.invoked_function_arn.split(':')[3]
        function_name = context.function_name

        # Retrieve the proper class to handle dispatching the alerts of this services output
        output_dispatcher = get_output_dispatcher(service, region, function_name, config)

        if not output_dispatcher:
            continue

        try:
            LOGGER.debug('Sending alert to %s:%s', service, descriptor)
            output_dispatcher.dispatch(descriptor=descriptor, rule_name=rule_name, alert=alert)
        # One failing output must not prevent delivery to the remaining ones.
        except Exception as err:
            LOGGER.error('An error occurred while sending alert to %s:%s: %s. alert:\n%s',
                         service, descriptor, err, json.dumps(alert, indent=4))
27,455
def pfam_to_pubmed(family):
    """get a list of associated pubmed ids for given pfam access key.

    :param family: pfam accession key of family
    :type family: str
    :return: List of associated Pubmed ids
    :rtype: list
    """
    family_url = 'https://pfam.xfam.org/family/{0}'.format(family)
    pubmed_prefix = 'http://www.ncbi.nlm.nih.gov/pubmed/'
    # Delegate link extraction/matching to the shared xfam helper.
    return _xfam_to(family_url, pubmed_prefix)
27,456
def cvConvexHull2(input, hull_storage=None, orientation=CV_CLOCKWISE, return_points=0):
    """CvSeq_or_CvMat cvConvexHull2(list_or_tuple_of_CvPointXYZ input, void* hull_storage=NULL, int orientation=CV_CLOCKWISE, int return_points=0)
    Finds convex hull of point set

    [ctypes-opencv] OpenCV's note: a vertex of the detected convex hull can be represented by:
        a point of the same type with every point in 'input', if return_points==1
        an index to a point in 'input', if return_points==0 and hull_storage is a CvMat
        a pointer to a point in 'input', if return_points==0 and hull_storage is a CvStorage
    [ctypes-opencv] If input is a (subclass of) CvSeq, 'hull_storage' can be:
        None: detected vertices are stored in input's storage
        an instance of CvStorage or CvMat: detected vertices are stored here
    [ctypes-opencv] If input is 1d CvMat of 2D 32-bit points, 'hull_storage' can be:
        None: 'hull_storage' is internally created as a 1d CvMat of 2D 32-bit points.
        an instance of CvStorage or CvMat: detected vertices are stored here
    [ctypes-opencv] In any case, the function returns a sequence (CvSeq) of detected vertices if 'hull_storage' is an instance CvStorage, or 'hull_storage' itself if otherwise.
    """
    if isinstance(input, _CvSeqStructure):  # a sequence
        # Sequence path: the C call returns a CvSeq pointer; `pointee` ties its
        # lifetime to the storage actually used (input's own, or hull_storage).
        return pointee(_cvConvexHull2(input, hull_storage, orientation, return_points),
                       input if hull_storage is None else hull_storage)
    if hull_storage is None:
        # Matrix path with no storage given: allocate a 1xN output matrix —
        # point-typed when returning points, 32-bit ints when returning indices.
        hull_storage = cvCreateMat(1, input.rows * input.cols,
                                   CV_MAT_TYPE(input) if return_points else CV_32SC1)
    _cvConvexHull2(input, hull_storage, orientation, return_points)
    return hull_storage
27,457
def display(result, args):
    """Render language identification results in the output format
    selected on the command line.

    :param result: the language identification results.
    :param args: the command line arguments (supplies ``format``).
    """
    formatter = get_formatter(args.format)
    formatter(result)
27,458
def main(params):
    """PIPELINE for matching.

    Filters the info table down to rows whose images exist and whose
    ellipse overlap passes OVERLAP_THRESHOLD, then runs the per-stage
    matching step for each developmental stage group.

    :param {str: str} params: pipeline parameters (paths to info file,
        images and output folder)
    """
    # tl_expt.set_experiment_logger(params['path_expt'])
    # tl_expt.create_subfolders(params['path_expt'], LIST_SUBDIRS)
    logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))

    if not os.path.isdir(params['path_output']):
        os.mkdir(params['path_output'])

    df_info = pd.read_csv(params['path_infofile'], index_col=0)
    df_info = r_match.filter_table(df_info, params['path_images'])
    df_info.dropna(inplace=True)
    df_info = df_info[df_info['ellipse_Jaccard'] >= OVERLAP_THRESHOLD]
    # Lazy %-args instead of eager string formatting (logging best practice).
    logging.info('filtered %i item in table', len(df_info))

    # execute over groups per stage
    path_dir_imgs = os.path.dirname(params['path_images'])
    for stage, df_stage in df_info.groupby('stage'):
        perform_stage(df_stage, stage, path_dir_imgs, params['path_output'])
27,459
def init_classifier(config: Union[str, mmcv.Config],
                    checkpoint: Optional[str] = None,
                    device: str = 'cuda:0',
                    options: Optional[Dict] = None) -> nn.Module:
    """Build a few shot classifier from a config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str | None): Checkpoint path. If left as None, the model
            will not load any weights. Default: None.
        device (str): Runtime device. Default: 'cuda:0'.
        options (dict | None): Options to override some settings in the
            used config. Default: None.

    Returns:
        nn.Module: The constructed classifier.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    if not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    if options is not None:
        config.merge_from_dict(options)

    model = build_classifier(config.model)
    if checkpoint is not None:
        # Loading onto CPU first avoids touching CUDA when running on CPU.
        load_checkpoint(model, checkpoint,
                        map_location='cpu' if device == 'cpu' else None)

    # Keep the config on the model so downstream code can inspect it.
    model.cfg = config
    model.to(device)
    model.eval()
    return model
27,460
def get_cognito_events(IdentityPoolId=None):
    """Get the events and the corresponding Lambda functions associated with
    an identity pool. Requires developer credentials; temporary Cognito
    Identity user credentials cannot call this API.

    See also: AWS API Documentation

    :example: response = client.get_cognito_events(
        IdentityPoolId='string'
    )

    :type IdentityPoolId: string
    :param IdentityPoolId: [REQUIRED] The Cognito Identity Pool ID for the
        request

    :rtype: dict
    :return: {
        'Events': {
            'string': 'string'
        }
    }
    """
    # Auto-generated stub: no implementation, always returns None.
    return None
27,461
def get_twitter_auth():
    """Setup Twitter authentication.

    Reads API credentials via read_credentials() and builds an
    authenticated OAuth handler. Exits the process with status 1 if the
    credentials cannot be read.

    Return: tweepy.OAuthHandler object
    """
    try:
        credentials = read_credentials()
        # NOTE(review): dict.get() returns None rather than raising KeyError,
        # so this except only fires if read_credentials() itself raises —
        # confirm that is the intended failure path.
        consumer_key = credentials.get('consumer_key')
        consumer_secret = credentials.get('consumer_secret')
        access_token = credentials.get('access_token')
        access_secret = credentials.get('access_secret')
    except KeyError:
        sys.stderr.write("TWITTER_* not found\n")
        sys.exit(1)
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    return auth
27,462
def write_parameters(data, run_dir, is_parallel):
    """Write the parameters file

    Generates the run's Python parameters source and writes it into
    run_dir under the template's standard file name.

    Args:
        data (dict): input
        run_dir (py.path): where to write
        is_parallel (bool): run in background?
            NOTE(review): currently unused here — kept for interface
            compatibility with sibling templates; confirm before removing.
    """
    pkio.write_text(
        run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
        _generate_parameters_file(data, run_dir=run_dir)
    )
27,463
def Newton_method(f, df, start:float=0.0, max_step:int=32, sign_dig:int=6)->float:
    """Newton's root-finding method.

    Expresses the Newton update x - f(x)/df(x) as a fixed-point iteration
    and delegates to fixed_point.

    Args:
        f: function whose root is sought.
        df: derivative of f.
        start: initial guess.
        max_step: maximum number of iterations.
        sign_dig: significant digits for convergence.

    Returns:
        Approximate root of f.
    """
    def newton_step(x):
        # One Newton update; diverges (ZeroDivisionError) if df(x) == 0.
        return x - f(x) / df(x)

    return fixed_point(newton_step, start, max_step, sign_dig)
27,464
def validate_blueprints(ctx):
    """Check all blueprints in the project for errors"""
    data = utilities.convert_input_to_dict(ctx)
    module_chain = ["run-module", "ue4", "project", "commandlet"]
    task_args = ["--task=Compile-Blueprints"]
    cmd = utilities.get_commandline(SENTINEL_SCRIPT_PATH,
                                    module_chain,
                                    data,
                                    sub_command_arguments=task_args)
    utilities.run_cmd(cmd)
27,465
def get_all(isamAppliance, count=None, start=None, filter=None, check_mode=False, force=False):
    """
    Retrieve a list of federations

    Issues a GET against the module-level `uri`, forwarding optional
    paging (count/start) and filter parameters as a query string.
    `check_mode` and `force` are accepted for interface consistency with
    sibling modules but are not used by a read-only GET.
    """
    return isamAppliance.invoke_get("Retrieve a list of federations",
                                    "{0}/{1}".format(uri,
                                                     tools.create_query_string(count=count, start=start,
                                                                               filter=filter)),
                                    requires_modules=requires_modules,
                                    requires_version=requires_version)
27,466
def roty(theta):
    """
    Rotation about Y-axis

    @type theta: number
    @param theta: the rotation angle
    @rtype: 3x3 orthonormal matrix
    @return: rotation about Y-axis

    @see: L{rotx}, L{rotz}, L{rotvec}
    """
    c, s = cos(theta), sin(theta)
    rows = [
        [c, 0, s],
        [0, 1, 0],
        [-s, 0, c],
    ]
    return mat(rows)
27,467
def connect_default_signals(model_class):
    """
    Use this function to connect default signals to your custom model.

    It is called automatically, if default cities_light models are used,
    i.e. settings `CITIES_LIGHT_APP_NAME` is not changed.
    """
    name = model_class.__name__
    if 'Country' in name:
        signals.pre_save.connect(set_name_ascii, sender=model_class)
    if 'Region' in name:
        signals.pre_save.connect(set_name_ascii, sender=model_class)
        signals.pre_save.connect(set_display_name, sender=model_class)
    if 'City' in name:
        signals.pre_save.connect(set_name_ascii, sender=model_class)
        signals.pre_save.connect(set_display_name, sender=model_class)
        signals.pre_save.connect(city_country, sender=model_class)
        signals.pre_save.connect(city_search_names, sender=model_class)
27,468
def test_pe_validation_right_size_invalid_number():
    """Test if an invalid number is really invalid"""
    invalid_number = '1520309645'
    # PEP 8 (E712): assert identity with False instead of `== False`,
    # which also guards against a falsy-but-not-False return value.
    assert pe.start(invalid_number) is False
27,469
def dfs_iter(graph, start): """Iterativni verze DFS (vcetne casovych znamek).""" # vkladam uzel a index potencialniho naslednika, kterym mam pokracovat stack = [(start, 0)] time = 1 graph.discovery_time[start] = time graph.visited[start] = True while stack: # not empty u, v = stack.pop() while v < graph.size and not is_edge(graph, u, v): v += 1 if v < graph.size: # found successor, u is not yet finished stack.append((u, v + 1)) if not graph.visited[v]: # we have discovered v stack.append((v, 0)) graph.parent[v] = u graph.visited[v] = True time += 1 graph.discovery_time[v] = time else: # u has no more successors time += 1 graph.finishing_time[u] = time
27,470
def flush(name, family="ipv4", ignore_absence=False, **kwargs):
    """
    .. versionadded:: 2014.7.0

    .. versionchanged:: Magnesium

    Flush current nftables state

    family
        Networking family, either ipv4 or ipv6

    ignore_absence
        If set to True, attempts to flush a non-existent table will not
        result in a failed state.

        .. versionadded:: Magnesium

    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}

    if __opts__["test"]:
        ret["comment"] = "nftables flush not performed in test mode."
        return ret

    # Drop salt-internal state keywords so only nftables args remain.
    for ignore in _STATE_INTERNAL_KEYWORDS:
        if ignore in kwargs:
            del kwargs[ignore]

    if "table" not in kwargs:
        kwargs["table"] = "filter"

    check_table = __salt__["nftables.check_table"](kwargs["table"], family=family)
    if not ignore_absence and not check_table["result"]:
        ret["result"] = False
        ret[
            "comment"
        ] = "Failed to flush table {} in family {}, table does not exist.".format(
            kwargs["table"], family
        )
        return ret

    if "chain" not in kwargs:
        kwargs["chain"] = ""
    else:
        check_chain = __salt__["nftables.check_chain"](
            kwargs["table"], kwargs["chain"], family=family
        )
        if not ignore_absence and not check_chain["result"]:
            ret["result"] = False
            ret[
                "comment"
            ] = "Failed to flush chain {} in table {} in family {}, chain does not exist.".format(
                kwargs["chain"], kwargs["table"], family
            )
            return ret

    res = __salt__["nftables.flush"](kwargs["table"], kwargs["chain"], family)
    # Success also when the table/chain is absent and absence is tolerated.
    if res["result"] or (
        ignore_absence and (not check_table["result"] or not check_chain["result"])
    ):
        ret["changes"] = {"locale": name}
        ret["result"] = True
        ret["comment"] = "Flush nftables rules in {} table {} chain {} family".format(
            kwargs["table"], kwargs["chain"], family
        )
        return ret
    else:
        ret["result"] = False
        ret["comment"] = "Failed to flush nftables rules"
        return ret
27,471
def solution(s):
    """
    Check if a string has properly matching brackets

    :param s: String to verify if it is well-formed
    :return: 1 if the brackets are properly matching, 0 otherwise
    """
    open_bracket, close_bracket = "(", ")"
    return check_matching_brackets(s, opening=open_bracket, closing=close_bracket)
27,472
def process_to_annotation_data(df, class_names, video_fps, min_len):
    """
    This function cleans the output data, so that there are no jumping frames.

    Groups of consecutive identical task labels shorter than a minimum
    length are reassigned to the most common label of their neighbours,
    then the per-frame table is condensed to one (task, startTime, endTime)
    row per run.

    NOTE(review): assumes the last two columns of df are the task label
    (second-to-last after "subgroup" is appended) and that df has "frame"
    and "task_label" columns — confirm against the caller.
    """
    j = 1  # Helper
    # Minimum qty of frames of the same task in order to
    # consider it a whole task
    min_frames = int(float(min_len) * float(video_fps) * float(0.6))
    # Initialize variables
    df["subgroup"] = (df.iloc[:, -1] != df.iloc[:, -1].shift(1)).cumsum()
    added = (
        df["subgroup"]
        .value_counts()[df["subgroup"].value_counts() < (j + 1)]
        .index.tolist()
    )
    # Modify jumping frames by considering the sourrounding frames
    # check for frames that jump (the total group of those frames are of a max of 7)
    for jj in range(min_frames):
        j = jj + 1
        df["subgroup"] = (df.iloc[:, -2] != df.iloc[:, -2].shift(1)).cumsum()
        # Subgroups smaller than the current length threshold are candidates.
        added = (
            df["subgroup"]
            .value_counts()[df["subgroup"].value_counts() < (j + 1)]
            .index.tolist()
        )
        cnt = 0
        i_prev = 0
        i_prev_cnt = 0
        while len(added) > 0:
            added.sort()
            i = added[0]
            k = 1  # Helper
            prev = []
            after = []
            prev_yes = 0
            after_yes = 0
            # Collect labels of the neighbouring subgroups, weighted by size.
            if (i - k) > 0:
                prev = [df[df["subgroup"] == (i - k)].iloc[0, -2]] * len(
                    df[df["subgroup"] == (i - k)]
                )
                prev_yes = 1
            if (i + k) < max(df["subgroup"]) + 1:
                after = [df[df["subgroup"] == (i + k)].iloc[0, -2]] * len(
                    df[df["subgroup"] == (i + k)]
                )
                after_yes = 1
            check_loop = True
            # If both neighbours agree on a label, use it directly.
            if (prev_yes + after_yes) == 2:
                if mode(prev).mode[0] == mode(after).mode[0]:
                    check_loop = False
            if check_loop:
                # Otherwise widen the neighbourhood until enough samples exist,
                # then take the overall mode.
                k = 1  # Helper
                while len(prev) < j + 2 - i_prev_cnt:
                    k += 1
                    if (i - k) > 0:
                        prev_i = [df[df["subgroup"] == (i - k)].iloc[0, -2]] * len(
                            df[df["subgroup"] == (i - k)]
                        )
                        prev.extend(prev_i)
                    else:
                        break
                k = 1  # Helper
                while len(after) < j + 2 - i_prev_cnt:
                    k += 1
                    if (i + k) < max(df["subgroup"]) + 1:
                        prev_i = [df[df["subgroup"] == (i + k)].iloc[0, -2]] * len(
                            df[df["subgroup"] == (i + k)]
                        )
                        after.extend(prev_i)
                    else:
                        break
                changeTo = prev
                changeTo.extend(after)
                changeTo = mode(changeTo).mode[0]
            else:
                changeTo = mode(prev).mode[0]
            # Overwrite the short subgroup's label and recompute grouping.
            change_idx = df.index[df["subgroup"] == i].tolist()
            df.iloc[change_idx, -2] = changeTo
            df["subgroup"] = (df.iloc[:, -2] != df.iloc[:, -2].shift(1)).cumsum()
            added = (
                df["subgroup"]
                .value_counts()[df["subgroup"].value_counts() < (j + 1)]
                .index.tolist()
            )
            added.sort()
            if i == i_prev:
                i_prev_cnt += 1
            else:
                i_prev_cnt = 0
            i_prev = i
            cnt += 1
            # Safety valve against non-converging relabelling loops.
            if cnt > max(df["subgroup"]) * (j + 2):
                break
    # Modify the output shape so that for each task we have start frame and end frame
    output_df = pd.DataFrame(columns=["task", "startTime", "endTime"])
    for i in range(max(df["subgroup"])):
        df_i = df[df["subgroup"] == (i + 1)]
        task_str = str(class_names[int(df_i.iloc[0]["task_label"])])
        start_frame = int(min(df_i["frame"]))
        start_frame = frame_to_time(start_frame, video_fps)
        end_frame = int(max(df_i["frame"]))
        end_frame = frame_to_time(end_frame, video_fps)
        # NOTE(review): DataFrame.append is deprecated in modern pandas;
        # pd.concat is the replacement — confirm the pinned pandas version.
        output_df = output_df.append(
            pd.DataFrame(
                [[task_str] + [start_frame] + [end_frame]],
                columns=["task", "startTime", "endTime"],
            )
        )
    return output_df
27,473
def test_successful_parse_undocumented_endpoints() -> None:
    """
    Asserts that a schema section is returned successfully.
    """
    schema = fetch_from_dir(settings.BASE_DIR + '/demo_project/openapi-schema.yml')
    expected_section = {'type': 'array', 'items': {}}
    undocumented_urls = ['/api/v1/cars/incorrect/', '/api/v1/trucks/incorrect/']
    for url in undocumented_urls:
        assert parse_endpoint(schema, 'GET', url) == expected_section
27,474
def dicom_dir():
    """Decompress DICOM files into a temp directory"""
    with TemporaryDirectory(prefix="dcm-test") as temp_dir:
        # Unpack the test archive, then hand the directory to the test body;
        # cleanup happens automatically when the context exits.
        with tarfile.open(DICOM_TAR, "r:bz2") as tf:
            tf.extractall(temp_dir)
        yield Path(temp_dir)
27,475
def throw_sardana_exception(exc):
    """Throws an exception as a tango exception"""
    if isinstance(exc, SardanaException):
        if exc.exc_info and None not in exc.exc_info:
            # Full python exception info available — re-raise it through tango.
            Except.throw_python_exception(*exc.exc_info)
        else:
            # Fall back to type/message plus whatever traceback text we have.
            tb = "<Unknown>" if exc.traceback is None else str(exc.traceback)
            Except.throw_exception(exc.type, exc.msg, tb)
    elif hasattr(exc, 'exc_info'):
        Except.throw_python_exception(*exc.exc_info)
    else:
        raise exc
27,476
def reduce_memmap(a):
    """Pickle the descriptors of a memmap instance to reopen on same file.

    Returns a (callable, args) reduce tuple suitable for the pickle
    protocol's __reduce__ machinery.
    """
    m = _get_backing_memmap(a)
    if m is not None:
        # m is a real mmap backed memmap instance, reduce a preserving striding
        # information
        return _reduce_memmap_backed(a, m)
    else:
        # This memmap instance is actually backed by a regular in-memory
        # buffer: this can happen when using binary operators on numpy.memmap
        # instances
        return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),))
27,477
def load_sparse_csr(filename):
    """Load a saved sparse matrix in csr format.

    The ``.npz`` archive must contain the CSR component arrays
    ``data``, ``indices``, ``indptr`` and the matrix ``shape``.
    Stolen from above source.
    """
    archive = np.load(filename)
    components = (archive['data'], archive['indices'], archive['indptr'])
    return sparse.csr_matrix(components, shape=archive['shape'])
27,478
def admin_unpublish_selected(modeladmin, request, queryset):
    """
    Sets all selected items in queryset to not published
    """
    # Bulk SQL UPDATE: does not call each object's save() or fire
    # pre/post_save signals.
    queryset.update(admin_published=False)
27,479
def blank(name: Optional[str]) -> Output:
    """Generate a blank `Output` instance.

    Falls back to the module default suffix when *name* is None or empty.
    """
    suffix = name if name else _DEFAULT_SUFFIX
    return Output(file_suffix=suffix, variables={})
27,480
def estatistica_regras(regras_pt, regras_lgp):
    """
    Count the morphosyntactic rules in the corpus.

    Deduplicates rules that compara_regra() considers equal and tallies
    how often each unique rule occurs. An empty rule type is counted as
    "CAN" (canonical).

    :param regras_pt: Portuguese side of the rules (list)
    :param regras_lgp: LGP side of the rules (list)
    :return: Dictionary with the frequency of each rule.
             E.g.: {"(0, 'INT')": 1, "(1, 'CAN')": 1, "(2, 'INT')": 1}
    """
    estatistica = {}
    repetido = set()  # indices already matched to an earlier rule
    for i in range(len(regras_pt)):
        tipo = regras_pt[i][1]
        if i in repetido:
            continue
        if tipo == "":
            tipo = "CAN"
        if str((i,tipo)) not in estatistica.keys():
            estatistica[str((i, tipo))]= 1
        for j in range(len(regras_pt)):
            a = regras_pt[i]
            b = regras_lgp[i]
            c = regras_pt[j]
            d = regras_lgp[j]
            # Only compare each unordered pair once, skipping known duplicates.
            if i >= j:
                continue
            if j in repetido:
                continue
            if compara_regra(a,b,c,d):
                # j duplicates i: fold its count into i and drop its own entry.
                repetido.add(j)
                tipo = regras_pt[j][1]
                if tipo == "":
                    tipo = "CAN"
                estatistica[str((i,tipo))] +=1
                if str((j, tipo)) in estatistica.keys():
                    del estatistica[str((j,tipo))]
            else:
                tipo = regras_pt[j][1]
                if tipo == "":
                    tipo = "CAN"
                if str((j, tipo)) not in estatistica.keys():
                    estatistica.setdefault(str((j,tipo)),0)
                    estatistica[str((j, tipo))] += 1
    return estatistica
27,481
def read_config():
    """
    Reads the configuration info into the cfg dictionary.

    :return: A dictionary with the SSH-IPS configuration variables.

    Exits the process with status 1 if the file is missing, unreadable,
    or contains malformed JSON.
    """
    CONFIG_FILE = '/etc/ssh-ips/config.json'
    try:
        with open(CONFIG_FILE, "r") as f:
            cfg = json.load(f)
    # Previously only ValueError (bad JSON) was handled, so a missing or
    # unreadable file crashed with an unhandled IOError; also, sys.exit()
    # with no argument exited with status 0, masking the failure.
    except (OSError, ValueError) as e:
        print(str(e))
        sys.exit(1)
    return cfg
27,482
def bq_create_dataset(bq_client):
    """Creates the BigQuery dataset.

    If the dataset already exists, the existing dataset will be returned.
    Dataset will be create in the location specified by DATASET_LOCATION.

    Args:
        bq_client: BigQuery client

    Returns:
        BigQuery dataset that will be used to store data.
    """
    dataset_ref = bigquery.Dataset("{}.{}".format(bq_client.project, DATASET_NAME))
    dataset_ref.location = DATASET_LOCATION
    # exists_ok makes this call idempotent across runs.
    return bq_client.create_dataset(dataset_ref, exists_ok=True)
27,483
def root_node():
    """
    Returns DCC scene root node
    :return: str
    """
    # Thin delegation to the active DCC's scene abstraction.
    return scene.get_root_node()
27,484
def firsts(things):
    """
    FIRSTS list

    outputs a list containing the FIRST of each member of the input
    list. It is an error if any member of the input list is empty.
    (The input itself may be empty, in which case the output is also
    empty.)  This could be written as::

        to firsts :list
        output map \"first :list
        end

    but is provided as a primitive in order to speed up the iteration
    tools MAP, MAP.SE, and FOREACH::

        to transpose :matrix
        if emptyp first :matrix [op []]
        op fput firsts :matrix transpose bfs :matrix
        end
    """
    return list(map(first, things))
27,485
def inf_set_mark_code(*args):
    """
    inf_set_mark_code(_v=True) -> bool
    """
    # SWIG-generated wrapper: forwards directly to the native IDA binding.
    return _ida_ida.inf_set_mark_code(*args)
27,486
def get_letter(xml):
    """
    :param xml:
    :return: everything between <bank> tag, or None when the tag is absent
    """
    try:
        start = xml.index('<bank ')
        stop = xml.index('</bank>') + _BANK_OFFSET
    except ValueError:
        # One of the markers is missing from the document.
        return None
    return xml[start:stop]
27,487
def stim_align_all_cells(traces, time, new_start):
    """
    Make stim-aligned PSTHs from trialwise data (eg. trial x cell x time array).
    The advantage of doing it this way (trialwise) is the trace for each cell
    gets rolled around to the other side of the array, thus eliminating the
    need for nan padding.

    Args:
        traces (array-like): trial x cell x time array of traces data,
            typically from make_trialwise
        time (array-like): list of stim times, one per trial
        new_start (int): frame number where the psths will be aligned to
    """
    # FIXME: URGERNT see above, list or single stim time??? depends on how this is working... for a
    # single trial an int is fine, but for multiple trials you'd want to give a list
    aligned = np.zeros_like(traces)
    n_trials = traces.shape[0]
    for trial in range(n_trials):
        shift = new_start - int(time[trial])
        aligned[trial, :, :] = np.roll(traces[trial, :, :], shift, axis=1)
    return aligned
27,488
def set_random_number_seed(seed=5801):
    """Initialize random number generator (RNG).

    By default, the RNG is always given the same seed so that simulations
    are reproducible. However, if you would like to rerun simulations with
    different random sequences, give a new random seed here.

    This affects :meth:`set_initial_temperature` and
    :meth:`create_dynamics` / :math:`run_simulation` if Langevin dynamics
    are chosen.

    Parameters:

    seed: integer
        a new RNG seed
    """
    # nr is the module-level RNG handle (presumably numpy.random — confirm import).
    nr.seed(seed)
27,489
def PowercycleNode(opts, args):
    """Remove a node from the cluster.

    @param opts: the command line options selected by the user
    @type args: list
    @param args: should contain only one element, the name of the node to be
        removed
    @rtype: int
    @return: the desired exit code (2 when the user declines the
        confirmation prompt, 0 otherwise)

    """
    node = args[0]
    # Ask for confirmation unless the user pre-confirmed on the command line.
    if (not opts.confirm and
        not AskUser("Are you sure you want to hard powercycle node %s?" % node)):
        return 2

    op = opcodes.OpNodePowercycle(node_name=node, force=opts.force)
    result = SubmitOrSend(op, opts)
    # Any message returned by the job (e.g. hypervisor output) goes to stderr.
    if result:
        ToStderr(result)
    return 0
27,490
async def test_async_setup(hass):
    """Test a successful setup with all of the different options.

    Patches out the bridge's device setup so no network traffic occurs,
    feeds a single-bridge YAML config exercising every config key, and
    asserts that exactly one config entry is created.
    """
    with patch(
        "homeassistant.components.dynalite.bridge.DynaliteDevices.async_setup",
        return_value=True,
    ):
        assert await async_setup_component(
            hass,
            dynalite.DOMAIN,
            {
                dynalite.DOMAIN: {
                    dynalite.CONF_BRIDGES: [
                        {
                            CONF_HOST: "1.2.3.4",
                            CONF_PORT: 1234,
                            dynalite.CONF_AUTO_DISCOVER: True,
                            dynalite.CONF_POLL_TIMER: 5.5,
                            dynalite.CONF_AREA: {
                                "1": {
                                    CONF_NAME: "Name1",
                                    dynalite.CONF_CHANNEL: {"4": {}},
                                    dynalite.CONF_PRESET: {"7": {}},
                                    dynalite.CONF_NO_DEFAULT: True,
                                },
                                "2": {CONF_NAME: "Name2"},
                                "3": {
                                    CONF_NAME: "Name3",
                                    dynalite.CONF_TEMPLATE: CONF_ROOM,
                                },
                                "4": {
                                    CONF_NAME: "Name4",
                                    dynalite.CONF_TEMPLATE: dynalite.CONF_TIME_COVER,
                                },
                            },
                            dynalite.CONF_DEFAULT: {dynalite.CONF_FADE: 2.3},
                            dynalite.CONF_ACTIVE: dynalite.ACTIVE_INIT,
                            dynalite.CONF_PRESET: {
                                "5": {CONF_NAME: "pres5", dynalite.CONF_FADE: 4.5}
                            },
                            dynalite.CONF_TEMPLATE: {
                                CONF_ROOM: {
                                    dynalite.CONF_ROOM_ON: 6,
                                    dynalite.CONF_ROOM_OFF: 7,
                                },
                                dynalite.CONF_TIME_COVER: {
                                    dynalite.CONF_OPEN_PRESET: 8,
                                    dynalite.CONF_CLOSE_PRESET: 9,
                                    dynalite.CONF_STOP_PRESET: 10,
                                    dynalite.CONF_CHANNEL_COVER: 3,
                                    dynalite.CONF_DURATION: 2.2,
                                    dynalite.CONF_TILT_TIME: 3.3,
                                    dynalite.CONF_DEVICE_CLASS: "awning",
                                },
                            },
                        }
                    ]
                }
            },
        )
        await hass.async_block_till_done()
    # One bridge in the YAML config should yield exactly one config entry.
    assert len(hass.config_entries.async_entries(dynalite.DOMAIN)) == 1
27,491
def collect_data(bids_dir, participant_label, queries, filters=None, bids_validate=True):
    """
    Uses pybids to retrieve the input data for a given participant
    """
    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)

    # Fold any caller-supplied entity filters into the matching queries.
    for acq, entities in (filters or {}).items():
        queries[acq].update(entities)

    subj_data = {}
    for dtype, query in queries.items():
        matches = layout.get(
            return_type="file",
            subject=participant_label,
            extension=["nii", "nii.gz"],
            **query
        )
        subj_data[dtype] = sorted(matches)

    return subj_data, layout
27,492
def minimaldescriptives(inlist):
    """Return basic descriptive statistics for a clean list of numbers.

    Args:
        inlist: sequence of numeric values (must be non-empty).

    Returns:
        Tuple ``(N, sum, mean, SS)``: the count, the total (float), the
        arithmetic mean, and the sum of squares (float).

    Raises:
        ZeroDivisionError: if *inlist* is empty.
    """
    # Use the builtins instead of a C-style index loop; the original also
    # shadowed the builtin name ``sum`` with a local accumulator.
    n = len(inlist)
    total = float(sum(inlist))
    ss = float(sum(x ** 2 for x in inlist))
    mean = total / n
    return n, total, mean, ss
27,493
def gen_filelist(infiles, tmpd):
    """Write all audio files to a temporary text document for ffmpeg

    Args:
        infiles: iterable of audio file paths.
        tmpd: temporary directory path (must support the ``/`` operator).

    Returns the path of that text document."""
    filename = tmpd/"files.txt"
    with open(filename, "w") as f:
        for file in infiles:
            # Escape apostrophes for the ffmpeg concat demuxer: close the
            # quote, emit an escaped quote, reopen ('\''). str.replace does
            # in one call what the original split/join dance did.
            escaped = str(file).replace("'", "'\\''")
            # Write the file line
            f.write("file '" + escaped + "'\n")
    return filename
27,494
def onpy_register(unit, *args):
    """
    Python knows about which built-ins can be imported, due to their
    registration in the Assembly or at the start of the interpreter.

    All modules from the sources listed in PY_SRCS() are registered
    automatically.

    To register the modules from the sources in the SRCS(), you need to use
    PY_REGISTER().
    """
    py3 = is_py3(unit)

    for name in args:
        if '=' in name:
            # "full.module.path=shortname" form: register under the full name
            # and remap the C init symbol from the short to the mangled name.
            fullname, shortname = name.split('=', 1)
            assert '.' not in shortname, shortname
            assert fullname == shortname or fullname.endswith('.' + shortname), fullname
            py_register(unit, fullname, py3)
            # Python 3 extension modules export PyInit_<name>; Python 2 uses init<name>.
            if py3:
                unit.oncflags(['-DPyInit_{}=PyInit_{}'.format(shortname, mangle(fullname))])
            else:
                unit.oncflags(['-Dinit{}=init{}'.format(shortname, mangle(fullname))])
        else:
            py_register(unit, name, py3)
27,495
def _create_tables(driver, create_all, timeout):
    """
    create tables

    Args:
        driver: sqlalchemy driver
        create_all (function): create_all function that creates all tables
        timeout (int): timeout for transaction

    Returns:
        None
    """
    logger.info('Creating tables (timeout: %d)', timeout)
    with driver.session_scope() as session:
        connection = session.connection()
        logger.info("Setting lock_timeout to %d", timeout)

        timeout_str = '{}s'.format(int(timeout+1))
        # NOTE(review): SET LOCAL with a bound parameter relies on the DB
        # driver doing client-side interpolation — confirm this works with
        # the configured driver.
        connection.execute("SET LOCAL lock_timeout = %s;", timeout_str)

        create_all(connection)
        update_version(driver, session)
27,496
def check_envs():
    """Checks environment variables.

    The MONGODB_PWD is a needed variable to enable mongodb connection.

    Returns:
        bool: If all needed environment variables are set.
    """
    # A missing or empty MONGODB_PWD both count as "not set".
    return bool(os.environ.get('MONGODB_PWD', False))
27,497
def VMMemoryLower() -> tvm.ir.transform.Pass:
    """Perform memory lowering. Lowers the relax.builtin.alloc_tensor intrinsic to VM intrinsics.

    Returns
    -------
    ret: tvm.ir.transform.Pass
    """
    # Thin wrapper over the C++ implementation exposed through the FFI.
    return _ffi_api.VMMemoryLower()
27,498
def subnet_no_ip_space_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
    """[VPC.4] Subnets should be monitored for available IP address space

    Iterates every subnet of every VPC in the region and yields one ASFF
    finding per subnet: FAILED when 1 or fewer IP addresses remain,
    PASSED (archived) otherwise.
    """
    # ISO Time
    iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
    vpc = describe_vpcs(cache=cache)
    myVpcs = vpc["Vpcs"]
    for vpcs in myVpcs:
        vpcId = str(vpcs["VpcId"])
        # Get subnets for the VPC
        for snet in ec2.describe_subnets(Filters=[{'Name': 'vpc-id','Values': [vpcId]}])["Subnets"]:
            snetArn = str(snet["SubnetArn"])
            snetId = str(snet["SubnetId"])
            # 1 or fewer usable addresses left: raise a failing finding.
            if int(snet["AvailableIpAddressCount"]) <= 1:
                # This is a failing check
                finding = {
                    "SchemaVersion": "2018-10-08",
                    "Id": snetArn + "/subnet-map-no-more-ips-check",
                    "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                    "GeneratorId": snetArn,
                    "AwsAccountId": awsAccountId,
                    "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                    "FirstObservedAt": iso8601Time,
                    "CreatedAt": iso8601Time,
                    "UpdatedAt": iso8601Time,
                    "Severity": {"Label": "MEDIUM"},
                    "Confidence": 99,
                    "Title": "[VPC.4] Subnets should be monitored for available IP address space",
                    "Description": "Subnet "
                    + snetId
                    + " does not have any available IP address space, consider terminating unncessary workloads or expanding CIDR capacity to avoid availability losses. Refer to the remediation instructions if this configuration is not intended",
                    "Remediation": {
                        "Recommendation": {
                            "Text": "For information on IP addressing refer to the IP Addressing in your VPC section of the Amazon Virtual Private Cloud User Guide",
                            "Url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html"
                        }
                    },
                    "ProductFields": {"Product Name": "ElectricEye"},
                    "Resources": [
                        {
                            "Type": "AwsEc2Subnet",
                            "Id": snetArn,
                            "Partition": awsPartition,
                            "Region": awsRegion,
                            "Details": {
                                "Other": {
                                    "VpcId": vpcId,
                                    "SubnetId": snetId
                                }
                            }
                        }
                    ],
                    "Compliance": {
                        "Status": "FAILED",
                        "RelatedRequirements": [
                            "NIST CSF ID.BE-5",
                            "NIST CSF PR.PT-5",
                            "NIST SP 800-53 CP-2",
                            "NIST SP 800-53 CP-11",
                            "NIST SP 800-53 SA-13",
                            "NIST SP 800-53 SA14",
                            "AICPA TSC CC3.1",
                            "AICPA TSC A1.2",
                            "ISO 27001:2013 A.11.1.4",
                            "ISO 27001:2013 A.17.1.1",
                            "ISO 27001:2013 A.17.1.2",
                            "ISO 27001:2013 A.17.2.1",
                        ]
                    },
                    "Workflow": {"Status": "NEW"},
                    "RecordState": "ACTIVE"
                }
                yield finding
            else:
                # Passing finding: mirror of the failure record, auto-archived.
                finding = {
                    "SchemaVersion": "2018-10-08",
                    "Id": snetArn + "/subnet-map-no-more-ips-check",
                    "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                    "GeneratorId": snetArn,
                    "AwsAccountId": awsAccountId,
                    "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                    "FirstObservedAt": iso8601Time,
                    "CreatedAt": iso8601Time,
                    "UpdatedAt": iso8601Time,
                    "Severity": {"Label": "INFORMATIONAL"},
                    "Confidence": 99,
                    "Title": "[VPC.4] Subnets should be monitored for available IP address space",
                    "Description": "Subnet "
                    + snetId
                    + " has available IP address space, well, at least 2 lol...",
                    "Remediation": {
                        "Recommendation": {
                            "Text": "For information on IP addressing refer to the IP Addressing in your VPC section of the Amazon Virtual Private Cloud User Guide",
                            "Url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html"
                        }
                    },
                    "ProductFields": {"Product Name": "ElectricEye"},
                    "Resources": [
                        {
                            "Type": "AwsEc2Subnet",
                            "Id": snetArn,
                            "Partition": awsPartition,
                            "Region": awsRegion,
                            "Details": {
                                "Other": {
                                    "VpcId": vpcId,
                                    "SubnetId": snetId
                                }
                            }
                        }
                    ],
                    "Compliance": {
                        "Status": "PASSED",
                        "RelatedRequirements": [
                            "NIST CSF ID.BE-5",
                            "NIST CSF PR.PT-5",
                            "NIST SP 800-53 CP-2",
                            "NIST SP 800-53 CP-11",
                            "NIST SP 800-53 SA-13",
                            "NIST SP 800-53 SA14",
                            "AICPA TSC CC3.1",
                            "AICPA TSC A1.2",
                            "ISO 27001:2013 A.11.1.4",
                            "ISO 27001:2013 A.17.1.1",
                            "ISO 27001:2013 A.17.1.2",
                            "ISO 27001:2013 A.17.2.1",
                        ]
                    },
                    "Workflow": {"Status": "RESOLVED"},
                    "RecordState": "ARCHIVED"
                }
                yield finding
27,499