Columns: content (string, length 22 to 815k), id (int64, 0 to 4.91M)
def elementwise(op: Callable[..., float], *ds: D) -> NumDict:
    """
    Apply op elementwise to a sequence of numdicts.

    If any numdict in ds has None default, then default is None, otherwise
    the new default is calculated by running op on all defaults.
    """
    keys: set = set()
    keys.update(*ds)

    grouped: dict = {}
    defaults: list = []
    for d in ds:
        defaults.append(d.default)
        for k in keys:
            grouped.setdefault(k, []).append(d[k])

    if any(d is None for d in defaults):
        default = None
    else:
        default = op(defaults)

    return NumDict({k: op(grouped[k]) for k in grouped}, default)
13,800
def test_get_closed_class_vague_meaning_count():
    """Test get_closed_class_vague_meaning_count method."""
    result = discourse_markers.get_closed_class_vague_meaning_count(
        Doc(
            [
                Text("En realidad la pandemia no es mala."),
                Text("En contra de lo que se cree no es tan mortal."),
            ]
        )
    )
    assert result == 2
13,801
import torch
import torch.nn.functional as F


def sigmoid_focal_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    alpha: float = -1,
    gamma: float = 2,
    reduction: str = "none",
) -> torch.Tensor:
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.

    Args:
        inputs: A float tensor of arbitrary shape.
            The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
            classification label for each element in inputs
            (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0, 1) to balance
            positive vs negative examples. Default = -1 (no weighting).
        gamma: Exponent of the modulating factor (1 - p_t) to
            balance easy vs hard examples.
        reduction: 'none' | 'mean' | 'sum'
            'none': No reduction will be applied to the output.
            'mean': The output will be averaged.
            'sum': The output will be summed.

    Returns:
        Loss tensor with the reduction option applied.
    """
    p = torch.sigmoid(inputs)
    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    p_t = p * targets + (1 - p) * (1 - targets)
    loss = ce_loss * ((1 - p_t) ** gamma)

    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss

    if reduction == "mean":
        loss = loss.mean()
    elif reduction == "sum":
        loss = loss.sum()

    return loss
13,802
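A minimal usage sketch for sigmoid_focal_loss above; the tensor shapes are arbitrary choices, not from the original source. alpha=0.25 and gamma=2 are the values suggested in the RetinaNet paper.

import torch

logits = torch.randn(8, 4)                    # raw predictions (pre-sigmoid)
labels = torch.randint(0, 2, (8, 4)).float()  # binary targets

loss = sigmoid_focal_loss(logits, labels, alpha=0.25, gamma=2, reduction="mean")
print(loss.item())  # scalar, since reduction="mean"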
def test_fullImport():
    """Test that we can import the full dataset."""
    tensor, matrix, patient_data = form_tensor()
    assert isinstance(tensor, np.ndarray)
    assert isinstance(matrix, np.ndarray)
    assert tensor.shape[0] == matrix.shape[0]
    assert isinstance(patient_data, pd.DataFrame)
13,803
def test_liquidbulk_01():
    """Test to see if object initialisation works properly."""
    import opentisim

    Smallhydrogen = opentisim.liquidbulk.Vessel(**opentisim.liquidbulk.smallhydrogen_data)
    assert Smallhydrogen.call_size == opentisim.liquidbulk.smallhydrogen_data['call_size']
    assert Smallhydrogen.LOA == opentisim.liquidbulk.smallhydrogen_data['LOA']
    assert Smallhydrogen.draft == opentisim.liquidbulk.smallhydrogen_data['draft']
    assert Smallhydrogen.beam == opentisim.liquidbulk.smallhydrogen_data['beam']
13,804
def arcsin(x):
    """Return the inverse sine or the arcsin.

    INPUTS
        x (Variable object or real number)

    RETURNS
        if x is a Variable, then return a Variable with val and der.
        if x is a real number, then return the value of arcsin(x).

    EXAMPLES
    >>> x = Variable(0, name='x')
    >>> t = arcsin(x)
    >>> print(t.val, t.der['x'])
    0.0 1.0
    """
    try:
        val = np.arcsin(x.val)
        ders = defaultdict(float)
        sec_ders = defaultdict(float)
        for key in x.der:
            ders[key] += 1 / ((1 - x.val**2)**0.5) * (x.der[key])
            sec_ders[key] += (x.val * x.der[key]**2 - x.sec_der[key] * (x.val**2 - 1)) / ((1 - x.val**2)**1.5)
        return Variable(val, ders, sec_ders)
    except AttributeError:
        return np.arcsin(x)
13,805
def preprocessing(texts, words, label, coef=0.3, all_tasks=False,
                  include_repeat=True, progressbar=True):
    """The function returns the processed array for the spaCy standard."""
    train = []
    enit = {}
    assert 0 < coef <= 1, f"The argument must be in the range (0 < coef <= 1) --> {coef}"
    if all_tasks:
        words_f = unique(flatten(words, coef))
        if coef == 1:
            include_repeat = False
    else:
        assert len(texts) == len(words), f"Data must be same length: ({len(texts)}, {len(words)})"
        print("\n\033[31mcoef is ignored because you are using all_tasks=False")
    for i in tqdm(range(len(texts)), disable=not progressbar):
        if all_tasks:
            if include_repeat:
                words_f = unique(chain(words_f, words[i]))
            enit['entities'] = to_format(texts[i], words_f, label)
        else:
            enit['entities'] = to_format(texts[i], words[i], label)
        train.append((texts[i], deepcopy(enit)))
    return train
13,806
def rotate(
    input,
    angle,
    axes=(1, 0),
    reshape=True,
    output=None,
    order=3,
    mode="constant",
    cval=0.0,
    prefilter=True,
    *,
    allow_float32=True,
):
    """Rotate an array.

    The array is rotated in the plane defined by the two axes given by the
    ``axes`` parameter using spline interpolation of the requested order.

    Args:
        input (cupy.ndarray): The input array.
        angle (float): The rotation angle in degrees.
        axes (tuple of 2 ints): The two axes that define the plane of
            rotation. Default is the first two axes.
        reshape (bool): If ``reshape`` is True, the output shape is adapted
            so that the input array is contained completely in the output.
            Default is True.
        output (cupy.ndarray or ~cupy.dtype): The array in which to place the
            output, or the dtype of the returned array.
        order (int): The order of the spline interpolation. Default is 3
            (matching the signature). The order has to be in the range 0-5.
        mode (str): Points outside the boundaries of the input are filled
            according to the given mode (``'constant'``, ``'nearest'``,
            ``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
        cval (scalar): Value used for points outside the boundaries of the
            input if ``mode='constant'`` or ``mode='opencv'``. Default is 0.0.
        prefilter (bool): It is not used yet. It just exists for compatibility
            with :mod:`scipy.ndimage`.

    Returns:
        cupy.ndarray or None: The rotated input.

    Notes
    -----
    This implementation handles boundary modes 'wrap' and 'reflect' correctly,
    while SciPy prior to release 1.6.0 does not. So, if comparing to older
    SciPy, some disagreement near the borders may occur.

    For ``order > 1`` with ``prefilter == True``, the spline prefilter
    boundary conditions are implemented correctly only for modes 'mirror',
    'reflect' and 'grid-wrap'.

    .. seealso:: :func:`scipy.ndimage.rotate`
    """
    _check_parameter("rotate", order, mode)

    if mode == "opencv":
        mode = "_opencv_edge"

    input_arr = input
    axes = list(axes)
    if axes[0] < 0:
        axes[0] += input_arr.ndim
    if axes[1] < 0:
        axes[1] += input_arr.ndim
    if axes[0] > axes[1]:
        axes = [axes[1], axes[0]]
    if axes[0] < 0 or input_arr.ndim <= axes[1]:
        raise ValueError("invalid rotation plane specified")

    ndim = input_arr.ndim
    rad = numpy.deg2rad(angle)
    sin = math.sin(rad)
    cos = math.cos(rad)

    # determine offsets and output shape as in scipy.ndimage.rotate
    rot_matrix = numpy.array([[cos, sin], [-sin, cos]])

    img_shape = numpy.asarray(input_arr.shape)
    in_plane_shape = img_shape[axes]
    if reshape:
        # Compute transformed input bounds
        iy, ix = in_plane_shape
        out_bounds = rot_matrix @ [[0, 0, iy, iy], [0, ix, 0, ix]]
        # Compute the shape of the transformed input plane
        out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int)
    else:
        out_plane_shape = img_shape[axes]

    out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
    in_center = (in_plane_shape - 1) / 2

    output_shape = img_shape
    output_shape[axes] = out_plane_shape
    output_shape = tuple(output_shape)

    matrix = numpy.identity(ndim)
    matrix[axes[0], axes[0]] = cos
    matrix[axes[0], axes[1]] = sin
    matrix[axes[1], axes[0]] = -sin
    matrix[axes[1], axes[1]] = cos

    offset = numpy.zeros(ndim, dtype=float)
    offset[axes] = in_center - out_center

    matrix = cupy.asarray(matrix)
    offset = cupy.asarray(offset)

    return affine_transform(
        input,
        matrix,
        offset,
        output_shape,
        output,
        order,
        mode,
        cval,
        prefilter,
        allow_float32=allow_float32,
    )
13,807
def default():
    """Run all default tasks to test, and build lib and docs."""
    pass
13,808
def generate_url_fragment(title, blog_post_id):
    """Generates the url fragment for a blog post from the title of the blog
    post.

    Args:
        title: str. The title of the blog post.
        blog_post_id: str. The unique blog post ID.

    Returns:
        str. The url fragment of the blog post.
    """
    lower_title = title.lower()
    hyphenated_title = lower_title.replace(' ', '-')
    lower_id = blog_post_id.lower()
    return hyphenated_title + '-' + lower_id
13,809
def update_cluster(cluster, cluster_args, args, api=None, path=None,
                   session_file=None):
    """Updates cluster properties."""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating cluster. %s\n" % get_url(cluster))
    log_message(message, log_file=session_file, console=args.verbosity)
    cluster = api.update_cluster(cluster, cluster_args)
    check_resource_error(cluster, "Failed to update cluster: %s"
                         % cluster['resource'])
    cluster = check_resource(cluster, api.get_cluster, query_string=FIELDS_QS)
    if is_shared(cluster):
        message = dated("Shared cluster link. %s\n" %
                        get_url(cluster, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, cluster)
    return cluster
13,810
def calculate_FLOPs_scale(model, input_size, multiply_adds=False, use_gpu=False):
    """
    forked from FishNet @ github
    https://www.zhihu.com/question/65305385/answer/256845252
    https://blog.csdn.net/u011501388/article/details/81061024
    https://blog.csdn.net/xidaoliang/article/details/88191910

    no bias: K^2 * IO * HW
    multiply_adds: False in FishNet paper, but True in DenseNet paper
    """
    assert isinstance(model, torch.nn.Module)
    USE_GPU = use_gpu and torch.cuda.is_available()

    def conv_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        kernel_ops = self.kernel_size[0] * self.kernel_size[1] * \
            (self.in_channels / self.groups) * (2 if multiply_adds else 1)
        bias_ops = 1 if self.bias is not None else 0
        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_height * output_width
        list_conv.append(flops)

    def deconv_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        kernel_ops = self.kernel_size[0] * self.kernel_size[1] * \
            (self.in_channels / self.groups) * (2 if multiply_adds else 1)
        bias_ops = 1 if self.bias is not None else 0
        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_height * output_width
        list_deconv.append(flops)

    def linear_hook(self, input, output):
        batch_size = input[0].size(0) if input[0].dim() == 2 else 1
        weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
        # guard for Linear layers created with bias=False
        bias_ops = self.bias.nelement() if self.bias is not None else 0
        flops = batch_size * (weight_ops + bias_ops)
        list_linear.append(flops)

    def bn_hook(self, input, output):
        list_bn.append(input[0].nelement())

    def relu_hook(self, input, output):
        list_relu.append(input[0].nelement())

    def pooling_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        kernel_ops = self.kernel_size * self.kernel_size
        bias_ops = 0
        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_height * output_width
        list_pooling.append(flops)

    def foo(net):
        childrens = list(net.children())
        if not childrens:
            if isinstance(net, torch.nn.Conv2d):
                net.register_forward_hook(conv_hook)
            if isinstance(net, torch.nn.ConvTranspose2d):
                net.register_forward_hook(deconv_hook)
            if isinstance(net, torch.nn.Linear):
                net.register_forward_hook(linear_hook)
            if isinstance(net, torch.nn.BatchNorm2d):
                net.register_forward_hook(bn_hook)
            if isinstance(net, torch.nn.ReLU):
                net.register_forward_hook(relu_hook)
            if isinstance(net, (torch.nn.MaxPool2d, torch.nn.AvgPool2d)):
                net.register_forward_hook(pooling_hook)
            return
        for c in childrens:
            foo(c)

    list_conv, list_deconv, list_bn, list_relu, list_linear, list_pooling = [], [], [], [], [], []
    foo(model)
    input = torch.rand(2, 3, input_size, input_size)
    if USE_GPU:
        input = input.cuda()
        model = model.cuda()
    _ = model(input)

    total_flops = (sum(list_conv) + sum(list_deconv) + sum(list_linear) +
                   sum(list_bn) + sum(list_relu) + sum(list_pooling))
    # the dummy forward pass uses a batch of 2, hence the division by 2
    print(' + Number of FLOPs: %.5fG' % (total_flops / 1e9 / 2))
13,811
def save_Ps_and_Ts(data_filename, Fs=1000, f_range=(6, 12)):
    """Saves the indices corresponding to oscillatory peaks and troughs
    into a new numpy file."""
    # Load data
    x = np.load(data_filename)

    # Calculate peaks and troughs
    Ps, Ts = nonshape.findpt(x, f_range, Fs=Fs)

    # Save peaks and troughs
    save_dict = {'Ps': Ps, 'Ts': Ts}
    for key in save_dict.keys():
        filename_save = './out/' + key + '_' + os.path.basename(data_filename)
        np.save(filename_save, save_dict[key])
13,812
def calcfirst(dfas, first, name):
    """Recursive function that mutates first."""
    dfa = dfas[name]
    first[name] = None  # dummy to detect left recursion
    state = dfa[0]
    totalset = {}
    overlapcheck = {}
    for label, _ in state.arcs.items():
        if label in dfas:
            if label in first:
                fset = first[label]
                if fset is None:
                    raise ValueError("recursion for rule %r" % name)
            else:
                calcfirst(dfas, first, label)
                fset = first[label]
            totalset.update(fset)
            overlapcheck[label] = fset
        else:
            totalset[label] = 1
            overlapcheck[label] = {label: 1}
    inverse = {}
    for label, itsfirst in overlapcheck.items():
        for symbol in itsfirst:
            if symbol in inverse:
                raise ValueError("rule %s is ambiguous; %s is in the"
                                 " first sets of %s as well as %s" %
                                 (name, symbol, label, inverse[symbol]))
            inverse[symbol] = label
    first[name] = totalset
13,813
def get_post_by_user(user_id: int, database: Session) -> List[Post]:
    """Return all posts by the given user, newest first."""
    posts = database.query(Post).filter(
        Post.user == user_id).order_by(Post.id.desc()).all()
    logger.info("The following contributions were returned from the database: %s", posts)
    return posts
13,814
def unformat_bundle(formattedBundle):
    """
    Converts a push-ready bundle into a structured object by changing
    stringified yaml of 'customResourceDefinitions', 'clusterServiceVersions',
    and 'packages' into lists of objects.
    Undoing the format helps simplify bundle validation.

    :param formattedBundle: A push-ready bundle
    """
    bundle = BuildCmd()._get_empty_bundle()
    if 'data' not in formattedBundle:
        return bundle

    if 'customResourceDefinitions' in formattedBundle['data']:
        customResourceDefinitions = yaml.safe_load(
            formattedBundle['data']['customResourceDefinitions'])
        if customResourceDefinitions:
            bundle['data']['customResourceDefinitions'] = customResourceDefinitions

    if 'clusterServiceVersions' in formattedBundle['data']:
        clusterServiceVersions = yaml.safe_load(
            formattedBundle['data']['clusterServiceVersions'])
        if clusterServiceVersions:
            bundle['data']['clusterServiceVersions'] = clusterServiceVersions

    if 'packages' in formattedBundle['data']:
        packages = yaml.safe_load(formattedBundle['data']['packages'])
        if packages:
            bundle['data']['packages'] = packages

    return bundle
13,815
def supports_box_chars() -> bool:
    """Check if the encoding supports Unicode box characters."""
    return all(map(can_encode, "│─└┘┌┐"))
13,816
def calculate_intersection_over_union(box_data, prior_boxes):
    """Calculate intersection over union of box_data with respect to
    prior_boxes.

    Arguments:
        box_data: numpy array with shape (4) indicating x_min, y_min,
            x_max and y_max coordinates of the bounding box.
        prior_boxes: numpy array with shape (num_boxes, 4).

    Returns:
        intersections_over_unions: numpy array with shape (num_boxes) which
            corresponds to the intersection over unions of box_data with
            respect to all prior_boxes.
    """
    x_min = box_data[0]
    y_min = box_data[1]
    x_max = box_data[2]
    y_max = box_data[3]
    prior_boxes_x_min = prior_boxes[:, 0]
    prior_boxes_y_min = prior_boxes[:, 1]
    prior_boxes_x_max = prior_boxes[:, 2]
    prior_boxes_y_max = prior_boxes[:, 3]

    # calculating the intersection
    intersections_x_min = np.maximum(prior_boxes_x_min, x_min)
    intersections_y_min = np.maximum(prior_boxes_y_min, y_min)
    intersections_x_max = np.minimum(prior_boxes_x_max, x_max)
    intersections_y_max = np.minimum(prior_boxes_y_max, y_max)
    intersected_widths = intersections_x_max - intersections_x_min
    intersected_heights = intersections_y_max - intersections_y_min
    intersected_widths = np.maximum(intersected_widths, 0)
    intersected_heights = np.maximum(intersected_heights, 0)
    intersections = intersected_widths * intersected_heights

    # calculating the union
    prior_box_widths = prior_boxes_x_max - prior_boxes_x_min
    prior_box_heights = prior_boxes_y_max - prior_boxes_y_min
    prior_box_areas = prior_box_widths * prior_box_heights
    box_width = x_max - x_min
    box_height = y_max - y_min
    ground_truth_area = box_width * box_height
    unions = prior_box_areas + ground_truth_area - intersections

    intersection_over_union = intersections / unions
    return intersection_over_union
13,817
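A worked example for calculate_intersection_over_union with hand-checkable numbers:

import numpy as np

box = np.array([0.0, 0.0, 2.0, 2.0])  # area 4
priors = np.array([
    [1.0, 1.0, 3.0, 3.0],  # overlap area 1, union 4 + 4 - 1 = 7 -> IoU 1/7
    [0.0, 0.0, 2.0, 2.0],  # identical box -> IoU 1.0
])
print(calculate_intersection_over_union(box, priors))  # [0.14285714 1.0]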
def get_report(analytics, start_date, end_date='today'):
    """Queries the Analytics Reporting API V4.

    Args:
        analytics: An authorized Analytics Reporting API V4 service object.
        start_date: Start of the reporting date range.
        end_date: End of the reporting date range. Defaults to 'today'.
    Returns:
        The Analytics Reporting API V4 response.
    """
    return analytics.reports().batchGet(
        body={
            'reportRequests': [
                {
                    'viewId': VIEW_ID,
                    'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
                    'metrics': [{'expression': 'ga:userTimingValue'}],
                    'dimensions': [{'name': 'ga:userTimingVariable'}]
                }]
        }
    ).execute()
13,818
def write_pinout_xml(pinout, out_xml=None):
    """
    Write the pinout dict to xml format with no attributes.
    This is verbose but is the preferred xml format.
    """
    ar = []
    for k in sort_alpha_num(pinout.keys()):
        d = pinout[k]
        d['number'] = k
        ar.append(d)

    my_item_func = lambda x: 'pin'
    x = dicttoxml(ar, custom_root='pin_map', item_func=my_item_func, attr_type=False)

    reparsed = minidom.parseString(x)
    xml_pretty = reparsed.toprettyxml(indent="  ")

    if out_xml is not None:
        with open(out_xml, "w") as fo:
            fo.write(xml_pretty)

    return xml_pretty
13,819
def get_solution(request, level=1):
    """Returns a render of answers.html"""
    context = RequestContext(request)
    cheat_message = '\\text{Ulovlig tegn har blitt brukt i svar}'
    required_message = '\\text{Svaret ditt har ikke utfylt alle krav}'
    render_to = 'game/answer.html'
    if request.method == 'POST':
        form = QuestionForm(request.POST)
        if form.is_valid():
            form_values = form.process()
            template = Template.objects.get(pk=form_values['primary_key'])
            user_answer = form_values['user_answer']
            try:
                disallowed = json.loads(template.disallowed)
            except ValueError:
                disallowed = []
            try:
                required = json.loads(template.required)
            except ValueError:
                required = []
            context_dict = make_answer_context_dict(form_values)
            if (cheat_check(user_answer, disallowed, form_values['variable_dictionary'].split('§'))) and \
                    (form_values['template_type'] == 'normal') and (context_dict['user_won']):
                context_dict['answer'] = cheat_message
                return render_to_response(render_to, context_dict, context)
            elif (required_check(user_answer, required, form_values['variable_dictionary'].split('§'))) and \
                    (form_values['template_type'] == 'normal') and (context_dict['user_won']):
                context_dict['answer'] = required_message
                return render_to_response(render_to, context_dict, context)
            if request.is_ajax():
                new_user_rating, new_star = change_level_rating(
                    template, request.user, context_dict['user_won'],
                    form_values['template_type'], level)
                context_dict['chapter_id'] = request.POST['chapter_id']
                context_dict['ulp'] = int(new_user_rating)
                context_dict['new_star'] = new_star
                context_dict['stars'] = get_user_stars_for_level(request.user, Level.objects.get(pk=level))
                return render_to_response(render_to, context_dict, context)
            else:
                change_elo(template, request.user, context_dict['user_won'], form_values['template_type'])
                return render_to_response(render_to, context_dict, context)
        else:
            print(form.errors)
13,820
def build_UNIST_tree():
    """
    This function returns a (linked) binary tree that contains (a simplified
    and fictitious version of) the organisational structure of schools and
    departments at UNIST.

    In particular, this function should return the following tree:

    UNIST
    --Engineering
    ----Management Engineering
    ------Big datastore
    ------Business process management
    ----Materials Engineering
    ------Wood
    ------Plastic
    --Business
    ----Business Administration
    """
    root = LinkedBinaryTree()
13,821
def get_work_log_queue():
    """The json format is::

        {'func': 'transform',
         'kw': {
             ...  # same as the preceding task_queue
         },
         "runtime": {               # queue runtime information
             'created': 12323423,   # time the job entered the original queue
             'queue': 'q01',        # which atomic queue it is in
             'start': 123213123,    # transform start time
             'end': 123213123,      # transform end time
             'worker': 'w01',       # transformer (worker) name
             'thread': '131231',
             'return': -1,          # returned error code, 0 means success
             'reason': 'failure reason'  # detailed reason
         }
        }
    """
    work_log_queue = "ztq:queue:worker_log"
    return get_limit_queue(work_log_queue, 200)
13,822
def azure_project_train_status_handler(**kwargs):
    """Listen on azure_training.models.Project change.

    If a Project is created, create a Train (Training Status) as well.
    """
    logger.info("Azure Project changed.")
    logger.info("Checking...")
    if 'sender' not in kwargs or kwargs['sender'] != Project:
        logger.info("'sender' not in kwargs or kwargs['sender'] != Project")
        logger.info("Nothing to do")
        return
    if 'instance' not in kwargs:
        logger.info("'instance' not in kwargs")
        logger.info("Nothing to do")
        return
    instance = kwargs['instance']
    if Train.objects.filter(project_id=instance.id).count() < 1:
        Train.objects.update_or_create(
            project_id=instance.id,
            defaults={
                "status": "ok",
                "log": "Status : Has not configured",
                "performance": ""
            },
        )
13,823
def is_first_buy(ka, ka1, ka2=None, pf=False):
    """Identify a first-buy point ("一买") at a given level.

    Note: if ``ka1`` (the level above) does not exist, a first buy at this
    level cannot be identified and "无操作" (no action) is returned!!!

    First-buy detection logic:
    1) required: the last segment mark and the last stroke mark of the upper
       level coincide and form a bottom fractal;
    2) required: the last downward segment of the upper level contains at
       least 6 stroke marks, and the last segment mark of this level is a
       bottom fractal;
    3) required: downward segment divergence or downward stroke divergence
       at this level;
    4) auxiliary: downward segment divergence or downward stroke divergence
       at the level below.

    :param ka: KlineAnalyze for this level
    :param ka1: KlineAnalyze for the level above
    :param ka2: KlineAnalyze for the level below, defaults to None
    :param pf: bool, short for "precision first"; controls whether the
        high-precision mode is used. Defaults to False, i.e. the high-recall
        mode. In high-precision mode, the auxiliary condition is fully used
        to improve accuracy.
    :return: dict
    """
    detail = {
        "标的代码": ka.symbol,
        "操作提示": "无操作",
        "出现时间": None,
        "基准价格": None,
        "其他信息": None
    }
    if not isinstance(ka1, KlineAnalyze):
        return detail

    # The last segment mark and last stroke mark of the upper level coincide
    # and form a bottom fractal.
    if len(ka1.xd) >= 2 and ka1.xd[-1]['xd'] == ka1.bi[-1]['bi'] \
            and ka1.xd[-1]['fx_mark'] == ka1.bi[-1]['fx_mark'] == 'd':
        bi_inside = [x for x in ka1.bi if ka1.xd[-2]['dt'] <= x['dt'] <= ka1.xd[-1]['dt']]
        # The last downward segment of the upper level contains at least 6
        # stroke marks, and the last segment mark of this level is a bottom fractal.
        if len(bi_inside) >= 6 and ka.xd[-1]['fx_mark'] == 'd':
            # Downward segment divergence or downward stroke divergence at this level.
            if (ka.xd_bei_chi() or (ka.bi[-1]['fx_mark'] == 'd' and ka.bi_bei_chi())):
                detail['操作提示'] = "一买"
                detail['出现时间'] = ka.xd[-1]['dt']
                detail['基准价格'] = ka.xd[-1]['xd']

    if pf and detail["操作提示"] == "一买" and isinstance(ka2, KlineAnalyze):
        # Segment divergence or stroke divergence at the level below.
        if not ((ka2.xd[-1]['fx_mark'] == 'd' and ka2.xd_bei_chi()) or
                (ka2.bi[-1]['fx_mark'] == 'd' and ka2.bi_bei_chi())):
            detail['操作提示'] = "无操作"
    return detail
13,824
def AddInitialRefugees(e, d, loc):
    """Add the initial refugees to a location, using the location name."""
    num_refugees = int(d.get_field(loc.name, 0, FullInterpolation=True))
    for i in range(0, num_refugees):
        e.addAgent(location=loc)
13,825
def preprocess(path, l_pass=0.7, h_pass=0.01, bandpass=True, short_ch_reg=False,
               tddr=True, negative_correlation=False, verbose=False, return_all=False):
    """
    Load raw data and preprocess
    :param str path: path to the raw data
    :param float l_pass: low pass frequency
    :param float h_pass: high pass frequency
    :param bool bandpass: apply bandpass filter
    :param bool short_ch_reg: apply short channel regression
    :param bool tddr: apply tddr
    :param bool negative_correlation: apply negative correlation
    :param bool verbose: print progress
    :return: preprocessed data
    """
    if verbose:
        ic("Loading ", path)
    raw_intensity = mne.io.read_raw_snirf(path, preload=True)
    step_od = mne.preprocessing.nirs.optical_density(raw_intensity)

    # sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od, l_freq=0.7, h_freq=1.5)
    # raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5))

    if verbose:
        ic("Apply short channel regression.")
    if short_ch_reg:
        step_od = mne_nirs.signal_enhancement.short_channel_regression(step_od)

    if verbose:
        ic("Do temporal derivative distribution repair on:", step_od)
    if tddr:
        step_od = mne.preprocessing.nirs.tddr(step_od)

    if verbose:
        ic("Convert to haemoglobin with the modified beer-lambert law.")
    step_haemo = beer_lambert_law(step_od, ppf=6)

    if verbose:
        ic("Apply further data cleaning techniques and extract epochs.")
    if negative_correlation:
        step_haemo = mne_nirs.signal_enhancement.enhance_negative_correlation(
            step_haemo)

    if not return_all:
        if verbose:
            ic("Separate the long channels and short channels.")
        short_chs = get_short_channels(step_haemo)
        step_haemo = get_long_channels(step_haemo)

    if verbose:
        ic("Bandpass filter on:", step_haemo)
    if bandpass:
        step_haemo = step_haemo.filter(
            h_pass, l_pass, h_trans_bandwidth=0.3, l_trans_bandwidth=h_pass * 0.25)

    return step_haemo
13,826
def geomapi_To2d(*args):
    """
    To intersect a curve and a surface.

    This function builds (in the parametric space of the plane P) a 2D curve
    equivalent to the 3D curve C. The 3D curve C is considered to be located
    in the plane P.

    Warning: The 3D curve C must be of one of the following types:
    - a line
    - a circle
    - an ellipse
    - a hyperbola
    - a parabola
    - a Bezier curve
    - a BSpline curve

    Exceptions: Standard_NoSuchObject if C is not a defined type curve.

    :param C:
    :type C: Handle_Geom_Curve &
    :param P:
    :type P: gp_Pln
    :rtype: Handle_Geom2d_Curve
    """
    return _GeomAPI.geomapi_To2d(*args)
13,827
def validate(epoch, model, criterion, data_loader, tb_writer, args):
    """Routine to validate an epoch."""
    model.eval()

    # dataset loop
    pbar = tqdm(enumerate(data_loader))
    loss = 0.0
    for batch_id, batch_data in pbar:
        # retrieve data from loader and copy data to device
        images = batch_data['images'].to(args.device)
        labels = batch_data['labels'].to(args.device)

        # inference model
        outputs = model(images)

        # compute the loss
        loss += criterion(outputs, labels)

        # print statistics at the end of the validation epoch
        if args.tensorboard:
            assert tb_writer is not None, "ERROR: tb_writer is None"
            global_step = len(data_loader) * epoch + batch_id
            tb_writer.add_scalar('val/loss', loss.detach(), global_step)

        # update logger bar
        pbar.set_description("## VALIDATION ## Epoch: {0} Batch: {1}/{2} Loss: {3:.4f}"
                             .format(epoch, batch_id, len(data_loader), loss.detach()))
13,828
def play(
    q_values, env, num_episodes, grid_cells, state_bounds,
    suppress_print=False, episode_length=None
):
    """Renders gym environment under greedy policy.

    The gym environment will be rendered and executed according to the greedy
    policy induced by the state-action value function.

    NOTE: After you use an environment to play you'll need to make a new
    environment instance to use it again because the close function is called.

    Args:
        q_values (np.ndarray): State-action values, which is of dimension
            (grid_cells, env.action_space.n).
        env (gym.Env): Environment to interact with that hasn't been closed yet.
        num_episodes (int): How many episodes to run for.
        grid_cells (tuple of ints): the ith value is the number of grid_cells
            for ith dimension of state.
        state_bounds (list of tuples): the ith tuple contains the min and max
            value for ith dimension of state.
        suppress_print (bool, optional): Suppress print statements about
            current episode if True. Defaults to False.
        episode_length (int, optional): #timesteps that counts as solving an
            episode, also acts as max episode timesteps. Defaults to None.
    """
    for i in range(num_episodes):
        obv = env.reset()
        t = 0
        done = False
        while not done:
            if episode_length and t >= episode_length:
                break
            state = state_to_index(grid_cells, state_bounds, obv)
            action = select_action(q_values, state, env, epsilon=0)
            obv, reward, done, _ = env.step(action)
            env.render()
            t += 1
        if not suppress_print:
            print("episode", i, "lasted", t, "timesteps.")
    # This is required to prevent the script from crashing after closing the
    # window.
    env.close()
13,829
def get_object_list():
    """Returns the object name list for APC2015.

    Args:
        None.

    Returns:
        objects (list): List of object name.
    """
    pkg_path = rospkg.RosPack().get_path(PKG)
    yaml_file = osp.join(pkg_path, 'data/object_list.yml')
    with open(yaml_file) as f:
        # safe_load avoids arbitrary object construction from the YAML file
        objects = yaml.safe_load(f)
    return objects
13,830
def write_spec_to_h5(specfile, h5file, h5path='/',
                     mode="a", overwrite_data=False,
                     link_type="hard", create_dataset_args=None):
    """Write content of a SpecFile in a HDF5 file.

    :param specfile: Path of input SpecFile or :class:`SpecH5` object
    :param h5file: Path of output HDF5 file or HDF5 file handle
        (`h5py.File` object)
    :param h5path: Target path in HDF5 file in which scan groups are created.
        Default is root (``"/"``)
    :param mode: Can be ``"r+"`` (read/write, file must exist), ``"w"``
        (write, existing file is lost), ``"w-"`` (write, fail if exists) or
        ``"a"`` (read/write if exists, create otherwise).
        This parameter is ignored if ``h5file`` is a file handle.
    :param overwrite_data: If ``True``, existing groups and datasets can be
        overwritten, if ``False`` they are skipped. This parameter is only
        relevant if ``file_mode`` is ``"r+"`` or ``"a"``.
    :param link_type: ``"hard"`` (default) or ``"soft"``
    :param create_dataset_args: Dictionary of args you want to pass to
        ``h5py.File.create_dataset``. This allows you to specify filters and
        compression parameters. Don't specify ``name`` and ``data``.
        These arguments don't apply to scalar datasets.

    The structure of the spec data in an HDF5 file is described in the
    documentation of :mod:`silx.io.spech5`.
    """
    if not isinstance(specfile, SpecH5):
        sfh5 = SpecH5(specfile)
    else:
        sfh5 = specfile

    if not h5path.endswith("/"):
        h5path += "/"

    writer = SpecToHdf5Writer(h5path=h5path,
                              overwrite_data=overwrite_data,
                              link_type=link_type,
                              create_dataset_args=create_dataset_args)

    if not isinstance(h5file, h5py.File):
        # If h5file is a file path, open and close it
        with h5py.File(h5file, mode) as h5f:
            writer.write(sfh5, h5f)
    else:
        writer.write(sfh5, h5file)
13,831
def gtMakeTAKBlobMsg(callsign, text, aesKey=False):
    """
    Assemble an ATAK plugin compatible chat message blob
    (suitable for feeding to gtMakeAPIMsg())
    With optional AES encryption, if a key is provided
    """
    body = (callsign + b': ' + text)[:230]

    # Apply optional encryption (and base64 encoding only for chats)
    if aesKey:
        body = b64encode(aesEncrypt(body, aesKey))

    return gtMakeGTABlobMsg(body, 'A')
13,832
def validate_json_with_extensions(value, rule_obj, path):
    """Performs the above match, but also matches a dict or a list. This is
    just because it seems like you can't match a dict OR a list in pykwalify.
    """
    validate_extensions(value, rule_obj, path)

    if not isinstance(value, (list, dict)):
        raise BadSchemaError("Error at {} - expected a list or dict".format(path))

    def nested_values(d):
        if isinstance(d, dict):
            for v in d.values():
                if isinstance(v, dict):
                    for v_s in v.values():
                        yield v_s
                else:
                    yield v
        else:
            yield d

    if any(isinstance(i, ApproxScalar) for i in nested_values(value)):
        # If this is a request data block
        if not re.search(r"^/stages/\d/(response/body|mqtt_response/json)", path):
            raise BadSchemaError(
                "Error at {} - Cannot use a '!approx' in anything other than an "
                "expected http response body or mqtt response json".format(path)
            )

    return True
13,833
def artists_by_rating(formatter, albums):
    """Returns the artists sorted by decreasing mean album rating.

    Only artists with more than 1 reviewed album are considered.
    """
    artist_tags = set([album["artist_tag"] for album in albums])
    artists = []
    # build the list of artists and compute their ratings
    for artist_tag in artist_tags:
        specific_albums = [x for x in albums if x["artist_tag"] == artist_tag]
        if len(specific_albums) > 1:
            rating = compute_artist_rating([x["rating"] for x in specific_albums])
            artists.append(
                {
                    "artist_tag": artist_tag,
                    "artist": specific_albums[0]["artist"],
                    "rating": rating,
                }
            )
    sorted_artists = sorted(
        artists, key=lambda x: (x["rating"], x["artist"]), reverse=True
    )
    return formatter.parse_list(sorted_artists, formatter.format_artist_rating)
13,834
def st_max(*args):
    """Max function.

    Parameters
    ----------
    x : float, int, MissingValue instance, or None
        (2 or more such inputs allowed)

    Returns
    -------
    max(x1, x2, ...) if any x is non-missing (with missing values ignored).
    Otherwise, MISSING (".") returned.
    """
    if len(args) <= 1:
        raise TypeError("need at least 2 arguments")
    vectors = [a for a in args if isinstance(a, StataVarVals)]
    scalars = [
        a for a in args if not isinstance(a, StataVarVals) and not _is_missing(a)
    ]
    if len(vectors) != 0:
        sca_max = max(scalars) if not len(scalars) == 0 else None
        return StataVarVals([_max(*v, sub_max=sca_max) for v in zip(*vectors)])
    elif len(scalars) == 0:
        return mv
    return max(scalars)
13,835
def build_vocab(data_root: str, dataset: str, data_select_ratio: float,
                vocab_limit_size: int, save_root: str):
    """Build vocab for dataset with random selected client

    Args:
        data_root (str): string path for data saving root
        dataset (str): string of dataset name to build vocab
        data_select_ratio (float): random select clients ratio
        vocab_limit_size (int): limit max number of vocab size
        save_root (str): string of path to save built vocab

    Returns:
        save vocab.pck for dataset
    """
    save_root = Path(save_root)
    save_root.mkdir(parents=True, exist_ok=True)
    save_file_path = save_root / f"{dataset}_vocab.pickle"
    if save_file_path.exists():
        # report the existing file's path (the original passed the Path class here)
        print('There has been a built vocab file for {} dataset in {}, '
              'please delete it before re-building'.format(dataset, save_file_path))
        return
    data_sample = DataSample(dataset=dataset, data_root=data_root,
                             select_ratio=data_select_ratio)
    vocab = Vocab(origin_data_tokens=data_sample.data_token,
                  vocab_limit_size=vocab_limit_size)
    with open(save_file_path, "wb") as save_file:
        pickle.dump(vocab, save_file)
    print('sample data to build vocab for {} dataset is completed!'.format(dataset))
13,836
def is_button_controller(device: Device) -> bool:
    """Return true if the device is a stateless button controller."""
    return (
        CAP_PUSHABLE_BUTTON in device.capabilities
        or CAP_HOLDABLE_BUTTON in device.capabilities
        or CAP_DOUBLE_TAPABLE_BUTTON in device.capabilities
    )
13,837
def gen_acq_noddi(in_file, epi_params, alt_epi_params, readout, readout_alt):
    """
    This is a function to generate the FSL topup acq.txt file

    :param in_file:
    :param epi_params:
    :param alt_epi_params:
    :param readout:
    :param readout_alt:
    :return:
    """
    import numpy as np
    import os
    import nibabel as nb

    out_file = os.path.abspath('acq.txt')
    vols = nb.load(in_file).get_data().shape[-1]
    arr = np.ones([vols, 4])
    for i in range(vols):
        if i < vols / 2:
            if epi_params['enc_dir'] == 'y-':
                arr[i, :] = np.array((0, -1, 0, readout))
            elif epi_params['enc_dir'] == 'y':
                arr[i, :] = np.array((0, 1, 0, readout))
            elif epi_params['enc_dir'] == 'x':
                arr[i, :] = np.array((0, 1, 0, readout))
            elif epi_params['enc_dir'] == 'x-':
                arr[i, :] = np.array((0, -1, 0, readout))
            elif epi_params['enc_dir'] == 'z':
                arr[i, :] = np.array((0, 1, 0, readout))
            elif epi_params['enc_dir'] == 'z-':  # assuming 'z-' was intended; the source repeated 'x-' here
                arr[i, :] = np.array((0, -1, 0, readout))
        else:
            if alt_epi_params['enc_dir_alt'] == 'y-':
                arr[i, :] = np.array((0, -1, 0, readout_alt))
            elif alt_epi_params['enc_dir_alt'] == 'y':
                arr[i, :] = np.array((0, 1, 0, readout_alt))
            elif alt_epi_params['enc_dir_alt'] == 'x':
                arr[i, :] = np.array((0, 1, 0, readout_alt))
            elif alt_epi_params['enc_dir_alt'] == 'x-':
                arr[i, :] = np.array((0, -1, 0, readout_alt))
            elif alt_epi_params['enc_dir_alt'] == 'z':
                arr[i, :] = np.array((0, 1, 0, readout_alt))
            elif alt_epi_params['enc_dir_alt'] == 'z-':  # same assumption as above
                arr[i, :] = np.array((0, -1, 0, readout_alt))
    np.savetxt(out_file, arr)
    return out_file
13,838
def list_parts(bucket, key, upload_id):
    """Lists the parts that have been uploaded for a specific multipart upload.

    This operation must include the upload ID, which you obtain by sending the
    initiate multipart upload request (see CreateMultipartUpload). This
    request returns a maximum of 1,000 uploaded parts. The default number of
    parts returned is 1,000 parts. You can restrict the number of parts
    returned by specifying the max-parts request parameter. If your multipart
    upload consists of more than 1,000 parts, the response returns an
    IsTruncated field with the value of true, and a NextPartNumberMarker
    element. In subsequent ListParts requests you can include the
    part-number-marker query string parameter and set its value to the
    NextPartNumberMarker field value from the previous response.

    See:
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.list_parts

    Request Syntax
    --------------
    response = client.list_parts(
        Bucket='string',
        Key='string',
        MaxParts=123,
        PartNumberMarker=123,
        UploadId='string',
        RequestPayer='requester',
        ExpectedBucketOwner='string'
    )

    Response Syntax
    ---------------
    {
        'AbortDate': datetime(2015, 1, 1),
        'AbortRuleId': 'string',
        'Bucket': 'string',
        'Key': 'string',
        'UploadId': 'string',
        'PartNumberMarker': 123,
        'NextPartNumberMarker': 123,
        'MaxParts': 123,
        'IsTruncated': True|False,
        'Parts': [
            {
                'PartNumber': 123,
                'LastModified': datetime(2015, 1, 1),
                'ETag': 'string',
                'Size': 123
            },
        ],
        'Initiator': {
            'ID': 'string',
            'DisplayName': 'string'
        },
        'Owner': {
            'DisplayName': 'string',
            'ID': 'string'
        },
        'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER'|'DEEP_ARCHIVE'|'OUTPOSTS',
        'RequestCharged': 'requester'
    }

    Parameters
    ----------
    bucket : str
        Name of the S3 bucket
    key : str
        Name of the key for the multipart upload
    upload_id : str
        The unique identifier returned on creation of the multipart upload

    Returns
    -------
    response : dict
        The list_parts response, or None if the call failed
    """
    client = boto3.client("s3")
    response = None
    try:
        response = client.list_parts(
            Bucket=bucket,
            Key=key,
            UploadId=upload_id,
        )
        logger.info(
            f"Listed parts for multipart upload {upload_id} for key {key} in bucket {bucket}"
        )
    except Exception as e:
        logger.error(
            f"Could not list parts for multipart upload {upload_id} for key {key} in bucket {bucket}: {e}"
        )
    return response
13,839
def leap_year():
    """
    This function seeks to return a leap year after user input << integer(4).

    Rules for a leap year:
    As you surely know, due to some astronomical reasons, years may be leap
    or common. The former are 366 days long, while the latter are 365 days
    long. Since the introduction of the Gregorian calendar (in 1582), the
    following rule is used to determine the kind of year:
    --> if the year number isn't divisible by four, it's a common year;
    --> otherwise, if the year number isn't divisible by 100, it's a leap year;
    --> otherwise, if the year number isn't divisible by 400, it's a common year;
    --> otherwise, it's a leap year.

    :return: Year --> Integer
    """
    year = int(input("Enter a year: "))
    mess_1 = 'It\'s a common year!'
    mess_2 = 'It\'s a leap year!'
    if year <= 1582:
        return f'{year} does not fall under Gregorian Calendar!!'
    elif year % 4 != 0:
        return mess_1
    elif year % 100 != 0:
        return mess_2
    elif year % 400 != 0:
        return mess_1
    else:
        return mess_2
13,840
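The same Gregorian cascade as in leap_year above, factored into a pure helper so the rule is easy to test (a sketch, not part of the original):

def is_leap(year: int) -> bool:
    if year % 4 != 0:
        return False   # common year
    if year % 100 != 0:
        return True    # leap year
    return year % 400 == 0

assert is_leap(2000) and is_leap(2024)
assert not is_leap(1900) and not is_leap(2023)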
def parse(files, **kwargs):
    """Parse all BAM files."""
    parsed = []
    if kwargs["meta"].has_field("base_coverage"):
        cov_range = kwargs["meta"].field_meta("base_coverage")["range"]
    else:
        cov_range = [math.inf, -math.inf]
    if kwargs["meta"].has_field("read_coverage"):
        read_cov_range = kwargs["meta"].field_meta("read_coverage")["range"]
    else:
        read_cov_range = [math.inf, -math.inf]
    names = base_names(files)
    for file in names:
        if ".json" in file:
            fields = parse_json_cov(
                file, **kwargs, cov_range=cov_range, read_cov_range=read_cov_range
            )
        else:
            fields = parse_bam(
                file, **kwargs, cov_range=cov_range, read_cov_range=read_cov_range
            )
        if "cov" in fields:
            parsed.append(fields["cov"])
            cov_range = fields["cov_range"]
            if "y" not in kwargs["meta"].plot:
                kwargs["meta"].plot.update({"y": fields["cov_id"]})
        if "read_cov" in fields:
            parsed.append(fields["read_cov"])
            read_cov_range = fields["read_cov_range"]
    return parsed
13,841
def check_latest_version():
    """checks for the latest version of cumulusci from pypi, max once per hour"""
    check = True

    with timestamp_file() as f:
        timestamp = float(f.read() or 0)
    delta = time.time() - timestamp
    check = delta > 3600

    if check:
        try:
            latest_version = get_latest_final_version()
        except requests.exceptions.RequestException as e:
            click.echo("Error checking cci version:", err=True)
            click.echo(str(e), err=True)
            return

        result = latest_version > get_installed_version()
        if result:
            click.echo(
                f"""An update to CumulusCI is available. To install the update, run this command: {get_cci_upgrade_command()}""",
                err=True,
            )
13,842
def test_username_match(string: str) -> None:
    """Test that the username regex matches correct strings."""
    assert USERNAME.fullmatch(string)
13,843
def login(request):
    """Login view for GET requests."""
    logged_in = request.authenticated_userid is not None
    if logged_in:
        return {'logged_in': True,
                'form_enabled': False,
                'status': u'Already logged in',
                'status_type': u'info'}
    status = u''
    status_type = u''
    return {
        'form_enabled': True,
        'status_type': status_type,
        'status': status,
        'logged_in': False,
        'username': request.params.get('username', u''),
    }
13,844
def of_type(_type, value_1, *args) -> bool:
    """
    Check if a collection of values are of the same type.

    Parameters:
        _type (any): The type to check for.
        value_1 (any): The first value to check.
        *args (any): Rest of values to check against given type.

    Returns:
        (bool) whether or not all inputs of given type.
    """
    all_of_type = isinstance(value_1, _type)
    i = len(args)
    while i > 0 and all_of_type:
        all_of_type = isinstance(args[i - 1], _type)
        i -= 1
    return all_of_type
13,845
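Example calls for of_type:

print(of_type(int, 1, 2, 3))       # True
print(of_type(str, "a", "b", 3))   # False: 3 is not a str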
def configuration(parent_package='', top_path=None):
    """[Placeholder].

    Parameters
    ----------
    parent_package :
    top_path :

    Returns
    -------
    configuration :
    """
    build_path = build_mlpack()
    config = Configuration('mlpack', parent_package, top_path)

    libraries = ['mlpack', 'boost_serialization']
    if os.name == 'posix':
        libraries.append('m')

    for pyx in ['_arma_numpy.pyx', '_det.pyx']:
        config.add_extension(
            pyx.split('.')[0],
            sources=[pyx],
            language='c++',
            include_dirs=[numpy.get_include(),
                          os.path.join(build_path, 'include')],  # Needed for arma_numpy.pyx
            library_dirs=[os.path.join(build_path, 'lib')],
            libraries=libraries,
            extra_compile_args=('-DBINDING_TYPE=BINDING_TYPE_PYX '
                                '-std=c++11 -Wall -Wextra -ftemplate-depth=1000 '
                                '-O3 -fopenmp').split(' '),
            extra_link_args=['-fopenmp'],
            undef_macros=[]
        )

    # Cythonize files (i.e. create .cpp files and return cpp sources)
    config.ext_modules = cythonize(config.ext_modules)

    config.add_subpackage('tests')
    return config
13,846
def read_config(path):
    """Reads the Kong config file (YAML)."""
    if path is None:
        raise Exception(
            "empty path provided. please provide a path using `--config=<config.yml>`"
        )

    with open(path, "r") as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            raise exc
13,847
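A usage sketch for read_config, assuming the function and its yaml dependency are in scope; the config content is a made-up example:

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".yml", delete=False) as f:
    f.write("host: localhost\nport: 8001\n")

print(read_config(f.name))  # {'host': 'localhost', 'port': 8001}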
def generate_grid_world(grid, prob, pos_rew, neg_rew, gamma=.9, horizon=100):
    """
    This Grid World generator requires a .txt file to specify the shape of
    the grid world and the cells. There are five types of cells: 'S' is the
    starting position where the agent is; 'G' is the goal state; '.' is a
    normal cell; '*' is a hole, when the agent steps on a hole, it receives a
    negative reward and the episode ends; '#' is a wall, when the agent is
    supposed to step on a wall, it actually remains in its current state. The
    initial states distribution is uniform among all the initial states
    provided. The grid is expected to be rectangular.

    Args:
        grid (str): the path of the file containing the grid structure;
        prob (float): probability of success of an action;
        pos_rew (float): reward obtained in goal states;
        neg_rew (float): reward obtained in "hole" states;
        gamma (float, .9): discount factor;
        horizon (int, 100): the horizon.

    Returns:
        A FiniteMDP object built with the provided parameters.
    """
    grid_map, cell_list = parse_grid(grid)

    p = compute_probabilities(grid_map, cell_list, prob)
    r = compute_reward(grid_map, cell_list, pos_rew, neg_rew)
    mu = compute_mu(grid_map, cell_list)

    return FiniteMDP(p, r, mu, gamma, horizon)
13,848
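A hypothetical grid file for generate_grid_world, using the cell types the docstring defines; the layout and reward values are invented:

grid_txt = (
    "S..*\n"
    ".#.*\n"
    "...G\n"
)
with open("simple_grid.txt", "w") as f:
    f.write(grid_txt)

mdp = generate_grid_world("simple_grid.txt", prob=0.9, pos_rew=1.0, neg_rew=-1.0)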
def sqrt_quadrature_scheme(N_poly, N_poly_log):
    """Returns a quadrature rule that is exact on [0, 1] for p(x) + q(x)sqrt(x)
    for deg(p) <= N_poly and deg(q) <= N_poly_log.
    """
    nodes, weights = sqrt_quadrature_rule(N_poly, N_poly_log)
    return QuadScheme1D(nodes, weights)
13,849
def check_dataset_update(args, dataset):
    """Checks if the dataset information must be updated."""
    return (args.dataset_attributes or
            args.import_fields or
            (args.shared_flag and r.shared_changed(args.shared, dataset)) or
            (((hasattr(args, 'max_categories') and args.max_categories > 0) or
              (hasattr(args, 'multi_label') and args.multi_label)) and
             args.objective_field))
13,850
def get_package_extras(provider_package_id: str) -> Dict[str, List[str]]:
    """
    Finds extras for the package specified.

    :param provider_package_id: id of the package
    """
    if provider_package_id == 'providers':
        return {}
    with open(DEPENDENCIES_JSON_FILE) as dependencies_file:
        cross_provider_dependencies: Dict[str, List[str]] = json.load(dependencies_file)
    extras_dict = (
        {
            module: [get_pip_package_name(module)]
            for module in cross_provider_dependencies[provider_package_id]
        }
        if cross_provider_dependencies.get(provider_package_id)
        else {}
    )
    provider_yaml_dict = get_provider_yaml(provider_package_id)
    additional_extras = provider_yaml_dict.get('additional-extras')
    if additional_extras:
        for key in additional_extras:
            if key in extras_dict:
                extras_dict[key].append(additional_extras[key])
            else:
                extras_dict[key] = additional_extras[key]
    return extras_dict
13,851
def e(a: float, b: float) -> float:
    """
    e = sqrt(1 + (b * b) / (a * a))

    :param a: semi-major axis
    :type a: float
    :param b: semi-minor axis
    :type b: float
    :return: eccentricity
    :rtype: float
    """
    return np.sqrt(1 + (b * b) / (a * a))
13,852
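A numeric check of e above with arbitrary semi-axes; note that sqrt(1 + b²/a²) is the hyperbola form of eccentricity (an ellipse would use 1 - b²/a²):

import numpy as np

assert np.isclose(e(3.0, 4.0), 5.0 / 3.0)  # sqrt(1 + 16/9) = 5/3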
def remove_extra_citation_metadata(graph) -> None:
    """Remove superfluous metadata associated with a citation (that isn't the db/id).

    Best practice is to add this information programmatically.
    """
    for u, v, k in graph.edges(keys=True):
        if CITATION not in graph[u][v][k]:
            continue
        for key in list(graph[u][v][k][CITATION]):
            if key not in _CITATION_KEEP_KEYS:
                del graph[u][v][k][CITATION][key]
13,853
def parse_note(path: Path) -> dict:
    """
    Convert a note in plain text to a dictionary.
    Lines #1 ~ #5 are meta data of the note.
    Line #9 to end is the body.
    """
    header_line_number = 5
    body_start_line = 9
    res = {}
    with open(path) as f:
        for x in range(header_line_number):
            the_line = next(f).strip()
            if the_line.endswith(':'):
                the_line += ' '  # fix 'Tags: ' stripped to 'Tags:' problem
            header_sections = the_line.split(': ')
            assert len(header_sections) == 2, f'Please fix header {the_line} of note {path}'
            res[header_sections[0]] = header_sections[1]
    body = sh.sed('-n', f'{body_start_line},$p', path).stdout.decode('utf-8')
    res['body'] = body
    return res
13,854
def get_previous_sle_for_warehouse(last_sle, exclude_current_voucher=False):
    """get stock ledger entries filtered by specific posting datetime conditions"""
    last_sle['time_format'] = '%H:%i:%s'
    if not last_sle.get("posting_date"):
        last_sle["posting_date"] = "1900-01-01"
    if not last_sle.get("posting_time"):
        last_sle["posting_time"] = "00:00"

    sle = frappe.db.sql("""
        select *, timestamp(posting_date, posting_time) as "timestamp"
        from `tabStock Ledger Entry`
        where item_code = %(item_code)s
            and warehouse = %(warehouse)s
            and is_cancelled = 0
            and timestamp(posting_date, time_format(posting_time, %(time_format)s)) < timestamp(%(posting_date)s, time_format(%(posting_time)s, %(time_format)s))
        order by timestamp(posting_date, posting_time) desc, creation desc
        limit 1
        for update""", last_sle, as_dict=1)

    return sle[0] if sle else frappe._dict()
13,855
def write_visfile(discr, io_fields, visualizer, vizname,
                  step=0, t=0, overwrite=False, vis_timer=None):
    """Write VTK output for the fields specified in *io_fields*.

    Parameters
    ----------
    visualizer:
        A :class:`meshmode.discretization.visualization.Visualizer`
        VTK output object.
    io_fields:
        List of tuples indicating the (name, data) for each field to write.
    """
    from contextlib import nullcontext
    from mirgecom.io import make_rank_fname, make_par_fname

    comm = discr.mpi_communicator
    rank = 0

    if comm:
        rank = comm.Get_rank()

    rank_fn = make_rank_fname(basename=vizname, rank=rank, step=step, t=t)

    if rank == 0:
        import os
        viz_dir = os.path.dirname(rank_fn)
        if viz_dir and not os.path.exists(viz_dir):
            os.makedirs(viz_dir)

    if comm:
        comm.barrier()

    if vis_timer:
        ctm = vis_timer.start_sub_timer()
    else:
        ctm = nullcontext()

    with ctm:
        visualizer.write_parallel_vtk_file(
            comm, rank_fn, io_fields,
            overwrite=overwrite,
            par_manifest_filename=make_par_fname(
                basename=vizname, step=step, t=t
            )
        )
13,856
def forest_str(graph, with_labels=True, sources=None, write=None, ascii_only=False):
    """
    Creates a nice utf8 representation of a directed forest

    Parameters
    ----------
    graph : nx.DiGraph | nx.Graph
        Graph to represent (must be a tree, forest, or the empty graph)

    with_labels : bool
        If True will use the "label" attribute of a node to display if it
        exists otherwise it will use the node value itself. Defaults to True.

    sources : List
        Mainly relevant for undirected forests, specifies which nodes to list
        first. If unspecified the root nodes of each tree will be used for
        directed forests; for undirected forests this defaults to the nodes
        with the smallest degree.

    write : callable
        Function to use to write to, if None new lines are appended to a list
        and returned. If set to the `print` function, lines will be written to
        stdout as they are generated. If specified, this function will return
        None. Defaults to None.

    ascii_only : Boolean
        If True only ASCII characters are used to construct the visualization

    Returns
    -------
    str | None :
        utf8 representation of the tree / forest

    Example
    -------
    >>> graph = nx.balanced_tree(r=2, h=3, create_using=nx.DiGraph)
    >>> print(nx.forest_str(graph))
    ╙── 0
        ├─╼ 1
        │   ├─╼ 3
        │   │   ├─╼ 7
        │   │   └─╼ 8
        │   └─╼ 4
        │       ├─╼ 9
        │       └─╼ 10
        └─╼ 2
            ├─╼ 5
            │   ├─╼ 11
            │   └─╼ 12
            └─╼ 6
                ├─╼ 13
                └─╼ 14

    >>> graph = nx.balanced_tree(r=1, h=2, create_using=nx.Graph)
    >>> print(nx.forest_str(graph))
    ╙── 0
        └── 1
            └── 2
    >>> print(nx.forest_str(graph, ascii_only=True))
    +-- 0
        L-- 1
            L-- 2
    """
    import networkx as nx

    printbuf = []
    if write is None:
        _write = printbuf.append
    else:
        _write = write

    # Define glyphs
    # Notes on available box and arrow characters
    # https://en.wikipedia.org/wiki/Box-drawing_character
    # https://stackoverflow.com/questions/2701192/triangle-arrow
    if ascii_only:
        glyph_empty = "+"
        glyph_newtree_last = "+-- "
        glyph_newtree_mid = "+-- "
        glyph_endof_forest = "    "
        glyph_within_forest = ":   "
        glyph_within_tree = "|   "

        glyph_directed_last = "L-> "
        glyph_directed_mid = "|-> "

        glyph_undirected_last = "L-- "
        glyph_undirected_mid = "|-- "
    else:
        glyph_empty = "╙"
        glyph_newtree_last = "╙── "
        glyph_newtree_mid = "╟── "
        glyph_endof_forest = "    "
        glyph_within_forest = "╎   "
        glyph_within_tree = "│   "

        glyph_directed_last = "└─╼ "
        glyph_directed_mid = "├─╼ "

        glyph_undirected_last = "└── "
        glyph_undirected_mid = "├── "

    if len(graph.nodes) == 0:
        _write(glyph_empty)
    else:
        if not nx.is_forest(graph):
            raise nx.NetworkXNotImplemented("input must be a forest or the empty graph")

        is_directed = graph.is_directed()
        succ = graph.succ if is_directed else graph.adj

        if sources is None:
            if is_directed:
                # use real source nodes for directed trees
                sources = [n for n in graph.nodes if graph.in_degree[n] == 0]
            else:
                # use arbitrary sources for undirected trees
                sources = [
                    min(cc, key=lambda n: graph.degree[n])
                    for cc in nx.connected_components(graph)
                ]

        # Populate the stack with each source node, empty indentation, and mark
        # the final node. Reverse the stack so sources are popped in the
        # correct order.
        last_idx = len(sources) - 1
        stack = [(node, "", (idx == last_idx)) for idx, node in enumerate(sources)][
            ::-1
        ]

        seen = set()
        while stack:
            node, indent, islast = stack.pop()
            if node in seen:
                continue
            seen.add(node)

            if not indent:
                # Top level items (i.e. trees in the forest) get different
                # glyphs to indicate they are not actually connected
                if islast:
                    this_prefix = indent + glyph_newtree_last
                    next_prefix = indent + glyph_endof_forest
                else:
                    this_prefix = indent + glyph_newtree_mid
                    next_prefix = indent + glyph_within_forest
            else:
                # For individual tree edges distinguish between directed and
                # undirected cases
                if is_directed:
                    if islast:
                        this_prefix = indent + glyph_directed_last
                        next_prefix = indent + glyph_endof_forest
                    else:
                        this_prefix = indent + glyph_directed_mid
                        next_prefix = indent + glyph_within_tree
                else:
                    if islast:
                        this_prefix = indent + glyph_undirected_last
                        next_prefix = indent + glyph_endof_forest
                    else:
                        this_prefix = indent + glyph_undirected_mid
                        next_prefix = indent + glyph_within_tree

            if with_labels:
                label = graph.nodes[node].get("label", node)
            else:
                label = node

            _write(this_prefix + str(label))

            # Push children on the stack in reverse order so they are popped in
            # the original order.
            children = [child for child in succ[node] if child not in seen]
            for idx, child in enumerate(children[::-1], start=1):
                islast_next = idx <= 1
                try_frame = (child, next_prefix, islast_next)
                stack.append(try_frame)

    if write is None:
        # Only return a string if the custom write function was not specified
        return "\n".join(printbuf)
13,857
def rotated_shower(shower, alt, az):
    """
    Return a rotated shower object from a shower object and a direction
    (alt, az)

    Parameters
    ----------
    shower: shower class object
    alt: altitude of the direction
    az: azimuth of the direction

    Returns
    -------
    copy of the given shower but rotated
    """
    rot_shower = copy(shower)
    rot_shower.particles = shower_array_rot(shower.particles, shower.alt, shower.az)
    return rot_shower
13,858
def write_output_report(dataframe):
    """Report workspace set-up statuses and create output tsv file from provided dataframe."""
    # create timestamp and use to label output file
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    output_filename = f"{timestamp}_workspaces_published_status.tsv"
    dataframe.to_csv(output_filename, sep="\t", index=False)

    # count success and failed workspaces and report to stdout
    successes = dataframe.final_workspace_status.str.count("Success").sum()
    fails = dataframe.final_workspace_status.str.count("Failed").sum()
    total = successes + fails

    print(f"Number of workspaces passed set-up: {successes}/{total}")
    print(f"Number of workspaces failed set-up: {fails}/{total}")
    print(f"All workspace set-up (success or fail) details available in output file: {output_filename}")
13,859
def cv_project(c):
    """Create CV project from cookiecutter"""
    with c.cd(dir_path):
        c.run(
            f"cookiecutter cvbpcc --no-input type=cv project_name={cc_project_name}",
            pty=True,
        )
    _fake_update(os.path.join(dir_path, cc_project_name))
13,860
def summary(clf, X, y, xlabels=None):
    """
    Output summary statistics for a fitted regression model.

    Parameters
    ----------
    clf : sklearn.linear_model
        A scikit-learn linear model classifier with a `predict()` method.
    X : numpy.ndarray
        Training data used to fit the classifier.
    y : numpy.ndarray
        Target training values, of shape = [n_samples].
    xlabels : list, tuple
        The labels for the predictors.
    """
    # Check and/or make xlabels
    ncols = X.shape[1]
    if xlabels is None:
        xlabels = np.array(
            ['x{0}'.format(i) for i in range(1, ncols + 1)], dtype='str')
    elif isinstance(xlabels, (tuple, list)):
        xlabels = np.array(xlabels, dtype='str')

    # Make sure dims of xlabels matches dims of X
    if xlabels.shape[0] != ncols:
        raise AssertionError(
            "Dimension of xlabels {0} does not match "
            "X {1}.".format(xlabels.shape, X.shape))

    # Create data frame of coefficient estimates and associated stats
    coef_df = pd.DataFrame(
        index=['_intercept'] + list(xlabels),
        columns=['Estimate', 'Std. Error', 't value', 'p value']
    )
    coef_df['Estimate'] = np.concatenate(
        (np.round(np.array([clf.intercept_]), 6), np.round((clf.coef_), 6)))
    coef_df['Std. Error'] = np.round(coef_se(clf, X, y), 6)
    coef_df['t value'] = np.round(coef_tval(clf, X, y), 4)
    coef_df['p value'] = np.round(coef_pval(clf, X, y), 6)

    # Create data frame to summarize residuals
    resids = residuals(clf, X, y, r_type='raw')
    resids_df = pd.DataFrame({
        'Min': pd.Series(np.round(resids.min(), 4)),
        '1Q': pd.Series(np.round(np.percentile(resids, q=25), 4)),
        'Median': pd.Series(np.round(np.median(resids), 4)),
        '3Q': pd.Series(np.round(np.percentile(resids, q=75), 4)),
        'Max': pd.Series(np.round(resids.max(), 4)),
    }, columns=['Min', '1Q', 'Median', '3Q', 'Max'])

    # Output results
    print("Residuals:")
    print(resids_df.to_string(index=False))
    print('\n')
    print('Coefficients:')
    print(coef_df.to_string(index=True))
    print('---')
    print('R-squared: {0:.5f}, Adjusted R-squared: {1:.5f}'.format(
        metrics.r2_score(y, clf.predict(X)), adj_r2_score(clf, X, y)))
    print('F-statistic: {0:.2f} on {1} features'.format(
        f_stat(clf, X, y), ncols))
13,861
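# Hedged usage sketch for summary(): it relies on sibling helpers from the
# same module (coef_se, coef_tval, coef_pval, residuals, adj_r2_score, f_stat),
# so this only shows the expected call shape with a fitted sklearn model.
import numpy as np
from sklearn.linear_model import LinearRegression

X = np.random.rand(50, 2)
y = 3.0 * X[:, 0] - 2.0 * X[:, 1] + 0.1 * np.random.rand(50)
clf = LinearRegression().fit(X, y)
summary(clf, X, y, xlabels=['feat_a', 'feat_b'])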
def angle2circle(angles):
    """Convert angles from degrees to radians, doubling after a +7.5 degree shift"""
    return np.deg2rad(2 * (np.array(angles) + 7.5))
13,862
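# Worked example for angle2circle: 2 * (0 + 7.5) = 15 degrees and
# 2 * (45 + 7.5) = 105 degrees, then converted to radians.
print(angle2circle([0, 45]))  # [0.26179939 1.83259571]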
def test_string(): """ Test incomplete string """ inp = load('js/incomplete_string.js') exp = load('js/incomplete_string.min.js') # save('js/incomplete_string.min.js', py_jsmin(inp)) assert py_jsmin(inp) == exp assert py_jsmin2(inp) == exp assert c_jsmin(inp) == exp inp = inp.decode('latin-1') exp = exp.decode('latin-1') assert py_jsmin(inp) == exp assert py_jsmin2(inp) == exp assert c_jsmin(inp) == exp
13,863
def _make_source(cls_source: str, cls_name: str, instance_method: str): """Converts a class source to a string including necessary imports. Args: cls_source (str): A string representing the source code of a user-written class. cls_name (str): The name of the class cls_source represents. instance_method (str): The method within the class that should be called from __main__ Returns: A string representing a user-written class that can be written to a file in order to yield an inner script for the ModelBuilder SDK. The only difference between the user-written code and the string returned by this method is that the user has the option to specify a method to call from __main__. """ src = "\n".join(["import torch", "import pandas as pd", cls_source]) src = src + "if __name__ == '__main__':\n" + f"\t{cls_name}().{instance_method}()" return src
13,864
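# Illustrative sketch of what _make_source produces; the class below is a
# made-up stand-in for real user code.
cls_source = (
    "class MyModel:\n"
    "    def serve(self):\n"
    "        print('serving')\n"
)
script = _make_source(cls_source, "MyModel", "serve")
# script starts with the torch/pandas imports, repeats the class source, and
# ends with: if __name__ == '__main__':\n\tMyModel().serve()
print(script)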
def prepend_pass_statement(line: str) -> str: """Prepend pass at indent level and comment out the line.""" colno = num_indented(line) right_side = line[colno:] indent = " " * colno return indent + "pass # " + right_side
13,865
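# Worked example for prepend_pass_statement; assumes the module-level helper
# num_indented() returns the number of leading whitespace characters.
print(prepend_pass_statement("    x = compute()"))
# "    pass # x = compute()"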
def main(): """Start execution of the script""" MiscUtil.PrintInfo("\n%s (PyMOL v%s; %s) Starting...\n" % (ScriptName, pymol.cmd.get_version()[1], time.asctime())) (WallClockTime, ProcessorTime) = MiscUtil.GetWallClockAndProcessorTime() # Retrieve command line arguments and options... RetrieveOptions() # Process and validate command line arguments and options... ProcessOptions() # Perform actions required by the script... CalculatePhysicochemicalProperties() MiscUtil.PrintInfo("\n%s: Done...\n" % ScriptName) MiscUtil.PrintInfo("Total time: %s" % MiscUtil.GetFormattedElapsedTime(WallClockTime, ProcessorTime))
13,866
def init_session_values(): """ Start with some reasonable defaults for date and time ranges. Note this must be run in app context ... can't call from main. """ # Default date span = tomorrow to 1 week from now now = arrow.now('local') # We really should be using tz from browser tomorrow = now.replace(days=+1) nextweek = now.replace(days=+7) flask.session["begin_date"] = tomorrow.floor('day').isoformat() flask.session["end_date"] = nextweek.ceil('day').isoformat() flask.session["daterange"] = "{} - {}".format( tomorrow.format("MM/DD/YYYY"), nextweek.format("MM/DD/YYYY"))
13,867
def clear_cache(user=None, doctype=None): """clear cache""" import frappe.sessions if doctype: import frappe.model.meta frappe.model.meta.clear_cache(doctype) reset_metadata_version() elif user: frappe.sessions.clear_cache(user) else: # everything import translate frappe.sessions.clear_cache() translate.clear_cache() reset_metadata_version() frappe.local.role_permissions = {}
13,868
def download_spot_by_dates(start=datetime(2011, 1, 1)):
    """
    Download spot data and store it as csv files.
    :param start: 2011-01-01 is the earliest available data
    :return: True if files were downloaded, False otherwise
    """
    file_index = get_download_file_index(SPREAD_DIR, start=start)
    if file_index.empty:
        return False
    for date in file_index:
        date_str = date.strftime('%Y-%m-%d')
        file_path = SPREAD_DIR / '{}.csv'.format(date_str)
        if file_path.exists():
            continue
        table = download_spot_by_date(date_str)
        if len(table) != 0:
            print(date)
            spread_df = pd.DataFrame(table, columns=HEADER)
            spread_df.to_csv(str(file_path), index=False, encoding='gb2312')
        time.sleep(np.random.rand() * 5)
    return True
13,869
def run_dataset(prob_label):
    """Run the experiment"""
    sample_source, n = get_sample_source(prob_label)

    # /////// submit jobs //////////
    # create folder name string
    home = os.path.expanduser("~")
    foldername = os.path.join(home, "freqopttest_slurm", 'e%d'%ex)
    logger.info("Setting engine folder to %s" % foldername)

    # create parameter instance that is needed for any batch computation engine
    logger.info("Creating batch parameter instance")
    batch_parameters = BatchClusterParameters(
        foldername=foldername, job_name_base="e%d_"%ex, parameter_prefix="")

    # Use the following line if Slurm queue is not used.
    #engine = SerialComputationEngine()
    engine = SlurmComputationEngine(batch_parameters)
    n_methods = len(method_job_funcs)
    # repetitions x #methods
    aggregators = np.empty((reps, n_methods), dtype=object)
    d = sample_source.dim()
    for r in range(reps):
        for mi, f in enumerate(method_job_funcs):
            # name used to save the result
            func_name = f.__name__
            fname = '%s-%s-J%d_r%d_d%d_a%.3f_trp%.2f.p' \
                %(prob_label, func_name, J, r, d, alpha, tr_proportion)
            if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
                logger.info('%s exists. Load and return.'%fname)
                test_result = glo.ex_load_result(ex, prob_label, fname)

                sra = SingleResultAggregator()
                if isinstance(test_result, SingleResult):
                    sra.submit_result(test_result)
                else:
                    sra.submit_result(SingleResult(test_result))
                aggregators[r, mi] = sra
            else:
                # result does not exist or rerun requested
                job = Ex4Job(SingleResultAggregator(), prob_label, r, n, f)
                agg = engine.submit_job(job)
                aggregators[r, mi] = agg

    # let the engine finish its business
    logger.info("Wait for all call in engine")
    engine.wait_for_all()

    # ////// collect the results ///////////
    logger.info("Collecting results")
    test_results = np.empty((reps, n_methods), dtype=object)
    for r in range(reps):
        for mi, f in enumerate(method_job_funcs):
            logger.info("Collecting result (%s, r=%d)" % (f.__name__, r))
            # let the aggregator finalize things
            aggregators[r, mi].finalize()

            # aggregators[i].get_final_result() returns a SingleResult instance,
            # which we need to unwrap to extract the actual result
            test_result = aggregators[r, mi].get_final_result().result
            while isinstance(test_result, SingleResult):
                test_result = test_result.result
            test_results[r, mi] = test_result

            func_name = f.__name__
            fname = '%s-%s-J%d_r%d_d%d_a%.3f_trp%.2f.p' \
                %(prob_label, func_name, J, r, d, alpha, tr_proportion)
            glo.ex_save_result(ex, test_result, prob_label, fname)

    func_names = [f.__name__ for f in method_job_funcs]
    func2labels = exglobal.get_func2label_map()
    method_labels = [func2labels[f] for f in func_names if f in func2labels]

    # save results
    results = {'results': test_results, 'n': n,
               'data_fname': label2fname[prob_label],
               'alpha': alpha, 'J': J,
               'sample_source': sample_source,
               'tr_proportion': tr_proportion,
               'method_job_funcs': method_job_funcs,
               'prob_label': prob_label,
               'method_labels': method_labels}

    # file name of the aggregated result
    fname = 'ex%d-%s-me%d_J%d_rs%d_nma%d_d%d_a%.3f_trp%.2f.p' \
        %(ex, prob_label, n_methods, J, reps, n, d, alpha, tr_proportion)
    glo.ex_save_result(ex, results, fname)
    logger.info('Saved aggregated results to %s'%fname)
13,870
def truncate_range(data, percMin=0.25, percMax=99.75, discard_zeros=True): """Truncate too low and too high values. Parameters ---------- data : np.ndarray Image to be truncated. percMin : float Percentile minimum. percMax : float Percentile maximum. discard_zeros : bool Discard voxels with value 0 from truncation. Returns ------- data : np.ndarray """ if discard_zeros: msk = ~np.isclose(data, 0) pMin, pMax = np.nanpercentile(data[msk], [percMin, percMax]) else: pMin, pMax = np.nanpercentile(data, [percMin, percMax]) temp = data[~np.isnan(data)] temp[temp < pMin], temp[temp > pMax] = pMin, pMax # truncate min and max data[~np.isnan(data)] = temp if discard_zeros: data[~msk] = 0 # put back masked out voxels return data
13,871
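# Small numpy demonstration of truncate_range; the data values are made up.
import numpy as np

data = np.array([0., 1., 2., 3., 100.])
print(truncate_range(data, percMin=0, percMax=75, discard_zeros=True))
# [ 0.    1.    2.    3.   27.25] -- the zero is preserved, and 100 is
# clipped down to the 75th percentile of the non-zero values.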
def update_dashboards(modules, horizon_config, installed_apps):
    """Imports dashboard and panel configuration from modules and applies it.

    The submodules from specified modules are imported, and the configuration
    for the specific dashboards is merged, with the later modules overriding
    settings from the former. Then the configuration is applied to
    horizon_config and installed_apps, in alphabetical order of files from
    which the configurations were imported.

    For example, given this setup:

        | foo/__init__.py
        | foo/_10_baz.py
        | foo/_20_qux.py

        | bar/__init__.py
        | bar/_30_baz_.py

    and being called with ``modules=[foo, bar]``, we will first have the
    configuration from ``_10_baz`` and ``_30_baz`` merged, then the
    configurations will be applied in order ``qux``, ``baz`` (``baz`` is
    second, because the most recent file which contributed to it, ``_30_baz``,
    comes after ``_20_qux``).

    Panel specific configurations are stored in horizon_config. Dashboards
    from both plugin-based and openstack_dashboard must be registered before
    the panel configuration can be applied, so making changes to the panels
    is deferred until horizon autodiscovery has completed. The configurations
    are applied in alphabetical order of the files from which they were
    imported.
    """
    config_dashboards = horizon_config.get('dashboards', [])
    if config_dashboards or horizon_config.get('default_dashboard'):
        logging.warning(
            '"dashboards" and "default_dashboard" in (local_)settings is '
            'DEPRECATED now and may be unsupported in some future release. '
            'The preferred way to specify the order of dashboards and the '
            'default dashboard is the pluggable dashboard mechanism (in %s).',
            ', '.join([os.path.abspath(module.__path__[0])
                       for module in modules])
        )

    enabled_dashboards = []
    disabled_dashboards = []
    exceptions = horizon_config.get('exceptions', {})
    apps = []
    angular_modules = []
    js_files = []
    js_spec_files = []
    scss_files = []
    panel_customization = []
    header_sections = []
    update_horizon_config = {}
    for key, config in import_dashboard_config(modules):
        if config.get('DISABLED', False):
            if config.get('DASHBOARD'):
                disabled_dashboards.append(config.get('DASHBOARD'))
            continue

        _apps = config.get('ADD_INSTALLED_APPS', [])
        apps.extend(_apps)

        _header_sections = config.get('ADD_HEADER_SECTIONS', [])
        header_sections.extend(_header_sections)

        if config.get('AUTO_DISCOVER_STATIC_FILES', False):
            for _app in _apps:
                module = import_module(_app)
                base_path = os.path.join(module.__path__[0], 'static/')
                file_discovery.populate_horizon_config(horizon_config,
                                                       base_path)

        add_exceptions = config.get('ADD_EXCEPTIONS', {}).items()
        for category, exc_list in add_exceptions:
            exceptions[category] = tuple(set(exceptions.get(category, ()) +
                                             exc_list))

        angular_modules.extend(config.get('ADD_ANGULAR_MODULES', []))
        # avoid pulling in dashboard javascript dependencies multiple times
        existing = set(js_files)
        js_files.extend([f for f in config.get('ADD_JS_FILES', [])
                         if f not in existing])
        js_spec_files.extend(config.get('ADD_JS_SPEC_FILES', []))
        scss_files.extend(config.get('ADD_SCSS_FILES', []))
        update_horizon_config.update(
            config.get('UPDATE_HORIZON_CONFIG', {}))

        if config.get('DASHBOARD'):
            dashboard = key
            enabled_dashboards.append(dashboard)
            if config.get('DEFAULT', False):
                horizon_config['default_dashboard'] = dashboard
        elif config.get('PANEL') or config.get('PANEL_GROUP'):
            config.pop("__builtins__", None)
            panel_customization.append(config)

    # Preserve the dashboard order specified in settings
    dashboards = ([d for d in config_dashboards
                   if d not in disabled_dashboards] +
                  [d for d in enabled_dashboards
                   if d not in config_dashboards])

    horizon_config['panel_customization'] = panel_customization
    horizon_config['header_sections'] = header_sections
    horizon_config['dashboards'] = tuple(dashboards)
    horizon_config.setdefault('exceptions', {}).update(exceptions)
    horizon_config.update(update_horizon_config)
    horizon_config.setdefault('angular_modules', []).extend(angular_modules)
    horizon_config.setdefault('js_files', []).extend(js_files)
    horizon_config.setdefault('js_spec_files', []).extend(js_spec_files)
    horizon_config.setdefault('scss_files', []).extend(scss_files)

    # apps contains reference to applications declared in the enabled folder
    # basically a list of applications that are internal and external plugins
    # installed_apps contains reference to applications declared in settings
    # such as django.contrib.*, django_pyscss, compressor, horizon, etc...
    # for translation, we are only interested in the list of external plugins
    # so we save the reference to it before we append to installed_apps
    horizon_config.setdefault('plugins', []).extend(apps)
    installed_apps[0:0] = apps
13,872
def test_reset_threshold(): """ Test the model threshold can be reset. Performance metric should be recalculated and also predictions should be changed based on the new threshold. """ # import data airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/modified_airlines.csv")) # convert columns to factors airlines["Year"] = airlines["Year"].asfactor() airlines["Month"] = airlines["Month"].asfactor() airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor() airlines["Cancelled"] = airlines["Cancelled"].asfactor() airlines['FlightNum'] = airlines['FlightNum'].asfactor() # set the predictor names and the response column name predictors = ["Origin", "Dest", "Year", "UniqueCarrier", "DayOfWeek", "Month", "Distance", "FlightNum"] response = "IsDepDelayed" # split into train and validation sets train, valid = airlines.split_frame(ratios = [.8], seed = 1234) # initialize the estimator model = H2OGradientBoostingEstimator(seed = 1234, ntrees=5) # train the model model.train(x=predictors, y=response, training_frame=train) old_threshold = model._model_json['output']['default_threshold'] # predict preds = model.predict(airlines) # reset the threshold and get the old one new_threshold = 0.6917189903082518 old_returned = reset_model_threshold(model, new_threshold) reset_model = h2o.get_model(model.model_id) reset_threshold = reset_model._model_json['output']['default_threshold'] # predict with reset model preds_reset = reset_model.predict(airlines) # compare thresholds assert old_threshold == old_returned assert new_threshold == reset_threshold assert reset_threshold != old_threshold # compare predictions preds_local = preds.as_data_frame() preds_reset_local = preds_reset.as_data_frame() print("old threshold:", old_threshold, "new_threshold:", new_threshold) for i in range(airlines.nrow): if old_threshold <= preds_local.iloc[i, 2] < new_threshold: assert preds_local.iloc[i, 0] != preds_reset_local.iloc[i, 0] else: assert preds_local.iloc[i, 0] == preds_reset_local.iloc[i, 0]
13,873
async def payment_list(request): """ --- description: Show outgoing payments, regarding {bolt11} or {payment_hash} if set Can only specify one of {bolt11} or {payment_hash} tags: - payments produces: - application/json parameters: - in: body name: body required: false schema: type: object properties: bolt11: type: string payment_hash: type: string responses: "200": description: successful operation. """ data = await request.json() bolt11 = data.get('bolt11', None) payment_hash = data.get('payment_hash', None) return web.json_response(request.app['rpc'].listpayments(bolt11=bolt11, payment_hash=payment_hash))
13,874
def write_opened(dir, file_dict, data_dict, verbose=True):
    """
    Read in a dictionary with open files as values and write the data to those files
    """
    for game_id, vals in data_dict.items():
        f = file_dict.get(game_id)
        if not f:
            fn = dir + str(game_id) + ".csv"
            f = io.init_csv(fn, header=bm.LINE_COLUMNS, close=False)
            file_dict[game_id] = f

        io.write_list(f, vals)
        if verbose:
            print(f"writing {vals} to game [{game_id}]")

    return file_dict
13,875
def get_accessible_cases(item, user):
    """Return all accessible cases for a cohort and user."""
    return getattr(item, "get_accessible_cases_for_user")(user)
13,876
def run_main():
    """
    Main entry point of the script
    """
    cluster_number = args.cluster_number
    dataset_path = os.path.abspath(args.dataset_path)
    result_path = os.path.abspath(args.yolo_anchors_path)
    kmeans = YOLO_KMeans(cluster_number, dataset_path)
    kmeans.txt2clusters(result_path)
13,877
def aux_conv5(A, B, n, idx):
    """
    Performs the convolution of A and B where B = A* (enumerate-for-loop)
    :param A: Coefficients matrix 1 (orders, buses)
    :param B: Coefficients matrix 2 (orders, buses)
    :param n: last order of the coefficients in the while loop
    :param idx: bus indices array
    :return: Array with the convolution for the buses given by "idx"
    """
    suma = np.zeros(len(idx), dtype=nb.complex128)

    for m in range(0, n):
        for i, k in enumerate(idx):
            suma[i] += A[m, k] * B[n-1-m, k]

    return suma.real
13,878
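# Vectorized numpy cross-check for aux_conv5 (illustrative, made-up data): for
# each selected bus k it computes Re( sum_{m=0}^{n-1} A[m, k] * B[n-1-m, k] ).
# Note the function itself also expects numba to be imported as nb.
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(6, 4)) + 1j * rng.normal(size=(6, 4))
B = A.conj()
n, idx = 4, np.array([0, 2])
expected = np.sum(A[:n, idx] * B[n - 1::-1, idx], axis=0).real
# expected matches aux_conv5(A, B, n, idx)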
def plot_phaseogram(phaseogram, phase_bins, time_bins, unit_str='s', ax=None,
                    **plot_kwargs):
    """Plot a phaseogram.

    Parameters
    ----------
    phaseogram : MxN array
        The phaseogram to be plotted, with phase bins along the first axis
        and time bins along the second
    phase_bins : array of M + 1 elements
        The bins on the x-axis
    time_bins : array of N + 1 elements
        The bins on the y-axis

    Other Parameters
    ----------------
    unit_str : str
        String indicating the time unit (e.g. 's', 'MJD', etc)
    ax : `matplotlib.pyplot.axis` instance
        Axis to plot to. If None, create a new one.
    plot_kwargs : dict
        Additional arguments to be passed to pcolormesh

    Returns
    -------
    ax : `matplotlib.pyplot.axis` instance
        Axis where the phaseogram was plotted.
    """
    if ax is None:
        plt.figure('Phaseogram')
        ax = plt.subplot()
    ax.pcolormesh(phase_bins, time_bins, phaseogram.T, **plot_kwargs)
    ax.set_ylabel('Time ({})'.format(unit_str))
    ax.set_xlabel('Phase')
    ax.set_xlim([0, np.max(phase_bins)])
    ax.set_ylim([np.min(time_bins), np.max(time_bins)])
    return ax
13,879
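# Minimal usage sketch for plot_phaseogram with synthetic data: 16 phase bins
# (M) by 32 time bins (N), so the bin-edge arrays hold 17 and 33 elements.
import numpy as np
import matplotlib.pyplot as plt

phaseogram = np.random.rand(16, 32)
phase_bins = np.linspace(0, 1, 17)
time_bins = np.linspace(0, 1000, 33)
ax = plot_phaseogram(phaseogram, phase_bins, time_bins, unit_str='s')
plt.show()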
def load_rapidSTORM_track_header(path): """ Load xml header from a rapidSTORM (track) single-molecule localization file and identify column names. Parameters ---------- path : str, bytes, os.PathLike, file-like File path for a rapidSTORM file to load. Returns ------- list of str A list of valid dataset property keys as derived from the rapidSTORM identifiers. """ # read xml part in header with open_path_or_file_like(path) as file: return _read_rapidSTORM_track_header(file)
13,880
def hex_to_byte(hexStr):
    """ Convert a hex string (optionally space-separated) to a string of byte-valued characters. """
    chars = []
    hexStr = ''.join(hexStr.split(" "))
    for i in range(0, len(hexStr), 2):
        chars.append(chr(int(hexStr[i:i + 2], 16)))
    return ''.join(chars)
13,881
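# Worked example: "48 65 6c 6c 6f" decodes to "Hello". On Python 3 the closely
# related bytes object would come from bytes.fromhex("48656c6c6f").
print(hex_to_byte("48 65 6c 6c 6f"))  # Hello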
def _vital_config_update(cfg, cfg_in):
    """
    Treat a vital Config object like a python dictionary

    Args:
        cfg (kwiver.vital.config.config.Config): config to update
        cfg_in (dict | kwiver.vital.config.config.Config): new values
    """
    # vital cfg.merge_config doesn't support dictionary input
    if isinstance(cfg_in, dict):
        for key, value in cfg_in.items():
            if cfg.has_value(key):
                cfg.set_value(key, str(value))
            else:
                raise KeyError('cfg has no key={}'.format(key))
    else:
        cfg.merge_config(cfg_in)
    return cfg
13,882
def SubscriberReceivedStartEncKeyVector(builder, numElems): """This method is deprecated. Please switch to Start.""" return StartEncKeyVector(builder, numElems)
13,883
def _read_extended_field_value(value, rawdata): """Used to decode large values of option delta and option length from raw binary form.""" if value >= 0 and value < 13: return (value, rawdata) elif value == 13: return (rawdata[0] + 13, rawdata[1:]) elif value == 14: return (struct.unpack('!H', rawdata[:2])[0] + 269, rawdata[2:]) else: raise ValueError("Value out of range.")
13,884
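# Worked examples for _read_extended_field_value, following the CoAP rule that
# a nibble of 13 extends by one byte (minus 13) and 14 by two bytes (minus 269).
assert _read_extended_field_value(5, b"rest") == (5, b"rest")
assert _read_extended_field_value(13, b"\x05rest") == (18, b"rest")
assert _read_extended_field_value(14, b"\x00\x05rest") == (274, b"rest")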
def add_uint(a, b): """Returns the sum of two uint256-ish tuples.""" a = from_uint(a) b = from_uint(b) c = a + b return to_uint(c)
13,885
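# Sketch of the uint256 convention assumed by add_uint: values are (low, high)
# pairs of 128-bit words. The real from_uint/to_uint helpers live elsewhere in
# the module; these stand-in definitions are illustrative only.
def from_uint(u):
    low, high = u
    return low + (high << 128)

def to_uint(c):
    return (c & ((1 << 128) - 1), c >> 128)

assert add_uint(to_uint(2**200), to_uint(5)) == to_uint(2**200 + 5)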
def get_dcgan_args(parser, args=[]):
    """
    parameters determining the DCGAN parameters
    """
    # DCGAN:
    # ------------------------------------------------------------------------
    parser.add_argument(
        "--lam", type=float, default=10, help="Factor for scaling gradient penalty"
    )

    parser.add_argument(
        "--wgan",
        # argparse's type=bool treats any non-empty string (even "False") as
        # True, so a plain flag is the reliable way to switch this on
        action="store_true",
        help="Determine if WGAN training should be activated",
    )

    parser.add_argument(
        "--p_drop",
        type=float,
        default=0.1,
        help="Dropout probability for the Discriminator network",
    )
    # ------------------------------------------------------------------------

    return parser
13,886
def test_file_upload_with_users(svc_client, identity_headers): """Check successful file upload and listing based on user auth header.""" headers_user1 = copy.deepcopy(identity_headers) headers_user1.pop("Content-Type") filename = uuid.uuid4().hex jwt_data = { "aud": ["renku"], "email_verified": False, "preferred_username": "user1@platform2.com", "given_name": "user", "family_name": "user one", "name": "User One", "email": "user1@platform2.com", "sub": "8d1f08e2-b136-4c93-a38f-d5f36a5919d9", } headers_user2 = { "Renku-User": jwt.encode(jwt_data, JWT_TOKEN_SECRET, algorithm="HS256"), "Authorization": identity_headers["Authorization"], } response = svc_client.post( "/cache.files_upload", data=dict(file=(io.BytesIO(b"this is a test"), filename)), headers=headers_user1 ) assert {"result"} == set(response.json.keys()) file_id = response.json["result"]["files"][0]["file_id"] assert file_id assert 200 == response.status_code response = svc_client.post( "/cache.files_upload", data=dict(file=(io.BytesIO(b"this is a test"), filename)), headers=headers_user2 ) assert response assert {"result"} == set(response.json.keys()) response = svc_client.get("/cache.files_list", headers=headers_user1) assert response assert {"result"} == set(response.json.keys()) assert 0 < len(response.json["result"]["files"]) assert file_id in [file["file_id"] for file in response.json["result"]["files"]]
13,887
def _gen_span_id() -> str: """Return 16 random hexadecimal digits. The id is used for distributed tracing. """ return os.urandom(8).hex()
13,888
def store_tabular_data(filepath: Path, use_stem: bool = True) -> None: """Reads the tabular data from filepath and stores it in-memory to be plotted asychronously. Args: filepath (Path): The tabular data file to be read and stored. use_stem (bool, optional): Only store the filename (without extension). Defaults to True. """ # Declare global variables locally global data_glob global data_glob_changed floats = read_tabular_data(filepath) if floats == []: print('Skipping empty file', filepath) return None # Check that the array is not ragged; each line must be the same length! # I'm not exactly sure why this happens, but it seems like maybe the file # contents are not being flushed to disk before getting read back in again. # When I manually check the files afterwards, the data is all present. lengths = [len(x) for x in floats] if not all([length == lengths[0] for length in lengths]): print('Warning! Skipping ragged data in', filepath) return None data = np.array(floats) if use_stem: filepath = Path(filepath.stem) for i in range(len(data_glob)): (p, data_old_) = data_glob[i] if filepath == p: data_glob[i] = (filepath, data) data_glob_changed = True return None data_glob.append((filepath, data)) data_glob_changed = True return None
13,889
def load_uci_credit_card(return_X_y=False, as_frame=False):
    """Loads the UCI Credit Card Dataset.

    This dataset contains a sample of [Default of Credit Card Clients Dataset](https://www.kaggle.com/uciml/default-of-credit-card-clients-dataset).

    Example:

    ```python
    from skorecard import datasets
    df = datasets.load_uci_credit_card(as_frame=True)
    ```

    Args:
        return_X_y (bool): If True, returns `(data, target)` instead of a dict object.
        as_frame (bool): give the pandas dataframe instead of X, y matrices (default=False).

    Returns: (pd.DataFrame, dict or tuple) features and target, as follows:
        - if as_frame is True: returns pd.DataFrame with y as a target
        - if return_X_y is True: returns a tuple: (X, y)
        - if both are False (default setting): returns a dictionary where the key `data` contains the features,
          and the key `target` is the target
    """  # noqa
    file = pkgutil.get_data("skorecard", "data/UCI_Credit_Card.zip")
    df = pd.read_csv(io.BytesIO(file), compression="zip")
    df = df.rename(columns={"default.payment.next.month": "default"})

    if as_frame:
        return df[["EDUCATION", "MARRIAGE", "LIMIT_BAL", "BILL_AMT1", "default"]]

    X, y = (
        df[["EDUCATION", "MARRIAGE", "LIMIT_BAL", "BILL_AMT1"]],
        df["default"].values,
    )

    if return_X_y:
        return X, y

    return {"data": X, "target": y}
13,890
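# Usage sketch showing the three return modes of load_uci_credit_card.
df = load_uci_credit_card(as_frame=True)      # DataFrame including 'default'
X, y = load_uci_credit_card(return_X_y=True)  # feature frame + target array
bunch = load_uci_credit_card()                # {'data': X, 'target': y}
print(df.shape, X.shape, y.shape)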
def sanitize_yaml_and_save_datasource( context: DataContext, datasource_yaml: str, overwrite_existing: bool = False ) -> None: """A convenience function used in notebooks to help users save secrets.""" if not datasource_yaml: raise ValueError("Please verify the yaml and try again.") if not isinstance(datasource_yaml, str): raise TypeError("Please pass in a valid yaml string.") config = yaml.load(datasource_yaml) try: datasource_name = config.pop("name") except KeyError: raise ValueError("The datasource yaml is missing a `name` attribute.") if not overwrite_existing and check_if_datasource_name_exists( context=context, datasource_name=datasource_name ): print( f'**WARNING** A Datasource named "{datasource_name}" already exists in this Data Context. The Datasource has *not* been saved. Please use a different name or set overwrite_existing=True if you want to overwrite!' ) return if "credentials" in config.keys(): credentials = config["credentials"] config["credentials"] = "${" + datasource_name + "}" context.save_config_variable(datasource_name, credentials) context.add_datasource(name=datasource_name, **config)
13,891
def quad_lsq(x, y, verbose=False, itmax=200, iparams=[]): """ Fits a parabola to the data, more handy as it fits for parabola parameters in the form y = B_0 * (x - B_1)**2 + B_2. This is computationally slower than poly_lsq, so beware of its usage for time consuming operations. Uses scipy odrpack, but for least squares. Parameters ---------- x, y : 1-D arrays Data to fit. verbose : bool or int, optional Can be 0,1,2 for different levels of output (False or True are the same as 0 or 1) itmax : int, optional Maximum number of iterations. iparams : 1D array, optional Initial parameters B_0, B_1, B_2. Returns ------- coeff : 1-D array Parabola coefficients err : 1-D array Standard error (1-sigma) on the coefficients. """ # Internal definition of quadratic def _quadratic(B, x): return B[0] * (x - B[1]) * (x - B[1]) + B[2] def _quad_fjd(B, x): return 2 * B[0] * (x - B[1]) def _quad_fjb(B, x): _ret = np.concatenate((np.ones(x.shape, float), 2 * B[0] * (B[1] - x), x * x - 2 * B[1] * x + B[1] * B[1],)) _ret.shape = (3,) + x.shape return _ret if any(iparams): def _quad_est(data): return tuple(iparams) else: def _quad_est(data): return (1., 1., 1.) quadratic = odr.Model(_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est) mydata = odr.Data(x, y) myodr = odr.ODR(mydata, quadratic, maxit=itmax) # Set type of fit to least-squares: myodr.set_job(fit_type=2) if verbose == 2: myodr.set_iprint(final=2) fit = myodr.run() # Display results: if verbose: fit.pprint() if fit.stopreason[0] == 'Iteration limit reached': print('(WWW) quad_lsq: iteration limit reached, result not reliable!') # Results and errors coeff = fit.beta err = fit.sd_beta return coeff, err
13,892
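# Usage sketch for quad_lsq on noisy synthetic data; the true parameters are
# B = (2.0, 1.0, -3.0) in the y = B[0]*(x - B[1])**2 + B[2] parameterization.
import numpy as np

x = np.linspace(-5, 5, 200)
y = 2.0 * (x - 1.0) ** 2 - 3.0 + np.random.normal(0, 0.1, x.size)
coeff, err = quad_lsq(x, y, iparams=[1.0, 0.5, -1.0])
print(coeff)  # approximately [ 2.  1. -3.]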
def find_file(directory_name, cyclone_id_string, prefer_zipped=True, allow_other_format=True, raise_error_if_missing=True): """Finds NetCDF file with SHIPS data. :param directory_name: Name of directory with SHIPS data. :param cyclone_id_string: Cyclone ID (must be accepted by `satellite_utils.parse_cyclone_id`). :param prefer_zipped: Boolean flag. If True, will look for zipped file first. If False, will look for unzipped file first. :param allow_other_format: Boolean flag. If True, will allow opposite of preferred file format (zipped or unzipped). :param raise_error_if_missing: Boolean flag. If file is missing and `raise_error_if_missing == True`, will throw error. If file is missing and `raise_error_if_missing == False`, will return *expected* file path. :return: ships_file_name: File path. :raises: ValueError: if file is missing and `raise_error_if_missing == True`. """ error_checking.assert_is_string(directory_name) satellite_utils.parse_cyclone_id(cyclone_id_string) error_checking.assert_is_boolean(prefer_zipped) error_checking.assert_is_boolean(allow_other_format) error_checking.assert_is_boolean(raise_error_if_missing) ships_file_name = '{0:s}/ships_{1:s}.nc{2:s}'.format( directory_name, cyclone_id_string, GZIP_FILE_EXTENSION if prefer_zipped else '' ) if os.path.isfile(ships_file_name): return ships_file_name if allow_other_format: if prefer_zipped: ships_file_name = ships_file_name[:-len(GZIP_FILE_EXTENSION)] else: ships_file_name += GZIP_FILE_EXTENSION if os.path.isfile(ships_file_name) or not raise_error_if_missing: return ships_file_name error_string = 'Cannot find file. Expected at: "{0:s}"'.format( ships_file_name ) raise ValueError(error_string)
13,893
def check_sentence_ending(sentence):
    """Check the ending of the sentence to verify that a period is present.

    :param sentence: str - a sentence to check.
    :return: bool - return True if punctuated correctly with period, False otherwise.
    """
    return sentence.endswith(".")
13,894
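# Quick checks for check_sentence_ending.
assert check_sentence_ending("The quick brown fox jumps.") is True
assert check_sentence_ending("No period here") is False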
def _parse_cli_variable(mapping_str: str) -> Tuple[str, str]: """Checks that the input is of shape `name:value` and then splits it into a tuple""" match = re.match(r"(?P<name>.+?):(?P<value>.+)", mapping_str) if match is None: raise ValueError(f'CLI variable input {mapping_str} is not of form `"name:value"`') parsed = match.groupdict() return parsed["name"], parsed["value"]
13,895
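# Worked examples for _parse_cli_variable: the non-greedy name group means the
# split happens at the first colon only.
assert _parse_cli_variable("region:us-east-1") == ("region", "us-east-1")
assert _parse_cli_variable("url:http://example.com/x") == ("url", "http://example.com/x")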
def list_privileges_by_role(request, role):
    """
    List sentry privileges by role
    :param request:
    :param role: role name
    :return: A JSON array of SentryPrivileges: [p1, p2, p3...]
    """
    sentry_privileges = _get_sentry_api(request.user).list_sentry_privileges_by_role("cdap", role)
    sentry_privileges = [{"actions": p["action"],
                          "authorizables": _sentry_authorizables_to_path(p["authorizables"])}
                         for p in sentry_privileges]
    return HttpResponse(json.dumps(sentry_privileges), content_type="application/json")
13,896
def to_torch_as(x: Any, y: torch.Tensor) -> Union[Batch, torch.Tensor]:
    """Convert x, turning any np.ndarray content into torch.Tensor.

    Same as ``to_torch(x, dtype=y.dtype, device=y.device)``.
    """
    assert isinstance(y, torch.Tensor)
    return to_torch(x, dtype=y.dtype, device=y.device)
13,897
async def test_closest_function_home_vs_group_state(hass): """Test closest function home vs group state.""" hass.states.async_set( "test_domain.object", "happy", { "latitude": hass.config.latitude + 0.1, "longitude": hass.config.longitude + 0.1, }, ) hass.states.async_set( "not_in_group.but_closer", "happy", {"latitude": hass.config.latitude, "longitude": hass.config.longitude}, ) assert await async_setup_component(hass, "group", {}) await hass.async_block_till_done() await group.Group.async_create_group(hass, "location group", ["test_domain.object"]) info = render_to_info(hass, '{{ closest("group.location_group").entity_id }}') assert_result_info( info, "test_domain.object", {"group.location_group", "test_domain.object"} ) assert info.rate_limit is None info = render_to_info(hass, "{{ closest(states.group.location_group).entity_id }}") assert_result_info( info, "test_domain.object", {"test_domain.object", "group.location_group"} ) assert info.rate_limit is None
13,898
def cleanup_exports(): """ Cleanup export directories """ require('map', provided_by=[map]) local('rm -rf %(map)s/tiles/*' % env) local('rm -rf %(map)s/exports/*' % env)
13,899