def test_get_releases_mine(client: TestClient, session: db.Session):
    """Releases list must mark which releases are in the user's collection"""
    master_set = Release(name="Master Set")
    master_set.is_public = True
    session.add(master_set)
    first_expansion = Release(name="First Expansion")
    first_expansion.is_public = True
    session.add(first_expansion)
    session.commit()
    user, token = create_user_token(session)
    session.add(UserRelease(release_id=master_set.id, user_id=user.id))
    session.commit()
    response = client.get(
        "/v2/releases",
        headers={"Authorization": f"Bearer {token}"},
    )
    assert response.status_code == status.HTTP_200_OK
    data = response.json()
    assert data[0]["stub"] == master_set.stub
    assert data[0]["is_mine"] == True
    assert data[1]["is_mine"] == False
21,200
def UnN(X, Z, N, sampling_type):
    """Computes block-wise complete U-statistic."""
    return UN(X, Z, N, Un, sampling_type=sampling_type)
21,201
def view_log_view(request, model_name, object_id):
    """view log view

    Arguments:
        request {object} -- wsgi request object
        content_type {str} -- content type
        object_id {int} -- admin_log id

    Returns:
        return -- html view
    """
    if model_name not in register_form:
        return render(request, 'admin/error.html', {'error_msg': 'illegal request!'})
    model = register_form[model_name]['model']
    res = get_object_or_404(model, pk=object_id)
    log_entries = LogEntry.objects.filter(
        content_type_id=get_content_type_for_model(model).pk,
        object_id=res.id
    )
    return render(request, 'admin/view_log.html', {
        'log_data': log_entries
    })
21,202
def parse_markdown(source: str) -> Tuple[str, Dict]:
    """Parse a Markdown document using our custom parser.

    Args:
        source (str): the Markdown source text

    Returns:
        tuple(str, dict):
            1. the converted output as a string
            2. any extracted metadata as a dict
    """
    # Reset or we'll have leftover garbage from the previous file
    _md_parser.reset()
    html: str = _md_parser.convert(source)
    meta: Dict = set_metadata(_md_parser.metadata)
    return html, meta
21,203
def flatten_list(nested_list):
    # Essentially we want to loop through each element in the list
    # and check to see if it is of type integer or list
    """
    Flatten an arbitrarily nested list

    Args:
        nested_list: a nested list with item to be either integer or list
        example: [2,[[3,[4]], 5]]

    Returns:
        a flattened list with only integers
        example: [2,3,4,5]
    """
    result = []
    for element in nested_list:
        if isinstance(element, int):
            result.append(element)
        elif hasattr(element, '__iter__'):  # check to see if it is of type list
            list_result = flatten_list(element)  # recursive call
            for single_integer in list_result:
                result.append(single_integer)
    return result
21,204
def detect_runner():
    """
    Guess which test runner we're using by traversing the stack and looking
    for the first matching module. This *should* be reasonably safe, as
    it's done during test discovery where the test runner should be the
    stack frame immediately outside.
    """
    if _test_runner_override is not None:
        return _test_runner_override

    global _test_runner_guess
    if _test_runner_guess is False:
        stack = inspect.stack()
        for record in reversed(stack):
            frame = record[0]
            module = frame.f_globals.get("__name__").partition(".")[0]
            if module in _test_runner_aliases:
                module = _test_runner_aliases[module]
            if module in _test_runners:
                _test_runner_guess = module
                break
            if record[1].endswith("python2.6/unittest.py"):
                _test_runner_guess = "unittest"
                break
        else:
            _test_runner_guess = None
    return _test_runner_guess
21,205
def analyze(ss, cfg): """ Run job :param ss: SparkSession :param cfg: app configuration :return: None """ logger = logging.getLogger(__name__) logger.info('Python version: {}'.format(sys.version)) logger.info('Finding all papers') # MAG dataset to use db_name = cfg['mag_db_name'] # avoid nazis spark = ss papers_df = spark \ .table(db_name + '.papers') journals_df = spark \ .table(db_name + '.journals') \ .select("journalid", "normalizedname", "displayname", "publisher") paper_author_affil = spark \ .table(db_name + '.paperauthoraffiliations') # the maximum of the authorsequencenumber should be the number of authors a # given paper has # how to do the aggregation: https://stackoverflow.com/a/36251274/3149349 coauthors_per_paper = paper_author_affil \ .groupBy("paperid") \ .agg(f.max(f.col("authorsequencenumber")).alias("n_authors")) papers_with_coauthors = papers_df.join(coauthors_per_paper, "paperid", "left") # compute average citations across journals and years aggregated_citations = papers_with_coauthors \ .groupBy("journalid", "year") \ .agg(f.avg(f.col("citationcount")).alias("mean_citations"), f.avg(f.col("n_authors")).alias("mean_authors")) journal_table = journals_df.join(aggregated_citations, "journalid", "left") logger.info('Writing averaged coauthors and citations to file...') out_file = "/project/core/bikash_dataset/journal_averages.csv" journal_table. \ write.csv(out_file, mode="overwrite", header=True, sep=",", quoteAll=True) logger.info('Done.')
21,206
def retinanet( mode, offsets_mean=None, offsets_std=None, architecture='resnet50', train_bn=False, channels_fmap=256, num_anchors_per_pixel=9, num_object_classes=1, pi=0.01, alpha=0.25, gamma=2.0, confidence_threshold=0.05, num_top_scoring=1000, batch_size=2, max_objects_per_class_per_img=100, iou_threshold=0.5, output_top_scoring=False ): """ Builds a RetinaNet. Parameters ---------- mode : string The mode of building a retinanet either in 'training' or 'inference'. offsets_mean, offsets_std : float The mean and std of anchor offsets for a given dataset. If offsets are normalized, they will be used to de-normalize offsets. architecture : string, optional ResNet architecture in {'resnet50', 'resnet101'}. The default is 'resnet50'. train_bn : boolean, optional Whether one should normalize the layer input by the mean and variance over the current batch. The default is False, i.e., use the moving average of mean and variance to normalize the layer input. channels_fmap : integer, optional The number of filters in all FPN conv layers. The default is 256. num_anchors_per_pixel : integer, optional The number of anchors to generate at different scales for every pixel; see anchors.anchors_from_fpn(). The default is 9. num_object_classes : integer, optional The number of classes containing only objects, i.e., object classes denoted by positive integers while background denoted by 0. The default is 1. pi : float, optional The bias initialization at the final conv layer of the classification subnet, prevents the large number of anchors from generating a large loss value in the first iteration of training. The default is 0.01. alpha : float, optional A weighting factor in [0,1] for the object class, addressing class imbalance. The default is 0.25. gamma : float, optional A focusing parameter >= 0 for removing easy examples. The default is 2.0. confidence_threshold : float, optional The minimum selection's probabilites. The default is 0.05. num_top_scoring : integer, optional The number of top-scoring selections. The default is 1000. batch_size : integer, optional The batch size of input images. The default is 2. max_objects_per_class_per_img : integer, optional The maximum number of objects over all images for a particular class. The default is 100. iou_threshold : float, optional An iou threshold for NMS. The default is 0.5. output_top_scoring : boolean, optional Whether to include the output of detections.select_top_scoring() in the inference mode. The default is False. Returns ------- model : tf keras The retinanet. - Training mode * inputs are a batch of images, anchor indicators, ground-truth class ids and offsets generated by data_gen.data_generator(); * outputs are predicted anchor probabilities, offsets, classification and regression losses. - Inference mode * inputs are a batch of raw images, a list of anchors at all levels generated by anchors.anchors_from_fpn() and a window with shape of [1, 4] used in clipping anchors in detections.SelectTopScoring() where 4 is (y1, x1, y2, x2) corner coordinates for all images in the batch. * outputs is a list of detections, each has corresponding target boxes, class ids and scores. 
""" assert mode in ['training', 'inference'] # input images images = tf.keras.Input(shape=(None, None, 3), name='images') if mode == 'training': # inputs generated by anchors.anchors_targets() gt_anchor_indicators = tf.keras.Input( shape=(None,), name='gt_anchor_indicators', dtype=tf.int32) gt_anchor_class_ids = tf.keras.Input( shape=(None, num_object_classes), name='gt_anchor_class_ids', dtype=tf.int32) gt_anchor_offsets = tf.keras.Input( shape=(None, 4), name='gt_anchor_offsets', dtype=tf.float32) # backbone, ResNet + FPN fmaps = resnet_fpn.resnet_fpn( images, architecture, train_bn, channels_fmap) if mode == 'inference': # input generated by anchors.anchors_from_fpn(), and then each # element is broadcasted to batch_size, resulting in shape of # [batch_size, num_anchors_per_fmap, 4] anchors_fpn_batches = [] for i in range(len(fmaps)): anchors_i = tf.keras.Input( shape=(None, 4), name='anchors_p'+str(i+3), dtype=tf.float32) anchors_fpn_batches.append(anchors_i) # input used when clipping anchors in detections.SelectTopScoring() window = tf.keras.Input( shape=(4), batch_size=1, name='window', dtype=tf.int32) # classification and regression subnets cls_subnet = subnets.cls_subnet( num_anchors_per_pixel, num_object_classes, channels_fmap, pi) reg_subnet = subnets.reg_subnet( num_anchors_per_pixel, channels_fmap) # outputs, list, each element is for one FPN level if mode == 'training': pred_anchor_probs, pred_anchor_offsets = [], [] else: list_anchor_idxes = [] list_anchors, list_class_ids, list_scores = [], [], [] # loop for each FPN level for i in range(len(fmaps)): # fmap, [batch_size, h_i, w_i, channels_fmap] where h_i and w_i denote # the current fmap size p = fmaps[i] # cls, [batch_size, h_i, w_i, num_anchors_per_pixel*num_object_classes] pred_anchor_probs_i = cls_subnet([p]) # reshape, [batch_size, h_i*w_i*num_anchors_per_pixel, num_object_classes] pred_anchor_probs_i = tf.keras.layers.Reshape( (-1, num_object_classes), name='cls_probs_p'+str(i+3) )(pred_anchor_probs_i) # reg, [batch_size, h_i, w_i, num_anchors_per_pixel*4] pred_anchor_offsets_i = reg_subnet([p]) # reshape, [batch_size, h_i*w_i*num_anchors_per_pixel, 4] pred_anchor_offsets_i = tf.keras.layers.Reshape( (-1, 4), name='reg_offsets_p'+str(i+3) )(pred_anchor_offsets_i) if mode == 'training': pred_anchor_probs.append(pred_anchor_probs_i) pred_anchor_offsets.append(pred_anchor_offsets_i) else: # filter low confidence, select top-scoring and refine anchors anchors_i = anchors_fpn_batches[i] select_top_scoring_inputs_i = [ anchors_i, pred_anchor_probs_i, pred_anchor_offsets_i, window] select_top_scoring_outputs_i = detections.SelectTopScoring( confidence_threshold, num_top_scoring, batch_size, offsets_mean, offsets_std, name='select_top_detection_p'+str(i+3) )(select_top_scoring_inputs_i) list_anchor_idxes.append(select_top_scoring_outputs_i[0]) list_anchors.append(select_top_scoring_outputs_i[1]) list_class_ids.append(select_top_scoring_outputs_i[2]) list_scores.append(select_top_scoring_outputs_i[3]) if mode == 'training': # probs, [batch_size, num_anchors, num_object_classes] pred_anchor_probs = tf.keras.layers.Concatenate( axis=1, name='pred_anchor_probs')(pred_anchor_probs) # offsets, [batch_size, num_anchors, 4] pred_anchor_offsets = tf.keras.layers.Concatenate( axis=1, name='pred_anchor_offsets')(pred_anchor_offsets) # cls loss cls_inputs = [ gt_anchor_indicators, gt_anchor_class_ids, pred_anchor_probs] cls_loss = losses.ClsLoss(alpha, gamma)(cls_inputs) # reg loss reg_inputs = [ gt_anchor_indicators, gt_anchor_offsets, 
pred_anchor_offsets] reg_loss = losses.RegLoss()(reg_inputs) # training model's inputs and outputs inputs = [ images, gt_anchor_indicators, gt_anchor_class_ids, gt_anchor_offsets,] outputs = [ pred_anchor_probs, pred_anchor_offsets, cls_loss, reg_loss] else: # NMS nms_fpn_inputs = [ list_anchor_idxes, list_anchors, list_class_ids, list_scores] nms_fpn_outputs = detections.NMS_FPN( max_objects_per_class_per_img, iou_threshold, batch_size, name='nms' )(nms_fpn_inputs) # anchors_batch, class_ids_batch, scores_batch = nms_fpn_outputs # inference model's inputs and outputs inputs = [images, anchors_fpn_batches, window] if output_top_scoring: outputs = [nms_fpn_inputs, nms_fpn_outputs] else: outputs = nms_fpn_outputs with tf.device('/cpu:0'): model = tf.keras.Model(inputs, outputs, name='RetinaNet') return model
21,207
def default_chap_exec(gallery_or_id: Union[Gallery, int], chap: Chapter, only_values=False) \ -> Union[Tuple[str, dict], Tuple[int, Union[str, List[str]], int, bytes, int, Literal[0, 1]]]: """Pass a Gallery object or gallery id and a Chapter object""" gid: int if isinstance(gallery_or_id, Gallery): gallery: Gallery = gallery_or_id gid = gallery.id in_archive = gallery.is_archive else: gid = gallery_or_id in_archive = chap.in_archive if only_values: result_exec = (gid, chap.title, chap.number, str.encode(chap.path), chap.pages, in_archive) else: result_exec = ( """ INSERT INTO chapters(series_id, chapter_title, chapter_number, chapter_path, pages, in_archive) VALUES(:series_id, :chapter_title, :chapter_number, :chapter_path, :pages, :in_archive)""", { 'series_id': gid, 'chapter_title': chap.title, 'chapter_number': chap.number, 'chapter_path': str.encode(chap.path), 'pages': chap.pages, 'in_archive': in_archive } ) return result_exec
21,208
def merge_closest_groups(groups, special_tokens=None):
    """ Finds the two token groups with the best merge score and merges them. """
    scores = [merge_score(groups[i], groups[i+1], special_tokens)
              for i in range(len(groups)-1)]
    #print(scores)
    ind = np.argmax(scores)
    groups[ind] = groups[ind] + groups[ind+1]
    #print(groups[ind][0].s in openers, groups[ind][0])
    if groups[ind][0].s in openers and groups[ind+1][-1].s == openers[groups[ind][0].s]:
        groups[ind][0].balanced = True
        groups[ind+1][-1].balanced = True
    groups.pop(ind+1)
21,209
def _list_all(listing_call, output_format='dict', *args, **filters): """Helper to handle paged listing requests. Example usage: ``evaluations = list_all(list_evaluations, "predictive_accuracy", task=mytask)`` Parameters ---------- listing_call : callable Call listing, e.g. list_evaluations. output_format : str, optional (default='dict') The parameter decides the format of the output. - If 'dict' the output is a dict of dict - If 'dataframe' the output is a pandas DataFrame *args : Variable length argument list Any required arguments for the listing call. **filters : Arbitrary keyword arguments Any filters that can be applied to the listing function. additionally, the batch_size can be specified. This is useful for testing purposes. Returns ------- dict or dataframe """ # eliminate filters that have a None value active_filters = {key: value for key, value in filters.items() if value is not None} page = 0 result = {} if output_format == 'dataframe': result = pd.DataFrame() # Default batch size per paging. # This one can be set in filters (batch_size), but should not be # changed afterwards. The derived batch_size can be changed. BATCH_SIZE_ORIG = 10000 if 'batch_size' in active_filters: BATCH_SIZE_ORIG = active_filters['batch_size'] del active_filters['batch_size'] # max number of results to be shown LIMIT = None offset = 0 if 'size' in active_filters: LIMIT = active_filters['size'] del active_filters['size'] if LIMIT is not None and BATCH_SIZE_ORIG > LIMIT: BATCH_SIZE_ORIG = LIMIT if 'offset' in active_filters: offset = active_filters['offset'] del active_filters['offset'] batch_size = BATCH_SIZE_ORIG while True: try: current_offset = offset + BATCH_SIZE_ORIG * page new_batch = listing_call( *args, limit=batch_size, offset=current_offset, output_format=output_format, **active_filters ) except openml.exceptions.OpenMLServerNoResult: # we want to return an empty dict in this case break if output_format == 'dataframe': if len(result) == 0: result = new_batch else: result = result.append(new_batch, ignore_index=True) else: # For output_format = 'dict' or 'object' result.update(new_batch) if len(new_batch) < batch_size: break page += 1 if LIMIT is not None: # check if the number of required results has been achieved # always do a 'bigger than' check, # in case of bugs to prevent infinite loops if len(result) >= LIMIT: break # check if there are enough results to fulfill a batch if BATCH_SIZE_ORIG > LIMIT - len(result): batch_size = LIMIT - len(result) return result
21,210
def get_matching_based_variables(match_definitions:List[Dict[Literal['name', 'matching'],Any]], global_dict=None, local_dict=None, var_lenght=0): """ Function to construct an array with values depending on the condition provided by user The idea is to define things like, for example, 'region' for a table, indicating which analysis region is used. Example: Assume we want to have region="SRB" when "MET>100 && mt2<450". For ``MET=[50 ,150,250]`` and ``mt2=[300,400,500]``, when provided with argument ``matching_definitions=[{name:"SRB","matching":["np.logical_and(MET>100,mt2<450)"]}]`` will give output of ``[None,SRB, None]``. Args: match_definitions: list of dictionaries defining matching conditions and the value associated with the match. Each dictionary has to have field 'name' (value of variable when condition is met) and 'matching' -- list of cuts and indices for which the condition is met. Conditions are concacanated to each other. In the example above ``matching_definitions=[{name:"SRB","matching":["np.logical_and(MET>100,mt2<450)"]}`` is equivalent to ``matching_definitions=[{name:"SRB","matching":[1]}`` (index specifying position that matches) submission_dict: collections of variables and other known objects to be used in the transformation local_vars: yet another collection of variables known to be used in the transformation var_lenght: lenght of the corresponding variable/table (in case index is is chosen for matching specification) """ result=None for specification in match_definitions: var=specification.get('name',None) if(var is None): raise ValueError(f"matching_definitions have to have name for each specification.") cuts=specification.get('matching',[]) for cut in cuts: if(type(cut)==str): cutOutput=np.where(eval(cut,global_dict,local_dict),var,None) ToAppend=cutOutput.reshape(len(cutOutput),1) if(not result): result=ToAppend else: result=np.concatenate((result,ToAppend),axis=1) elif(type(cut)==int): if(cut>=len(cuts)): raise RuntimeError("lenght of cut table smaller than required index.") else: ToAppend=np.array([[None]]*len(var_lenght)) ToAppend[cut]=var if(not result): result=ToAppend else: result=np.concatenate((result,ToAppend),axis=1) else: raise TypeError("Variable cutDefinitions has improper content.") return result
21,211
def _AStar_graph(problem: BridgeProblem) -> (list, list):
    """Used for graphing, returns solution as well as all nodes in a list"""
    all_nodes = [problem.initial_node]
    pq = [(problem.initial_node.path_cost + problem.h(problem.initial_node.state),
           problem.initial_node)]
    closed = set()

    while True:
        assert pq
        priority, node = heappop(pq)
        if problem.goal_test(node):
            return problem.get_ancestors(node), all_nodes
        closed.add(node)
        children = problem.expand(node)
        for node in children:
            priority = node.path_cost + problem.h(node.state)
            bn = (priority, node)
            inpq = None
            for i, (_, pq_node) in enumerate(pq):
                if node == pq_node:
                    inpq = i
            if node not in closed and inpq is None:
                heappush(pq, bn)
            elif inpq is not None and bn < pq[inpq]:
                pq.pop(inpq)
                pq.append(bn)
                heapify(pq)
        all_nodes.extend(children)
21,212
def plot_setup(name, figsize=None, fontsize=9, font='paper', dpi=None):
    """ Setup a PDF page for plot.

    name: PDF file name. If not ending with .pdf, will automatically append.
    figsize: dimension of the plot in inches, should be an array of length two.
    fontsize: fontsize for legends and labels.
    font: font for legends and labels, 'paper' uses Times New Roman, 'default'
        uses default, a tuple of (family, font, ...) customizes font.
    dpi: resolution of the figure.
    """
    paper_plot(fontsize=fontsize, font=font)
    if not name.endswith('.pdf'):
        name += '.pdf'
    pdfpage = matplotlib.backends.backend_pdf.PdfPages(name)
    fig = matplotlib.pyplot.figure(figsize=figsize, dpi=dpi)
    return pdfpage, fig
21,213
def author_endyear(pub2author_df=None, colgroupby='AuthorId', datecol='Year', show_progress=False):
    """
    Calculate the year of last publication for each author.

    Parameters
    ----------
    pub2author_df : DataFrame, default None, Optional
        A DataFrame with the author2publication information.

    colgroupby : str, default 'AuthorId', Optional
        The DataFrame column with Author Ids.  If None then the database 'AuthorId' is used.

    datecol : str, default 'Year', Optional
        The DataFrame column with Date information.  If None then the database 'Year' is used.

    Returns
    -------
    DataFrame
        Productivity DataFrame with 2 columns: 'AuthorId', 'CareerLength'
    """
    newname_dict = zip2dict([str(datecol), '0'], ['EndYear']*2)
    return pub2author_df.groupby(colgroupby)[datecol].max().to_frame().reset_index().rename(columns=newname_dict)
21,214
def plot_spatial_3d(pos, output_file, box_size):
    """
    Plots the 3D spatial distribution of galaxies.

    Parameters
    ==========
    pos : ``numpy`` 3D array with length equal to the number of galaxies
        The position (in Mpc/h) of the galaxies.

    output_file : String
        Name of the file the plot will be saved as.

    Returns
    =======
    None. A plot will be saved as ``output_file``.
    """
    from mpl_toolkits.mplot3d import Axes3D
    from random import sample

    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")

    # Generate a subsample if necessary.
    num_gals = len(pos)
    sample_size = 10000
    if num_gals > sample_size:
        w = sample(list(np.arange(num_gals)), sample_size)
    else:
        w = np.arange(num_gals)

    ax.scatter(pos[w,0], pos[w,1], pos[w,2], alpha=0.5)

    ax.set_xlim([0.0, box_size])
    ax.set_ylim([0.0, box_size])
    ax.set_zlim([0.0, box_size])

    ax.set_xlabel(r"$\mathbf{x \: [h^{-1}Mpc]}$")
    ax.set_ylabel(r"$\mathbf{y \: [h^{-1}Mpc]}$")
    ax.set_zlabel(r"$\mathbf{z \: [h^{-1}Mpc]}$")

    fig.tight_layout()

    fig.savefig(output_file)
    print("Saved file to {0}".format(output_file))
    plt.close()
21,215
def _filter_switch_ip(data):
    """filter switch ip related params to db/api understandable format.

    Examples:
       {'switchIp': '10.0.0.1'} to {'ip_int': {'eq': int of '10.0.0.1'}}
       {'switchIpStart': '10.0.0.1'} to {'ip_int': {'ge': int of '10.0.0.1'}}
       {'switchIpEnd': '10.0.0.1'} to {'ip_int': {'le': int of '10.0.0.1'}}
       {'switchIpRange': '10.0.0.1,10.0.0.254'} to
       {'ip_int': {'between': [int of '10.0.0.1', int of '10.0.0.254']}}

    the switch ip related params can be declared multi times.
    """
    ip_filter = {}
    switch_ips = _get_data_list(data, 'switchIp')
    if switch_ips:
        ip_filter['eq'] = []
        for switch_ip in switch_ips:
            ip_filter['eq'].append(long(netaddr.IPAddress(switch_ip)))
    switch_start = _get_data(data, 'switchIpStart')
    if switch_start is not None:
        ip_filter['ge'] = long(netaddr.IPAddress(switch_start))
    switch_end = _get_data(data, 'switchIpEnd')
    if switch_end is not None:
        ip_filter['lt'] = long(netaddr.IPAddress(switch_end))
    switch_nets = _get_data_list(data, 'switchIpNetwork')
    if switch_nets:
        ip_filter['between'] = []
        for switch_net in switch_nets:
            network = netaddr.IPNetwork(switch_net)
            ip_filter['between'].append((network.first, network.last))
    switch_ranges = _get_data_list(data, 'switchIpRange')
    if switch_ranges:
        ip_filter.setdefault('between', [])
        for switch_range in switch_ranges:
            ip_start, ip_end = switch_range.split(',')
            # append a (start, end) pair; the original passed two arguments
            # to list.append, which raises a TypeError
            ip_filter['between'].append(
                (long(netaddr.IPAddress(ip_start)),
                 long(netaddr.IPAddress(ip_end)))
            )
    if ip_filter:
        data['ip_int'] = ip_filter
    _clean_data(
        data,
        [
            'switchIp', 'switchIpStart', 'switchIpEnd',
            'switchIpNetwork', 'switchIpRange'
        ]
    )
21,216
def to_gif(images, fps):
    """Converts image sequence (4D numpy array) to gif."""
    imageio.mimsave('./animation.gif', images, fps=fps)
    return embed.embed_file('./animation.gif')
21,217
async def stop_all(consumers):
    """Stop all consumers.
    """
    # pylint: disable=expression-not-assigned
    [await a.stop() for a in consumers.values()]
21,218
def get_half_max_down(signal, peak): """See `get_half_max_up` for explanation. This is a minor modification of the above function. """ if peak['peak'] == 0: return np.nan fflag = False half_max = signal[peak['peak']] / 2 falling_signal = signal[peak['peak']:(peak['right']+1)] closest_idx = (np.abs(falling_signal - half_max)).argmin() + peak['peak'] if closest_idx <= 1 or closest_idx >= 98: logging.warning('HM_DOWN: half-max too close to end of signal') return np.nan # If the signal at the index is nearly equal to half max, take that index if np.allclose(half_max, signal[closest_idx]): half_max_point = closest_idx # ...otherwise interpolate else: ix = -1 triplet = signal[(closest_idx - 1):(closest_idx + 2)] if triplet[0] > half_max > triplet[1]: ix = 0 elif triplet[1] > half_max > triplet[2]: ix = 1 else: logging.warning('HM_DOWN: simple method for interpolating' ' half-max decay time failed') fflag = True if ix != -1: y = [ix,ix+1] x = [triplet[ix], triplet[ix+1]] f = interp1d(x,y) trip_coord = f(half_max) half_max_point = closest_idx + (trip_coord - 1) if fflag == True: half_max_down = np.nan else: half_max_down = float(half_max_point - peak['peak']) return half_max_down
21,219
def connect2server(env=None, key=None, keyfile=None, logger=None):
    """Sets up credentials for accessing the server.  Generates a key using info
       from the named keyname in the keyfile and checks that the server can be
       reached with that key.
       Also handles keyfiles stored in s3 using the env param"""
    if key and keyfile:
        keys = None
        if os.path.isfile(keyfile):
            with io.open(keyfile, 'r') as kf:
                keys_json_string = kf.read()
                keys = json.loads(keys_json_string)
        if keys:
            key = keys.get(key)
    try:
        auth = get_authentication_with_server(key, env)
    except Exception:
        logger.error("Authentication failed")
        sys.exit(1)
    return auth
21,220
def ternary(c): """ Encodes the circuit with ternary values Parameters ---------- c : Circuit Circuit to encode. Returns ------- Circuit Encoded circuit. """ if c.blackboxes: raise ValueError(f"{c.name} contains a blackbox") t = copy(c) # add dual nodes for n in c: if c.type(n) in ["and", "nand"]: t.add(f"{n}_x", "and") t.add( f"{n}_x_in_fi", "or", fanout=f"{n}_x", fanin=[f"{p}_x" for p in c.fanin(n)], ) t.add(f"{n}_0_not_in_fi", "nor", fanout=f"{n}_x") for p in c.fanin(n): t.add( f"{p}_is_0", "nor", fanout=f"{n}_0_not_in_fi", fanin=[p, f"{p}_x"] ) elif c.type(n) in ["or", "nor"]: t.add(f"{n}_x", "and") t.add( f"{n}_x_in_fi", "or", fanout=f"{n}_x", fanin=[f"{p}_x" for p in c.fanin(n)], ) t.add(f"{n}_1_not_in_fi", "nor", fanout=f"{n}_x") for p in c.fanin(n): t.add(f"{p}_is_1", "and", fanout=f"{n}_1_not_in_fi", fanin=p) t.add(f"{p}_not_x", "not", fanout=f"{p}_is_1", fanin=f"{p}_x") elif c.type(n) in ["buf", "not"]: p = c.fanin(n).pop() t.add(f"{n}_x", "buf", fanin=f"{p}_x") elif c.type(n) in ["output"]: p = c.fanin(n).pop() t.add(f"{n}_x", "output", fanin=f"{p}_x") elif c.type(n) in ["xor", "xnor"]: t.add(f"{n}_x", "or", fanin=(f"{p}_x" for p in c.fanin(n))) elif c.type(n) in ["0", "1"]: t.add(f"{n}_x", "0") elif c.type(n) in ["input"]: t.add(f"{n}_x", "input") else: raise ValueError(f"Node {n} has unrecognized type: {c.type(n)}") return t
21,221
def eval_lane_per_frame(
    gt_file: str, pred_file: str, bound_ths: List[float]
) -> Dict[str, np.ndarray]:
    """Compute mean,recall and decay from per-frame evaluation."""
    task2arr: Dict[str, np.ndarray] = dict()  # str -> 2d array
    gt_byte = np.asarray(Image.open(gt_file))
    pred_byte = np.asarray(Image.open(pred_file))
    gt_foreground = get_foreground(gt_byte)
    pd_foreground = get_foreground(pred_byte)

    for task_name, class_func in sub_task_funcs.items():
        task_scores: List[List[float]] = []
        for value in range(len(sub_task_cats[task_name])):
            gt_mask = class_func(gt_byte, value) & gt_foreground
            pd_mask = class_func(pred_byte, value) & pd_foreground
            cat_scores = [
                eval_lane_per_threshold(gt_mask, pd_mask, bound_th)
                for bound_th in bound_ths
            ]
            task_scores.append(cat_scores)
        task2arr[task_name] = np.array(task_scores)
    return task2arr
21,222
def membrane(field, voxel_size=1, bound='dct2', dim=None, weights=None): """Precision matrix for the Membrane energy Note ---- .. This is exactly equivalent to SPM's membrane energy Parameters ---------- field : (..., *spatial) tensor voxel_size : float or sequence[float], default=1 bound : str, default='dct2' dim : int, default=field.dim() weights : (..., *spatial) tensor, optional Returns ------- field : (..., *spatial) tensor """ if weights is None: return _membrane_l2(field, voxel_size, bound, dim) def mul_(x, y): """Smart in-place multiplication""" if ((torch.is_tensor(x) and x.requires_grad) or (torch.is_tensor(y) and y.requires_grad)): return x * y else: return x.mul_(y) backend = dict(dtype=field.dtype, device=field.device) dim = dim or field.dim() if torch.is_tensor(voxel_size): voxel_size = make_vector(voxel_size, dim, **backend) dims = list(range(field.dim()-dim, field.dim())) fieldf = diff(field, dim=dims, voxel_size=voxel_size, side='f', bound=bound) weights = torch.as_tensor(weights, **backend) fieldf = mul_(fieldf, weights[..., None]) fieldb = diff(field, dim=dims, voxel_size=voxel_size, side='b', bound=bound) fieldb = mul_(fieldb, weights[..., None]) dims = list(range(fieldb.dim() - 1 - dim, fieldb.dim() - 1)) fieldb = div(fieldb, dim=dims, voxel_size=voxel_size, side='b', bound=bound) dims = list(range(fieldf.dim()-1-dim, fieldf.dim()-1)) field = div(fieldf, dim=dims, voxel_size=voxel_size, side='f', bound=bound) del fieldf field += fieldb field *= 0.5 return field
21,223
def test_can_accept_colormap_dict():
    """Test that we can accept vispy colormaps in a dictionary"""
    colors = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    cmap = ensure_colormap({'colors': colors, 'name': 'special_name'})
    assert isinstance(cmap, Colormap)
    np.testing.assert_almost_equal(cmap.colors, colors)
    assert cmap.name == 'special_name'
21,224
def get_uint64(dgram: bytes, start_index: int) -> Tuple[int, int]:
    """Get a 64-bit big-endian unsigned integer from the datagram.

    Args:
      dgram: A datagram packet.
      start_index: An index where the integer starts in the datagram.

    Returns:
      A tuple containing the integer and the new end index.

    Raises:
      ParseError if the datagram could not be parsed.
    """
    try:
        if len(dgram[start_index:]) < _UINT64_DGRAM_LEN:
            raise ParseError('Datagram is too short')
        return (
            struct.unpack('>Q',
                          dgram[start_index:start_index + _UINT64_DGRAM_LEN])[0],
            start_index + _UINT64_DGRAM_LEN)
    except (struct.error, TypeError) as e:
        raise ParseError('Could not parse datagram %s' % e)
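For reference, a minimal standalone sketch of the same big-endian decode, assuming `_UINT64_DGRAM_LEN` is 8 bytes (that constant is not shown in the snippet above):

import struct

UINT64_LEN = 8  # assumed value of _UINT64_DGRAM_LEN

dgram = struct.pack('>Q', 1234567890123456789) + b'rest'
value = struct.unpack('>Q', dgram[0:UINT64_LEN])[0]  # -> 1234567890123456789
new_index = 0 + UINT64_LEN                           # -> 8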
21,225
def fftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
    """
    Similar to np.fft.fftshift but applies to PyTorch Tensors

    Args:
        x: A PyTorch tensor.
        dim: Which dimension to fftshift.

    Returns:
        fftshifted version of x.
    """
    if dim is None:
        # this weird code is necessary for torch.jit.script typing
        dim = [0] * (x.dim())
        for i in range(1, x.dim()):
            dim[i] = i

    # also necessary for torch.jit.script
    shift = [0] * len(dim)
    for i, dim_num in enumerate(dim):
        shift[i] = x.shape[dim_num] // 2

    return roll(x, shift, dim)
21,226
def flatten(x):
    """Flattens nested list"""
    if isinstance(x, list):
        return [a for i in x for a in flatten(i)]
    else:
        return [x]
21,227
def get_capture_points_gazebo(bag, odom_topic='/gazebo/model_states', sync_topic='/mavros/imu/data_raw', camera_freq=20, sync_topic_freq=100, method='every'): """ method(string): method for sampling capturing points. 'every': Sample IMU for every n msgs, and then capture odometry msg which has the closest timestamp. This requires the existence of odom_msg for every imu_msg. """ odom_msg_list = [] odom_time_list = [] odom_stamp_list = [] capture_time_list = [] sync_topic_num = 0 for topic, msg, t in bag: if topic==odom_topic: odom_msg_list.append(msg) odom_time_list.append(t.to_time()) odom_stamp_list.append(copy.deepcopy(t)) for topic, msg, t in bag: if topic==sync_topic: if odom_time_list[0] > t.to_time(): continue if sync_topic_num % (int(sync_topic_freq/camera_freq)) == 0: capture_time_list.append(t.to_time()) sync_topic_num += 1 assert len(odom_msg_list)==len(odom_time_list) and len(odom_msg_list)==len(odom_stamp_list), 'length of odom_(msg/time/stamp)_list is not equal.' # start sampling odometry capture_points = [] curr_odom_idx = 0 for idx, capture_time in enumerate(capture_time_list): # take an odometry msg which has the timestamp closest to capture_time if capture_time < min(odom_time_list): continue while abs(capture_time - odom_time_list[curr_odom_idx]) >= 5*10**(-5): curr_odom_idx += 1 if curr_odom_idx >= len(odom_time_list): break if curr_odom_idx >= len(odom_time_list): break if odom_topic=='/gazebo/gazebo_states': capture_point = get_capture_point_from_gazebo_model_states(idx, odom_msg_list[curr_odom_idx], odom_stamp_list[curr_odom_idx]) elif odom_topic=='/odometry': capture_point = get_capture_point_from_navmsgs_odom(idx, odom_msg_list[curr_odom_idx], odom_stamp_list[curr_odom_idx]) capture_points.append(capture_point) return capture_points
21,228
async def test_get_connected_devices_no_ip(event_loop, mocker):
    """Test for get asuswrt_data and not requiring ip."""
    mock_run_cmd(mocker, [WL_DATA, ARP_DATA, NEIGH_DATA, LEASES_DATA])
    scanner = AsusWrt(host="localhost", port=22, mode="ap", require_ip=False)
    data = await scanner.async_get_connected_devices()
    assert WAKE_DEVICES_NO_IP == data
21,229
def parse_input():
    """Parse input and return array of calendar

    A user can either pass the calendar via the stdin or via one or several
    icalendar files. This method will parse the input and return an array
    of valid icalendar
    """
    input_data = ''
    calendars = []
    for line in fileinput.input():
        if 'BEGIN:VCALENDAR' in line:
            calendars.append(input_data)
            input_data = line
        else:
            input_data += line
    calendars.append(input_data)
    return calendars[1:]
21,230
def identity(obj):
    """Returns the ``obj`` parameter itself

    :param obj: The parameter to be returned
    :return: ``obj`` itself

    >>> identity(5)
    5

    >>> foo = 2
    >>> identity(foo) is foo
    True
    """
    return obj
21,231
def assignCrowdingDist(individuals):
    """Assign a crowding distance to each individual's fitness. The
    crowding distance can be retrieve via the :attr:`crowding_dist`
    attribute of each individual's fitness.
    """
    if len(individuals) == 0:
        return

    distances = [0.0] * len(individuals)
    crowd = [(ind.fitness.values, i) for i, ind in enumerate(individuals)]

    nobj = len(individuals[0].fitness.values)

    for i in xrange(nobj):
        crowd.sort(key=lambda element: element[0][i])
        distances[crowd[0][1]] = float("inf")
        distances[crowd[-1][1]] = float("inf")
        if crowd[-1][0][i] == crowd[0][0][i]:
            continue
        norm = nobj * float(crowd[-1][0][i] - crowd[0][0][i])
        for prev, cur, next in zip(crowd[:-2], crowd[1:-1], crowd[2:]):
            distances[cur[1]] += (next[0][i] - prev[0][i]) / norm

    for i, dist in enumerate(distances):
        individuals[i].fitness.crowding_dist = dist
21,232
def loadAllProteins(proteinfiles, basefile, chromosomes, matrixfile,correctmatrix,celltype,resolution,internaloutdir,chromsizefile): """ Function for binning ChIP-seq data. Data must be provided in narrowPeak, broadPeak or bigwig format. Simultaneous usage of different formats is possible. Outputs a ph5 file which contains binned proteins per chromosome """ #if matrixfile has been provided, internalOutDir must also be present if matrixfile and not internaloutdir: msg = "Error: If --matrixFile / -mif is provided, --internalOutDir / -iod cannot be empty\n" sys.exit(msg) ### checking extensions of files if not checkExtension(basefile, '.ph5'): basefilename = os.path.splitext(basefile)[0] basefile = basefilename + ".ph5" msg = "basefile must have .ph5 file extension\n" msg += "renamed to {:s}" print(msg.format(basefile)) #remove existing basefiles, since they sometimes caused problems if os.path.isfile(basefile): os.remove(basefile) #split up the inputs and separate by file extensions protPeakFileList = [fileName for fileName in proteinfiles if checkExtension(fileName,['.narrowPeak', '.broadPeak'])] bigwigFileList = [fileName for fileName in proteinfiles if checkExtension(fileName, 'bigwig')] wrongFileExtensionList = [fileName for fileName in proteinfiles \ if not fileName in protPeakFileList and not fileName in bigwigFileList] if wrongFileExtensionList: msg = "The following input files are neither narrowPeak / broadPeak nor bigwig files and cannot be processed:\n" msg += ", ".join(wrongFileExtensionList) print(msg) if not protPeakFileList and not bigwigFileList: msg = "Nothing to process. Exiting" print(msg) return ### creation of parameter set params = initParamDict() params['resolution'] = resolution params['cellType'] = celltype ### conversion of desired chromosomes to list if chromosomes: chromosomeList = chromosomes.split(',') chromosomeList = [chrom.strip() for chrom in chromosomeList] else: chromosomeList = [str(chrom) for chrom in range(1, 23)] params['chromSizes'] = getChromSizes(chromosomeList, chromsizefile) params['proteinFileNames'] = [os.path.basename(x) for x in protPeakFileList + bigwigFileList] ###load protein data from files and store into python objects proteinData = getProteinFiles(protPeakFileList + bigwigFileList) ### iterate over all possible combinations of settings (merging) for setting in conf.getBaseCombinations(): ### get settings and file tag for each combination params['mergeOperation'] = setting['mergeOperation'] proteinTag =createProteinTag(params) for chromosome in tqdm(params['chromSizes'], desc= 'Iterate chromosomes'): ### get protein data from each object into a dataframe and store in a list binnedProteins = [] for proteinfile in proteinData.keys(): binnedProteins.append(loadProteinData(proteinData[proteinfile], chromosome, params)) ### merge the binned protein dataframes from the list into a single dataframe for i in range(len(binnedProteins)): binnedProteins[i].columns = [str(i)] #rename signalValue columns to make joining easy maxBinInt = math.ceil(params['chromSizes'][chromosome] / int(resolution)) proteinDf = pd.DataFrame(columns=['bin_id']) proteinDf['bin_id'] = list(range(0,maxBinInt)) proteinDf.set_index('bin_id', inplace=True) proteinDf = proteinDf.join(binnedProteins, how='outer') proteinDf.fillna(0.0,inplace=True) ### store binned proteins in base file proteinChromTag = proteinTag + "_chr" + chromosome store = pd.HDFStore(basefile) store.put(proteinChromTag, proteinDf, format='table') store.get_storer(proteinChromTag).attrs.metadata = params 
store.close() #if a matrixfile has been provided, cut it into chromosomes #and store the resulting matrices internally #these matrices can later be used for training if matrixfile: for chromosome in params['chromSizes']: cutHicMatrix(matrixfile, chromosome, internaloutdir, basefile) if correctmatrix: correctHiCMatrix(matrixfile, chromosome, internaloutdir) params['matrixCorrection'] = correctmatrix
21,233
def percent_clipper(x, percentiles):
    """
    Takes data as np.ndarray and percentiles as array-like

    Returns clipped ndarray
    """
    LOWERBOUND, UPPERBOUND = np.percentile(x, [percentiles[0], percentiles[1]])
    return np.clip(x, LOWERBOUND, UPPERBOUND)
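A small usage sketch (illustrative only; assumes NumPy is imported as `np` as in the function above):

import numpy as np

data = np.array([-100.0, 1.0, 2.0, 3.0, 4.0, 100.0])
clipped = percent_clipper(data, (5, 95))
# values below the 5th and above the 95th percentile are clamped to those bounds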
21,234
def check_prob_vector(p):
    """
    Check if a vector is a probability vector.

    Args:
        p, array/list.
    """
    assert np.all(p >= 0), p
    assert np.isclose(np.sum(p), 1), p

    return True
21,235
def relabel_subgraph(): """ This function adapts an existing sampler by relabelling the vertices in the edge list to have dense index. Returns ------- sample: a function, that when invoked, produces a sample for the input function. """ def relabel(edge_list, positive_vertices): shape = edge_list.shape vertex_index, edge_list = np.unique(edge_list, return_inverse=True) edge_list = edge_list.astype(np.int32).reshape(shape) # relabel the positive vertices positive_verts = np.searchsorted(vertex_index, positive_vertices) is_positive = np.zeros_like(vertex_index) is_positive[positive_verts] = 1 return edge_list, vertex_index, is_positive def sample(data): edge_list = data['edge_list'] positive_vertices = data.get('positive_vertices', tf.unique(tf.reshape(edge_list, [-1]))[0]) vertex_index = data.get('vertex_index', None) if isinstance(edge_list, tf.Tensor): new_edge_list, new_vertex_index, is_positive = tf.py_func(relabel, [edge_list, positive_vertices], [tf.int32, tf.int32, tf.int32], stateful=False) new_edge_list.set_shape(edge_list.shape) new_vertex_index.set_shape([None]) is_positive.set_shape([None]) else: new_edge_list, new_vertex_index, is_positive = relabel(edge_list, positive_vertices) if vertex_index is not None: if isinstance(vertex_index, tf.Tensor): vertex_index = tf.gather(vertex_index, new_vertex_index, name='resample_vertex_index') else: vertex_index = vertex_index[new_vertex_index] else: vertex_index = new_vertex_index return {**data, 'edge_list': new_edge_list, 'vertex_index': vertex_index, 'is_positive': is_positive} return sample
21,236
def get_activation_bytes(input_file=None, checksum=None):
    """
    Get the activation bytes from the .aax checksum using rainbow tables.
    None is returned if the activation bytes can't be computed.
    """
    if (not input_file and not checksum) or (input_file and checksum):
        raise ValueError('Please specify only one of [input_file, checksum]')

    if input_file:
        checksum = get_checksum(input_file)

    _, stdout, _ = run_cmd(
        ['./rcrack', '.', '-h', checksum],
        cwd=os.path.join(_SCRIPT_PATH, 'tables')
    )
    activation_bytes = re.findall('hex:(.*)', stdout)[0]
    return activation_bytes
21,237
def init_binary(mocker):
    """Initialize a dummy BinaryDigitalAssetFile for testing."""
    mocker.patch.multiple(
        houdini_package_runner.items.digital_asset.BinaryDigitalAssetFile,
        __init__=lambda x, y, z: None,
    )

    def _create():
        return houdini_package_runner.items.digital_asset.BinaryDigitalAssetFile(
            None, None
        )

    return _create
21,238
def with_setup_(setup=None, teardown=None):
    """Decorator like `with_setup` of nosetest but which can be applied to any
    function"""
    def decorated(function):
        def app(*args, **kwargs):
            if setup:
                setup()
            try:
                function(*args, **kwargs)
            finally:
                if teardown:
                    teardown()
        return app
    return decorated
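A short usage sketch (the setup/teardown helpers below are hypothetical, only to illustrate how the decorator is applied):

state = {}

def make_state():
    state['db'] = 'connected'

def drop_state():
    state.clear()

@with_setup_(setup=make_state, teardown=drop_state)
def test_something():
    assert state['db'] == 'connected'

test_something()   # runs make_state, the test body, then drop_state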
21,239
def g_model(padmode, padvalue, interpolation, resource_names, datasource, binning, output): """Derives a model from DATASOURCE with given BINNING. The model is written to OUTPUT. BINNING can be a path to a previously created binning, or custom bin edges in all dimension: dimensions are separated by colons, edge values in each dimension are separated by commas. """ # datasource checks try: source = DataSourceIO.read(datasource) except: raise click.FileError(datasource, "does not exist or is not readable.") # validate dimensionality match between binning and source if binning.dimensions != len(source.domain): raise click.UsageError( "Dimensions of binning (%d) and datasource (%d) mismatch." % (binning.dimensions, len(source.domain))) # resources checks: split list and verify dim match with source if not resource_names is None: resource_names = resource_names.split(",") if len(resource_names) != len(source.column_names): raise click.BadOptionUsage("resource-names", "Dimensions of resource names (%d) and datasource (%d) mismatch." % (len(resource_names), len(source.column_names))) # convert model params to enums and create ModelParams object model_params = ModelParams( Pad_Modes[padmode.upper()], Pad_Values[padvalue.upper()], Interpolation_Modes[interpolation.upper()] ) # histogram the data with given binning histogram = source.get_histogram(binning) model = Model.from_histogram(model_params, histogram, resource_names) model.to_file(output)
21,240
def post_options():
    """Standard arguments and options for posting timeseries readings.
    """
    options = [
        click.argument('port'),
        click.argument('value', type=JSONParamType()),
        click.option('--timestamp', metavar='DATE',
                     help='the time of the reading'),
    ]

    def wrapper(func):
        func.__doc__ += _post_options_docs
        for option in reversed(options):
            func = option(func)
        return func
    return wrapper
21,241
def make_filesystem(device, block_device): """ Synchronously initialize a device file with an ext4 filesystem. :param FilePath device: The path to the file onto which to put the filesystem. Anything accepted by ``mkfs`` is acceptable (including a regular file instead of a device file). :param bool block_device: If ``True`` then the device is expected to be a block device and the ``-F`` flag will not be passed to ``mkfs``. If ``False`` then the device is expected to be a regular file rather than an actual device and ``-F`` will be passed to ``mkfs`` to force it to create the filesystem. It's possible to detect whether the given file is a device file or not. This flag is required anyway because it's about what the caller *expects*. This is meant to provide an extra measure of safety (these tests run as root, this function potentially wipes the filesystem from the device specified, this could have bad consequences if it goes wrong). """ options = [] if block_device and not device.isBlockDevice(): raise Exception( "{} is not a block device but it was expected to be".format( device.path ) ) elif device.isBlockDevice() and not block_device: raise Exception( "{} is a block device but it was not expected to be".format( device.path ) ) if not block_device: options.extend([ # Force mkfs to make the filesystem even though the target is not a # block device. b"-F", ]) command = [b"mkfs"] + options + [b"-t", b"ext4", device.path] run_process(command)
21,242
def _partial_read_stats(
    args: argparse.Namespace,
    stats: Dict,
    input_data: Dict,
):
    """Calculate statistics about partially read files.

    :param args: Parsed command line arguments.
    :param stats: Statistics data structure.
    :param input_data: Input data from registry for partially read file.
    """
    if args.verbose:
        percent = (input_data['offset'] / input_data['source_size']) * 100.0
        LOGGER.info("Partially read file (%0.2f%%): %s [offset=%s, size=%s]",
                    percent, input_data['source'], input_data['offset'],
                    input_data['source_size'])
    if args.show_summary:
        stats['count_partial'] += 1
21,243
def add_vcz_parameters(this_flux_lm, which_gate: str = None): """ Adds to `this_flux_lm` the necessary parameters used for the VCZ flux waveform including corrections """ this_flux_lm.add_parameter( "vcz_amp_dac_at_11_02_%s" % which_gate, docstring="DAC amplitude (in the case of HDAWG) at the 11-02 " "interaction point. NB: the units might be different for some " "other AWG that is distinct from the HDAWG.", parameter_class=ManualParameter, vals=vals.Numbers(0.0, 10.0), initial_value=0.5, unit="a.u.", label="DAC amp. at the interaction point", ) this_flux_lm.add_parameter( "vcz_amp_sq_%s" % which_gate, docstring="Amplitude of the square parts of the NZ pulse. " "1.0 means qubit detuned to the 11-02 interaction point.", parameter_class=ManualParameter, vals=vals.Numbers(0.0, 10.0), initial_value=1.0, unit="a.u.", label="Square relative amp.", ) this_flux_lm.add_parameter( "vcz_amp_fine_%s" % which_gate, docstring="Amplitude of the single sample point inserted at " "the end of the first half of the NZ pulse and at the " "beginning of the second half. " "1.0 means same amplitude as `sq_amp_XX`.", parameter_class=ManualParameter, vals=vals.Numbers(0.0, 1.0), initial_value=.5, unit="a.u.", label="Fine tuning amp.", ) this_flux_lm.add_parameter( "vcz_use_amp_fine_%s" % which_gate, docstring="", parameter_class=ManualParameter, vals=vals.Bool(), initial_value=True, label="Add extra point with amplitude `vcz_amp_fine_XX`?", ) this_flux_lm.add_parameter( "vcz_amp_q_ph_corr_%s" % which_gate, docstring="Amplitude at the squares of the NZ pulse for single " "qubit phase correction.", parameter_class=ManualParameter, vals=vals.Numbers(0.0, 1.0), initial_value=0., unit="a.u.", label="Amp. phase correction", ) this_flux_lm.add_parameter( "vcz_time_q_ph_corr_%s" % which_gate, docstring="Total time of the single qubit phase correction NZ pulse.", parameter_class=ManualParameter, vals=vals.Numbers(0.0, 500e-9), initial_value=0., unit="s", label="Time phase correction", ) this_flux_lm.add_parameter( "vcz_correct_q_phase_%s" % which_gate, docstring="", parameter_class=ManualParameter, vals=vals.Bool(), initial_value=False, label="Correct single Q phase?", ) this_flux_lm.add_parameter( "vcz_time_single_sq_%s" % which_gate, docstring="Duration of each square. " "You should set it close to half speed limit (minimum " "time required to perform a full swap, i.e. 
11 -> 02 -> 11)", parameter_class=ManualParameter, vals=vals.Numbers(1.0 / 2.4e9, 500e-9), initial_value=15.5555555e-9, unit="s", label="Duration single square", ) this_flux_lm.add_parameter( "vcz_time_middle_%s" % which_gate, docstring="Time between the two square parts.", parameter_class=ManualParameter, vals=vals.Numbers(0., 500e-9), initial_value=0., unit="s", label="Time between squares", ) this_flux_lm.add_parameter( "vcz_time_pad_%s" % which_gate, docstring="Time used to align different cz pulses.", parameter_class=ManualParameter, vals=vals.Numbers(0., 500e-9), initial_value=0, unit="s", label="Time padding before and after main pulse", ) this_flux_lm.add_parameter( "vcz_time_before_q_ph_corr_%s" % which_gate, docstring="Time after main pulse before single qubit phase " "correction.", parameter_class=ManualParameter, vals=vals.Numbers(0., 500e-9), initial_value=0., unit="s", label="Time before correction", ) this_flux_lm.add_parameter( "vcz_use_asymmetric_amp_%s" % which_gate, docstring="Flag to turn on asymmetric amplitudes of the SNZ pulse", parameter_class=ManualParameter, vals=vals.Bool(), initial_value=False, label="Use asymmetric SNZ pulse amplitudes", ) this_flux_lm.add_parameter( "vcz_amp_pos_%s" % which_gate, docstring="Amplitude of positive part of SNZ pulse, " "used only if vcz_use_asymmetric_amp is true.", parameter_class=ManualParameter, vals=vals.Numbers(0.0, 10.0), initial_value=1.0, unit="a.u.", label="Positive SNZ amplitude, if asymmetric is used.", ) this_flux_lm.add_parameter( "vcz_amp_neg_%s" % which_gate, docstring="Amplitude of negative part of SNZ pulse, " "used only if vcz_use_asymmetric_amp is true.", parameter_class=ManualParameter, vals=vals.Numbers(0.0, 10.0), initial_value=1.0, unit="a.u.", label="Negative SNZ amplitude, if asymmetric is used.", ) for specificity in ["coarse", "fine"]: this_flux_lm.add_parameter( "vcz_{}_optimal_hull_{}".format(specificity, which_gate), initial_value=np.array([]), label="{} hull".format(specificity), docstring=( "Stores the boundary points of a optimal region 2D region " "generated from a landscape. Intended for data points " "(x, y) = (`vcz_amp_sq_XX`, `vcz_time_middle_XX`)" ), parameter_class=ManualParameter, vals=vals.Arrays(), ) this_flux_lm.add_parameter( "vcz_{}_cond_phase_contour_{}".format(specificity, which_gate), initial_value=np.array([]), label="{} contour".format(specificity), docstring=( "Stores the points for an optimal conditional phase " "contour generated from a landscape. Intended for data points " "(x, y) = (`vcz_amp_sq_XX`, `vcz_time_middle_XX`) " "typically for the 180 deg cond. phase." ), parameter_class=ManualParameter, vals=vals.Arrays(), )
21,244
def bufferSetDefaultAllocMode(defAllocMode, defInitialSize):
    """
    Sets new global default allocation mode and minimal initial size.
    defAllocMode   : the new default buffer allocation mode.
    defInitialSize : the new default buffer minimal initial size.
    """
    xmlsecmod.bufferSetDefaultAllocMode(defAllocMode, defInitialSize)
21,245
def test_in_scalar():
    """
    Testing the in_scalar function
    """
    scalar = in_scalar(5)
    assert scalar.units == u.dimensionless
    assert scalar.magnitude == 5

    scalar = in_scalar(5 * u.dimensionless)
    assert scalar.units == u.dimensionless
    assert scalar.magnitude == 5

    with pytest.raises(ValueError):
        in_scalar(u.Quantity(5, u.m))
21,246
def authenticated(f):
    """Decorator for authenticating with the Hub"""

    @wraps(f)
    def decorated(*args, **kwargs):
        token = request.cookies.get(auth.cookie_name)
        if token:
            user = auth.user_for_token(token)
        else:
            user = None
        if user:
            return f(user, *args, **kwargs)
        else:
            # redirect to login url on failed auth
            state = auth.generate_state(next_url=request.path)
            response = make_response(
                redirect(auth.login_url + '&state=%s' % state)
            )
            response.set_cookie(auth.state_cookie_name, state)
            return response

    return decorated
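A sketch of how such a decorator is typically attached to a Flask view; the route name and the shape of the `user` object are assumptions, not taken from the snippet above:

from flask import Flask

app = Flask(__name__)

@app.route('/profile')   # hypothetical route
@authenticated
def profile(user):
    # `user` is whatever auth.user_for_token() returned; a 'name' key is assumed here
    return "Hello, %s" % user.get('name', 'unknown')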
21,247
def warn(msg):
    """
    Print a warning message.
    """
    print("WARNING: {}".format(msg), file=sys.stderr)
21,248
def json_complex_hook(dct):
    """
    Convert an encoded complex number to its python representation.

    :param dct: (dict) json encoded complex number (__complex__)
    :return: python complex number
    """
    if isinstance(dct, dict):
        if '__complex__' in dct:
            parts = dct['__complex__']
            assert len(parts) == 2
            return parts[0] + parts[1] * 1j
    return dct
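For illustration, a round trip using this hook; the `__complex__` wire format follows the docstring, while the encoder side is an assumption:

import json

payload = '{"z": {"__complex__": [1.0, 2.0]}}'   # assumed encoder output
decoded = json.loads(payload, object_hook=json_complex_hook)
# decoded == {"z": (1+2j)}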
21,249
def reg_logLiklihood(x, weights, y, C):
    """Regularized log-likelihood function (cost function to be minimized in logistic
    regression classification with L2 regularization)

    Parameters
    -----------
    x : {array-like}, shape = [n_samples, n_features + 1]
        feature vectors. Note, first column of x must be a vector of ones.

    weights : 1d-array, shape = [1, 1 + n_features]
        Coefficients that weight each samples feature vector

    y : list, shape = [n_samples,], values = 1|0
        target values

    C : float
        Regularization parameter. C is equal to 1/lambda

    Returns
    -----------
    Value of regularized log-likelihood function with the given feature values,
    weights, target values, and regularization parameter
    """
    z = np.dot(x, weights)
    reg_term = (1 / (2 * C)) * np.dot(weights.T, weights)

    return -1 * np.sum((y * np.log(logistic_func(z))) +
                       ((1 - y) * np.log(1 - logistic_func(z)))) + reg_term
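A tiny numeric sketch of the same cost; a sigmoid is defined locally because `logistic_func` is not shown in the snippet above:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

X = np.array([[1.0, 0.5], [1.0, -1.5]])   # first column is the bias term
w = np.array([0.1, -0.2])
y = np.array([1, 0])
C = 1.0

z = X.dot(w)
cost = -np.sum(y * np.log(sigmoid(z)) + (1 - y) * np.log(1 - sigmoid(z))) \
       + (1 / (2 * C)) * w.dot(w)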
21,250
def two_dimension_heatmap(data, xmin, xmax, ymin, ymax):
    """
    Args:
        nparray with two dimensions
    """
    # interpolation gaussian
    plt.xlabel("Packet size (bit)")
    plt.ylabel("bit error rate")
    image = plt.imshow(
        data,
        cmap="Greens",
        interpolation="none",
        extent=[xmin, xmax, ymax, ymin],
        aspect="auto",
    )
    plt.colorbar(image)
    plt.show()
21,251
def smooth_internal(xyzlist, atom_names, width, allpairs=False, w_morse=0.0, rep=False, anchor=-1, window='hanning', **kwargs): """Smooth a trajectory by transforming to redundant, internal coordinates, running a 1d timeseries smoothing algorithm on each DOF, and then reconstructing a set of consistent cartesian coordinates. TODO: write this function as a iterator that yields s_xyz, so that they can be saved to disk (async) immediately when they're produced. Parameters ---------- xyzlist : np.ndarray Cartesian coordinates atom_names : array_like of strings The names of the atoms. Required for determing connectivity. width : float Width for the smoothing kernels allpairs : bool Use all interatomic distances (not just the bonds) w_morse: float Weight of the Morse potential in the smoothing window: string, default='hanning' Type of window to perform the averaging Other Parameters ---------------- bond_width : float Override width just for the bond terms angle_width : float Override width just for the angle terms dihedral_width : float Override width just for the dihedral terms xyzlist_guess : Cartesian coordinates to use as a guess during the reconstruction from internal xyzlist_match : Cartesian coordinates to use as a guess during the reconstruction from internal Returns ------- smoothed_xyzlist : np.ndarray """ bond_width = kwargs.pop('bond_width', width) angle_width = kwargs.pop('angle_width', width) dihedral_width = kwargs.pop('dihedral_width', width) xyzlist_guess = kwargs.pop('xyzlist_guess', xyzlist) xyzlist_match = kwargs.pop('xyzlist_match', None) for key in list(kwargs.keys()): raise KeyError('Unrecognized key, %s' % key) ibonds, iangles, idihedrals = None, None, None s_bonds, s_angles, s_dihedrals = None, None, None ibonds, iangles, idihedrals = union_connectivity(xyzlist, atom_names, allpairs=allpairs) # get the internal coordinates in each frame bonds = core.bonds(xyzlist, ibonds) angles = core.angles(xyzlist, iangles) dihedrals = core.dihedrals(xyzlist, idihedrals) # run the smoothing s_bonds = np.zeros_like(bonds) s_angles = np.zeros_like(angles) s_dihedrals = np.zeros_like(dihedrals) for i in range(bonds.shape[1]): #s_bonds[:, i] = buttersworth_smooth(bonds[:, i], width=bond_width) s_bonds[:, i] = window_smooth(bonds[:, i], window_len=bond_width, window=window) for i in range(angles.shape[1]): #s_angles[:, i] = buttersworth_smooth(angles[:, i], width=angle_width) s_angles[:, i] = window_smooth(angles[:, i], window_len=angle_width, window=window) # filter the dihedrals with the angular smoother, that filters # the sin and cos components separately for i in range(dihedrals.shape[1]): #s_dihedrals[:, i] = angular_smooth(dihedrals[:, i], # smoothing_func=buttersworth_smooth, width=dihedral_width) s_dihedrals[:, i] = angular_smooth(dihedrals[:, i], smoothing_func=window_smooth, window_len=dihedral_width, window=window) # compute the inversion for each frame s_xyzlist = np.zeros_like(xyzlist_guess) errors = np.zeros(len(xyzlist_guess)) # Thresholds for error and jump w_xrefs = 0.0 for i, xyz_guess in enumerate(xyzlist_guess): w_xref = 0.0 passed = False corrected = False while not passed: passed = False if i > 0: xref = s_xyzlist[i-1] else: xref = None passed = True ramp = 0.1 if i >= (1.0-ramp)*len(xyzlist_guess): w_morse_ = w_morse * float(len(xyzlist_guess)-i-1)/(ramp*len(xyzlist_guess)) elif i <= ramp*len(xyzlist_guess): w_morse_ = w_morse * float(i)/(ramp*len(xyzlist_guess)) else: w_morse_ = w_morse r = least_squares_cartesian(s_bonds[i], ibonds, s_angles[i], iangles, 
s_dihedrals[i], idihedrals, xyz_guess, xref=xref, w_xref=w_xref, elem=atom_names, w_morse=w_morse_, rep=rep) s_xyzlist[i], errors[i] = r if i > 0: aligned0 = align_trajectory(np.array([xyzlist[i],xyzlist[i-1]]), 0) aligned1 = align_trajectory(np.array([s_xyzlist[i],s_xyzlist[i-1]]), 0) maxd0 = np.max(np.abs(aligned0[1] - aligned0[0])) maxd1 = np.max(np.abs(aligned1[1] - aligned1[0])) if maxd0 > 1e-5: jmp = maxd1 / maxd0 else: jmp = 0.0 else: maxd1 = 0.0 jmp = 0.0 if (not passed) and (anchor < 0 or jmp < anchor): passed = True if w_xref >= 1.99: w_xrefs = w_xref - 1.0 elif w_xref < 0.1: w_xrefs = 0.0 else: w_xrefs = w_xref / 1.5 elif not passed: if w_xref == 0.0: if w_xrefs > 0.0: w_xref = w_xrefs else: w_xref = 2.0**10 / 3.0**10 else: if w_xref >= 0.99: w_xref += 1.0 else: w_xref *= 1.5 if w_xref > 30: print("\nanchor %f, giving up" % (w_xref)) # Set it back to a reasonable (but still high) number w_xrefs = 20.0 passed = True else: print("jump %f max(dx) %f, trying anchor = %f\r" % (jmp, maxd1, w_xref), end=' ') corrected = True if xyzlist_match != None: aligned_ij = align_trajectory(np.array([s_xyzlist[i], xyzlist_match[i]]), 0) maxd_ij = np.max(np.abs(aligned_ij[1] - aligned_ij[0])) if maxd_ij < 1e-3: print("% .4f" % maxd_ij, "\x1b[92mMatch\x1b[0m") s_xyzlist[i:] = xyzlist_match[i:] break # Print out a message if we had to correct it. if corrected: print('\rxyz: error %f max(dx) %f jump %s anchor %f' % (errors[i], maxd1, jmp, w_xref)) if (i%10) == 0: print("\rWorking on frame %i / %i" % (i, len(xyzlist_guess)), end=' ') print() if i > 0: print('max(dx) %f (new) %f (old) %s%f\x1b[0m (ratio)' % (maxd1, maxd0, "\x1b[91m" if jmp > 3 else "", jmp)) #return_value = (interweave(s_xyzlist), interweave(errors)) return_value = s_xyzlist, errors return return_value
21,252
def cli(): """Define the command line interface of the script.""" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawTextHelpFormatter ) parser.add_argument("--output", help="the directory to store the archives") parser.add_argument("--repeats", type=int, default=2, help="the number of compilations to run") parser.add_argument("--overwrite", action="store_true", help="ignore existing archives") parser.add_argument("--log-level", choices=("debug", "info", "warning", "error", "critical"), default="info", help="the severity of log messages to print") return parser
21,253
def estimate_gridsearch_size(model, params): """ Compute the total number of parameter combinations in a grid search Parameters ---------- model: str name of the model to train. The function currently supports feedforward neural networks (model = 'FNN'), long-short term memory (model = 'LSTM') and naive discriminative learning (model = 'NDL') also commonly known as Rescorla-Wagner model. params: dict of lists parameter set of the grid search: Returns ------- int number of param combinations """ ### FNN model if model == 'FNN': # Extract the dimensions of the pretrained embeddings pretrain_embed_dim = {} embed_inputs = params['embedding_input'] for i, e in enumerate(embed_inputs): if embed_inputs[i] and embed_inputs[i] != 'learn': pretrain_embed_dim.update({embed_inputs[i]:extract_embedding_dim(embed_inputs[i])}) # Create a list of dictionaries giving all possible parameter combinations keys, values = zip(*params.items()) grid_full = [dict(zip(keys, v)) for v in itertools.product(*values)] ### Remove impossible combinations ind_to_remove = [] for i,d in enumerate(grid_full): # In the case of no hidden layer, no need to set the 'activation' parameter - only 'last_activation' is used if grid_full[i]['hidden_layers'] == 0: grid_full[i]['activation'] = None # In the case of hot encoding or pretrained embedding, no need to set embedding_dim, otherwise, # it is essential to set embedding_dim, so remove all cases where embedding_dim is not given with # embeddings to be learned from scratch if not grid_full[i]['embedding_input']: grid_full[i]['embedding_dim'] = None elif grid_full[i]['embedding_input'] == 'learn' and not grid_full[i]['embedding_dim']: ind_to_remove.append(i) elif grid_full[i]['embedding_input'] and grid_full[i]['embedding_input'] != 'learn': grid_full[i]['embedding_dim'] = pretrain_embed_dim[grid_full[i]['embedding_input']] # In the case of embeddings, it is essential to set 'max_len' (max_len cannot be None), # so remove all cases where embeddings are used max_len is not given if grid_full[i]['embedding_input'] and not grid_full[i]['max_len']: ind_to_remove.append(i) # First remove the detected impossible combinations (e.g. 
        # 'embedding_input = 'learn', embedding_dim = None')
        for ii in sorted(ind_to_remove, reverse = True):
            del grid_full[ii]

        # Second remove the duplicated combinations 'embedding_input != 'learn', embedding_dim = None'
        grid_full = [dict(t) for t in {tuple(d.items()) for d in grid_full}]

    ### LSTM model
    elif model == 'LSTM':

        # Extract the dimensions of the pretrained embeddings
        pretrain_embed_dim = {}
        embed_inputs = params['embedding_input']
        for i, e in enumerate(embed_inputs):
            if embed_inputs[i] and embed_inputs[i] != 'learn':
                pretrain_embed_dim.update({embed_inputs[i]:extract_embedding_dim(embed_inputs[i])})

        ### Create a list of dictionaries giving all possible parameter combinations
        keys, values = zip(*params.items())
        grid_full = [dict(zip(keys, v)) for v in itertools.product(*values)]

        ### Remove impossible combinations
        ind_to_remove = []
        for i,d in enumerate(grid_full):

            # In the case of hot encoding or pretrained embedding, no need to set embedding_dim, otherwise,
            # it is essential to set embedding_dim, so remove all cases where embedding_dim is not given with
            # embeddings to be learned from scratch
            if not grid_full[i]['embedding_input']:
                grid_full[i]['embedding_dim'] = None
            elif grid_full[i]['embedding_input'] == 'learn' and not grid_full[i]['embedding_dim']:
                ind_to_remove.append(i)
            elif grid_full[i]['embedding_input'] and grid_full[i]['embedding_input'] != 'learn':
                grid_full[i]['embedding_dim'] = pretrain_embed_dim[grid_full[i]['embedding_input']]

        # First remove the combinations 'embedding_input = 'learn', embedding_dim = None'
        for ii in sorted(ind_to_remove, reverse = True):
            del grid_full[ii]

        # Second remove the duplicated combinations 'embedding_input != 'learn', embedding_dim = None'
        grid_full = [dict(t) for t in {tuple(d.items()) for d in grid_full}]

    ### NDL model
    elif model == 'NDL':

        ### Create a list of dictionaries giving all possible parameter combinations
        keys, values = zip(*params.items())
        grid_full = [dict(zip(keys, v)) for v in itertools.product(*values)]

    # Raise an error if a non-supported model is entered
    else:
        raise ValueError(f'The entered model "{model}" is not supported')

    return len(grid_full)
21,254
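A small sanity check of estimate_gridsearch_size using its 'NDL' branch, which only takes the Cartesian product of the parameter lists (no embedding handling). It assumes the function above is defined in the same scope with itertools imported; the parameter names and values are made up.

import itertools  # used inside estimate_gridsearch_size

# Two, two and three values for three hypothetical NDL parameters -> 2 * 2 * 3 = 12 combinations.
ndl_params = {
    'lambda_': [0.001, 0.0001],
    'alpha': [0.1, 0.2],
    'epochs': [5, 10, 20],
}
assert estimate_gridsearch_size('NDL', ndl_params) == 12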
def softmax_logits_kld(ops, p_logits, q_logits, keepdims=False): """ Compute the KL-divergence between two softmax categorical distributions via logits. The last dimension of `p` and `q` are treated as the softmax dimension, and will be reduced for computing KL-divergence. .. math:: \\operatorname{D}_{KL}(p(y)\\|q(y)) = \\sum_y p(y) \\left(\\log p(y) - \\log q(y)\\right) Args: ops (npyops or tfops): The math operations module. p_logits: Logits of softmax categorical :math:`p(y)`. q_logits: Logits of softmax categorical :math:`q(y)`. keepdims (bool): Whether or not to keep the reduced dimension? (default :obj:`False`) Returns: The computed softmax categorical distributions KL-divergence. """ p_logits = ops.convert_to_tensor(p_logits) q_logits = ops.convert_to_tensor(q_logits) with ops.name_scope('softmax_logits_kld', values=[p_logits, q_logits]): log_p = log_softmax(ops, p_logits) log_q = log_softmax(ops, q_logits) p = softmax(ops, p_logits) # TODO: can we reduce time consumption by ``np.exp(log_p)``? # p = ops.exp(log_p) return ops.reduce_sum(p * (log_p - log_q), axis=-1, keepdims=keepdims)
21,255
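softmax_logits_kld above delegates to an ops backend (npyops/tfops style), so it is not runnable on its own here. As a plain-NumPy sanity check of the same formula, the sketch below computes KL(p||q) between two softmax distributions directly; all names are local to the example and are not part of the original code.

import numpy as np

def _softmax(logits):
    z = logits - logits.max(axis=-1, keepdims=True)  # stabilize before exponentiating
    e = np.exp(z)
    return e / e.sum(axis=-1, keepdims=True)

p_logits = np.array([0.5, 1.0, -0.5])
q_logits = np.array([0.1, 0.1, 0.1])

p, q = _softmax(p_logits), _softmax(q_logits)
kl = np.sum(p * (np.log(p) - np.log(q)), axis=-1)  # matches the docstring formula
print(kl)  # non-negative scalar; zero only when p == q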
def E_inductive_from_ElectricDipoleWholeSpace(
    XYZ,
    srcLoc,
    sig,
    f,
    current=1.0,
    length=1.0,
    orientation="X",
    kappa=1.0,
    epsr=1.0,
    t=0.0,
):
    """
    Compute the inductive portion of the electric field from an electric dipole in a whole space.

    Parameters:
        XYZ: observation locations, array of shape (N, 3)
        srcLoc: source (dipole) location, length-3 array
        sig: electrical conductivity (S/m)
        f: frequency (Hz), 1D array
        current: source current (A)
        length: dipole length (m)
        orientation: dipole orientation, "X", "Y" or "Z"
        kappa: magnetic susceptibility
        epsr: relative electric permittivity
        t: unused, kept for interface compatibility
    """
    mu = mu_0 * (1 + kappa)
    epsilon = epsilon_0 * epsr
    sig_hat = sig + 1j * omega(f) * epsilon

    XYZ = utils.asArray_N_x_Dim(XYZ, 3)
    # Multiple field locations are only supported for a single frequency.
    if XYZ.shape[0] > 1 and f.shape[0] > 1:
        raise Exception(
            "I/O type error: For multiple field locations only a single frequency can be specified."
        )

    dx = XYZ[:, 0] - srcLoc[0]
    dy = XYZ[:, 1] - srcLoc[1]
    dz = XYZ[:, 2] - srcLoc[2]

    r = np.sqrt(dx ** 2.0 + dy ** 2.0 + dz ** 2.0)
    # k = np.sqrt( -1j*2.*np.pi*f*mu*sig )
    k = np.sqrt(omega(f) ** 2.0 * mu * epsilon - 1j * omega(f) * mu * sig)

    front = current * length / (4.0 * np.pi * sig_hat * r ** 3) * np.exp(-1j * k * r)

    if orientation.upper() == "X":
        Ex_inductive = front * (k ** 2 * r ** 2)
        Ey_inductive = np.zeros_like(Ex_inductive)
        Ez_inductive = np.zeros_like(Ex_inductive)
        return Ex_inductive, Ey_inductive, Ez_inductive

    elif orientation.upper() == "Y":
        # x --> y, y --> z, z --> x
        Ey_inductive = front * (k ** 2 * r ** 2)
        Ez_inductive = np.zeros_like(Ey_inductive)
        Ex_inductive = np.zeros_like(Ey_inductive)
        return Ex_inductive, Ey_inductive, Ez_inductive

    elif orientation.upper() == "Z":
        # x --> z, y --> x, z --> y
        Ez_inductive = front * (k ** 2 * r ** 2)
        Ex_inductive = np.zeros_like(Ez_inductive)
        Ey_inductive = np.zeros_like(Ez_inductive)
        return Ex_inductive, Ey_inductive, Ez_inductive
21,256
def translate_categories_json(n=10):
    """Translate category descriptions in categories.json to the most common n non-English languages"""
    language_list = get_language_list()
    language_list = language_list[:n]
    with open(os.path.join(data_directory, 'categories.json'), 'r') as f:
        categories_mapping = json.load(f)
    categories_mapping = {**categories_mapping['Mobile'], **categories_mapping['Beauty'], **categories_mapping['Fashion']}
    translate_client = translate.Client()
    translated_categories = categories_mapping.copy()
    for [lang, count] in language_list:
        target = lang
        for key, value in categories_mapping.items():
            # key = description
            try:
                translation = translate_client.translate(
                    key,
                    source_language='en',
                    target_language=target)
            except Exception:
                print('Error translating {} to {}'.format(key, target))
                continue
            translated_categories[translation['translatedText']] = value
    # Return the augmented mapping so callers can persist or inspect it
    return translated_categories
21,257
def str2posix(timelist):
    """ Take a list of strings holding a date and a start and end time and return the corresponding POSIX times.

    Inputs
        timelist - A list of strings with the date followed by two times. A date for the second time can
            optionally be supplied as well; in that case it sits at index 2 and the second time at index 3.
    Outputs
        dtts - A list of the two POSIX times derived from the inputs"""
    if len(timelist) == 3:
        timelist.insert(2, timelist[0])
    (dt1, dt2) = parser.parse(timelist[0] + ' ' + timelist[1]), parser.parse(timelist[2] + ' ' + timelist[3])
    dt1 = dt1.replace(tzinfo=pytz.utc)
    dt2 = dt2.replace(tzinfo=pytz.utc)
    dt1ts = (dt1 - datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc)).total_seconds()
    dt2ts = (dt2 - datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc)).total_seconds()
    return [dt1ts, dt2ts]
21,258
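A usage sketch for str2posix, assuming the function above is defined in a module that imports dateutil.parser as parser, pytz, and datetime as its body requires. The timestamps are arbitrary.

from dateutil import parser          # the function body uses `parser.parse`
from datetime import datetime        # and `datetime(1970, 1, 1, ...)`
import pytz

# Date plus start and end time; with three entries the date is reused for the end time.
start_ts, end_ts = str2posix(['2020-01-01', '12:00:00', '13:00:00'])
print(end_ts - start_ts)  # 3600.0 seconds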
def test_spherical_isolation_grid1(): """ Create a regular grid inside the unit box with points on each of the following nodes: 0.1, 0.3, 0.5, 0.7, 0.9. Demonstrate that all points in such a sample are isolated if r_max < 0.2, regardless of periodic boundary conditions. """ sample1 = generate_3d_regular_mesh(5) r_max = 0.1 iso = spherical_isolation(sample1, sample1, r_max) assert np.all(iso == True) iso = spherical_isolation(sample1, sample1, r_max, period=1) assert np.all(iso == True) r_max = 0.25 iso2 = spherical_isolation(sample1, sample1, r_max) assert np.all(iso2 == False) iso2 = spherical_isolation(sample1, sample1, r_max, period=1) assert np.all(iso2 == False)
21,259
def iterate_by_type(objs, typelist): """ collects a sequence of objs into buckets by type, then re-emits objs from the buckets, sorting through the buckets in the order specified by typelist. Any objects of a type not specified in typelist will be emitted last in no guaranteed order (but still grouped by type). """ cache = collect_by_type(objs) for t in typelist: for val in cache.pop(t, tuple()): yield val for tl in cache.values(): for val in tl: yield val
21,260
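iterate_by_type relies on a collect_by_type helper that is not shown here; the sketch below supplies a minimal stand-in (an assumption, not the original helper) just to illustrate the ordering behaviour.

from collections import defaultdict

def collect_by_type(objs):
    """Minimal stand-in: bucket objects by their type."""
    buckets = defaultdict(list)
    for obj in objs:
        buckets[type(obj)].append(obj)
    return buckets

mixed = [1, 'a', 2.5, 'b', 2, 3.5]
# Strings first, then ints; floats were not listed, so they are emitted last.
print(list(iterate_by_type(mixed, [str, int])))  # ['a', 'b', 1, 2, 2.5, 3.5]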
def keys_verif(verif: bool = True): """ Used to verify existence of private or/and public keys of ElGamal. """ print("\nChecking the presence of keys in the system....") if isFileHere("public_key.kpk", config.DIRECTORY_PROCESSING): # from cipher.asymmetric import elGamal as elG print(f"\nPublic key is already here.\n") if isFileHere("private_key.kpk", config.DIRECTORY_PROCESSING): print(f"Private key is here too.\n") if verif and not query_yn("Do you want to keep them? (default: No)", "no"): rmFile("public_key.kpk", config.DIRECTORY_PROCESSING) rmFile("private_key.kpk", config.DIRECTORY_PROCESSING) rmFile("encrypted.kat", config.DIRECTORY_PROCESSING) return True else: print("Private key's missing.\n") if query_yn("Do you want to add them now?\n"): while not isFileHere("private_key.kpk", config.DIRECTORY_PROCESSING): input("Please put your 'private_key.kpk' file into the 'processing' folder.") print("Find it !") keys_verif() else: katsuAsymm() elif isFileHere("private_key.kpk", config.DIRECTORY_PROCESSING): print("\nPrivate key's already here but not public one's.\n") if query_yn("Do you want to add them now? ( default: No)\n", "no"): while not isFileHere("public_key.kpk", config.DIRECTORY_PROCESSING): input("Please put your 'public_key.kpk' file into the 'processing' folder.") print("find it !") keys_verif() else: return True else: return True return False
21,261
def r2(ground_truth, simulation, join='inner', fill_value=0):
    """
    R-squared value between ground truth and simulation

    Inputs:
    ground_truth - ground truth measurement (data frame) with measurement in the "value" column
    simulation - simulation measurement (data frame) with measurement in the "value" column
    join - type of join to perform between ground truth and simulation
    fill_value - fill value for non-overlapping joins
    """

    if simulation is None or ground_truth is None:
        return None

    if len(simulation) == 0 or len(ground_truth) == 0:
        return None

    if type(ground_truth) is list:
        # list inputs: clean up non-finite values and score the arrays directly
        ground_truth = np.nan_to_num(ground_truth)
        simulation = np.nan_to_num(simulation)

        ground_truth = ground_truth[np.isfinite(ground_truth)]
        simulation = simulation[np.isfinite(simulation)]

        return r2_score(np.asarray(ground_truth), np.asarray(simulation))

    ground_truth = ground_truth[np.isfinite(ground_truth.value)]
    simulation = simulation[np.isfinite(simulation.value)]

    df = join_dfs(ground_truth, simulation, join=join, fill_value=fill_value)

    if df.empty:
        return None
    else:
        return r2_score(df["value_gt"], df["value_sim"])
21,262
def atualiza_pontos(lista_jogadores, indece, pontos_carregados):
    """
    -> Function that, when called, updates the players' scores.

    :param lista_jogadores: list holding the data of every registered player (name and score for each).
    :param indece: index of the player currently playing, whose score must therefore be updated.
    :param pontos_carregados: the score recorded for that player.
    :return: None
    """
    lista_jogadores[indece][1] = pontos_carregados
    arq = 'lista-jogadores.txt'
    with open(arq, 'wt') as a:
        for linha in lista_jogadores:
            a.write(f'{linha[0]};{linha[1]}\n')
21,263
def add_team_batting_stats(df, years, batter_metrics): """ """ gids = list(set(df['gameId'])) bat_saber_paths = [ CONFIG.get('paths').get('batter_saber') + gid + \ "/batter_saber_team.parquet" for gid in os.listdir(CONFIG.get('paths').get('batter_saber')) ] curr_gids = list(set( list(df['homePrevGameId']) + list(df['awayPrevGameId']) )) bat_saber_paths = [ x for x in bat_saber_paths if any( gid in x for gid in curr_gids ) ] batter_saber = pd.concat( objs=[pd.read_parquet(path) for path in bat_saber_paths], axis=0 ) print(batter_saber.shape) print("batter saber shape above") # Get top 9 by AB batter_saber['game_id_team'] = ( batter_saber['gameId'] + batter_saber['team'] ) batter_saber.sort_values(by=['game_id_team', 'woba_trail6'], ascending=False, inplace=True) batter_saber['rank'] = batter_saber.groupby('game_id_team')\ ['batterId'].cumcount() batter_saber = batter_saber.loc[batter_saber['rank'] <= 9, :] batter_saber.loc[batter_saber['rank'] < 5, 'batter_group'] = 'high' batter_saber.loc[batter_saber['rank'] >= 5, 'batter_group'] = 'low' # Aggregate batter_saber = batter_saber.groupby( by=['gameId', 'team', 'batter_group'], as_index=False ).agg({k: 'mean' for k in batter_metrics}) batter_saber = batter_saber.pivot_table( index=['gameId', 'team'], columns=['batter_group'], values=[k for k in batter_metrics], aggfunc='mean' ) batter_saber.reset_index(inplace=True) batter_saber.columns = [ x[0] if x[1] == '' else x[0]+"_"+x[1] for x in batter_saber.columns ] batter_saber.to_csv( '/Users/peteraltamura/Desktop/batter_saber_wide.csv', index=False) # Merge Home df = pd.merge( df, batter_saber, how='left', left_on=['homePrevGameId', 'home_code'], right_on=['gameId', 'team'], validate='1:1', suffixes=['', '_HOME'] ) # Merge Away df = pd.merge( df, batter_saber, how='left', left_on=['awayPrevGameId', 'away_code'], right_on=['gameId', 'team'], validate='1:1', suffixes=['', '_AWAY'] ) return df
21,264
def depth_analysis_transform_1(rgb_tensor, depth_tensor, num_filters): """Builds the analysis transform.""" with tf.variable_scope("analysis"): # --------------------------------------- rgb branch with tf.variable_scope("layer_0"): layer = tfc.SignalConv2D( num_filters, (9, 9), corr=True, strides_down=4, padding="same_zeros", use_bias=True, activation=tf.nn.relu) rgb_tensor = layer(rgb_tensor) # --------------------------------------- depth branch with tf.variable_scope("layer_d0"): layer = tfc.SignalConv2D( num_filters, (9, 9), corr=True, strides_down=4, padding="same_zeros", use_bias=True, activation=tf.nn.relu) depth_tensor = layer(depth_tensor) # --------------------------------------- fusion tf.summary.histogram('rgb_tensor', rgb_tensor) tf.summary.histogram('depth_tensor', depth_tensor) tensor = rgb_tensor + depth_tensor with tf.variable_scope("layer_1"): layer = tfc.SignalConv2D( num_filters, (5, 5), corr=True, strides_down=2, padding="same_zeros", use_bias=True, activation=tf.nn.relu) tensor = layer(tensor) with tf.variable_scope("layer_2"): layer = tfc.SignalConv2D( num_filters, (5, 5), corr=True, strides_down=2, padding="same_zeros", use_bias=False, activation=None) tensor = layer(tensor) return tensor
21,265
def boxPlot(med, quartiles, minmax, mean=None, outliers=None, name='boxplot', horiz=True, offset=0, legendGroup='boxplot', showleg=False, plot=False, col='blue', width=8): """ Makes very light plotly boxplot. Unlike theirs, this can take externally calc'd values rather than just data to make it go much faster. :param med: :param quartiles: :param minmax: :param mean: :param name: :param horiz: :param offset: :param legendGroup: :param plot: :param col: :return: """ show_indiv_leg=False #set to true for debug mode if horiz: wideaxis='x' offsetaxis='y' else: wideaxis = 'y' offsetaxis = 'x' if mean: text='Median=%.3e <br> Mean=%.3e <br> [Q1,Q2]=[%.3e,%.3e] <br> [min, max]=[%.3e,%.3e]' % \ (med,mean, *quartiles, *minmax) else: text = 'Median=%.3e <br> [Q1,Q2]=[%.3e,%.3e] <br> [min, max]=[%.2f,%.2f]' \ % (med, *quartiles, *minmax) thickLine = [{wideaxis:quartiles, offsetaxis:[offset]*2, 'name':name, 'showlegend':showleg, 'legendgroup':legendGroup, 'type': 'scatter', 'line':{'color': col, 'width': width}, 'opacity':.4, 'hovertext':text, 'hoverinfo':'name+text', }] thinLine = [{wideaxis:minmax, offsetaxis:[offset]*2, 'name':name, 'showlegend':show_indiv_leg, 'legendgroup':legendGroup, 'type': 'scatter', 'line': {'color': col, 'width': 2}, 'opacity':.4, 'hovertext':text, 'hoverinfo':'name+text'}] medPoint = [{wideaxis:[med], offsetaxis:[offset], 'hovertext':text, 'hoverinfo':'name+text', 'name':name, 'showlegend':show_indiv_leg, 'legendgroup':legendGroup, 'mode': 'markers', 'marker':{'color':'black', 'symbol':'square', 'size':8}, 'opacity':1}] boxPlots = thickLine + thinLine + medPoint if mean is not None: meanPoint = [{wideaxis: [mean], offsetaxis: [offset], 'hovertext':text, 'hoverinfo':'name+text', 'name': name, 'showlegend': show_indiv_leg, 'legendgroup': legendGroup, 'mode': 'markers', 'marker': {'color': 'white', 'symbol': 'diamond', 'size': 8, 'line': {'color':'black', 'width':1} }, 'opacity': 1, 'line': {'color':'black'}}] boxPlots += meanPoint if outliers is not None: outlierplot = [{wideaxis:outliers, offsetaxis:[offset]*len(outliers), 'name':name, 'legendgroup':legendGroup, 'mode':'markers', 'marker':dict(size = 2, color=col), 'hoverinfo': wideaxis+'+name'}] boxPlots += outlierplot fig = go.Figure(data=boxPlots) # as boxPlot is used primarily as a subcomponent in other plots, its output is not simply plotOut(fig, plot) if plot: fig = go.Figure(data=boxPlots) plotfunc = pyo.iplot if in_notebook() else pyo.plot plotfunc(fig) else: return boxPlots
21,266
def hub_quantile_prediction_dict_validator(target_group_dict, prediction_dict): """ Does hub prediction_dict validation as documented in `json_io_dict_from_quantile_csv_file()` """ error_messages = [] # return value. filled next valid_quantiles = target_group_dict['quantiles'] prediction_quantiles = prediction_dict['prediction']['quantile'] if set(valid_quantiles) != set(prediction_quantiles): error_messages.append(f"prediction_dict quantiles != valid_quantiles. valid_quantiles={valid_quantiles}, " f"prediction_quantiles={prediction_quantiles}") return error_messages
21,267
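The validator above only inspects plain dictionaries, so it can be exercised directly. The structure below mirrors what the code implies (a target group with a 'quantiles' list and a prediction dict with a prediction.quantile list); the concrete values are made up.

target_group = {'quantiles': [0.25, 0.5, 0.75]}

good_prediction = {'prediction': {'quantile': [0.5, 0.25, 0.75]}}   # same set, order irrelevant
bad_prediction = {'prediction': {'quantile': [0.25, 0.5]}}          # missing 0.75

assert hub_quantile_prediction_dict_validator(target_group, good_prediction) == []
print(hub_quantile_prediction_dict_validator(target_group, bad_prediction))
# -> one message explaining that the prediction quantiles differ from the valid ones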
def allclose_periodical(x, y, a, b, atol=1e-10): """ Checks np.allclose(x,y), but assumes both x and y are periodical with respect to interval (a,b) """ assert(len(x) == len(y)) period = b-a x_p = np.remainder(x-a,period) # now in 0, b-a y_p = np.remainder(y-a,period) return all(np.isclose(x_p[i], y_p[i], atol=atol) or np.isclose(x_p[i], y_p[i]+period, atol=atol) or np.isclose(x_p[i], y_p[i]-period, atol=atol) for i in range(len(x_p)))
21,268
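A quick check of allclose_periodical with angles that wrap around the 2*pi interval; only NumPy is needed besides the function itself, and the values are arbitrary.

import numpy as np

a = np.array([0.0, 1.5, 6.28318530718])   # ~2*pi wraps back to ~0
b = np.array([2 * np.pi, 1.5, 0.0])

# Treated as periodic on (0, 2*pi), the two arrays coincide.
print(allclose_periodical(a, b, 0.0, 2 * np.pi))  # True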
def get_categories_to_rows_ratio(df): """ Gets ratio of unique categories to number of rows in the categorical variable; do this for each categorical variable :param df: pd.DataFrame :return: array of tuples """ cat_columns = get_categorical_variable_names(df) ratios = {col:len(df[col].unique()) / df[col].count() for col in cat_columns} sorted_ratios = sorted(ratios.items(), key=operator.itemgetter(1), reverse=True) return sorted_ratios
21,269
def _width_left_set(size: int, lsize: int, value: list, fmt: str, meta: dict) -> dict: """Width setting of paragraph with left repositioning.""" return Plain([RawInline(fmt, '<p style="text-align:left !important;' 'text-indent:0 !important;' 'position:relative;width:{0};left:{1}">'. format(size, lsize))] + value + [RawInline(fmt, '</p>')])
21,270
def GetMappingKeyName(run, user): """Returns a str used to uniquely identify a mapping.""" return 'RunTesterMap_%s_%s' % (run.key().name(), str(user.user_id()))
21,271
def get_invitee_from_table(invite_code: str, table): """ Get a dictionary of the stored information for this invite code. Args: invite_code: The invitation code to search for table: A DynamoDB table for querying Returns: A dictionary of information stored under the invite code Throws: UnknownInviteCodeError: If the invite code is not in the database """ response = table.query( KeyConditionExpression=Key('invite_code').eq(invite_code) ) items = response['Items'] if len(items) == 0: # If there were no matches to the code then throw an error raise UnknownInviteCodeError() # The output will be a list, so we'll just use the first one since there # should not be duplicates items = items[0] # DynamoDB cannot store empty strings, so we use null instead and convert # between it as needed. At this point in time, we have no significance for # null so this works fine. items = {k: convert_null_to_empty_string(v) for k, v in items.items()} return items
21,272
def load_game(filename: str): """ Deserialise the game data from a file. Filename does not include path to save folder. """ # read from json with open(SAVE_PATH + filename + ".json", "r") as file: save = json.load(file) # check the version if save["version"] != VERSION: logging.warning(f"Loading data from a previous version, {save['version']}.") # deserialise data new_world = world.deserialise(save["world"]) store.deserialise(save["store"]) # set the data as the default world world.move_world(new_world) logging.info(f"Game loaded from {filename}.")
21,273
def minter(pid_type, pid_field, record): """Mint the given PID for the given record.""" if pid_field not in record.keys(): record[pid_field] = create_pid() pid = PersistentIdentifier.get( pid_type="recid", pid_value=record[pid_field] ) pid.status = PIDStatus.REGISTERED pid.object_type = "rec" pid.object_uuid = record.id pid.pid_type = pid_type
21,274
def export_to_labelbox( sample_collection, ndjson_path, video_labels_dir=None, labelbox_id_field="labelbox_id", label_field=None, frame_labels_field=None, ): """Exports labels from the FiftyOne samples to Labelbox format. This function is useful for loading predictions into Labelbox for `model-assisted labeling <https://labelbox.com/docs/automation/model-assisted-labeling>`_. You can use :meth:`upload_labels_to_labelbox` to upload the exported labels to a Labelbox project. You can use :meth:`upload_media_to_labelbox` to upload sample media to Labelbox and populate the ``labelbox_id_field`` field, if necessary. The IDs of the Labelbox DataRows corresponding to each sample must be stored in the ``labelbox_id_field`` of the samples. Any samples with no value in ``labelbox_id_field`` will be skipped. When exporting frame labels for video datasets, the ``frames`` key of the exported labels will contain the paths on disk to per-sample NDJSON files that are written to ``video_labels_dir`` as follows:: video_labels_dir/ <labelbox-id1>.json <labelbox-id2>.json ... where each NDJSON file contains the frame labels for the video with the corresponding Labelbox ID. Args: sample_collection: a :class:`fiftyone.core.collections.SampleCollection` ndjson_path: the path to write an NDJSON export of the labels video_labels_dir (None): a directory to write the per-sample video labels. Only applicable for video datasets labelbox_id_field ("labelbox_id"): the sample field to lookup/store the IDs of the Labelbox DataRows label_field (None): optional label field(s) to export. Can be any of the following: - the name of a label field to export - a glob pattern of label field(s) to export - a list or tuple of label field(s) to export - a dictionary mapping label field names to keys to use when constructing the exported labels By default, no labels are exported frame_labels_field (None): optional frame label field(s) to export. Only applicable to video datasets. 
Can be any of the following: - the name of a frame label field to export - a glob pattern of frame label field(s) to export - a list or tuple of frame label field(s) to export - a dictionary mapping frame label field names to keys to use when constructing the exported frame labels By default, no frame labels are exported """ is_video = sample_collection.media_type == fomm.VIDEO # Get label fields to export label_fields = sample_collection._parse_label_field( label_field, allow_coersion=False, force_dict=True, required=False, ) # Get frame label fields to export if is_video: frame_label_fields = sample_collection._parse_frame_labels_field( frame_labels_field, allow_coersion=False, force_dict=True, required=False, ) if frame_label_fields and video_labels_dir is None: raise ValueError( "Must provide `video_labels_dir` when exporting frame labels " "for video datasets" ) etau.ensure_empty_file(ndjson_path) # Export the labels with fou.ProgressBar() as pb: for sample in pb(sample_collection): labelbox_id = sample[labelbox_id_field] if labelbox_id is None: logger.warning( "Skipping sample '%s' with no '%s' value", sample.id, labelbox_id_field, ) continue # Compute metadata if necessary if sample.metadata is None: if is_video: metadata = fom.VideoMetadata.build_for(sample.filepath) else: metadata = fom.ImageMetadata.build_for(sample.filepath) sample.metadata = metadata sample.save() # Get frame size if is_video: frame_size = ( sample.metadata.frame_width, sample.metadata.frame_height, ) else: frame_size = (sample.metadata.width, sample.metadata.height) # Export sample-level labels if label_fields: labels_dict = _get_labels(sample, label_fields) annos = _to_labelbox_image_labels( labels_dict, frame_size, labelbox_id ) etas.write_ndjson(annos, ndjson_path, append=True) # Export frame-level labels if is_video and frame_label_fields: frames = _get_frame_labels(sample, frame_label_fields) video_annos = _to_labelbox_video_labels( frames, frame_size, labelbox_id ) video_labels_path = os.path.join( video_labels_dir, labelbox_id + ".json" ) etas.write_ndjson(video_annos, video_labels_path) anno = _make_video_anno( video_labels_path, data_row_id=labelbox_id ) etas.write_ndjson([anno], ndjson_path, append=True)
21,275
def zip_minibatch_iterate_info(arrays, minibatch_size, n_epochs=None, test_epochs=None):
    """
    Iterate through minibatches of arrays and yield info about the state of iteration through training.
    :param arrays: A collection of arrays, all of which must have the same shape[0]
    :param minibatch_size: The number of samples per minibatch
    :param n_epochs: The number of epochs to run for
    :param test_epochs: A list of epochs to test at.
    :return: (array_minibatches, info)
        array_minibatches is a tuple of minibatches from arrays
        info is an IterationInfo object returning information about the state of iteration.
    """
    if n_epochs is None:
        assert isinstance(test_epochs, (list, tuple, np.ndarray)), "If you don't specify n_epochs, you need to specify an array of test epochs."
        n_epochs = test_epochs[-1]

    for arrays, info in zip(
            zip_minibatch_iterate(arrays, minibatch_size=minibatch_size, n_epochs='inf'),
            iteration_info(n_samples=arrays[0].shape[0], minibatch_size=minibatch_size, test_epochs=test_epochs, n_epochs=n_epochs)
            ):
        yield arrays, info
        if info.done:
            break
21,276
def d_beta_dr(radius, beta, mass_r, epsilon, pressure, h_r): """ d_beta_dr """ return 2. * (1 - 2 * (mass_r/radius)) ** (-1.) * h_r * \ ( -2. * math.pi * (5*epsilon + 9*pressure + f(epsilon, pressure)) + (3/radius**2.) + 2*(1 - 2 * mass_r / radius)**(-1) * \ ((mass_r/radius) + 4 * math.pi*radius*pressure)**2 ) + (2 * beta/radius) *(1 - 2 * mass_r / radius)**(-1) * \ (-1 + mass_r/radius + 2 * math.pi * radius**2 * (epsilon - pressure))
21,277
def main(args=None): """Main function called by the `MPfspec` command line script.""" import argparse description = ('Create frequency spectra (PDS, CPDS, cospectrum) ' 'starting from well-defined input ligthcurves') parser = argparse.ArgumentParser(description=description) parser.add_argument("files", help="List of light curve files", nargs='+') parser.add_argument("-b", "--bintime", type=float, default=1/4096, help="Light curve bin time; if negative, interpreted" + " as negative power of 2." + " Default: 2^-10, or keep input lc bin time" + " (whatever is larger)") parser.add_argument("-r", "--rebin", type=int, default=1, help="(C)PDS rebinning to apply. Default: none") parser.add_argument("-f", "--fftlen", type=float, default=512, help="Length of FFTs. Default: 512 s") parser.add_argument("-k", "--kind", type=str, default="PDS,CPDS,cos", help='Spectra to calculate, as comma-separated list' + ' (Accepted: PDS and CPDS;' + ' Default: "PDS,CPDS")') parser.add_argument("--norm", type=str, default="Leahy", help='Normalization to use' + ' (Accepted: Leahy and rms;' + ' Default: "Leahy")') parser.add_argument("--noclobber", help="Do not overwrite existing files", default=False, action='store_true') parser.add_argument("-o", "--outroot", type=str, default=None, help='Root of output file names for CPDS only') parser.add_argument("--loglevel", help=("use given logging level (one between INFO, " "WARNING, ERROR, CRITICAL, DEBUG; " "default:WARNING)"), default='WARNING', type=str) parser.add_argument("--nproc", help=("Number of processors to use"), default=1, type=int) parser.add_argument("--back", help=("Estimated background (non-source) count rate"), default=0., type=float) parser.add_argument("--debug", help="use DEBUG logging level", default=False, action='store_true') parser.add_argument("--save-dyn", help="save dynamical power spectrum", default=False, action='store_true') args = parser.parse_args(args) if args.debug: args.loglevel = 'DEBUG' numeric_level = getattr(logging, args.loglevel.upper(), None) logging.basicConfig(filename='MPfspec.log', level=numeric_level, filemode='w') bintime = args.bintime fftlen = args.fftlen pdsrebin = args.rebin normalization = args.norm do_cpds = do_pds = do_cos = do_lag = False kinds = args.kind.split(',') for k in kinds: if k == 'PDS': do_pds = True elif k == 'CPDS': do_cpds = True elif k == 'cos' or k == 'cospectrum': do_cos = True do_cpds = True elif k == 'lag': do_lag = True do_cpds = True calc_fspec(args.files, fftlen, do_calc_pds=do_pds, do_calc_cpds=do_cpds, do_calc_cospectrum=do_cos, do_calc_lags=do_lag, save_dyn=args.save_dyn, bintime=bintime, pdsrebin=pdsrebin, outroot=args.outroot, normalization=normalization, nproc=args.nproc, back_ctrate=args.back, noclobber=args.noclobber)
21,278
def handle_signals():
    r"""
    Register signal handlers for clean shutdown.

    Typing the interrupt character (usually Ctrl-C) sends SIGINT.
    Typing the quit character (usually Ctrl-\) sends SIGQUIT.
    Hanging up the phone (modem) sends SIGHUP.
    Typing the stop character (usually Ctrl-Z) sends SIGTSTP.
    """
    LOG.info('Setting up signal handlers.')
    if platform.uname()[0] != 'Windows':
        signal.signal(signal.SIGHUP, SigHupHandler)
    signal.signal(signal.SIGINT, SigIntHandler)
    signal.signal(signal.SIGTERM, SigTermHandler)
21,279
def test_ndcg_after_2_perfect(): """ ndcg with top 2 classes removed """ y_true = np.array([[1, 1, 1, 0, 0, 0], [1, 1, 0, 1, 0, 1], [0, 1, 1, 1, 1, 1], [0, 1, 1, 0, 1, 1], [0, 0, 1, 1, 1, 1]]) y_prob = np.array([[0.5, 0.9, 0.8, 0.4, 0.2, 0.2], [0.9, 0.8, 0.2, 0.5, 0.2, 0.4], [0.2, 0.9, 0.8, 0.4, 0.6, 0.5], [0.2, 0.7, 0.8, 0.4, 0.6, 0.9], [0.2, 0.2, 0.8, 0.5, 0.3, 0.9]]) expected = 1.0 actual = metriks.ndcg(y_true, y_prob, 2) np.testing.assert_allclose([actual], [expected])
21,280
def score(capstone, student_api): """ Calculates the score of the students' API model :param student_api: StudentApi object :return: score as a float """ # Check which simulators have datapoints with outcomes outcomes simulator_ids = [] for simulator in capstone.simulators.all(): if simulator.datapoints.exclude(outcome="").count() > 0: simulator_ids.append(simulator.id) if len(simulator_ids) == 0: raise RuntimeError("No simulators with outcomes found.") qs = DueDatapoint.objects.filter( simulator_id__in=simulator_ids, student=student_api.student, ) outcomes = [] predictions = [] sensitive_class_race = {} sensitive_class_sex = {} for ddp in qs: # loop through each entry in DueDataPoint outcome = bool(json.loads(ddp.datapoint.outcome)) data = json.loads(ddp.datapoint.data) if ddp.response_status != 200: # Missing or bad response predictions.append(not outcome) outcomes.append(outcome) else: try: prediction = json.loads(ddp.response_content)["prediction"] except (json.JSONDecodeError, KeyError): predictions.append(not outcome) outcomes.append(outcome) else: sex = data["sex"].lower() if sex not in sensitive_class_sex: sensitive_class_sex[sex] = { "outcomes": [], "predictions": [], } sensitive_class_sex[sex]["outcomes"].append(outcome) sensitive_class_sex[sex]["predictions"].append(prediction) race = data["race"].lower() if race not in sensitive_class_race: sensitive_class_race[race] = { "outcomes": [], "predictions": [], } sensitive_class_race[race]["outcomes"].append(outcome) sensitive_class_race[race]["predictions"].append(prediction) if not isinstance(prediction, bool): predictions.append(not outcome) else: predictions.append(prediction) outcomes.append(outcome) logger.info(student_api.student) f1_score = metrics.f1_score(outcomes, predictions, pos_label=True) logger.info("f1_score %s" % f1_score) race_diff = fairness_score_precision(sensitive_class_race) sex_diff = fairness_score_precision(sensitive_class_sex) is_fair = race_diff < 0.2 and sex_diff < 0.2 logger.info("race_diff %s" % race_diff) logger.info("sex_diff %s" % sex_diff) logger.info("is_fair %s" % is_fair) if not is_fair: f1_score -= 0.1 return f1_score
21,281
def clean_lhdf(df: pd.DataFrame):
    """
    Removes unnecessary columns from the location history data frame and computes new required columns

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame to process

    Returns
    -------
    Copy of `df`, altered the following way:
    * Columns removed
        * `activity`
        * `altitude`
        * `heading`
    * Columns expected in `df`
        * `time`
        * `latitudeE7`
        * `longitudeE7`
    * Columns added
        * `date` (Format: `YYYY-MM-DD`)
        * `weekday` (Format: `0-6`; 0 = Sunday)
        * `daytime` (Format: `HH:MM`, 24h style)
        * `lat` (Format: dd.ddddd)
        * `lng` (Format: dd.ddddd)
    """
    df = df.copy()

    # Drop unnecessary cols
    df.drop(labels=["activity", "altitude", "heading"], axis=1, inplace=True)

    # compute time cols
    df.loc[:, "date"] = df.time.dt.strftime("%Y-%m-%d")
    df.loc[:, "weekday"] = df.time.dt.strftime("%w")  # was: %u
    df.loc[:, "daytime"] = df.time.dt.strftime("%H:%M")

    df.loc[:, "lat"] = pd.to_numeric(df.latitudeE7) / 1e7
    df.loc[:, "lng"] = pd.to_numeric(df.longitudeE7) / 1e7

    return df
21,282
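A small round trip through clean_lhdf, assuming only pandas and the function above. The two rows imitate Google location-history records (E7-scaled coordinates plus the columns the function drops); every value is invented.

import pandas as pd

raw = pd.DataFrame({
    'time': pd.to_datetime(['2021-03-14 08:30:00', '2021-03-15 17:45:00']),
    'latitudeE7': [520520000, 520530000],      # 52.052, 52.053 degrees
    'longitudeE7': [43570000, 43580000],       # 4.357, 4.358 degrees
    'activity': [None, None],
    'altitude': [0, 0],
    'heading': [0, 0],
})

cleaned = clean_lhdf(raw)
print(cleaned[['date', 'weekday', 'daytime', 'lat', 'lng']])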
def year_from_operating_datetime(df): """Add a 'year' column based on the year in the operating_datetime. Args: df (pandas.DataFrame): A DataFrame containing EPA CEMS data. Returns: pandas.DataFrame: A DataFrame containing EPA CEMS data with a 'year' column. """ df['year'] = df.operating_datetime_utc.dt.year return df
21,283
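A minimal illustration of year_from_operating_datetime with a two-row frame; only pandas is required besides the function itself, and the timestamps are arbitrary.

import pandas as pd

cems = pd.DataFrame({
    'operating_datetime_utc': pd.to_datetime(['2019-12-31 23:00:00', '2020-01-01 00:00:00']),
})
print(year_from_operating_datetime(cems)['year'].tolist())  # [2019, 2020]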
def is_thrift(target): """Returns True if the target has thrift IDL sources.""" return isinstance(target, JavaThriftLibrary)
21,284
def print_events_unified(success_result: dict): """Print the results of `getevents`, in one table""" elist = success_result["events"] for event in elist: event["time"] = datetime.datetime.fromisoformat(event["time"]) lowest_time = min(event["time"] for event in elist) for event in elist: offset_time = event["time"] - lowest_time event["offset_time"] = offset_time print(cf.bold("{:>14} {} {}".format("Time", "Thread", "Event"))) for event in sorted(elist, key=itemgetter("offset_time")): time = dim(str(event["offset_time"])) name = event["event"] thread = event["thread"] data = event["data"] if len(event["data"]) else "" print(f"{time:>16} {thread:^7} {name} {data}")
21,285
def get_instance(value, model): """Returns a model instance from value. If value is a string, gets by key name, if value is an integer, gets by id and if value is an instance, returns the instance. """ if not issubclass(model, db.Model): raise TypeError('Invalid type (model); expected subclass of Model.') if isinstance(value, basestring): return model.get_by_key_name(value) elif isinstance(value, (int, long)): return model.get_by_id(value) elif isinstance(value, model): return value else: raise TypeError('Invalid type (value); expected string, number or ' '%s.' % model.__name__)
21,286
def add_badfit_estimates(results, base_estimate_label="default", estimate_types=('wildcard',), badFitThreshold=None, opt_args=None, evaltree_cache=None, comm=None, memLimit=None, verbosity=0): """ Add any and all "bad fit" estimates to `results`. TODO: docstring """ printer = _objs.VerbosityPrinter.build_printer(verbosity, comm) base_estimate = results.estimates[base_estimate_label] lsgstLists = results.circuit_structs['iteration'] mdl_lsgst_list = base_estimate.models['iteration estimates'] mdl_start = base_estimate.models['seed'] target_model = base_estimate.models['target'] ds = results.dataset parameters = base_estimate.parameters if evaltree_cache is None: evaltree_cache = {} # so tree gets cached if badFitThreshold is not None and \ base_estimate.misfit_sigma(evaltree_cache=evaltree_cache, use_accurate_Np=True, comm=comm) <= badFitThreshold: return # fit is good enough - no need to add any estimates objective = parameters.get('objective', 'logl') validStructTypes = (_objs.LsGermsStructure, _objs.LsGermsSerialStructure) rawLists = [l.allstrs if isinstance(l, validStructTypes) else l for l in lsgstLists] circuitList = rawLists[-1] # use final circuit list mdl = mdl_lsgst_list[-1] # and model assert(parameters.get('weights', None) is None), \ "Cannot perform bad-fit scaling when weights are already given!" for badfit_typ in estimate_types: new_params = parameters.copy() new_final_model = None if badfit_typ in ("robust", "Robust", "robust+", "Robust+"): new_params['weights'] = get_robust_scaling(badfit_typ, mdl, ds, circuitList, parameters, evaltree_cache, comm, memLimit) if badfit_typ in ("Robust", "Robust+") and (opt_args is not None): mdl_reopt = reoptimize_with_weights(mdl, ds, circuitList, new_params['weights'], objective, opt_args, printer - 1) new_final_model = mdl_reopt elif badfit_typ == "wildcard": new_params['unmodeled_error'] = get_wildcard_budget(mdl, ds, circuitList, parameters, evaltree_cache, comm, memLimit, printer - 1) elif badfit_typ == "do nothing": continue # go to next on-bad-fit directive else: raise ValueError("Invalid on-bad-fit directive: %s" % badfit_typ) # In case we've computed an updated final model, Just keep (?) old estimates of all # prior iterations (or use "blank" sentinel once this is supported). models_by_iter = mdl_lsgst_list[:] if (new_final_model is None) \ else mdl_lsgst_list[0:-1] + [new_final_model] results.add_estimate(target_model, mdl_start, models_by_iter, new_params, base_estimate_label + "." + badfit_typ) #Add gauge optimizations to the new estimate for gokey, gaugeOptParams in base_estimate.goparameters.items(): if new_final_model is not None: add_gauge_opt(results.estimates[base_estimate_label + '.' + badfit_typ], gaugeOptParams, target_model, new_final_model, comm, printer - 1) else: # add same gauge-optimized result as above go_gs_final = base_estimate.models[gokey] results.estimates[base_estimate_label + '.' + badfit_typ].add_gaugeoptimized( gaugeOptParams.copy(), go_gs_final, None, comm, printer - 1)
21,287
def clean_infix(token, INFIX): """ Checks token for infixes. (ex. bumalik = balik) token: word to be stemmed for infixes returns STRING """ if check_validation(token): return token for infix in INFIX_SET: if len(token) - len(infix) >= 3 and count_vowel(token[len(infix):]) >= 2: if token[0] == token[4] and token[1: 4] == infix: INFIX.append(infix) return token[4:] elif token[2] == token[4] and token[1: 3] == infix: INFIX.append(infix) return token[0] + token[3:] elif token[1: 3] == infix and check_vowel(token[3]): INFIX.append(infix) return token[0] + token[3:] return token
21,288
def test_catalog_size(test_catalog): """Passes if catalog.size matches the length of data""" assert test_catalog.size == len(test_catalog.data)
21,289
def seq_to_encoder(input_seq):
    """Convert a space-separated string of numeric ids into the encoder inputs,
    decoder inputs, and target weights used for prediction.
    """
    input_seq_array = [int(v) for v in input_seq.split()]
    encoder_input = [PAD_ID] * \
        (input_seq_len - len(input_seq_array)) + input_seq_array
    decoder_input = [GO_ID] + [PAD_ID] * (output_seq_len - 1)
    encoder_inputs = [np.array([v], dtype=np.int32) for v in encoder_input]
    decoder_inputs = [np.array([v], dtype=np.int32) for v in decoder_input]
    target_weights = [np.array([1.0], dtype=np.float32)] * output_seq_len
    return encoder_inputs, decoder_inputs, target_weights
21,290
def SendEmailNotificationSuccess(): """ Sends a successful email notification to the EmailNotice attribute""" global gVoOutput global gSitesMissingData subject = "GRATIA-APEL interface for %s - SUCCESS (%s)" % (gDateFilter,gRunTime) contents = """The interface from Gratia to the APEL (WLCG) database was successful.""" if len(gWarnings) == 0: contents = contents + "\nNo warning conditions detected." else: contents = contents + "\n\nWarning conditions have been detected and a separate email will be sent." if len(gSitesMissingData) == 0: contents = contents + "\nAll sites are reporting.\n" else: contents = contents + "\nSites missing data for more than %s days:" % gParams["MissingDataDays"] sites = gSitesMissingData.keys() for site in sites: contents = contents + "\n" + site + ": " + str(gSitesMissingData[site]) SendEmailNotification(subject,contents)
21,291
def getMergers(tree, map_strain2species, options): """merge strains to species. returns the new tree with species merged and a dictionary of genes including the genes that have been merged. Currently, only binary merges are supported. """ n = TreeTools.GetSize(tree) + 1 all_strains = map_strain2species.keys() all_species = map_strain2species.values() genes = [] for x in range(n): g = {} for s in all_strains: g[s] = set() genes.append(g) # build list of species pairs that can be joined. map_species2strain = IOTools.getInvertedDictionary(map_strain2species) pairs = [] for species, strains in map_species2strain.items(): for x in range(len(strains)): for y in range(0, x): pairs.append((strains[x], strains[y])) # map of genes to new genes # each entry in the list is a pair of genes of the same species # but different strains to be joined. map_genes2new_genes = [] # dictionary of merged genes. This is to ensure that no gene # is merged twice merged_genes = {} def count_genes(node_id): """record number of genes per species for each node This is done separately for each strain. The counts are aggregated for each species over strains by taking the maximum gene count per strain. This ignores any finer tree structure below a species node. """ node = tree.node(node_id) if node.succ: this_node_set = genes[node_id] # process non-leaf node for s in node.succ: # propagate: terminated nodes force upper nodes to terminate # (assigned to None). if not genes[s]: this_node_set = None break # check if node merges genes that are not part of the positive # set for strain in all_strains: if strain in map_strain2species: # merge genes from all children this_node_set[strain] = this_node_set[ strain].union(genes[s][strain]) if len(this_node_set[strain]) > 1: # more than two genes for a single species, so no # join this_node_set = None break elif strain not in map_strain2species and \ this_node_set[strain] > 0: this_node_set = None break if this_node_set is None: genes[node_id] = None return for strain_x, strain_y in pairs: if len(this_node_set[strain_x]) == 1 and len(this_node_set[strain_y]) == 1: species = map_strain2species[strain_x] gene_x, gene_y = tuple(this_node_set[strain_x])[0], tuple( this_node_set[strain_y])[0] # check if these to genes have already been merged or are # merged with other partners already # The merged genes are assigned the same node_id, if they have # been already merged. key1 = strain_x + gene_x key2 = strain_y + gene_y if key1 > key2: key1, key2 = key2, key1 merge = False if key1 in merged_genes and key2 in merged_genes: if merged_genes[key1] == merged_genes[key2]: merge = True elif key1 not in merged_genes and key2 not in merged_genes: merge = True merged_genes[key1] = node_id merged_genes[key2] = node_id if merge: map_genes2new_genes.append( (node_id, species, strain_x, gene_x, strain_y, gene_y)) # once two genes have been joined, they can not be remapped # further genes[node_id] = None return else: # process leaf strain, t, g, q = parseIdentifier(node.data.taxon, options) if strain in map_strain2species: genes[node_id][strain].add(g) else: # do not process nodes that do not need to be mapped genes[node_id] = None tree.dfs(tree.root, post_function=count_genes) return map_genes2new_genes
21,292
def convolve_with_gaussian( data: np.ndarray, kernel_width: int = 21 ) -> np.ndarray: """ Convolves a 1D array with a gaussian kernel of given width """ # create kernel and normalize area under curve norm = stats.norm(0, kernel_width) X = np.linspace(norm.ppf(0.0001), norm.ppf(0.9999), kernel_width) _kernnel = norm.pdf(X) kernel = _kernnel / np.sum(_kernnel) return np.convolve(data, kernel, mode="same")
21,293
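A short demonstration of convolve_with_gaussian smoothing a noisy sine wave. It assumes the function above is defined in a module that imports NumPy and scipy.stats as stats, as its body requires; the signal itself is synthetic.

import numpy as np
from scipy import stats  # the function body refers to `stats.norm`

t = np.linspace(0, 4 * np.pi, 500)
noisy = np.sin(t) + np.random.normal(scale=0.3, size=t.size)

smoothed = convolve_with_gaussian(noisy, kernel_width=21)
print(noisy.std(), smoothed.std())  # the smoothed trace has visibly less variance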
def post_netspeed(event, context): """ Speed test data ingestion handler """ return process_reading(event['query'], NETSPEED_SQL)
21,294
def notify_host_disabled(token, host_name): """ Notify OpenStack Nova that a host is disabled """ url = token.get_service_url(OPENSTACK_SERVICE.NOVA, strip_version=True) if url is None: raise ValueError("OpenStack Nova URL is invalid") # Get the service ID for the nova-compute service. compute_service_id = get_host_service_id(token, host_name, 'nova-compute') api_cmd = url + "/v2.1/%s/os-services/%s" % (token.get_tenant_id(), compute_service_id) api_cmd_headers = dict() api_cmd_headers['Content-Type'] = "application/json" api_cmd_headers['X-OpenStack-Nova-API-Version'] = NOVA_API_VERSION api_cmd_payload = dict() api_cmd_payload['forced_down'] = True response = rest_api_request(token, "PUT", api_cmd, api_cmd_headers, json.dumps(api_cmd_payload)) return response
21,295
def compute_moments_weights_slow(mu, x2, neighbors, weights): """ This version exaustively iterates over all |E|^2 terms to compute the expected moments exactly. Used to test the more optimized formulations that follow """ N = neighbors.shape[0] K = neighbors.shape[1] # Calculate E[G] EG = 0 for i in range(N): for k in range(K): j = neighbors[i, k] wij = weights[i, k] EG += wij*mu[i]*mu[j] # Calculate E[G^2] EG2 = 0 for i in range(N): EG2_i = 0 for k in range(K): j = neighbors[i, k] wij = weights[i, k] for x in range(N): for z in range(K): y = neighbors[x, z] wxy = weights[x, z] s = wij*wxy if s == 0: continue if i == x: if j == y: t1 = x2[i]*x2[j] else: t1 = x2[i]*mu[j]*mu[y] elif i == y: if j == x: t1 = x2[i]*x2[j] else: t1 = x2[i]*mu[j]*mu[x] else: # i is unique since i can't equal j if j == x: t1 = mu[i] * x2[j] * mu[y] elif j == y: t1 = mu[i] * x2[j] * mu[x] else: # i and j are unique, no shared nodes t1 = mu[i] * mu[j] * mu[x] * mu[y] EG2_i += s * t1 EG2 += EG2_i return EG, EG2
21,296
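A toy call to compute_moments_weights_slow on a three-node graph where each node lists two neighbours; the arrays below are arbitrary but shaped as the function expects (neighbors and weights both N x K).

import numpy as np

mu = np.array([0.5, 1.0, 1.5])        # per-node means
x2 = np.array([0.5, 1.5, 2.5])        # per-node second moments, E[x_i^2]
neighbors = np.array([[1, 2],
                      [0, 2],
                      [0, 1]])        # indices of each node's K=2 neighbours
weights = np.full((3, 2), 0.5)        # uniform edge weights

EG, EG2 = compute_moments_weights_slow(mu, x2, neighbors, weights)
print(EG, EG2)                        # Var(G) would then be EG2 - EG**2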
def sum_fn(fun, ndims=0): """Higher order helper for summing the result of fun.""" @functools.wraps(fun) def wrapped(*args): batch_loglik = fun(*args) return jnp.sum( batch_loglik.reshape((-1,) + batch_loglik.shape[-ndims + len(batch_loglik.shape):]), axis=0) return wrapped
21,297
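A hedged sketch of sum_fn in use: with the default ndims=0 it simply sums the per-example outputs of the wrapped function over the leading axis. It assumes the surrounding module imports functools and jax.numpy as jnp, as the wrapper requires; the per-sample function is a stand-in.

import jax.numpy as jnp  # the wrapper body uses `jnp.sum`

per_example_loglik = lambda x: -0.5 * x ** 2      # stand-in per-sample log-likelihood
total_loglik = sum_fn(per_example_loglik)          # ndims=0: reduce over the batch axis

x = jnp.array([0.0, 1.0, 2.0])
print(total_loglik(x))                             # -0.5 * (0 + 1 + 4) = -2.5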
def p_value_analysis(data, opts): """ p-value analysis with cumulative freq. Only positive signals. Negative ctools ts => 0 """ # photometrics analysis total = len(data) positive_signals = [d for d in data if d['phm_excess'] > 0] log.info(f' considered data: {len(data)}') log.info(f'with positive signals: {len(positive_signals)}') # data augmentation. add sqrt(ts) to data for p in positive_signals: # if False and p['ts'] < -1: # raise Exception(f"negative ts {p['ts']:.2e} is not so tiny. {p}") p['sqrt_ts'] = np.sqrt(p['ts']) if p['ts'] > 0 else 0 # ######## thresholds = np.linspace(0, 5, 11) resulting_freq = compute_integral_frequency_distribution( positive_signals, keys=['phm_li_ma', 'li_ma', 'sqrt_ts'], thresholds=thresholds, total=total, ) data_to_plot = [ { 'x': thresholds, 'y': resulting_freq['phm_li_ma']['freq'], 'yerr': resulting_freq['phm_li_ma']['freq_err'], 'label': 'photometric Li&Ma', 'marker': '.', 'color': 'orange', }, { 'x': thresholds, 'y': resulting_freq['sqrt_ts']['freq'], 'yerr': resulting_freq['sqrt_ts']['freq_err'], 'label': 'ctools sqrt(TS)', 'marker': '.', 'color': 'blue', }, # { 'x': thresholds, # 'y': resulting_freq['li_ma']['freq'], # 'label': 'Li&Ma over ctools data', # 'marker': 'x', # 'color': 'green', }, ] save_filename = None if opts.save: save_filename = f'empty_field_norm_{opts.tmax:04d}.png' plot_frequency(data_to_plot, n=total, save=save_filename, title=f't={opts.tmax} sec Np={len(positive_signals)} N={total}')
21,298
def get_control_policy_attachments(language: Optional[str] = None, output_file: Optional[str] = None, policy_type: Optional[str] = None, target_id: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetControlPolicyAttachmentsResult: """ This data source provides the Resource Manager Control Policy Attachments of the current Alibaba Cloud user. > **NOTE:** Available in v1.120.0+. ## Example Usage Basic Usage ```python import pulumi import pulumi_alicloud as alicloud example = alicloud.resourcemanager.get_control_policy_attachments(target_id="example_value") pulumi.export("firstResourceManagerControlPolicyAttachmentId", example.attachments[0].id) ``` :param str language: The language. Valid value `zh-CN`, `en`, and `ja`. Default value `zh-CN` :param str policy_type: The type of policy. :param str target_id: The Id of target. """ __args__ = dict() __args__['language'] = language __args__['outputFile'] = output_file __args__['policyType'] = policy_type __args__['targetId'] = target_id if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('alicloud:resourcemanager/getControlPolicyAttachments:getControlPolicyAttachments', __args__, opts=opts, typ=GetControlPolicyAttachmentsResult).value return AwaitableGetControlPolicyAttachmentsResult( attachments=__ret__.attachments, id=__ret__.id, ids=__ret__.ids, language=__ret__.language, output_file=__ret__.output_file, policy_type=__ret__.policy_type, target_id=__ret__.target_id)
21,299