content
stringlengths
22
815k
id
int64
0
4.91M
def get_internal_plot_drive_to_use():
    """Return the *next* (second) available internal plot drive.

    Same as the primary-drive lookup above but returns the next drive in
    natural-sort order, spreading plot copies across drives instead of
    saturating a single one. When no additional drive qualifies, logs an
    error and exits the process.

    Returns:
        tuple: ``(mountpoint, device, drive)`` for the selected drive.
    """
    available_drives = []
    try:
        for part in psutil.disk_partitions(all=False):
            # Only physical /dev/sd* devices mounted under /mnt/enclosure
            # with room for at least one more plot, excluding drives that
            # have been administratively offlined.
            if part.device.startswith('/dev/sd') \
                    and part.mountpoint.startswith('/mnt/enclosure') \
                    and get_drive_info('space_free_plots_by_mountpoint', part.mountpoint) >= 1 \
                    and get_drive_by_mountpoint(part.mountpoint) not in chianas.offlined_drives:
                drive = get_drive_by_mountpoint(part.mountpoint)
                available_drives.append((part.mountpoint, part.device, drive))
        # Index [1] intentionally picks the *second* drive — the "next"
        # drive after the primary one returned by the function above.
        return (natsorted(available_drives)[1])
    except IndexError:
        # BUGFIX: this is a fatal condition; it was previously logged at
        # DEBUG level and could be silently filtered out of the logs.
        log.error("ERROR: No Additional Internal Drives Found, Please add drives, run auto_drive.py and try again!")
        exit()
5,332,400
def store_build_time(tsuite, res, host, build_time):
    """Handle storing the build time for processing.

    Args:
        tsuite: runtime tsuite instance; must expose a ``test_report``
            dict containing a ``"build"`` sub-dict.
        res: resource listing type
        host: host of the process
        build_time: time in seconds required to create
    """
    # BUGFIX: the membership check previously tested ``tsuite.test_report``
    # while the write went into ``tsuite.test_report["build"]``, so the
    # per-resource dict was re-created on every call, discarding build
    # times already recorded for other hosts.
    if res not in tsuite.test_report["build"]:
        tsuite.test_report["build"][res] = {}
    tsuite.test_report["build"][res][host] = {
        "build_time": build_time
    }
5,332,401
def _build_discretize_fn(value_type, stochastic, beta):
    """Builds a `tff.tf_computation` for discretization.

    Args:
        value_type: TFF type of the structure to discretize; becomes the
            first argument type of the returned computation.
        stochastic: Forwarded to `_discretize_struct`; presumably toggles
            stochastic rounding — TODO confirm against that helper.
        beta: Forwarded to `_discretize_struct`.

    Returns:
        A `tff.tf_computation` taking `(value, scale_factor,
        prior_norm_bound)` and returning the discretized structure.
    """
    @computations.tf_computation(value_type, tf.float32, tf.float32)
    def discretize_fn(value, scale_factor, prior_norm_bound):
        return _discretize_struct(value, scale_factor, stochastic, beta, prior_norm_bound)
    return discretize_fn
5,332,402
def expand_mapping_target(namespaces, val):
    """Expand a mapping target, expressed as a comma-separated list of
    CURIE-like strings potentially prefixed with ^ to express inverse
    properties, into a list of (uri, inverse) tuples, where uri is a
    URIRef and inverse is a boolean."""
    expanded = []
    for item in (part.strip() for part in val.split(',')):
        # A leading caret marks an inverse property.
        is_inverse = item.startswith('^')
        if is_inverse:
            item = item[1:]
        expanded.append((expand_curielike(namespaces, item), is_inverse))
    return expanded
5,332,403
def home(request):
    """Handle the default request, for when no endpoint is specified."""
    greeting = "This is Michael's REST API!"
    return Response(greeting)
5,332,404
def create_message(sender, to, subject, message_text, is_html=False):
    """Create a message for an email.

    Args:
        sender: Email address of the sender.
        to: Email address of the receiver.
        subject: The subject of the email message.
        message_text: The text of the email message.
        is_html: When True, build an HTML MIME part instead of plain text.

    Returns:
        An object containing a base64url encoded email object.
    """
    # Build the MIME part first, then attach the routing headers.
    if is_html:
        mime_part = MIMEText(message_text, "html")
    else:
        mime_part = MIMEText(message_text)
    for header, value in (("to", to), ("from", sender), ("subject", subject)):
        mime_part[header] = value
    raw_bytes = urlsafe_b64encode(mime_part.as_bytes())
    return {"raw": raw_bytes.decode()}
5,332,405
def get_agent(runmode, name):  # noqa: E501
    """get_agent  # noqa: E501

    Stub handler — returns a fixed placeholder string.

    :param runmode:
    :type runmode: str
    :param name:
    :type name: str

    :rtype: None
    """
    placeholder = 'do some magic!'
    return placeholder
5,332,406
def find_splits(array1: list, array2: list) -> list:
    """Find the split points of the given array of events"""
    boundaries = set()
    # Collect start/end of every event's temporalRange from both arrays.
    for event in array1 + array2:
        boundaries.add(event["temporalRange"][0])
        boundaries.add(event["temporalRange"][1])
    return sorted(boundaries)
5,332,407
def to_subtask_dict(subtask):
    """Convert a subtask object into a plain dictionary.

    :rtype: ``dict``
    """
    return {
        'id': subtask.id,
        'key': subtask.key,
        'summary': subtask.fields.summary,
    }
5,332,408
def _partition_labeled_span(
    contents: Text, labeled_span: substitution.LabeledSpan
) -> Tuple[substitution.LabeledSpan, Optional[substitution.LabeledSpan],
           Optional[substitution.LabeledSpan]]:
    """Splits a labeled span into first line, intermediate, last line.

    Args:
        contents: The full text the span's offsets index into.
        labeled_span: The span to partition.

    Returns:
        A 3-tuple ``(first, between, last)``:
          * span on a single line  -> ``(span, None, None)``
          * span on exactly two lines -> ``(first, None, last)``
          * otherwise -> first line, everything between, last line.
    """
    start, end = labeled_span.span
    # First newline inside the span; absent means the span stays on one line.
    first_newline = contents.find('\n', start, end)
    if first_newline == -1:
        return (labeled_span, None, None)
    first, remainder = _split_labeled_span_after(labeled_span, first_newline)
    # Last newline within the remainder; absent means exactly two lines.
    last_newline = contents.rfind('\n', *remainder.span)
    if last_newline == -1:
        return (first, None, remainder)
    between, last = _split_labeled_span_after(remainder, last_newline)
    return (first, between, last)
5,332,409
def is_active(relation_id: RelationID) -> bool:
    """Retrieve an activation record from a relation ID.

    Args:
        relation_id: ID of the relation to check.

    Returns:
        True iff no relation in the DB supersedes or suppresses it.

    Raises:
        DBLookUpError: If the underlying DB query fails.
    """
    # query to DB
    try:
        sups = db.session.query(RelationDB) \
            .filter(RelationDB.supercedes_or_suppresses == int(relation_id)) \
            .first()
    except Exception as e:
        raise DBLookUpError from e
    # Active means no superceder/suppressor exists.
    # (Redundant ``bool(...)`` wrapper removed — ``is None`` is already bool.)
    return sups is None
5,332,410
def vectorize_with_similarities(text, vocab_tokens, vocab_token_to_index, vocab_matrix):
    """
    Generate a vector representation of a text string based on a word
    similarity matrix. The resulting vector has n positions, where n is the
    number of words or tokens in the full vocabulary. The value at each
    position indicates the maximum similarity between that corresponding
    word in the vocabulary and any of the words or tokens in the input text
    string, as given by the input similarity matrix. This is similar to an
    n-grams approach but uses the similarity between non-identical words or
    tokens to make the vector semantically meaningful.

    Args:
        text (str): Any arbitrary text string.
        vocab_tokens (list of str): The words or tokens that make up the
            entire vocabulary.
        vocab_token_to_index (dict of str:int): Mapping between words in
            the vocabulary and an index in rows/columns of the matrix.
        vocab_matrix (numpy.array): Pairwise similarity values between all
            possible pairs of words in the vocabulary.

    Returns:
        list: A numerical vector with length equal to the vocabulary size.
    """
    doc_tokens = [token for token in text.split() if token in vocab_tokens]
    # ROBUSTNESS FIX: with no in-vocabulary tokens the original raised
    # ValueError (max() of an empty sequence). Return an all-zero vector,
    # meaning "no similarity to anything", instead.
    if not doc_tokens:
        return [0.0] * len(vocab_tokens)
    # Hoist the index lookups for the document tokens out of the loop.
    doc_indices = [vocab_token_to_index[t] for t in doc_tokens]
    vector = [
        max(vocab_matrix[vocab_token_to_index[vt]][di] for di in doc_indices)
        for vt in vocab_tokens
    ]
    return vector
5,332,411
def get_puppet_node_cert_from_server(node_name):
    """
    Init environment to connect to Puppet Master and retrieve the
    certificate for that node in the server (if it exists).

    :param node_name: Name of target node
    :return: Certificate for that node in Puppet Master or None if this
        information has not been found
    """
    # Establish the master connection before issuing the cert command.
    _init_puppet_master_connection()
    return _execute_command(COMMAND_PUPPET_GET_CERT.format(node_name))
5,332,412
def get_notebook_workspace(account_name: Optional[str] = None,
                           notebook_workspace_name: Optional[str] = None,
                           resource_group_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNotebookWorkspaceResult:
    """
    A notebook workspace resource


    :param str account_name: Cosmos DB database account name.
    :param str notebook_workspace_name: The name of the notebook workspace resource.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Build the invoke arguments in the camelCase shape the provider expects.
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['notebookWorkspaceName'] = notebook_workspace_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the invoke to this SDK's version when the caller didn't set one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20190801:getNotebookWorkspace', __args__, opts=opts, typ=GetNotebookWorkspaceResult).value

    # Repack the raw invoke result into the awaitable result type.
    return AwaitableGetNotebookWorkspaceResult(
        id=__ret__.id,
        name=__ret__.name,
        notebook_server_endpoint=__ret__.notebook_server_endpoint,
        status=__ret__.status,
        type=__ret__.type)
5,332,413
def get_current_player(player_one_turn: bool) -> str:
    """Return 'player one' iff player_one_turn is True; otherwise, return
    'player two'.

    >>> get_current_player(True)
    'player one'
    >>> get_current_player(False)
    'player two'
    """
    return P1 if player_one_turn else P2
5,332,414
def dis_table_reformat():
    """ Reformat table (and recalculate any formulae) """
    # No-op unless the cursor is currently inside a table.
    if dis_in_table():
        _recalc()
        _reformat()
5,332,415
def CityscapesGTFine(path: str) -> Dataset:
    """`CityscapesGTFine <https://www.cityscapes-dataset.com/>`_ dataset.

    The file structure should be like::

        <path>
            leftImg8bit/
                test/
                    berlin/
                        berlin_000000_000019_leftImg8bit.png
                        ...
                    ...
                train/
                    aachen/
                        aachen_000000_000019_leftImg8bit.png
                        ...
                    ...
                val/
                    frankfurt/
                        frankfurt_000000_000019_leftImg8bit.png
                        ...
                    ...
                ...
            gtFine/
                test/
                    berlin/
                        berlin_000000_000019_gtFine_instanceIds.png
                        berlin_000000_000019_gtFine_labelIds.png
                        berlin_000000_000019_gtFine_polygons.json
                        ...
                    ...
                train/
                    aachen/
                        aachen_000000_000019_gtFine_instanceIds.png
                        aachen_000000_000019_gtFine_labelIds.png
                        aachen_000000_000019_gtFine_polygons.json
                        ...
                    ...
                val/
                    frankfurt/
                        frankfurt_000000_000019_gtFine_instanceIds.png
                        frankfurt_000000_000019_gtFine_labelIds.png
                        frankfurt_000000_000019_gtFine_polygons.json
                        ...
                    ...
                ...

    Arguments:
        path: The root directory of the dataset.

    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.
    """
    # FIX: dropped the pointless single-argument ``os.path.join`` wrapper
    # around the normalized path — it was a no-op.
    root_path = os.path.abspath(os.path.expanduser(path))
    dataset = Dataset(DATASET_NAME_GTFINE)
    dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))
    for segment_name in _SEGMENT_NAMES_GTFINE:
        segment = dataset.create_segment(segment_name)
        # One data entry per left-camera image; labels are resolved from
        # the matching gtFine files by the helper.
        for image_path in glob(os.path.join(root_path, "leftImg8bit", segment_name, "*", "*.png")):
            segment.append(_get_data(image_path, root_path, segment_name, "gtFine"))
    return dataset
5,332,416
def test_ecs_mxnet_training_dgl_cpu(cpu_only, py3_only, ecs_container_instance, mxnet_training, training_cmd, ecs_cluster_name): """ CPU DGL test for MXNet Training Instance Type - c4.2xlarge DGL is only supported in py3, hence we have used the "py3_only" fixture to ensure py2 images don't run on this function. Given above parameters, registers a task with family named after this test, runs the task, and waits for the task to be stopped before doing teardown operations of instance and cluster. """ # TODO: remove/update this when DGL supports MXNet 1.9 _, framework_version = get_framework_and_version_from_tag(mxnet_training) if Version(framework_version) >= Version('1.9.0'): pytest.skip("Skipping DGL tests as DGL does not yet support MXNet 1.9") instance_id, cluster_arn = ecs_container_instance ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, mxnet_training, instance_id)
5,332,417
def is_word(s):
    """ String `s` counts as a word if it has at least one letter. """
    return any(ch.isalpha() for ch in s)
5,332,418
def index_all_messages(empty_index):
    """
    Expected index of `initial_data` fixture when model.narrow = []
    """
    combined = dict(empty_index)
    combined['all_msg_ids'] = {537286, 537287, 537288}
    return combined
5,332,419
def compute_affine_matrix(in_shape, out_shape, crop=None, degrees=0.0,
                          translate=(0.0, 0.0), flip_h=False, flip_v=False,
                          resize=False, keep_ratio=False):
    """
    Similarity warp transformation of the image keeping center invariant.

    Args:
        in_shape (Sequence): the shape of the input image
        out_shape (Sequence): the shape of the output image
        crop (Sequence, optional): crop center location, width and height.
            The center location is relative to the center of the image. If
            :attr:`resize` is not ``True``, crop is simply a translation in
            the :attr:`in_shape` space.
        degrees (float or int, optional): degrees to rotate the crop.
            (default: ``(0.0)``)
        translate (Sequence, optional): horizontal and vertical
            translations. (default: ``(0.0, 0.0)``)
        flip_h (bool, optional): flip the image horizontally.
            (default: ``False``)
        flip_v (bool, optional): flip the image vertically.
            (default: ``False``)
        resize (bool, optional): resize the cropped image to fit the
            output's size. (default: ``False``)
        keep_ratio (bool, optional): match the smaller edge to the
            corresponding output edge size, keeping the aspect ratio after
            resize. Has no effect if :attr:`resize` is ``False``.
            (default: ``False``)

    Returns:
        numpy.ndarray: 3x3 homogeneous matrix composed as
        ``T_inshape @ T_crop @ R @ S_resize @ T_outshapeT``.
    """
    if crop is not None:
        T_crop_x, T_crop_y, crop_w, crop_h = crop
    else:
        T_crop_x, T_crop_y = 0, 0
        crop_w, crop_h = in_shape
    r = np.deg2rad(degrees)
    tx, ty = translate
    # Flip factors: +1 (no flip) or -1 (flip) on each axis.
    fh = 1 - 2 * float(flip_h)
    fv = 1 - 2 * float(flip_v)
    #
    # H = T_inshape*T_crop*R*S_resize*T_outshapeT
    #
    # Center of the input image (pixel-center convention: (n-1)/2).
    T_i_x = (in_shape[0] - 1) / 2
    T_i_y = (in_shape[1] - 1) / 2
    T_inshape = np.asarray([[fh, 0, T_i_x],
                            [0, fv, T_i_y],
                            [0, 0, 1]])
    T_crop = np.asarray([[1, 0, T_crop_x],
                         [0, 1, T_crop_y],
                         [0, 0, 1]])
    R = np.asarray([[+np.cos(r), -np.sin(r), 0],
                    [+np.sin(r), +np.cos(r), 0],
                    [0, 0, 1]])
    S_r_x = 1
    S_r_y = 1
    if resize:
        # Rotate the crop's corner offsets to get the rotated extent, then
        # scale it onto the output size.
        top_left, bot_right = R.dot([[-crop_w / 2, crop_w / 2],
                                     [-crop_h / 2, crop_h / 2],
                                     [1, 1]]).transpose()[:, 0:2]
        crop_w, crop_h = np.absolute(bot_right - top_left)
        S_r_x = crop_w / out_shape[0]
        S_r_y = crop_h / out_shape[1]
        if keep_ratio:
            # Uniform scale: keep aspect ratio by using the smaller factor.
            scale_ratio = min(S_r_x, S_r_y)
            S_r_x = scale_ratio
            S_r_y = scale_ratio
    S_resize = np.asarray([[S_r_x, 0, 0],
                           [0, S_r_y, 0],
                           [0, 0, 1]])
    # Translation back from the output image's center (plus user translate).
    T_o_x = tx - (out_shape[0] - 1) / 2
    T_o_y = ty - (out_shape[1] - 1) / 2
    T_outshapeT = np.asarray([[1, 0, T_o_x],
                              [0, 1, T_o_y],
                              [0, 0, 1]])
    return T_inshape.dot(T_crop).dot(R).dot(S_resize).dot(T_outshapeT)
5,332,420
def Bern_to_Fierz_nunu(C, ddll):
    """From semileptonic Bern basis to Fierz semileptonic basis for Class V.
    C should be the corresponding leptonic Fierz basis and `ddll` should be
    of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
    # Strip the lepton-flavour separators to get the bare index string.
    ind = ddll.replace('l_', '').replace('nu_', '')
    result = {}
    result['F' + ind + 'nu'] = C['nu1' + ind]
    result['F' + ind + 'nup'] = C['nu1p' + ind]
    return result
5,332,421
def enforce_boot_from_volume(client):
    """Add boot from volume args in create server method call.

    Replaces ``client.servers`` with a ServerManager subclass whose
    ``create`` injects a ``block_device_mapping_v2`` built from the image,
    unless the caller or the image metadata already supplies a mapping.
    """
    class ServerManagerBFV(servers.ServerManager):
        def __init__(self, client):
            super(ServerManagerBFV, self).__init__(client)
            # Image client used only to inspect image metadata below.
            self.bfv_image_client = images.ImageManager(client)

        def create(self, name, image, flavor, **kwargs):
            image_obj = self.bfv_image_client.get(image)
            # Only inject when no mapping exists anywhere: not in the image
            # metadata, and not in the caller-supplied kwargs (v1 or v2).
            if "block_device_mapping" not in image_obj.metadata.keys() and \
                    not "block_device_mapping_v2" in kwargs.keys() and \
                    not "block_device_mapping" in kwargs.keys():
                # Caller-supplied volume size wins over the configured default.
                if 'volume_size' in kwargs:
                    vol_size = kwargs.pop('volume_size')
                else:
                    vol_size = CONF.nova_server_volume_size
                bv_map = [{
                    "source_type": "image",
                    "destination_type": "volume",
                    "delete_on_termination": "1",
                    "boot_index": 0,
                    "uuid": image,
                    "device_name": "vda",
                    "volume_size": str(vol_size)}]
                bdm_args = {
                    'block_device_mapping_v2' : bv_map,
                }
                kwargs.update(bdm_args)
                # The image is now referenced via the mapping's uuid, so the
                # positional image argument is cleared.
                image = ''
            return super(ServerManagerBFV, self).create(name, image, flavor, **kwargs)

    client.servers = ServerManagerBFV(client)
5,332,422
def img_to_yuv(frame, mode, grayscale=False):
    """Change color space of `frame` from any supported `mode` to YUV.

    Args:
        frame: list of planes; planar modes pass [y, u, v] or [y, uv],
            packed modes pass a single-element list with the RGB/BGR array.
        mode: A string, must be one of [YV12, YV21, NV12, NV21, RGB, BGR]
        grayscale: discard uv planes

    Returns:
        PIL Image in 'YCbCr' mode (or 'L' when grayscale).

    Raises:
        TypeError: if `frame` is not a list.
        ValueError: if `mode` is not supported.
    """
    _planar_mode = ('YV12', 'YV21', 'NV12', 'NV21')
    _packed_mode = ('RGB', 'BGR')
    _allowed_mode = (*_planar_mode, *_packed_mode)
    if not isinstance(frame, list):
        raise TypeError("frame must be a list of numpy array")
    # FIX: idiomatic membership test (was ``not mode in``).
    if mode not in _allowed_mode:
        raise ValueError("invalid mode: " + mode)
    if mode in _planar_mode:
        if mode in ('YV12', 'YV21'):
            y, u, v = frame
        elif mode in ('NV12', 'NV21'):
            # NV formats interleave U and V in one plane; de-interleave.
            y, uv = frame
            u = uv.flatten()[0::2].reshape([1, uv.shape[1] // 2, uv.shape[2]])
            v = uv.flatten()[1::2].reshape([1, uv.shape[1] // 2, uv.shape[2]])
        else:
            y = u = v = None
        y = np.transpose(y)
        u = np.transpose(u)
        v = np.transpose(v)
        # The '21' variants store V before U.
        if '21' in mode:
            u, v = v, u
        if not grayscale:
            # Upsample chroma 2x in both directions by pixel duplication.
            up_u = np.zeros(shape=[u.shape[0] * 2, u.shape[1] * 2, u.shape[2]])
            up_v = np.zeros(shape=[v.shape[0] * 2, v.shape[1] * 2, v.shape[2]])
            up_u[0::2, 0::2, :] = up_u[0::2, 1::2, :] = u
            up_u[1::2, ...] = up_u[0::2, ...]
            up_v[0::2, 0::2, :] = up_v[0::2, 1::2, :] = v
            up_v[1::2, ...] = up_v[0::2, ...]
            yuv = np.concatenate([y, up_u, up_v], axis=-1)
            yuv = np.transpose(yuv, [1, 0, 2])  # PIL needs [W, H, C]
            img = Image.fromarray(yuv.astype('uint8'), mode='YCbCr')
        else:
            y = np.squeeze(y)
            img = Image.fromarray(np.transpose(y).astype('uint8'), mode='L')
    elif mode in _packed_mode:
        # BUGFIX: was ``len(frame) is 1`` — identity comparison with an int
        # literal is implementation-dependent and raises SyntaxWarning.
        assert len(frame) == 1
        rgb = np.asarray(frame[0])
        if mode == 'BGR':
            rgb = rgb[..., ::-1]
        rgb = np.transpose(rgb, [1, 0, 2])
        if not grayscale:
            img = Image.fromarray(rgb, mode='RGB').convert('YCbCr')
        else:
            img = Image.fromarray(rgb, mode='RGB').convert('L')
    else:
        raise RuntimeError("unreachable!")
    # return img_to_array(image1) if turn_array else image1
    return img
5,332,423
def assemblenet_kinetics600() -> cfg.ExperimentConfig:
    """Video classification on Videonet with assemblenet.

    Starts from the stock kinetics600 experiment config and overrides the
    data pipeline and backbone for AssembleNet-50.
    """
    exp = video_classification.video_classification_kinetics600()
    # (frames, height, width, channels) for training clips.
    feature_shape = (32, 224, 224, 3)
    exp.task.train_data.global_batch_size = 1024
    exp.task.validation_data.global_batch_size = 32
    exp.task.train_data.feature_shape = feature_shape
    # Validation uses longer clips (120 frames) at the same resolution.
    exp.task.validation_data.feature_shape = (120, 224, 224, 3)
    exp.task.train_data.dtype = 'bfloat16'
    exp.task.validation_data.dtype = 'bfloat16'
    model = AssembleNetModel()
    model.backbone.assemblenet.model_id = '50'
    model.backbone.assemblenet.blocks = flat_lists_to_blocks(
        asn50_structure, asn_structure_weights)
    model.backbone.assemblenet.num_frames = feature_shape[0]
    exp.task.model = model
    # Sanity check: the backbone must know the clip length.
    assert exp.task.model.backbone.assemblenet.num_frames > 0, (
        f'backbone num_frames '
        f'{exp.task.model.backbone.assemblenet}')
    return exp
5,332,424
def sendTelegramMessage(TOKEN, CHAT_ID, MSG):
    """
    Function to send message to telegram bot

    Parameters
    ----------
    TOKEN : STRING
        Telegram secret token.
    CHAT_ID : STRING
        Telegram chat id of user/ group to which you want to send msg.
    MSG : TYPE
        Message sent to telegram.

    Returns
    -------
    None.
    """
    url = "https://api.telegram.org/bot{token}/sendMessage?".format(token = TOKEN)
    # BUGFIX: the message was previously concatenated into the URL without
    # escaping, so any '&', '#', space or non-ASCII character corrupted or
    # truncated the message. Let requests URL-encode the query parameters.
    response = requests.get(url, params={"chat_id": CHAT_ID, "text": MSG})
    status = json.loads(response.text).get('ok')
    if(status == False):
        print("Message sending to telegram failed. Check Telegram ID or Telegram Token entered. Exiting!!!")
        sys.exit()
5,332,425
def channel_selection(inputs, module, sparsity=0.5, method='greedy'):
    """Select the important input channels of the given module.

    Finds the subset of input channels that best reproduces the module's
    original output, and returns which channels to keep and which to prune.
    (Docstring translated from Korean.)

    :param inputs: torch.Tensor, input features map
    :param module: torch.nn.module, layer
    :param sparsity: float, 0 ~ 1 how many prune channel of output of this layer
    :param method: str, how to select the channel ('greedy', 'greedy_GM' or 'lasso')
    :return: (indices_stayed, indices_pruned) — lists of channel indices
        that are kept and pruned, respectively.
    """
    num_channel = inputs.size(1)  # number of input channels
    # Number of channels to prune to reach the requested sparsity.
    num_pruned = int(math.ceil(num_channel * sparsity))
    num_stayed = num_channel - num_pruned

    print('num_pruned', num_pruned)
    if method == 'greedy':
        # Greedily grow the *pruned* set: at each step add the channel whose
        # removal (i.e. keeping only the tried set zeroed-in) yields the
        # smallest output norm.
        indices_pruned = []
        while len(indices_pruned) < num_pruned:
            min_diff = 1e10
            min_idx = 0
            for idx in range(num_channel):
                if idx in indices_pruned:
                    continue
                indices_try = indices_pruned + [idx]
                inputs_try = torch.zeros_like(inputs)
                inputs_try[:, indices_try, ...] = inputs[:, indices_try, ...]
                output_try = module(inputs_try)
                output_try_norm = output_try.norm(2)
                if output_try_norm < min_diff:
                    min_diff = output_try_norm
                    min_idx = idx
            indices_pruned.append(min_idx)
        print('indices_pruned !!! ', indices_pruned)
        indices_stayed = list(set([i for i in range(num_channel)]) - set(indices_pruned))
    elif method == 'greedy_GM':
        # Greedily grow the *kept* set using a geometric-median style
        # criterion: pick the channel maximizing the pairwise-distance sum.
        indices_stayed = []
        while len(indices_stayed) < num_stayed:
            max_farthest_channel_norm = 1e-10
            farthest_channel_idx = 0
            for idx in range(num_channel):
                if idx in indices_stayed:
                    continue
                indices_try = indices_stayed + [idx]
                inputs_try = torch.zeros_like(inputs)
                inputs_try[:, indices_try, ...] = inputs[:, indices_try, ...]
                output_try = module(inputs_try).view(num_channel, -1).cpu().detach().numpy()
                # Pairwise euclidean distances between rows of the output.
                similar_matrix = distance.cdist(output_try, output_try, 'euclidean')
                similar_sum = np.sum(np.abs(similar_matrix), axis=0)
                similar_large_index = similar_sum.argsort()[-1]
                farthest_channel_norm = np.linalg.norm(similar_sum[similar_large_index])
                if max_farthest_channel_norm < farthest_channel_norm:
                    max_farthest_channel_norm = farthest_channel_norm
                    farthest_channel_idx = idx
            print(farthest_channel_idx)
            indices_stayed.append(farthest_channel_idx)
        print('indices_stayed !!! ', indices_stayed)
        indices_pruned = list(set([i for i in range(num_channel)]) - set(indices_stayed))
    elif method == 'lasso':
        y = module(inputs)
        if module.bias is not None:  # bias.shape = [N]
            bias_size = [1] * y.dim()  # bias_size: [1, 1, 1, 1]
            bias_size[1] = -1  # [1, -1, 1, 1]
            bias = module.bias.view(bias_size)  # bias.view([1, -1, 1, 1]) = [1, N, 1, 1]
            # Subtract the bias from the output feature (y - b).
            y -= bias
        else:
            bias = 0.
        y = y.view(-1).data.cpu().numpy()  # flatten all of outputs
        # Per-channel contribution to the output, one column per channel.
        y_channel_spread = []
        for i in range(num_channel):
            x_channel_i = torch.zeros_like(inputs)
            x_channel_i[:, i, ...] = inputs[:, i, ...]
            y_channel_i = module(x_channel_i) - bias
            y_channel_spread.append(y_channel_i.data.view(-1, 1))
        y_channel_spread = torch.cat(y_channel_spread, dim=1).cpu()
        alpha = 1e-7
        solver = Lasso(alpha=alpha, warm_start=True, selection='random', random_state=0)

        # choice_idx = np.random.choice(y_channel_spread.size()[0], 2000, replace=False)
        # selected_y_channel_spread = y_channel_spread[choice_idx, :]
        # new_output = y[choice_idx]
        #
        # del y_channel_spread, y

        # Increase alpha (doubling) until enough channels are zeroed out.
        alpha_l, alpha_r = 0, alpha
        num_pruned_try = 0
        while num_pruned_try < num_pruned:
            alpha_r *= 2
            solver.alpha = alpha_r
            # solver.fit(selected_y_channel_spread, new_output)
            solver.fit(y_channel_spread, y)
            num_pruned_try = sum(solver.coef_ == 0)

        # Once a sufficiently pruning alpha is found, binary-search the
        # interval to hit the target pruned count more precisely.
        num_pruned_max = int(num_pruned)
        while True:
            alpha = (alpha_l + alpha_r) / 2
            solver.alpha = alpha
            # solver.fit(selected_y_channel_spread, new_output)
            solver.fit(y_channel_spread, y)
            num_pruned_try = sum(solver.coef_ == 0)
            if num_pruned_try > num_pruned_max:
                alpha_r = alpha
            elif num_pruned_try < num_pruned:
                alpha_l = alpha
            else:
                break

        # Finally, convert the lasso coefficients into channel indices.
        indices_stayed = np.where(solver.coef_ != 0)[0].tolist()
        indices_pruned = np.where(solver.coef_ == 0)[0].tolist()
    else:
        raise NotImplementedError

    # NOTE(review): the local rebinds below do not affect the caller's
    # references; presumably intended to move subsequent work to GPU —
    # confirm whether this is still needed here.
    inputs = inputs.cuda()
    module = module.cuda()
    return indices_stayed, indices_pruned
5,332,426
def format_host(host_tuple):
    """Format a host tuple to a string.

    Args:
        host_tuple: Either a ``(host, port)`` pair or an already-formatted
            host string.

    Returns:
        ``"host:port"`` for pairs; the string unchanged for strings.

    Raises:
        ValueError: For pairs of the wrong length or unsupported types.
    """
    if isinstance(host_tuple, (list, tuple)):
        if len(host_tuple) != 2:
            # FIX: corrected the "unexpeted" typo in the error message.
            raise ValueError('host_tuple has unexpected length: %s' % host_tuple)
        return ':'.join([six.text_type(s) for s in host_tuple])
    elif isinstance(host_tuple, six.string_types):
        return host_tuple
    else:
        raise ValueError('host_tuple unexpected type: (%s) %s'
                         % (type(host_tuple), host_tuple))
5,332,427
def get_cpu_stats():
    """
    Obtains the system's CPU statistics.

    :returns: System CPU statistics as returned by ``psutil.cpu_stats()``.
    """
    return psutil.cpu_stats()
5,332,428
def test_get_user_invalid_id(mock_user_qs):
    """
    If there is no user with the specified ID, ``None`` should be returned.
    """
    # Make the mocked queryset raise the model's DoesNotExist on .get().
    mock_user_qs.get.side_effect = get_user_model().DoesNotExist
    backend = authentication.VerifiedEmailBackend()
    retrieved_user = backend.get_user(42)
    assert retrieved_user is None
    # The backend must have queried by primary key.
    assert mock_user_qs.get.call_args[1] == {"pk": 42}
5,332,429
def load_gene_metadata(gtf_file: str) -> Dict[str, Dict[str, Union[int, str]]]:
    """
    Read gene metadata from a GTF file.

    Args:
        gtf_file (str): path to GTF file

    Returns:
        A Dict with each GeneId pointing to a Dict of metadata keys -> values

    Raises:
        ValueError: if the GTF file does not exist.
    """
    if not os.path.exists(gtf_file):
        raise ValueError(f"Gene metadata file '{gtf_file}' not found.")
    regex_genetype = re.compile('gene_biotype "([^"]+)"')
    regex_geneid = re.compile('gene_id "([^"]+)"')
    regex_genename = re.compile('gene_name "([^"]+)"')
    geneid2annots = {}
    # FIX: iterate lazily inside a context manager — the original used
    # ``open(...).readlines()``, leaking the handle and reading the whole
    # file into memory.
    with open(gtf_file) as fh:
        for line in fh:
            if line.startswith('#'):
                continue
            fields = line.rstrip().split('\t')
            chrom, feature_class, feature_type, start_str, end_str, score, strand, frame_col, tags = fields
            if feature_type == "gene":
                # Fall back to the gene id when no gene_name tag is present.
                genename = geneid = regex_geneid.search(tags).group(1)
                _genename_search = regex_genename.search(tags)
                if _genename_search:
                    genename = _genename_search.group(1)
                genetype = regex_genetype.search(tags).group(1)
                chrid, start, end = fields[0], int(fields[3]), int(fields[4])
                # NOTE(review): the "Gene:" key (with trailing colon) looks
                # like a typo but is kept for backward compatibility —
                # confirm with consumers before renaming to "Gene".
                geneid2annots[geneid] = {
                    "Gene:": genename, "Accession": geneid, "Biotype": genetype,
                    "Chromosome": chrid, "Start": start, "End": end,
                }
    return geneid2annots
5,332,430
def get_plugins() -> dict[str, Plugin]: """ This function is really time consuming... """ # get entry point plugins # Users can use Python's entry point system to create rich plugins, see # example here: https://github.com/Pioreactor/pioreactor-air-bubbler eps = entry_points() pioreactor_plugins: tuple = eps.get("pioreactor.plugins", tuple()) plugins: dict[str, Plugin] = {} for plugin in pioreactor_plugins: try: md = metadata(plugin.name) plugins[md["Name"]] = Plugin( plugin.load(), md["Summary"], md["Version"], md["Home-page"], md["Author"], "entry_points", ) except Exception as e: print(f"{plugin.name} plugin load error: {e}") # get file-based plugins. # Users can put .py files into the MODULE_DIR folder below. # The below code will load it into Python, and treat it like any other plugin. # The authors can add metadata to their file with the following variables at the # highest level in the file: # __plugin_name__ # __plugin_author__ # __plugin_summary__ # __plugin_version__ # __plugin_homepage__ BLANK = "Unknown" # The directory containing your modules needs to be on the search path. if is_testing_env(): MODULE_DIR = "plugins_dev" else: MODULE_DIR = "/home/pioreactor/.pioreactor/plugins" sys.path.append(MODULE_DIR) # Get the stem names (file name, without directory and '.py') of any # python files in your directory, load each module by name and run # the required function. py_files = glob.glob(os.path.join(MODULE_DIR, "*.py")) for py_file in py_files: module_name = pathlib.Path(py_file).stem module = importlib.import_module(module_name) plugins[getattr(module, "__plugin_name__", module_name)] = Plugin( module, getattr(module, "__plugin_summary__", BLANK), getattr(module, "__plugin_version__", BLANK), getattr(module, "__plugin_homepage__", BLANK), getattr(module, "__plugin_author__", BLANK), "plugins_folder", ) return plugins
5,332,431
def test_odrive_motors(odrv):
    """
    Give two position commands to both axes.

    Puts both axes into closed-loop control, commands symmetric position
    setpoints (+1000/-1000), then returns both to zero.
    """
    print('Testing motors...')
    odrv.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL
    odrv.axis1.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL
    # Give the controller time to enter closed-loop before commanding.
    time.sleep(0.5)
    odrv.axis0.controller.set_pos_setpoint(1000.0, 0.0, 0.0)
    odrv.axis1.controller.set_pos_setpoint(-1000.0, 0.0, 0.0)
    time.sleep(1)
    odrv.axis0.controller.set_pos_setpoint(0.0, 0.0, 0.0)
    odrv.axis1.controller.set_pos_setpoint(0.0, 0.0, 0.0)
    print('Done.')
5,332,432
def _deserialise_list_of(collection_type, kind, owning_cls, field, value):
    """
    Deserialise a list of items into a collection object of
    ``collection_type``.

    Note that if the value is None, we return None here so that we get a
    more meaningful error message later on.

    Args:
        collection_type: The type of the container (list, set, etc)
        kind: Type to deserialise into.
            NOTE(review): currently unused in the body — element
            deserialisation is delegated to ``_deserialise_maybe_union``;
            confirm whether it can be dropped at the call sites.
        owning_cls: Class owning ``field`` (forwarded to the helper).
        field: The field being deserialised (forwarded to the helper).
        value: List of raw items, possibly JSON-encoded as bytes/str.

    Returns:
        Collection of deserialized items, or None (if value was None)
    """
    if value is None:
        return None
    # TODO(dan): Is this json stuff necessary?
    if isinstance(value, (bytes, str)):
        value = json.loads(value)
    result = [
        _deserialise_maybe_union(owning_cls, field, each)
        for each in value
    ]
    return collection_type(result)
5,332,433
def get_minmax_array(X):
    """Utility method that returns the boundaries for each feature of the
    input array.

    Args:
        X (np.float array of shape (num_instances, num_features)): The
            input array.

    Returns:
        min (np.float array of shape (num_features,)): Minimum values for
            each feature in array.
        max (np.float array of shape (num_features,)): Maximum values for
            each feature in array.
    """
    # FIX: renamed the locals so the ``min``/``max`` builtins are not shadowed.
    min_vals = np.min(X, axis=0)
    max_vals = np.max(X, axis=0)
    return min_vals, max_vals
5,332,434
def getAllHeaders(includeText=False):
    """
    Get a dictionary of dream numbers and headers.

    If includeText=true, also add the text of the dream to the dictionary
    as 'text' (note that this key is all lowercase so it will not conflict
    with the usual convention for header names, even if "Text" would be an
    odd header name).
    """
    dreams = {}
    for f in allDreamfiles():
        dream = {}
        textLines = []
        inHeaders = True
        for line in f:
            if not line.strip():  # end of headers
                if includeText:
                    # Keep reading: everything after the blank line is body.
                    inHeaders = False
                else:
                    break
            if inHeaders:
                # Headers are "Name:\tvalue" pairs.
                header, value = (i.strip() for i in line.split(':\t'))
                dream[header] = value
            else:
                textLines.append(line)
        if includeText:
            # omit the first blank separator line
            # NOTE(review): lines keep their trailing '\n' and are joined
            # with another '\n', producing blank lines between originals —
            # confirm this is the intended formatting.
            dream['text'] = '\n'.join(i for i in textLines[1:])
        dreams[dream['Id']] = dream
    return dreams
5,332,435
def zscore(arr, period):
    """
    ZScore transformation of `arr` for rolling `period.`

    ZScore = (X - MEAN(X)) / STDEV(X)

    :param arr: pd.Series or np.ndarray input series.
    :param period: positive rolling window length.
    :return: Same type as `arr` with the rolling z-score applied.
        NOTE(review): inputs of any other type fall through and return
        None silently — confirm `_check_series_args` rules that out.
    :raises YaUberAlgoArgumentError: if `period` is not positive.
    :raises YaUberAlgoInternalError: if the underlying computation fails.
    """
    if period <= 0:
        raise YaUberAlgoArgumentError("'{}' must be positive number".format(period))
    # Do quick sanity checks of arguments
    _check_series_args(arr=arr)
    try:
        if isinstance(arr, pd.Series):
            # Preserve the original index on the result.
            return pd.Series(_zscore(arr.values, period), index=arr.index)
        elif isinstance(arr, np.ndarray):
            return _zscore(arr, period)
    except ValueError as exc:
        raise YaUberAlgoInternalError(str(exc))
5,332,436
def _load_event_data(prefix, name):
    """Load per-event data for one single type, e.g. hits, or particles.
    """
    pattern = '{!s}-{}.csv*'.format(prefix, name)
    matches = glob.glob(pattern)
    dtype = DTYPES[name]
    # Exactly one file must match the pattern.
    if not matches:
        raise Exception('No file matches \'{}\''.format(pattern))
    if len(matches) > 1:
        raise Exception('More than one file matches \'{}\''.format(pattern))
    return pandas.read_csv(matches[0], header=0, index_col=False, dtype=dtype)
5,332,437
def users():
    """list all users along with their associated tibanna user groups"""
    # Thin CLI wrapper: delegates straight to the API client.
    API().users()
5,332,438
def test_weak_ref(v): """Check that weak references can be made to Vectors.""" assert weakref.ref(v) is not None
5,332,439
async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update.

    Publishes a discovery config creating a device, then republishes it
    with a changed device name and verifies the registry entry updated.
    """
    registry = dr.async_get(hass)
    config = {
        "automation_type": "trigger",
        "topic": "test-topic",
        "type": "foo",
        "subtype": "bar",
        "device": {
            "identifiers": ["helloworld"],
            "connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
            "manufacturer": "Whatever",
            "name": "Beer",
            "model": "Glass",
            "sw_version": "0.1-beta",
        },
    }
    data = json.dumps(config)
    # First discovery message should create the device entry.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data)
    await hass.async_block_till_done()
    device = registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    assert device.name == "Beer"
    # Re-publish with a new name; the registry entry must be updated in place.
    config["device"]["name"] = "Milk"
    data = json.dumps(config)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data)
    await hass.async_block_till_done()
    device = registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    assert device.name == "Milk"
5,332,440
def check_file(file_id: str, upsert: bool = False) -> File:
    """Checks that the file with file_id exists in the DB.

    Args:
        file_id: The id for the requested file.
        upsert: If the file doesn't exist create a placeholder file

    Returns:
        The file object

    Raises:
        NotFoundError: File with the requested ID doesn't exist and is
            expected to
        ModelValidationError: Incorrectly formatted ID is given
    """
    # Validate the id shape up front (must be a 24-char hex ObjectId).
    try:
        ObjectId(file_id)
    except (InvalidId, TypeError):
        raise ModelValidationError(
            f"Cannot create a file id with the string {file_id}. "
            "Requires 24-character hex string."
        )
    res = db.query_unique(File, id=file_id)
    if res is None:
        if upsert:
            # Create an empty placeholder record, then re-fetch it.
            create_file("BG_placeholder", 0, 0, file_id)
            res = db.query_unique(File, id=file_id)
        else:
            raise NotFoundError(f"Tried to fetch an unsaved file {file_id}")
    # Touch the record so its updated_at reflects this access.
    db.modify(res, updated_at=datetime.utcnow())
    return res
5,332,441
def edit_config_popup(icon):
    """Open a window to edit config.

    Builds a small Tk window with a character-name entry and a league
    dropdown (populated from ``api.get_current_leagues()``), pre-filled from
    the module-level ``config_dict``. The Save button writes the edited
    values back through ``config.write``. ``icon`` is accepted but unused
    here — presumably the tray icon that triggered the popup; confirm with
    the caller.
    """
    padx = 4
    pady = 6
    # creating popup window
    window = tk.Tk()
    window.title("Pypit config")
    window.after(1, lambda: window.focus_force())  # focus on create
    window.bind("<Escape>", lambda e: window.destroy())  # destroy on Escape
    config_frame = tk.Frame(window, bg="#1e1e1e")
    config_frame.grid()
    # character label
    char_label = tk.Label(
        config_frame,
        text="Character name: ",
        bd=5,
        bg="#1e1e1e",
        fg="#a4b5b0",
        font=("Helvetica", 12),
    )
    char_label.grid(
        row=0,
        column=0,
        padx=padx,
        pady=pady,
    )
    # characted name input field
    name_field = tk.Entry(
        config_frame,
        bd=5,
        bg="#1e1e1e",
        fg="#a4b5b0",
        font=("Helvetica", 12),
    )
    # default value is taken from config dict
    name_field.insert(0, config_dict["character"])
    name_field.grid(
        row=0,
        column=1,
        padx=padx,
        pady=pady,
    )
    # league label
    league_label = tk.Label(
        config_frame,
        text="League: ",
        bd=5,
        bg="#1e1e1e",
        fg="#a4b5b0",
        font=("Helvetica", 12),
    )
    league_label.grid(
        row=1,
        column=0,
        padx=padx,
        pady=pady,
    )
    # Create a Tkinter variable (holds the currently selected league)
    tkvar = tk.StringVar(window)
    # List with options, fetched live from the API
    leagues = api.get_current_leagues()
    tkvar.set(config_dict["league"])  # set the default option
    popupMenu = tk.OptionMenu(config_frame, tkvar, *leagues)
    popupMenu.grid(row=1, column=1)

    # on change dropdown value
    def change_dropdown(*args):
        logging.info(f"selecter league: {tkvar.get()}\n")

    # link function to change dropdown
    tkvar.trace("w", change_dropdown)

    # helper function for save button: persists both fields to config
    def update_config():
        logging.info("writing config")
        logging.info(f"character name: {name_field.get()}")
        logging.info(f"league: {tkvar.get()}\n")
        config_dict["character"] = name_field.get()
        # URL-encode spaces so the league name is usable in request paths
        config_dict["league"] = tkvar.get().replace(" ", "%20")
        config.write(config_dict)

    # save button
    tk.Button(config_frame, text="Save", command=update_config).grid(
        row=2, column=1, sticky=tk.W, pady=pady
    )
    # opening popup window (blocks until the window is closed)
    window.mainloop()
5,332,442
def parse(volume_str):
    """Parse combined k8s volume string into a dict.

    Args:
        volume_str: The string representation for k8s volume,
            e.g. "claim_name=c1,mount_path=/path1".

    Return:
        A Python dictionary parsed from the given volume string.

    Raises:
        ValueError: If a key is duplicated, not in ``_ALLOWED_VOLUME_KEYS``,
            or an item is not in ``key=value`` form.
    """
    parsed_volume_dict = {}
    for kv in volume_str.split(","):
        # Raises ValueError for malformed items (no "=" or more than one).
        k, v = kv.split("=")
        # The result dict itself tracks seen keys; no side list needed.
        if k in parsed_volume_dict:
            raise ValueError(
                "The volume string contains duplicate volume key: %s" % k
            )
        if k not in _ALLOWED_VOLUME_KEYS:
            raise ValueError(
                "%s is not in the allowed list of volume keys: %s"
                % (k, _ALLOWED_VOLUME_KEYS)
            )
        parsed_volume_dict[k] = v
    return parsed_volume_dict
5,332,443
def expect_warnings_on(db, *messages, **kw):
    """Context manager which expects one or more warnings on specific
    dialects.

    The expect version **asserts** that the warnings were in fact seen.

    ``db`` may be a dialect-spec string or a tuple/list understood by
    ``_is_excluded``; when the current config does not match the spec, the
    body runs without any warning assertion.
    """
    spec = db_spec(db)

    if isinstance(db, util.string_types) and not spec(config._current):
        # Spec names a dialect other than the one under test: no-op.
        yield
    elif not _is_excluded(*db):
        # Non-string spec that does not exclude the current config: no-op.
        yield
    else:
        # Current dialect matches: the listed warnings must be emitted.
        with expect_warnings(*messages, **kw):
            yield
5,332,444
def longest_common_substring(text1, text2):
    """Longest common substring of two strings, case-sensitive.

    Returns ``(span1, span2, maxlen)`` where each span is a half-open
    ``(start, end)`` index pair into the corresponding input string.
    """
    len1, len2 = len(text1), len(text2)
    best = 0
    best_span1 = (0, 0)
    best_span2 = (0, 0)
    if len1 == 0 or len2 == 0:
        return best_span1, best_span2, best

    # table[i][j] = length of common suffix of text1[:i] and text2[:j].
    table = np.zeros((len1 + 1, len2 + 1), dtype=np.int32)
    for i in range(1, len1 + 1):
        for j in range(1, len2 + 1):
            if text1[i - 1] != text2[j - 1]:
                continue
            table[i][j] = table[i - 1][j - 1] + 1
            if table[i][j] > best:
                best = table[i][j]
                best_span1 = (i - best, i)
                best_span2 = (j - best, j)
    return best_span1, best_span2, best
5,332,445
def make_subclasses_dict(cls):
    """
    Map class names to classes for ``cls`` and its whole subclass tree.
    Keys are String names of the classes, values the actual classes.
    :param cls:
    :return:
    """
    mapping = {sub.__name__: sub for sub in get_all_subclasses(cls)}
    mapping[cls.__name__] = cls
    return mapping
5,332,446
def _parse_realtime_data(xmlstr):
    """
    Takes xml a string and returns a list of dicts containing realtime data.

    Each dict has keys: "id", "direction", "destination", "is_realtime",
    "time" (a struct_time) and "wait_time" (seconds from the feed's own
    timestamp). Returns None when the feed's Acknowledge element is missing
    or not "ok".

    NOTE(review): uses the Python 2 ``unicode`` builtin — this module
    presumably targets Python 2; confirm before porting.
    """
    doc = minidom.parseString(xmlstr)
    ret = []
    # Maps feed element names to our output dict keys.
    elem_map = {"LineID": "id",
                "DirectionID": "direction",
                "DestinationStop": "destination" }
    ack = _single_element(doc, "Acknowledge")
    if ack == None or ack.attributes["Result"].nodeValue != "ok":
        return None
    # Feed timestamp; the trailing 10 chars (fractional seconds/offset per
    # the slice below) are stripped before parsing.
    curtime = time.mktime(time.strptime(
            ack.attributes["TimeStamp"].nodeValue[:-10],
            "%Y-%m-%dT%H:%M:%S"))
    for elem in doc.getElementsByTagName("DISDeviation"):
        entry = {"is_realtime": False}
        # Walk the direct element children once, mapping known tags.
        for name, value in [ (e.nodeName, _get_text(e.childNodes)) \
                                 for e in elem.childNodes \
                                 if e.nodeType == e.ELEMENT_NODE ]:
            if name in elem_map:
                entry[elem_map[name]] = unicode(value)
            elif name == "TripStatus":
                entry["is_realtime"] = value == "Real"
        # Real-time trips report an expected departure, scheduled trips a
        # timetable departure.
        if entry["is_realtime"]:
            timeele = _single_element(elem, "ExpectedDISDepartureTime")
        else:
            timeele = _single_element(elem, "ScheduledDISDepartureTime")
        parsed_time = time.strptime(
            _get_text(timeele.childNodes)[:-10],
            "%Y-%m-%dT%H:%M:%S")
        entry["time"] = parsed_time
        # Wait time is relative to the feed's timestamp, not local now().
        entry["wait_time"] = int(time.mktime(parsed_time) - curtime)
        ret.append(entry)
    return ret
5,332,447
def tocopo_accuracy_fn(tocopo_logits: dt.BatchedTocopoLogits,
                       target_data: dt.BatchedTrainTocopoTargetData,
                       oov_token_id: int,
                       pad_token_id: int,
                       is_distributed: bool = True) -> AccuracyMetrics:
  """Computes accuracy metrics.

  Predictions are taken as the argmax over the concatenated token/copy/
  pointer logits; targets are built as a few-hot array over the same
  concatenated axis. Pad positions are excluded from every metric.

  Args:
    tocopo_logits: Predictions from model (unnormalized log scores).
    target_data: target data to compare prediction against.
    oov_token_id: Id of out of vocabulary token.
    pad_token_id: Id of pad token.
    is_distributed: Whether to perform cross-device aggregation (requires
      running under pmap with axis name 'i').

  Returns:
    A `AccuracyMetrics` instance.
  """
  vocab_size = tocopo_logits.token_logits.shape[2]
  one_hot_target_tokens = jax.nn.one_hot(target_data.token_ids, vocab_size)  # (B, O, U)
  # Don't give credit for OOV tokens.
  one_hot_target_tokens = one_hot_target_tokens.at[:, :, oov_token_id].set(
      jnp.zeros_like(target_data.token_ids))
  # Disable predictions for all tokens when there is a pointer.
  # Mask indicating absence of a pointer at target.
  not_pointer_mask = target_data.is_target_pointer.sum(axis=2) == 0  # (B, O)
  one_hot_target_tokens = one_hot_target_tokens * jnp.expand_dims(
      not_pointer_mask, axis=2)
  # Targets over the concatenated token/copy/pointer axis.
  few_hot_targets = jnp.concatenate([
      one_hot_target_tokens, target_data.is_target_copy,
      target_data.is_target_pointer
  ], axis=2)  # (B, O, U+2V)
  # Get the one hot predictions.
  tocopo_logits_stacked = jnp.concatenate([
      tocopo_logits.token_logits, tocopo_logits.copy_logits,
      tocopo_logits.pointer_logits
  ], axis=2)  # (B, O, U+2V)
  prediction_indices = jnp.argmax(tocopo_logits_stacked, axis=2)  # (B, O)
  one_hot_predictions = jax.nn.one_hot(
      prediction_indices, tocopo_logits_stacked.shape[2])  # (B, O, U+2V)

  # (B, O)
  is_pad = (target_data.token_ids == pad_token_id)

  # (B, O, U+2V) -> (B, O)
  # If the target is a pad token, then we remove it from consideration when
  # calculating accuracies. `element_correct_or_pad` array always assign a 1 to
  # padded prediction (this property is used in the sequence accuracy
  # computation).
  element_correct = jnp.sum(one_hot_predictions * few_hot_targets, axis=-1)
  element_correct_or_pad = jnp.where(is_pad, 1, element_correct)
  per_element_correct = jnp.sum(element_correct_or_pad * (1 - is_pad))
  per_element_attempts = jnp.sum(1 - is_pad)

  # A sequence is correct iff every non-pad element is correct (pads are 1s).
  per_sequence_correct = jnp.sum(jnp.prod(element_correct_or_pad, axis=-1))
  per_sequence_attempts = element_correct_or_pad.shape[0]

  # Pointer-only accuracy: positions that are pointers and not pad.
  pointer_mask = jnp.logical_and(
      jnp.logical_not(not_pointer_mask), jnp.logical_not(is_pad))
  pointer_correct = jnp.sum(element_correct * pointer_mask)
  pointer_attempts = jnp.sum(pointer_mask)
  # Pointer sequence accuracy: construct an array of 1s everywhere except where
  # a pointer is incorrectly predicted. Note: this counts a sequence without
  # pointers as accurately predicted.
  pointer_correct_or_toco_or_pad = jnp.where(not_pointer_mask, 1,
                                             element_correct_or_pad)
  per_sequence_po_correct = jnp.sum(
      jnp.prod(pointer_correct_or_toco_or_pad, axis=-1))

  # Token/copy (ToCo) accuracy: non-pointer, non-pad positions.
  toco_mask = jnp.logical_and(not_pointer_mask, jnp.logical_not(is_pad))
  toco_correct = jnp.sum(element_correct * toco_mask)
  toco_attempts = jnp.sum(toco_mask)
  # ToCo sequence accuracy: construct an array of 1s everywhere except where
  # a To/Co is incorrectly predicted. Note: this counts a sequence without
  # ToCo as accurately predicted.
  toco_correct_or_po_or_pad = jnp.where(pointer_mask, 1, element_correct_or_pad)
  per_sequence_toco_correct = jnp.sum(
      jnp.prod(toco_correct_or_po_or_pad, axis=-1))

  # Correct predictions using the To head (argmax landed in the token range).
  is_prediction_token_mask = prediction_indices < vocab_size
  token_correct = jnp.sum(
      element_correct * jnp.logical_and(is_prediction_token_mask,
                                        jnp.logical_not(is_pad)))

  # Aggregate across devices.
  if is_distributed:
    per_element_correct = jax.lax.psum(per_element_correct, axis_name='i')
    per_element_attempts = jax.lax.psum(per_element_attempts, axis_name='i')
    per_sequence_correct = jax.lax.psum(per_sequence_correct, axis_name='i')
    per_sequence_attempts = jax.lax.psum(per_sequence_attempts, axis_name='i')
    pointer_correct = jax.lax.psum(pointer_correct, axis_name='i')
    pointer_attempts = jax.lax.psum(pointer_attempts, axis_name='i')
    toco_correct = jax.lax.psum(toco_correct, axis_name='i')
    token_correct = jax.lax.psum(token_correct, axis_name='i')
    toco_attempts = jax.lax.psum(toco_attempts, axis_name='i')
    per_sequence_po_correct = jax.lax.psum(
        per_sequence_po_correct, axis_name='i')
    per_sequence_toco_correct = jax.lax.psum(
        per_sequence_toco_correct, axis_name='i')

  return AccuracyMetrics(
      num_element_correct=per_element_correct,
      num_element_attempts=per_element_attempts,
      num_seq_correct=per_sequence_correct,
      num_seq_attempts=per_sequence_attempts,
      num_pointer_correct=pointer_correct,
      num_pointer_attempts=pointer_attempts,
      num_pointer_seq_correct=per_sequence_po_correct,
      num_toco_correct=toco_correct,
      num_token_correct=token_correct,
      num_toco_attempts=toco_attempts,
      num_toco_seq_correct=per_sequence_toco_correct)
5,332,448
def cols_from_html_tbl(tbl):
    """Extract the columns of an html table into a list of lists.

    ``tbl`` must be a BeautifulSoup result object with a ``tbody``.

    Returns a list with one sub-list per column, each holding the cell
    texts of that column top-to-bottom. An empty table yields ``[]``.
    """
    rows = tbl.tbody.find_all('tr')
    # Initialize lazily from the first row that has cells; this replaces the
    # original fragile `'col_list' in locals()` check and also avoids a
    # NameError when the first row has no <td> cells.
    col_list = []
    for row in rows:
        cols = row.find_all('td')
        if not col_list:
            col_list = [[] for _ in cols]
        for i, cell in enumerate(cols):
            col_list[i].append(cell.text)
    return col_list
5,332,449
def get_score_park(board: List[List[str]]) -> Tuple[int]:
    """
    Calculate the score for the building - park (PRK).

    Parks are scored per contiguous island of 'PRK' cells:
    sizes 1..8 score 1, 3, 8, 16, 22, 23, 24, 25 respectively, and
    every size > 8 scores 17 + size.

    Parameters
    ----------
    board: List[List[str]]
        2D array containing all the game detail, including column header,
        row header and placed buildings.

    Returns
    -------
    score: Tuple[int]
        A tuple containing the score for each park island found.
    """
    building_type = 'PRK'  # renamed from `type` to avoid shadowing the builtin

    # Logical matrix: 1 marks a park cell, every other building is 0.
    grid = [[1 if building_type == col else 0 for col in row] for row in board]

    # Scores for island sizes 1..7; sizes >= 8 fall through to 17 + size
    # (which also yields 25 for size 8, matching the documented table).
    size_to_score = {1: 1, 2: 3, 3: 8, 4: 16, 5: 22, 6: 23, 7: 24}

    visited_location_set = set()
    score_list = []
    for idx_row in range(len(grid)):
        for idx_col in range(len(grid[0])):
            size = get_island_size(idx_row, idx_col, grid, visited_location_set,
                                   direction=('up', 'down', 'left', 'right'))
            if size == 0:
                continue
            score_list.append(size_to_score.get(size, 17 + size))

    return tuple(score_list)
5,332,450
def kernel_epanechnikov(inst: np.ndarray) -> np.ndarray:
    """Epanechnikov kernel evaluated elementwise on a 1-D array.

    Values outside the support ``|x| < 1`` map to 0.
    """
    if inst.ndim != 1:
        raise ValueError("'inst' vector must be one-dimensional!")

    inside_support = np.abs(inst) < 1.0
    return 0.75 * (1.0 - np.square(inst)) * inside_support
5,332,451
def submission_parser(reddit_submission_object):
    """Extract selected attributes from a reddit submission as a tuple.

    Returns (created_utc, id, score, ups, downs, title, url,
    subreddit display name).
    """
    sub = reddit_submission_object
    return (
        sub.created_utc,
        sub.id,
        sub.score,
        sub.ups,
        sub.downs,
        sub.title,
        sub.url,
        sub.subreddit.display_name,
    )
5,332,452
def run():
    """Run the command line interface via ``main``, then exit the process
    with the proper return code (0 when ``main`` returns a falsy value)."""
    exit_code = main(sys.argv[1:]) or 0
    sys.exit(exit_code)
5,332,453
def test_setupTestKeep():
    """
    Test setting up test Keep directory

    Verifies the directory is created under /tmp/bluepea, recorded in the
    module-global ``keeping.gKeepDirPath``, and removable afterwards.
    """
    print("Testing setupTestKeep")
    keepDirPath = keeping.setupTestKeep()
    # Path layout contract for the test keep directory.
    assert keepDirPath.startswith("/tmp/bluepea")
    assert keepDirPath.endswith("test/bluepea/keep")
    assert os.path.exists(keepDirPath)
    # setupTestKeep must also publish the path as module state.
    assert keepDirPath == keeping.gKeepDirPath
    # Cleanup must actually remove the created tree.
    cleanupTmpBaseDir(keepDirPath)
    assert not os.path.exists(keepDirPath)
    print("Done Test")
5,332,454
def dashboard():
    """Display the dashboard for the logged in user.

    Redirects to /login when no user type is in the session. Band users
    render with a ``seeking`` field (their ``skills`` attribute), musician
    users with a ``skills`` field; all other profile fields are shared.
    An unrecognized user_type implicitly returns None, as before.
    """
    user_type = session.get('user_type')
    user_id = session.get('user_id')

    if user_type is None:
        return redirect('/login')

    if user_type == 'band':
        band = crud.get_band_by_id(user_id)
        return render_template(
            'dashboard.html',
            user_type=user_type,
            display_name=band.display_name,
            age=band.age,
            gender=band.gender,
            influences=band.influences,
            location=band.location,
            description=band.description,
            # Bands advertise the skills they are looking for.
            seeking=band.skills,
            genres=band.genres,
        )

    if user_type == 'musician':
        musician = crud.get_musician_by_id(user_id)
        return render_template(
            'dashboard.html',
            user_type=user_type,
            display_name=musician.display_name,
            age=musician.age,
            gender=musician.gender,
            influences=musician.influences,
            location=musician.location,
            description=musician.description,
            skills=musician.skills,
            genres=musician.genres,
        )
5,332,455
def parse_site(site_content, gesture_id):
    """
    Parses the following attributes: title, image, verbs and other_gesture_ids

    :param site_content: a html string
    :param gesture_id: the current id
    :return: {
        title: str,
        img: str,
        id: number,
        compares: [
            {
                verb: [str],
                other_gesture_id: number
            }
        ]
    }
    """
    soup = BeautifulSoup(site_content, 'html.parser')

    # First <img> in the body, or False when the page has none.
    img = soup.body.img
    img = img['src'] if img else False

    title = soup.body.font.b.contents[0].lower().strip()

    table = soup.body.table.tr
    rows = table.find_all('td')
    compares = []
    for td in rows:
        content = td.font.contents
        current_verb = []
        current_other = ''
        # Each <br/> terminates one (verb list, linked gesture) pair.
        for line in content:
            if str(line) == '<br/>':
                compares.append({
                    'verb': current_verb,
                    'other_gesture_id': current_other,
                })
                current_verb = []
                current_other = ''
            elif hasattr(line, 'name') and line.name == 'a':
                # Link target minus ".htm" is the other gesture's id.
                current_other = line['href'].replace('.htm', '')
            else:
                # Plain text: a verb. NOTE(review): '\\n' here is a literal
                # backslash-n, not a newline — presumably scrubbing escaped
                # newlines in the scraped text; confirm against the site data.
                current_verb.append(str(line).strip().replace('\\n', '').lower())

    return {
        'id': gesture_id,
        'title': title,
        'img': img,
        'compares': compares,
    }
5,332,456
def test_multiprocessing_function () :
    """Test parallel processnig with multiprocessing

    Fans `make_histos` out over a process pool (one worker per CPU), merges
    the returned histograms with ROOT's Add, and draws the combined result.
    """
    logger = getLogger ("ostap.test_multiprocessing_function")
    logger.info ('Test job submission with module %s' % multiprocessing )

    ncpus = multiprocessing.cpu_count()

    from multiprocessing import Pool

    pool = Pool ( ncpus )

    # Each job gets (job-index, input value); ordering of results is not
    # needed since histograms are simply summed.
    jobs = pool.imap_unordered ( make_histos , zip ( count () , inputs ) )

    result = None
    for h in progress_bar ( jobs , max_value = len ( inputs ) ) :
        if not result : result = h
        else          : result.Add ( h )

    pool.close ()
    pool.join ()

    logger.info ( "Histogram is %s" % result.dump ( 80 , 20 ) )
    logger.info ( "Entries  %s/%s" % ( result.GetEntries() , sum ( inputs ) ) )

    with wait ( 5 ) , use_canvas ( 'test_multiprocessing_function' ) :
        result.draw ( )

    return result
5,332,457
def main():
    """
    Main function for run script: starts the application server.
    """
    app.run()
5,332,458
def fnCalculate_Bistatic_Coordinates(a, B):
    """
    Calculate the coordinates of the target in the bistatic plane.

    A,B,C = angles in the triangle
    a,b,c = length of the side opposite the angle

    Returns the (u, v) coordinates obtained by projecting side ``a``
    through angle ``B``.

    Created: 22 April 2017
    """
    u = a * math.cos(B)
    v = a * math.sin(B)
    return u, v
5,332,459
def _fn_lgamma_ ( self , b = 1 ) :
    """ Gamma function: f = log(Gamma(ab))
    >>> f =
    >>> a = f.lgamma ( )
    >>> a = f.lgamma ( b )
    >>> a = lgamma ( f )
    """
    # Delegates to the generic binary-function builder with the LGamma
    # RooFit wrapper. NOTE(review): the name pattern 'lgamma_%s_%s' has two
    # placeholders — presumably filled in by _fn_make_fun_ with the operand
    # names; confirm against its implementation.
    return _fn_make_fun_ ( self , b ,
                           Ostap.MoreRooFit.LGamma ,
                           'lgamma_%s_%s' )
5,332,460
def convolve_smooth(x, win=10, mode="same"):
    """Moving-average smoothing of ``x`` via ``numpy.convolve``.

    ``win`` is the window size in array elements; ``mode`` is passed
    straight through to ``numpy.convolve``.
    """
    kernel = np.ones((win,))
    smoothed = np.convolve(x, kernel, mode=mode)
    return smoothed / win
5,332,461
def _conditional(Xnew, feat, kern, f, *, full_cov=False, full_output_cov=False,
                 q_sqrt=None, white=False, Lm=None):
    """Multioutput conditional for an independent kernel and shared inducing features.
    Same behaviour as conditional with non-multioutput kernels.

    The covariance matrices used to calculate the conditional have the following shape:
    - Kuu: M x M
    - Kuf: M x N
    - Kff: N or N x N

    Further reference
    -----------------
    - See `gpflow.conditionals._conditional` for a detailed explanation of
      conditional in the single-output case.
    - See the multiouput notebook for more information about the multiouput framework.

    Parameters
    ----------
    :param Xnew: data matrix, size N x D.
    :param f: data matrix, P x M or P x M x N
    :param full_cov: return the covariance between the datapoints
    :param full_output_cov: return the covariance between the outputs.
        Note: as we are using a independent kernel these covariances will be zero.
    :param q_sqrt: matrix of standard-deviations or Cholesky matrices,
        size P x M or P x M x M.
    :param white: boolean of whether to use the whitened representation
    :param Lm: optional precomputed Cholesky of Kuu; when given, Kmm is not
        recomputed.
    :return:
        - mean: N x P
        - variance: N x P, P x N x N, N x P x P or N x P x N x P
        Please see `gpflow.conditional._expand_independent_outputs` for more
        information about the shape of the variance, depending on
        `full_cov` and `full_output_cov`.
    """
    logger.debug("Conditional: SharedIndependentMof - SharedIndepedentMok")

    f_ndims = f.shape.ndims
    assert f_ndims is not None

    Kmn = Kuf(feat, kern, Xnew)  # M x N
    # Kmm is only needed when no Cholesky factor was supplied.
    Kmm = Kuu(feat, kern, jitter=settings.numerics.jitter_level) if Lm is None else None  # M x M
    if full_cov:
        # With a shared independent kernel, one output's covariance suffices.
        Knn = kern.K(Xnew, full_output_cov=False)[0, ...]  # N x N
    else:
        Knn = kern.Kdiag(Xnew, full_output_cov=False)[..., 0]  # N

    # base_conditional expects the latent dimension leading differently for
    # 2-D f, hence the transpose round-trip below.
    _f = tf.transpose(f) if f_ndims == 2 else f
    fmean, fvar = base_conditional(Kmn, Kmm, Knn, _f, full_cov=full_cov,
                                   q_sqrt=q_sqrt, white=white, Lm=Lm)
    if f_ndims == 3:
        fmean = tf.transpose(fmean)
    if q_sqrt is None:
        # Without q_sqrt the variance is shared across outputs; tile it to P.
        if full_cov:
            fvar = tf.tile(fvar[None, :, :], [tf.shape(f)[0], 1, 1])
        else:
            fvar = tf.tile(fvar[:, None], [1, tf.shape(f)[0]])
    return fmean, _expand_independent_outputs(fvar, full_cov, full_output_cov)
5,332,462
def bq_wait_for_rows(bq_client: bigquery.Client, table: bigquery.Table,
                     expected_num_rows: int):
    """Poll the tables.get API until the table reaches the expected row count.

    This is mostly an optimization to speed up the test suite without making
    it flaky.

    Args:
        bq_client: BigQuery client used for the polling calls.
        table: table whose row count is awaited.
        expected_num_rows: exact row count to wait for.

    Raises:
        AssertionError: if the table overshoots ``expected_num_rows``, or the
            count is not reached within ``LOAD_JOB_POLLING_TIMEOUT`` seconds.
    """
    start_poll = time.monotonic()
    actual_num_rows = 0
    # NOTE: there is deliberately no sleep here; each iteration performs a
    # blocking get_table call, which paces the loop.
    while time.monotonic() - start_poll < LOAD_JOB_POLLING_TIMEOUT:
        bq_table: bigquery.Table = bq_client.get_table(table)
        actual_num_rows = bq_table.num_rows
        if actual_num_rows == expected_num_rows:
            return
        if actual_num_rows > expected_num_rows:
            # Fixed: the original f-string concatenation produced
            # "...has<N> rows" with no separating space.
            raise AssertionError(
                f"{table.project}.{table.dataset_id}.{table.table_id} has "
                f"{actual_num_rows} rows. expected {expected_num_rows} rows.")
    # Fixed: the original message ran "rows." and "last poll" together.
    raise AssertionError(
        f"Timed out after {LOAD_JOB_POLLING_TIMEOUT} seconds waiting for "
        f"{table.project}.{table.dataset_id}.{table.table_id} to "
        f"reach {expected_num_rows} rows. "
        f"last poll returned {actual_num_rows} rows.")
5,332,463
def descent(x0, fn, iterations=1000, gtol=10**(-6), bounds=None, limit=0, args=()):
    """A gradient descent optimisation solver.

    Gradients are estimated by one-sided finite differences; bound
    constraints are enforced through a quadratic penalty whose weight ``mu``
    is multiplied by 10 every iteration. Step length is chosen by Armijo
    backtracking (halving).

    Parameters
    ----------
    x0 : array-like
        n x 1 starting guess of x.
    fn : obj
        The objective function to minimise.
    iterations : int
        Maximum number of iterations.
    gtol : float
        Mean residual of the gradient for convergence.
    bounds : list
        List of lower and upper bound pairs [lb, ub], None=unconstrained.
    limit : float
        Value of the objective function for which to terminate optimisation.
    args : tuple
        Additional parameters needed for fn.

    Returns
    -------
    float
        Final value of the objective function.
    array
        Values of x at the found local minimum.
    """
    r = 0.5       # backtracking (step-halving) factor
    c = 0.0001    # Armijo sufficient-decrease constant
    n = len(x0)
    x0 = reshape(array(x0), (n, 1))

    if bounds:
        bounds = array(bounds)
        lb = bounds[:, 0][:, newaxis]
        ub = bounds[:, 1][:, newaxis]
    else:
        # Effectively unbounded.
        lb = ones((n, 1)) * -10**20
        ub = ones((n, 1)) * +10**20

    zn = zeros((n, 1))
    g = zeros((n, 1))
    # NOTE(review): `e` is not defined in this block — presumably a small
    # finite-difference step defined at module level. If it is numpy's Euler
    # constant (~2.718) the perturbation would be far too large; confirm.
    v = eye(n) * e

    def phi(x, mu, *args):
        # Objective plus quadratic penalty for bound violations.
        p = mu * (sum(maximum(lb - x, zn)) + sum(maximum(x - ub, zn)))**2
        return fn(x, *args) + p

    i = 0
    mu = 1
    while i < iterations:
        p0 = phi(x0, mu, *args)
        # Forward-difference gradient estimate, one coordinate at a time.
        for j in range(n):
            vj = v[:, j][:, newaxis]
            g[j, 0] = (phi(x0 + vj, mu, *args) - p0) / e
        D = sum(-g * g)  # directional derivative along -g
        a = 1
        x1 = x0 - a * g
        # Backtracking line search (Armijo condition).
        while phi(x1, mu, *args) > p0 + c * a * D:
            a *= r
            x1 = x0 - a * g
        x0 -= a * g
        mu *= 10  # stiffen the bound penalty each iteration
        res = mean(abs(g))
        i += 1
        # NOTE(review): f1 is evaluated with the already-increased mu, so it
        # is not directly comparable to p0 of this iteration.
        f1 = phi(x0, mu, *args)
        if f1 < limit:
            break
        if res < gtol:
            break
    print('Iteration: {0}  fopt: {1:.3g}  gres: {2:.3g}  step: {3}'.format(i, f1, res, a))
    return f1, x0
5,332,464
def test_fields():
    """Try using 'fields' getter.

    Checks that Unischema exposes its fields as a name-keyed mapping whose
    entries carry the field name.
    """
    TestSchema = Unischema('TestSchema', [
        UnischemaField('int_field', np.int8, (), ScalarCodec(IntegerType()), False),
        UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), False),
    ])

    # `fields` must contain exactly the declared columns, keyed by name.
    assert len(TestSchema.fields) == 2
    assert TestSchema.fields['int_field'].name == 'int_field'
    assert TestSchema.fields['string_field'].name == 'string_field'
5,332,465
def count_teams_for_party(party_id: PartyID) -> int:
    """Return the number of orga teams for that party."""
    teams_query = db.session.query(DbOrgaTeam).filter_by(party_id=party_id)
    return teams_query.count()
5,332,466
def num_series(datetime_series) -> pd.Series:
    """Return a datetime series with numeric values.

    ``datetime_series`` is a factory taking a length and returning a pandas
    Series; ``LENGTH`` is a module-level constant.
    """
    return datetime_series(LENGTH)
5,332,467
def obtain_ranks(outputs, targets, mode=0):
    """Compute ranks of positive relations per query group.

    outputs : tensor of size (batch_size, 1), required_grad = False, model predictions
    targets : tensor of size (batch_size, ), required_grad = False, labels
        Assume to be of format [1, 0, ..., 0, 1, 0, ..., 0, ..., 0]
    mode == 0: rank from distance (smaller is preferred)
    mode == 1: rank from similarity (larger is preferred)

    Returns a list with one rank list per [0, 1]-delimited group in targets.
    """
    if mode == 0:
        calculate_ranks = calculate_ranks_from_distance
    else:
        calculate_ranks = calculate_ranks_from_similarities
    all_ranks = []
    prediction = outputs.cpu().numpy().squeeze()
    label = targets.cpu().numpy()
    # Group boundaries are marked by a [0, 1] byte pattern in the labels.
    sep = np.array([0, 1], dtype=label.dtype)

    # Fast way to find subarray indices in a large array, c.f.
    # https://stackoverflow.com/questions/14890216
    # Fixes vs the original:
    #  - ndarray.tostring() was deprecated and removed in NumPy 2.0;
    #    tobytes() is the exact replacement.
    #  - the raw separator bytes are escaped before being used as a regex
    #    pattern; depending on dtype they can contain regex metacharacters
    #    (e.g. b'?' appears in the float64 encoding of 1.0).
    end_indices = [
        (m.start() // label.itemsize) + 1
        for m in re.finditer(re.escape(sep.tobytes()), label.tobytes())
    ]
    end_indices.append(len(label) + 1)
    start_indices = [0] + end_indices[:-1]

    for start_idx, end_idx in zip(start_indices, end_indices):
        distances = prediction[start_idx:end_idx]
        labels = label[start_idx:end_idx]
        positive_relations = list(np.where(labels == 1)[0])
        ranks = calculate_ranks(distances, positive_relations)
        all_ranks.append(ranks)
    return all_ranks
5,332,468
def warn(path: str, msg: str, req: "RequirementDirective" = None):
    """
    Log a warning
    """
    if req and req.requirement_id:
        req_id = req.requirement_id
    else:
        req_id = "UNKNOWN"
    print(f"WARNING: {path} | {req_id} | {msg}")
5,332,469
def sample_a2c_params(trial: optuna.Trial) -> Dict[str, Any]:
    """
    Sampler for A2C hyperparams.

    Draws learning-rate schedule, rollout length, GAE lambda, entropy and
    value-loss coefficients, gradient clipping, advantage normalization,
    optimizer choice and activation function from the trial, and returns
    them as keyword arguments for the A2C constructor. Several additional
    knobs (gSDE, network architecture) are present but commented out.
    """
    lr_schedule = trial.suggest_categorical("lr_schedule", ["linear", "constant"])
    learning_rate = trial.suggest_loguniform("learning_rate", 1e-5, 1)
    n_steps = trial.suggest_categorical("n_steps", [4, 8, 16, 32, 64, 128])
    gae_lambda = trial.suggest_categorical("gae_lambda", [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])
    ent_coef = trial.suggest_loguniform("ent_coef", 0.0000001, 0.1)
    vf_coef = trial.suggest_uniform("vf_coef", 0, 1)
    normalize_advantage = trial.suggest_categorical("normalize_advantage", [False, True])
    max_grad_norm = trial.suggest_categorical("max_grad_norm", [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 5])
    # Toggle PyTorch RMS Prop (different from TF one, cf doc)
    use_rms_prop = trial.suggest_categorical("use_rms_prop", [False, True])
    # Uncomment for gSDE (continuous actions)
    #log_std_init = trial.suggest_uniform("log_std_init", -4, 1)
    #ortho_init = trial.suggest_categorical("ortho_init", [False, True])
    # Uncomment for network architecture setting
    #net_arch = trial.suggest_categorical("net_arch", ["small", "medium"])
    # sde_net_arch = trial.suggest_categorical("sde_net_arch", [None, "tiny", "small"])
    # full_std = trial.suggest_categorical("full_std", [False, True])
    # activation_fn = trial.suggest_categorical('activation_fn', ['tanh', 'relu', 'elu', 'leaky_relu'])
    activation_fn = trial.suggest_categorical("activation_fn", ["tanh", "relu"])

    # Wrap the sampled initial rate in a linearly-decaying schedule if asked.
    if lr_schedule == "linear":
        learning_rate = linear_schedule(learning_rate)

    # net_arch = {
    #     "small": [dict(pi=[64, 64], vf=[64, 64])],
    #     "medium": [dict(pi=[256, 256], vf=[256, 256])],
    # }[net_arch]

    # Map the sampled string onto the actual torch activation class.
    activation_fn = {"tanh": nn.Tanh, "relu": nn.ReLU, "elu": nn.ELU, "leaky_relu": nn.LeakyReLU}[activation_fn]

    return {
        "learning_rate": learning_rate,
        "n_steps": n_steps,
        "gae_lambda": gae_lambda,
        "ent_coef": ent_coef,
        "vf_coef": vf_coef,
        "max_grad_norm": max_grad_norm,
        "use_rms_prop": use_rms_prop,
        "normalize_advantage": normalize_advantage,
        "policy_kwargs": dict(
            #log_std_init=log_std_init,
            #net_arch=net_arch,
            activation_fn=activation_fn
            #ortho_init=ortho_init,
        ),
    }
5,332,470
def standard_parameter_writer_bounds_mask(filename, params, pmin, pmax, mask, pid=None):
    """
    Standard parameter writer with parameter bounds and mask.

    Writes a space-separated file with one header line
    (``# value min max mask``) followed by one line per parameter:
    consecutive parameter number, current value, minimum, maximum, and the
    mask (1: include, 0: exclude). All floats use IEEE double precision
    ``{:.14e}`` formatting, e.g.::

        # value min max mask
        1 3.00000000000000e-01 0.00000000000000e+00 1.00000000000000e+00 1

    Parameters
    ----------
    filename : string
        Output filename with parameter values
    params : iterable
        Parameter values
    pmin : iterable
        Minimum parameter values
    pmax : iterable
        Maximum parameter values
    mask : iterable
        Parameter mask (1: include, 0: exclude from optimisation)
    pid : int, optional
        If given, output file will be `filename` suffixed by .pid

    Returns
    -------
    None
        No return value but output file written: filename or filename.pid
    """
    # Sanity checks: all per-parameter iterables must align in length.
    assert len(params) == len(pmin), \
        'Parameter and minima do not have the same length.'
    assert len(params) == len(pmax), \
        'Parameter and maxima do not have the same length.'
    assert len(params) == len(mask), \
        'Parameter and mask do not have the same length.'

    # Boolean mask entries become 0/1 integers in the output.
    int_mask = [int(flag) for flag in mask]

    # Existing file will be overwritten.
    outname = filename + '.' + str(pid) if pid else filename

    with open(outname, 'w') as fout:
        print('# value min max mask', file=fout)
        row = '{:d} {:.14e} {:.14e} {:.14e} {:d}'
        for i, (val, lo, hi, flag) in enumerate(zip(params, pmin, pmax, int_mask)):
            print(row.format(i + 1, val, lo, hi, flag), file=fout)
    return
5,332,471
def location_parser(selected_variables, column):
    """
    Parse the location variable by creating a list of start/stop pairs.

    Each entry in ``selected_variables[column]`` is a string such as
    ``"12-15"``; the hyphen is removed and both endpoints converted to int.

    Parameters:
        selected_variables (dataframe): The dataframe containing the location
            of the variables contained in the cps_selected_variables file
        column (character): The name of the column containing the start/stop
            positions

    Returns:
        selected_fields: A list of [start, stop] integer pairs
    """
    parsed_fields = []
    for raw_location in selected_variables[column]:
        parsed_fields.append([int(part) for part in raw_location.split('-')])
    return parsed_fields
5,332,472
def get_qmf_bank(h, n_band):
    """
    Modulates an input protoype filter into a bank of cosine modulated filters

    Parameters
    ----------
    h: torch.Tensor
        prototype filter
    n_band: int
        number of sub-bands
    """
    band_index = torch.arange(n_band).reshape(-1, 1)
    num_taps = h.shape[-1]
    t = torch.arange(-(num_taps // 2), num_taps // 2 + 1)
    # Per-band phase offset alternates +/- pi/4.
    phase = (-1) ** band_index * math.pi / 4
    modulation = torch.cos((2 * band_index + 1) * math.pi / (2 * n_band) * t + phase)
    return 2 * h * modulation
5,332,473
def test_theme_manager_themes(themes):
    """Test ThemeManager.themes

    Checks the manager exposes exactly the themes it was constructed with,
    matched by name.
    """
    tm = ThemeManager(themes=themes)
    theme_names = [t.name for t in themes]
    # Same count, and every managed theme's name came from the input set.
    assert len(tm.themes) == len(themes)
    for theme in tm.themes:
        assert theme.name in theme_names
5,332,474
def compare_one(col, cons_aa, aln_size, weights, aa_freqs, pseudo_size):
    """Compare column amino acid frequencies to overall via G-test.

    Returns the chi-square p-value of the G statistic with 19 degrees of
    freedom (20 amino acids - 1).

    NOTE(review): ``cons_aa`` and ``aln_size`` are unused here — presumably
    kept for a shared comparator signature; confirm with callers. Also, an
    amino acid observed in the column but absent from ``aa_freqs`` would hit
    log(x/0.0); presumably the pseudocounts in ``count_col`` prevent that —
    verify.
    """
    observed = count_col(col, weights, aa_freqs, pseudo_size)
    # G = 2 * sum(O * ln(O / E)) over the 20 amino acids.
    G = 2 * sum(obsv * math.log(obsv / aa_freqs.get(aa, 0.0))
                for aa, obsv in observed.items())
    pvalue = chisqprob(G, 19)
    return pvalue
5,332,475
def open(uri, mode='a', eclass=_eclass.manifest):
    """Open a Blaze object via an `uri` (Uniform Resource Identifier).

    NOTE(review): this deliberately shadows the builtin ``open`` — it is the
    package's public entry point, so the name cannot be changed.

    Parameters
    ----------
    uri : str
        Specifies the URI for the Blaze object.  It can be a regular file too.
        The URL scheme indicates the storage type:

          * carray: Chunked array
          * ctable: Chunked table
          * sqlite: SQLite table (the URI 'sqlite://' creates in-memory table)

        If no URI scheme is given, carray is assumed.

    mode : the open mode (string)
        Specifies the mode in which the object is opened.  The supported
        values are:

          * 'r' for read-only
          * 'w' for emptying the previous underlying data
          * 'a' for allowing read/write on top of existing data

        NOTE(review): ``mode`` is accepted but never used in this body —
        presumably consumed by the sources elsewhere; confirm.

    eclass : _eclass member
        Chooses between manifest (eager) and delayed (deferred) containers.

    Returns
    -------
    out : an Array or Table object.
    """
    ARRAY = 1
    TABLE = 2

    uri = urlparse(uri)
    path = uri.netloc + uri.path
    parms = params(storage=path)

    # Dispatch on URI scheme to pick the storage backend and the container
    # structure (array vs table).
    if uri.scheme == 'carray':
        source = CArraySource(params=parms)
        structure = ARRAY
    elif uri.scheme == 'ctable':
        source = CTableSource(params=parms)
        structure = TABLE
    elif uri.scheme == 'sqlite':
        # Empty path means memory storage
        parms = params(storage=path or None)
        source = SqliteSource(params=parms)
        structure = TABLE
    else:
        # Default is to treat the URI as a regular path
        parms = params(storage=path)
        source = CArraySource(params=parms)
        structure = ARRAY

    # Don't want a deferred array (yet)
    # return NDArray(source)
    if structure == ARRAY:
        if eclass is _eclass.manifest:
            return Array(source)
        elif eclass is _eclass.delayed:
            return NDArray(source)
    elif structure == TABLE:
        if eclass is _eclass.manifest:
            return Table(source)
        elif eclass is _eclass.delayed:
            return NDTable(source)
5,332,476
def create_df(dic_in, cols, input_type):
    """
    Convert JSON output from OpenSea API to pandas DataFrame

    :param dic_in: JSON output from OpenSea API (iterable of dicts)
    :param cols: Keys in JSON output from OpenSea API
    :param input_type: <TBD> save the columns with dictionaries as entries seperately
    :return: Cleaned DataFrame
    """
    # First pass: collect each requested key across all rows; missing keys
    # become None via dict.get.
    frame = pd.DataFrame(columns=cols)
    for key in cols:
        frame[key] = [entry.get(key) for entry in dic_in]

    # Second pass: columns whose entries are dictionaries get replaced by a
    # per-type placeholder value; for now the nested data is discarded.
    cleaned = frame.copy()
    for key in cleaned.columns:
        if key in map_dic:
            placeholder = map_replace_dic[map_dic[key]]
            for row_idx, _ in frame.iterrows():
                cleaned.at[row_idx, key] = placeholder
    return cleaned
5,332,477
def GenerateApiMap(base_dir, root_dir, api_config):
  """Create an apis_map.py file in the given root_dir with for given api_config.

  Args:
      base_dir: str, Path of directory for the project.
      root_dir: str, Path of the map file location within the project.
      api_config: regeneration config for all apis.
  """
  # Read the source of the api_def module; it is embedded verbatim into
  # the generated file so the map is self-contained.
  def_path, _ = os.path.splitext(api_def.__file__)
  def_source = files.ReadFileContents(def_path + '.py')

  tpl = template.Template(filename=os.path.join(os.path.dirname(__file__),
                                                'template.tpl'))
  target_file = os.path.join(base_dir, root_dir, 'apis_map.py')
  logging.debug('Generating api map at %s', target_file)
  api_map = _MakeApiMap(root_dir.replace('/', '.'), api_config)
  logging.debug('Creating following api map %s', api_map)
  with files.FileWriter(target_file) as out:
    ctx = runtime.Context(out,
                          api_def_source=def_source,
                          apis_map=api_map)
    tpl.render_context(ctx)
5,332,478
def file_updated_at(file_id, db_cursor):
    """Record when the file was last checked and log the executed query."""
    db_cursor.execute(queries.file_updated_at, {'file_id': file_id})
    # Persist the text of the query just executed into the project log table.
    log_entry = {
        'project_id': settings.project_id,
        'file_id': file_id,
        'log_area': 'file_updated_at',
        'log_text': db_cursor.query.decode("utf-8"),
    }
    db_cursor.execute(queries.insert_log, log_entry)
    return True
5,332,479
def test_get_files_directory(runner: click.testing.CliRunner) -> None:
    """It returns file in directory."""
    with runner.isolated_filesystem():
        # Create a directory containing a single TOML file to discover.
        os.mkdir("dir")
        with open("dir/file.toml", "w") as toml_file:
            toml_file.write("content doesnt matter")

        found = list(__main__.get_files(("dir",)))

        assert sorted(found) == sorted([pathlib.Path("dir/file.toml")])
5,332,480
def generate_comic_testers():
    """For each comic scraper, create a test class."""
    namespace = globals()
    if "TESTALL" in os.environ:
        # test all comics (this will take some time)
        classes = scraper.get_scraperclasses()
    else:
        # On Travis builds only run a small fixed subset to keep them fast.
        wanted = ['GoComics/CalvinandHobbes']
        classes = [cls for cls in scraper.get_scraperclasses()
                   if cls.getName() in wanted]
    for cls in classes:
        testname = 'Test' + cls.__name__
        namespace[testname] = make_comic_tester(testname, scraperclass=cls)
5,332,481
def action_to_upper(action):
    """Convert a PDDL action representation to upper case, in place.

    Upper-cases the action's name, types, predicates, requirements and
    action string.  A falsy input (e.g. None) is returned unchanged.

    :param action: A action in PddlActionRepresentation
    :return: PddlActionRepresentation: The action in upper case
    """
    if action:
        action.name = action.name.upper()
        # Loop variables renamed so they no longer shadow the builtin `type`.
        action.types = [type_name.upper() for type_name in action.types]
        action.predicates = [predicate.upper() for predicate in action.predicates]
        action.requirements = [requirement.upper() for requirement in action.requirements]
        action.action = action.action.upper()
    return action
5,332,482
def load_qa_frame(filename, frame=None, flavor=None):
    """ Load an existing QA_Frame or generate one, as needed

    Args:
        filename: str
        frame: Frame object, optional
        flavor: str, optional
          Type of QA_Frame

    Returns:
        qa_frame: QA_Frame object
    """
    from lvmspec.qa.qa_frame import QA_Frame
    log = get_logger()
    if not os.path.isfile(filename):
        # No QA file on disk: build the QA object from the frame itself.
        if frame is None:
            log.error("QA file {:s} does not exist. Expecting frame input".format(filename))
        qaframe = QA_Frame(frame)
    else:
        # A QA file exists: read it back from disk.
        qaframe = read_qa_frame(filename)
        log.info("Loaded QA file {:s}".format(filename))
        # Sanity-check the loaded QA metadata against the frame, if given.
        if frame is not None:
            for key in ['camera', 'expid', 'night', 'flavor']:
                assert getattr(qaframe, key) == frame.meta[key.upper()]
    # Optionally override the flavor.
    if flavor is not None:
        qaframe.flavor = flavor
    return qaframe
5,332,483
def generate_model_example(model: Type["Model"], relation_map: Dict = None) -> Dict:
    """
    Generates example to be included in schema in fastapi.

    :param model: ormar.Model
    :type model: Type["Model"]
    :param relation_map: dict with relations to follow
    :type relation_map: Optional[Dict]
    :return: dict with example values
    :rtype: Dict[str, int]
    """
    if relation_map is None:
        relation_map = translate_list_to_dict(model._iterate_related_models())

    # Fill in sample values for every ormar-declared field.
    example: Dict[str, Any] = {}
    for field_name, field in model.Meta.model_fields.items():
        populates_sample_fields_values(
            example=example, name=field_name, field=field, relation_map=relation_map
        )

    # Pydantic-only fields (everything not covered above) get their own
    # example values from the pydantic generator.
    ormar_names = {field_name for field_name in model.Meta.model_fields}
    example.update(
        generate_pydantic_example(pydantic_model=model, exclude=ormar_names)
    )
    return example
5,332,484
def compare_folder(request):
    """
    Creates the compare folder path `dione-sr/tests/data/test_name/compare`.
    """
    subfolder = 'compare'
    return get_test_path(subfolder, request)
5,332,485
def _get_param_combinations(lists): """Recursive function which generates a list of all possible parameter values""" if len(lists) == 1: list_p_1 = [[e] for e in lists[0]] return list_p_1 list_p_n_minus_1 = _get_param_combinations(lists[1:]) list_p_1 = [[e] for e in lists[0]] list_p_n = [p_1 + p_n_minus_1 for p_1 in list_p_1 for p_n_minus_1 in list_p_n_minus_1] return list_p_n
5,332,486
def test_get_unitary_matrix_CNOT(target_wire):
    """Test CNOT: 2-qubit gate with different target wires, some non-adjacent.

    Replaces the per-wire if/elif chain (which raised NameError for any
    unlisted target) with a direct index assignment covering every wire.
    """
    wires = [0, 1, 2, 3, 4]

    def testcircuit():
        qml.CNOT(wires=[1, target_wire])

    get_matrix = get_unitary_matrix(testcircuit, wires)
    matrix = get_matrix()

    # test the matrix operation on a state
    state0 = [1, 0]
    state1 = [0, 1]

    # All wires start in |1>; since the control (wire 1) is set, the CNOT
    # flips the target wire back to |0>.
    teststate = reduce(np.kron, [state1] * len(wires))
    expected_factors = [state1] * len(wires)
    expected_factors[target_wire] = state0
    expected_state = reduce(np.kron, expected_factors)

    obtained_state = matrix @ teststate
    assert np.allclose(obtained_state, expected_state)
5,332,487
def system_mass_spring_dumper():
    """Design example for a mass-spring-damper system.

    Builds the state-space model, computes the optimal sliding hyperplane,
    and returns the matrices, gain, and initial state used by the
    sliding-mode controller.

    Returns:
        tuple: (C, D, S, gain, x, u, A_, B_, T) — output matrix,
        feedthrough, hyperplane vector, switching gain, initial state,
        initial input, transformed system matrices and transform.
    """
    # Physical parameters of the plant.
    mass = 1.0
    spring = 1.0
    damper = 1.0

    # State-space model with state x = [position, velocity].
    A = np.array([
        [0.0, 1.0],
        [-spring/mass, -damper/mass]
    ])
    B = np.array([
        [0],
        [1/mass]
    ])
    C = np.eye(2)
    D = np.zeros((2, 1), dtype=float)

    # Weight matrix for the optimal hyperplane design.
    W = np.diag([1.0, 1.0])
    S1, S2, A_, B_, T = optimal_hyperplane_vector(A, B, W)
    S = np.hstack((S1, S2))

    x, u = initialize_system(A, B)
    x[0] = 0.0
    x[1] = 10.0

    # Switching gain of the controller.  Renamed from `k`, which previously
    # shadowed the spring constant defined above.
    gain = 10

    return C, D, S, gain, x, u, A_, B_, T
5,332,488
def explore_voxel(
    start_pos: tuple,
    masked_atlas: ma.MaskedArray,
    *,
    count: int = -1,
) -> int:
    """Explore a given voxel.

    Breadth-first search starting at ``start_pos`` that stops as soon as
    an unmasked voxel with a different value is found, or after ``count``
    BFS steps have been taken.

    Parameters
    ----------
    start_pos
        A triplet with the (x, y, z) coordinates of the starting voxel.
    masked_atlas
        A masked 3D array with the volume data.
    count
        Maximal number of iterations. A negative value means no limit on
        the number of iterations.

    Returns
    -------
    int
        The value of some voxel in the data volume.
    """
    logger.debug("exploring voxel %s", start_pos)
    if not isinstance(start_pos, tuple):
        raise ValueError(
            f"The starting position must be a tuple (got {type(start_pos)})"
        )

    shape = masked_atlas.shape
    # The order in which the neighbours are explored probably matters
    neighbour_offsets = [
        (-1, 0, 0), (0, -1, 0), (1, 0, 0), (0, 1, 0), (0, 0, -1), (0, 0, 1)
    ]
    start_value = masked_atlas[start_pos]

    visited = {start_pos}
    frontier = deque([start_pos])
    while frontier and count != 0:
        pos = frontier.popleft()
        value = masked_atlas[pos]

        # Stop at the first unmasked voxel whose value differs.
        if value != start_value and value is not ma.masked:
            return value

        # BFS expansion: enqueue unseen in-bounds neighbours.
        for offset in neighbour_offsets:
            candidate = tuple(p + d for p, d in zip(pos, offset))
            if (
                all(0 <= c < bound for c, bound in zip(candidate, shape))
                and candidate not in visited
            ):
                visited.add(candidate)
                frontier.append(candidate)
        count -= 1
    return start_value
5,332,489
def plot_compressed_channel_stats(stats, color=None, y_center=False, title=None):
    """
    Similar to plot_channel_stats except everything is represented in a
    single plot (i.e. no subplots).
    """
    plt.figure(figsize=(6, 4))
    axes = plt.gca()

    if y_center:
        # Symmetric y-limits around zero, rounded up to the next 0.1.
        span = math.ceil(stats.abs().values.max() * 10) / 10
        axes.set_ylim(-span, span)

        # Shade the negative half-plane for better contrast when there
        # are negative values to show.
        if stats.values.min() < 0:
            axes.axhspan(0, axes.get_ylim()[0], facecolor='0.2', alpha=0.15)

    # The actual plot.
    stats.plot(kind='bar', color=color, width=0.6, ax=axes)

    # Presentation cleanup.
    plt.xlabel('')
    plt.xticks(rotation=0)
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)

    # Optional title at the top.
    if title is not None:
        plt.title(title)
    plt.show()
5,332,490
def get_green_button_xml(
    session: requests.Session, start_date: date, end_date: date
) -> str:
    """Download Green Button XML."""
    # Dates are embedded in the path in MM-DD-YYYY form.
    url = (
        'https://myusage.torontohydro.com/cassandra/getfile/period/custom'
        f'/start_date/{start_date:%m-%d-%Y}/to_date/{end_date:%m-%d-%Y}/format/xml'
    )
    response = session.get(url)
    response.raise_for_status()
    return response.text
5,332,491
def calculateSecFromEpoch(date, hour):
    """Calculate seconds since the Unix epoch (UTC) for a timestamp.

    Parses the fields numerically with a single strptime format instead of
    the previous month-number-to-name dict and timezone-literal round trip
    (the timezone text was matched as a literal and had no effect on the
    result, since timegm always interprets the struct_time as UTC).

    :param date: 'YYMMDD' string; the century comes from YEAR_PREFIX
    :param hour: 'HHMMSS' string
    :return: int seconds since the epoch
    """
    timestamp = YEAR_PREFIX + date[0:6] + hour[0:6]
    return timegm(strptime(timestamp, '%Y%m%d%H%M%S'))
5,332,492
def star_noise_simulation(Variance, Pk, nongaussian = False):
    """simulates star + noise signal, Pk is hyperprior on star variability and flat at high frequencies which is stationary noise"""
    # Random phases for each frequency; node amplitudes are sqrt(Pk) split
    # into cosine and sine components.
    phases = np.random.uniform(0, 2 * np.pi, len(Pk))
    amplitudes = np.sqrt(np.concatenate((Pk, Pk)))
    nodes0 = amplitudes * np.concatenate((np.cos(phases), np.sin(phases)))

    flux = flux_nodes(nodes0, len(Variance))
    if nongaussian:
        # Replace a random subset of the points with heavy-tailed
        # (non-central t) outliers.
        mask = np.random.random(len(flux)) < distribution_parameters[0]
        flux[mask] = stats.nct.rvs(*distribution_parameters[1:], size=np.sum(mask))
    return flux / Variance
5,332,493
def main():
    """Print every word that appears in both input files."""
    args = get_args()

    # dict.fromkeys de-duplicates while keeping first-seen order, so the
    # output order matches the words' first appearance in file1.
    words1 = dict.fromkeys(
        word for line in args.file1 for word in line.split()
    )
    words2 = {word for line in args.file2 for word in line.split()}

    for word in words1:
        if word in words2:
            print(word)
5,332,494
def test_cancel_already_done_normal(client):
    """attempt to cancel application that already-done lottery
        1. create 'done' application
        2. attempt to cancel that application
        target_url: /lotteries/<id> [DELETE]
    """
    auth = login(client,
                 test_user['secret_id'],
                 test_user['g-recaptcha-response'])
    token = auth['token']
    lottery_id = 1
    application_id = make_application(
        client, test_user['secret_id'], lottery_id)

    # Mark the application as already decided ('lose') directly in the DB.
    with client.application.app_context():
        application = Application.query.filter_by(
            id=application_id).first()
        application.status = 'lose'
        db.session.add(application)
        db.session.commit()

    resp = client.delete(f'/applications/{application_id}',
                         headers={'Authorization': f'Bearer {token}'})

    # Cancelling a decided application must be rejected.
    assert resp.status_code == 400
    assert 'The Application has already fullfilled' in resp.get_json()[
        'message']
5,332,495
def get_seq_num():
    """Return a sequence number derived from the current Unix time.

    The epoch time (in whole seconds) is truncated to its last 7 digits,
    i.e. taken modulo 10**7, so values wrap roughly every four months.
    (The previous datetime -> timetuple -> mktime round trip computed the
    same whole-second epoch value with extra steps.)
    """
    return int(time.time()) % 10000000
5,332,496
def test_trydoer_throw():
    """ Test TryDoer testing class with throw to force exit """
    tymist = tyming.Tymist(tock=0.125)
    doer = TryDoer(tymth=tymist.tymen(), tock=0.25)
    assert doer.tock == 0.25
    assert doer.states == []
    assert tymist.tyme == 0.0

    do = doer(tymth=doer.tymth, tock=doer.tock)
    assert inspect.isgenerator(do)

    # Expected state history, grown incrementally as the doer advances.
    expected = []

    # Priming send runs the 'enter' context.
    assert do.send(None) == 0.25
    expected.append(State(tyme=0.0, context='enter', feed='Default', count=0))
    assert doer.states == expected

    assert do.send("Hello") == 0.25
    expected.append(State(tyme=0.0, context='recur', feed='Hello', count=1))
    assert doer.states == expected

    tymist.tick()
    assert do.send("Hi") == 0.25
    expected.append(State(tyme=0.125, context='recur', feed='Hi', count=2))
    assert doer.states == expected

    # Throwing into the generator forces abort then exit.
    tymist.tick()
    try:
        do.throw(ValueError, "Bad")
    except ValueError as ex:
        assert ex.args[0] == "Bad"  # exception value is thrown value
    expected.append(State(tyme=0.25, context='abort', feed='Bad', count=3))
    expected.append(State(tyme=0.25, context='exit', feed=None, count=4))
    assert doer.states == expected

    # Sending after the throw raises StopIteration with no value and does
    # not record any further states.
    tymist.tick()
    try:
        do.send("what?")
    except StopIteration as ex:
        assert ex.value == None  # after throw no StopIteration value
    assert doer.states == expected
5,332,497
def get_packages_for_file_or_folder(source_file, source_folder):
    """
    Collects all the files based on given parameters. Exactly one of the
    parameters has to be specified.

    If source_file is given, it will return with a list containing source_file.
    If source_folder is given, it will search recursively all files in the
    directory and return the list of found files.
    """
    # Exactly one of the two inputs must be provided (logical XOR).
    if bool(source_folder) == bool(source_file):
        log('Source_folder XOR source_file has to be specified, exactly one of them.',
            logging.ERROR,
            source_file=source_file,
            source_folder=source_folder)
        return ()

    if source_file:
        source = abspath(source_file)
        if not isfile(source):
            log('Source file does not exist', logging.ERROR)
            return ()
        return [source]

    source = abspath(source_folder)
    if not isdir(source):
        log('Source folder does not exist', logging.ERROR)
        return ()
    return get_files(source)
5,332,498
def sanitize_tag(tag: str) -> str:
    """Strip surrounding whitespace and turn each inner space into an underscore.

    Parameters
    ----------
    tag: str

    Returns
    -------
    str
        Cleaned tag

    Examples
    --------
    >>> sanitize_tag(" Machine Learning ")
    'Machine_Learning'
    """
    stripped = tag.strip()
    # split(" ")/join replaces every single space, including consecutive
    # ones, exactly like str.replace(" ", "_").
    return "_".join(stripped.split(" "))
5,332,499