content
stringlengths
22
815k
id
int64
0
4.91M
def test_non_provider_fields(get_good_response): """Ensure provider fields are excluded when not requested""" non_provider_specific_field = "elements" request = f"/structures?response_fields={non_provider_specific_field}" response = get_good_response(request) returned_attributes = set() for _ in response.get("data", []): returned_attributes |= set(_.get("attributes", {}).keys()) assert returned_attributes == { non_provider_specific_field, }
5,333,200
def track_to_note_string_list(
    track: Track,
) -> List[str]:
    """Convert a mingus.containers.Track to a flat list of note strings."""
    notes: List[str] = []
    for bar_entry in track.get_notes():
        # The last element of each entry holds the notes sounding there.
        notes.extend(note_to_string(note) for note in bar_entry[-1])
    return notes
5,333,201
def test_number_of_interval_slicer_numberpoints3_numberintervals2_include_max(
    test_data,
):
    """
    Function to compare characterisics of reference interval generated by
    NumberOfIntervalSlicer and manually generated interval.

    Specific test case: the number of intervals = 3, the upper boundary of the
    last interval is inclusive, the minimum points per intervals = 3 and the
    minimum number of intervals = 2.

    Parameters
    ----------
    test_data : numpy array
        Data to test the interval slicer.

    Returns
    -------
    None.
    """
    # Hand-computed expectation: data spanning [1.2, 5] split into 3
    # equal-width intervals; the first expected interval drops below
    # min_n_points, so only 2 intervals survive.
    ref_intervals = [[1.2, 1.5, 2.4], [2.5, 2.6, 3.1, 3.5, 3.6]]
    ref_width = (5 - 1.2) / 3
    # Interval centers for the two surviving intervals.
    ref_references = [1.2 + ref_width / 2, 1.2 + 3 * ref_width / 2]
    ref_boundaries = [
        (c - ref_width / 2, c + ref_width / 2) for c in ref_references
    ]
    number_slicer = NumberOfIntervalsSlicer(3, min_n_points=3, min_n_intervals=2)
    my_slices, my_references, my_boundaries = number_slicer.slice_(test_data)
    # Materialize the slices into the actual data points per interval.
    my_intervals = [test_data[slice_] for slice_ in my_slices]
    np.testing.assert_almost_equal(my_references, ref_references)
    assert len(my_intervals) == len(ref_intervals)
    for i in range(len(my_intervals)):
        np.testing.assert_array_equal(my_intervals[i], ref_intervals[i])
    for i in range(len(my_boundaries)):
        np.testing.assert_almost_equal(my_boundaries[i], ref_boundaries[i])
5,333,202
def group_sums_dummy(x, group_dummy):
    """Sum ``x`` by groups given a group dummy (indicator) variable.

    ``group_dummy`` can be either an ndarray or a scipy sparse matrix.
    """
    if data_util._is_using_ndarray_type(group_dummy, None):
        return np.dot(x.T, group_dummy)
    # check for sparse: ``*`` performs matrix multiplication on sparse types
    return x.T * group_dummy
5,333,203
def fixture_multi_check_schema() -> DataFrameSchema:
    """Schema with multiple positivity checks on column `a`.

    Thin fixture wrapper around the shared `_multi_check_schema` builder.
    """
    return _multi_check_schema()
5,333,204
def get_featurizer(featurizer_key: str) -> ReactionFeaturizer:
    """Instantiate the ReactionFeaturizer registered under the given key.

    :param: featurizer_key: key of a ReactionFeaturizer
    :return: a ReactionFeaturizer for a specified key
    :raises ValueError: when no featurizer is registered for the key
    """
    if featurizer_key in FEATURIZER_INITIALIZERS:
        return FEATURIZER_INITIALIZERS[featurizer_key]()
    raise ValueError(f"No featurizer for key {featurizer_key}")
5,333,205
def window_open(dev, temp, duration):
    """ Gets and sets the window open settings. """
    click.echo("Window open: %s" % dev.window_open)
    open_temp = dev.window_open_temperature
    if open_temp is not None:
        click.echo("Window open temp: %s" % open_temp)
    open_time = dev.window_open_time
    if open_time is not None:
        click.echo("Window open time: %s" % open_time)
    # Only reconfigure when both a temperature and a duration were supplied.
    if temp and duration:
        click.echo("Setting window open conf, temp: %s duration: %s" % (temp, duration))
        dev.window_open_config(temp, duration)
5,333,206
def test_create_update_file_share_link(requests_mock, mocker):
    """
    Tests the box-create-file-share-link function and command.

    Configures requests_mock instance to generate the appropriate files API
    response, loaded from a local JSON file. Checks the output of the command
    function with the expected output.

    Verifies:
     - The Authorization header is correct
     - Shared link request sends the correct password
     - Outputs match the expected result.

    Given: A valid file_id and password
    When: Executing the box-create-file-share-link command
    Then: Return the result where the outputs match the mocked response.
    """
    from BoxV2 import create_update_file_share_link_command

    # Canned Box API response used both as mock payload and expected output.
    mock_response = util_load_json('test_data/create_update_file_share_link.json')
    requests_mock.put(
        'https://api.box.com/2.0/files/742246263170/?fields=shared_link',
        json=mock_response)

    client = TestBox(mocker).client
    args = {
        'file_id': '742246263170',
        'password': 'some_pass',
        'access': 'open',
        'as_user': '1234567'
    }
    response = create_update_file_share_link_command(client, args)
    # First recorded request is the PUT issued by the command.
    assert requests_mock.request_history[0].headers.get('Authorization') == "Bearer JWT_TOKEN"
    assert requests_mock.request_history[0].json().get('shared_link').get('password') == 'some_pass'
    assert response.outputs_prefix == 'Box.ShareLink'
    assert response.outputs_key_field == 'id'
    assert response.outputs == mock_response
5,333,207
def is_negative_spec(*specs: List[str]) -> bool:
    """Check for negative values in a variable number of spec lists.

    Each positional argument is a list of spec strings (may also be None or
    empty, in which case it is skipped). Each string within each list is
    searched for a '-' sign.

    Fixes the original annotation, which typed each vararg as a list of
    lists; each argument is in fact one list of strings.

    :param specs: spec lists to search
    :return: True if any spec string contains '-', otherwise False
    """
    return any(
        '-' in spec
        for specset in specs
        if specset
        for spec in specset
    )
5,333,208
async def download(
    client: HTTPClient,
    outpath: FilePath,
    api_root: DecodedURL,
    cap: str,
    child_path: Optional[Iterable[str]] = None,
) -> None:
    """
    Download the object identified by the given capability to the given path.

    The content is streamed into a temporary sibling file and only moved to
    ``outpath`` once the download completes successfully.

    :param client: An HTTP client to use to make requests to the Tahoe-LAFS
        HTTP API to perform the upload.
    :param outpath: The path to the regular file to which the downloaded
        content will be written.
    :param api_root: The location of the root of the Tahoe-LAFS HTTP API to
        use. This should typically be the ``node.url`` value from a
        Tahoe-LAFS client node.
    :param cap: The capability of the object to download.
    :param child_path: Optional path segments beneath the capability.
    :raise: If there is a problem downloading the data then some exception is
        raised.
    """
    outtemp = outpath.temporarySibling()

    uri = api_root.child("uri").child(cap)
    for segment in (child_path if child_path is not None else ()):
        uri = uri.child(segment)

    resp = await client.get(uri)
    if resp.code != 200:
        body = (await treq.content(resp)).decode("utf-8")
        raise TahoeAPIError("get", uri, resp.code, body)

    with outtemp.open("w") as f:
        await treq.collect(resp, f.write)
    outtemp.moveTo(outpath)
5,333,209
def is_np_timedelta_like(dtype: DTypeLike) -> bool:
    """Return True when ``dtype`` belongs to the numpy timedelta64 family."""
    return bool(np.issubdtype(dtype, np.timedelta64))
5,333,210
def pp2mr(pv, p):
    """Calculate the mixing ratio (kg/kg) from partial pressure ``pv`` and
    total pressure ``p``.

    Both pressures must be in the same units and no condensate is assumed
    present. Checked 20.03.20.
    """
    # don't specify pascal as this will wrongly corrected
    pv, pv_was_scalar = flatten_input(pv)
    p, p_was_scalar = flatten_input(p)

    mr = eps1 * pv / (p - pv)

    if pv_was_scalar and p_was_scalar:
        return np.squeeze(mr)
    return mr
5,333,211
def test_p_plot():
    """Evaluate whether the matplotlib object created by ``p_plot`` has the
    correct layers, mirroring the checks done in R:

    - the output is a matplotlib object;
    - the axis labels are correct ("p(k)" and "k");
    - the chart type is a scatter plot combined with two lines;
    - the plotted series are "pvalue" and "k".

    We could not find a way to extract this information from the axes of the
    matplotlib object (in R everything is stored in a list), so the
    assertions below remain disabled until the figure can be introspected.
    """
    # fig = p_plot(X, y)
    # assert type(fig) == matplotlib.figure.Figure, "the object includes a matplotlib figure"
5,333,212
def initialize_database(app):
    """Bind the shared SQLAlchemy ``db`` to an initialized Flask application
    so queries can be executed, and return the db handle.
    """
    # see https://github.com/mitsuhiko/flask-sqlalchemy/issues/82
    db.app = app
    db.init_app(app)
    return db
5,333,213
def decrypt(input_file: TextIO, wordlist_filename: str) -> str:
    """Decrypt a Caesar-shifted ``input_file`` using an English wordlist.

    For every input line, all 26 shifts are tried and the shift producing
    the most English-wordlist matches is kept.

    Fixes: removed the dead ``encrypt = []`` / unused locals, and reset the
    per-line best candidate so that a line with no wordlist matches yields
    an empty string instead of repeating the previous line's result.

    :param input_file: open text file containing the ciphertext
    :param wordlist_filename: path to a newline-separated English wordlist
    :return: the decrypted plaintext
    """
    # Load the English wordlist into a set for O(1) membership tests.
    with open(wordlist_filename) as wordlist_file:
        english_words = {line.strip() for line in wordlist_file}

    decrypted_lines = []
    for line in input_file:
        ciphertext = line.lower()
        best_text = ''
        best_score = 0
        for amount in range(26):
            candidate = shift(ciphertext, amount).split()
            # Count candidate words (punctuation/symbols stripped) that
            # appear in the English wordlist.
            score = 0
            for word in candidate:
                cleaned = ''.join(ch for ch in word if ch.isalnum())
                if cleaned in english_words:
                    score += 1
            # Keep the highest-scoring shift for this line.
            if score > best_score:
                best_text = ' '.join(candidate)
                best_score = score
        decrypted_lines.append(best_text)
    return '\n'.join(decrypted_lines).strip()
5,333,214
def loadEvents(fname):
    """Read a file whose lines are "<unix-timestamp> <arbitrary string>".

    Fixes: the file handle is now closed via a context manager, the
    duplicate ``events = []`` initialization is removed, and the builtin
    ``str`` is no longer shadowed. (The old docstring claimed global
    mint/maxt tracking, which this function never did.)

    :param fname: path to the events file
    :return: list of dicts with keys 't' (int timestamp) and 's' (str text)
    """
    events = []
    with open(fname, 'r') as f:
        for line in f.read().splitlines():
            # The first space separates the timestamp from the free text.
            ix = line.find(' ')
            stamp = int(line[:ix])
            text = line[ix + 1:]
            events.append({'t': stamp, 's': text})
    return events
5,333,215
def show_wordcloud(data, title=None):
    """Render a word cloud of the given documents.

    Parameters
    ----------
    data : list
        list of (string) documents
    title : str, optional
        figure title displayed above the cloud
    """
    cloud = WordCloud(
        background_color='black',
        stopwords=set(STOPWORDS),
        max_words=200,
        max_font_size=40,
        scale=3,
        random_state=1,
    ).generate(str(data))

    fig = plt.figure(1, figsize=(15, 15))
    plt.axis('off')
    if title:
        fig.suptitle(title, fontsize=20)
        fig.subplots_adjust(top=2.3)
    plt.imshow(cloud)
    plt.show()
5,333,216
def compute_relative_target_raw(current_pose, target_pose): """ Computes the relative target pose which has to be fed to the network as an input. Both target pose and current_pose have to be in the same coordinate frame (gloabl map). """ # Compute the relative goal position goal_position_difference = [target_pose.pose.position.x - current_pose.pose.position.x, target_pose.pose.position.y - current_pose.pose.position.y] # Get the current orientation and the goal orientation current_orientation = current_pose.pose.orientation p = [current_orientation.x, current_orientation.y, current_orientation.z, current_orientation.w] goal_orientation = target_pose.pose.orientation q = [goal_orientation.x, goal_orientation.y, goal_orientation.z, goal_orientation.w] # Rotate the relative goal position into the base frame (robot frame) goal_position_base_frame = tf.transformations.quaternion_multiply(tf.transformations.quaternion_inverse(p), tf.transformations.quaternion_multiply([goal_position_difference[0], goal_position_difference[1], 0, 0], p)) # Compute the difference to the goal orientation orientation_to_target = tf.transformations.quaternion_multiply(q, tf.transformations.quaternion_inverse(p)) yaw = tf.transformations.euler_from_quaternion(orientation_to_target)[2] return (goal_position_base_frame[0], -goal_position_base_frame[1], yaw)
5,333,217
def get_tokeninfo_remote(token_info_url, token):
    """
    Retrieve oauth token_info remotely using HTTP.

    :param token_info_url: Url to get information about the token
    :type token_info_url: str
    :param token: oauth token from authorization header
    :type token: str
    :rtype: dict or None if the token info request failed
    """
    token_request = httpx.get(token_info_url,
                              headers={'Authorization': 'Bearer {}'.format(token)},
                              timeout=5)
    # Fix: httpx.Response has no `.ok` attribute (that is the `requests`
    # API); replicate its semantics (status < 400) explicitly.
    if token_request.status_code >= 400:
        return None
    return token_request.json()
5,333,218
def make_site_object(config, seen):
    """Make object with site values for evaluation.

    Returns a SimpleNamespace whose attributes are zero-argument callables
    (so templates can evaluate them lazily), plus foot/head/stats fillers
    driven by the optional templates in ``config.template``.

    NOTE(review): the ``seen`` parameter is not used in this body —
    presumably kept for interface symmetry with sibling builders; confirm.
    """
    now = datetime.today().strftime("%Y-%m-%d")
    # Pre-render the subtitle markup once; empty string when unset.
    subtitle = (
        f'<h2 class="subtitle">{config.subtitle}</h2>'
        if config.subtitle
        else ""
    )
    site = SN(
        author=lambda: config.author,
        builddate=lambda: now,
        copyrightyear=lambda: config.copyrightyear,
        domain=lambda: config.domain,
        email=lambda: config.email,
        lang=lambda: config.lang,
        repo=lambda: config.repo,
        title=lambda: config.title,
        subtitle=lambda: subtitle,
        tool=lambda: config.tool
    )
    # foot/head take the page root at call time; stats is filled eagerly.
    if "foot.html" in config.template:
        site.foot = lambda root: _fill(
            "foot.html", config.template["foot.html"], site, SN(root=root)
        )
    else:
        site.foot = lambda root: ""
    if "head.html" in config.template:
        site.head = lambda root: _fill(
            "head.html", config.template["head.html"], site, SN(root=root)
        )
    else:
        site.head = lambda root: ""
    if "stats.html" in config.template:
        filled = _fill("stats.html", config.template["stats.html"], site, SN())
        site.stats = lambda: filled
    else:
        site.stats = lambda: ""
    return site
5,333,219
def get_filename():
    """Build (and announce) the timestamped output filename, removing any
    stale file of the same name."""
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M")
    outfile_name = "node_ip_cfg_info_" + timestamp + '.txt'
    # Remove a leftover file from a previous run within the same minute.
    if os.path.exists(outfile_name):
        os.remove(outfile_name)
    print('Output file name is: {}'.format(outfile_name))
    return outfile_name
5,333,220
def map_aircraft_to_record(aircrafts, message_now, device_id):
    """
    Maps the `aircraft` entity to a BigQuery record and its unique id.
    Returns `(unique_ids, records)`
    """
    def to_record(aircraft):
        fields = ('hex', 'squawk', 'flight', 'lat', 'lon', 'nucp', 'seen_pos',
                  'altitude', 'vert_rate', 'track', 'speed', 'messages',
                  'seen', 'rssi')
        record = {name: aircraft.get(name) for name in fields}
        record['device_id'] = device_id
        record['timestamp'] = datetime.utcfromtimestamp(float(message_now)).isoformat()
        # Hash the record (without created_at) so the id is deterministic.
        digest = hashlib.sha512(json.dumps(record).encode('utf-8')).hexdigest()
        unique_id = f'{message_now}_{digest}'
        record['created_at'] = datetime.now().isoformat()
        return (unique_id, record)

    return zip(*map(to_record, aircrafts))
5,333,221
def add_gaussian_noise(image, mean=0, std=0.001):
    """Add Gaussian noise to an image.

    Note: despite its name, ``std`` is treated as a *variance* — the noise
    scale passed to ``np.random.normal`` is ``std ** 0.5``. This is kept
    for backward compatibility with existing callers.

    Fixes: removed the debug ``print`` of the empirical noise variance and
    translated the docstring.

    :param image: image array with values in [0, 255]
    :param mean: mean of the Gaussian noise
    :param std: variance of the Gaussian noise (see note above)
    :return: noisy image as a uint8 array in [0, 255]
    """
    image = np.array(image / 255, dtype=float)
    noise = np.random.normal(mean, std ** 0.5, image.shape)
    out = image + noise
    # Clip to [-1, 1] for signed inputs, [0, 1] otherwise, then rescale.
    low_clip = -1. if image.min() < 0 else 0.
    out = np.clip(out, low_clip, 1.0)
    return np.uint8(out * 255)
5,333,222
def imagenet_get_datasets(data_dir, arch, load_train=True, load_test=True): """ Load the ImageNet dataset. """ # Inception Network accepts image of size 3, 299, 299 if distiller.models.is_inception(arch): resize, crop = 336, 299 else: resize, crop = 256, 224 if arch == 'googlenet': normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) else: normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) train_dir = os.path.join(data_dir, 'train') test_dir = os.path.join(data_dir, 'val') train_dataset = None if load_train: train_transform = transforms.Compose([ transforms.RandomResizedCrop(crop), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ]) train_dataset = datasets.ImageFolder(train_dir, train_transform) test_dataset = None if load_test: test_transform = transforms.Compose([ transforms.Resize(resize), transforms.CenterCrop(crop), transforms.ToTensor(), normalize, ]) test_dataset = datasets.ImageFolder(test_dir, test_transform) return train_dataset, test_dataset
5,333,223
def sub(xs, ys):
    """
    Computes xs - ys, such that elements in xs that occur in ys are removed.
    @param xs: list
    @param ys: list
    @return: xs - ys
    """
    remaining = []
    for item in xs:
        if item not in ys:
            remaining.append(item)
    return remaining
5,333,224
def create_folio_skill(request, folio_id):
    """Create a new folio skill from POSTed form data, then redirect to the
    folio's skill editor. Flashes a success or error message as appropriate.
    """
    if request.method != "POST":
        messages.error(
            request,
            "Data should be posted when "
            "attempting to create a new skill."
        )
    else:
        form = FolioSkillForm(request.POST)
        if not form.is_valid():
            messages.error(
                request,
                "Data posted was not valid "
                "to create a new skill."
            )
        else:
            skill = form.save(commit=False)
            skill.author_id = request.user
            skill.save()
            messages.success(
                request,
                f"The {skill.skill_title} skill has "
                f"been created successfully."
            )
    return redirect(
        reverse("edit_folio_skills", kwargs={"folio_id": folio_id})
    )
5,333,225
def lambda_handler(event, context):
    """AWS Lambda Function entrypoint to cancel booking

    Parameters
    ----------
    event: dict, required
        Step Functions State Machine event

        chargeId: string
            pre-authorization charge ID

    context: object, required
        Lambda Context runtime methods and attributes
        Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html

    Returns
    -------
    boolean

    Raises
    ------
    BookingCancellationException
        Booking Cancellation Exception including error message upon failure
    """
    # Emit a ColdStart metric exactly once per container lifetime.
    global _cold_start
    if _cold_start:
        log_metric(
            name="ColdStart", unit=MetricUnit.Count, value=1, function_name=context.function_name
        )
        _cold_start = False
        print("COLDSTART", context.aws_request_id)

    booking_id = event.get("bookingId")
    if not booking_id:
        # Count and log malformed events before failing fast.
        log_metric(
            name="InvalidBookingRequest", unit=MetricUnit.Count, value=1, operation="cancel_booking"
        )
        logger.error({"operation": "invalid_event", "details": event})
        raise ValueError("Invalid booking ID")

    try:
        logger.debug(f"Cancelling booking - {booking_id}")
        ret = cancel_booking(booking_id)

        log_metric(name="SuccessfulCancellation", unit=MetricUnit.Count, value=1)
        logger.debug("Adding Booking Status annotation")
        tracer.put_annotation("BookingStatus", "CANCELLED")

        return ret
    except BookingCancellationException as err:
        log_metric(name="FailedCancellation", unit=MetricUnit.Count, value=1)
        logger.debug("Adding Booking Status annotation before raising error")
        tracer.put_annotation("BookingStatus", "ERROR")

        logger.error({"operation": "cancel_booking", "details": err})
        # Re-raise wrapped so callers receive the original failure details.
        raise BookingCancellationException(details=err)
5,333,226
def reduce_tags(tags):
    """Filter a set of tags to return only those that aren't descendents
    from others in the list."""
    kept = []
    for candidate in tags:
        # Keep the candidate only if tag_before holds against every other tag.
        dominated = False
        for other in tags:
            if candidate == other:
                continue
            if not tag_before(candidate, other):
                dominated = True
                break
        if not dominated:
            kept.append(candidate)
    return kept
5,333,227
def init():
    """Run once on startup.

    You should check if the supporting files your model needs have been
    created, and if not then you should create/fetch them.

    Placeholder init code: replace the sleep with a check for the required
    model files etc.
    """
    global nlp
    nlp = pipeline("ner")
    time.sleep(1)
5,333,228
def test_basic():
    """A simple end-to-end test case.

    Runs SurrogateSearchCV over a small random forest on the digits dataset
    and checks that the search history has one entry per iteration.
    """
    digits = load_digits()
    X, y = digits.data, digits.target
    clf = RandomForestClassifier(n_estimators=5)
    # Integer search space for three tree hyperparameters.
    param_def = [
        {
            'name': 'max_depth',
            'integer': True,
            'lb': 3,
            'ub': 6,
        },
        {
            'name': 'max_features',
            'integer': True,
            'lb': 1,
            'ub': 11,
        },
        {
            'name': 'min_samples_split',
            'integer': True,
            'lb': 2,
            'ub': 11,
        },
    ]
    n_iter_search = 100
    surrogate_search = SurrogateSearchCV(clf,
                                         param_def=param_def,
                                         n_iter=n_iter_search,
                                         cv=5)
    start = time()
    surrogate_search.fit(X, y)
    print('SurrogateSearchCV took {0:.2f} seconds for {1} candidates'
          ' parameter settings.'.format((time() - start), n_iter_search))
    print('Best score is {0}'.format(surrogate_search.best_score_))
    print('Best params are {0}'.format(surrogate_search.best_params_))
    # One history entry per search iteration.
    assert len(surrogate_search.params_history_) == n_iter_search
    assert len(surrogate_search.score_history_) == n_iter_search
5,333,229
def default_param_noise_filter(var):
    """
    check whether or not a variable is perturbable or not

    :param var: (TensorFlow Tensor) the variable
    :return: (bool) can be perturb
    """
    # We never perturb non-trainable vars.
    if var not in tf.trainable_variables():
        return False
    # We perturb fully-connected layers. The remaining layers are likely
    # conv or layer norm layers, which we do not wish to perturb (in the
    # former case because they only extract features, in the latter case
    # because we use them for normalization purposes). If you change your
    # network, you will likely want to re-consider which layers to perturb
    # and which to keep untouched.
    return "fully_connected" in var.name
5,333,230
def test_model_instantiation() -> None:
    """A plain Model should be constructible with no arguments."""
    Model()
5,333,231
def cli() -> None:
    """A cli to provision and manage local developer environments."""
    # Intentionally empty body: NOTE(review) — presumably this is a
    # click-style command group whose subcommands are registered elsewhere;
    # confirm against the decorators at the definition site.
5,333,232
def input_fn(request_body, request_content_type):
    """
    An input_fn that loads the pickled tensor by the inference server of
    SageMaker. The function deserializes the inference request, then the
    predict_fn gets invoked. Does preprocessing and returns a tensor
    representation of the source sentence ready to give to the model.

    :param request_body: str The request body
    :param request_content_type: type The request body type.
    :return: None for JSON requests, otherwise a fallback marker string.
    """
    if request_content_type != 'application/json':
        return 'WHAT HAPPEN TO YOU !'
    return None
5,333,233
def get_colors(k):
    """
    Return k colors in a list. We choose from 7 different colors.
    If k > 7 we cycle through the colors again.
    """
    base_colors = ['b', 'r', 'g', 'c', 'm', 'y', 'k']
    return [base_colors[i % len(base_colors)] for i in range(k)]
5,333,234
def update_user(user_id: str, content: Optional[Dict] = None):
    """
    Update a user with new fields in Auth0 with a given user id.

    :param user_id: the user id to update new information with
    :param content: a dictionary of Auth0 matching fields to update content
        See: https://auth0.com/docs/api/management/v2/#!/Users/patch_users_by_id
        for full options
    """
    logger.info(f"Updating the user {user_id} in Auth0 with {content}")
    response = requests.patch(
        url=Auth0ManagementClient.get_url('/users/' + user_id),
        json=content,
        headers=Auth0ManagementClient.get_jwt_header(),
    )
    # Surface any 4xx/5xx from Auth0 to the caller.
    response.raise_for_status()
5,333,235
def test_significance(stat, A, b, eta, mu, cov, z, alpha):
    """
    Compute a p-value by testing one tail.

    Returns ``(reject_h0, params)``: whether ``stat`` exceeds the
    ``1 - alpha`` quantile of the distribution described by ``params``.
    When the fitted scale is NaN or not real, the test is not rejected.
    """
    ppf, params = psi_inf(A, b, eta, mu, cov, z)
    scale = params['scale']
    if np.isnan(scale) or not np.isreal(scale):
        logging.warning("Scale is not real or negative, test reject")
        return False, params
    return stat > ppf(1. - alpha), params
5,333,236
def _get_cluster_group_idx(clusters: np.ndarray) -> nb.typed.List:
    """
    Get start and stop indexes for unique cluster labels.

    Parameters
    ----------
    clusters : np.ndarray
        The ordered cluster labels (noise points are -1).
        NOTE(review): assumes labels are sorted with all noise (-1) points
        first, and that at least one non-noise point exists (otherwise the
        leading scan runs off the array) — confirm with callers.

    Returns
    -------
    nb.typed.List[Tuple[int, int]]
        Tuples with the start index (inclusive) and end index (exclusive)
        of the unique cluster labels.
    """
    # Skip the leading noise points.
    start_i = 0
    while clusters[start_i] == -1:
        start_i += 1
    group_idx, stop_i = nb.typed.List(), start_i
    # Walk runs of equal labels, recording [start, stop) per cluster.
    while stop_i < clusters.shape[0]:
        start_i, label = stop_i, clusters[stop_i]
        while stop_i < clusters.shape[0] and clusters[stop_i] == label:
            stop_i += 1
        group_idx.append((start_i, stop_i))
    return group_idx
5,333,237
def genes_flyaltas2(
    genes: Union[str, list] = None,
    gene_nametype: Optional[str] = "symbol",
    stage: Optional[str] = "male_adult",
    enrich_threshold: Optional[float] = 1.0,
    fbgn_path: Optional[str] = "deml_fbgn.tsv.gz",
) -> pd.DataFrame:
    """
    Annotate a gene list based on the flyaltas2 database.

    Fixes: the gene_nametype check previously used ``is "symbol"``
    (identity comparison against a string literal, which is not guaranteed
    to hold); it now uses ``==``.

    Parameters
    ----------
    genes: `str` or `list` (default: `None`)
        The name of a gene, or a list of genes.
    gene_nametype: `str` (default: `'symbol'`)
        Type of gene name, including `'symbol'` and `'FBgn'`.
    stage: `str` (default: `'male_adult'`)
        The developmental stage of Drosophila melanogaster. Available
        stages are `'larval'`, `'female_adult'` and `'male_adult'`.
    enrich_threshold: `float` (default: `1.0`)
        Threshold for filtering enrichment in FlyAtlas 2.
    fbgn_path: `str` (default: `'deml_fbgn.tsv.gz'`)
        Absolute path to the deml_fbgn.tsv.gz.

    Returns
    -------
    anno_genes: `pandas.DataFrame`
        The genes and the particular tissues in which the genes are
        specifically expressed for each group.
    """
    genes = [genes] if isinstance(genes, str) else genes
    # Translate symbols to FBgn identifiers when needed.
    fbgn_names = (
        symbol2fbgn(gene=genes, datapath=fbgn_path)
        if gene_nametype == "symbol"
        else genes
    )
    # Find the particular tissue in which each gene is specifically expressed.
    anno_genes = pd.DataFrame()
    for fbgn_name in fbgn_names:
        particular_tissues = gene2tissue(fbgn_name, stage, enrich_threshold)
        if particular_tissues is not None:
            anno_genes = pd.concat([anno_genes, particular_tissues], axis=0)
    return anno_genes.astype(str)
5,333,238
def output_elbs(elbs=None):
    """Print an ASCII table of ELBs (name, instance count, DNS name).

    @type elbs: list
    """
    if not elbs:
        print("No ELBs found.")
        exit(0)
    elbs.sort(key=lambda k: k.get('DNSName'))
    table_header = [Color('{autoblue}name{/autoblue}'),
                    Color('{autoblue}instances{/autoblue}'),
                    Color('{autoblue}dns name{/autoblue}')]
    rows = [[elb.get('LoadBalancerName'),
             str(len(elb.get('Instances'))),
             elb.get('DNSName')]
            for elb in elbs]
    output_ascii_table_list(table_title=Color('{autowhite}ELBs{/autowhite}'),
                            table_data=rows,
                            table_header=table_header,
                            inner_heading_row_border=True)
5,333,239
def loadEnv(envName):
    """Activate a virtualenvwrapper environment for wrapped commands.

    Sources the user's bash profile and prefixes subsequent commands with
    ``workon <envName>``, then yields control back to the caller.

    NOTE(review): presumably used as a context manager (e.g. decorated with
    @contextmanager at the definition site) — confirm.
    """
    with prefix("source ~/.bash_profile"):
        with prefix("workon {}".format(envName)):
            yield
5,333,240
def _batch_embed(args, net, vecs: StringDataset, device, char_alphabet=None): """ char_alphabet[dict]: id to char """ # convert it into a raw string dataset if char_alphabet != None: vecs.to_bert_dataset(char_alphabet) test_loader = torch.utils.data.DataLoader(vecs, batch_size=args.test_batch_size, shuffle=False, num_workers=4) net.eval() embedding = [] with tqdm.tqdm(total=len(test_loader), desc="# batch embedding") as p_bar: for i, x in enumerate(test_loader): p_bar.update(1) if char_alphabet != None: for xx in x: xx = tokenizer(xx, return_tensors="pt") # 1 x 768 xx = bert(**xx)[0][0][1].unsqueeze(0) embedding.append(xx.cpu().data.numpy()) else: embedding.append(net(x.to(device)).cpu().data.numpy()) vecs.to_original_dataset() return np.concatenate(embedding, axis=0)
5,333,241
def get_adjacency_matrix(distance_df, sensor_ids, normalized_k=0.1):
    """Build a Gaussian-kernel adjacency matrix from pairwise distances.

    :param distance_df: data frame with three columns: [from, to, distance].
    :param sensor_ids: list of sensor ids.
    :param normalized_k: entries that become lower than normalized_k after
        normalization are set to zero for sparsity.
    :return: adjacency matrix
    """
    n = len(sensor_ids)
    dist_mx = np.full((n, n), np.inf, dtype=np.float32)

    # Map sensor id -> matrix index.
    index_of = {sensor_id: i for i, sensor_id in enumerate(sensor_ids)}

    # Fill known pairwise distances; skip rows mentioning unknown sensors.
    for row in distance_df.values:
        if row[0] in index_of and row[1] in index_of:
            dist_mx[index_of[row[0]], index_of[row[1]]] = row[2]

    # Gaussian kernel, using the std of the finite distances as bandwidth.
    finite_distances = dist_mx[~np.isinf(dist_mx)].flatten()
    adj_mx = np.exp(-np.square(dist_mx / finite_distances.std()))

    # Make the adjacent matrix symmetric by taking the max.
    # adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])

    # Zero out weak connections for sparsity.
    adj_mx[adj_mx < normalized_k] = 0
    return adj_mx
5,333,242
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
                    strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
    """Transforms `int` indexes to strings by mapping ids to tokens,
    concatenating tokens into sentences, and stripping special tokens.

    Args:
        ids: An n-D numpy array or (possibly nested) list of `int` indexes.
        vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concat along the last dimension of the
            tokens into a string separated with a space character.
        strip_pad (str): The PAD token to strip from the strings (i.e.,
            remove leading and trailing PAD tokens). Default is '<PAD>'.
            Set to `None` or `False` to disable the stripping.
        strip_bos (str): The BOS token to strip (leading BOS tokens).
            Default is '<BOS>'. Set to `None` or `False` to disable.
        strip_eos (str): The EOS token to strip (the EOS token and all
            subsequent tokens). Default is '<EOS>'. Set to `None` or
            `False` to disable.
        compat (bool): Whether to convert tokens into compatible text first.

    Returns:
        If :attr:`join` is True, a `(n-1)`-D numpy array (or list) of
        concatenated strings; otherwise an `n`-D numpy array (or list) of
        str tokens.

    Example:

        .. code-block:: python

            text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]

            text = map_ids_to_strs(text_ids, data.vocab)
            # text == ['a sentence', 'parsed from ids']

            text = map_ids_to_strs(
                text_ids, data.vocab, join=False,
                strip_pad=None, strip_bos=None, strip_eos=None)
            # text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
            #          ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
    """
    tokens = vocab.map_ids_to_tokens_py(ids)
    if isinstance(ids, (list, tuple)):
        tokens = tokens.tolist()
    if compat:
        tokens = compat_as_text(tokens)

    joined = str_join(tokens, compat=False)
    stripped = strip_special_tokens(
        joined, strip_pad=strip_pad, strip_bos=strip_bos,
        strip_eos=strip_eos, compat=False)

    return stripped if join else _recur_split(stripped, ids)
5,333,243
def mock_finish_setup():
    """Mock out the finish setup method.

    Patches ``MQTT.async_connect`` to report a successful connection and
    yields the patch object so tests can inspect the recorded calls.
    """
    with patch(
        "openpeerpower.components.mqtt.MQTT.async_connect", return_value=mock_coro(True)
    ) as mock_finish:
        yield mock_finish
5,333,244
def _parse_args() -> argparse.Namespace:
    """Parses and returns the command line arguments."""
    # Use the module docstring as the tool's --help description.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('in_json',
                        type=argparse.FileType('r'),
                        help='The JSON file containing a list of file names '
                        'that the prefix map operations should be applied to')
    parser.add_argument(
        '--prefix-map-json',
        type=argparse.FileType('r'),
        required=True,
        help=
        'JSON file containing an array of prefix map transformations to apply '
        'to the strings before tokenizing. These string literal '
        'transformations are of the form "from=to". All strings with the '
        'prefix `from` will have the prefix replaced with `to`. '
        'Transformations are applied in the order they are listed in the JSON '
        'file.')
    # Optional: when omitted, callers decide where transformed paths go.
    parser.add_argument('--output',
                        type=argparse.FileType('w'),
                        help='File path to write transformed paths to.')
    return parser.parse_args()
5,333,245
def rand_ascii_str(length):
    """Generates a random string of specified length, composed of ascii
    letters and digits.

    Args:
        length: The number of characters in the string.

    Returns:
        The random string generated.
    """
    return ''.join(random.choice(ascii_letters_and_digits)
                   for _ in range(length))
5,333,246
def set_achievement_disabled(aid, disabled):
    """
    Updates an achievement's availability.

    Args:
        aid: the achievement's aid
        disabled: whether or not the achievement should be disabled.
    Returns:
        The updated achievement object.
    """
    changes = {"disabled": disabled}
    return update_achievement(aid, changes)
5,333,247
def test_stochatreat_stratum_ids(df, misfit_strategy, stratum_cols):
    """Tests that the function returns the right number of stratum ids."""
    treats = stochatreat(
        data=df,
        stratum_cols=stratum_cols,
        treats=2,
        idx_col="id",
        random_state=42,
        misfit_strategy=misfit_strategy,
    )
    n_unique_strata = len(df[stratum_cols].drop_duplicates())

    n_unique_stratum_ids = len(treats["stratum_id"].drop_duplicates())
    if misfit_strategy == "global":
        # The global strategy may add one synthetic stratum for misfits,
        # depending on whether there are misfits.
        assert (
            (n_unique_stratum_ids == n_unique_strata)
            or (n_unique_stratum_ids - 1 == n_unique_strata)
        )
    else:
        assert n_unique_stratum_ids == n_unique_strata
5,333,248
def test_azure_machine_pool_default(release, cluster_v1alpha4, azuremachinepool) -> None:
    """
    test_azure_machine_pool_default tests defaulting of an AzureMachinePool where all required values are empty strings.

    :param release: Release CR which is used by the Cluster.
    :param cluster_v1alpha4: Cluster CR which uses the release and matches the AzureCluster.
    :param azuremachinepool: AzureMachinePool CR with empty strings which matches the Cluster CR.
    """
    # The webhook must have stamped the watch-filter label ...
    labels = azuremachinepool['metadata']['labels']
    assert labels['cluster.x-k8s.io/watch-filter'] == ensure.watch_label
    # ... and defaulted the empty location to the installation region.
    assert azuremachinepool['spec']['location'] == "westeurope"
5,333,249
def get_block(in_dt: datetime):
    """Get the BlockNumber instance at or before the datetime timestamp."""
    # Force the timestamp to be interpreted as UTC regardless of any existing
    # tzinfo; a naive datetime is therefore treated as UTC wall-clock time.
    return BlockNumber.from_timestamp(in_dt.replace(tzinfo=timezone.utc).timestamp())
5,333,250
def get_channels(posts):
    """
    <summary> Returns post channel (twitter/facebook)</summary>
    <param name="posts" type="list"> List of posts </param>
    <returns> String "twitter" or "facebook" </returns>
    """
    # A post short enough for a tweet (<= 140 chars) is labelled "twitter",
    # anything longer is labelled "facebook".  Iterate over the indices of
    # post_id so the output length matches the id column, as before.
    return [
        "twitter" if len(posts['post_text'][idx]) <= 140 else "facebook"
        for idx in range(len(posts['post_id']))
    ]
5,333,251
def clean_text(text):
    """Normalize raw (possibly HTML) text for NLP processing.

    Pipeline: strip HTML tags, lowercase, replace separator-like symbols
    with spaces, drop disallowed symbols, and remove stopwords.

        text: a string

        return: modified initial string
    """
    text = BeautifulSoup(text, "lxml").text # HTML decoding
    text = text.lower() # lowercase text
    text = REPLACE_BY_SPACE_RE.sub(' ', text) # replace REPLACE_BY_SPACE_RE symbols by space in text
    text = BAD_SYMBOLS_RE.sub('', text) # delete symbols which are in BAD_SYMBOLS_RE from text
    text = ' '.join(word for word in text.split() if word not in STOPWORDS) # delete stopwords from text
    return text
5,333,252
def assert_allclose(
    actual: List[numpy.ndarray], desired: List[float], rtol: float, atol: float
):
    """
    usage.scipy: 1
    """
    # Auto-generated API stub: records one observed call signature of
    # numpy.testing.assert_allclose (as used once by scipy); the body is
    # intentionally empty.
    ...
5,333,253
def ConvertToTypeEnum(type_enum, airflow_executor_type):
    """Converts airflow executor type string to enum.

    Args:
      type_enum: AirflowExecutorTypeValueValuesEnum, executor type enum value.
      airflow_executor_type: string, executor type string value.

    Returns:
      AirflowExecutorTypeValueValuesEnum: the executor type enum value.
    """
    # The enum class itself performs the lookup; an unknown string will raise
    # from the enum constructor.
    return type_enum(airflow_executor_type)
5,333,254
def message(message,GUI,error=False):
    """Prints message in manner dictated by GUI and error.

    GUI mode shows a dialog; error mode (non-GUI) raises SDEValueError;
    otherwise the message is printed to stdout.
    """
    if GUI:
        # GUI mode: show a dialog instead of writing to the console.
        openMessageBox(message)
        return
    if error:
        # Non-GUI error: surface it as an exception for the caller.
        raise SDEValueError(message)
    print(message)
5,333,255
def plot_univariate_series(
        series: pd.Series,
        title: str,
        xlabel: str,
        ylabel: str,
        graph_type: GraphType = None,
        **kwargs) -> None:
    """Plots a series as a bar or scatter chart.

    Args:
        series (pd.Series): series to be plotted
        title (str): graph title
        xlabel (str): x-axis label
        ylabel (str): y-axis label
        graph_type (GraphType, optional): graph type; None defaults to a
            bar chart.
        **kwargs: forwarded to the plotly express call.
    """
    labels = {"x": xlabel, "y": ylabel}
    fig = None
    if graph_type is None or graph_type == GraphType.BAR:
        fig = px.bar(x=series.index,
                     y=series,
                     color=series.index,
                     title=title,
                     labels=labels,
                     **kwargs)
    elif graph_type == GraphType.LINE:
        # BUG FIX: the scatter figure was previously discarded (never
        # assigned to `fig`), so fig.show() crashed with AttributeError.
        fig = px.scatter(x=series.index,
                         y=series,
                         title=title,
                         labels=labels,
                         color=series.index,
                         **kwargs)
    if fig is not None:
        fig.show()
5,333,256
def main():
    """
    Command line tool for running simulate_data.py

    Parses the -i/--inputs configuration path, printing help and exiting
    with status 1 when invoked with no arguments, then delegates to
    myexecute().
    """
    parser = ArgumentParser(description="Generate an artificial data set.")
    # Pop and re-append the default optional group so that the custom
    # "required arguments" group is listed first in --help output.
    optional = parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    required.add_argument('-i', action='store', required=True, dest='inputs_cfg', type=str,
                          help="Configuration script of inputs")
    parser._action_groups.append(optional)

    # No arguments at all: show usage instead of an argparse error.
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    parse_args = parser.parse_args()

    # Initialize parameter values
    inputs_cfg = parse_args.inputs_cfg

    # Run task using inputs from configuration script.
    myexecute(inputs_cfg)
5,333,257
def image2array(image):
    """Convert a PIL Image to a float32 NumPy array in (bands, H, W) order.

    Args:
        image: PIL image in 'L', 'RGB' or 'CMYK' mode.

    Returns:
        numpy.float32 array of shape (num_bands, height, width).
    """
    assert image.mode in ('L', 'RGB', 'CMYK')
    # BUG FIX: numpy.fromstring and Image.tostring were deprecated and then
    # removed; frombuffer/tobytes are the supported replacements.
    arr = numpy.frombuffer(image.tobytes(), numpy.uint8)
    # image.size is (width, height); bands become the trailing axis.
    arr = arr.reshape(image.size[1], image.size[0], len(image.getbands()))
    # Reorder (H, W, C) -> (C, H, W); astype also copies out of the
    # read-only frombuffer view.
    return arr.swapaxes(0, 2).swapaxes(1, 2).astype(numpy.float32)
5,333,258
def enum_name_callback(ctx: 'mypy.plugin.AttributeContext') -> Type:
    """This plugin refines the 'name' attribute in enums to act as if
    they were declared to be final.

    For example, the expression 'MyEnum.FOO.name' normally is inferred
    to be of type 'str'.

    This plugin will instead make the inferred type be a 'str' where the
    last known value is 'Literal["FOO"]'. This means it would be legal to
    use 'MyEnum.FOO.name' in contexts that expect a Literal type, just like
    any other Final variable or attribute.

    This plugin assumes that the provided context is an attribute access
    matching one of the strings found in 'ENUM_NAME_ACCESS'.
    """
    enum_field_name = _extract_underlying_field_name(ctx.type)
    if enum_field_name is None:
        # Not a concrete member access (e.g. a variable of the enum type):
        # keep mypy's default inference.
        return ctx.default_attr_type
    else:
        str_type = ctx.api.named_generic_type('builtins.str', [])
        # Attach the member name as the last known literal value of str.
        literal_type = LiteralType(enum_field_name, fallback=str_type)
        return str_type.copy_modified(last_known_value=literal_type)
5,333,259
def test_sort_with_axis_graph_mode():
    """
    Feature: sort op support the axis value not -1 with graph mode.
    Description: sort op support the axis value not -1.
    Expectation: same as the calculation result on CPU.
    """
    input_tensor = Tensor(np.random.random([3, 7, 7, 2]), mindspore.float16)
    expected = cpu(input_tensor, context.GRAPH_MODE)
    actual = ascend(input_tensor, context.GRAPH_MODE)
    # First output compared with a small float16 tolerance; second output
    # compared exactly (zero tolerance).
    assert np.allclose(expected[0].asnumpy(), actual[0].asnumpy(), 0.00001, 0.00001)
    assert np.allclose(expected[1].asnumpy(), actual[1].asnumpy(), 0, 0)
5,333,260
def test_can_add_reader(loop, sock_pair):
    """Verify that we can add a reader callback to an event loop."""
    def can_read():
        # Reader callback: invoked whenever srv_sock becomes readable.
        if fut.done():
            return
        data = srv_sock.recv(1)
        if len(data) != 1:
            return

        nonlocal got_msg
        got_msg = data

        # Indicate that we're done
        fut.set_result(None)
        srv_sock.close()

    def write():
        # Client side: send one byte, then close so the server sees EOF.
        client_sock.send(ref_msg)
        client_sock.close()

    ref_msg = b'a'
    client_sock, srv_sock = sock_pair
    loop.call_soon(write)

    # Registering the reader must add exactly one read notifier to the loop.
    exp_num_notifiers = len(loop._read_notifiers) + 1
    got_msg = None
    fut = asyncio.Future()
    loop.add_reader(srv_sock.fileno(), can_read)
    assert len(loop._read_notifiers) == exp_num_notifiers, 'Notifier should be added'

    # Wait (bounded by a 1s timeout) until the callback has consumed the byte.
    loop.run_until_complete(asyncio.wait_for(fut, timeout=1.0))

    assert got_msg == ref_msg
5,333,261
def is_forward_angle(n, theta):
    """
    if a wave is traveling at angle theta from normal in a medium with index n,
    calculate whether or not this is the forward-traveling wave (i.e., the one
    going from front to back of the stack, like the incoming or outgoing waves,
    but unlike the reflected wave). For real n & theta, the criterion is simply
    -pi/2 < theta < pi/2, but for complex n & theta, it's more complicated.
    See https://arxiv.org/abs/1603.02720 appendix D. If theta is the forward
    angle, then (pi-theta) is the backward angle and vice-versa.
    """
    # Gain media (n.real * n.imag < 0) make the forward/backward distinction
    # ambiguous; refuse them outright.
    assert n.real * n.imag >= 0, ("For materials with gain, it's ambiguous which "
                                  "beam is incoming vs outgoing. See "
                                  "https://arxiv.org/abs/1603.02720 Appendix C.\n"
                                  "n: " + str(n) + "   angle: " + str(theta))
    ncostheta = n * cos(theta)
    if abs(ncostheta.imag) > 100 * EPSILON:
        # Either evanescent decay or lossy medium. Either way, the one that
        # decays is the forward-moving wave
        answer = (ncostheta.imag > 0)
    else:
        # Forward is the one with positive Poynting vector
        # Poynting vector is Re[n cos(theta)] for s-polarization or
        # Re[n cos(theta*)] for p-polarization, but it turns out they're consistent
        # so I'll just assume s then check both below
        answer = (ncostheta.real > 0)
    # convert from numpy boolean to the normal Python boolean
    answer = bool(answer)
    # double-check the answer ... can't be too careful!
    error_string = ("It's not clear which beam is incoming vs outgoing. Weird"
                    " index maybe?\n"
                    "n: " + str(n) + "   angle: " + str(theta))
    # Sanity check: for a forward wave, both criteria (and the p-polarization
    # variant) must at least not point the other way; tolerance 100*EPSILON.
    if answer is True:
        assert ncostheta.imag > -100 * EPSILON, error_string
        assert ncostheta.real > -100 * EPSILON, error_string
        assert (n * cos(theta.conjugate())).real > -100 * EPSILON, error_string
    else:
        assert ncostheta.imag < 100 * EPSILON, error_string
        assert ncostheta.real < 100 * EPSILON, error_string
        assert (n * cos(theta.conjugate())).real < 100 * EPSILON, error_string
    return answer
5,333,262
def get_data(cfg, working_dir, global_parameters, res_incl=None, res_excl=None):
    """Reads experimental measurements.

    Locates the sub-package whose name appears in the configured
    ``experiment_type`` (e.g. '..._cpmg'), imports its ``reading`` module and
    delegates to its ``read_data`` function.

    Args:
        cfg: parsed configuration object.
        working_dir: directory containing the data files.
        global_parameters: dict; must contain 'experiment_type'.
        res_incl: optional residues to include.
        res_excl: optional residues to exclude.

    Returns:
        Whatever the experiment-specific ``read_data`` returns.
    """
    import importlib

    exp_type = global_parameters['experiment_type']
    path = os.path.dirname(__file__)

    pkgs = [
        modname
        for _, modname, ispkg in pkgutil.iter_modules([path])
        if ispkg and modname in exp_type
    ]

    if pkgs:
        # Lexicographically greatest match wins, matching the original code.
        pkg = max(pkgs)
    else:
        exit("\nUnknown data type {:s}"
             "\nDid you forget _cpmg, _cest, etc?"
             "\n".format(global_parameters['experiment_type']))

    # BUG FIX: __import__(..., level=-1) is Python-2-only (implicit relative
    # import) and raises ValueError on Python 3; importlib performs the
    # equivalent submodule import.
    module_name = '.'.join([pkg, 'reading'])
    try:
        reading = importlib.import_module(module_name)
    except ImportError:
        # Fall back to a package-relative import when the experiment
        # sub-packages are not importable as top-level modules.
        reading = importlib.import_module('.' + module_name,
                                          package=__package__)

    return reading.read_data(cfg, working_dir, global_parameters, res_incl,
                             res_excl)
5,333,263
def main_program_loop(glc,geh,gth):
    """GuiLayoutContext,GuiEventHandler,GuiTimeHandler

    Runs the pygame event loop until a QUIT event arrives or the user
    interrupts with Ctrl-C, dispatching mouse/keyboard events to the
    GuiEventHandler and ticking the GuiTimeHandler each frame.
    """
    done=False
    t=0
    new_select_possible=True
    selected=None
    while not done:
        try:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    done = True
                elif event.type == pygame.MOUSEBUTTONDOWN and event.button==1:
                    #Left button of mouse
                    geh.handle_left_click()
                elif event.type == pygame.MOUSEBUTTONDOWN and event.button==3:
                    #Right button of mouse
                    geh.handle_right_click(event)
                elif event.type == pygame.MOUSEBUTTONUP:
                    geh.handle_unclick()
                elif event.type == pygame.MOUSEMOTION:
                    # Forward drag motion to whichever component is actively
                    # selected: tables first, then rects and entries.
                    for i,table in enumerate(glc.tables):
                        if geh.actively_selected_draggable_component==table:
                            geh.drag_table(table,event)
                    for i,rect in enumerate(glc.rects+glc.entries):
                        if geh.actively_selected_draggable_component==rect:
                            geh.drag_rect(rect,event)
                elif event.type == pygame.KEYDOWN:
                    geh.handle_key_down(event)
                elif event.type == pygame.KEYUP:
                    geh.handle_key_up(event)
                elif event.type == pygame.QUIT:
                    # NOTE(review): unreachable — QUIT is already handled by
                    # the first branch above.
                    done = True
            pygame.event.pump()
            keys = pygame.key.get_pressed()
            pygame.display.flip()
        except KeyboardInterrupt:
            # Ctrl-C: tear down pygame immediately.
            pygame.display.quit()
            pygame.quit()
        gth.tick()
    pygame.display.quit()
    pygame.quit()
5,333,264
def fit_cubic1(points,rotate,properties=None):
    """This function attempts to fit a given set of points to a cubic
    polynomial line: y = a3*x^3 + a2*x^2 + a1*x + a0

    The points are first rotated by ``rotate`` degrees about Z, a weighted
    least-squares cubic is fitted via the normal equations, and the residual
    error is evaluated by error_residual1 (which also rotates back).
    """
    r=mathutils.Matrix.Rotation(math.radians(rotate),4,'Z')
    rr=mathutils.Matrix.Rotation(math.radians(-rotate),4,'Z')
    # Weighted sums for the 4x4 normal-equation system of a cubic fit.
    Sxy = 0
    Sx = 0
    Sy = 0
    Sx2 = 0
    Sx2y = 0
    Sx3y = 0
    Sx3 = 0
    Sx4 = 0
    Sx5 = 0
    Sx6 = 0
    Sw = 0
    for p in points:
        # Rotate each sample into the fitting frame before accumulating.
        pr=p['point']*r
        x = pr.x
        y = pr.y
        Sxy = Sxy + x*y * p['weight']
        Sx = Sx + x * p['weight']
        Sy = Sy + y * p['weight']
        Sx2 = Sx2 + math.pow(x,2) * p['weight']
        Sx2y = Sx2y+ math.pow(x,2)*y * p['weight']
        Sx3y = Sx3y+ math.pow(x,3)*y * p['weight']
        Sx3 = Sx3 + math.pow(x,3) * p['weight']
        Sx4 = Sx4 + math.pow(x,4) * p['weight']
        Sx5 = Sx5 + math.pow(x,5) * p['weight']
        Sx6 = Sx6 + math.pow(x,6) * p['weight']
        Sw += p['weight']
    N = Sw
    # Augmented matrix [A | b] of the weighted normal equations; solved by
    # Gaussian elimination in like_a_gauss.
    A=[[N,   Sx,  Sx2,Sx3,Sy],
       [Sx,  Sx2, Sx3,Sx4,Sxy],
       [Sx2, Sx3, Sx4, Sx5,Sx2y],
       [Sx3, Sx4, Sx5, Sx6,Sx3y]]
    xM=like_a_gauss(A)
    # Solution column holds the polynomial coefficients a0..a3.
    a0=xM[0][4]
    a1=xM[1][4]
    a2=xM[2][4]
    a3=xM[3][4]
    def line_func(x,a):
        # Evaluate the fitted cubic at x with coefficient vector a.
        return a[0] + a[1]*x + a[2]*math.pow(x,2) + a[3]*math.pow(x,3)
    points=sort_index1(points,r)
    return error_residual1(points,r,rr,properties,line_func,[a0,a1,a2,a3])
5,333,265
def tile_from_slippy_map(root, x, y, z):
    """Retrieve a single tile from a slippy map dir.

    Args:
        root: base directory of the slippy-map tree (layout ``root/z/x/y.ext``).
        x, y, z: tile coordinates; strings or ints are accepted.

    Returns:
        A ``(mercantile.Tile(x, y, z), path)`` tuple for the first matching
        tile file, or None when no tile exists at those coordinates.
    """
    # Generalization: accept int coordinates as well as strings (os.path.join
    # previously raised TypeError for non-string components).
    path = glob.glob(
        os.path.join(os.path.expanduser(root), str(z), str(x), str(y) + ".*"))
    if not path:
        return None
    return mercantile.Tile(x, y, z), path[0]
5,333,266
def init(quiet_mode, filename_only):
    """ Initialize output based on quiet/filename-only flags

    Configures the module-level 'write' and 'filename' loggers:
    normal mode prints INFO+, quiet mode prints only ERROR+, and
    filename-only mode prints just file names on stdout while
    suppressing other output below ERROR.
    """
    global quiet
    # global quiet is used to pass -q to Sphinx build so it should be set when
    # either in quiet mode or filename-only mode
    quiet = quiet_mode or filename_only
    # always handle write as it also output all errors
    write.addHandler(logging.StreamHandler())
    if filename_only:
        # in filename-only mode, also handle filename and suppress other
        # messages below ERROR level
        filename.addHandler(logging.StreamHandler(sys.stdout))
        write.setLevel(logging.ERROR)
        filename.setLevel(logging.INFO)
    elif quiet:
        # in quiet mode, only display ERROR and above
        write.setLevel(logging.ERROR)
    else:
        # otherwise display INFO
        write.setLevel(logging.INFO)
5,333,267
def get_labelstats_df_list(fimage_list, flabel_list):
    """loop over lists of image and label files and extract label statisics
    as pandas.DataFrame

    Args:
        fimage_list: a single image filename or a list of them.
        flabel_list: a single label filename or a list of them.

    Returns:
        pandas.DataFrame with one row per label, per (image, label) pair,
        annotated with the 'imagefile' and 'labelfile' columns.
    """
    # Accept scalars as well as lists.
    if np.ndim(fimage_list) == 0:
        fimage_list = [fimage_list]
    if np.ndim(flabel_list) == 0:
        flabel_list = [flabel_list]

    columns = ['imagefile', 'labelfile',
               'label', 'mean', 'var', 'min', 'max', 'median', 'count',
               'sum', 'boundingbox', 'voxels']

    frames = []
    for fimage in fimage_list:
        for flabel in flabel_list:
            df = get_labelstats_df(fimage, flabel)
            df['imagefile'] = fimage
            df['labelfile'] = flabel
            frames.append(df)

    # BUG FIX: DataFrame.append was removed in pandas 2.0; concatenating a
    # seed frame carrying the column order reproduces the old behaviour
    # (column ordering and preserved per-frame indices).
    return pd.concat([pd.DataFrame(columns=columns)] + frames)
5,333,268
def index(web):
    """The web.request.params is a dictionary, pointing to falcon.Request directly."""
    # Pull the 'name' query parameter and greet it.
    return "Hello {}!\n".format(web.request.params["name"])
5,333,269
def process_object(obj):
    """
    Recursively process object loaded from json

    When the dict in appropriate(*) format is found, make object from it.
    (*) appropriate is defined in create_object function docstring.
    """
    if isinstance(obj, list):
        # Process each element, preserving order.
        return [process_object(item) for item in obj]
    if isinstance(obj, dict):
        processed = {key: process_object(value) for key, value in obj.items()}
        # The AS_OBJECT marker on the *original* dict decides whether the
        # processed mapping is turned into an object.
        if obj.get(ObjSpecification.AS_OBJECT, False):
            return create_object(processed)
        return processed
    # Scalars pass through unchanged.
    return obj
5,333,270
def remove_role(principal, role):
    """Removes role from passed principal.

    **Parameters:**

    principal
        The principal (actor or group) from which the role is removed.

    role
        The role which is removed.

    Returns True when a matching global relation was deleted, False when
    no such relation existed.
    """
    try:
        # Only global (content-less) role assignments are targeted; a
        # content-scoped assignment is a different relation entirely.
        if isinstance(principal, Actor):
            ppr = PrincipalRoleRelation.objects.get(
                actor=principal, role=role, content_id=None, content_type=None)
        else:
            ppr = PrincipalRoleRelation.objects.get(
                group=principal, role=role, content_id=None, content_type=None)

    except PrincipalRoleRelation.DoesNotExist:
        return False
    else:
        ppr.delete()

    return True
5,333,271
def set_var_input_validation(
    prompt="",
    predicate=lambda _: True,
    failure_description="Value is illegal",
):
    """Validating user input by predicate.

    Vars:
    - prompt: message displayed when prompting for user input.
    - predicate: lambda function to verify a condition.
    - failure_description: message displayed when predicate's condition is not met.

    Returns:
    - The value entered by the user if predicate's condition is met and after
    confirmation by the user.
    - If the predicate fails failure_description is displayed
    - If literal_eval fails an error message containing the raised exception.
    """
    # Loop until the user both enters a valid value and confirms it.
    while True:
        try:
            # literal_eval parses Python literals (ints, lists, ...) safely,
            # unlike eval; malformed input raises and restarts the loop.
            value = literal_eval(input(f"{Color.INFORMATION}{prompt}{Color.END}\n"))
            if predicate(value):
                a = literal_eval(
                    input(
                        f"{Color.INFORMATION}Is this correct: {value} ? enter 1 to confirm, 0 to retry{Color.END}\n"
                    )
                )
                if a == 1:
                    return value
            else:
                print(f"{Color.FAIL}{failure_description}{Color.END}")
        except Exception as e:
            # Broad catch is deliberate here: any parse/predicate failure is
            # reported and the prompt is re-issued.
            print(f"{Color.FAIL}{e} was raised, try again{Color.END}")
5,333,272
def mad(data, axis=None):
    """Mean absolute deviation of *data* about its mean.

    Args:
        data: array-like input.
        axis: axis to reduce over; None reduces over all elements.

    Returns:
        mean(|data - mean(data, axis)|) along the given axis.
    """
    deviations = np.abs(data - np.mean(data, axis))
    return np.mean(deviations, axis)
5,333,273
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
    """Pad the 2nd and 3rd dimensions of a 4D tensor
    with "padding[0]" and "padding[1]" (resp.) zeros left and right.

    padding is ((top, bottom), (left, right)); the padded axes are the two
    spatial dimensions, whose position depends on data_format.
    """
    assert len(padding) == 2
    assert len(padding[0]) == 2
    assert len(padding[1]) == 2
    top_pad, bottom_pad = padding[0]
    left_pad, right_pad = padding[1]
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ' + str(data_format))
    input_shape = x.shape
    if data_format == 'channels_first':
        # (batch, channels, rows, cols): pad axes 2 and 3.
        output_shape = (input_shape[0],
                        input_shape[1],
                        input_shape[2] + top_pad + bottom_pad,
                        input_shape[3] + left_pad + right_pad)
        output = T.zeros(output_shape)
        # Slice of the zero tensor into which the original data is copied.
        indices = (slice(None),
                   slice(None),
                   slice(top_pad, input_shape[2] + top_pad),
                   slice(left_pad, input_shape[3] + left_pad))

    else:
        # (batch, rows, cols, channels): pad axes 1 and 2.
        output_shape = (input_shape[0],
                        input_shape[1] + top_pad + bottom_pad,
                        input_shape[2] + left_pad + right_pad,
                        input_shape[3])
        output = T.zeros(output_shape)
        indices = (slice(None),
                   slice(top_pad, input_shape[1] + top_pad),
                   slice(left_pad, input_shape[2] + left_pad),
                   slice(None))
    # Theano tensors are immutable: set_subtensor returns a new tensor with
    # x written into the interior slice of the zero-padded output.
    y = T.set_subtensor(output[indices], x)
    # Preserve Keras' static shape bookkeeping.
    y._keras_shape = output_shape
    return y
5,333,274
def doms_hit_pass_threshold(mc_hits, threshold, pass_k40):
    """Return True when at least ``threshold`` distinct DOMs are hit by the
    given Monte Carlo hits, False otherwise.

    A zero threshold always passes; events without MC hits (pure noise)
    pass or fail according to ``pass_k40``.
    """
    if threshold == 0:
        return True
    if len(mc_hits) == 0:
        # Pure-noise events carry no MC hits; pass_k40 decides their fate.
        return bool(pass_k40)
    hit_doms = {pmt_id_to_dom_id(hit.pmt_id) for hit in mc_hits}
    return len(hit_doms) >= threshold
5,333,275
def differ_filelist_with_two_dirs(s_p_dir, d_p_dir, filelist, mode='0'):
    """
    Diff the listed files between two directory trees using difflib and
    return the concatenated unified-diff text.

    Args:
        s_p_dir: source parent directory.
        d_p_dir: destination parent directory.
        filelist: relative file paths to compare.
        mode: '1' -> treat the source side as absent, '2' -> treat the
            destination side as absent, anything else -> compare both sides.

    Returns:
        str: unified diff output, post-processed line by line via patch_line,
        with a blank line inserted before each file header ('---' line).
    """
    output = ''
    for f_path in filelist:
        s_file_path = os.path.join(s_p_dir, f_path) if mode != '1' else None
        d_file_path = os.path.join(d_p_dir, f_path) if mode != '2' else None
        # BUG FIX: the old open mode 'rbU' is invalid on Python 3, and the
        # resulting bytes lines could not be mixed with str file names in
        # unified_diff. Read as text with universal newlines instead.
        fromlines = ''
        tolines = ''
        if s_file_path:
            with open(s_file_path, 'r') as fd1:
                fromlines = fd1.readlines()
        if d_file_path:
            with open(d_file_path, 'r') as fd2:
                tolines = fd2.readlines()
        dresult = difflib.unified_diff(fromlines, tolines, f_path, f_path)
        for line in dresult:
            line = patch_line(line)
            if line.startswith('---'):
                line = '\n' + line
            output += line
    return output
5,333,276
def _parseLocalVariables(line): """Accepts a single line in Emacs local variable declaration format and returns a dict of all the variables {name: value}. Raises ValueError if 'line' is in the wrong format. See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html """ paren = '-*-' start = line.find(paren) + len(paren) end = line.rfind(paren) if start == -1 or end == -1: raise ValueError("%r not a valid local variable declaration" % (line,)) items = line[start:end].split(';') localVars = {} for item in items: if len(item.strip()) == 0: continue split = item.split(':') if len(split) != 2: raise ValueError("%r contains invalid declaration %r" % (line, item)) localVars[split[0].strip()] = split[1].strip() return localVars
5,333,277
def add_custom_role(bot, plugin_name, role_name, role):
    """Adds the given role as a custom internal role used by the bot."""
    # Fetch (lazily creating and persisting) the per-guild 'custom_roles'
    # mapping from the plugin's stored data, then record the role by name.
    roles = get(
        bot, plugin_name, 'custom_roles', guild_id=role.guild.id,
        create=True, default={}, save=True)
    roles[role_name] = role.id
5,333,278
def NLL(mu, sigma, mixing, y): """Computes the mean of negative log likelihood for P(y|x) y = T.matrix('y') # (minibatch_size, output_size) mu = T.tensor3('mu') # (minibatch_size, output_size, n_components) sigma = T.matrix('sigma') # (minibatch_size, n_components) mixing = T.matrix('mixing') # (minibatch_size, n_components) """ # multivariate Gaussian exponent = -0.5 * T.inv(sigma) * T.sum((y.dimshuffle(0,1,'x') - mu)**2, axis=1) normalizer = (2 * np.pi * sigma) exponent = exponent + T.log(mixing) - (y.shape[1]*.5)*T.log(normalizer) max_exponent = T.max(exponent ,axis=1, keepdims=True) mod_exponent = exponent - max_exponent gauss_mix = T.sum(T.exp(mod_exponent),axis=1) log_gauss = max_exponent + T.log(gauss_mix) res = -T.mean(log_gauss) return res
5,333,279
def get_non_subscribed_trainers(user) -> Optional[str]:
    """ returns all trainers the user is not subscrbed to

    Queries all trainer user names, excluding those the given customer user
    already has a subscription with, sorted case-insensitively.

    Returns the fetched rows on success; on a sqlite3 error the exception
    object itself is returned (NOTE(review): callers must distinguish rows
    from an exception — consider raising instead; also the initial
    'error = None' assignment is dead).
    """
    conn = get_db()
    error = None
    try:
        # Outer query: every trainer; inner query: trainers already
        # subscribed to by this user's customer record.
        trainers = conn.execute("""SELECT distinct u_name 
                            FROM user, trainer 
                            where t_userID = u_userID 
                            and u_trainer = 1
                            and u_name NOT IN (
                                SELECT u2.u_name
                                FROM user u1, user u2, trainer, customer, subscription
                                where c_userID = u1.u_userID
                                and u1.u_name = ?
                                and su_customerID = c_customerID
                                and t_trainerID = su_trainerID
                                and u2.u_userID = t_userID
                            )
                            Order by u_name COLLATE NOCASE""", (user,)).fetchall()
        # print(trainers)
        close_db()
        return trainers
    except sqlite3.Error as error:
        print(error)
        return error
5,333,280
def __clean_datetime_value(datetime_string):
    """Normalize an ISO-8601 timestamp string for downstream parsing.

    Replaces the 'T' date/time separator with a space and strips the 'Z'
    UTC designator.  None is passed through unchanged.

    Raises:
        TypeError: if the value is neither a string nor None.
    """
    if datetime_string is None:
        return None
    if not isinstance(datetime_string, str):
        raise TypeError("Expected datetime_string to be of type string (or None)")
    return datetime_string.replace("T", " ").replace("Z", "")
5,333,281
def counts(df):
    """Print value counts for each column of a dataframe

    :param df: Pandas dataframe
    :return: void
    """
    # Iterate (name, Series) pairs and print each column's value counts
    # between header/footer separators.
    for name, column in df.items():
        print("---- %s ---" % name)
        print(column.value_counts())
        print("--------\n")
5,333,282
def get_metric_by_name(metric: str, *args, **kwargs) -> Metric:
    """Returns metric using given `metric`, `args` and `kwargs`

    Args:
        metric (str): name of the metric; must be a key of the module-level
            ``__metric_mapper__`` registry.

    Returns:
        Metric: requested metric as Metric, constructed with *args/**kwargs.

    Raises:
        AssertionError: if the name is not registered (NOTE: stripped
        under ``python -O``).
    """
    assert metric in __metric_mapper__, "given metric {} is not found".format(metric)
    # Look up the metric class in the registry and instantiate it.
    return __metric_mapper__[metric](*args, **kwargs)
5,333,283
def parse(handle,format):
    """Parses an output file of motif finding programs.

    Currently supported formats:
     - AlignAce
     - MEME

    You can also use single-motif formats, although the Bio.Motif.read()
    function is simpler to use in this situation.
     - jaspar-pfm
     - jaspar-sites

    For example:

    >>> from Bio import Motif
    >>> for motif in Motif.parse(open("Motif/alignace.out"),"AlignAce") :
    ...     print(motif.consensus())
    TCTACGATTGAG
    CTGCACCTAGCTACGAGTGAG
    GTGCCCTAAGCATACTAGGCG
    GCCACTAGCATAGCAGGGGGC
    CGACTCAGAGGTT
    CCACGCTAAGAGAAGTGCCGGAG
    GCACGTCCCTGATCA
    GTCCATCGCAAAGCTTGGGGC
    GAGATCAGAGGGCCG
    TGTACGCGGGG
    GACCAGAGCCTCGCATGGGGG
    AGCGCGCGTG
    GCCGGTTGCTGTTCATTAGG
    ACCGACGGCAGCTAAAAGGG
    GACGCCGGGGAT
    CGACTCGCGCTTACAAGG
    """
    try:
        parser=_parsers[format]

    except KeyError:
        # Not a multi-motif parser; fall back to single-motif reader formats.
        try:
            reader=_readers[format]
        except KeyError:
            # BUG FIX: this was a bare 'except', which also swallowed
            # KeyboardInterrupt/SystemExit and any unrelated error.
            raise ValueError("Wrong parser format")
        else:
            # A reader yields exactly one motif.
            yield reader(handle)

    else:
        # A full parser may yield several motifs.
        for m in parser().parse(handle).motifs:
            yield m
5,333,284
def p_replacecmd_replace(p):
    """replacecmd : REPLACE wc_stringlist"""
    # PLY grammar action (the docstring above IS the grammar rule — do not
    # edit it): build a COMMAND node for 'replace' and adopt the children
    # of the parsed string list as its arguments.
    p[0] = ParseTreeNode('COMMAND', raw='replace')
    p[0].add_children(p[2].children)
5,333,285
def Square(inputs, **kwargs):
    """Calculate the square of input.

    Parameters
    ----------
    inputs : Tensor
        The input tensor.

    Returns
    -------
    Tensor
        The square result.

    """
    CheckInputs(inputs, 1)
    # Capture all call arguments for the operator definition.
    arguments = ParseArguments(locals())

    output = Tensor.CreateOperator(nout=1, op_type='Square', **arguments)

    # Element-wise op: the output shape mirrors the input shape when known.
    if inputs.shape is not None:
        output.shape = inputs.shape[:]

    return output
5,333,286
def make_call(rpc_name, request, retries=None, timeout=None):
    """Make a call to the Datastore API.

    Args:
        rpc_name (str): Name of the remote procedure to call on Datastore.
        request (Any): An appropriate request object for the call, eg,
            `entity_pb2.LookupRequest` for calling ``Lookup``.
        retries (int): Number of times to potentially retry the call. If
            :data:`None` is passed, will use :data:`_retry._DEFAULT_RETRIES`.
            If :data:`0` is passed, the call is attempted only once.
        timeout (float): Timeout, in seconds, to pass to gRPC call. If
            :data:`None` is passed, will use :data:`_DEFAULT_TIMEOUT`.

    Returns:
        tasklets.Future: Future for the eventual response for the API call.
    """
    api = stub()
    method = getattr(api, rpc_name)

    if retries is None:
        retries = _retry._DEFAULT_RETRIES

    if timeout is None:
        timeout = _DEFAULT_TIMEOUT

    @tasklets.tasklet
    def rpc_call():
        # Issue the gRPC call asynchronously and wrap it so the tasklet
        # machinery can await it.
        call = method.future(request, timeout=timeout)
        rpc = _remote.RemoteCall(call, "{}({})".format(rpc_name, request))
        log.debug(rpc)
        log.debug("timeout={}".format(timeout))

        result = yield rpc
        raise tasklets.Return(result)

    # Wrap the whole call (not individual attempts) in the retry helper.
    if retries:
        rpc_call = _retry.retry_async(rpc_call, retries=retries)

    return rpc_call()
5,333,287
def render_table(data, col_width=3.0, row_height=0.625, font_size=14,
                     header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w',
                     bbox=[0, 0, 1, 1], header_columns=0,
                     ax=None, **kwargs):
    """[Taken from ref: https://stackoverflow.com/questions/19726663/how-to-save-the-pandas-dataframe-series-data-as-a-figure]
    [Prints given dataframe in a nice format, that is easy to save]

    Parameters
    ----------
    data : [data frame]
        [data frame]
    col_width : float, optional
        [column width], by default 3.0
    row_height : float, optional
        [row height], by default 0.625
    font_size : int, optional
        [font size], by default 14
    header_color : str, optional
        [header color], by default '#40466e'
    row_colors : list, optional
        [row color], by default ['#f1f1f2', 'w']
    edge_color : str, optional
        [edge color], by default 'w'
    bbox : list, optional
        [bbox ], by default [0, 0, 1, 1]
    header_columns : int, optional
        [header columns], by default 0
    ax : [type], optional
        [plotting table, by default None

    Returns
    -------
    [object]
        [figure]
    """
    if ax is None:
        # Size the figure from the table dimensions (+1 row for the header).
        size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
        fig, ax = plt.subplots(figsize=size)
        ax.axis('off')

    mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs)

    mpl_table.auto_set_font_size(False)
    mpl_table.set_fontsize(font_size)

    # Style every cell: header row / header columns get bold white-on-color,
    # data rows alternate through row_colors.
    for k, cell in  mpl_table._cells.items():
        cell.set_edgecolor(edge_color)
        if k[0] == 0 or k[1] < header_columns:
            cell.set_text_props(weight='bold', color='w')
            cell.set_facecolor(header_color)
        else:
            cell.set_facecolor(row_colors[k[0]%len(row_colors) ])
    return ax.get_figure(), ax
5,333,288
def segment_image(class_colours, pixel_classes, height, width,
                  bg_alpha=0, fg_alpha=255):
    """visualise pixel classes

    Maps each pixel class to its RGB colour, then sets the alpha channel:
    class 0 (background/unlabelled) gets bg_alpha, every other class
    gets fg_alpha.
    """
    rgb = class_colours[pixel_classes].reshape(height, width, 3)
    rgb = rgb.astype("uint8")
    rgba = np.array(Image.fromarray(rgb).convert("RGBA"))
    flat = rgba.reshape(height * width, 4)
    # Assumes pixel_classes is a flat array of length height*width.
    background = np.where(pixel_classes == 0)
    flat[background, 3] = bg_alpha
    foreground = np.where(pixel_classes > 0)
    flat[foreground, 3] = fg_alpha
    return Image.fromarray(flat.reshape(height, width, 4))
5,333,289
def deploy(instance):
    """
    Takes all files from the 'deploy' folder in the the lobot directoy and
    uploads them to the remote machines '~/lobot/deploy' folder.

    instance is an EC2 instance description dict (needs 'KeyName' and
    'PublicIpAddress'); the matching .pem key must exist in ./keys/.
    """
    print("?")
    deploy_path = os.path.dirname(os.path.realpath(__file__))+"/deploy/"
    # Show the user what would be copied before asking for confirmation.
    print("\nContent of \"deploy\" folder:")
    for filename in os.listdir(deploy_path):
        print("\t\t"+filename)
    print("\t\t - - -")
    confirm_prompt = {
        'type': 'confirm',
        'message': 'Do you want to copy the content of the \"deploy\" folder to the remote machine?',
        'name': 'deploy',
        'default': False,
    }
    chosen_confirmation = prompt.prompt(confirm_prompt)["deploy"]
    if chosen_confirmation:
        if not os.path.exists(deploy_path):
            print("No \"deploy\" folder in the script's directory \""+os.path.dirname(os.path.realpath(__file__)))
            return
        key_name = instance["KeyName"]
        key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
        # scp the folder contents into ~/lobot/deploy on the remote host.
        command = ["scp", "-i", key_path, "-r", deploy_path+".", GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"]+":lobot/deploy/"]
        if os.path.exists(key_path):
            # Probe for the remote target directory first; exit status 2
            # from 'ls' means it does not exist yet.
            ls_command = ["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "ls", "-ll", "~/lobot/deploy"]
            ls_returncode = subprocess.call(ls_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            if ls_returncode == 2:
                # Create ~/lobot and ~/lobot/deploy before copying.
                return_code = subprocess.call(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "mkdir", "~/lobot", ";", "mkdir", "~/lobot/deploy"])
            if subprocess.call(command) == 0:
                print("Copied to \"~/lobot/deploy\" on remote machine.")
        else:
            raise ValueError("Key"+key_name+".pem is not available in my keys folder")
5,333,290
def cross_replica_average(inputs, num_shards=None, num_shards_per_group=None, physical_shape=None, tile_shape=None, use_spatial_partitioning=False): """Customized cross replica sum op.""" # if num_shards_per_group is defined, apply distributed batch norm. group_assignment = None if num_shards_per_group > 0: if num_shards % num_shards_per_group != 0: raise ValueError( 'num_shards: %d mod num_shards_per_group: %d, should be 0' % (num_shards, num_shards_per_group)) num_groups = num_shards // num_shards_per_group if physical_shape is not None and tile_shape is not None: if use_spatial_partitioning: group_assignment = spatial_partitioning_group_assignment( physical_shape, tile_shape, num_groups) else: group_assignment = normal_group_assignment(physical_shape, tile_shape, num_groups) else: group_assignment = [[ # pylint: disable=g-complex-comprehension x for x in range(num_shards) if x // num_shards_per_group == y ] for y in range(num_groups)] return tpu_ops.cross_replica_sum(inputs, group_assignment) / math_ops.cast( num_shards_per_group, inputs.dtype)
5,333,291
def run_length_to_bitstream(rl: np.ndarray, values: np.ndarray, v_high: int,
                            v_low: int) -> np.ndarray:
    """Do run length DECODING and map low/high signal to logic 0/1.

    Values equal to v_high become 1, values equal to v_low become 0, and
    everything else is left untouched; each value is then repeated by its
    run length.

    [1,2,1,1,1]
    [7,1,7,1,5] --> [1 0 0 1 0 5]

    :param rl: Array of run lengths
    :param values: Array of corresponding values (positive ints)
    :param v_high: Value that will be mapped to 1
    :param v_low: Value that will be mapped to 0
    :return: Array of hopefully only {0,1} with runs re-expanded.
    :raises: ValueError if rl not exactly same size as values.
    """
    rl = np.asarray(rl)
    values = np.asarray(values)
    if rl.shape != values.shape:
        raise ValueError("rl and values shapes unequal: %s %s"
                         % (str(rl.shape), str(values.shape)))
    # Additive remapping: shift matching entries onto 1 (high) or 0 (low),
    # leaving non-matching values unchanged.
    shift = (np.where(values == v_high, 1 - v_high, 0)
             + np.where(values == v_low, -v_low, 0))
    return np.repeat(values + shift, rl)
5,333,292
def edge_naming(col_list, split_collections=True):
    """
    This function normalize the naming of edges collections

    If split_collections is True an edge collection name will be generated
    between each listed collection in order. So if col_list = [A, B, C]
    result will be [A__B, B__C]

    :param col_list: ordered list of collection names
    :param split_collections: one name per adjacent pair when True, a single
        chained name when False.
    :return: an array of edge collection names
    """
    if split_collections:
        # One edge name per adjacent pair of collections.
        pairwise = [
            prev + EDGE_MARKER + nxt
            for prev, nxt in zip(col_list, col_list[1:])
        ]
        if pairwise:
            return pairwise
        # Zero or one collection: fall back to a single (possibly empty) name.
        return [col_list[0] if col_list else ""]
    # Unsplit: chain every collection into one name.
    joined = ""
    for v in col_list:
        joined = v if joined == "" else joined + EDGE_MARKER + v
    return [joined]
5,333,293
def interactive_visual_difference_from_threshold_by_day(ds_ext):
    """
    Returns:
    1) xarray DataArray, with three variables, for each day in the dataset
        i) Highest value difference from the threshold, across the area
        ii) Lowest value difference from the threshold, across the area
        iii) Average difference from the threshold, from all pixels in the area :
    2) bokeh pane for interactive visualization.
    """
    # 'max' results track tasmax/above_threshold, anything else tracks
    # tasmin/below_threshold.
    result_type = ds_ext.attrs['result_type']
    temp_var = 'tasmax' if result_type=='max' else 'tasmin'
    diff_var = 'above_threshold' if result_type=='max' else 'below_threshold'
    # Spatial reductions per time step (NaNs ignored).
    threshold_diff_high = ds_ext[diff_var].max(dim=['lat','lon'], skipna=True)
    threshold_diff_low = ds_ext[diff_var].min(dim=['lat','lon'], skipna=True)
    threshold_diff_avg = ds_ext[diff_var].mean(dim=['lat','lon'], skipna=True)
    # Convert the (cftime) calendar index to a regular pandas DatetimeIndex
    # so hvplot can render it — presumably the data uses a non-standard
    # calendar; confirm.
    dt_index = threshold_diff_high.indexes['time'].to_datetimeindex()
    difference_from_threshold = xr.Dataset(data_vars = {'threshold_diff_high':(['time'],threshold_diff_high.to_numpy())
                                                        , 'threshold_diff_low':(['time'],threshold_diff_low.to_numpy())
                                                        , 'threshold_diff_avg':(['time'],threshold_diff_avg.to_numpy())}
                                           , coords=dict(time=dt_index))
    difference_from_threshold_plot = difference_from_threshold.hvplot(y=['threshold_diff_low','threshold_diff_high','threshold_diff_avg']
                                                                      , value_label='difference_from_threshold'
                                                                      , alpha=0.7)
    pane = pn.panel(difference_from_threshold_plot)
    return difference_from_threshold, pane
5,333,294
def _parse_compression_method(data):
    """Parses the value of "method" extension parameter."""
    # The compression "method" value reuses the Sec-WebSocket-Extensions
    # grammar, so delegate to the shared extension-list parser.
    return common.parse_extensions(data)
5,333,295
def my_distance(drij):
    """Compute the Euclidean length of displacement vector(s) drij.

    Assumes periodic boundary conditions have already been applied to
    drij by the caller.

    Args:
        drij (np.array): vector(s) of length 3, components along axis 0
    Returns:
        float or np.array: length (distance) of the vector(s)
    """
    # Explicit sqrt-of-sum-of-squares; equivalent to
    # np.linalg.norm(drij, axis=0) for real-valued input.
    return np.sqrt(np.sum(drij * drij, axis=0))
5,333,296
def session_with_model(model_type, name):
    """Start a TensorFlow session, restore the latest checkpoint for the
    given run, and yield ``(session, model)``.

    Checkpoints are looked up under ``runs/<model_type>/<name>/checkpoint``;
    the graph structure is read from ``GRAPH_FILENAME`` in that directory.

    NOTE(review): this is a generator function used as a context manager;
    it presumably relies on a ``@contextlib.contextmanager`` decorator
    applied somewhere not visible in this chunk — confirm, since ``with``
    on the bare generator would raise TypeError.

    Example::

        with session_with_model("grapheme", name) as (session, model):
            session.run(model.outputs, ...)
    """
    checkpoint_dir = os.path.join("runs", model_type, name, "checkpoint")
    graph_file = os.path.join(checkpoint_dir, GRAPH_FILENAME)
    # Use a fresh Graph so the restored ops don't collide with anything
    # already registered in the default graph.
    graph = tf.Graph()
    with tf.Session(graph=graph) as session:
        latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
        # import_meta_graph rebuilds the saved graph structure from the
        # meta file; restore() then loads the checkpointed variable values.
        saver = tf.train.import_meta_graph(graph_file)
        saver.restore(session, latest_checkpoint)
        model = Model.restore(graph)
        yield session, model
5,333,297
def list_shared_projects(sortBy=None, sortOrder=None, maxResults=None, nextToken=None):
    """
    Gets a list of projects that are shared with other AWS accounts or users.
    See also: AWS API Documentation

    Exceptions

    :example: response = client.list_shared_projects(
        sortBy='ARN'|'MODIFIED_TIME',
        sortOrder='ASCENDING'|'DESCENDING',
        maxResults=123,
        nextToken='string'
    )

    :type sortBy: string
    :param sortBy: The criterion to be used to list build projects shared with the current AWS account or user. Valid values include:\n\nARN : List based on the ARN.\nMODIFIED_TIME : List based on when information about the shared project was last changed.\n\n

    :type sortOrder: string
    :param sortOrder: The order in which to list shared build projects. Valid values include:\n\nASCENDING : List in ascending order.\nDESCENDING : List in descending order.\n\n

    :type maxResults: integer
    :param maxResults: The maximum number of paginated shared build projects returned per response. Use nextToken to iterate pages in the list of returned Project objects. The default value is 100.

    :type nextToken: string
    :param nextToken: During a previous call, the maximum number of items that can be returned is the value specified in maxResults . If there more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

    :rtype: dict
    ReturnsResponse Syntax
    {
        'nextToken': 'string',
        'projects': [
            'string',
        ]
    }

    Response Structure
    (dict) --
    nextToken (string) -- During a previous call, the maximum number of items that can be returned is the value specified in maxResults . If there more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.
    projects (list) -- The list of ARNs for the build projects shared with the current AWS account or user.
    (string) --

    Exceptions
    CodeBuild.Client.exceptions.InvalidInputException

    :return: {
        'nextToken': 'string',
        'projects': [
            'string',
        ]
    }

    :returns:
    (string) --
    """
    # NOTE(review): generated boto3-style documentation stub — it has no
    # implementation by design; the docstring is the API reference.
    pass
5,333,298
def show(list_id):
    """Render (or return as JSON) a single list looked up by its id.

    If the request path contains '/json' the record is serialised with
    ``as_dict`` and returned as JSON; otherwise the HTML template
    ``list/show.html`` is rendered.

    :param list_id: primary key of the List row to display
    :return: a JSON response, rendered template, or a 404 response when
        no row matches ``list_id``
    """
    data = db_session.query(List).filter(List.id == list_id).first()
    if data is None:
        # .first() returns None for unknown ids; without this guard the
        # calls below raise AttributeError (an HTTP 500) instead of a 404.
        return 'List not found', 404
    if '/json' in request.path:
        return jsonify(data.as_dict())
    return render_template('list/show.html', list=data)
5,333,299