Dataset columns: content (string, 35 – 762k characters) · sha1 (string, 40 characters) · id (int64, 0 – 3.66M)
def openTypeNameVersionFallback(info):
    """
    Fallback to *versionMajor.versionMinor* in the form 0.000.
    """
    versionMajor = getAttrWithFallback(info, "versionMajor")
    versionMinor = getAttrWithFallback(info, "versionMinor")
    return "%d.%s" % (versionMajor, str(versionMinor).zfill(3))
370ab06aedd9909cc1b5d0e7f2211a554695c268
3,641,900
def get_quoted_text(text):
    """Method used to get quoted text.

    If body/title text contains a quote, the first quote is considered as the text.

    :param text: The replyable text
    :return: The first quote in the text. If no quotes are found, then the entire text is returned
    """
    lines = text.split('\n\n')
    for line in lines:
        if line.startswith('>'):
            return line[1:]
    return text
3ac1801edcaf16af45d118918cb548f41d9a08fb
3,641,901
def pad_sequences(sequences, pad_tok):
    """
    Args:
        sequences: a generator of list or tuple
        pad_tok: the char to pad with

    Returns:
        a list of list where each sublist has same length
    """
    max_length = max(map(lambda x: len(x), sequences))
    sequence_padded, sequence_length = _pad_sequences(sequences, pad_tok, max_length)
    return sequence_padded, sequence_length
077d80424607864d6e0fa63d3843f80b9c822d1e
3,641,902
def get_username_for_os(os):
    """Return username for a given os."""
    usernames = {"alinux2": "ec2-user", "centos7": "centos", "ubuntu1804": "ubuntu", "ubuntu2004": "ubuntu"}
    return usernames.get(os)
579ebfa4e76b6660d28afcc010419f32d74aa98c
3,641,903
import copy


def stats_getter(context, core_plugin, ignore_list=None):
    """Update Octavia statistics for each listener (virtual server)"""
    stat_list = []
    lb_service_client = core_plugin.nsxlib.load_balancer.service
    # Go over all the loadbalancers & services
    lb_bindings = nsx_db.get_nsx_lbaas_loadbalancer_bindings(
        context.session)
    for lb_binding in lb_bindings:
        if ignore_list and lb_binding['loadbalancer_id'] in ignore_list:
            continue
        lb_service_id = lb_binding.get('lb_service_id')
        try:
            # get the NSX statistics for this LB service
            # Since this is called periodically, silencing it at the logs
            rsp = lb_service_client.get_stats(lb_service_id, silent=True)
            if rsp and 'virtual_servers' in rsp:
                # Go over each virtual server in the response
                for vs in rsp['virtual_servers']:
                    # look up the virtual server in the DB
                    vs_bind = nsx_db.get_nsx_lbaas_listener_binding_by_vs_id(
                        context.session, vs['virtual_server_id'])
                    if vs_bind and 'statistics' in vs:
                        vs_stats = vs['statistics']
                        stats = copy.copy(lb_const.LB_EMPTY_STATS)
                        stats['id'] = vs_bind.listener_id
                        stats['request_errors'] = 0  # currently unsupported
                        for stat, stat_value in lb_const.LB_STATS_MAP.items():
                            lb_stat = stat_value
                            stats[stat] += vs_stats[lb_stat]
                        stat_list.append(stats)
        except nsxlib_exc.ManagerError:
            pass

    return stat_list
65ebac76b6543683103584c18a7c06d2ea453e0a
3,641,904
from typing import List


def track_to_note_string_list(
    track: Track,
) -> List[str]:
    """Convert a mingus.containers.Track to a list of note strings"""
    final_note_list = []
    for element in track.get_notes():
        for note in element[-1]:
            final_note_list.append(note_to_string(note))
    return final_note_list
71b4fab66d18242e67a4ea59998e94341531f77a
3,641,905
def group_sums_dummy(x, group_dummy):
    """sum by groups given group dummy variable

    group_dummy can be either ndarray or sparse matrix
    """
    if data_util._is_using_ndarray_type(group_dummy, None):
        return np.dot(x.T, group_dummy)
    else:  # check for sparse
        return x.T * group_dummy
2cfb448130b9c48b41dd491a4fe01ab11a38478b
3,641,906
def fixture_multi_check_schema() -> DataFrameSchema:
    """Schema with multiple positivity checks on column `a`"""
    return _multi_check_schema()
f2b95cda9d6cd3e5bf0055d22c4b8505370a9867
3,641,907
def get_featurizer(featurizer_key: str) -> ReactionFeaturizer:
    """
    :param: featurizer_key: key of a ReactionFeaturizer
    :return: a ReactionFeaturizer for a specified key
    """
    if featurizer_key not in FEATURIZER_INITIALIZERS:
        raise ValueError(f"No featurizer for key {featurizer_key}")
    return FEATURIZER_INITIALIZERS[featurizer_key]()
15583e90a0691ce10df9d789f6decef5443efed2
3,641,908
from typing import List


def is_negative_spec(*specs: List[List[str]]) -> bool:
    """
    Checks for negative values in a variable number of spec lists

    Each spec list can have multiple strings. Each string within each list
    will be searched for a '-' sign.
    """
    for specset in specs:
        if specset:
            for spec in specset:
                if '-' in spec:
                    return True
    return False
216e6db2e63a657ac95a31896b9b61329a10a3db
3,641,909
def is_np_timedelta_like(dtype: DTypeLike) -> bool:
    """Check whether dtype is of the timedelta64 dtype."""
    return np.issubdtype(dtype, np.timedelta64)
b68d244d2d2c4d3029a93cbd8b3affca8c55f680
3,641,910
def pp2mr(pv, p):
    """
    Calculates mixing ratio from the partial and total pressure assuming both
    have same units and no condensate is present. Returns value in units of
    kg/kg. Checked 20.03.20
    """
    pv, scalar_input1 = flatten_input(pv)  # don't specify pascal as this will be wrongly corrected
    p, scalar_input2 = flatten_input(p)
    scalar_input = scalar_input1 and scalar_input2

    mr = eps1 * pv / (p - pv)

    if scalar_input:
        return np.squeeze(mr)
    return mr
97bd35658a54a53d7541aa0ee71f1b25cf8c2dbf
3,641,911
def initialize_database(app):
    """
    Takes an initialized flask application and binds a database context to
    allow query execution
    """
    # see https://github.com/mitsuhiko/flask-sqlalchemy/issues/82
    db.app = app
    db.init_app(app)
    return db
11a9f6046f51239c071d3a61780d1edf775baa51
3,641,912
from typing import TextIO


def decrypt(input_file: TextIO, wordlist_filename: str) -> str:
    """
    Using wordlist_filename, decrypt input_file according to the handout
    instructions, and return the plaintext.
    """
    encrypt = []
    result = ''
    ans = ''
    plaintext = ''

    # store English wordlist into a set
    english_wordlist = set()
    with open(wordlist_filename) as file:
        for line_text in file:
            english_wordlist.add(line_text.strip())

    for line in input_file:
        encrypt = line.lower()
        max_so_far = 0
        for count in range(26):
            text = shift(encrypt, count).split()
            # print(text)
            # reset max end every new shift
            max_end = 0
            # check if English word match the text
            # add 1 whenever there is a match
            for word in text:
                # remove symbol & punctuation
                words = ''.join(char for char in word if char.isalnum())
                if words in english_wordlist:
                    max_end += 1
            # if new max found, set the result to that text
            # and set max_so_far to new max
            # print(max_end)
            if max_so_far < max_end:
                result = ' '.join(text)
                max_so_far = max_end
        ans += result + '\n'
    return ans.strip()
87fae6e252b57f9903cd4b46d18a625455761d62
3,641,913
def loadEvents(fname):
    """
    Reads a file that consists of first column of unix timestamps followed
    by arbitrary string, one per line. Outputs as dictionary. Also keeps
    track of min and max time seen in global mint,maxt
    """
    events = []
    ws = open(fname, 'r').read().splitlines()
    events = []
    for w in ws:
        ix = w.find(' ')  # find first space, that's where stamp ends
        stamp = int(w[:ix])
        str = w[ix+1:]
        events.append({'t': stamp, 's': str})
    # except Exception as e:
    #     print ('%s probably does not exist, setting empty events list.' % (fname, ))
    #     print ('error was:', e)
    return events
495dbd5d47892b953c139b27b1f20dd9854ea29a
3,641,914
def compute_relative_target_raw(current_pose, target_pose):
    """
    Computes the relative target pose which has to be fed to the network as an input.
    Both target_pose and current_pose have to be in the same coordinate frame (global map).
    """
    # Compute the relative goal position
    goal_position_difference = [target_pose.pose.position.x - current_pose.pose.position.x,
                                target_pose.pose.position.y - current_pose.pose.position.y]

    # Get the current orientation and the goal orientation
    current_orientation = current_pose.pose.orientation
    p = [current_orientation.x, current_orientation.y, current_orientation.z, current_orientation.w]
    goal_orientation = target_pose.pose.orientation
    q = [goal_orientation.x, goal_orientation.y, goal_orientation.z, goal_orientation.w]

    # Rotate the relative goal position into the base frame (robot frame)
    goal_position_base_frame = tf.transformations.quaternion_multiply(
        tf.transformations.quaternion_inverse(p),
        tf.transformations.quaternion_multiply([goal_position_difference[0], goal_position_difference[1], 0, 0], p))

    # Compute the difference to the goal orientation
    orientation_to_target = tf.transformations.quaternion_multiply(q, tf.transformations.quaternion_inverse(p))
    yaw = tf.transformations.euler_from_quaternion(orientation_to_target)[2]

    return (goal_position_base_frame[0], -goal_position_base_frame[1], yaw)
efee9b6ef48bda67dfd42526c20e7d1de6a164da
3,641,915
import httpx


def get_tokeninfo_remote(token_info_url, token):
    """
    Retrieve oauth token_info remotely using HTTP

    :param token_info_url: Url to get information about the token
    :type token_info_url: str
    :param token: oauth token from authorization header
    :type token: str
    :rtype: dict
    """
    token_request = httpx.get(token_info_url,
                              headers={'Authorization': 'Bearer {}'.format(token)},
                              timeout=5)
    if not token_request.ok:
        return None
    return token_request.json()
c0f72b47b97d2d9c57b8b7fe30a3cba9f29c2005
3,641,916
from datetime import datetime


def make_site_object(config, seen):
    """Make object with site values for evaluation."""
    now = datetime.today().strftime("%Y-%m-%d")
    subtitle = (
        f'<h2 class="subtitle">{config.subtitle}</h2>' if config.subtitle else ""
    )
    site = SN(
        author=lambda: config.author,
        builddate=lambda: now,
        copyrightyear=lambda: config.copyrightyear,
        domain=lambda: config.domain,
        email=lambda: config.email,
        lang=lambda: config.lang,
        repo=lambda: config.repo,
        title=lambda: config.title,
        subtitle=lambda: subtitle,
        tool=lambda: config.tool
    )
    if "foot.html" in config.template:
        site.foot = lambda root: _fill(
            "foot.html", config.template["foot.html"], site, SN(root=root)
        )
    else:
        site.foot = lambda root: ""
    if "head.html" in config.template:
        site.head = lambda root: _fill(
            "head.html", config.template["head.html"], site, SN(root=root)
        )
    else:
        site.head = lambda root: ""
    if "stats.html" in config.template:
        filled = _fill("stats.html", config.template["stats.html"], site, SN())
        site.stats = lambda: filled
    else:
        site.stats = lambda: ""
    return site
762382446736a1815deae275db0d7485c0718a4e
3,641,917
from datetime import datetime
import os


def get_filename():
    """ Build the output filename """
    now_date = datetime.now()
    out_date = now_date.strftime("%Y-%m-%d_%H-%M")
    outfile_name = "node_ip_cfg_info_" + out_date + '.txt'
    if os.path.exists(outfile_name):
        os.remove(outfile_name)
    print('Output file name is: {}'.format(outfile_name))
    return outfile_name
f3cf65c2ceb71388fc026d3dd603d3878a9a4650
3,641,918
from datetime import datetime
import json
import hashlib


def map_aircraft_to_record(aircrafts, message_now, device_id):
    """
    Maps the `aircraft` entity to a BigQuery record and its unique id.
    Returns `(unique_ids, records)`
    """
    def copy_data(aircraft):
        result = {
            'hex': aircraft.get('hex'),
            'squawk': aircraft.get('squawk'),
            'flight': aircraft.get('flight'),
            'lat': aircraft.get('lat'),
            'lon': aircraft.get('lon'),
            'nucp': aircraft.get('nucp'),
            'seen_pos': aircraft.get('seen_pos'),
            'altitude': aircraft.get('altitude'),
            'vert_rate': aircraft.get('vert_rate'),
            'track': aircraft.get('track'),
            'speed': aircraft.get('speed'),
            'messages': aircraft.get('messages'),
            'seen': aircraft.get('seen'),
            'rssi': aircraft.get('rssi'),
            'device_id': device_id,
            'timestamp': datetime.utcfromtimestamp(float(message_now)).isoformat()
        }

        result_json = json.dumps(result)
        result_hash = hashlib.sha512(result_json.encode('utf-8')).hexdigest()
        unique_id = f'{message_now}_{result_hash}'
        result['created_at'] = datetime.now().isoformat()

        return (unique_id, result)

    return zip(
        *map(
            copy_data,
            aircrafts
        )
    )
d423b87e2018486de076cc94a719038c53c54602
3,641,919
def add_gaussian_noise(image, mean=0, std=0.001):
    """
    Add Gaussian noise.
    mean : mean of the noise
    var : variance of the noise
    """
    image = np.array(image / 255, dtype=float)
    noise = np.random.normal(mean, std ** 0.5, image.shape)
    print(np.mean(noise ** 2) - np.mean(noise) ** 2)
    out = image + noise
    if image.min() < 0:
        low_clip = -1.
    else:
        low_clip = 0.
    out = np.clip(out, low_clip, 1.0)
    out = np.uint8(out * 255)
    return out
1683ae5815e28ab0c3354be1623ec56e6058b449
3,641,920
import os
import datasets


def imagenet_get_datasets(data_dir, arch, load_train=True, load_test=True):
    """
    Load the ImageNet dataset.
    """
    # Inception Network accepts image of size 3, 299, 299
    if distiller.models.is_inception(arch):
        resize, crop = 336, 299
    else:
        resize, crop = 256, 224

    if arch == 'googlenet':
        normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    else:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    train_dir = os.path.join(data_dir, 'train')
    test_dir = os.path.join(data_dir, 'val')

    train_dataset = None
    if load_train:
        train_transform = transforms.Compose([
            transforms.RandomResizedCrop(crop),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        train_dataset = datasets.ImageFolder(train_dir, train_transform)

    test_dataset = None
    if load_test:
        test_transform = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(crop),
            transforms.ToTensor(),
            normalize,
        ])
        test_dataset = datasets.ImageFolder(test_dir, test_transform)

    return train_dataset, test_dataset
1008dabb1e7da9f5cff82847e06b44992413a34d
3,641,921
def sub(xs, ys):
    """
    Computes xs - ys, such that elements in xs that occur in ys are removed.
    @param xs: list
    @param ys: list
    @return: xs - ys
    """
    return [x for x in xs if x not in ys]
8911bb2c79919cae88463a95521cf051828038e8
3,641,922
def create_folio_skill(request, folio_id):
    """ Creates a new folio skill """

    if request.method == "POST":
        form = FolioSkillForm(request.POST)

        if form.is_valid():
            skill = form.save(commit=False)
            skill.author_id = request.user
            skill.save()

            messages.success(
                request, f"The {skill.skill_title} skill has "
                         f"been created successfully."
            )
        else:
            messages.error(
                request, "Data posted was not valid "
                         "to create a new skill."
            )
    else:
        messages.error(
            request, "Data should be posted when "
                     "attempting to create a new skill."
        )

    return redirect(
        reverse("edit_folio_skills", kwargs={"folio_id": folio_id})
    )
7c1966f7a6b3c98e972da90abb5eb984a4af85a2
3,641,923
def lambda_handler(event, context):
    """AWS Lambda Function entrypoint to cancel booking

    Parameters
    ----------
    event: dict, required
        Step Functions State Machine event

        chargeId: string
            pre-authorization charge ID

    context: object, required
        Lambda Context runtime methods and attributes
        Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html

    Returns
    -------
    boolean

    Raises
    ------
    BookingCancellationException
        Booking Cancellation Exception including error message upon failure
    """
    global _cold_start
    if _cold_start:
        log_metric(
            name="ColdStart", unit=MetricUnit.Count, value=1, function_name=context.function_name
        )
        _cold_start = False
        print("COLDSTART", context.aws_request_id)

    booking_id = event.get("bookingId")
    if not booking_id:
        log_metric(
            name="InvalidBookingRequest", unit=MetricUnit.Count, value=1, operation="cancel_booking"
        )
        logger.error({"operation": "invalid_event", "details": event})
        raise ValueError("Invalid booking ID")

    try:
        logger.debug(f"Cancelling booking - {booking_id}")
        ret = cancel_booking(booking_id)

        log_metric(name="SuccessfulCancellation", unit=MetricUnit.Count, value=1)
        logger.debug("Adding Booking Status annotation")
        tracer.put_annotation("BookingStatus", "CANCELLED")

        return ret
    except BookingCancellationException as err:
        log_metric(name="FailedCancellation", unit=MetricUnit.Count, value=1)
        logger.debug("Adding Booking Status annotation before raising error")
        tracer.put_annotation("BookingStatus", "ERROR")
        logger.error({"operation": "cancel_booking", "details": err})

        raise BookingCancellationException(details=err)
e29b8eb3fe2ab38aab91d97d0c63bfb19a9b9363
3,641,924
def reduce_tags(tags):
    """Filter a set of tags to return only those that aren't descendents from
    others in the list."""
    reduced_tags = []
    for tag_a in tags:
        include = True
        for tag_b in tags:
            if tag_a == tag_b:
                continue
            if not tag_before(tag_a, tag_b):
                include = False
                break
        if include:
            reduced_tags.append(tag_a)
    return reduced_tags
fa76e1cc5bd10ecd58bdc8f5277fa32d41484c17
3,641,925
def default_param_noise_filter(var):
    """
    check whether or not a variable is perturbable or not

    :param var: (TensorFlow Tensor) the variable
    :return: (bool) can be perturb
    """
    if var not in tf.trainable_variables():
        # We never perturb non-trainable vars.
        return False
    if "fully_connected" in var.name:
        # We perturb fully-connected layers.
        return True

    # The remaining layers are likely conv or layer norm layers, which we do not wish to
    # perturb (in the former case because they only extract features, in the latter case because
    # we use them for normalization purposes). If you change your network, you will likely want
    # to re-consider which layers to perturb and which to keep untouched.
    return False
12817bf2c2b726d91d9d3cc838b52499e5382d80
3,641,926
def input_fn(request_body, request_content_type):
    """
    An input_fn that loads the pickled tensor by the inference server of SageMaker.
    The function deserialize the inference request, then the predict_fn get invoked.
    Does preprocessing and returns a tensor representation of the source sentence
    ready to give to the model to make inference.

    :param request_body: str
        The request body
    :param request_content_type: type
        The request body type.
    :return: torch.Tensor
    """
    if request_content_type == 'application/json':
        return None
    return 'WHAT HAPPEN TO YOU !'
62d45e188d5537eaa566bd4b90bdb8abc7626621
3,641,927
def get_colors(k):
    """
    Return k colors in a list. We choose from 7 different colors.
    If k > 7 we choose colors more than once.
    """
    base_colors = ['b', 'r', 'g', 'c', 'm', 'y', 'k']
    colors = []
    index = 1
    for i in range(0, k):
        if index % (len(base_colors) + 1) == 0:
            index = 1
        colors.append(base_colors[index - 1])
        index += 1
    return colors
6c4a38eb394254f57d8be9fca47e0b44f51f5f04
3,641,928
import logging


def test_significance(stat, A, b, eta, mu, cov, z, alpha):
    """
    Compute a p-value by testing one tail.
    Look at right tail or left tail?

    Returns whether h_0 is rejected, along with the distribution parameters.
    """
    ppf, params = psi_inf(A, b, eta, mu, cov, z)
    if np.isnan(params['scale']) or not np.isreal(params['scale']):
        logging.warning("Scale is not real or negative, test reject")
        return False, params
    threshold = ppf(1. - alpha)
    return stat > threshold, params
f723a75c59a23d7110a41036d6873f6023b42333
3,641,929
def _get_cluster_group_idx(clusters: np.ndarray) -> nb.typed.List:
    """
    Get start and stop indexes for unique cluster labels.

    Parameters
    ----------
    clusters : np.ndarray
        The ordered cluster labels (noise points are -1).

    Returns
    -------
    nb.typed.List[Tuple[int, int]]
        Tuples with the start index (inclusive) and end index (exclusive) of the
        unique cluster labels.
    """
    start_i = 0
    while clusters[start_i] == -1:
        start_i += 1
    group_idx, stop_i = nb.typed.List(), start_i
    while stop_i < clusters.shape[0]:
        start_i, label = stop_i, clusters[stop_i]
        while stop_i < clusters.shape[0] and clusters[stop_i] == label:
            stop_i += 1
        group_idx.append((start_i, stop_i))
    return group_idx
5bdae0228367868c201b8a399ca959bc50c715b2
3,641,930
from typing import Union
from typing import Optional


def genes_flyaltas2(
    genes: Union[str, list] = None,
    gene_nametype: Optional[str] = "symbol",
    stage: Optional[str] = "male_adult",
    enrich_threshold: Optional[float] = 1.0,
    fbgn_path: Optional[str] = "deml_fbgn.tsv.gz",
) -> pd.DataFrame:
    """
    Annotate a gene list based on the flyaltas2 database

    Parameters
    ----------
    genes: `str` or `list` (default: `None`)
        The name of a gene, or a list of genes.
    gene_nametype : `str` (default: `'symbol'`)
        Type of gene name, including `'symbol'` and `'FBgn'`.
    stage: `str` (default: `'male_adult'`)
        The developmental stages of Drosophila melanogaster. Available stages are:
        * `'larval'`
        * `'female_adult'`
        * `'male_adult'`
    enrich_threshold: `float` (default: `1.0`)
        Threshold for filtering enrichment in FlyAtlas 2.
    fbgn_path: `str` (default: `'deml_fbgn.tsv.gz'`)
        Absolute path to the deml_fbgn.tsv.gz.

    Returns
    -------
    anno_genes: `pandas.DataFrame`
        The genes and the particular tissues in which the genes are specifically
        expressed of each group.
    """
    genes = [genes] if isinstance(genes, str) else genes
    fbgn_names = (
        symbol2fbgn(gene=genes, datapath=fbgn_path)
        if gene_nametype == "symbol"
        else genes
    )

    # Find the particular tissue in which the gene is specifically expressed
    anno_genes = pd.DataFrame()
    for fbgn_name in fbgn_names:
        particular_tissues = gene2tissue(fbgn_name, stage, enrich_threshold)
        if particular_tissues is not None:
            anno_genes = pd.concat([anno_genes, particular_tissues], axis=0)
    return anno_genes.astype(str)
651e3eb2ce58ae19d1785df1217b5434737b8bda
3,641,931
import torch
import tqdm


def _batch_embed(args, net, vecs: StringDataset, device, char_alphabet=None):
    """
    char_alphabet[dict]: id to char
    """
    # convert it into a raw string dataset
    if char_alphabet != None:
        vecs.to_bert_dataset(char_alphabet)
    test_loader = torch.utils.data.DataLoader(vecs, batch_size=args.test_batch_size,
                                              shuffle=False, num_workers=4)
    net.eval()
    embedding = []
    with tqdm.tqdm(total=len(test_loader), desc="# batch embedding") as p_bar:
        for i, x in enumerate(test_loader):
            p_bar.update(1)
            if char_alphabet != None:
                for xx in x:
                    xx = tokenizer(xx, return_tensors="pt")
                    # 1 x 768
                    xx = bert(**xx)[0][0][1].unsqueeze(0)
                    embedding.append(xx.cpu().data.numpy())
            else:
                embedding.append(net(x.to(device)).cpu().data.numpy())
    vecs.to_original_dataset()
    return np.concatenate(embedding, axis=0)
43224296330e4516c530d65217edf6a4b12dc5d3
3,641,932
def get_adjacency_matrix(distance_df, sensor_ids, normalized_k=0.1):
    """
    :param distance_df: data frame with three columns: [from, to, distance].
    :param sensor_ids: list of sensor ids.
    :param normalized_k: entries that become lower than normalized_k after normalization
        are set to zero for sparsity.
    :return: adjacency matrix
    """
    num_sensors = len(sensor_ids)
    dist_mx = np.zeros((num_sensors, num_sensors), dtype=np.float32)
    dist_mx[:] = np.inf
    # Builds sensor id to index map.
    sensor_id_to_ind = {}
    for i, sensor_id in enumerate(sensor_ids):
        sensor_id_to_ind[sensor_id] = i

    # Fills cells in the matrix with distances.
    for row in distance_df.values:
        if row[0] not in sensor_id_to_ind or row[1] not in sensor_id_to_ind:
            continue
        dist_mx[sensor_id_to_ind[row[0]], sensor_id_to_ind[row[1]]] = row[2]

    # Calculates the standard deviation as theta.
    distances = dist_mx[~np.isinf(dist_mx)].flatten()
    std = distances.std()
    adj_mx = np.exp(-np.square(dist_mx / std))
    # Make the adjacent matrix symmetric by taking the max.
    # adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])

    # Sets entries that lower than a threshold, i.e., k, to zero for sparsity.
    adj_mx[adj_mx < normalized_k] = 0
    return adj_mx
b8acd5401dbf743294d52d71ddc97a0b0c74780b
3,641,933
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
                    strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
    """Transforms `int` indexes to strings by mapping ids to tokens,
    concatenating tokens into sentences, and stripping special tokens, etc.

    Args:
        ids: An n-D numpy array or (possibly nested) list of `int` indexes.
        vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concat along the last dimension of the
            the tokens into a string separated with a space character.
        strip_pad (str): The PAD token to strip from the strings (i.e., remove
            the leading and trailing PAD tokens of the strings). Default
            is '<PAD>' as defined in
            :class:`~texar.tf.data.SpecialTokens`.PAD.
            Set to `None` or `False` to disable the stripping.
        strip_bos (str): The BOS token to strip from the strings (i.e., remove
            the leading BOS tokens of the strings).
            Default is '<BOS>' as defined in
            :class:`~texar.tf.data.SpecialTokens`.BOS.
            Set to `None` or `False` to disable the stripping.
        strip_eos (str): The EOS token to strip from the strings (i.e., remove
            the EOS tokens and all subsequent tokens of the strings).
            Default is '<EOS>' as defined in
            :class:`~texar.tf.data.SpecialTokens`.EOS.
            Set to `None` or `False` to disable the stripping.

    Returns:
        If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
        concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
        array (or list) of str tokens.

    Example:

        .. code-block:: python

            text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]

            text = map_ids_to_strs(text_ids, data.vocab)
            # text == ['a sentence', 'parsed from ids']

            text = map_ids_to_strs(
                text_ids, data.vocab, join=False,
                strip_pad=None, strip_bos=None, strip_eos=None)
            # text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
            #          ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
    """
    tokens = vocab.map_ids_to_tokens_py(ids)
    if isinstance(ids, (list, tuple)):
        tokens = tokens.tolist()

    if compat:
        tokens = compat_as_text(tokens)

    str_ = str_join(tokens, compat=False)

    str_ = strip_special_tokens(
        str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
        compat=False)

    if join:
        return str_
    else:
        return _recur_split(str_, ids)
6e702d8c0d658bd822f0db6c4094ab093207b78e
3,641,934
import argparse


def _parse_args() -> argparse.Namespace:
    """Parses and returns the command line arguments."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('in_json',
                        type=argparse.FileType('r'),
                        help='The JSON file containing a list of file names '
                        'that the prefix map operations should be applied to')
    parser.add_argument(
        '--prefix-map-json',
        type=argparse.FileType('r'),
        required=True,
        help=
        'JSON file containing an array of prefix map transformations to apply '
        'to the strings before tokenizing. These string literal '
        'transformations are of the form "from=to". All strings with the '
        'prefix `from` will have the prefix replaced with `to`. '
        'Transformations are applied in the order they are listed in the JSON '
        'file.')
    parser.add_argument('--output',
                        type=argparse.FileType('w'),
                        help='File path to write transformed paths to.')
    return parser.parse_args()
108f2ab7d962fa31a99158997f832f46b8b8d6f8
3,641,935
import random


def rand_ascii_str(length):
    """Generates a random string of specified length, composed of ascii letters
    and digits.

    Args:
        length: The number of characters in the string.

    Returns:
        The random string generated.
    """
    letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]
    return ''.join(letters)
130e8dbe1eb8e60813b01cf08dc7e9fd388638cf
3,641,936
def set_achievement_disabled(aid, disabled):
    """
    Updates an achievement's availability.

    Args:
        aid: the achievement's aid
        disabled: whether or not the achievement should be disabled.

    Returns:
        The updated achievement object.
    """
    return update_achievement(aid, {"disabled": disabled})
2793e3576904f5b1498361f02e32fdf2642b734c
3,641,937
from datetime import datetime


def get_block(in_dt: datetime):
    """Get the BlockNumber instance at or before the datetime timestamp."""
    return BlockNumber.from_timestamp(in_dt.replace(tzinfo=timezone.utc).timestamp())
a77ca6c4021ebc0e5ef2e2c0294246415ccd9811
3,641,938
def get_channels(posts):
    """
    <summary> Returns post channel (twitter/facebook)</summary>
    <param name="posts" type="list"> List of posts </param>
    <returns> String "twitter" or "facebook" </returns>
    """
    channel = []
    for i in range(0, len(posts['post_id'])):
        if len(posts['post_text'][i]) <= 140:
            channel.append("twitter")
        else:
            channel.append("facebook")
    return channel
2bd67d13079ce115263ac46856d8a708f461cb7e
3,641,939
from bs4 import BeautifulSoup


def clean_text(text):
    """
    text: a string
    return: modified initial string
    """
    text = BeautifulSoup(text, "lxml").text  # HTML decoding
    text = text.lower()  # lowercase text
    text = REPLACE_BY_SPACE_RE.sub(' ', text)  # replace REPLACE_BY_SPACE_RE symbols by space in text
    text = BAD_SYMBOLS_RE.sub('', text)  # delete symbols which are in BAD_SYMBOLS_RE from text
    text = ' '.join(word for word in text.split() if word not in STOPWORDS)  # delete stopwords from text
    return text
b29f4b388bac55d04c824ad014a6b85e1c9c8ede
3,641,940
def ConvertToTypeEnum(type_enum, airflow_executor_type):
    """Converts airflow executor type string to enum.

    Args:
        type_enum: AirflowExecutorTypeValueValuesEnum, executor type enum value.
        airflow_executor_type: string, executor type string value.

    Returns:
        AirflowExecutorTypeValueValuesEnum: the executor type enum value.
    """
    return type_enum(airflow_executor_type)
04162b04719031ba6b96d981a7ffe8a82691bc31
3,641,941
import numpy


def image2array(image):
    """PIL Image to NumPy array"""
    assert image.mode in ('L', 'RGB', 'CMYK')
    arr = numpy.fromstring(image.tostring(), numpy.uint8)
    arr.shape = (image.size[1], image.size[0], len(image.getbands()))
    return arr.swapaxes(0, 2).swapaxes(1, 2).astype(numpy.float32)
bb1ba38f2d27acb63ea7ccfb600720eee3d683a3
3,641,942
from typing import Type


def enum_name_callback(ctx: 'mypy.plugin.AttributeContext') -> Type:
    """This plugin refines the 'name' attribute in enums to act as if
    they were declared to be final.

    For example, the expression 'MyEnum.FOO.name' normally is inferred
    to be of type 'str'.

    This plugin will instead make the inferred type be a 'str' where the
    last known value is 'Literal["FOO"]'. This means it would be legal to
    use 'MyEnum.FOO.name' in contexts that expect a Literal type, just like
    any other Final variable or attribute.

    This plugin assumes that the provided context is an attribute access
    matching one of the strings found in 'ENUM_NAME_ACCESS'.
    """
    enum_field_name = _extract_underlying_field_name(ctx.type)
    if enum_field_name is None:
        return ctx.default_attr_type
    else:
        str_type = ctx.api.named_generic_type('builtins.str', [])
        literal_type = LiteralType(enum_field_name, fallback=str_type)
        return str_type.copy_modified(last_known_value=literal_type)
e7b34490625ad2c8cf55ed002592ed1194f96e2f
3,641,943
def is_forward_angle(n, theta):
    """
    if a wave is traveling at angle theta from normal in a medium with index n,
    calculate whether or not this is the forward-traveling wave (i.e., the one
    going from front to back of the stack, like the incoming or outgoing waves,
    but unlike the reflected wave). For real n & theta, the criterion is simply
    -pi/2 < theta < pi/2, but for complex n & theta, it's more complicated.
    See https://arxiv.org/abs/1603.02720 appendix D. If theta is the forward
    angle, then (pi-theta) is the backward angle and vice-versa.
    """
    assert n.real * n.imag >= 0, ("For materials with gain, it's ambiguous which "
                                  "beam is incoming vs outgoing. See "
                                  "https://arxiv.org/abs/1603.02720 Appendix C.\n"
                                  "n: " + str(n) + " angle: " + str(theta))
    ncostheta = n * cos(theta)
    if abs(ncostheta.imag) > 100 * EPSILON:
        # Either evanescent decay or lossy medium. Either way, the one that
        # decays is the forward-moving wave
        answer = (ncostheta.imag > 0)
    else:
        # Forward is the one with positive Poynting vector
        # Poynting vector is Re[n cos(theta)] for s-polarization or
        # Re[n cos(theta*)] for p-polarization, but it turns out they're consistent
        # so I'll just assume s then check both below
        answer = (ncostheta.real > 0)
    # convert from numpy boolean to the normal Python boolean
    answer = bool(answer)
    # double-check the answer ... can't be too careful!
    error_string = ("It's not clear which beam is incoming vs outgoing. Weird"
                    " index maybe?\n"
                    "n: " + str(n) + " angle: " + str(theta))
    if answer is True:
        assert ncostheta.imag > -100 * EPSILON, error_string
        assert ncostheta.real > -100 * EPSILON, error_string
        assert (n * cos(theta.conjugate())).real > -100 * EPSILON, error_string
    else:
        assert ncostheta.imag < 100 * EPSILON, error_string
        assert ncostheta.real < 100 * EPSILON, error_string
        assert (n * cos(theta.conjugate())).real < 100 * EPSILON, error_string
    return answer
9d90a84be42968eebb1dd89285019ddc10a2b140
3,641,944
import os
import pkgutil


def get_data(cfg, working_dir, global_parameters, res_incl=None, res_excl=None):
    """Reads experimental measurements"""

    exp_type = global_parameters['experiment_type']

    path = os.path.dirname(__file__)
    pkgs = [
        modname
        for _, modname, ispkg in pkgutil.iter_modules([path])
        if ispkg and modname in exp_type
    ]

    if pkgs:
        pkg = max(pkgs)
    else:
        exit("\nUnknown data type {:s}"
             "\nDid you forget _cpmg, _cest, etc?"
             "\n".format(global_parameters['experiment_type']))

    reading = __import__(
        '.'.join([pkg, 'reading']), globals(), locals(), ['get_data'], -1
    )

    data = reading.read_data(cfg, working_dir, global_parameters, res_incl, res_excl)

    return data
826314fe99b6ba6dc408c7b0109536ae5fdc0acb
3,641,945
import math


def fit_cubic1(points, rotate, properties=None):
    """This function attempts to fit a given set of points to a cubic polynomial line:
    y = a3*x^3 + a2*x^2 + a1*x + a0"""
    r = mathutils.Matrix.Rotation(math.radians(rotate), 4, 'Z')
    rr = mathutils.Matrix.Rotation(math.radians(-rotate), 4, 'Z')
    Sxy = 0
    Sx = 0
    Sy = 0
    Sx2 = 0
    Sx2y = 0
    Sx3y = 0
    Sx3 = 0
    Sx4 = 0
    Sx5 = 0
    Sx6 = 0
    Sw = 0
    for p in points:
        pr = p['point'] * r
        x = pr.x
        y = pr.y
        Sxy = Sxy + x * y * p['weight']
        Sx = Sx + x * p['weight']
        Sy = Sy + y * p['weight']
        Sx2 = Sx2 + math.pow(x, 2) * p['weight']
        Sx2y = Sx2y + math.pow(x, 2) * y * p['weight']
        Sx3y = Sx3y + math.pow(x, 3) * y * p['weight']
        Sx3 = Sx3 + math.pow(x, 3) * p['weight']
        Sx4 = Sx4 + math.pow(x, 4) * p['weight']
        Sx5 = Sx5 + math.pow(x, 5) * p['weight']
        Sx6 = Sx6 + math.pow(x, 6) * p['weight']
        Sw += p['weight']

    N = Sw
    A = [[N,   Sx,  Sx2, Sx3, Sy],
         [Sx,  Sx2, Sx3, Sx4, Sxy],
         [Sx2, Sx3, Sx4, Sx5, Sx2y],
         [Sx3, Sx4, Sx5, Sx6, Sx3y]]
    xM = like_a_gauss(A)
    a0 = xM[0][4]
    a1 = xM[1][4]
    a2 = xM[2][4]
    a3 = xM[3][4]

    def line_func(x, a):
        return a[0] + a[1]*x + a[2]*math.pow(x, 2) + a[3]*math.pow(x, 3)

    points = sort_index1(points, r)
    return error_residual1(points, r, rr, properties, line_func, [a0, a1, a2, a3])
dea26481743546600bef54c58523746a638b63a5
3,641,946
import glob
import os


def tile_from_slippy_map(root, x, y, z):
    """Retrieve a single tile from a slippy map dir."""

    path = glob.glob(os.path.join(os.path.expanduser(root), z, x, y + ".*"))
    if not path:
        return None

    return mercantile.Tile(x, y, z), path[0]
5af475d01591a9f3170adecbc34d53ab26ffb1db
3,641,947
def get_labelstats_df_list(fimage_list, flabel_list):
    """loop over lists of image and label files and extract label statistics
    as pandas.DataFrame
    """
    if np.ndim(fimage_list) == 0:
        fimage_list = [fimage_list]
    if np.ndim(flabel_list) == 0:
        flabel_list = [flabel_list]

    columns = ['imagefile', 'labelfile', 'label', 'mean', 'var', 'min', 'max',
               'median', 'count', 'sum', 'boundingbox', 'voxels']
    DF = pd.DataFrame(columns=columns)
    for fimage in fimage_list:
        for flabel in flabel_list:
            df = get_labelstats_df(fimage, flabel)
            df['imagefile'] = fimage
            df['labelfile'] = flabel
            DF = DF.append(df)
    return DF
e494212975641e8c9f9b2d786077e320d9096c02
3,641,948
def index(web):
    """The web.request.params is a dictionary, pointing to falcon.Request directly."""
    name = web.request.params["name"]
    return f"Hello {name}!\n"
b717ac60d42b8161ed27f7e4156d8a5a03aea803
3,641,949
def process_object(obj):
    """
    Recursively process object loaded from json

    When the dict in appropriate(*) format is found, make object from it.
    (*) appropriate is defined in create_object function docstring.
    """
    if isinstance(obj, list):
        result_obj = []
        for elem in obj:
            result_obj.append(process_object(elem))
        return result_obj
    elif isinstance(obj, dict):
        processed_obj = {}
        for key in obj.keys():
            processed_obj[key] = process_object(obj[key])
        as_obj = obj.get(ObjSpecification.AS_OBJECT, False)
        if as_obj:
            result_obj = create_object(processed_obj)
        else:
            result_obj = processed_obj
        return result_obj
    else:
        return obj
f5410c6168d96eb153f08c824a98cf58fd80e1a0
3,641,950
def remove_role(principal, role):
    """Removes role from passed principal.

    **Parameters:**

    principal
        The principal (actor or group) from which the role is removed.

    role
        The role which is removed.
    """
    try:
        if isinstance(principal, Actor):
            ppr = PrincipalRoleRelation.objects.get(
                actor=principal, role=role, content_id=None, content_type=None)
        else:
            ppr = PrincipalRoleRelation.objects.get(
                group=principal, role=role, content_id=None, content_type=None)
    except PrincipalRoleRelation.DoesNotExist:
        return False
    else:
        ppr.delete()

    return True
78b27631ee80b42a2ee8759315ef00f490c0e86c
3,641,951
def set_var_input_validation(
    prompt="",
    predicate=lambda _: True,
    failure_description="Value is illegal",
):
    """Validating user input by predicate.

    Vars:
    - prompt: message displayed when prompting for user input.
    - predicate: lambda function to verify a condition.
    - failure_description: message displayed when predicate's condition is not met.

    Returns:
    - The value entered by the user if predicate's condition is met and after
      confirmation by the user.
    - If the predicate fails failure_description is displayed
    - If literal_eval fails an error message containing the raised exception.
    """
    while True:
        try:
            value = literal_eval(input(f"{Color.INFORMATION}{prompt}{Color.END}\n"))
            if predicate(value):
                a = literal_eval(
                    input(
                        f"{Color.INFORMATION}Is this correct: {value} ? enter 1 to confirm, 0 to retry{Color.END}\n"
                    )
                )
                if a == 1:
                    return value
            else:
                print(f"{Color.FAIL}{failure_description}{Color.END}")
        except Exception as e:
            print(f"{Color.FAIL}{e} was raised, try again{Color.END}")
40cad55c5d07a405f946e9583c69029eb2ee4e65
3,641,952
def mad(data, axis=None):
    """Mean absolute deviation"""
    return np.mean(np.abs(data - np.mean(data, axis)), axis)
955763e5ee5d29e2b2b735d584d7a84b98affc23
3,641,953
from theano import tensor as T


def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
    """Pad the 2nd and 3rd dimensions of a 4D tensor
    with "padding[0]" and "padding[1]" (resp.) zeros left and right.
    """
    assert len(padding) == 2
    assert len(padding[0]) == 2
    assert len(padding[1]) == 2
    top_pad, bottom_pad = padding[0]
    left_pad, right_pad = padding[1]

    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ' + str(data_format))

    input_shape = x.shape
    if data_format == 'channels_first':
        output_shape = (input_shape[0],
                        input_shape[1],
                        input_shape[2] + top_pad + bottom_pad,
                        input_shape[3] + left_pad + right_pad)
        output = T.zeros(output_shape)
        indices = (slice(None),
                   slice(None),
                   slice(top_pad, input_shape[2] + top_pad),
                   slice(left_pad, input_shape[3] + left_pad))
    else:
        output_shape = (input_shape[0],
                        input_shape[1] + top_pad + bottom_pad,
                        input_shape[2] + left_pad + right_pad,
                        input_shape[3])
        output = T.zeros(output_shape)
        indices = (slice(None),
                   slice(top_pad, input_shape[1] + top_pad),
                   slice(left_pad, input_shape[2] + left_pad),
                   slice(None))
    y = T.set_subtensor(output[indices], x)
    y._keras_shape = output_shape
    return y
ba57f1f8462d7c3212379b9678364c5e2e2e6a5b
3,641,954
def doms_hit_pass_threshold(mc_hits, threshold, pass_k40):
    """ checks if there are at least <<threshold>> doms hit by monte carlo hits.
    returns true or false"""
    if threshold == 0:
        return True
    if len(mc_hits) == 0:
        return bool(pass_k40)
    dom_id_set = set()
    for hit in mc_hits:
        dom_id = pmt_id_to_dom_id(hit.pmt_id)
        dom_id_set.add(dom_id)
    if len(dom_id_set) >= threshold:
        return True
    return False
9f4157d202e2587232b13cfc9873400ab6ee6e5b
3,641,955
import os
import difflib


def differ_filelist_with_two_dirs(s_p_dir, d_p_dir, filelist, mode='0'):
    """
    Compare the file lists under two directories using the difflib library and
    return the raw diff output.
    """
    output = ''
    for f_path in filelist:
        s_file_path = os.path.join(s_p_dir, f_path) if mode != '1' else None
        d_file_path = os.path.join(d_p_dir, f_path) if mode != '2' else None
        fd1 = open(s_file_path, 'rbU') if s_file_path else None
        fd2 = open(d_file_path, 'rbU') if d_file_path else None
        fromlines = fd1.readlines() if fd1 else ''
        tolines = fd2.readlines() if fd2 else ''
        if fd1:
            fd1.close()
        if fd2:
            fd2.close()
        dresult = difflib.unified_diff(fromlines, tolines, f_path, f_path)
        for line in dresult:
            line = patch_line(line)
            if line.startswith('---'):
                line = '\n' + line
            output += line
    return output
bb3b39a894c465ea9df78bb7e6e10016deaaf727
3,641,956
def _parseLocalVariables(line):
    """Accepts a single line in Emacs local variable declaration format and
    returns a dict of all the variables {name: value}.
    Raises ValueError if 'line' is in the wrong format.

    See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html
    """
    paren = '-*-'
    start = line.find(paren) + len(paren)
    end = line.rfind(paren)
    if start == -1 or end == -1:
        raise ValueError("%r not a valid local variable declaration" % (line,))
    items = line[start:end].split(';')
    localVars = {}
    for item in items:
        if len(item.strip()) == 0:
            continue
        split = item.split(':')
        if len(split) != 2:
            raise ValueError("%r contains invalid declaration %r"
                             % (line, item))
        localVars[split[0].strip()] = split[1].strip()
    return localVars
39dc5130f47589e111e4b894cf293d446ac0eac0
3,641,957
import numpy as np
from theano import tensor as T


def NLL(mu, sigma, mixing, y):
    """Computes the mean of negative log likelihood for P(y|x)

    y = T.matrix('y') # (minibatch_size, output_size)
    mu = T.tensor3('mu') # (minibatch_size, output_size, n_components)
    sigma = T.matrix('sigma') # (minibatch_size, n_components)
    mixing = T.matrix('mixing') # (minibatch_size, n_components)
    """

    # multivariate Gaussian
    exponent = -0.5 * T.inv(sigma) * T.sum((y.dimshuffle(0, 1, 'x') - mu)**2, axis=1)
    normalizer = (2 * np.pi * sigma)
    exponent = exponent + T.log(mixing) - (y.shape[1] * .5) * T.log(normalizer)
    max_exponent = T.max(exponent, axis=1, keepdims=True)
    mod_exponent = exponent - max_exponent
    gauss_mix = T.sum(T.exp(mod_exponent), axis=1)
    log_gauss = max_exponent + T.log(gauss_mix)
    res = -T.mean(log_gauss)
    return res
60a27f48d404af860cdbeac9e017a3df5ebca450
3,641,958
from typing import Optional
import sqlite3


def get_non_subscribed_trainers(user) -> Optional[str]:
    """ returns all trainers the user is not subscribed to """
    conn = get_db()
    error = None
    try:
        trainers = conn.execute("""SELECT distinct u_name
                                   FROM user, trainer
                                   where t_userID = u_userID and u_trainer = 1 and u_name NOT IN (
                                       SELECT u2.u_name
                                       FROM user u1, user u2, trainer, customer, subscription
                                       where c_userID = u1.u_userID and u1.u_name = ?
                                             and su_customerID = c_customerID
                                             and t_trainerID = su_trainerID
                                             and u2.u_userID = t_userID
                                   )
                                   Order by u_name COLLATE NOCASE""", (user,)).fetchall()
        # print(trainers)
        close_db()
        return trainers
    except sqlite3.Error as error:
        print(error)
        return error
6c728e1bd805047e20224c45fab81f546e291d27
3,641,959
def __clean_datetime_value(datetime_string):
    """Given"""
    if datetime_string is None:
        return datetime_string

    if isinstance(datetime_string, str):
        x = datetime_string.replace("T", " ")
        return x.replace("Z", "")

    raise TypeError("Expected datetime_string to be of type string (or None)")
77afef31056365a47ea821de7a4979cb061920dc
3,641,960
def get_metric_by_name(metric: str, *args, **kwargs) -> Metric:
    """Returns metric using given `metric`, `args` and `kwargs`

    Args:
        metric (str): name of the metric

    Returns:
        Metric: requested metric as Metric
    """
    assert metric in __metric_mapper__, "given metric {} is not found".format(metric)
    return __metric_mapper__[metric](*args, **kwargs)
b1ecb1fe1fad330570abcf9abd3f12abd2a18193
3,641,961
def Square(inputs, **kwargs):
    """Calculate the square of input.

    Parameters
    ----------
    inputs : Tensor
        The input tensor.

    Returns
    -------
    Tensor
        The square result.
    """
    CheckInputs(inputs, 1)
    arguments = ParseArguments(locals())

    output = Tensor.CreateOperator(nout=1, op_type='Square', **arguments)

    if inputs.shape is not None:
        output.shape = inputs.shape[:]

    return output
5869ab81460b0a3b56194ec9829bf6ec36716b9a
3,641,962
def make_call(rpc_name, request, retries=None, timeout=None):
    """Make a call to the Datastore API.

    Args:
        rpc_name (str): Name of the remote procedure to call on Datastore.
        request (Any): An appropriate request object for the call, eg,
            `entity_pb2.LookupRequest` for calling ``Lookup``.
        retries (int): Number of times to potentially retry the call. If
            :data:`None` is passed, will use :data:`_retry._DEFAULT_RETRIES`.
            If :data:`0` is passed, the call is attempted only once.
        timeout (float): Timeout, in seconds, to pass to gRPC call. If
            :data:`None` is passed, will use :data:`_DEFAULT_TIMEOUT`.

    Returns:
        tasklets.Future: Future for the eventual response for the API call.
    """
    api = stub()
    method = getattr(api, rpc_name)

    if retries is None:
        retries = _retry._DEFAULT_RETRIES

    if timeout is None:
        timeout = _DEFAULT_TIMEOUT

    @tasklets.tasklet
    def rpc_call():
        call = method.future(request, timeout=timeout)
        rpc = _remote.RemoteCall(call, "{}({})".format(rpc_name, request))
        log.debug(rpc)
        log.debug("timeout={}".format(timeout))

        result = yield rpc
        raise tasklets.Return(result)

    if retries:
        rpc_call = _retry.retry_async(rpc_call, retries=retries)

    return rpc_call()
f360aebb119b8f6e05207f5a7add1a6a16059d41
3,641,963
def render_table(data, col_width=3.0, row_height=0.625, font_size=14, header_color='#40466e',
                 row_colors=['#f1f1f2', 'w'], edge_color='w', bbox=[0, 0, 1, 1], header_columns=0,
                 ax=None, **kwargs):
    """[Taken from ref: https://stackoverflow.com/questions/19726663/how-to-save-the-pandas-dataframe-series-data-as-a-figure]
    [Prints given dataframe in a nice format, that is easy to save]

    Parameters
    ----------
    data : [data frame]
        [data frame]
    col_width : float, optional
        [column width], by default 3.0
    row_height : float, optional
        [row height], by default 0.625
    font_size : int, optional
        [font size], by default 14
    header_color : str, optional
        [header color], by default '#40466e'
    row_colors : list, optional
        [row color], by default ['#f1f1f2', 'w']
    edge_color : str, optional
        [edge color], by default 'w'
    bbox : list, optional
        [bbox ], by default [0, 0, 1, 1]
    header_columns : int, optional
        [header columns], by default 0
    ax : [type], optional
        [plotting table, by default None

    Returns
    -------
    [object]
        [figure]
    """
    if ax is None:
        size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
        fig, ax = plt.subplots(figsize=size)
        ax.axis('off')

    mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs)

    mpl_table.auto_set_font_size(False)
    mpl_table.set_fontsize(font_size)

    for k, cell in mpl_table._cells.items():
        cell.set_edgecolor(edge_color)
        if k[0] == 0 or k[1] < header_columns:
            cell.set_text_props(weight='bold', color='w')
            cell.set_facecolor(header_color)
        else:
            cell.set_facecolor(row_colors[k[0] % len(row_colors)])
    return ax.get_figure(), ax
597d8732ca86896d02d0df30d2a0808b88f02873
3,641,964
def segment_image(class_colours, pixel_classes, height, width, bg_alpha=0, fg_alpha=255):
    """visualise pixel classes"""
    segment_colours = np.reshape(class_colours[pixel_classes], (height, width, 3))
    segment_colours = segment_colours.astype("uint8")
    img = Image.fromarray(segment_colours)
    # set backgroud/unlabeled pixel alpha to 0.
    # note to self: do with numpy
    img = img.convert("RGBA")
    arr = np.array(img)
    arr = np.reshape(arr, (height*width, 4))
    background = np.where(pixel_classes == 0)
    arr[background, 3] = bg_alpha
    background = np.where(pixel_classes > 0)
    arr[background, 3] = fg_alpha
    arr = np.reshape(arr, (height, width, 4))
    return Image.fromarray(arr)
2dd1f70342cc101d3cf37d706ede8c55ded91629
3,641,965
def cross_replica_average(inputs, num_shards=None, num_shards_per_group=None,
                          physical_shape=None, tile_shape=None,
                          use_spatial_partitioning=False):
    """Customized cross replica sum op."""
    # if num_shards_per_group is defined, apply distributed batch norm.
    group_assignment = None

    if num_shards_per_group > 0:
        if num_shards % num_shards_per_group != 0:
            raise ValueError(
                'num_shards: %d mod num_shards_per_group: %d, should be 0' %
                (num_shards, num_shards_per_group))
        num_groups = num_shards // num_shards_per_group
        if physical_shape is not None and tile_shape is not None:
            if use_spatial_partitioning:
                group_assignment = spatial_partitioning_group_assignment(
                    physical_shape, tile_shape, num_groups)
            else:
                group_assignment = normal_group_assignment(physical_shape,
                                                           tile_shape, num_groups)
        else:
            group_assignment = [[  # pylint: disable=g-complex-comprehension
                x for x in range(num_shards) if x // num_shards_per_group == y
            ] for y in range(num_groups)]
    return tpu_ops.cross_replica_sum(inputs, group_assignment) / math_ops.cast(
        num_shards_per_group, inputs.dtype)
5486eae34e2e25966343a5e115541de7c734ec98
3,641,966
def run_length_to_bitstream(rl: np.ndarray, values: np.ndarray, v_high: int, v_low: int) -> np.ndarray:
    """Do run length DECODING and map low/high signal to logic 0/1.
    Supposed to leave middle values untouched.

    [1,2,1,1,1]
    [7,1,7,1,5]  -->  [1 0 0 1 0 5]

    :param rl: Array of run lengths
    :param values: Array of corresponding values (positive ints)
    :param v_high: Value that will be mapped to 1
    :param v_low: Value that will be mapped to 0
    :return: Array of hopefully only {0,1} with runs re-expanded.
    :raises: ValueError if rl not exactly same size as values.
    """
    rl = np.asarray(rl)  # so that technically it works on lists
    values = np.asarray(values)
    if rl.shape != values.shape:
        raise ValueError("rl and values shapes unequal: %s %s"
                         % (str(rl.shape), str(values.shape)))
    high_shifts = np.where(values == v_high, 1 - v_high, 0)
    low_shifts = np.where(values == v_low, 0 - v_low, 0)
    values_edited = values + high_shifts + low_shifts
    # fixme exception (or warn?) if values not in the set {v_high, v_low}
    return np.repeat(values_edited, rl)
3b9ed2ce753f6adfcf8197886b38595b2eac9978
3,641,967
def edge_naming(col_list, split_collections=True):
    """
    This function normalize the naming of edges collections
    If split_collections is True an edge collection name will be generated
    between each listed collection in order.
    So if col_list = [A, B, C] result will be [A__B, B__C]
    :param col_list: ordered list of collection names
    :return: an array of edge collection names
    """
    result = []
    name = ""
    for v in col_list:
        if name == "":
            name = v
        else:
            name = name + EDGE_MARKER + v
            if split_collections:
                result.append(name)
                name = v
    if len(result) == 0:
        result.append(name)
    return result
47e0c7253a5a9f1c0df9488c54cca38b2264539f
3,641,968
def interactive_visual_difference_from_threshold_by_day(ds_ext):
    """
    Returns:
    1) xarray DataArray, with three variables, for each day in the dataset
        i) Highest value difference from the threshold, across the area
        ii) Lowest value difference from the threshold, across the area
        iii) Average difference from the threshold, from all pixels in the area
    2) bokeh pane for interactive visualization.
    """
    result_type = ds_ext.attrs['result_type']
    temp_var = 'tasmax' if result_type == 'max' else 'tasmin'
    diff_var = 'above_threshold' if result_type == 'max' else 'below_threshold'

    threshold_diff_high = ds_ext[diff_var].max(dim=['lat', 'lon'], skipna=True)
    threshold_diff_low = ds_ext[diff_var].min(dim=['lat', 'lon'], skipna=True)
    threshold_diff_avg = ds_ext[diff_var].mean(dim=['lat', 'lon'], skipna=True)

    dt_index = threshold_diff_high.indexes['time'].to_datetimeindex()

    difference_from_threshold = xr.Dataset(
        data_vars={'threshold_diff_high': (['time'], threshold_diff_high.to_numpy()),
                   'threshold_diff_low': (['time'], threshold_diff_low.to_numpy()),
                   'threshold_diff_avg': (['time'], threshold_diff_avg.to_numpy())},
        coords=dict(time=dt_index))

    difference_from_threshold_plot = difference_from_threshold.hvplot(
        y=['threshold_diff_low', 'threshold_diff_high', 'threshold_diff_avg'],
        value_label='difference_from_threshold',
        alpha=0.7)

    pane = pn.panel(difference_from_threshold_plot)

    return difference_from_threshold, pane
2386f3564a105a44ff3cd12979f52e296e8293ac
3,641,969
def _parse_compression_method(data):
    """Parses the value of "method" extension parameter."""
    return common.parse_extensions(data)
71901780aec98d8818a5296aa7b186c79b0f0e7b
3,641,970
def my_distance(drij):
    """
    Compute length of displacement vector drij
    assume drij already accounts for PBC

    Args:
        drij (np.array) : vector(s) of length 3
    Returns:
        float: length (distance) of vector(s)
    """
    return np.linalg.norm(drij, axis=0)
45a29d7335e72dac68ddefcbf32b55b57e87b980
3,641,971
from typing import List


def show(list_id):
    """Get single list via id."""
    data = db_session.query(List).filter(List.id == list_id).first()
    if '/json' in request.path:
        return jsonify(data.as_dict())
    else:
        return render_template('list/show.html', list=data)
9ed8de0dfc5621546dafe447fa83d33c00fa6b7e
3,641,972
import os


def create_folder(path):
    """
    Creates a folder if not already exists

    Args:
        :param path: The folder to be created

    Returns
        :return: True if folder was newly created, false if folder already exists
    """
    if not os.path.exists(path):
        os.makedirs(path)
        return True
    else:
        return False
9b6cfaed256001aa15c15cb535fce54ddcf20bc8
3,641,973
def extract_smaps(kspace, low_freq_percentage=8, background_thresh=4e-6):
    """Extract raw sensitivity maps for kspaces

    This function will first select a low frequency region in all the kspaces,
    then Fourier invert it, and finally perform a normalisation by the root
    sum-of-square. kspace has to be of shape: nslices x ncoils x height x width

    Arguments:
        kspace (tf.Tensor): the kspace whose sensitivity maps you want extracted.
        low_freq_percentage (int): the low frequency region to consider for
            sensitivity maps extraction, given as a percentage of the width of
            the kspace. In fastMRI, it's 8 for an acceleration factor of 4, and
            4 for an acceleration factor of 8. Defaults to 8.
        background_thresh (float): unused for now, will later allow to have
            thresholded sensitivity maps.

    Returns:
        tf.Tensor: extracted raw sensitivity maps.
    """
    n_slices = tf.shape(kspace)[0]
    if n_slices > 0:
        n_low_freq = tf.cast(tf.shape(kspace)[-2:] * low_freq_percentage / 100, tf.int32)
        center_dimension = tf.cast(tf.shape(kspace)[-2:] / 2, tf.int32)
        low_freq_lower_locations = center_dimension - tf.cast(n_low_freq / 2, tf.int32)
        low_freq_upper_locations = center_dimension + tf.cast(n_low_freq / 2, tf.int32)
        ###
        # NOTE: the following stands for in numpy:
        # low_freq_mask = np.zeros_like(kspace)
        # low_freq_mask[
        #     ...,
        #     low_freq_lower_locations[0]:low_freq_upper_locations[0],
        #     low_freq_lower_locations[1]:low_freq_upper_locations[1]
        # ] = 1
        x_range = tf.range(low_freq_lower_locations[0], low_freq_upper_locations[0])
        y_range = tf.range(low_freq_lower_locations[1], low_freq_upper_locations[1])
        X_range, Y_range = tf.meshgrid(x_range, y_range)
        X_range = tf.reshape(X_range, (-1,))
        Y_range = tf.reshape(Y_range, (-1,))
        low_freq_mask_indices = tf.stack([X_range, Y_range], axis=-1)
        # we have to transpose because only the first dimension can be indexed in
        # scatter_nd
        scatter_nd_perm = [2, 3, 0, 1]
        low_freq_mask = tf.scatter_nd(
            indices=low_freq_mask_indices,
            updates=tf.ones([
                tf.size(X_range),
                tf.shape(kspace)[0],
                tf.shape(kspace)[1]],
            ),
            shape=[tf.shape(kspace)[i] for i in scatter_nd_perm],
        )
        low_freq_mask = tf.transpose(low_freq_mask, perm=scatter_nd_perm)
        ###
        low_freq_kspace = kspace * tf.cast(low_freq_mask, kspace.dtype)
        coil_image_low_freq = tf_ortho_ifft2d(low_freq_kspace)
        # no need to norm this since they all have the same norm
        low_freq_rss = tf.norm(coil_image_low_freq, axis=1)
        coil_smap = coil_image_low_freq / low_freq_rss[:, None]
        # for now we do not perform background removal based on low_freq_rss
        # could be done with 1D k-means or fixed background_thresh, with tf.where
    else:
        coil_smap = tf.zeros_like(kspace, dtype=kspace.dtype)
    return coil_smap
e40ec21c8e353e6352b65f35710d83857cc6c124
3,641,974
from scipy import io


def tiff_to_mat_conversion(ms_path, pan_path, save_path, ms_initial_point=(0, 0),
                           ms_final_point=(0, 0), ratio=4):
    """
    Generation of *.mat file, starting from the native GeoTiFF extension.
    Also, a crop tool is provided to analyze only small parts of the image.

    Parameters
    ----------
    ms_path : str
        The path of the Multi-Spectral image
    pan_path : str
        The path of the Panchromatic file
    save_path : str
        The destination mat file
    ms_initial_point : tuple
        Upper left point for image cropping. The point must be expressed in pixel
        coordinates, as (x,y), where (0,0) is precisely the point at the top left.
    ms_final_point : tuple
        Bottom right point for image cropping. The point must be expressed in pixel
        coordinates, as (x,y), where (0,0) is precisely the point at the top left.
    ratio : int
        The resolution scale which elapses between MS and PAN.

    Return
    ------
    I_in : Dictionary
        The dictionary, composed of MS and Pan images.
    """
    ms = gdal.Open(ms_path)
    ms = ms.ReadAsArray()
    ms = np.moveaxis(ms, 0, -1)

    pan = gdal.Open(pan_path)
    pan = pan.ReadAsArray()

    if ms_final_point[0] != 0 and ms_final_point[1] != 0:
        ms = ms[ms_initial_point[1]:ms_final_point[1], ms_initial_point[0]:ms_final_point[0], :]
        pan = pan[ms_initial_point[1] * ratio:ms_final_point[1] * ratio,
                  ms_initial_point[0] * ratio:ms_final_point[0] * ratio]

    io.savemat(save_path, {'I_MS_LR': ms, 'I_PAN': pan})

    I_in = {'I_MS_LR': ms, 'I_PAN': pan}

    return I_in
c0e1a03f82b97ecc8a3e33a1b84624b53c3efae7
3,641,975
def is_member(musicians, musician_name):
    """Return true if named musician is in musician list; otherwise return false.

    Parameters:
        musicians (list): list of musicians and their instruments
        musician_name (str): musician name

    Returns:
        bool: True if match is made; otherwise False.
    """
    i = 0  # counter
    while i < len(musicians):  # guard against infinite loop
        musician = musicians[i].split(', ')[0].lower()
        if musician_name.lower() == musician:
            return True  # preferable to break statement
        i += 1  # MUST INCREMENT
    return False
6ef5b9bbccb17d9b97a85e3af7789e059829184b
3,641,976
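A quick usage check (names invented); the match is case-insensitive on the musician name.

musicians = ['John Lennon, guitar', 'Ringo Starr, drums']
print(is_member(musicians, 'ringo starr'))  # True
print(is_member(musicians, 'Pete Best'))    # False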
from typing import Tuple def _parse_client_dict(dataset: tf.data.Dataset, string_max_length: int) -> Tuple[tf.Tensor, tf.Tensor]: """Parses the dictionary in the input `dataset` to key and value lists. Args: dataset: A `tf.data.Dataset` that yields `OrderedDict`. In each `OrderedDict` there are two key, value pairs: `DATASET_KEY`: A `tf.string` representing a string in the dataset. `DATASET_VALUE`: A rank 1 `tf.Tensor` with `dtype` `tf.int64` representing the value associate with the string. string_max_length: The maximum length of the strings. If any string is longer than `string_max_length`, a `ValueError` will be raised. Returns: input_strings: A rank 1 `tf.Tensor` containing the list of strings in `dataset`. string_values: A rank 2 `tf.Tensor` containing the values of `input_strings`. Raises: ValueError: If any string in `dataset` is longer than string_max_length. """ parsed_dict = data_processing.to_stacked_tensor(dataset) input_strings = parsed_dict[DATASET_KEY] string_values = parsed_dict[DATASET_VALUE] tf.debugging.Assert( tf.math.logical_not( tf.math.reduce_any( tf.greater(tf.strings.length(input_strings), string_max_length))), data=[input_strings], name='CHECK_STRING_LENGTH') return input_strings, string_values
bd5761396ee661d91898859aa23858da7f674c76
3,641,977
import re


def init(command):
    """ We assume the first command from NASA is the rover position.
        Assumptions are bad. We know that the command consists of two numbers
        and a heading letter, separated by spaces. Parse the command so it
        matches "D D C" (digit, digit, compass heading).
    """
    if re.match(r'^[0-9]\s[0-9]\s[a-zA-Z]$', command):
        pos = command.split(" ")
        position['x'] = pos[0]
        position['y'] = pos[1]
        position['heading'] = pos[2]
        print(position)
        return position
    return False
282bb4bababeb24e0fe8cb1f6f4a57d36b2339dd
3,641,978
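A small usage sketch; it assumes the module-level position dict used by init is defined, so one is created here for illustration.

position = {}
rover = init("1 2 N")      # prints {'x': '1', 'y': '2', 'heading': 'N'}; values stay as strings
print(init("bad input"))   # False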
def add_small_gap_multiply(original_wf, gap_cutoff, density_multiplier, fw_name_constraint=None): """ In all FWs with specified name constraints, add a 'small_gap_multiply' parameter that multiplies the k-mesh density of compounds with gap < gap_cutoff by density multiplier. Useful for increasing the k-point mesh for metallic or small gap systems. Note that this powerup only works on FireWorks with the appropriate WriteVasp* tasks that accept the small_gap_multiply argument... Args: original_wf (Workflow) gap_cutoff (float): Only multiply k-points for materials with gap < gap_cutoff (eV) density_multiplier (float): Multiply k-point density by this amount fw_name_constraint (str): Only apply changes to FWs where fw_name contains this substring. Returns: Workflow """ for idx_fw, idx_t in get_fws_and_tasks(original_wf, fw_name_constraint=fw_name_constraint, task_name_constraint="WriteVasp"): original_wf.fws[idx_fw].tasks[idx_t]["small_gap_multiply"] = [gap_cutoff, density_multiplier] return original_wf
4573ee33b1be21adfcca1e8be3337b1df51ab737
3,641,979
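A hedged usage sketch, assuming an atomate/FireWorks workflow named original_wf was built elsewhere (e.g. by a get_wf_* factory); the cutoff and multiplier values are illustrative.

# Double the k-point density in all static FWs for materials with a gap below 0.5 eV.
modified_wf = add_small_gap_multiply(original_wf, gap_cutoff=0.5, density_multiplier=2.0,
                                     fw_name_constraint="static")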
def _get_backend(config_backend): """Extract the backend class from the command line arguments.""" if config_backend == 'gatttool': backend = GatttoolBackend elif config_backend == 'bluepy': backend = BluepyBackend elif config_backend == 'pygatt': backend = PygattBackend else: raise Exception('unknown backend: {}'.format(config_backend)) return backend
86374b3c19cbb3fa26434c18720288edf1c4fbe8
3,641,980
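Usage sketch for the backend selector above; the strings mirror the accepted config values.

backend_cls = _get_backend('bluepy')      # -> BluepyBackend class
# Any other string raises: Exception('unknown backend: ...')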
from time import time

import numpy as np


def kern_CUDA_sparse(nsteps, dX, rho_inv, context, phi, grid_idcs,
                     mu_egrid=None, mu_dEdX=None, mu_lidx_nsp=None,
                     prog_bar=None):
    """`NVIDIA CUDA cuSPARSE <https://developer.nvidia.com/cusparse>`_ implementation
    of forward-euler integration.

    Function requires a working :mod:`accelerate` installation.

    Args:
      nsteps (int): number of integration steps
      dX (numpy.array[nsteps]): vector of step-sizes :math:`\\Delta X_i` in g/cm**2
      rho_inv (numpy.array[nsteps]): vector of density values :math:`\\frac{1}{\\rho(X_i)}`
      context (object): GPU computation context holding the interaction matrix
        :eq:`int_matrix` and decay matrix :eq:`dec_matrix`
      phi (numpy.array): initial state vector :math:`\\Phi(X_0)`
      grid_idcs (list): step indices at which intermediate solutions are stored
      mu_egrid (numpy.array): muon energy grid
      mu_dEdX (numpy.array): muon energy loss per unit grammage on ``mu_egrid``
      mu_lidx_nsp (tuple): lower index of the muon block in the state vector and
        number of muon species
      prog_bar (object,optional): handle to :class:`ProgressBar` object
    Returns:
      numpy.array: state vector :math:`\\Phi(X_{nsteps})` after integration, and the
      list of intermediate solutions stored at ``grid_idcs``
    """

    c = context
    c.set_phi(phi)
    enmuloss = config['enable_muon_energy_loss']
    de = mu_egrid.size
    mu_egrid = mu_egrid.astype(c.fl_pr)
    muloss_min_step = config['muon_energy_loss_min_step']
    lidx, nmuspec = mu_lidx_nsp
    # Accumulate at least a few g/cm2 for energy loss steps
    # to avoid numerical errors
    dXaccum = 0.

    grid_step = 0
    grid_sol = []

    start = time()

    for step in range(nsteps):
        if prog_bar and (step % 5 == 0):
            prog_bar.update(step)
        c.do_step(rho_inv[step], dX[step])

        dXaccum += dX[step]

        if enmuloss and (dXaccum > muloss_min_step or step == nsteps - 1):
            # Download current solution vector to host
            phc = c.get_phi()
            for nsp in range(nmuspec):
                phc[lidx + de * nsp:lidx + de * (nsp + 1)] = np.interp(
                    mu_egrid, mu_egrid + mu_dEdX * dXaccum,
                    phc[lidx + de * nsp:lidx + de * (nsp + 1)])
            # Upload changed vector back..
            c.set_phi(phc)
            dXaccum = 0.

        if (grid_idcs and grid_step < len(grid_idcs)
                and grid_idcs[grid_step] == step):
            grid_sol.append(c.get_phi())
            grid_step += 1

    if dbg:
        print("Performance: {0:6.2f}ms/iteration".format(
            1e3 * (time() - start) / float(nsteps)))

    return c.get_phi(), grid_sol
74b8d93f867fed536b3ba1f1f4f0fc6e33e3efe8
3,641,981
def _to_sequence(x): """shape batch of images for input into GPT2 model""" x = x.view(x.shape[0], -1) # flatten images into sequences x = x.transpose(0, 1).contiguous() # to shape [seq len, batch] return x
bb3b0bb478c924b520bf7bf991a028cf8aaea25f
3,641,982
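A usage sketch with PyTorch (assumed to be the tensor library here, given the .view/.transpose/.contiguous calls): a batch of four 28x28 images becomes a [784, 4] sequence.

import torch

batch = torch.randn(4, 1, 28, 28)   # [batch, channels, height, width]
seq = _to_sequence(batch)
print(seq.shape)                    # torch.Size([784, 4]) -> [seq len, batch]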
from scipy.io import wavfile


def read_wav_kaldi(wav_file_path: str) -> WaveData:
    """Read a given wave file to a Kaldi readable format.

    Args:
        wav_file_path: Path to a .wav file.

    Returns:
        wd: A Kaldi-readable WaveData object.
    """
    # Read in as np array, not memmap.
    fs, wav = wavfile.read(wav_file_path, False)
    wd = read_wav_kaldi_internal(wav, fs)
    return wd
50b2f5a848d387d4b1ca2764b7d30178d7ec5e28
3,641,983
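A hypothetical usage line; 'utterance.wav' is a placeholder path, and read_wav_kaldi_internal/WaveData come from the surrounding module.

wave_data = read_wav_kaldi('utterance.wav')
# wave_data is a Kaldi WaveData object ready for feature extraction.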
def _testCheckSums(tableDirectory): """ >>> data = "0" * 44 >>> checkSum = calcTableChecksum("test", data) >>> test = [ ... dict(data=data, checkSum=checkSum, tag="test") ... ] >>> bool(_testCheckSums(test)) False >>> test = [ ... dict(data=data, checkSum=checkSum+1, tag="test") ... ] >>> bool(_testCheckSums(test)) True """ errors = [] for entry in tableDirectory: tag = entry["tag"] checkSum = entry["checkSum"] data = entry["data"] shouldBe = calcTableChecksum(tag, data) if checkSum != shouldBe: errors.append("Invalid checksum for the %s table." % tag) return errors
192785ca352eec4686e07f2f103283f6499b656f
3,641,984
import numpy as np
import scipy as sp
from sklearn.feature_extraction import image


def read_img(path):
    """ Read an image and convert it to a graph (adjacency matrix) """
    # For color images, use only one of the color channels
    im = sp.misc.imread(path)[:, :, 2]
    im = im / 255.
    # If the computation is too slow, the following line can be used to
    # shrink the image
    # im = sp.misc.imresize(im, 0.10) / 255.
    # Compute the image gradient, i.e. the difference between neighboring pixels
    graph = image.img_to_graph(im)
    beta = 20
    # Compute the adjacency matrix
    graph.data = np.exp(-beta * graph.data / graph.data.std())
    return im, graph
41e4d000f9a70a6b6e787c08bcced4083178da11
3,641,985
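A short usage sketch; 'photo.png' is a placeholder path to an RGB image, and the returned sparse graph can feed, for example, scikit-learn's spectral clustering.

im, graph = read_img('photo.png')
print(im.shape)      # (H, W) intensity array scaled to [0, 1]
print(graph.shape)   # (H*W, H*W) sparse adjacency matrix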
def createmarker(name=None, source='default', mtype=None, size=None, color=None, priority=None, viewport=None, worldcoordinate=None, x=None, y=None, projection=None): """%s :param name: Name of created object :type name: `str`_ :param source: A marker, or string name of a marker :type source: `str`_ :param mtype: Specifies the type of marker, i.e. "dot", "circle" :type mtype: `str`_ :param size: :type size: `int`_ :param color: A color name from the `X11 Color Names list <https://en.wikipedia.org/wiki/X11_color_names>`_, or an integer value from 0-255, or an RGB/RGBA tuple/list (e.g. (0,100,0), (100,100,0,50)) :type color: `str`_ or int :param priority: The layer on which the marker will be drawn. :type priority: `int`_ :param viewport: 4 floats between 0 and 1 which specify the area that X/Y values are mapped to inside of the canvas. :type viewport: `list`_ :param worldcoordinate: List of 4 floats (xmin, xmax, ymin, ymax) :type worldcoordinate: `list`_ :param x: List of lists of x coordinates. Values must be between worldcoordinate[0] and worldcoordinate[1]. :type x: `list`_ :param y: List of lists of y coordinates. Values must be between worldcoordinate[2] and worldcoordinate[3]. :type y: `list`_ :returns: A secondary marker method :rtype: vcs.marker.Tm """ name, source = check_name_source(name, source, 'marker') mrk = marker.Tm(name, source) if (mtype is not None): mrk.type = mtype if (size is not None): mrk.size = size if (color is not None): mrk.color = color if (priority is not None): mrk.priority = priority if (viewport is not None): mrk.viewport = viewport if (worldcoordinate is not None): mrk.worldcoordinate = worldcoordinate if (x is not None): mrk.x = x if (y is not None): mrk.y = y if (projection is not None): mrk.projection = projection return mrk
7afaec1d24aad89b85974c509cf7c0ed165f733b
3,641,986
import spacy
from functools import partial
from typing import List
from typing import Optional
from typing import Set
from typing import Callable


def get_parser(disable: List[str] = None,
               lang: str = 'en',
               merge_terms: Optional[Set] = None,
               max_sent_len: Optional[int] = None) -> Callable:
    """spaCy clinical text parser

    Parameters
    ----------
    disable
        spaCy pipeline components to disable (defaults to ner, parser, tagger, lemmatizer)
    lang
        spaCy language/model name passed to ``spacy.load``
    merge_terms
        set of terms handed to the sentence-boundary rules so they are kept together
    max_sent_len
        maximum sentence length used when segmenting sentences

    Returns
    -------
    Callable
        configured spaCy pipeline
    """
    disable = ["ner", "parser", "tagger", "lemmatizer"] if not disable \
        else disable
    merge_terms = {} if not merge_terms else merge_terms

    nlp = spacy.load(lang, disable=disable)
    nlp.tokenizer = ct_tokenizer(nlp)

    sbd_func = partial(ct_sbd_rules,
                       merge_terms=merge_terms,
                       max_sent_len=max_sent_len)

    sbd = SentenceSegmenter(nlp.vocab, strategy=sbd_func)
    nlp.add_pipe(sbd)
    return nlp
175c4ea51417dc02b4a7f6f5a0c512464bd252c2
3,641,987
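A usage sketch, assuming an 'en' spaCy model is installed and the ct_tokenizer/ct_sbd_rules helpers referenced above are available in the same module; the example text is invented.

nlp = get_parser(lang='en', max_sent_len=100)
doc = nlp("Pt c/o chest pain x 2 days. Hx of HTN and DM2.")
for sent in doc.sents:
    print(sent.text)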
def block_device_mapping_get_all_by_instance(context, instance_uuid, use_slave=False): """Get all block device mapping belonging to an instance.""" return IMPL.block_device_mapping_get_all_by_instance(context, instance_uuid, use_slave)
16fc00068ad87d76831044d626a88960a0278817
3,641,988
import cv2
import numpy as np


def add_rain(img, slant, drop_length, drop_width, drop_color, blur_value, brightness_coefficient, rain_drops):
    """
    From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library

    Args:
        img (np.uint8): input RGB image
        slant (int): horizontal shift of each drop, in pixels
        drop_length: length of each drop along y, in pixels
        drop_width: thickness of each drop, in pixels
        drop_color: color of the drops
        blur_value (int): rainy views are blurry
        brightness_coefficient (float): rainy days are usually shady
        rain_drops: list of (x, y) drop origins

    Returns:
        np.ndarray: image with simulated rain

    """
    non_rgb_warning(img)

    input_dtype = img.dtype
    needs_float = False

    if input_dtype == np.float32:
        img = from_float(img, dtype=np.dtype("uint8"))
        needs_float = True
    elif input_dtype not in (np.uint8, np.float32):
        raise ValueError("Unexpected dtype {} for RandomRain augmentation".format(input_dtype))

    image = img.copy()

    for (rain_drop_x0, rain_drop_y0) in rain_drops:
        rain_drop_x1 = rain_drop_x0 + slant
        rain_drop_y1 = rain_drop_y0 + drop_length

        cv2.line(image, (rain_drop_x0, rain_drop_y0), (rain_drop_x1, rain_drop_y1), drop_color, drop_width)

    image = cv2.blur(image, (blur_value, blur_value))  # rainy views are blurry
    image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS).astype(np.float32)
    image_hls[:, :, 1] *= brightness_coefficient

    image_rgb = cv2.cvtColor(image_hls.astype(np.uint8), cv2.COLOR_HLS2RGB)

    if needs_float:
        image_rgb = to_float(image_rgb, max_value=255)

    return image_rgb
f864a46fc13e955baade855ae454249e3a514a03
3,641,989
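A hypothetical usage sketch, assuming the module helpers used above (non_rgb_warning, from_float, to_float) are in scope; drop positions are scattered at random over a dummy image.

import numpy as np

img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
height, width = img.shape[:2]
rain_drops = [(np.random.randint(0, width), np.random.randint(0, height - 20))
              for _ in range(300)]
rainy = add_rain(img, slant=5, drop_length=20, drop_width=1,
                 drop_color=(200, 200, 200), blur_value=3,
                 brightness_coefficient=0.8, rain_drops=rain_drops)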
def vrctst_tml(file_name): """ adds vrctst_tml extension, if missing :param file_name: name of file :type file_name: str :returns: file with extension added :rtype: str """ return _add_extension(file_name, Extension.VRC_TML)
92a084fc19c39c0797d573e01c90da28c0e1c11c
3,641,990
import numpy as np


def get_pixel_coords(x, y, xres, yres, xmin, ymax):

    """
    Translate x, y coordinates to cols, rows.

    Example:
        col, row = get_pixel_coords(x, y, geotransform[1], geotransform[-1], geotransform[0], geotransform[3])

    Parameters
    ----------
    x : float, numpy.ndarray
        X coordinates.
    y : float, numpy.ndarray
        Y coordinates.
    xres : float
        X resolution.
    yres : float
        Y resolution.
    xmin : float
        X coordinate of the raster's upper-left corner.
    ymax : float
        Y coordinate of the raster's upper-left corner.

    Returns
    -------
    col : int, numpy.ndarray
        Column coordinates.
    row : int, numpy.ndarray
        Row coordinates.
    """
    col = np.around((x - xmin) / xres).astype(int)
    row = np.around((y - ymax) / yres).astype(int)

    return col, row
ccf38618de9d24279ab4df6ba016804c6da926f7
3,641,991
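A worked example with a 10 m grid whose upper-left corner sits at (500000, 4600000); yres is negative, as in a GDAL geotransform.

col, row = get_pixel_coords(x=500123.0, y=4599877.0, xres=10.0, yres=-10.0,
                            xmin=500000.0, ymax=4600000.0)
print(col, row)   # 12 12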
import re def _boundary_of_alternatives_indices(pattern): """ Determines the location of a set of alternatives in a glob pattern. Alternatives are defined by a matching set of non-bracketed parentheses. :param pattern: Glob pattern with wildcards. :return: Indices of the innermost set of matching non-bracketed parentheses in a tuple. The Index of a missing parenthesis will be passed as None. """ # Taking the leftmost closing parenthesis and the rightmost opening # parenthesis left of it ensures that the parentheses belong together and # the pattern is parsed correctly from the most nested section outwards. end_pos = None for match in re.finditer('\\)', pattern): if not _position_is_bracketed(pattern, match.start()): end_pos = match.start() break # Break to get leftmost. start_pos = None for match in re.finditer('\\(', pattern[:end_pos]): if not _position_is_bracketed(pattern, match.start()): start_pos = match.end() # No break to get rightmost. return start_pos, end_pos
707a4a02a362019db63277b01955bb54d31e51e7
3,641,992
def plot(model, featnames=None, num_trees=None, plottype='horizontal', figsize=(25,25), verbose=3):
    """Make tree plot for the input model.

    Parameters
    ----------
    model : model
        xgboost or randomforest model.
    featnames : list, optional
        list of feature names. The default is None.
    num_trees : int, default None
        The best performing tree is chosen. Specify any other ordinal number for another target tree
    plottype : str, (default : 'horizontal')
        Only used for xgboost models.
            * 'horizontal'
            * 'vertical'
    figsize: tuple, default (25,25)
        Figure size, (height, width)
    verbose : int, optional
        Print progress to screen. The default is 3.
        0: NONE, 1: ERROR, 2: WARNING, 3: INFO (default), 4: DEBUG, 5: TRACE

    Returns
    -------
    ax : Figure axis
        Figure axis of the input model.

    """
    modelname = str(model).lower()
    if ('xgb' in modelname):
        if verbose>=4: print('xgboost plotting pipeline.')
        ax = xgboost(model, featnames=featnames, num_trees=num_trees, figsize=figsize, plottype=plottype, verbose=verbose)
    elif ('tree' in modelname) or ('forest' in modelname) or ('gradientboosting' in modelname):
        if verbose>=4: print('tree plotting pipeline.')
        ax = randomforest(model, featnames=featnames, num_trees=num_trees, figsize=figsize, verbose=verbose)
    elif ('lgb' in modelname):
        ax = lgbm(model, featnames=featnames, num_trees=num_trees, figsize=figsize, verbose=verbose)
    else:
        print('[treeplot] >Model not recognized: %s' %(modelname))
        ax = None

    return ax
29b6f242ba6f794440b27fff573d976f9e06a228
3,641,993
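A hypothetical usage sketch: visualize the first tree of a scikit-learn random forest, assuming the randomforest plotting helper referenced above is available in the module.

from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

X, y = load_iris(return_X_y=True)
model = RandomForestClassifier(n_estimators=10).fit(X, y)
featnames = ['sepal_l', 'sepal_w', 'petal_l', 'petal_w']
ax = plot(model, featnames=featnames, num_trees=0)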
import logging import tempfile import os import shutil def post_images(*, image_path: str,) -> dict: """ Convert Image to PDF """ logging.debug('image_path: ' + image_path) try: if(image_path.startswith(NEXTCLOUD_USERNAME+"/files")): image_path = image_path[len(NEXTCLOUD_USERNAME+"/files"):] logging.debug('image_path: ' + image_path) if(VERIFY_INPUTPATH and not image_path.startswith(NEXTCLOUD_OCR_INPUT_DIR)): return {"image_path": image_path,"message": "ignored because folder not " + NEXTCLOUD_OCR_INPUT_DIR , "status": "IGNORED"} except Exception as e: print("parse error: " + str(e)) try: workdir = tempfile.mkdtemp(prefix="ocr_") with NextCloud( NEXTCLOUD_URL, user=NEXTCLOUD_USERNAME, password=NEXTCLOUD_PASSWORD, ) as nxc: try: nc_file_list = nxc.list_folders(NEXTCLOUD_OCR_INPUT_DIR).data logging.debug('nc_file_list: ' + str(nc_file_list)) nc_file_remote_name = image_path logging.debug('nc_file_remote_name: ' + str(nc_file_remote_name)) nc_file_remote = nxc.get_file(nc_file_remote_name) logging.debug('nc_file_remote: ' + str(nc_file_remote)) nc_file_name, nc_file_ext = os.path.splitext(os.path.basename(nc_file_remote_name)) logging.debug('nc_file_name: ' + str(nc_file_name)) logging.debug('nc_file_ext: ' + str(nc_file_ext)) nc_file_remote.download(target=workdir) except Exception as e: print("download error: " + str(e)) try: pdf = pytesseract.image_to_pdf_or_hocr(os.path.join(workdir, nc_file_name + nc_file_ext), extension='pdf', lang="deu") file_ocr_name = os.path.join(workdir, nc_file_name + "-" + str(nc_file_remote.file_id) + ".pdf") with open(file_ocr_name, 'w+b') as file_ocr: file_ocr.write(pdf) except Exception as e: print("ocr error: " + str(e)) try: file_ocr_remote = NEXTCLOUD_OCR_OUTPUT_DIR + "/" + nc_file_name + "-" + str(nc_file_remote.file_id) + ".pdf" nxc.upload_file(file_ocr_name, file_ocr_remote).data nc_file_remote.add_tag(tag_name=NEXTCLOUD_OCR_TAG) except Exception as e: print("upload error: " + str(e)) shutil.rmtree(workdir) return {"image_path": image_path,"message": "successfully converted " + nc_file_remote_name + " to " + file_ocr_remote, "status": "OK"} except Exception as e: return {"image_path": image_path,"message": str(e), "status": "Failure"}
2f8d4855b189b55aeb2ba76b957cccf1adf7a935
3,641,994
def sim_DA_from_timestamps2_p2_2states(timestamps, dt_ref, k_D, R0, R_mean, R_sigma, tau_relax, k_s, rg, chunk_size=1000, alpha=0.05, ndt=10): """ 2-states recoloring using CDF in dt and with random number caching """ dt = np.array([dt_ref] * 2, dtype=np.float64) for state in [0, 1]: if tau_relax[state] < ndt * dt[state]: dt[state] = tau_relax[state] / ndt print(f'WARNING: Reducing dt[{state}] to {dt[state]:g} ' f'[tau_relax[{state}] = {tau_relax[state]}]') # Array flagging photons as A (1) or D (0) emitted A_ph = np.zeros(timestamps.size, dtype=np.uint8) # Instantaneous D-A distance at D de-excitation time R_ph = np.zeros(timestamps.size, dtype=np.float64) # Time of D de-excitation relative to the last timestamp T_ph = np.zeros(timestamps.size, dtype=np.float64) # State for each photon S_ph = np.zeros(timestamps.size, dtype=np.uint8) peq = [k_s[1] / (k_s[0] + k_s[1]), k_s[0] / (k_s[0] + k_s[1])] k_s_sum = np.sum(k_s) t0 = 0 nanotime = 0 state = 0 # the two states are 0 and 1 R = rg.randn() * R_sigma[state] + R_mean[state] iN = chunk_size - 1 # value to get the first chunk of random numbers for iph, t in enumerate(timestamps): # each cycle starts with a new photon timestamp `t` # excitation time is `t`, emission time is `t + nanotime` delta_t0 = t - t0 delta_t = delta_t0 - nanotime if delta_t < 0: # avoid negative delta_t possible when when two photons have # the same macrotime delta_t = 0 t = t0 p_state = (1 - peq[state]) * np.exp(-(delta_t0 * k_s_sum)) + peq[state] u = rg.rand() #print(f'iph={iph}, state={state}, p_state={p_state}, u={u}, delta_t0={delta_t0}') # Inversion of u is for compatibility with N-state version if state == 1: u = 1 - u if p_state <= u: #print(' * state change') state = 0 if state == 1 else 1 R = rg.randn() * R_sigma[state] + R_mean[state] # Compute the D-A distance at the "excitation time" iN += 1 if iN == chunk_size: Na = memoryview(rg.randn(chunk_size)) Pa = memoryview(rg.rand(chunk_size)) iN = 0 N = Na[iN] p = Pa[iN] R = ou_single_step_cy(R, delta_t, N, R_mean[state], R_sigma[state], tau_relax[state]) nanotime = 0 # loop through D-A diffusion steps with a fixed time-step dt # until D de-excitation by photon emission or energy transfer to A while True: k_ET = k_D * (R0 / R)**6 k_emission = k_ET + k_D d_prob_ph_em = k_emission * dt[state] # prob. of emission in dt if d_prob_ph_em > alpha: d_prob_ph_em = 1 - exp(-d_prob_ph_em) if d_prob_ph_em >= p: break # break out of the loop when the photon is emitted nanotime += dt[state] iN += 1 if iN == chunk_size: Na = memoryview(rg.randn(chunk_size)) Pa = memoryview(rg.rand(chunk_size)) iN = 0 N = Na[iN] p = Pa[iN] R = ou_single_step_cy(R, dt[state], N, R_mean[state], R_sigma[state], tau_relax[state]) # photon emitted, let's decide if it is from D or A p_DA = p / d_prob_ph_em # equivalent to rand(), but faster prob_A_em = k_ET / k_emission if prob_A_em >= p_DA: A_ph[iph] = 1 # time of D de-excitation by photon emission or energy transfer to A t0 = t # save D-A distance at emission time R_ph[iph] = R # save time of emission relative to the excitation time `t` T_ph[iph] = nanotime # Save state for current photon S_ph[iph] = state return A_ph, R_ph, T_ph, S_ph
c268e49d7fc960c3f684d77fcbb4880be446a5b0
3,641,995
import numpy as np


def get_cross_track_error(data, rate, velocity):
    """Returns the cross-track position over time (in nautical miles)

    The algorithm simulates an aircraft traveling on a straight trajectory
    that turns according to the data provided. The aircraft instantaneously
    updates its heading at each timestep by Ω * Δt.

    .. warning::
        This code assumes that the magnitude of the rotations in data is
        small in order to use the paraxial approximation sin(θ) ≈ θ. This
        paraxial approximation speeds up the algorithm, which is important
        if cross-track error simulations will occur hundreds of times.

    This can be used in conjunction with `simulate_fog_single`. In order to
    simulate a transpacific flight and estimate the cross-track error for a
    single run, one could run:

    >>> rate = 1 # Hz
    >>> data = simulate_fog_single(rate=rate, hours=10, arw=.0413,
    ...                            drift=.944, correlation_time=3600)
    >>> xtk = get_cross_track_error(data, rate, 900)

    Parameters
    ----------
    data: ndarray.float
        An array of rotation rates, in deg/h
    rate: float
        The sampling rate of data in Hz
    velocity: float
        The velocity of the simulated aircraft in kph

    Returns
    -------
    ndarray.float
        The cross-track error time series from this FOG signal, in nautical miles.
    """
    Δθ = data * np.pi/180/3600/rate  # radians
    heading = np.cumsum(Δθ)
    Δy = velocity * 1000 / 3600 / rate * heading  # m
    xtk = np.cumsum(Δy) / 1852  # nmi
    return xtk
88fa42408090c94c9e3212517d89ba47afeedccb
3,641,996
async def get_list_address():
    """Get the list of addresses."""
    return await service.address_s.all()
4d0304e23243d7f151c0f09fc5e1896413daee1b
3,641,997
import pathlib from typing import Optional def load_xml_stream( file_path: pathlib.Path, progress_message: Optional[str] = None ) -> progress.ItemProgressStream: """Load an iterable xml file with a progress bar.""" all_posts = ElementTree.parse(file_path).getroot() return progress.ItemProgressStream( all_posts, len(all_posts), prefix=" ", message=progress_message, )
26d6bd350dbe913855479c557c8881b03f90b266
3,641,998
import numpy as np


def SNR_band(cp, ccont, cb, iband, itime=10.):
    """
    Calculate the S/N obtained on a molecular band for a given exposure time,
    following Eqn 7 from Robinson et al. 2016.

    Parameters
    ----------
    cp :
        Planet count rate
    ccont :
        Continuum count rate
    cb :
        Background count rate
    iband :
        Indices of molecular band
    itime :
        Integration time [hours]

    Returns
    -------
    snr : float
        S/N in the band for the given exposure time
    """
    denominator = np.power(np.sum(cp[iband] + 2.*cb[iband]), 0.5)
    numerator = np.sum(np.fabs(ccont - cp[iband]))
    return np.power(itime*3600., 0.5) * numerator / denominator
ffec688d6e760ace0fc1e38217ec91dc674ee83f
3,641,999
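A worked example with synthetic count rates (counts per second); the numbers are invented purely to illustrate the expected shapes and units.

import numpy as np

cp = np.full(50, 0.01)       # planet count rate over 50 spectral channels
ccont = 0.012                # continuum count rate at the band
cb = np.full(50, 0.05)       # background count rate
iband = np.arange(20, 30)    # indices of the molecular band
print(SNR_band(cp, ccont, cb, iband, itime=10.))   # S/N after a 10-hour integration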