content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
from typing import Dict
import array


def extract_float_arrays(blockids: str, data: bytes) -> Dict[str, array.array]:
    """Extract float arrays from raw scope, background trace, and recorder
    zoom binary data (block ids a, A, b, B, x, y, Y in the DLC pro
    'Scope, Lock, and Recorder Binary Data' format).

    Args:
        blockids: String of requested block id letters. Block ids not
            available in the input data or not in the above list are ignored.
        data: Input byte sequence.

    Returns:
        Dictionary with found block ids as keys and arrays of floats
        (typecode 'f') as values.

    Raises:
        DataFormatError: If the contents of `data` do not conform to the
            'Scope, Lock, and Recorder Binary Data' format.
    """
    retval = {}
    for block in _binary_data_blocks(data):
        if block.id in blockids and block.id in 'aAbBxyY':
            # FIX: the module was imported with `import array`, so the type
            # must be referenced as `array.array` -- the original bare
            # `array('f')` tried to call the module object (TypeError).
            values = array.array('f')  # float (IEEE 754 single precision)
            try:
                values.frombytes(block.payload)
            except ValueError as exc:
                raise DataFormatError(
                    "Invalid payload length in block '{}'".format(block.id)) from exc
            # payload is little-endian on the wire; convert to host order
            retval[block.id] = _letoh(values)
    return retval
8789f73d175b9d0f244b33b61d0fe1effa702ded
26,400
def rotate_image(path):
    """Rotate the image from path and return wx.Image.

    Reads the EXIF orientation tag and undoes the camera rotation before
    converting to a wx.Image.
    """
    img = Image.open(path)
    try:
        exif = img._getexif()
        if exif[ORIENTATION_TAG] == 3:
            img = img.rotate(180, expand=True)
        elif exif[ORIENTATION_TAG] == 6:
            img = img.rotate(270, expand=True)
        elif exif[ORIENTATION_TAG] == 8:
            img = img.rotate(90, expand=True)
    except (AttributeError, KeyError, IndexError, TypeError):
        # FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). These are the exceptions raised
        # when the image has no EXIF data (`exif` is None) or no
        # orientation tag -- in that case leave the image unrotated.
        pass
    return pil_image_to_wx_image(img)
b4f450a3f6cb01a4d9c8e6c384edeabd0366ac63
26,401
def predict():
    """Predict endpoint. Chooses model for prediction and predcits bitcoin
    price for the given time period.

    @author: Andrii Koval, Yulia Khlyaka, Pavlo Mospan
    """
    payload = request.json
    if payload and bool(payload["predict"]):
        # dispatch on the configured model id: 0=ARIMA, 1=Prophet, 2=LSTM
        model_id = predictor.pred_dict["model"]
        if model_id == 0:
            plots.arima_df = predictor.get_prediction_arima()
        elif model_id == 1:
            plots.prophet_df = predictor.get_prediction_prophet()
        elif model_id == 2:
            plots.lstm_df = predictor.get_prediction_bidirectlstm()
    return 'Non tam praeclarum est scire latine, quam turpe nescire'
90fbc72e3a57ad7dae3617bec20268c87a0c158a
26,402
def splice_imgs(img_list, vis_path):
    """Splice pictures horizontally

    Pastes every image in img_list side by side onto one canvas, separated
    by white padding strips, and saves the result to vis_path.
    """
    tile_w, tile_h = img_list[0].size
    pad_w = 20
    count = len(img_list)
    canvas = Image.new('RGB', (count * tile_w + (count - 1) * pad_w, tile_h))
    spacer = Image.new('RGB', (pad_w, tile_h), (255, 255, 255))
    for idx, tile in enumerate(img_list):
        left = idx * (tile_w + pad_w)
        canvas.paste(tile, (left, 0))
        # a spacer follows every tile except the last one
        if idx < count - 1:
            canvas.paste(spacer, (left + tile_w, 0))
    return canvas.save(vis_path)
97d4ab32f1a734fbd04e7c558062867c2e6bd3b4
26,403
def create_segmented_colormap(cmap, values, increment):
    """Create colormap with discretized colormap.

    This was created mainly to plot a colorbar that has discretized values.

    Args:
        cmap: matplotlib colormap
        values: A list of the quantities being plotted
        increment: The increment used to bin the values

    Returns:
        A tuple with the cmap, the norm, and the colors.
    """
    half_step = increment / 2
    boundaries = np.arange(values[0] - half_step,
                           values[-1] + 3 * half_step,
                           increment)
    norm = mpl.colors.BoundaryNorm(boundaries, len(values) + 1)
    norm2 = mpl.colors.Normalize(vmin=0, vmax=len(values))
    norm3 = mpl.colors.BoundaryNorm(
        np.arange(-0.5, len(values) + 0.5, 1), len(values) + 1
    )
    # append one extra value so each bin edge gets a color
    extended = values + [values[-1] + increment]
    colors = cmap(norm2(norm(extended)))
    discrete_cmap = mpl.colors.ListedColormap(colors, "hate")
    return discrete_cmap, norm3, colors
9ab8a0a95896e1ae0f8777b705f920196afc6627
26,404
from io import StringIO


def division_series_logs():
    """Pull Retrosheet Division Series Game Logs"""
    raw_text = get_text_file(gamelog_url.format('DV'))
    frame = pd.read_csv(StringIO(raw_text), header=None, sep=',', quotechar='"')
    frame.columns = gamelog_columns
    return frame
414de8e0409bba9651bef81bbdc811e105d1d11f
26,405
def release_date(json):
    """Return the release date stored under the 'updated' key of the
    json content given in argument."""
    return json['updated']
635efd7140860c8f0897e90433a539c8bd585945
26,406
def init_embedding_from_graph(
    _raw_data, graph, n_components, random_state, metric, _metric_kwds, init="spectral"
):
    """Initialize embedding using graph. This is for direct embeddings.

    Parameters
    ----------
    init : str, optional
        Type of initialization to use. Either random, or spectral, by default "spectral"

    Returns
    -------
    embedding : np.array
        the initialized embedding
    """
    if random_state is None:
        random_state = check_random_state(None)

    if isinstance(init, str) and init == "random":
        # uniform random layout inside a [-10, 10] box
        embedding = random_state.uniform(
            low=-10.0, high=10.0, size=(graph.shape[0], n_components)
        ).astype(np.float32)
    elif isinstance(init, str) and init == "spectral":
        # We add a little noise to avoid local minima for optimization to come
        initialisation = spectral_layout(
            _raw_data,
            graph,
            n_components,
            random_state,
            metric=metric,
            metric_kwds=_metric_kwds,
        )
        # rescale so the largest coordinate magnitude is 10, then jitter
        expansion = 10.0 / np.abs(initialisation).max()
        embedding = (initialisation * expansion).astype(
            np.float32
        ) + random_state.normal(
            scale=0.0001, size=[graph.shape[0], n_components]
        ).astype(
            np.float32
        )
    else:
        # caller supplied explicit starting coordinates
        init_data = np.array(init)
        if len(init_data.shape) == 2:
            if np.unique(init_data, axis=0).shape[0] < init_data.shape[0]:
                # duplicate rows present: jitter each point by a fraction of
                # the mean nearest-neighbor distance so points don't coincide
                tree = KDTree(init_data)
                dist, ind = tree.query(init_data, k=2)
                nndist = np.mean(dist[:, 1])
                embedding = init_data + random_state.normal(
                    scale=0.001 * nndist, size=init_data.shape
                ).astype(np.float32)
            else:
                embedding = init_data
    # NOTE(review): if `init` is a non-string that does not convert to a 2-D
    # array, `embedding` is never assigned and this raises UnboundLocalError
    # -- confirm whether callers can hit that path.
    return embedding
22c2d939a47932b625491a1e685b055214753010
26,407
def basic_hash_table():
    """Return a not-empty hash table fixture backed by a single bucket."""
    return HashTable(1)
b06e59c2a6767309e5394df6e51bdaf12c58d073
26,408
def scan_by_key(key, a, dim=0, op=BINARYOP.ADD, inclusive_scan=True):
    """
    Generalized scan by key of an array.

    Parameters
    ----------
    key : af.Array
         key array.
    a  : af.Array
         Multi dimensional arrayfire array.
    dim : optional: int. default: 0
         Dimension along which the scan is performed.
    op : optional: af.BINARYOP. default: af.BINARYOP.ADD.
         Binary option the scan algorithm uses. Can be one of:
         - af.BINARYOP.ADD
         - af.BINARYOP.MUL
         - af.BINARYOP.MIN
         - af.BINARYOP.MAX
    inclusive_scan: optional: bool. default: True
         Specifies if the scan is inclusive

    Returns
    ---------
    out : af.Array
       - will contain scan of input.
    """
    out = Array()
    # Hand the fresh output handle and the raw array handles to the C API;
    # safe_call raises if the backend reports a non-zero status code.
    safe_call(backend.get().af_scan_by_key(c_pointer(out.arr), key.arr, a.arr, dim, op.value, inclusive_scan))
    return out
ea9556b3e2a87a08cca62d03da41edf5985d4156
26,409
def local_luminance_subtraction(image, filter_sigma, return_subtractor=False):
    """
    Computes an estimate of the local luminance and removes this from an image

    Parameters
    ----------
    image : ndarray(float32 or uint8, size=(h, w, c))
        An image of height h and width w, with c color channels
    filter_sigma : float
        The standard deviation of the isotropic gaussian kernel that we use
        to compute a local estimate of the luminance
    return_subtractor : bool, optional
        If true, return the array used to do the luminance subtraction --
        this can be used to reverse the transform. Defualt False.

    Returns
    -------
    filtered_image : ndarray(float32, size=(h, w, c))
    subtractor : ndarray(float32, size=(h, w, c))
    """
    # kernel side length ~4 sigma on each side of the center
    kernel_size = (4 * filter_sigma + 1, 4 * filter_sigma + 1)
    blur_kernel = get_gaussian_filter_2d(filter_sigma, kernel_size)
    local_luminance = filter_sd(image, blur_kernel)
    centered = image - local_luminance
    if return_subtractor:
        return centered, local_luminance
    return centered
d9cc46a205f495c0107211d7222be27f40d6896b
26,410
def createBank():
    """Create the bank.

    Returns:
        Bank: The bank.
    """
    bank_number = 123456
    bank_name = 'My Piggy Bank'
    bank_location = 'Tunja Downtown'
    return Bank(bank_number, bank_name, bank_location)
676f0d6cf330e7832064393b543a5ffd1f1068d1
26,411
def str_to_bool(param):
    """
    Convert string value to boolean

    Attributes:
        param -- input query parameter (a string, or None)

    Returns:
        True for 'TRUE' (case-insensitive), False for 'FALSE' or None.

    Raises:
        InputValidationError: for any other value.
    """
    # FIX: the original tested `param.upper() in ['FALSE', None]`, but
    # .upper() never returns None (making that branch dead) and calling
    # .upper() on a None param raised AttributeError -- handle None first.
    if param is None:
        return False
    value = param.upper()
    if value == 'TRUE':
        return True
    if value == 'FALSE':
        return False
    raise InputValidationError(
        'Invalid query parameter. Param is {} and param type is {}'.format(param, type(param)))
ef860e2b2e623d98576c04ef1e397604576d8d48
26,412
import argparse


def create_parser():
    """
    Creates the argparse parser with all the arguments.
    """
    parser = argparse.ArgumentParser(
        description='Management CLI for mock PCRF',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Add subcommands
    subparsers = parser.add_subparsers(title='subcommands', dest='cmd')

    # Reset subcommand: wired to the send_reset handler
    reset_parser = subparsers.add_parser(
        'reset', help='Send Reset to mock PCRF hosted in FeG')
    reset_parser.set_defaults(func=send_reset)
    return parser
c2a1d2fbbcaafd1fec40ea8f53df29e2cc481269
26,413
import sys


def ProtoFlags(*ss):
    """
    add the command options for protoc

    Args:
        ss : a variable number of string objects
             ss may contain multiple string objects, each object can
             contain multiple options. one option may be:
             1. option may contains $WORKSPACE, $OUT, $OUT_ROOT macro values

    Returns:
        return a SyntaxTag.TagProtoFlags
    """
    tag = SyntaxTag.TagProtoFlags()
    if sys.argv[0] == 'PLANISH':
        return tag
    env = Environment.GetCurrent()
    for s in ss:
        for x in s.split():
            if "$WORKSPACE" in x:
                tag.AddSV(x.replace("$WORKSPACE/", ''))
            # FIX: test $OUT_ROOT before $OUT -- "$OUT" is a prefix of
            # "$OUT_ROOT", so the original order made this branch
            # unreachable and mangled $OUT_ROOT options.
            elif "$OUT_ROOT" in x:
                tag.AddSV(x.replace("$OUT_ROOT", env.OutputRoot()))
            elif "$OUT" in x:
                tag.AddSV(x.replace("$OUT", env.OutputPath()))
            else:
                tag.AddSV(x)
    return tag
7d667ae25e4b6e1b3fa201064ef158c5cf5cc46a
26,414
from typing import Optional
from typing import List
from typing import Dict
from datetime import datetime


def fetch_log_messages(attempt_id: Optional[int] = None,
                       task_id: Optional[int] = None,
                       min_severity: Optional[int] = None) -> List[Dict]:
    """
    Fetch log messages from the database.

    :param attempt_id:
        Fetch only log messages associated with one particular task execution.
    :param task_id:
        Fetch only log messages associated with one particular task.
    :param min_severity:
        Fetch only log messages with a minimum severity level.
    :return:
        List[Dict]
    """
    output: List[Dict] = []

    # Open connection to the database
    task_db = task_database.TaskDatabaseConnection()

    # Build an SQL query for all matching log messages.
    # The constant "1" keeps the WHERE clause valid when no filter applies.
    constraints = ["1"]
    if attempt_id is not None:
        constraints.append("l.generatedByTaskExecution = {:d}".format(attempt_id))
    if task_id is not None:
        constraints.append("et.taskId = {:d}".format(task_id))
    if min_severity is not None:
        constraints.append("l.severity >= {:d}".format(min_severity))

    # Search for all matching log messages
    task_db.conn.execute("""
SELECT l.timestamp, l.generatedByTaskExecution, l.severity, l.message
FROM eas_log_messages l
LEFT JOIN eas_scheduling_attempt esa on l.generatedByTaskExecution = esa.schedulingAttemptId
LEFT JOIN eas_task et on esa.taskId = et.taskId
WHERE {constraint}
ORDER BY generatedByTaskExecution, timestamp;
""".format(constraint=" AND ".join(constraints)))
    log_list = task_db.conn.fetchall()

    # Convert log events into dictionaries
    for item in log_list:
        # map numeric severity onto a CSS-style class name
        message_class = 'info'
        if item['severity'] >= 30:
            message_class = 'warning'
        if item['severity'] >= 40:
            message_class = 'error'
        output.append({
            'attempt_id': item['generatedByTaskExecution'],
            'time': datetime.utcfromtimestamp(item['timestamp']).strftime('%Y-%m-%d %H:%M:%S'),
            'class': message_class,
            'message': item['message'].strip()
        })

    # Commit database
    task_db.commit()
    task_db.close_db()

    # Return results
    return output
10e78d0ee14647cf7a390287208a28aa957551a4
26,415
import argparse


def parse_arguments():
    """Argument parser for extract_branch_length"""
    cli = argparse.ArgumentParser(
        description="extract_branch_length.py: extract the branch length of the "
        " common ancestor of a set of species"
    )
    cli.add_argument(
        "-t",
        "--tree",
        help="Input species tree (Newick; not transcript tree)",
        required=True,
    )
    cli.add_argument(
        "-s",
        "--species",
        help="Target species, separated by commas. If some are missing, won't be taken into account",
        required=True,
    )
    # expose the parsed namespace as a plain dict
    return vars(cli.parse_args())
2ee5fcce15420e77307e4444b86b51e18b0fadb7
26,416
def part_1_solution_2(lines):
    """Shorter, but not very readable. A good example of "clever programming"
    that saves a few lines of code, while making it unbearably ugly.

    Counts the number of times a depth measurement increases."""
    return sum(1 for prev, cur in zip(lines, lines[1:]) if cur > prev)
d393f0385a1afbea4c2f3b4d3f51d8e7d0ade204
26,417
def get_lifecycle_configuration(bucket_name):
    """
    Get the lifecycle configuration of the specified bucket.

    Usage is shown in usage_demo at the end of this module.

    :param bucket_name: The name of the bucket to retrieve.
    :return: The lifecycle rules of the specified bucket.
    """
    s3 = get_s3()
    try:
        config = s3.Bucket(bucket_name).LifecycleConfiguration()
        logger.info("Got lifecycle rules %s for bucket '%s'.", config.rules, bucket_name)
    except Exception:
        # FIX: narrowed from a bare `except:` which also intercepted
        # KeyboardInterrupt/SystemExit; the error is still logged and
        # re-raised for the caller.
        logger.exception("Couldn't get lifecycle configuration for bucket '%s'.", bucket_name)
        raise
    else:
        return config.rules
380c884afddbf72db6474e60480bf75ebe67309e
26,418
def compute_reward(ori, new, target_ids):
    """Compute the reward for each target item.

    Args:
        ori: (RI, ERI, Revenue) triple before promotion.
        new: (RI, ERI, Revenue) triple after promotion.
        target_ids: iterable of item ids to score.

    Returns:
        dict mapping item id -> sigmoid-squashed, normalized promotion effect.
    """
    reward = {}
    PE_dict = {}
    ori_RI, ori_ERI, ori_Revenue = ori
    new_RI, new_ERI, new_Revenue = new
    max_PE, min_PE, total_PE = 0, 0, 0
    for item in target_ids:
        PE = new_Revenue[item] - ori_Revenue[item]  # Eq. (3) in paper
        # clamp negative effects up to min_PE (0) before aggregating
        PE = min_PE if PE <= min_PE else PE
        max_PE = PE if PE >= max_PE else max_PE
        total_PE += PE
        PE_dict[item] = PE
    avg_PE = total_PE / len(target_ids)
    PE_interval = max_PE - min_PE
    # FIX: if every promotion effect is <= 0, max_PE == min_PE == 0 and the
    # original code divided by zero; return the neutral reward instead.
    if PE_interval == 0:
        return {item: expit(0.0) for item in PE_dict}
    # Eq. (9) in paper
    for item, PE in PE_dict.items():
        reward[item] = expit((PE - avg_PE) / PE_interval)
    return reward
a64a1c47089924d373c7435c5018c63fd4edbe74
26,419
from typing import Any
import torch
import tqdm


def _compute_aspect_ratios_slow(dataset: Any, indices: Any = None) -> Any:
    """Compute the aspect ratios by loading every image (slow fallback path).

    Returns a list of width/height ratios, one per requested index.
    """
    print(
        "Your dataset doesn't support the fast path for "
        "computing the aspect ratios, so will iterate over "
        "the full dataset and load every image instead. "
        "This might take some time..."
    )
    if indices is None:
        indices = range(len(dataset))

    # FIX: `Sampler` was referenced unqualified but never imported; use the
    # torch base class explicitly.
    class SubsetSampler(torch.utils.data.Sampler):  # type: ignore
        """Subset sampler."""

        def __init__(self, indices: Any) -> None:
            self.indices = indices

        def __iter__(self) -> Any:
            return iter(self.indices)

        def __len__(self) -> int:
            return len(self.indices)

    sampler = SubsetSampler(indices)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        sampler=sampler,
        num_workers=14,  # you might want to increase it for faster processing
        collate_fn=lambda x: x[0],
    )
    aspect_ratios = []
    # FIX: the module was imported via `import tqdm`, so the progress-bar
    # class must be referenced as `tqdm.tqdm` (the original called the
    # module object, a TypeError).
    with tqdm.tqdm(total=len(dataset)) as pbar:
        for _i, (img, _) in enumerate(data_loader):
            pbar.update(1)
            height, width = img.shape[-2:]
            aspect_ratio = float(width) / float(height)
            aspect_ratios.append(aspect_ratio)
    return aspect_ratios
8fd183a5c353503067666526bdf6722bb53a4317
26,420
import getopt


def parse_args(input_args):
    """
    Parse the supplied command-line arguments and return the input file glob
    and metric spec strings.

    :param input_args: Command line arguments.
    :return: A triplet, the first element of which is the input file glob,
        the second element is the output file name (may be empty), the third
        element is a list of metric spec strings.
    """
    file_glob = ""
    output_file_name = ""
    try:
        opts, metric_specs = getopt.getopt(input_args, "hi:o:")
    except getopt.GetoptError as err:
        print(str(err))
        usage_and_die()
    for flag, value in opts:
        if flag == "-i":
            file_glob = value
        elif flag == "-o":
            output_file_name = value
        else:
            # "-h" or anything unexpected: show usage and exit
            usage_and_die()
    if not file_glob:
        usage_and_die()
    return file_glob, output_file_name, metric_specs
a15327a3aa2aae86ef8ad14ebcae46b6f3593503
26,421
def _diffuse(field: jnp.ndarray, diffusion_coeff: float, delta_t: float) -> jnp.ndarray: """ Average each value in a vector field closer to its neighbors to simulate diffusion and viscosity. Parameters ---------- field The vector field to diffuse. *Shape: [y, x, any].* diffusion_coeff A coefficient determining the amount of diffusion at each frame. Must be static during JIT tracing. delta_t The time elapsed in each timestep. Must be static during JIT tracing. Returns ------- jnp.ndarray `field`, with diffusion applied for this frame. """ # Compile-time: precompute neighbor averaging kernel neighbor_weight = diffusion_coeff * delta_t neighbor_kernel = np.array( [ [0, neighbor_weight / 4, 0], [neighbor_weight / 4, 1 - 4 * neighbor_weight, neighbor_weight / 4], [0, neighbor_weight / 4, 0], ] ) neighbor_kernel = jax.device_put(neighbor_kernel) return jax.scipy.signal.convolve2d(field, neighbor_kernel, mode="same")
e05763df93164bd7b4baa778720dbecc32fea228
26,422
def dAdzmm_ron_s0(u0, M, n2, lamda, tsh, dt, hf, w_tiled):
    """
    calculates the nonlinear operator for a given field u0
    use: dA = dAdzmm(u0)
    """
    # FIX: removed leftover debug prints of u0.real.flags / u0.imag.flags.
    # |u0|^2 from contiguous real/imag parts
    M3 = uabs(np.ascontiguousarray(u0.real), np.ascontiguousarray(u0.imag))
    # Raman response applied as a convolution in the frequency domain
    temp = fftshift(ifft(fft(M3) * hf))
    N = nonlin_ram(M, u0, M3, dt, temp)
    N *= -1j * n2 * 2 * pi / lamda
    return N
57c958bca07b77eaa406cb6b95bf5719af34075b
26,423
def create_support_bag_of_embeddings_reader(reference_data, **options):
    """
    A reader that creates sequence representations of the input reading instance, and then models
    each question and candidate as the sum of the embeddings of their tokens.
    :param reference_data: the reference training set that determines the vocabulary.
    :param options: repr_dim, candidate_split (used for tokenizing candidates), question_split
    :return: a MultipleChoiceReader.
    """
    # NOTE(review): this is TF1-style graph code (tf.random_normal, keep_dims)
    # -- it will not run unmodified under TF2 eager mode.
    tensorizer = SequenceTensorizer(reference_data)
    candidate_dim = options['repr_dim']
    support_dim = options['support_dim']

    # question embeddings: for each symbol a [support_dim, candidate_dim] matrix
    question_embeddings = tf.Variable(tf.random_normal((tensorizer.num_symbols, support_dim, candidate_dim),
                                                       dtype=_FLOAT_TYPE), dtype=_FLOAT_TYPE)
    # [batch_size, max_question_length, support_dim, candidate_dim]
    question_encoding_raw = tf.gather(question_embeddings, tensorizer.questions)
    # question encoding should have shape: [batch_size, 1, support_dim, candidate_dim], so reduce and keep
    question_encoding = tf.reduce_sum(question_encoding_raw, 1, keep_dims=True)

    # candidate embeddings: for each symbol a [candidate_dim] vector
    candidate_embeddings = tf.Variable(tf.random_normal((tensorizer.num_symbols, candidate_dim),
                                                        dtype=_FLOAT_TYPE), dtype=_FLOAT_TYPE)
    # [batch_size, num_candidates, max_candidate_length, candidate_dim]
    candidate_encoding_raw = tf.gather(candidate_embeddings, tensorizer.candidates)
    # candidate embeddings should have shape: [batch_size, num_candidates, 1, candidate_dim]
    candidate_encoding = tf.reduce_sum(candidate_encoding_raw, 2, keep_dims=True)

    # each symbol has [support_dim] vector
    support_embeddings = tf.Variable(tf.random_normal((tensorizer.num_symbols, support_dim),
                                                      dtype=_FLOAT_TYPE), dtype=_FLOAT_TYPE)
    # [batch_size, max_support_num, max_support_length, support_dim]
    support_encoding_raw = tf.gather(support_embeddings, tensorizer.support)
    # support encoding should have shape: [batch_size, 1, support_dim, 1]
    support_encoding = tf.expand_dims(tf.expand_dims(tf.reduce_sum(support_encoding_raw, (1, 2)), 1), 3)

    # scoring with a dot product
    # [batch_size, num_candidates, support_dim, candidate_dim]
    combined = question_encoding * candidate_encoding * support_encoding
    scores = tf.reduce_sum(combined, (2, 3))

    loss = create_softmax_loss(scores, tensorizer.target_values)
    return MultipleChoiceReader(tensorizer, scores, loss)
0b09423e9d62d1e5c7c99bd1ebede22ca490797a
26,424
def get_hosts_cpu_frequency(ceilo, hosts):
    """Get cpu frequency for each host in hosts.

    :param ceilo: A Ceilometer client.
     :type ceilo: *

    :param hosts: A set of hosts
     :type hosts: list(str)

    :return: A dictionary of (host, cpu_frequency)
     :rtype: dict(str: *)
    """
    hosts_cpu_total = dict()  # dict of (host, cpu_max_frequency)
    for host in hosts:
        # Ceilometer resource ids for compute nodes are "<host>_<host>"
        resource_id = "_".join([host, host])
        samples = ceilo.samples.list(
            meter_name='compute.node.cpu.frequency',
            limit=1,
            q=[{'field': 'resource_id', 'op': 'eq', 'value': resource_id}])
        if samples:
            hosts_cpu_total[host] = samples[0].counter_volume
    return hosts_cpu_total
aa6049b9d011d187e1a246a413835aafdbb5c6dc
26,425
def _cloture(exc): """ Return a function which will accept any arguments but raise the exception when called. Parameters ------------ exc : Exception Will be raised later Returns ------------- failed : function When called will raise `exc` """ # scoping will save exception def failed(*args, **kwargs): raise exc return failed
b2e22f5b4bd267d1945b7f759f5ddfb1ee8c44e5
26,426
from typing import Dict import json def _with_environment_variables(cmd: str, environment_variables: Dict[str, object]): """Prepend environment variables to a shell command. Args: cmd (str): The base command. environment_variables (Dict[str, object]): The set of environment variables. If an environment variable value is a dict, it will automatically be converted to a one line yaml string. """ as_strings = [] for key, val in environment_variables.items(): val = json.dumps(val, separators=(",", ":")) s = "export {}={};".format(key, quote(val)) as_strings.append(s) all_vars = "".join(as_strings) return all_vars + cmd
ae27d9e7a62f49e836f1c1b116205f318d9d0dd3
26,427
def update_spam_assets(db: 'DBHandler') -> int:
    """
    Update the list of ignored assets using query_token_spam_list and avoiding
    the addition of duplicates. It returns the amount of assets that were added
    to the ignore list
    """
    spam_tokens = query_token_spam_list(db)
    # order maters here. Make sure ignored_assets are queried after spam tokens creation
    # since it's possible for a token to exist in ignored assets but not global DB.
    # and in that case query_token_spam_list add it to the global DB
    already_ignored = {asset.identifier for asset in db.get_ignored_assets()}
    added = 0
    for token in spam_tokens:
        if token.identifier not in already_ignored:
            db.add_to_ignored_assets(token)
            added += 1
    return added
4e7f4e5ae8a6b92ebd5a60a34d9330330690b663
26,428
import os
import gzip


def _load_idx(filename):
    """Loads a single IDX file from this package's bundled data directory."""
    data_path = os.path.join(os.path.dirname(__file__), "data", filename)
    with gzip.open(data_path, "rb") as handle:
        return idx.read_array(handle)
afcf0e50a12965223722ddfe15b874660c81ac6a
26,429
import png


def png_info(path):
    """Returns a dict with info about the png"""
    reader = png.Reader(filename=path)
    _width, _height, _frames, info = reader.read()
    return info
97b9df7dd7800f350695e8d678d25154c7a4b2b8
26,430
def _(data: ndarray, outliers: ndarray, show_report: bool = True) -> ndarray:
    """Process ndarrays"""
    if type(data) != type(outliers):
        raise TypeError("`data` and `outliers` must be same type")
    # squeeze single-column frames down to Series before dispatching
    data_frame = DataFrame(data).squeeze()
    outliers_frame = DataFrame(outliers).squeeze()
    # dispatch to relevant function and convert back to ndarray
    trimmed = trim(data_frame, outliers_frame)
    return trimmed.to_numpy()
1c478af8a6fffcf6240b2547782ee3fc256fdd0c
26,431
import six


def bool_from_string(subject, strict=False, default=False):
    """
    Convert a value to a boolean.

    :param subject: object to convert (coerced to str if not one already)
    :type subject: str
    :param strict: if True, raise ValueError for unrecognized values instead
        of returning *default*
    :type strict: bool
    :param default: fallback returned for unrecognized values when not strict
    :type default: bool
    :returns: the converted boolean
    :rtype: bool
    """
    TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
    FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

    if isinstance(subject, bool):
        return subject
    # Modernized: on Python 3, six.string_types is str and six.text_type is
    # str, so the six indirection is unnecessary.
    if not isinstance(subject, str):
        subject = str(subject)

    lowered = subject.strip().lower()
    if lowered in TRUE_STRINGS:
        return True
    if lowered in FALSE_STRINGS:
        return False
    if strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" % {
            'val': subject, 'acceptable': acceptable}
        raise ValueError(msg)
    return default
3c6efa416471da391e60b82aec3753d823ee2878
26,432
def prot_to_vector(seq: str) -> np.ndarray:
    """Concatenate the amino acid features for each position of the sequence.

    Args:
        seq: A string representing an amino acid sequence.

    Returns:
        A numpy array of features, shape (len(seq), features)

    Raises:
        ValueError: if `seq` contains a character with no feature row.
    """
    # convert to uppercase
    seq = seq.upper()
    try:
        chain = [aa_feats.loc[pos].values for pos in seq]
    except KeyError as e:
        # FIX: chain the original KeyError (`from e`) for debuggability
        # instead of printing it and discarding the context.
        raise ValueError("Invalid string character encountered in prot_to_vector") from e
    return np.concatenate(chain, axis=0).reshape(len(seq), -1)
ac5293ee67698243e4910c87944133f9697d8646
26,433
import os


def project_root(project):
    """Return the path the root dir of the vendored project.

    If "project" is an empty string then the path prefix for vendored
    projects (e.g. "ptvsd/_vendored/") will be returned.
    """
    # normalize falsy values (e.g. None) so os.path.join accepts them
    subdir = project or ''
    return os.path.join(VENDORED_ROOT, subdir)
5428b19ef68748719886997cc4908d2f70711d50
26,434
def set_partition(num, par):
    """
    A function returns question for partitions of a generated set.

    :param num: number of questions.
    :param par: type of items in the set based on documentation.
    :return: questions in JSON format.
    """
    questions = question_list_maker(num, par, 'set-partition')
    return jsonify(questions)
71540d753020e5333558098b7edf96c4318fb316
26,435
def meanS_heteroscedastic_metric(nout):
    """This function computes the mean log of the variance (log S) for the heteroscedastic model.
    The mean log is computed over the standard deviation prediction and the mean prediction is not taken into account.

    Parameters
    ----------
    nout : int
        Number of outputs without uq augmentation
    """
    def metric(y_true, y_pred):
        """
        Parameters
        ----------
        y_true : Keras tensor
            Keras tensor including the ground truth
        y_pred : Keras tensor
            Keras tensor including the predictions of a heteroscedastic model.
            The predictions follow the order: (mean_0, S_0, mean_1, S_1, ...)
            with S_i the log of the variance for the ith output.
        """
        # NOTE(review): for nout > 1 this slices columns 1, 1+nout, 1+2*nout, ...
        # which only matches the documented interleaved (mean_i, S_i) layout
        # when nout == 2 -- confirm the model's actual output ordering.
        if nout > 1:
            log_sig2 = y_pred[:, 1::nout]
        else:
            log_sig2 = y_pred[:, 1]
        return K.mean(log_sig2)

    # name reported by Keras in training logs
    metric.__name__ = 'meanS_heteroscedastic'
    return metric
75bc5ddb482cc0e99bb4f5f9b0d557321b57cf06
26,436
def ec2_connect(module):
    """ Return an ec2 connection"""

    region, ec2_url, boto_params = get_aws_connection_info(module)

    # If we have a region specified, connect to its endpoint.
    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **boto_params)
        # FIX: `except X, e` is Python-2-only syntax (SyntaxError on
        # Python 3); `except X as e` works on both.
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    # Otherwise, no region so we fallback to the old connection method
    elif ec2_url:
        try:
            ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="Either region or ec2_url must be specified")
    return ec2
d94b5a1359a31657aa2dffd1f0c11d9cd06493dd
26,437
import glob


def read_lris(raw_file, det=None, TRIM=False):
    """
    Read a raw LRIS data frame (one or more detectors)
    Packed in a multi-extension HDU
    Based on readmhdufits.pro

    Parameters
    ----------
    raw_file : str
      Filename
    det : int, optional
      Detector number; Default = both
    TRIM : bool, optional
      Trim the image?  This doesn't work....

    Returns
    -------
    array : ndarray
      Combined image
    header : FITS header
    sections : list
      List of datasec, oscansec, ampsec sections
      datasec, oscansec needs to be for an *unbinned* image as per standard convention
    """
    # Check for file; allow for extra .gz, etc. suffix
    fil = glob.glob(raw_file + '*')
    if len(fil) != 1:
        # FIX: the format string has two placeholders but only one argument
        # was supplied, which raised IndexError instead of the intended error.
        msgs.error("Found {:d} files matching {:s}".format(len(fil), raw_file))

    # Read
    msgs.info("Reading LRIS file: {:s}".format(fil[0]))
    hdu = fits.open(fil[0])
    head0 = hdu[0].header

    # Get post, pre-pix values
    precol = head0['PRECOL']
    postpix = head0['POSTPIX']
    preline = head0['PRELINE']
    postline = head0['POSTLINE']

    # Setup for datasec, oscansec
    dsec = []
    osec = []

    # get the x and y binning factors...
    binning = head0['BINNING']
    xbin, ybin = [int(ibin) for ibin in binning.split(',')]

    # First read over the header info to determine the size of the output array...
    n_ext = len(hdu) - 1  # Number of extensions (usually 4)
    xcol = []
    xmax = 0
    ymax = 0
    xmin = 10000
    ymin = 10000
    for i in np.arange(1, n_ext + 1):
        theader = hdu[i].header
        detsec = theader['DETSEC']
        if detsec != '0':
            # parse the DETSEC keyword to determine the size of the array.
            x1, x2, y1, y2 = np.array(parse.load_sections(detsec, fmt_iraf=False)).flatten()

            # find the range of detector space occupied by the data
            # [xmin:xmax,ymin:ymax]
            xt = max(x2, x1)
            xmax = max(xt, xmax)
            yt = max(y2, y1)
            ymax = max(yt, ymax)

            # find the min size of the array
            xt = min(x1, x2)
            xmin = min(xmin, xt)
            yt = min(y1, y2)
            ymin = min(ymin, yt)
            # Save
            xcol.append(xt)

    # determine the output array size...
    nx = xmax - xmin + 1
    ny = ymax - ymin + 1

    # change size for binning...
    nx = nx // xbin
    ny = ny // ybin

    # Update PRECOL and POSTPIX
    precol = precol // xbin
    postpix = postpix // xbin

    # Deal with detectors
    if det in [1, 2]:
        nx = nx // 2
        n_ext = n_ext // 2
        # FIX: `dtype=np.int` was removed in NumPy 1.24; the builtin `int`
        # is the documented replacement.
        det_idx = np.arange(n_ext, dtype=int) + (det - 1) * n_ext
        ndet = 1
    elif det is None:
        ndet = 2
        det_idx = np.arange(n_ext).astype(int)
    else:
        raise ValueError('Bad value for det')

    # change size for pre/postscan...
    if not TRIM:
        nx += n_ext * (precol + postpix)
        ny += preline + postline

    # allocate output array...
    array = np.zeros((nx, ny))
    order = np.argsort(np.array(xcol))

    # insert extensions into master image...
    for kk, i in enumerate(order[det_idx]):

        # grab complete extension...
        data, predata, postdata, x1, y1 = lris_read_amp(hdu, i + 1)

        # insert components into output array...
        if not TRIM:
            # insert predata...
            buf = predata.shape
            nxpre = buf[0]
            xs = kk * precol
            xe = xs + nxpre
            array[xs:xe, :] = predata

            # insert data...
            buf = data.shape
            nxdata = buf[0]
            nydata = buf[1]
            xs = n_ext * precol + kk * nxdata  # (x1-xmin)/xbin
            xe = xs + nxdata
            # Eliminate lines
            section = '[{:d}:{:d},{:d}:{:d}]'.format(
                preline * ybin, (nydata - postline) * ybin, xs * xbin, xe * xbin)
            dsec.append(section)
            array[xs:xe, :] = data  # Include postlines

            # insert postdata...
            buf = postdata.shape
            nxpost = buf[0]
            xs = nx - n_ext * postpix + kk * postpix
            xe = xs + nxpost
            section = '[{:d}:{:d},{:d}:{:d}]'.format(
                preline * ybin, (nydata - postline) * ybin, xs * xbin, xe * xbin)
            osec.append(section)
            array[xs:xe, :] = postdata
        else:
            buf = data.shape
            nxdata = buf[0]
            nydata = buf[1]

            xs = (x1 - xmin) // xbin
            xe = xs + nxdata
            ys = (y1 - ymin) // ybin
            ye = ys + nydata - postline

            yin1 = preline
            yin2 = nydata - postline

            array[xs:xe, ys:ye] = data[:, yin1:yin2]

    # make sure BZERO is a valid integer for IRAF
    obzero = head0['BZERO']
    head0['O_BZERO'] = obzero
    head0['BZERO'] = 32768 - obzero

    # Return, transposing array back to goofy Python indexing
    return array.T, head0, (dsec, osec)
98351f63a78a37ac8cbb3282e71903ee6dd6bbb1
26,438
def mu(n: int) -> int:
    """Return the value of the Moebius function on n.

    Examples:

        >>> mu(3*5*2)
        -1
        >>> mu(3*5*2*17)
        1
        >>> mu(3*3*5*2)
        0
        >>> mu(1)
        1
        >>> mu(5)
        -1
        >>> mu(2**10-1)
        -1
    """
    if n == 1:
        return 1
    prime_factors = factor(n)
    # a repeated prime factor means n is not squarefree
    if len(set(prime_factors)) < len(prime_factors):
        return 0
    return (-1) ** len(prime_factors)
b8347480041de2dc9dfc469096293e3815eebbcd
26,439
def find_last(arr, val, mask=None, compare="eq"):
    """
    Returns the index of the last occurrence of *val* in *arr*.
    Or the last occurrence of *arr* *compare* *val*, if *compare* is not eq
    Otherwise, returns -1.

    Parameters
    ----------
    arr : device array
    val : scalar
    mask : mask of the array
    compare: str ('gt', 'lt', or 'eq' (default))
    """
    # Column of candidate indices where the comparison holds.
    found_col = find_index_of_val(arr, val, mask=mask, compare=compare)
    # arr.size appears to be the helper's "no match" sentinel; map it to null
    # so it cannot win the max() below — confirm against find_index_of_val.
    found_col = found_col.find_and_replace([arr.size], [None], True)
    max_index = found_col.max()
    # max() of an all-null column is None (or NaN), i.e. nothing matched.
    return -1 if max_index is None or np.isnan(max_index) else max_index
376a21174bc26ca332768aadf96b84b06e7f55f5
26,440
def find_orphans(input_fits, header_ihdus_keys):
    """Return a dictionary with keys=(ihdu, key) and values='label' for missing cards in 'header_ihdus_keys'

    Parameters:
    -----------
    input_fits: astropy.io.fits.HDUList instance
        FITS file where to find orphan header cards
    header_ihdus_keys: list
        a list of tuples=(ihdu,key) for the reference header cards

    Returns:
    --------
    orphans: list
        list of orphan header keys

    NOTE(review): despite the summary above, the implementation returns a
    *list* of (ihdu, key) tuples, not a dictionary.
    """
    # Split the reference (ihdu, key) pairs into parallel sequences.
    ihdus, keys = zip(*header_ihdus_keys)
    orphans = []
    for ihdu, lvm_hdu in enumerate(input_fits):
        # Boolean mask selecting the reference keys that belong to this HDU.
        hdu_mask = np.array(ihdus)==ihdu
        # Keys present in the actual header but absent from the reference set.
        orphan_keys = OrderedSet(lvm_hdu.header.keys()) - OrderedSet(np.array(keys)[hdu_mask])
        orphans.extend(zip([ihdu]*len(orphan_keys), orphan_keys))
    return orphans
bcc98722ba43450ff68367f776a84c0e193447d9
26,441
import ipdb
import ipdb  # NOTE(review): duplicate import of ipdb
import tqdm


def solve(n_vec, m_vec, p_vec, repeat, dns_level, seed, solver='gurobi'):
    """
    Solve random optimization problems

    Generates `repeat` random mixed-integer QPs for each problem size in
    (n_vec, m_vec, p_vec), solves them with the requested solver, and
    returns a pandas DataFrame of per-size timing statistics (ms).
    """
    print("Solving random problems with solver %s\n" % solver)

    # Define statistics to record
    std_solve_time = np.zeros(len(n_vec))
    avg_solve_time = np.zeros(len(n_vec))
    min_solve_time = np.zeros(len(n_vec))
    max_solve_time = np.zeros(len(n_vec))

    n_prob = len(n_vec)

    # Store also OSQP time
    if solver == 'miosqp':
        # Add OSQP solve times statistics
        avg_osqp_solve_time = np.zeros(len(n_vec))

    # reset random seed
    np.random.seed(seed)

    for i in range(n_prob):
        # Get dimensions
        n = n_vec[i]
        m = m_vec[i]
        p = p_vec[i]

        print("problem n = %i, m = %i, p = %i" % (n, m, p))

        # Define vector of cpu times
        solve_time_temp = np.zeros(repeat)

        # Store also OSQP time
        if solver == 'miosqp':
            osqp_solve_time_temp = np.zeros(repeat)

        # NOTE(review): with `import tqdm` this calls the *module*; this
        # usually requires `from tqdm import tqdm` — confirm.
        for j in tqdm(range(repeat)):
        # for j in range(repeat):

            # Generate random vector of indeces
            i_idx = np.random.choice(np.arange(0, n), p, replace=False)

            # Generate random Matrices
            Pt = spa.random(n, n, density=dns_level)
            P = spa.csc_matrix(np.dot(Pt, Pt.T))
            q = sp.randn(n)
            A = spa.random(m, n, density=dns_level)
            u = 2 + sp.rand(m)
            l = -2 + sp.rand(m)

            # Enforce [0, 1] bounds on variables
            i_l = np.zeros(p)
            i_u = np.ones(p)
            # A, l, u = miosqp.add_bounds(i_idx, 0., 1., A, l, u)

            if solver == 'gurobi':
                # Solve with gurobi
                prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)
                res_gurobi = prob.solve(solver=mpbpy.GUROBI, verbose=False, Threads=1)
                if res_gurobi.status != 'optimal':
                    ipdb.set_trace()
                solve_time_temp[j] = 1e3 * res_gurobi.cputime

            elif solver == 'miosqp':
                # Define problem settings
                miosqp_settings = {
                    # integer feasibility tolerance
                    'eps_int_feas': 1e-03,
                    # maximum number of iterations
                    'max_iter_bb': 1000,
                    # tree exploration rule
                    #   [0] depth first
                    #   [1] two-phase: depth first until first incumbent and then best bound
                    'tree_explor_rule': 1,
                    # branching rule
                    #   [0] max fractional part
                    'branching_rule': 0,
                    'verbose': False,
                    'print_interval': 1}

                osqp_settings = {'eps_abs': 1e-03,
                                 'eps_rel': 1e-03,
                                 'eps_prim_inf': 1e-04,
                                 'verbose': False}

                model = miosqp.MIOSQP()
                model.setup(P, q, A, l, u, i_idx, i_l, i_u,
                            miosqp_settings, osqp_settings)
                res_miosqp = model.solve()

                # DEBUG (check if solutions match)
                # prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)
                # res_gurobi = prob.solve(solver=mpbpy.GUROBI, verbose=False)
                # if (np.linalg.norm(res_gurobi.x - res_miosqp.x) /
                #         np.linalg.norm(res_gurobi.x)) > 1e-02:
                #    import ipdb; ipdb.set_trace()
                #
                # import ipdb; ipdb.set_trace()

                if res_miosqp.status != miosqp.MI_SOLVED:
                    ipdb.set_trace()

                # Solution time
                solve_time_temp[j] = 1e3 * res_miosqp.run_time

                # Store OSQP time in percentage
                if solver == 'miosqp':
                    osqp_solve_time_temp[j] = \
                        100 * (res_miosqp.osqp_solve_time / res_miosqp.run_time)

        # Get time statistics
        std_solve_time[i] = np.std(solve_time_temp)
        avg_solve_time[i] = np.mean(solve_time_temp)
        max_solve_time[i] = np.max(solve_time_temp)
        min_solve_time[i] = np.min(solve_time_temp)

        # Store also OSQP time
        if solver == 'miosqp':
            avg_osqp_solve_time[i] = np.mean(osqp_solve_time_temp)

    # Create pandas dataframe for the results
    df_dict = {'n': n_vec,
               'm': m_vec,
               'p': p_vec,
               't_min': min_solve_time,
               't_max': max_solve_time,
               't_avg': avg_solve_time,
               't_std': std_solve_time}

    # Store also OSQP time
    if solver == 'miosqp':
        df_dict.update({'t_osqp_avg': avg_osqp_solve_time})

    timings = pd.DataFrame(df_dict)

    return timings
1630b556f40c20696adc5b7ce2b9ef218d82a87a
26,442
def Rotation_multiplyByBodyXYZ_NInv_P(cosxy, sinxy, qdot):
    """
    Rotation_multiplyByBodyXYZ_NInv_P(Vec2 cosxy, Vec2 sinxy, Vec3 qdot) -> Vec3

    Parameters
    ----------
    cosxy: SimTK::Vec2 const &
    sinxy: SimTK::Vec2 const &
    qdot: SimTK::Vec3 const &
    """
    # SWIG-generated thin wrapper: delegates directly to the compiled
    # Simbody binding; all argument checking happens in native code.
    return _simbody.Rotation_multiplyByBodyXYZ_NInv_P(cosxy, sinxy, qdot)
93303a83224b29d9dddf09a64f36d7004ae2ace0
26,443
def sqrt(x: float) -> float:
    """
    Take the square root of a non-negative number

    Arguments:
        x (float): the value to take the square root of; must be >= 0
            (the previous docstring mislabelled it as ``int`` and said
            "positive", although 0 is accepted)

    Returns:
        (float): √x

    Raises:
        (ValueError): If the number is negative
    """
    if x < 0:
        raise ValueError('Cannot square-root a negative number with this '
                         'function!')
    return np.sqrt(x)
ab43573010044ffa3861f6a13a58135be52c02b4
26,444
def get_tree_type(tree):
    """Return the (sub)tree type: 'root', 'nucleus', 'satellite', 'text' or 'leaf'

    Parameters
    ----------
    tree : nltk.tree.ParentedTree
        a tree representing a rhetorical structure (or a part of it)
    """
    if is_leaf_node(tree):
        return SubtreeType.leaf
    # Node labels appear to be of the form "Type:annotation"; keep only the
    # lowercased part before the colon (e.g. "Nucleus:span" -> "nucleus").
    tree_type = tree.label().lower().split(':')[0]
    # Guard against unexpected labels; valid values come from SUBTREE_TYPES.
    assert tree_type in SUBTREE_TYPES
    return tree_type
15d292ab1f756594add92a6999c7874f6d7fc45b
26,445
def intersection(ls1, ls2):
    """Return the elements of ``ls2`` that also occur in ``ls1``.

    Membership is tested against a set built from ``ls1``, so duplicates in
    ``ls1`` are irrelevant; the result preserves the order (and any repeated
    values) of ``ls2``.

    Parameters:
    -----------
    ls1 : Python list
        The first list. Cannot be array.
    ls2 : Python list
        The second list. Cannot be array.

    Returns:
    ls3 : Python list
        The list of overlapping values between ls1 and ls2
    """
    lookup = set(ls1)
    return [item for item in ls2 if item in lookup]
fb3bda67d8040da5f4f570e8ff10a8503e153f36
26,446
import sys


def run_tutorial(options):
    """Run a selection dlapp console CLI tutorial.

    Parameters
    ----------
    options (argparse.Namespace): a argparse.Namespace instance.

    Returns
    -------
    None: will call ``sys.exit(0)`` if end user requests a tutorial
    """
    wants_tutorial = (options.tutorial
                      or options.tutorial_csv
                      or options.tutorial_json
                      or options.tutorial_yaml)
    if not wants_tutorial:
        return None

    # Show every tutorial the user asked for, then terminate the process.
    if options.tutorial:
        show_tutorial_dlquery()
    if options.tutorial_csv:
        show_tutorial_csv()
    if options.tutorial_json:
        show_tutorial_json()
    if options.tutorial_yaml:
        show_tutorial_yaml()
    sys.exit(0)
fab0dac555bc45f6c816a6804b1e0d7447f5a5cf
26,447
def params_count(model):
    """
    Computes the number of parameters.

    Args:
        model (model): model to count the number of parameters.
    """
    per_tensor_counts = [p.numel() for p in model.parameters()]
    return np.sum(per_tensor_counts).item()
12bb8463f6eb722a5cb7e7adfdf869764be67944
26,448
from pathlib import Path
import platform
import shutil


def open_cmd_in_path(file_path: Path) -> int:
    """Open a terminal in the selected folder.

    Launches a platform-appropriate terminal with its working directory set
    to ``file_path``; only Linux and Windows are handled here.

    :param file_path: directory in which the terminal should be opened.
    :return: the result of execute_cmd/execute_app (project helpers), or 0
        when no terminal was launched (unsupported platform, or cmd.exe not
        found on Windows).
    """
    if platform.system() == "Linux":
        # Debian-style alias that resolves to the user's preferred terminal.
        return execute_cmd(["x-terminal-emulator", "-e", "cd", f"{str(file_path)}", "bash"], True)
    elif platform.system() == "Windows":
        cmd_path = shutil.which("cmd")
        if cmd_path:
            # /k keeps the cmd window open after changing directory.
            return execute_app(Path(cmd_path), True, f"/k cd {str(file_path)}")
    return 0
899424cd8ab2d76a5ca47d7219a7057d29bb5abe
26,449
def get_f(user_id, ftype):
    """Get one's follower/following

    :param str user_id: target's user id
    :param str ftype: follower or following
    :return: a mapping from follower/following id to screen name
    :rtype: Dict
    :raises Exception: on unknown ftype or on an API error payload
    """
    # Twitter cursored-list request parameters; cursor=-1 is the first page.
    p = dict(user_id=user_id,
             count=200,
             stringify_ids=True,
             include_user_entities=True,
             cursor=-1)
    f = []
    if ftype == 'follower':
        resource_uri = 'https://api.twitter.com/1.1/followers/list.json'
    elif ftype == 'following':
        resource_uri = 'https://api.twitter.com/1.1/friends/list.json'
    else:
        raise Exception('Unknown type: ' + ftype)
    while True:
        # NOTE(review): this retries forever on ConnectionError with no
        # backoff or retry cap — consider bounding it.
        while 1:
            try:
                j = twitter().get(resource_uri, params=p).json()
                break
            except ConnectionError:
                pass
        if 'errors' in j:
            raise Exception(j['errors'])
        if 'error' in j:
            raise Exception(j['error'])
        f.extend([(str(u['id']), u['screen_name']) for u in j['users']])
        # next_cursor == 0 marks the last page of results.
        if j['next_cursor'] != 0:
            p['cursor'] = j['next_cursor']
        else:
            break
    return dict(f)
31371d823509c8051660ca0869556253af6b99cc
26,450
from typing import List
from typing import Tuple
from typing import Any
from typing import Optional
import numpy


def walk_extension(
    state: State,
    trie_prefix: Bytes,
    node_key: Bytes,
    extension_node: ExtensionNode,
    dirty_list: List[Tuple[Bytes, Node]],
    cursor: Any,
) -> Optional[InternalNode]:
    """
    Consume the last element of `dirty_list` and update the `ExtensionNode`
    at `node_key`, potentially turning it into `ExtensionNode` ->
    `BranchNode` -> `ExtensionNode`. This function returns the new value of
    the visited node, but does not write it to the database.
    """
    key, value = dirty_list[-1]
    # Case 1: the dirty key lies entirely under this extension's segment —
    # recurse past the segment and rebuild the extension over the result.
    if key[len(node_key) :].startswith(extension_node.key_segment):
        target_node = walk(
            state,
            trie_prefix,
            node_key + extension_node.key_segment,
            dirty_list,
            cursor,
        )
        return make_extension_node(
            state,
            trie_prefix,
            node_key,
            node_key + extension_node.key_segment,
            target_node,
            cursor,
        )
    # Case 2: the dirty key diverges part-way through the segment; split at
    # the longest common prefix.
    prefix_length = common_prefix_length(
        extension_node.key_segment, key[len(node_key) :]
    )
    prefix = extension_node.key_segment[:prefix_length]
    if prefix_length != len(extension_node.key_segment) - 1:
        # More than one nibble remains after the branch point, so the tail
        # of the old segment becomes a new, shorter extension node.
        new_extension_node = ExtensionNode(
            extension_node.key_segment[prefix_length + 1 :],
            extension_node.subnode,
        )
        write_internal_node(
            cursor,
            trie_prefix,
            node_key + extension_node.key_segment[: prefix_length + 1],
            new_extension_node,
        )
        encoded_new_extension_node = encode_internal_node(new_extension_node)
    else:
        # Exactly one nibble remains: it is consumed by the branch itself,
        # so the old subnode hangs directly off the new branch.
        encoded_new_extension_node = extension_node.subnode
    node = split_branch(
        state,
        trie_prefix,
        node_key + prefix,
        extension_node.key_segment[prefix_length],
        encoded_new_extension_node,
        dirty_list,
        cursor,
    )
    # A non-empty common prefix needs an extension node in front of the
    # branch; otherwise the branch replaces this node directly.
    if prefix_length != 0:
        return make_extension_node(
            state, trie_prefix, node_key, node_key + prefix, node, cursor
        )
    else:
        return node
bcc31ae61729db82d84e02168926845b7b42da44
26,451
import os


def gffintRead():
    """
    Read the integrated free-free gaunt factors of [1]_.

    Reads ``$XUVTOP/continuum/gffint.dat`` and returns a dict of five
    41-element float64 arrays: 'g2', 'gffint', 's1', 's2', 's3'.
    """
    xuvtop = os.environ['XUVTOP']
    fileName = os.path.join(xuvtop, 'continuum','gffint.dat' )
    # NOTE: `input` shadows the builtin of the same name within this scope.
    input = open(fileName)
    lines = input.readlines()
    input.close()
    #
    ngamma = 41
    g2 = np.zeros(ngamma, np.float64)
    gffint = np.zeros(ngamma, np.float64)
    s1 = np.zeros(ngamma, np.float64)
    s2 = np.zeros(ngamma, np.float64)
    s3 = np.zeros(ngamma, np.float64)
    #
    ivalue = 0
    # start = 4 skips the file's header lines — presumably 4 of them;
    # confirm against the data file format.
    start = 4
    for iline in range(start,start+ngamma):
        values = lines[iline].split()
        g2[ivalue] = float(values[0])
        gffint[ivalue] = float(values[1])
        s1[ivalue] = float(values[2])
        s2[ivalue] = float(values[3])
        s3[ivalue] = float(values[4])
        ivalue += 1
    #
    return {'g2':g2, 'gffint':gffint, 's1':s1, 's2':s2, 's3':s3}
63233454325c192dc674f2685fc92c38a82fbd42
26,452
def bottleneck_block_v2(inputs, filters, strides, training, projection_shortcut, data_format):
    """Pre-activation ("v2") 3-layer bottleneck residual block.

    Applies batch-norm + relu *before* each convolution (1x1 reduce,
    3x3 spatial, 1x1 expand to ``4 * filters``) and adds the shortcut.

    :param inputs: Input images
    :param filters: number of filters
    :param strides: strides of convolutions
    :param training: a flag to indicate whether during training or not
    :param projection_shortcut: a function if projection is necessary on
        shortcuts, None otherwise
    :param data_format: the format of data, either channels_first or
        channels_last
    :return: one 3-layer bottleneck residual block
    """
    # The shortcut is taken from the raw inputs, projected if requested.
    if projection_shortcut is not None:
        shortcut = projection_shortcut(inputs)
    else:
        shortcut = inputs

    x = batch_norm_relu(inputs, training, data_format)
    x = padded_conv2d(inputs=x, kernel_size=1, filters=filters,
                      strides=1, data_format=data_format)

    x = batch_norm_relu(x, training, data_format)
    x = padded_conv2d(inputs=x, kernel_size=3, filters=filters,
                      strides=strides, data_format=data_format)

    x = batch_norm_relu(x, training, data_format)
    x = padded_conv2d(inputs=x, kernel_size=1, filters=4 * filters,
                      strides=1, data_format=data_format)

    return x + shortcut
8330e68d1411c643ffcee6916ba95ef77b7cc5ee
26,453
import os


def update_index(homework):
    """Check if the index of the given dataset is up to date with server version, and update it if needed.

    Parameters:
    homework (str): The name of the dataset to check the index of.

    Returns:
    bool: Indicates if we were able to check the index and update if needed
        (i.e. we had internet)

    Raises:
    NoInternetError: If the downloaded index still does not match the
        server's hash (treated as an internet failure).
    """
    # Get the path to our dataset
    dataset_path = get_dataset_path(homework)

    # Define our file names we'll need
    index_urls_file = "index_urls.tsv"
    index_hash_file = "index_hash.txt"
    index_file = "index.txt"

    # Get, from the server, what the md5 hash of our index file should be
    index_urls_path = os.path.join(dataset_path, index_urls_file)
    urls_dict = parse_tsv_dict(index_urls_path)
    index_hash_url = urls_dict.get(index_hash_file)

    # Transient progress message, overwritten in place via '\r'.
    checking_msg = f"Checking that {homework} index is up-to-date..."
    print(checking_msg, end='\r')
    try:
        server_index_hash = download_text(index_hash_url)
    finally:
        print(" " * len(checking_msg), end='\r') # Erase the checking message, even if there was an internet error

    # If the local index already matches the server hash, nothing to do.
    index_path = os.path.join(dataset_path, index_file)
    if os.path.isfile(index_path):
        local_index_hash = hash_file(index_path)
        if local_index_hash == server_index_hash:
            return True

    # Otherwise download a fresh copy and re-verify its hash.
    index_url = urls_dict.get(index_file)
    download_file(index_url, index_path, server_index_hash, file_message=f"{homework} index")
    if os.path.isfile(index_path):
        local_index_hash = hash_file(index_path)
        if local_index_hash == server_index_hash:
            return True

    # If we get here, something apparently went wrong with the download.
    raise NoInternetError("Insufficient internet. Check your internet connection.")
41f2d935b990e8d9a5ab5dabd268288d864d1a60
26,454
def preprocess_lines(lines, otherAutorizedSymbols, sentencesSeparator=None):
    """ complete my dataset

    When ``sentencesSeparator`` is given, each line is treated as a pair of
    parallel texts joined by the separator; the pair is split into aligned
    sentences (and then into aligned comma-separated chunks) and each
    aligned piece is re-emitted as its own separator-joined line.  Lines
    whose first or second half is the literal "__Error__" are dropped.
    Without a separator, ``lines`` is returned unchanged.

    NOTE(review): the second pass iterates over a copy of ``result`` while
    appending to ``result`` itself, so the output contains both the
    sentence-level pairs and their comma-chunk expansions — confirm this
    duplication is intended.
    """
    if sentencesSeparator :
        result = []
        for line in lines :
            e = line.split(sentencesSeparator)
            # Skip pairs where either side failed upstream processing.
            if e[0] != "__Error__" and e[1]!= "__Error__" :
                lignes_i = sent_tokenize(e[0])
                lignes_j = sent_tokenize(e[1])
                l = len(lignes_i)
                if l == len(lignes_j) :
                    # Same number of sentences on both sides: align 1:1.
                    for k in range(l) :
                        result.append(EPE(lignes_i[k])+sentencesSeparator+EPE(lignes_j[k])+'\n')
                else :
                    # Sentence counts differ: keep the whole pair as one line.
                    lignes_i = EPE(e[0])
                    lignes_j = EPE(e[1])
                    result.append(lignes_i+sentencesSeparator+lignes_j+'\n')
        for line in result.copy() :
            e = line.split(sentencesSeparator)
            lignes_i = help_preprocess_lines(e[0], otherAutorizedSymbols).split(',')
            lignes_j = help_preprocess_lines(e[1], otherAutorizedSymbols).split(',')
            l = len(lignes_i)
            if l == len(lignes_j) :
                for k in range(l) :
                    result.append(EPE(lignes_i[k])+sentencesSeparator+EPE(lignes_j[k])+'\n')
        return result
    else :
        return lines
04bf9f90bf06f07803ca8dd6199728d33a73a6de
26,455
def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool:
    """
    Check if ``validator`` is slashable.

    A validator can be slashed only if it has not been slashed already and
    ``epoch`` falls within [activation_epoch, withdrawable_epoch).
    """
    if validator.slashed:
        return False
    return validator.activation_epoch <= epoch < validator.withdrawable_epoch
9ea82379e270f668d2dde3cb5b10a59f29f2e6e6
26,456
def check_rate_limit() -> None:
    """
    Check whether or not a user has exceeded the rate limits specified in
    the config. Rate limits per API key or session and per user are
    recorded. The redis database is used to keep track of caching, by
    incrementing "rate limit" cache keys on each request and setting a
    timeout on them. The rate limit can be adjusted in the configuration
    file.

    :raises APIException: If the rate limit has been exceeded
    """
    # Unauthenticated requests are rate-limited separately.
    # NOTE(review): this returns that helper's result despite the `-> None`
    # annotation — confirm the helper indeed returns None.
    if not flask.g.user:
        return check_rate_limit_unauthenticated()

    user_cache_key = f'rate_limit_user_{flask.g.user.id}'
    key_cache_key = f'rate_limit_api_key_{flask.g.api_key.hash}'

    # Config entries are (max_requests, window_seconds) pairs; cache.inc
    # creates the counter with the window timeout on first use.
    auth_specific_requests = cache.inc(
        key_cache_key, timeout=app.config['RATE_LIMIT_AUTH_SPECIFIC'][1]
    )
    if auth_specific_requests > app.config['RATE_LIMIT_AUTH_SPECIFIC'][0]:
        time_left = cache.ttl(key_cache_key)
        raise APIException(
            f'Client rate limit exceeded. {time_left} seconds until lock expires.'
        )

    user_specific_requests = cache.inc(
        user_cache_key, timeout=app.config['RATE_LIMIT_PER_USER'][1]
    )
    if user_specific_requests > app.config['RATE_LIMIT_PER_USER'][0]:
        time_left = cache.ttl(user_cache_key)
        raise APIException(
            f'User rate limit exceeded. {time_left} seconds until lock expires.'
        )
ca49c4b2c4287bca3d664b20abebd9b7df53a0fb
26,457
def update_db(mode):
    """Mode can be 'add', 'move', 'delete'

    Decorator factory: wraps a transfer function taking ``(did, rse, ...)``
    and mirrors its effect into the run database when the wrapped function
    is called with ``update_db=True`` (and a ``db`` handle exists).

    NOTE(review): in the update path the wrapped function is invoked as
    ``func(did, rse, **kwargs)`` — any extra positional args are dropped and
    its return value is discarded, so the wrapper returns None there.
    """
    def decorator(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            # did and rse are the first 2 args
            did, rse = args[0], args[1]
            update_db = kwargs.get('update_db', False)
            # Fast path: no DB bookkeeping requested/possible.
            if not update_db or db is None:
                return func(*args, **kwargs)
            func(did, rse, **kwargs)
            number, dtype, h = parse_did(did)
            if mode == 'add':
                data = build_data_dict(did, rse, status='transferring')
                db.update_data(number, data)
            elif mode == 'delete':
                data = db.get_data(number, type=dtype, did=did, location=rse)
                if len(data) == 0:
                    # Nothing recorded at this location; nothing to delete.
                    pass
                else:
                    for d in data:
                        db.delete_data(number, d)
            elif mode == 'move':
                from_rse = kwargs.pop('from_rse')
                if not from_rse:
                    raise ValueError(f"'from_rse' must be passed when calling {func.__name__}")
                from_data = db.get_data(number, type=dtype, did=did, location=from_rse)
                if len(from_data) == 0:
                    # Source entry missing: synthesize a destination record.
                    to_data = build_data_dict(did, rse, 'transferring')
                else:
                    to_data = from_data[0].copy()
                    to_data['location'] = rse
                    to_data['status'] = 'transferring'
                db.update_data(number, to_data)
                db.delete_data(number, from_data)
        return wrapped
    return decorator
cc91757030c9d398bd17ad73521e23d835879560
26,458
def right_size2(a1, a2):
    """
    Check that a1 and a2 have equal shapes.
    a1 and a2 are NumPy arrays.

    Returns True when the shapes match; raises TypeError if either argument
    lacks a ``shape`` attribute, or ValueError if the shapes differ.
    """
    if not (hasattr(a1, 'shape') and hasattr(a2, 'shape')):
        # Bug fix: a1_name/a2_name were previously referenced here without
        # ever being assigned, so the intended TypeError surfaced as a
        # NameError. Resolve the caller-side argument names first, exactly
        # as the shape-mismatch branch does.
        a1_name, where = get_argname_in_call('right_size2', 1)
        a2_name, where = get_argname_in_call('right_size2', 2)
        raise TypeError('%s is %s and %s is %s - both must be NumPy arrays'
                        % (a1_name, type(a1), a2_name, type(a2)))
    if a1.shape != a2.shape:
        a1_name, where = get_argname_in_call('right_size2', 1)
        a2_name, where = get_argname_in_call('right_size2', 2)
        raise ValueError(
            '%s\n%s has size %s, which is not compatible with size %s of %s' \
            % (where, a1_name, a1.shape, a2.shape, a2_name))
    else:
        return True
66aa2097ff67a2ef44c49118dbdbba1539f1e3ba
26,459
import argparse


def arguments_parser() -> argparse.Namespace:
    """
    Parses arguments.

    Builds the CLI for the linter runner and parses ``sys.argv``; the
    resulting namespace carries ``username`` (GitHub user, default
    'Luzkan') and ``repositories`` (list of repository names to lint).
    """
    parser = argparse.ArgumentParser(description="Input File containing list of repositories url's")
    parser.add_argument("-u", "--username", default='Luzkan',
                        help="GitHub Username. \
                        (default: %(default)s)")
    # nargs='*' accepts zero or more repository names after -r/--repositories.
    parser.add_argument("-r", "--repositories", default=['DeveloperEnvironment', 'PythonCourse'], nargs='*',
                        help="List of repository names that should be linted. \
                        (default: %(default)s)")
    return parser.parse_args()
1c3eafc82ac2014c205f1fe5a9356ef47fe9b864
26,460
def spherDist(stla, stlo, evla, evlo):
    """spherical distance in degrees

    Parameter names suggest station (stla/stlo) and event (evla/evlo)
    latitude/longitude pairs — confirm against SphericalCoords.distance.
    """
    return SphericalCoords.distance(stla, stlo, evla, evlo)
e2ae206c63712dbf263d6d3af28066f279571e20
26,461
def get_example(matrix, start_row, input_timesteps=INPUT_TIMESTEPS, output_timesteps=OUTPUT_TIMESTEPS):
    """Returns a pair of input, output ndarrays.

    Input starts at start_row and has the given input length. Output starts
    at next timestep and has the given output length.

    Raises:
        ValueError: if matrix has fewer than
            start_row + input_timesteps + output_timesteps rows.
            (ValueError subclasses the generic Exception raised previously,
            so existing ``except Exception`` callers keep working.)
    """
    end_row = start_row + input_timesteps + output_timesteps
    # Make sure there are enough time steps remaining.
    if len(matrix) < end_row:
        raise ValueError('Not enough rows to get example.')
    split_row = start_row + input_timesteps
    input_ex = matrix[start_row:split_row]
    output_ex = matrix[split_row:end_row]
    return (input_ex, output_ex)
aa396588a32492c29e1dbb4a0e8f96016974b805
26,462
def full_mul_modifier(optree):
    """ extend the precision of arguments of a multiplication to get the
        full result of the multiplication

        Each operand is wrapped in a Conversion to the result precision
        unless it already has that precision.
    """
    op0 = optree.get_input(0)
    op1 = optree.get_input(1)
    optree_type = optree.get_precision()
    assert(is_std_integer_format(op0.get_precision()) and is_std_integer_format(op1.get_precision()) and is_std_integer_format(optree_type))
    op0_conv = Conversion(op0, precision = optree_type) if optree_type != op0.get_precision() else op0
    # Bug fix: the fallback previously reused op0 here, silently multiplying
    # the first operand by itself whenever op1 already had the destination
    # precision.
    op1_conv = Conversion(op1, precision = optree_type) if optree_type != op1.get_precision() else op1

    return Multiplication(op0_conv, op1_conv, precision = optree_type)
6c191c274d9f130619e2831b50de65a270b41748
26,463
def find_used_modules(modules, text):
    """
    Given a list of modules, return the set of all those imported in text

    A module counts as "used" when its name appears on any line of *text*
    that also contains the word 'import'.
    """
    lines_with_import = (ln for ln in text.splitlines() if 'import' in ln)
    return {mod for ln in lines_with_import for mod in modules if mod in ln}
0b1b2b31f60a565d7ba30a9b21800ba7ec265d0c
26,464
def string2mol2(filename, string):
    """
    Writes molecule to filename.mol2 file, input is a string of Mol2 blocks

    A '.mol2' extension is appended when the filename does not already end
    with one.

    Bug fix: the previous check compared the last *four* characters
    (``filename[-4:]``) against the five-character suffix '.mol2', which
    could never match, so '.mol2' was appended even when already present
    (producing e.g. 'x.mol2.mol2').
    """
    block = string
    if not filename.endswith('.mol2'):
        filename += '.mol2'
    with open(filename, 'w') as file:
        file.write(block)
    return None
51043e7f4edde36682713455dc33c643f89db397
26,465
def getTimeFormat():
    """Return the time format string used by the bot.

    ``timeFormat`` is a module-level constant defined elsewhere in the file.
    """
    return timeFormat
cd13ab983cd91dca4fc3ae3414c3724b5019f248
26,466
from typing import Sequence
from typing import Optional
import numpy


def concatenate(
    arrays: Sequence[PolyLike],
    axis: int = 0,
    out: Optional[ndpoly] = None,
) -> ndpoly:
    """
    Join a sequence of arrays along an existing axis.

    Args:
        arrays:
            The arrays must have the same shape, except in the dimension
            corresponding to `axis` (the first, by default).
        axis:
            The axis along which the arrays will be joined. If axis is None,
            arrays are flattened before use. Default is 0.
        out:
            If provided, the destination to place the result. The shape must
            be correct, matching that of what concatenate would have returned
            if no out argument were specified.

    Returns:
        The concatenated array.

    Examples:
        >>> const = numpy.array([[1, 2], [3, 4]])
        >>> poly = numpoly.variable(2).reshape(1, 2)
        >>> numpoly.concatenate((const, poly), axis=0)
        polynomial([[1, 2],
                    [3, 4],
                    [q0, q1]])
        >>> numpoly.concatenate((const, poly.T), axis=1)
        polynomial([[1, 2, q0],
                    [3, 4, q1]])
        >>> numpoly.concatenate((const, poly), axis=None)
        polynomial([1, 2, 3, 4, q0, q1])

    """
    # Give every input the same exponent basis so their per-key coefficient
    # arrays line up and can be concatenated independently.
    arrays = numpoly.align_exponents(*arrays)
    if out is None:
        # Concatenate the coefficient array of each monomial key separately,
        # then rebuild a polynomial from the combined attributes.
        coefficients = [numpy.concatenate(
            [array.values[key] for array in arrays], axis=axis)
            for key in arrays[0].keys]
        out = numpoly.polynomial_from_attributes(
            exponents=arrays[0].exponents,
            coefficients=coefficients,
            names=arrays[0].names,
            dtype=coefficients[0].dtype,
        )
    else:
        # Destination provided: write each shared key in place; keys absent
        # from the aligned inputs are left untouched in `out`.
        for key in out.keys:
            if key in arrays[0].keys:
                numpy.concatenate([array.values[key] for array in arrays],
                                  out=out.values[key], axis=axis)
    return out
df6c897b25279dca5187e6cafe5c1b9d22b8a994
26,467
import requests
import json


def get_cik_map(key="ticker"):
    """Get dictionary of tickers to CIK numbers.

    Args:
        key (str): Should be either "ticker" or "title". Choosing "ticker"
            will give dict with tickers as keys. Choosing "title" will use
            company name as keys.

    Returns:
        Dictionary with either ticker or company name as keys, depending on
        ``key`` argument, and corresponding CIK as values.

    Raises:
        ValueError: If ``key`` is neither "ticker" nor "title".

    .. versionadded:: 0.1.6
    """
    if key not in ("ticker", "title"):
        raise ValueError("key must be 'ticker' or 'title'. Was given {key}.".format(key=key))
    response = requests.get(URL)
    # Idiom: Response.json() decodes in one step (honouring the declared
    # charset) instead of round-tripping through .text and json.loads.
    json_response = response.json()
    return {v[key]: str(v["cik_str"]) for v in json_response.values()}
6a9cf67bb63bfd057ee936e1a5d5be33d8655abe
26,468
def _data_layers():
    """Index all configured data layers by their "shorthand".

    This doesn't have any error checking -- it'll explode if configured
    improperly"""
    layers = {}
    # settings.DATA_LAYERS holds dotted paths like "pkg.module.ClassName".
    for class_path in settings.DATA_LAYERS:
        module, class_name = class_path.rsplit('.', 1)
        # Import the module and pull the class off it by name.
        klass = getattr(import_module(module), class_name)
        # Each layer class is expected to expose a `shorthand` attribute.
        layers[klass.shorthand] = klass
    return layers
34b0843c76086b41bf119987283fa4373bc07190
26,469
import hashlib


def sha1base64(file_name):
    """Calculate SHA1 checksum in Base64 for a file

    Delegates the streaming/encoding work to the module-level helper
    ``_compute_base64_file_hash``, parameterised with ``hashlib.sha1``.
    """
    return _compute_base64_file_hash(file_name, hashlib.sha1)
aaf2daca1676c822259bec8a519ab5eae7618b17
26,470
def readfsa(fh):
    """Read FASTA-formatted records from an open file handle.

    Each record starts at a '>' header line; the header and all following
    lines (up to the next header) are joined and handed to Fsa. Returns a
    list of Fsa objects.
    """
    sequences = list()
    buffer = list()
    for line in fh:
        # A new header closes the record accumulated so far (if any).
        if line.startswith(">") and buffer:
            sequences.append(Fsa("".join(buffer)))
            buffer = list()
        buffer.append(line)
    # Flush the trailing record.
    if buffer:
        sequences.append(Fsa("".join(buffer)))
    return sequences
089cd4b7addcf99baf9394b59d44702995eff417
26,471
def with_timeout(name):
    """
    Method decorator, wraps method with :py:func:`asyncio.wait_for`. `timeout`
    argument takes from `name` decorator argument or "timeout".

    :param name: name of timeout attribute
    :type name: :py:class:`str`

    :raises asyncio.TimeoutError: if coroutine does not finished in timeout

    Wait for `self.timeout`
    ::

        >>> def __init__(self, ...):
        ...
        ...     self.timeout = 1
        ...
        ... @with_timeout
        ... async def foo(self, ...):
        ...
        ...     pass

    Wait for custom timeout
    ::

        >>> def __init__(self, ...):
        ...
        ...     self.foo_timeout = 1
        ...
        ... @with_timeout("foo_timeout")
        ... async def foo(self, ...):
        ...
        ...     pass

    """
    # Two usage forms: `@with_timeout("attr")` passes a string (so `name`
    # is the attribute name), while bare `@with_timeout` passes the
    # decorated coroutine itself, which is forwarded to the default
    # "timeout"-attribute wrapper.
    if isinstance(name, str):
        return _with_timeout(name)
    else:
        return _with_timeout("timeout")(name)
7591a4ed176fad60510dfc7aafbb6df2b44672a4
26,472
import json


def results():
    """
    function for predicting a test dataset input as a body of the HTTP request

    :return: prediction labels array
    """
    # force=True parses the body as JSON regardless of the Content-Type.
    data = request.get_json(force=True)
    # json.loads here implies the body is a JSON-*encoded string* holding the
    # records — i.e. double-encoded JSON. Confirm against the client.
    data = pd.DataFrame(json.loads(data))
    prediction = model.predict(data)
    # Cast predictions to plain ints so jsonify can serialize them.
    output = list(map(int, prediction))
    return jsonify(output)
caae7f04884532de7a4fef9e5a6d285c982d2187
26,473
import os


def check_table_updates() -> "tuple[bool, bytes]":
    """Check whether the table needs to be updated.

    Returns:
        * A `boolean` indicating whether the tables need updating
        * A `bytes` object holding the online version string
        (the previous ``-> bool`` annotation did not match the actual
        tuple return value)

    Raises:
        SSLError:HTTPSConnectionPool: Max retries exceeded with url
    """
    if not os.path.exists("data/data_version.txt"):
        # Bug fix: os.mkdir raised FileExistsError when data/ already
        # existed but the version file had been removed; makedirs with
        # exist_ok handles both cases.
        os.makedirs("data/", exist_ok=True)
        # Create an empty local version file so the comparison below works.
        open("data/data_version.txt", "ab").close()
    with (open('data/data_version.txt', 'rb') as f, closing(request.urlopen(
            'https://raw.githubusercontent.com/Kengxxiao/ArknightsGameData/master/zh_CN/gamedata/excel/data_version.txt'
    )) as o):
        online_version = o.read()
        local_version = f.read()
        result = online_version != local_version
    return result, online_version
01892435f042344d1971d8cdf6f59662158be05f
26,474
from datetime import datetime


def tradedate_2_dtime(td):
    """Convert a trade date as formatted by yfinance (a YYYYMMDD number)
    into a datetime object."""
    digits = str(int(td))
    year = int(digits[:4])
    month = int(digits[4:6])
    day = int(digits[6:])
    return datetime(year, month, day)
29db7ed41a5cac48af1e7612e1cd2b59ab843a1f
26,475
def frmchg(frame1, frame2, et):
    """frmchg(SpiceInt frame1, SpiceInt frame2, SpiceDouble et)

    SWIG-generated thin wrapper: delegates to the compiled CSPICE binding,
    returning the state transformation between the two frames at epoch et.
    """
    return _cspyce0.frmchg(frame1, frame2, et)
db05ebf45f0d265e8f75b26fbd7c1234d9a8b4cb
26,476
def map_popularity_score_keys(popularity):
    """
    Maps popularity score keys to be more meaningful

    Each key of ``popularity`` is replaced by its human-readable label from
    ``config.popularity_terms``; scores are carried over unchanged.

    :param popularity: popularity scores of the analysis
    :return: Mapped dictionary of the scores
    """
    return {config.popularity_terms[name]: score
            for name, score in popularity.items()}
84c265e7ec6e881df878f74d5d9b9eda9d223bf3
26,477
from typing import Callable from typing import Optional def _get_utf16_setting() -> Callable[[Optional[bool]], bool]: """Closure for holding utf16 decoding setting.""" _utf16 = False def _utf16_enabled(utf16: Optional[bool] = None) -> bool: nonlocal _utf16 if utf16 is not None: _utf16 = utf16 return _utf16 return _utf16_enabled
1f0caeab03047cc847d34266c1ed53eabdf01a10
26,478
def uproot_ntuples_to_ntuple_dict(uproot_ntuples, properties_by_track_type, keep_invalid_vals=False):
    """Convert a collection of uproot ntuples into a single ntuple dict.

    Each ntuple is reduced to the requested properties and the per-ntuple
    dicts are then concatenated together. Tracks containing invalid values
    (inf or nan) are cut unless keep_invalid_vals is True.

    Args:
        uproot_ntuples: an iterable of uproot ntuples.
        properties_by_track_type: a dictionary from track types (trk,
            matchtrk, etc.) to properties to be selected (eta, pt, chi2).
        keep_invalid_vals: if True, don't cut tracks with inf or nan as one
            of their values.

    Returns:
        An ntuple dict.
    """
    per_ntuple_dicts = [
        uproot_ntuple_to_ntuple_dict(ntuple, properties_by_track_type, keep_invalid_vals)
        for ntuple in uproot_ntuples
    ]
    return ndops.add_ntuple_dicts(per_ntuple_dicts)
e4f391fc0c63e73ff320e973f624e43841a3613f
26,479
from datetime import datetime


def get_container_sas_token(block_blob_client, container_name, blob_permissions):
    """
    Obtains a shared access signature granting the specified permissions to
    the container.

    :param block_blob_client: A blob service client.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param str container_name: The name of the Azure Blob storage container.
    :param BlobPermissions blob_permissions:
    :rtype: str
    :return: A SAS token granting the specified permissions to the container.
    """
    # Obtain the SAS token for the container, setting the expiry time and
    # permissions. In this case, no start time is specified, so the shared
    # access signature becomes valid immediately; it expires after 10 days.
    # NOTE(review): `timedelta` is not imported in this snippet — presumably
    # imported at module level elsewhere; confirm.
    container_sas_token = \
        block_blob_client.generate_container_shared_access_signature(
            container_name,
            permission=blob_permissions,
            expiry=datetime.utcnow() + timedelta(days=10))

    return container_sas_token
f17623e721e84a0953565854f2e2eedfb4f8afe6
26,480
from pytorch3d.io import load_obj
from typing import Optional
from typing import Tuple
import torch
from typing import NamedTuple
import os
import time


def create_mesh(
    pdb_file: Optional[str] = None,
    pdb_code: Optional[str] = None,
    out_dir: Optional[str] = None,
    config: Optional[ProteinMeshConfig] = None,
) -> Tuple[torch.FloatTensor, NamedTuple, NamedTuple]:
    """
    Creates a ``PyTorch3D`` mesh from a ``pdb_file`` or ``pdb_code``.

    :param pdb_file: path to ``pdb_file``. Defaults to ``None``.
    :type pdb_file: str, optional
    :param pdb_code: 4-letter PDB accession code. Defaults to None.
    :type pdb_code: str, optional
    :param out_dir: output directory to store ``.obj`` file. Defaults to
        ``None``.
    :type out_dir: str, optional
    :param config: :class:`~graphein.protein.config.ProteinMeshConfig`
        config to use. Defaults to default config in
        ``graphein.protein.config``.
    :type config: graphein.protein.config.ProteinMeshConfig
    :return: ``verts``, ``faces``, ``aux``.
    :rtype: Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
    """
    if config is None:
        config = ProteinMeshConfig()
    obj_file = get_obj_file(
        pdb_code=pdb_code, pdb_file=pdb_file, out_dir=out_dir, config=config
    )
    # Wait for PyMol to finish
    # NOTE(review): this busy-wait has no timeout — if the .obj is never
    # written (e.g. PyMOL fails) it spins forever; consider a deadline.
    while os.path.isfile(obj_file) is False:
        time.sleep(0.1)
    verts, faces, aux = load_obj(obj_file)
    return verts, faces, aux
f44e430e2ab5ecfb9cfdb219453272563e99336e
26,481
import os
import logging

def init_logger(logger_name="pfp_log", log_file_name="pfp.log", to_file=True, to_console=True):
    """
    Purpose:
     Returns a logger object.
     The logger logs DEBUG and above to the main log file, ERROR and above to
     a separate "_errors" file, and INFO and above to the console, depending
     on the to_file/to_console flags.
    Usage:
     logger = pfp_log.init_logger()
    Author: PRI with acknowledgement to James Cleverly
    Date: September 2016
    """
    # create the logfiles directory if it does not exist
    if not os.path.exists("logfiles"):
        os.makedirs("logfiles")
    # create formatter
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s','%H:%M:%S')
    # create the logger
    logger = logging.getLogger(name=logger_name)
    logger.setLevel(logging.DEBUG)
    # create file handler if requested
    if to_file:
        log_file_path = os.path.join("logfiles", log_file_name)
        fh1 = logging.FileHandler(log_file_path)
        fh1.setLevel(logging.DEBUG)
        # add formatter to file handler
        fh1.setFormatter(formatter)
        # add the file handler to the logger
        logger.addHandler(fh1)
        # set up a separate file for errors
        # Derive the error file name from the extension split only; the
        # original used str.replace(".", "_errors.") which mangled file
        # names containing more than one dot (e.g. "my.app.log").
        base, ext = os.path.splitext(log_file_name)
        error_file_name = base + "_errors" + ext
        error_file_path = os.path.join("logfiles", error_file_name)
        fh2 = logging.FileHandler(error_file_path)
        fh2.setLevel(logging.ERROR)
        fh2.setFormatter(formatter)
        logger.addHandler(fh2)
    # create console handler if requested
    if to_console:
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        # add formatter to console handler
        ch.setFormatter(formatter)
        # add the console handler to the logger
        logger.addHandler(ch)
    return logger
6442c68b1f5d1a7df34ed1e604cee5b4c091fd97
26,482
def to_one_hot(y):
    """Transform multi-class labels to binary labels

    The output of to_one_hot is sometimes referred to by some authors as the
    1-of-K coding scheme.

    Parameters
    ----------
    y : numpy array or sparse matrix of shape (n_samples,) or
        (n_samples, n_classes) Target values. The 2-d matrix should only
        contain 0 and 1, represents multilabel classification. Sparse
        matrix can be CSR, CSC, COO, DOK, or LIL.

    Returns
    -------
    Y : numpy array or CSR matrix of shape [n_samples, n_classes] Shape
        will be [n_samples, 1] for binary problems.
    classes_ : class vector extracted from y.
    """
    # Fit a binarizer on the observed labels, then binarize those same labels.
    lb = LabelBinarizer()
    lb.fit(y)
    Y = lb.transform(y)
    # NOTE(review): `Y.base` is the ndarray that `Y` is a view of, and is
    # None when `Y` owns its own data — confirm returning `Y.base` rather
    # than `Y` itself is intentional here.
    return (Y.base, lb.classes_)
134f4bce729c98439bdca2cd586f95d0cc9178c7
26,483
def random():
    """Return a pseudo-random float in [0, 1) with 1/10000 resolution."""
    # Draw an integer in [0, 10000) and scale it down to the unit interval.
    draw = randrange(10000)
    return draw / 10000
12ab43d5e5c8a9a993f8053363e56c2acf8b0ceb
26,484
from typing import Union


def parameter_string_to_value(
    parameter_string: str,
    passthrough_estimate: bool = False,
) -> Union[float, int, str]:
    """Cast a parameter value from string to numeric.

    Args:
        parameter_string: The parameter value, as a string.
        passthrough_estimate: Whether to return `ESTIMATE` as `ESTIMATE`. If
            `False`, raises an exception if `parameter_string == ESTIMATE`.

    Returns:
        The parameter value, as a numeric type (int when the value is a whole
        number, float otherwise).

    Raises:
        ValueError: If the value equals `ESTIMATE` and `passthrough_estimate`
            is False.
    """
    # Estimated parameters are sentinels, not numbers — handle them first.
    if parameter_string == ESTIMATE:
        if not passthrough_estimate:
            raise ValueError('Please handle estimated parameters differently.')
        return parameter_string

    as_float = float(parameter_string)
    as_int = int(as_float)
    # Prefer the exact integer representation when no precision is lost.
    return as_int if as_int == as_float else as_float
3271acf50f5171703e16bce183d246d149d5e053
26,485
import sys
import os
import urllib
import traceback


def getNLCDRasterDataForBoundingBox(config, outputDir, bbox,
                                    coverage=DEFAULT_COVERAGE,
                                    filename='NLCD',
                                    srs='EPSG:4326',
                                    resx=0.000277777777778,
                                    resy=0.000277777777778,
                                    interpolation='near',
                                    fmt=FORMAT_GEOTIFF,
                                    overwrite=False,
                                    verbose=False,
                                    outfp=sys.stdout):
    """ Download NLCD rasters from
        http://raster.nationalmap.gov/arcgis/rest/services/LandCover/USGS_EROS_LandCover_NLCD/MapServer

        @param config A Python ConfigParser (not currently used)
        @param outputDir String representing the absolute/relative path of the directory into which output raster
               should be written
        @param bbox Dict representing the lat/long coordinates and spatial reference of the bounding box area
            for which the raster is to be extracted.  The following keys must be specified: minX, minY, maxX, maxY, srs.
        @param coverage String representing the NLCD coverage to request; must be a key of COVERAGES
        @param filename String representing the output file name (without extension)
        @param srs String representing the spatial reference of the raster to be returned.
        @param resx Float representing the X resolution of the raster(s) to be returned
        @param resy Float representing the Y resolution of the raster(s) to be returned
        @param interpolation String representing resampling method to use. Must be one of
            spatialdatalib.utils.RASTER_RESAMPLE_METHOD.
        @param fmt String representing format of raster file.  Must be one of FORMATS.
        @param overwrite Boolean True if existing data should be overwritten
        @param verbose Boolean True if detailed output information should be printed to outfp
        @param outfp File-like object to which verbose output should be printed

        @return Tuple (True, url, outFilename) where url is the WCS request URL and
            outFilename is the name of the raster file written to outputDir.
            (The original docstring described a soil-property dictionary,
            copy-pasted from another downloader.)

        @exception Exception if coverage is not known
        @exception Exception if interpolation method is not known
        @exception Exception if fmt is not a known format
        @exception Exception if output already exists by overwrite is False
    """
    if coverage not in COVERAGES:
        raise Exception("Coverage {0} is not known".format(coverage))
    if interpolation not in INTERPOLATION_METHODS:
        raise Exception("Interpolation method {0} is not of a known method {1}".format(interpolation, INTERPOLATION_METHODS.keys()))
    if fmt not in FORMATS:
        raise Exception("Format {0} is not of a known format {1}".format(fmt, str(FORMATS)))

    if verbose:
        outfp.write("Acquiring NLCD coverage {lctype} from {pub}\n".format(lctype=coverage, pub=DC_PUBLISHER))

    outFilename = os.path.extsep.join([filename, FORMAT_EXT[fmt]])
    outFilepath = os.path.join(outputDir, outFilename)

    delete = False
    if os.path.exists(outFilepath):
        if not overwrite:
            raise Exception("File {0} already exists, and overwrite is false".format(outFilepath))
        else:
            delete = True

    try:
        if delete:
            os.unlink(outFilepath)
        wcs = WebCoverageService(URL_BASE, version='1.0.0')
        # Build the WCS bbox list in a new variable instead of clobbering
        # the caller-supplied `bbox` dict (the original shadowed the param).
        bboxList = [bbox['minX'], bbox['minY'], bbox['maxX'], bbox['maxY']]
        wcsfp = wcs.getCoverage(identifier=COVERAGES[coverage], bbox=bboxList,
                                crs=srs, response_crs=srs,
                                resx=resx,  # their WCS seems to accept resx, resy in meters
                                resy=resy,
                                format=fmt,
                                interpolation=INTERPOLATION_METHODS[interpolation],
                                **{'band': '1'})
        # NOTE: urllib.unquote is Python 2 API; this module appears to
        # target Python 2 (kept for compatibility).
        url = urllib.unquote(wcsfp.geturl())
        # Ensure the output file is closed even if the write fails
        # (try/finally rather than `with` to stay Python 2.4-safe like
        # the surrounding style).
        f = open(outFilepath, 'wb')
        try:
            f.write(wcsfp.read())
        finally:
            f.close()

        return (True, url, outFilename)
    except Exception:
        traceback.print_exc(file=outfp)
        # Bare re-raise preserves the original traceback; the original
        # `raise(e)` form discarded it.
        raise
dbae7825d91b6a6dc3bf904391b47603ed29d092
26,486
def rfsize(spatial_filter, dx, dy=None, sigma=2.):
    """
    Computes the lengths of the major and minor axes of an ellipse fit
    to an STA or linear filter.

    Parameters
    ----------
    spatial_filter : array_like
        The spatial receptive field to which the ellipse should be fit.

    dx : float
        The spatial sampling along the x-dimension.

    dy : float
        The spatial sampling along the y-dimension. If None, uses the same
        value as dx. (Default: None)

    sigma : float, optional
        Determines the size of the ellipse contour, in units of standard
        deviation of the fitted gaussian. E.g., 2.0 means a 2 SD ellipse.

    Returns
    -------
    xsize, ysize : float
        The x- and y-sizes of the ellipse fitted to the receptive field, at
        the given sigma.
    """
    # Default to square pixels when no y-sampling is given.
    dy = dx if dy is None else dy

    # Fit a Gaussian ellipse to the filter; element [1] of the result holds
    # the axis widths at the requested contour level.
    axis_widths = get_ellipse(spatial_filter, sigma=sigma)[1]

    # Scale the widths from pixel units into physical units.
    return axis_widths[0] * dx, axis_widths[1] * dy
a2e184dd597c840392c05d0955dba826aa528a06
26,487
import json

def get_config_from_json(json_file):
    """
    Get the config from a json file
    :param json_file: path to the JSON configuration file
    :return: config(namespace) or config(dictionary)
    """
    # parse the configurations from the config json file provided
    with open(json_file, 'r') as config_file:
        config_dict = json.load(config_file)

    # wrap the plain dict in a Bunch namespace, then fill in defaults
    config = default_values(Bunch(config_dict))
    return config, config_dict
17aec6d1d0413836f647b222681e32af1a298fbc
26,488
from pathlib import Path


def create_polarimetric_layers(import_file, out_dir, burst_prefix, config_dict):
    """Pipeline for Dual-polarimetric decomposition

    Runs an H-A-Alpha polarimetric decomposition on an imported SLC product,
    terrain-corrects (geocodes) the result, zeroes out NaNs in the output
    bands, writes an outline footprint, and moves the final product into
    ``out_dir``.

    :param import_file: path to the imported (SLC) product to decompose
    :param out_dir: directory receiving the final product and the error logs
    :param burst_prefix: filename prefix used for all products of this burst
    :param config_dict: configuration dictionary; uses ``temp_dir`` and
        ``processing.single_ARD``
    :return: tuple ``(path to final .dim file, None)`` on success, or
        ``(None, error)`` if a processing step raised
    """
    # temp dir for intermediate files
    with TemporaryDirectory(prefix=f"{config_dict['temp_dir']}/") as temp:
        temp = Path(temp)
        # -------------------------------------------------------
        # 1 Polarimetric Decomposition
        # create namespace for temporary decomposed product
        out_haa = temp / f"{burst_prefix}_h"

        # create namespace for decompose log
        haa_log = out_dir / f"{burst_prefix}_haa.err_log"

        # run polarimetric decomposition
        # (errors are logged and returned, not raised, so the caller can
        # decide how to handle a failed burst)
        try:
            slc.ha_alpha(import_file, out_haa, haa_log, config_dict)
        except (GPTRuntimeError, NotValidFileError) as error:
            logger.info(error)
            return None, error

        # -------------------------------------------------------
        # 2 Geocoding
        # create namespace for temporary geocoded product
        out_htc = temp / f"{burst_prefix}_pol"

        # create namespace for geocoding log
        haa_tc_log = out_dir / f"{burst_prefix}_haa_tc.err_log"

        # run geocoding
        try:
            common.terrain_correction(
                out_haa.with_suffix(".dim"), out_htc, haa_tc_log, config_dict
            )
        except (GPTRuntimeError, NotValidFileError) as error:
            logger.info(error)
            return None, error

        # set nans to 0 (issue from SNAP for polarimetric layers)
        # each band image is read, patched in memory, and rewritten in place
        for infile in list(out_htc.with_suffix(".data").glob("*.img")):

            with rasterio.open(str(infile), "r") as src:
                meta = src.meta.copy()
                array = src.read()
                array[np.isnan(array)] = 0

            with rasterio.open(str(infile), "w", **meta) as dest:
                dest.write(array)

        # ---------------------------------------------------------------------
        # 5 Create an outline
        ras.image_bounds(out_htc.with_suffix(".data"))

        # move to final destination
        ard = config_dict["processing"]["single_ARD"]
        h.move_dimap(out_htc, out_dir / f"{burst_prefix}_pol", ard["to_tif"])

        # write out check file for tracking that it is processed
        with (out_dir / ".pol.processed").open("w+") as file:
            file.write("passed all tests \n")

        dim_file = out_dir / f"{burst_prefix}_pol.dim"
        return (str(dim_file), None)
ddfd3d9b12aefcf5f60b254ad299c59d4caca837
26,489
import argparse

def parse_args():
    """Parse argument values from command-line"""
    parser = argparse.ArgumentParser(
        description='Arguments required for script.')
    parser.add_argument(
        '-t', '--job-type',
        required=True,
        choices=['process', 'analyze'],
        help='process or analysis',
    )
    # argparse reads from sys.argv and exits with an error message on
    # invalid input.
    return parser.parse_args()
c8ca3ad879db8c9dbfb2a3fb044a747a856304e7
26,490
from typing import Literal
import math

def Builtin_FLOOR(expr, ctx):
    """
    SPARQL FLOOR() built-in: the largest integer not greater than the
    numeric value of the argument, returned as a literal carrying the
    argument's datatype.

    http://www.w3.org/TR/sparql11-query/#func-floor
    """
    # NOTE(review): `Literal` from `typing` is not callable with a
    # `datatype` keyword; rdflib's SPARQL built-ins use
    # `rdflib.term.Literal` — confirm the import above is correct for
    # this file.
    l_ = expr.arg
    # `numeric()` (defined elsewhere in this module) coerces the literal
    # to a Python number before flooring.
    return Literal(int(math.floor(numeric(l_))), datatype=l_.datatype)
495d7e2133028030e1766d0a04eb3d20f800b918
26,491
import requests

def get(target: str) -> tuple:
    """Fetch a document via HTTP/HTTPS.

    On success, returns a 4-tuple: (True, final URL, response content as
    bytes, response text as str). On connection failure or HTTP error,
    returns a 3-tuple: (False, URL attempted, exception message or HTTP
    status code).

    Note: certificate verification is disabled (verify=False) and a bare
    hostname is assumed to be plain HTTP.

    :param target:
    :return: tuple
    """
    # Default to plain HTTP when no scheme was supplied.
    if not target.startswith(('http://', 'https://')):
        target = 'http://{0}'.format(target)

    try:
        response = requests.get(url=target, timeout=3, verify=False)
    except Exception as exc:
        return False, target, exc.__str__()

    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as exc:
        return False, target, exc.__str__()

    if response.ok:
        return True, response.url, response.content, response.text

    return False, response.url, response.status_code
1d9d650d77776419318cbd204b722d8abdff94c5
26,492
def DOM2ET(domelem):
    """Converts a DOM node object of type element to an ElementTree Element.

    domelem: DOM node object of type element
             (domelem.nodeType == domelem.ELEMENT_NODE)

    returns an 'equivalent' ElementTree Element
    """
    # Cache the node-type constants locally for fast comparison in the loop.
    ty_cdata = domelem.CDATA_SECTION_NODE
    ty_text = domelem.TEXT_NODE
    ty_pi = domelem.PROCESSING_INSTRUCTION_NODE
    ty_comment = domelem.COMMENT_NODE
    ty_element = domelem.ELEMENT_NODE

    # Copy the DOM attribute map into a plain dict for ET.Element.
    attr_map = domelem.attributes
    attrs = {}
    for idx in range(attr_map.length):
        attr_node = attr_map.item(idx)
        attrs[attr_node.name] = attr_node.value

    etelem = ET.Element(domelem.tagName, attrs)

    # ElementTree hangs character data off either the parent's `text`
    # (before any child) or the previous sibling's `tail`; `previous`
    # tracks which of the two applies.
    previous = None
    for child in domelem.childNodes:
        kind = child.nodeType
        if kind in (ty_text, ty_cdata) and child.data:
            if previous is None:
                etelem.text = (etelem.text or '') + child.data
            else:
                previous.tail = (previous.tail or '') + child.data
        elif kind == ty_element:
            previous = DOM2ET(child)
            etelem.append(previous)
        elif kind == ty_comment:
            previous = ET.Comment(child.data)
            etelem.append(previous)
        elif kind == ty_pi:
            previous = ET.ProcessingInstruction(child.target, child.data)
            etelem.append(previous)
    return etelem
b8288a2704995ec4fbe4dc1bc2805cd7658beb35
26,493
import torch

def f_score(pr, gt, beta=1, eps=1e-7, threshold=.5):
    """dice score(also referred to as F1-score)

    pr: predicted tensor (probabilities or hard labels)
    gt: ground-truth tensor of 0/1 values
    beta: weighting of recall vs. precision in the F-beta score
    eps: small constant avoiding division by zero
    threshold: binarization cutoff for `pr`; None skips binarization
    """
    if threshold is not None:
        pr = (pr > threshold).float()

    true_pos = torch.sum(gt * pr)
    false_pos = torch.sum(pr) - true_pos
    false_neg = torch.sum(gt) - true_pos

    beta_sq = beta ** 2
    numerator = (1 + beta_sq) * true_pos + eps
    denominator = (1 + beta_sq) * true_pos + beta_sq * false_neg + false_pos + eps
    return numerator / denominator
2c54fd24cd04ac2b41a9d5ca4bf8a7afc5e88640
26,494
def makeSatelliteDir(metainfo):
    """
    Make the directory name for the 'satellite' level.
    """
    # metainfo.satId is e.g. "S2A"; character [1] is the mission number.
    return "Sentinel-{}".format(metainfo.satId[1])
dfbb43f235bc027f25fc9b624097e8f2e0dee4f9
26,495
import os
import re

def parse_requirements(file_name):
    """Parse a pip requirements file into a list of requirement strings.

    Taken from http://cburgmer.posterous.com/pip-requirementstxt-and-setuppy

    :param file_name: name of the requirements file inside the "config"
        directory next to this module
    :return: list of non-empty, non-comment lines, stripped of whitespace
    """
    requirements = []
    path = os.path.join(os.path.dirname(__file__), "config", file_name)
    # Context manager closes the handle deterministically; the original
    # left the open file to the garbage collector.
    with open(path, "r") as req_file:
        for line in req_file:
            line = line.strip()

            # skip comments and blank lines
            if re.match(r"(^#)|(^$)", line):
                continue

            requirements.append(line)

    return requirements
f34892163087cecdf84aa7f14da4fc5e56e9f100
26,496
def resolve_tagged_field(field):
    """
    Fields tagged with `swagger_field` should respect user definitions.
    """
    field_type = getattr(field, SWAGGER_TYPE)
    field_format = getattr(field, SWAGGER_FORMAT, None)

    if isinstance(field_type, list):
        # Ideally we'd use oneOf here, but OpenAPI 2.0 uses the 0.4-draft jsonschema
        # which doesn't include oneOf. Upgrading to OpenAPI 3.0 ought to suffice.
        return {}

    if not field_format:
        return {"type": field_type}

    return {
        "type": field_type,
        "format": field_format,
    }
90f59615395350dbd0f2ff3ff5573f28e926dada
26,497
def grid_id_from_string(grid_id_str):
    """Convert a comma-separated grid ID string to a tuple of ints.

    Parameters
    ----------
    grid_id_str : str
        The string grid ID representation

    Returns
    -------
    ret : tuple of ints
        A 4-length tuple representation of the dihedral id
    """
    return tuple(map(int, grid_id_str.split(',')))
cec058302aae701c1aa28fcb4c4a9d762efa724e
26,498
def adj_to_edge_index(adj):
    """
    Convert an adjacency matrix to an edge index
    :param adj: Original adjacency matrix (iterable of 2-d arrays)
    :return: Edge index representation of the graphs, one (2, n_edges)
        array per input matrix
    """
    # argwhere yields (n_edges, 2) index pairs for strictly-positive
    # entries; transposing gives the (2, n_edges) edge-index layout.
    return [np.argwhere(matrix > 0.).T for matrix in adj]
e2c047a6c60bfb3ea109e8686a810749a726265f
26,499