content (string, lengths 22 to 815k) · id (int64, 0 to 4.91M)
def test(test_batches: torch.utils.data.DataLoader, model: nn.Module, criterion: nn.CrossEntropyLoss) -> None:
    """Test the model

    Args:
     test_batches: batch loader of test images
     model: the network to test
     criterion: calculator for the loss
    """
    test_loss = 0.
    correct = 0.
    total = 0.
    model.eval()
    for data, target in test_batches:
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)
        test_loss += loss.item() * data.size(0)
        # convert output probabilities to predicted class
        predictions = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += numpy.sum(
            numpy.squeeze(
                predictions.eq(
                    target.data.view_as(predictions))).cpu().numpy())
        total += data.size(0)
    test_loss /= len(test_batches.dataset)
    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))
    return
15,000
def color_parser(color: str, color_dicts: list = None) -> tuple:
    """
    convert a string with RGB/matplotlib named colors to matplotlib HSV tuples.

    supports RGB colors with ranges between 0-1 or 0-255.

    supported matplotlib colors can be found here:
    https://matplotlib.org/3.3.1/gallery/color/named_colors.html
    """
    # input: RGB
    if color.count(",") == 2:
        value = [float(c) for c in color.split(",")]
        return rgb_to_hsv(value)

    # input: matplotlib colors
    cdicts = color_dicts if color_dicts else DEFAULT_COLOR_DICTS
    for cdict in cdicts:
        if color in cdict:
            value = cdict[color]
            # tableau, css4 and xkcd return hex colors.
            if str(value).startswith("#"):
                value = hex_to_rgb(value)
            return rgb_to_hsv(value)

    logger.error(f"Color not recognized: {color}")
    os._exit(1)
15,001
def _build_auth_record(response): """Build an AuthenticationRecord from the result of an MSAL ClientApplication token request""" try: id_token = response["id_token_claims"] if "client_info" in response: client_info = json.loads(_decode_client_info(response["client_info"])) home_account_id = "{uid}.{utid}".format(**client_info) else: # MSAL uses the subject claim as home_account_id when the STS doesn't provide client_info home_account_id = id_token["sub"] # "iss" is the URL of the issuing tenant e.g. https://authority/tenant issuer = six.moves.urllib_parse.urlparse(id_token["iss"]) # tenant which issued the token, not necessarily user's home tenant tenant_id = id_token.get("tid") or issuer.path.strip("/") # AAD returns "preferred_username", ADFS returns "upn" username = id_token.get("preferred_username") or id_token["upn"] return AuthenticationRecord( authority=issuer.netloc, client_id=id_token["aud"], home_account_id=home_account_id, tenant_id=tenant_id, username=username, ) except (KeyError, ValueError) as ex: auth_error = ClientAuthenticationError( message="Failed to build AuthenticationRecord from unexpected identity token" ) six.raise_from(auth_error, ex)
15,002
def main(selection="user", headless=False, short_exec=False): """ Replays a batch of demos using action primitives Creates threads for a batch of demos to be replayed in parallel Uses the code in replay_demo_with_action_primitives.py """ print("*" * 80 + "\nDescription:" + main.__doc__ + "\n" + "*" * 80) defaults = selection == "random" and headless and short_exec args_dict = parse_args(defaults=defaults) skip_if_existing = not defaults # Not skipping if testing # Find all the segmentation files by searching for json files in the segmentation directory segm_files = list(glob.glob(os.path.join(args_dict["segm_dir"], "*_segm.json"))) demo_files = list(glob.glob(os.path.join(args_dict["demo_dir"], "*.hdf5"))) print("Segmentations to replay with action primitives: {}".format(len(segm_files))) # Load the demo to get info for demo_file in tqdm.tqdm(demo_files): demo = os.path.splitext(os.path.basename(demo_file))[0] if "replay" in demo: continue for segm_file in segm_files: if demo in segm_file: segm_name = os.path.splitext(os.path.basename(segm_file))[0] ap_replay_demo_file = os.path.join(args_dict["out_dir"], segm_name + "_ap_replay.json") out_log_file = os.path.join(args_dict["out_dir"], segm_name + "_ap_replay.log") if os.path.exists(ap_replay_demo_file) and skip_if_existing: print("Skipping demo because it exists already: {}".format(ap_replay_demo_file)) continue # Batch me script_file = os.path.join(behavior.examples_path, "replay_demo_with_action_primitives_example.py") command = ["python", script_file, demo_file, segm_file, ap_replay_demo_file] with open(out_log_file, "w") as log_file: print("Launching subprocess for demo. Command: {}. Log file: {}".format(command, out_log_file)) tqdm.tqdm.write("Processing %s" % demo) subprocess.run(command, stdout=log_file, stderr=subprocess.STDOUT)
15,003
def loc_data_idx(loc_idx): """ Return tuple of slices containing the unflipped idx corresponding to loc_idx. By 'unflipped' we mean that if a slice has a negative step, we wish to retrieve the corresponding indices but not in reverse order. Examples -------- >>> loc_data_idx(slice(11, None, -3)) (slice(2, 12, 3),) """ retval = [] for i in as_tuple(loc_idx): if isinstance(i, slice) and i.step is not None and i.step == -1: if i.stop is None: retval.append(slice(0, i.start+1, -i.step)) else: retval.append(slice(i.stop+1, i.start+1, -i.step)) elif isinstance(i, slice) and i.step is not None and i.step < -1: if i.stop is None: lmin = i.start while lmin >= 0: lmin += i.step retval.append(slice(lmin-i.step, i.start+1, -i.step)) else: retval.append(slice(i.stop+1, i.start+1, -i.step)) elif is_integer(i): retval.append(slice(i, i+1, 1)) else: retval.append(i) return as_tuple(retval)
15,004
def eat_descriptor(descr):
    """
    Read head of a field/method descriptor.  Returns a pair of strings, where
    the first one is a human-readable string representation of the first found
    type, and the second one is the tail of the parameter.
    """
    array_dim = 0
    while descr[0] == '[':
        array_dim += 1
        descr = descr[1:]
    if descr[0] == 'L':
        # str.find() never raises; a missing ';' is signalled by -1
        end = descr.find(';')
        if end == -1:
            raise ParserError("Not a valid descriptor string: " + descr)
        type = descr[1:end]
        descr = descr[end:]
    else:
        global code_to_type_name
        try:
            type = code_to_type_name[descr[0]]
        except KeyError:
            raise ParserError("Not a valid descriptor string: %s" % descr)
    return (type.replace("/", ".") + array_dim * "[]", descr[1:])
15,005
def legendre(a, p):
    """Legendre symbol"""
    tmp = pow(a, (p-1)//2, p)
    return -1 if tmp == p-1 else tmp
15,006
def get_parser(): """ Create a parser with some arguments used to configure the app. Returns: argparse.ArgumentParser: """ parser = argparse.ArgumentParser(description="configuration") parser.add_argument( "--upload-folder", required=True, metavar="path", help="Target path where the images will be uploaded for inference", ) parser.add_argument( "--config-file", default="/content/computer-vision-REST-API/MaskRCNN_finetune/configs/ResNet-101-FPN/balloon.yaml", metavar="path", help="Path to the model config file. Possible improvement : let the user instead choose the desired model thru the app then load the ad-hoc config file.", ) parser.add_argument( "--weights", default="https://www.dropbox.com/s/otp52ccygc2t3or/ResNet101_FPN_model_final.pth?dl=1", metavar="path", help="Path to the model file weights. Possible improvement : let the user instead choose the desired model thru the app then load the ad-hoc pretrained weights.", ) parser.add_argument( "--remove-colors", default=False, action="store_true", help="One can remove colors of unsegmented pixels for better clarity as the mask and balloons colors can be hard to distinguish.", ) parser.add_argument( "--use-ngrok", default=False, action="store_true", help="Need to set this arg to True to be able to run it on google collab", ) parser.add_argument( "--infer-with-cpu", default=False, action="store_true", help="Use cpu for forward pass (slower)", ) return parser
15,007
def _interpolate(acq, coefficient, sat_sol_angles_fname, coefficients_fname,
                 ancillary_fname, out_fname, compression=H5CompressionFilter.LZF,
                 filter_opts=None, method=Method.SHEARB):
    """
    A private wrapper for dealing with the internal custom workings of the
    NBAR workflow.
    """
    with h5py.File(sat_sol_angles_fname, 'r') as sat_sol,\
            h5py.File(coefficients_fname, 'r') as comp,\
            h5py.File(ancillary_fname, 'r') as anc,\
            h5py.File(out_fname, 'w') as out_fid:

        grp1 = anc[GroupName.ANCILLARY_GROUP.value]
        grp2 = sat_sol[GroupName.SAT_SOL_GROUP.value]
        grp3 = comp[GroupName.COEFFICIENTS_GROUP.value]
        interpolate(acq, coefficient, grp1, grp2, grp3, out_fid, compression,
                    filter_opts, method)
15,008
def precheck_arguments(args): """ Make sure the argument choices are valid """ any_filelist = (len(args.filelist_name[0]) > 0 or len(args.output_dir[0]) > 0 or args.num_genomes[0] > 0) if len(args.filelist_name[0]) > 0 and len(args.output_dir[0]) == 0: print("Error: Need to specify output directory with -O if using -F") exit(1) if len(args.filelist_name[0]) == 0 and len(args.output_dir[0]) > 0: print("Error: Need to specify a filelist with -F if using -O") exit(1) if len(args.input_fasta[0]) > 0 and any_filelist: print("Error: When using -i flag, cannot use any of other options that imply multiple files") exit(1) if len(args.input_fasta[0]) > 0 and not any_filelist: return "single" elif any_filelist and len(args.input_fasta[0]) == 0: return "multi" else: print("Error: Need to specify either -i or the combination of -F and -O") exit(1)
15,009
def get_image_blob(im): """Converts an image into a network input. Arguments: im (ndarray): a color image Returns: blob (ndarray): a data blob holding an image pyramid im_scale_factors (list): list of image scales (relative to im) used in the image pyramid """ im_orig = im.astype(np.float32, copy=True) im_orig -= cfg.PIXEL_MEANS im_shape = im_orig.shape im_size_min = np.min(im_shape[0:2]) im_size_max = np.max(im_shape[0:2]) processed_ims = [] im_scale_factors = [] for target_size in cfg.TEST.SCALES: im_scale = float(target_size) / float(im_size_min) # Prevent the biggest axis from being more than MAX_SIZE if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE: im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max) im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR) im_scale_factors.append(im_scale) processed_ims.append(im) # Create a blob to hold the input images blob = im_list_to_blob(processed_ims) return blob, np.array(im_scale_factors)
15,010
def main():
    """Start the bot."""
    sp = get_spotify_client()
    user_id = os.environ.get('SPOTIFY_USER_ID')
    playlist_id = os.environ.get('SPOTIFY_PLAYLIST_ID')
    playlister = PlaylistMaker(sp, user_id, playlist_id)

    TOKEN = os.environ.get('TELEGRAM_BOT_TOKEN')
    PORT = int(os.environ.get('PORT', '8443'))

    updater = Updater(TOKEN)
    dp = updater.dispatcher
    dp.add_handler(MessageHandler(Filters.all, playlister.find_spotify_links))
    dp.add_error_handler(error)

    updater.start_webhook(listen="0.0.0.0", port=PORT, url_path=TOKEN)
    logger.info("Setting webhook")
    webhook_prefix = os.environ.get('WEBHOOK_DOMAIN')
    updater.bot.set_webhook(webhook_prefix + TOKEN)

    logger.info("Idling")
    updater.idle()
15,011
def get_expr_fields(self): """ get the Fields referenced by switch or list expression """ def get_expr_field_names(expr): if expr.op is None: if expr.lenfield_name is not None: return [expr.lenfield_name] else: # constant value expr return [] else: if expr.op == '~': return get_expr_field_names(expr.rhs) elif expr.op == 'popcount': return get_expr_field_names(expr.rhs) elif expr.op == 'sumof': # sumof expr references another list, # we need that list's length field here field = None for f in expr.lenfield_parent.fields: if f.field_name == expr.lenfield_name: field = f break if field is None: raise Exception("list field '%s' referenced by sumof not found" % expr.lenfield_name) # referenced list + its length field return [expr.lenfield_name] + get_expr_field_names(field.type.expr) elif expr.op == 'enumref': return [] else: return get_expr_field_names(expr.lhs) + get_expr_field_names(expr.rhs) # get_expr_field_names() # resolve the field names with the parent structure(s) unresolved_fields_names = get_expr_field_names(self.expr) # construct prefix from self prefix = [('', '', p) for p in self.parents] if self.is_container: prefix.append(('', '', self)) all_fields = _c_helper_resolve_field_names (prefix) resolved_fields_names = list(filter(lambda x: x in all_fields.keys(), unresolved_fields_names)) if len(unresolved_fields_names) != len(resolved_fields_names): raise Exception("could not resolve all fields for %s" % self.name) resolved_fields = [all_fields[n][1] for n in resolved_fields_names] return resolved_fields
15,012
def add_bias_towards_void(transformer_class_logits, void_prior_prob=0.9): """Adds init bias towards the void (no object) class to the class logits. We initialize the void class with a large probability, similar to Section 3.3 of the Focal Loss paper. Reference: Focal Loss for Dense Object Detection, ICCV 2017. https://arxiv.org/abs/1708.02002 Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, Piotr Dollár. Args: transformer_class_logits: A [batch, num_mask_slots, num_classes] tensor, the class logits predicted by the transformer. It concats (num_classes - 1) non-void classes, including both thing classes and stuff classes, and the void class (the last channel). If the dataset class IDs do not follow this order, MaX-DeepLab loss functions will handle the mapping and thus the architecture still supports any dataset. void_prior_prob: A float, the desired probability (after softmax) of the void class at initialization. Defaults to 0.9 as in MaX-DeepLab. Returns: updated_transformer_class_logits: A [batch, num_mask_slots, num_classes] Raises: ValueError: If the rank of transformer_class_logits is not 3. """ class_logits_shape = transformer_class_logits.get_shape().as_list() if len(class_logits_shape) != 3: raise ValueError('Input transformer_class_logits should have rank 3.') init_bias = [0.0] * class_logits_shape[-1] init_bias[-1] = math.log( (class_logits_shape[-1] - 1) * void_prior_prob / (1 - void_prior_prob)) # Broadcasting the 1D init_bias to the 3D transformer_class_logits. return transformer_class_logits + tf.constant(init_bias, dtype=tf.float32)
15,013
def chkExists( path ):
    """If the given file or directory does not exist, raise an exception"""
    if not os.path.exists(path):
        raise IOError("Directory or file %s does not exist" % path)
15,014
def validate(instance, schema, instance_cls, cls=None, *args, **kwargs): """This is a carbon-copy of :method:`jsonschema.validate` except that it takes two validator classes instead of just one. In the jsonschema implementation, `cls` is used to validate both the schema and the instance. This changes the behavior to have a separate validator for each of schema and instance. Schema should not be validated with the custom validator returned by :method:`create_dereffing_validator` because it follows $refs. :param instance: the instance to validate :param schema: the schema to validate with :param instance_cls: Validator class to validate instance. :param cls: Validator class to validate schema. :raises: :exc:`ValidationError` if the instance is invalid :exc:`SchemaError` if the schema itself is invalid """ if cls is None: cls = jsonschema.validator_for(schema) cls.check_schema(schema) instance_cls(schema, *args, **kwargs).validate(instance)
15,015
def show_help( ):
    """ displays the program parameter list and usage information """
    stdout( "usage: " + sys.argv[0] + " -f <path>" )
    stdout( " " )
    stdout( " option description" )
    stdout( " -h help (this text here)" )
    stdout( " -f GO flat file to import [tab delimited]" )
    stdout( " " )
    sys.exit(1)
15,016
def trajnet_batch_multi_eval(preds, gt, seq_start_end):
    """Calculate Top-k ADE, Top-k FDE for batch of samples.

    pred = Num_modes x Num_ped x Num_timesteps x 2
    gt = Num_ped x Num_timesteps x 2
    seq_start_end (batch delimiter) = Num_batches x 2
    """
    s_topk_ade = 0
    s_topk_fde = 0
    for (start, end) in seq_start_end:
        s_preds = [pred[start:end] for pred in preds]
        s_topk_ade += topk_ade(s_preds, gt[start:end])
        s_topk_fde += topk_fde(s_preds, gt[start:end])
    return s_topk_ade, s_topk_fde
15,017
def disemvowel(sentence):
    """Disemvowel: Given a sentence, return the sentence with all vowels removed.

    >>> disemvowel('the quick brown fox jumps over the lazy dog')
    'th qck brwn fx jmps vr th lzy dg'
    """
    vowels = ('a','e','i','o','u')
    for x in sentence:
        if x in vowels:
            sentence = sentence.replace(x,"")
    return sentence
15,018
def runPolyReg(xValueList, yValueList, degrees):
    """
    Performs *Polynomial Regression* based on the arguments provided.

    Note that we split the data by the *First* 80 percent of the data and then
    the *Last* 20 percent of the data, rather than randomly splitting the data
    by 80/20 for the Train/Test split.

    Args:
        xValueList (list of floats) : List of X values used for polynomial
            regression. Offset 1 day earlier than the y values so we have
            something to predict. Prepared by *prepDataSets*. Can change based
            on the values saved in the configuration file.
        yValueList (list of floats) : Close values tied to the X value list for
            the following day.
        degrees (int) : Level of degrees the polynomial will be operating at.

    :return:
        model: The actual machine learning model.
        float: the RMSE score for the model.
    """
    splitValue = int(len(xValueList) * 0.2)
    # Train on the first 80 percent, test on the last 20 percent.
    xTrain, xTest, yTrain, yTest = (
        xValueList.iloc[:-splitValue],
        xValueList.iloc[-splitValue:],
        yValueList[:-splitValue],
        yValueList[-splitValue:],
    )
    polyreg = make_pipeline(PolynomialFeatures(degree=degrees), LinearRegression())
    polyreg.fit(xTrain, yTrain)
    yPred = polyreg.predict(xTest)
    results = metrics.rmse_score(yTest, yPred)
    return (polyreg, results)
15,019
def unfold_phi_vulpiani(phidp, kdp):
    """Alternative phase unfolding which completely relies on :math:`K_{DP}`.

    This unfolding should be used in order to iteratively reconstruct
    :math:`Phi_{DP}` and :math:`K_{DP}` (see :cite:`Vulpiani2012`).

    Parameters
    ----------
    phidp : :class:`numpy:numpy.ndarray`
        array of floats
    kdp : :class:`numpy:numpy.ndarray`
        array of floats
    """
    # unfold phidp
    shape = phidp.shape
    phidp = phidp.reshape((-1, shape[-1]))
    kdp = kdp.reshape((-1, shape[-1]))

    for beam in range(len(phidp)):
        below_th3 = kdp[beam] < -20
        try:
            idx1 = np.where(below_th3)[0][2]
            phidp[beam, idx1:] += 360
        except Exception:
            pass

    return phidp.reshape(shape)
15,020
def test_is_layout_using_existing_script_no_scripts(): """ Given - layout which has no scripts. - id_set.json When - is_layout_scripts_found is called with an id_set.json Then - Ensure that is_layout_scripts_found returns True. """ validator = IDSetValidations(is_circle=False, is_test_run=True, configuration=CONFIG) layout_data_without_scripts = {'my-layout': { 'typename': 'my-layout', 'scripts': [] }} validator.script_set = [{"script_to_test": { "name": "script_to_test", "file_path": "Packs/DeveloperTools/TestPlaybooks/script-script_to_test.yml", "fromversion": "6.0.0", "pack": "DeveloperTools" } }] assert validator._is_layout_scripts_found(layout_data=layout_data_without_scripts) is True, \ "The layout doesn't have any scripts thus result should be True."
15,021
def get_result_df(session):
    """
    query the match table and put results into pandas dataframe,
    to train the team-level model.
    """
    df_past = pd.DataFrame(
        np.array(
            [
                [s.fixture.date, s.fixture.home_team, s.fixture.away_team,
                 s.home_score, s.away_score]
                for s in session.query(Result).all()
            ]
        ),
        columns=["date", "home_team", "away_team", "home_goals", "away_goals"],
    )
    df_past["home_goals"] = df_past["home_goals"].astype(int)
    df_past["away_goals"] = df_past["away_goals"].astype(int)
    df_past["date"] = pd.to_datetime(df_past["date"])
    return df_past
15,022
def permute1d(preserve_symmetry = True):
    """Choose order to rearrange rows or columns of puzzle."""
    bp = block_permutation(preserve_symmetry)
    ip = [block_permutation(False),block_permutation(preserve_symmetry)]
    if preserve_symmetry:
        ip.append([2-ip[0][2],2-ip[0][1],2-ip[0][0]])
    else:
        ip.append(block_permutation(False))
    return [bp[i]*3+ip[i][j] for i in [0,1,2] for j in [0,1,2]]
15,023
def find_diff(sha, files=None):
    """Find the diff since the given sha."""
    if files:
        for file_or_dir in files:
            msg = f"{file_or_dir} doesn't exist. Please provide a valid path."
            assert os.path.exists(file_or_dir), msg
    else:
        files = ['*.py']

    res = subprocess.run(
        ['git', 'diff', '--unified=0', sha, '--'] + files,
        stdout=subprocess.PIPE,
        encoding='utf-8'
    )
    res.check_returncode()
    return res.stdout
15,024
def split_name_with_nii(filename):
    """
    Returns the clean basename and extension of a file.
    Means that this correctly manages the ".nii.gz" extensions.

    :param filename: The filename to clean
    :return: A tuple of the clean basename and the full extension
    """
    base, ext = os.path.splitext(filename)

    if ext == ".gz":
        # Test if we have a .nii additional extension
        temp_base, add_ext = os.path.splitext(base)
        if add_ext == ".nii":
            ext = add_ext + ext
            base = temp_base

    return base, ext
15,025
def take(n: int, iterable: Iterable[T_]) -> List[T_]:
    """Return first n items of the iterable as a list"""
    return list(islice(iterable, n))
15,026
def abs(rv):
    """
    Returns the absolute value of a random variable
    """
    return rv.abs()
15,027
def compute_ssm(X, metric="cosine"):
    """Computes the self-similarity matrix of X."""
    D = distance.pdist(X, metric=metric)
    D = distance.squareform(D)
    for i in range(D.shape[0]):
        for j in range(D.shape[1]):
            if np.isnan(D[i, j]):
                D[i, j] = 0
    D /= D.max()
    return 1 - D
15,028
def escape(instruction):
    """
    Escape used dot graph characters in given instruction so they will be
    displayed correctly.
    """
    instruction = instruction.replace('<', r'\<')
    instruction = instruction.replace('>', r'\>')
    instruction = instruction.replace('|', r'\|')
    instruction = instruction.replace('{', r'\{')
    instruction = instruction.replace('}', r'\}')
    instruction = instruction.replace(' ', ' ')
    return instruction
15,029
def chunks(blocks: Iterable, n: int = 16) -> Iterable:
    """
    Yield successive n-sized chunks from blocks.
    :param blocks:
    :param n:
    :return:
    """
    for i in range(0, len(blocks), n):
        yield blocks[i : i + n]
15,030
def authenticate(ws, service_account_file, audience):
    """Authenticates the WebSocket"""
    ws.send("Bearer {token}".format(
        token=google_id_token.get_id_token(service_account_file, audience)))
15,031
def _json_object_hook(d):
    """
    JSON to object helper

    :param d: data
    :return: namedtuple
    """
    keys = []
    for k in d.keys():
        if k[0].isdigit():
            k = 'd_{}'.format(k)
        keys.append(k)
    return namedtuple('X', keys)(*d.values())
15,032
def _fourier_interpolate(x, y):
    """ Simple linear interpolation for FFTs"""
    xs = np.linspace(x[0], x[-1], len(x))
    intp = interp1d(x, y, kind="linear", fill_value="extrapolate")
    ys = intp(xs)
    return xs, ys
15,033
def compute_hash_json_digest(*args, **kwargs):
    """compute json hash of given args and kwargs and return md5 hex digest"""
    as_json = compute_hash_json(*args, **kwargs)
    return hashlib.md5(as_json).hexdigest()
15,034
def hello_world():
    """Print welcome message as the response body."""
    return '{"info": "Refer to internal http://metadata-db for more information"}'
15,035
def get_role(server: discord.Server, role_arg: str) -> discord.Role: """ Get a role from a passed command parameter (name, mention or ID). :return: """ try: role_id = extract_role_id(role_arg) except discord.InvalidArgument: # no ID, treat as a role name try: role = get_named_role(server, role_arg) # type: discord.Role except discord.InvalidArgument: logger.warning("Cannot find role {!r} as name or ID".format(role_arg)) role = None else: logger.debug("Found role ID in {!r}".format(role_arg)) role = discord.utils.get(server.roles, id=role_id) # type: discord.Role if role is None: raise commands.BadArgument('No such role: {}'.format(role)) return role
15,036
def tf_cc_library( name, srcs = [], hdrs = [], deps = [], tf_deps = [], copts = [], compatible_with = None, testonly = 0, alwayslink = 0): """ A rule to build a TensorFlow library or OpKernel. Just like cc_library, but: * Adds alwayslink=1 for kernels (name has kernel in it) * Separates out TF deps for when building for Android. Args: name: Name of library srcs: Source files hdrs: Headers files deps: All non-TF dependencies tf_deps: All TF depenedencies copts: C options compatible_with: List of environments target can be built for testonly: If library is only for testing alwayslink: If symbols should be exported """ if "kernel" in name: alwayslink = 1 # These are "random" deps likely needed by each library (http://b/142433427) oss_deps = [ "@com_google_absl//absl/strings:cord", ] deps += select({ "@org_tensorflow//tensorflow:mobile": [ "@org_tensorflow//tensorflow/core:portable_tensorflow_lib_lite", ], "//conditions:default": [ "@local_config_tf//:libtensorflow_framework", "@local_config_tf//:tf_header_lib", ] + tf_deps + oss_deps, }) native.cc_library( name = name, srcs = srcs, hdrs = hdrs, deps = deps, copts = copts, compatible_with = compatible_with, testonly = testonly, alwayslink = alwayslink)
15,037
def build_timestamp(timestamp=None) -> google.protobuf.timestamp_pb2.Timestamp:
    """Convert Python datetime to Protobuf Timestamp"""
    # https://github.com/protocolbuffers/protobuf/issues/3986
    proto_timestamp = google.protobuf.timestamp_pb2.Timestamp()
    return proto_timestamp.FromDatetime(timestamp or datetime.datetime.utcnow())
15,038
def set_workspace(data: Dict[str, Any]) -> Dict[str, Any]:
    """Set workspace."""
    workspace_path = data.get("path", None)
    if not workspace_path:
        raise ClientErrorException("Parameter 'path' is missing in request.")

    os.makedirs(workspace_path, exist_ok=True)
    workdir = Workdir()
    workdir.set_active_workspace(workspace_path)

    return {"message": "SUCCESS"}
15,039
def read_blosum():
    """Read blosum dict and delete some keys and values."""
    with open('./psiblast/blosum62.pkl', 'rb') as f:
        blosum_dict = cPickle.load(f)
    temp = blosum_dict.pop('*')
    temp = blosum_dict.pop('B')
    temp = blosum_dict.pop('Z')
    temp = blosum_dict.pop('X')
    temp = blosum_dict.pop('alphas')
    for key in blosum_dict:
        for i in range(4):
            temp = blosum_dict[key].pop()
    return blosum_dict
15,040
def parse_spans_bio_with_errors(seq: List[str]) -> Tuple[List[Span], List[Error]]: """Parse a sequence of BIO labels into a list of spans but return any violations of the encoding scheme. Note: In the case where labels violate the span encoded scheme, for example the tag is a new type (like ``I-ORG``) in the middle of a span of another type (like ``PER``) without a proper starting token (``B-ORG``) we will finish the initial span and start a new one, resulting in two spans. This follows the ``conlleval.pl`` script. Note: Span are returned sorted by their starting location. Due to the fact that spans are not allowed to overlap there is no resolution policy when two spans have same starting location. Note: Errors are returned sorted by the location where the violation occurred. In the case a single transition triggered multiple errors they are sorted lexically based on the error type. Args: seq: The sequence of labels Returns: A list of spans and a list of errors. """ errors = [] spans = [] # This tracks the type of the span we are building out span = None # This tracks the tokens of the span we are building out tokens = [] for i, s in enumerate(seq): func = extract_function(s) _type = extract_type(s) # A `B` ends a span and starts a new one if func == BIO.BEGIN: # Save out the old span if span is not None: spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens))) # Start the new span span = _type tokens = [i] # An `I` will continue a span when types match and start a new one otherwise. elif func == BIO.INSIDE: # A span is already being built if span is not None: # The types match so we just add to the current span if span == _type: tokens.append(i) # Types mismatch so create a new span else: # Log error from type mismatch LOGGER.warning("Illegal Label: I doesn't match previous token at %d", i) errors.append(Error(i, "Illegal Transition", s, safe_get(seq, i - 1), safe_get(seq, i + 1))) # Save out the previous span spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens))) # Start a new span span = _type tokens = [i] # No span was being build so start a new one with this I else: # Log error from starting with I LOGGER.warning("Illegal Label: starting a span with `I` at %d", i) errors.append(Error(i, "Illegal Start", s, safe_get(seq, i - 1), safe_get(seq, i + 1))) span = _type tokens = [i] # An `O` will cut off a span being built out. else: if span is not None: spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens))) # Set so no span is being built span = None tokens = [] # If we fell off the end so save the entity that we were making. if span is not None: spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens))) return sort_spans(spans), sort_errors(errors)
15,041
def find_best_similar_match(i1: int, i2: int, j1: int, j2: int, a: Sequence, b: Sequence, sm: SequenceMatcher = None) \ -> Tuple[int, int, float]: """ Finds most similar pair of elements in sequences bounded by indexes a[i1:i2], b[j1: j2]. :param i1: starting index in "a" sequence. :param i2: ending index in "a" sequence. :param j1: starting index in "b" sequence. :param j2: ending index in "b" sequence. :param a: first sequence. :param b: second sequence. :param sm: SequenceMatcher object. Creates new difflib.SequenceMatcher instance if not passed. :return: Tuple (best_i, best_j, best_ratio) where: best_i: is index of most similar element in sequence "a". best_j: is index of most similar element in sequence "b". best_ratio: similarity ratio of elements a[best_i] and b[best_j], where 1.0 means elements are identical and 0.0 means that elements are completely different. """ best_ratio = 0.0 best_i = best_j = None if not sm: sm = SequenceMatcher() for i in range(i1, i2): sm.set_seq1(a[i]) for j in range(j1, j2): if a[i] == b[j]: continue sm.set_seq2(b[j]) if sm.real_quick_ratio() > best_ratio and sm.quick_ratio() > best_ratio and sm.ratio() > best_ratio: best_i = i best_j = j best_ratio = sm.ratio() return best_i, best_j, best_ratio
15,042
def ConvUpscaleBlock(inputs, n_filters, kernel_size=[3, 3], scale=2):
    """
    Basic conv transpose block for Encoder-Decoder upsampling
    Apply successively Transposed Convolution, BatchNormalization, ReLU nonlinearity
    """
    net = slim.conv2d_transpose(inputs, n_filters, kernel_size=[3, 3], stride=[2, 2], activation_fn=None)
    net = tf.nn.relu(slim.batch_norm(net, fused=True))
    return net
15,043
def create_ticket( client, chat_id, user_id, group_id, recipient_email, subject, slack_message_url ): """Create a new zendesk ticket in response to a new user question. :param client: The Zendesk web client to use. :param chat_id: The conversation ID on slack. :param user_id: Who to create the ticket as. :param group_id: Which group the ticket belongs to. :param recipient_email: The email addres to CC on the issue. :param subject: The title of the support issue. :param slack_message_url: The link to message on the support slack channel. :returns: A Zenpy.Ticket instance. """ log = logging.getLogger(__name__) log.debug( f'Assigning new ticket subject:<{subject}> to ' f'user:<{user_id}> and group:<{group_id}> ' ) # And assign this ticket to them. I can then later filter comments that # should go to the ZenSlackChat webhook to just those in the ZenSlackChat # group. issue = Ticket( type='ticket', external_id=chat_id, requestor_id=user_id, submitter_id=user_id, assingee_id=user_id, group_id=group_id, subject=subject, description=subject, recipient=recipient_email, comment=Comment( body=f'This is the message on slack {slack_message_url}.', author_id=user_id ) ) log.debug(f'Creating new ticket with subject:<{subject}>') ticket_audit = client.tickets.create(issue) ticket_id = ticket_audit.ticket.id log.debug(f'Ticket for subject:<{subject}> created ok:<{ticket_id}>') return ticket_audit.ticket
15,044
def make_random_password(self, length = 10, allowed_chars = 'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
    """
    Generate a random password with the given length and given
    allowed_chars. The default value of allowed_chars does not have "I" or
    "O" or letters and digits that look similar -- just to avoid confusion.
    """
    return get_random_string(length, allowed_chars)
15,045
def autocov_vector(x, nlags=None): """ This method computes the following function .. math:: R_{xx}(k) = E{ x(t)x^{*}(t-k) } = E{ x(t+k)x^{*}(k) } k \in {0, 1, ..., nlags-1} (* := conjugate transpose) Note: this is related to the other commonly used definition for vector autocovariance .. math:: R_{xx}^{(2)}(k) = E{ x(t-k)x^{*}(k) } = R_{xx}^{*}(k) = R_{xx}(-k) Parameters ---------- x: ndarray (nc, N) nlags: int, optional compute lags for k in {0, ..., nlags-1} Returns ------- rxx : ndarray (nc, nc, nlags) """ return crosscov_vector(x, x, nlags=nlags)
15,046
def sequence_masking(x, mask, mode=0, axis=None, heads=1):
    """Conditionally mask a sequence.

    mask: 0/1 matrix of shape (batch_size, sequence);
    mode: if 0, multiply x by the mask directly;
          if 1, subtract a large positive number from the padded positions.
    axis: axis of the sequence dimension, defaults to 1;
    heads: number of times the batch dimension has to be repeated.
    """
    if mask is None or mode not in [0, 1]:
        return x
    else:
        if heads != 1:
            mask = K.expand_dims(mask, 1)
            mask = K.tile(mask, (1, heads, 1))
            mask = K.reshape(mask, (-1, K.shape(mask)[2]))
        if axis is None:
            axis = 1
        if axis == -1:
            axis = K.ndim(x) - 1
        assert axis > 0, "axis must be greater than 0"
        for _ in range(axis - 1):
            mask = K.expand_dims(mask, 1)
        for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1):
            mask = K.expand_dims(mask, K.ndim(mask))
        if mode == 0:
            return x * mask
        else:
            return x - (1 - mask) * 1e12
15,047
def declare_encoding(log, labelling, encoding, additional_columns, cols=None): #TODO JONAS """creates and returns the DataFrame encoded using the declare encoding :param log: :param labelling: :param encoding: :param additional_columns: :param cols: :return: """ filter_t = True print("Filter_t", filter_t) templates = template_sizes.keys() constraint_threshold = 0.1 candidate_threshold = 0.1 #apply prefix log = [Trace(trace[:encoding.prefix_length], attributes=trace.attributes) for trace in log] # Read into suitable data structure transformed_log = xes_to_positional(log) labels = {trace.attributes['concept:name']: trace.attributes['label'] for trace in log} # Extract unique activities from log events_set = {event_label for tid in transformed_log for event_label in transformed_log[tid]} # Brute force all possible candidates if cols is None: candidates = [(event,) for event in events_set] + [(e1, e2) for e1 in events_set for e2 in events_set if e1 != e2] else: candidates = list({ make_tuple(c.split(':')[1]) if len(c.split(':')) > 1 else c for c in cols if c not in ['label', 'trace_id'] }) print("Start candidates:", len(candidates)) # Count by class true_count = len([trace.attributes['concept:name'] for trace in log if trace.attributes['label'] == 'true']) false_count = len(log) - true_count print("{} deviant and {} normal traces in set".format(false_count, true_count)) ev_support_true = int(true_count * candidate_threshold) ev_support_false = int(false_count * candidate_threshold) if filter_t and cols is None: print(filter_t) print("Filtering candidates by support") candidates = filter_candidates_by_support(candidates, transformed_log, labels, ev_support_true, ev_support_false) print("Support filtered candidates:", len(candidates)) constraint_support_false = int(false_count * constraint_threshold) constraint_support_true = int(true_count * constraint_threshold) train_results = generate_train_candidate_constraints(candidates, templates, transformed_log, labels, constraint_support_true, constraint_support_false, filter_t=filter_t) print("Candidate constraints generated") # transform to numpy # get trace names data, labels, featurenames, train_names = transform_results_to_numpy(train_results, labels, transformed_log, cols) df = pd.DataFrame(data, columns=featurenames) df["trace_id"] = train_names df["label"] = labels.tolist() return df
15,048
def __long_description() -> str:
    """Returns project long description."""
    return f"{__readme()}\n\n{__changelog()}"
15,049
def captains_draft(path=None, config=None):
    """Similar to captains mode with 27 heroes, only 3 bans per team"""
    game = _default_game(path, config=config)
    game.options.game_mode = int(DOTA_GameMode.DOTA_GAMEMODE_CD)
    return game
15,050
def create(width, height, pattern=None): """Create an image optionally filled with the given pattern. :note: You can make no assumptions about the return type; usually it will be ImageData or CompressedImageData, but patterns are free to return any subclass of AbstractImage. :Parameters: `width` : int Width of image to create `height` : int Height of image to create `pattern` : ImagePattern or None Pattern to fill image with. If unspecified, the image will initially be transparent. :rtype: AbstractImage """ if not pattern: pattern = SolidColorImagePattern() return pattern.create_image(width, height)
15,051
def compute_conformer(smile: str, max_iter: int = -1) -> np.ndarray: """Computes conformer. Args: smile: Smile string. max_iter: Maximum number of iterations to perform when optimising MMFF force field. If set to <= 0, energy optimisation is not performed. Returns: A tuple containing index, fingerprint and conformer. Raises: RuntimeError: If unable to convert smile string to RDKit mol. """ mol = rdkit.Chem.MolFromSmiles(smile) if not mol: raise RuntimeError('Unable to convert smile to molecule: %s' % smile) conformer_failed = False try: mol = generate_conformers( mol, max_num_conformers=1, random_seed=45, prune_rms_thresh=0.01, max_iter=max_iter) except IOError as e: logging.exception('Failed to generate conformers for %s . IOError %s.', smile, e) conformer_failed = True except ValueError: logging.error('Failed to generate conformers for %s . ValueError', smile) conformer_failed = True except: # pylint: disable=bare-except logging.error('Failed to generate conformers for %s.', smile) conformer_failed = True atom_features_list = [] conformer = None if conformer_failed else list(mol.GetConformers())[0] for atom in mol.GetAtoms(): atom_features_list.append(atom_to_feature_vector(atom, conformer)) conformer_features = np.array(atom_features_list, dtype=np.float32) return conformer_features
15,052
def get_api_key():
    """Load API key."""
    api_key_file = open('mailgun_api_key.txt', 'r')
    api_key = api_key_file.read()
    api_key_file.close()
    return api_key.strip()
15,053
def bitserial_conv2d_strategy_hls(attrs, inputs, out_type, target): """bitserial_conv2d hls strategy""" strategy = _op.OpStrategy() layout = attrs.data_layout if layout == "NCHW": strategy.add_implementation( wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw), wrap_topi_schedule(topi.hls.schedule_bitserial_conv2d_nchw), name="bitserial_conv2d_nchw.hls", ) elif layout == "NHWC": strategy.add_implementation( wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc), wrap_topi_schedule(topi.hls.schedule_bitserial_conv2d_nhwc), name="bitserial_conv2d_nhwc.hls", ) else: raise ValueError("Data layout {} not supported.".format(layout)) return strategy
15,054
def visit(planfile,tracefile=None) : """ Reduce an APOGEE visit Driver to do 3 chips in parallel Makes median flux plots """ # reduce channels in parallel chan=['a','b','c' ] procs=[] for channel in [1] : kw={'planfile' : planfile, 'channel' : channel, 'clobber' : False} procs.append(mp.Process(target=do_visit,kwargs=kw)) for proc in procs : proc.start() for proc in procs : proc.join() plan=yaml.load(open(planfile,'r'), Loader=yaml.FullLoader) fig,ax=plots.multi(1,1) allmags=[] allinst=[] for ichan,channel in enumerate([1]) : mags=[] inst=[] for obj in plan['APEXP'] : if obj['flavor'] != 'object' : continue name='ap1D-{:s}-{:08d}.fits'.format(chan[channel],obj['name']) out=CCDData.read(name) print(name,out.header['NREAD']) mapname=plan['plugmap'] if np.char.find(mapname,'conf') >=0 : plug=sdss.config(out.header['CONFIGID'],specid=2) hmag=plug['h_mag'] else : plug=sdss.config(os.environ['MAPPER_DATA_N']+'/'+mapname.split('-')[1]+'/plPlugMapM-'+mapname+'.par',specid=2,struct='PLUGMAPOBJ') plate=int(mapname.split('-')[0]) holes=yanny('{:s}/plates/{:04d}XX/{:06d}/plateHolesSorted-{:06d}.par'.format( os.environ['PLATELIST_DIR'],plate//100,plate,plate)) h=esutil.htm.HTM() m1,m2,rad=h.match(plug['ra'],plug['dec'],holes['STRUCT1']['target_ra'],holes['STRUCT1']['target_dec'],0.1/3600.,maxmatch=500) hmag=plug['mag'][:,1] hmag[m1]=holes['STRUCT1']['tmass_h'][m2] i1,i2=match.match(300-np.arange(300),plug['fiberId']) mag='H' rad=np.sqrt(plug['xFocal'][i2]**2+plug['yFocal'][i2]**2) plots.plotp(ax,hmag[i2],+2.5*np.log10(np.median(out.data/(out.header['NREAD']-2),axis=1))[i1],color=None, zr=[0,300],xr=[8,15],size=20,label=name,xt=mag,yt='-2.5*log(cnts/read)') mags.append(hmag[i2]) inst.append(-2.5*np.log10(np.median(out.data/(out.header['NREAD']-2),axis=1))[i1]) ax.grid() ax.legend() allmags.append(mags) allinst.append(inst) fig.suptitle(planfile) fig.tight_layout() fig.savefig(planfile.replace('.yaml','.png')) return allmags,allinst
15,055
def get_fpga_bypass_mode(serverid):
    """ Read back FPGA bypass mode setting """
    try:
        interface = get_ipmi_interface(serverid, ["ocsoem", "fpgaread", "mode"])
        return parse_get_fpga_bypass_mode(interface, "mode")
    except Exception as e:
        return set_failure_dict("get_fpga_bypass_mode() Exception {0}".format(e),
                                completion_code.failure)
15,056
def lookup_complement(binding):
    """
    Extracts a complement link from the scope of the given binding.

    Returns an instance of :class:`htsql.core.tr.binding.Recipe`
    or ``None`` if a complement link is not found.

    `binding` (:class:`htsql.core.tr.binding.Binding`)
        A binding node.
    """
    probe = ComplementProbe()
    return lookup(binding, probe)
15,057
def flatten(colours):
    """Flatten the cubular array into one long list."""
    return list(itertools.chain.from_iterable(itertools.chain.from_iterable(colours)))
15,058
def efficientnet_b6(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """EfficientNet-B6"""
    # NOTE for train, drop_rate should be 0.5
    # kwargs['drop_connect_rate'] = 0.2  # set when training, TODO add as cmd arg
    model_name = "tf_efficientnet_b6"
    default_cfg = default_cfgs[model_name]
    model = _gen_efficientnet(
        model_name=model_name,
        channel_multiplier=1.8,
        depth_multiplier=2.6,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs
    )
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfgs[model_name], num_classes)
    return model
15,059
def env_revert_setup_parser(subparser):
    """restore environments to their state before update"""
    subparser.add_argument(
        metavar='env', dest='env',
        help='name or directory of the environment to activate'
    )
    spack.cmd.common.arguments.add_common_arguments(subparser, ['yes_to_all'])
15,060
def create_word_search_board(number: int):
    """
    This function creates a numpy array of zeros, with dimensions of
    number x number, which is set by the user. The array is then iterated
    through, and zeros are replaced with -1's to avoid confusion with the
    alphabet (A) beginning at 0.
    """
    board = numpy.zeros((number, number))
    for i in range(len(board)):
        for x in range(number):
            board[i][x] = -1
    return board
15,061
def combine_station_data(station, input_dir, temp_dir): """ This function combines data for a given station across multiple realizations, writting a single output file in temp_dir """ data = {} # Get realizations realizations = sorted(os.listdir(input_dir)) for realization in realizations: basedir = os.path.join(input_dir, realization) data_file = glob.glob("%s%s%s.%s.rd50" % (basedir, os.sep, realization, station)) if len(data_file) != 1: raise bband_utils.ProcessingError("Data for station %s " % (station) + "not found for " "realization %s!" % (realization)) data_file = data_file[0] in_data = open(data_file, 'r') for line in in_data: line = line.strip() # Skip comments if line.startswith("#"): continue pieces = line.split() pieces = [float(piece) for piece in pieces] key = pieces[0] pieces = pieces[1:] if not key in data: # Key is new to dictionary empty_set = [[] for _ in pieces] data[key] = empty_set for idx, value in enumerate(pieces): data[key][idx].append(value) in_data.close() # Now, write the output file out_file = open((os.path.join(temp_dir, "%s.rd50" % (station))), 'w') keys = sorted(data.keys()) for key in keys: out_file.write("%10.4f" % (key)) for comp in data[key]: out_file.write(" %10.5e" % (numpy.mean(comp))) out_file.write("\n")
15,062
def test_register_sequence_decl_path2(collector, sequence_decl):
    """Test handling wrong path : too many ':'.

    """
    tb = {}
    sequence_decl.view = 'exopy_pulses.sequences:sequences:Sequence'
    sequence_decl.register(collector, tb)
    assert 'exopy_pulses.BaseSequence' in tb
15,063
def get_parent_choices(menu, menu_item=None):
    """
    Returns flat list of tuples (possible_parent.pk, possible_parent.caption_with_spacer).
    If 'menu_item' is not given or None, returns every item of the menu. If given,
    intentionally omit it and its descendant in the list.
    """
    def get_flat_tuples(menu_item, excepted_item=None):
        if menu_item == excepted_item:
            return []
        else:
            choices = [(menu_item.pk, mark_safe(menu_item.caption_with_spacer()))]
            if menu_item.has_children():
                for child in menu_item.children():
                    choices += get_flat_tuples(child, excepted_item)
            return choices

    return get_flat_tuples(menu.root_item, menu_item)
15,064
def test_get_mli_type(mli_score):
    """Ensure correct type is returned."""
    typ = Score.get_type(mli_score.dump())
    assert typ == MLIScore
15,065
def _update_jacobian(state, jac):
    """
    we update the jacobian using J(t_{n+1}, y^0_{n+1})
    following the scipy bdf implementation rather than J(t_n, y_n) as per [1]
    """
    J = jac(state.y0, state.t + state.h)
    n_jacobian_evals = state.n_jacobian_evals + 1
    LU = jax.scipy.linalg.lu_factor(state.M - state.c * J)
    n_lu_decompositions = state.n_lu_decompositions + 1
    return state._replace(
        J=J,
        n_jacobian_evals=n_jacobian_evals,
        LU=LU,
        n_lu_decompositions=n_lu_decompositions,
    )
15,066
def find_matches(article, is_saved=False): """ Tries to find connecting articles from the related links provided by google :param is_saved: bool :param article: Article :return: """ print("Trying to match " + str(len(article.relatedLinks)) + " articles for " + article.title) for link in article.relatedLinks: try: article_resp = requests.get(link) except URLError as ex: print("Error in getting related link: " + str(ex)) continue if article_resp.ok: source = article_resp.content soup = BeautifulSoup(source, 'html.parser') article_list_soup = soup.findAll('div', attrs={'class': 'blended-wrapper'}) for relatedArticleSoup in article_list_soup: related_article_title = relatedArticleSoup.find('span', 'titletext').text try: related_article = Article.objects.get(title=related_article_title) except db.DoesNotExist: # Onto the next article # should we then scrape this article and then add it to the database? continue if related_article not in article.relatedArticles and article not in related_article.relatedArticles: print("Found a new related article: " + related_article_title + " for " + article.title) if is_saved: # The preferred method but can only be used on a saved doc article.update(push__relatedArticles=related_article) related_article.update(push__relatedArticles=article) else: article.relatedArticles.append(related_article) article.relatedAnalyzed = True else: print("Couldn't make a request for the related article url ") print("Error code: " + str(article_resp.status_code)) if article_resp.status_code == 503: print(colored("Google is probably mad. Stop for 5 secs.", 'red')) time.sleep(5)
15,067
def reduce_pad(sess: tf.Session, op_tensor_tuple: Tuple[Op, List[tf.Tensor]], _) -> (str, tf.Operation, tf.Operation): """ Pad module reducer :param sess: current tf session :param op_tensor_tuple: tuple containing the op to reduce, and a list of input tensors to the op """ name = "reduced_" + op_tensor_tuple[0].dotted_name pad_op = op_tensor_tuple[0].get_module() # Get padding tensor dimensions # Padding dimension information is captured in an input tensor to the pad op, index 1 of pad op inputs # Dimensions of this tensor are always (N, 2), where N is the dimensionality of the input tensor coming into pad. # The value of padding[N][0] gives the amount to pad in dimension N prior to the contents of the input to pad, while # padding[N][1] gives the amount to pad in dimension N after the contents of the input. # Currently we do not support reducing a pad op that modifies the channel dimension, which is the last dimension, # indexed by -1 below. So check to make sure that indices [-1][0] and [-1][1] remain 0 (no padding). padding_tensor_eval = sess.run(pad_op.inputs[1]) if padding_tensor_eval[-1][0] != 0 or padding_tensor_eval[-1][1] != 0: raise NotImplementedError("Attempting to reduce pad operation that modifies channel size, not supported.") new_padding_tensor = tf.constant(padding_tensor_eval) # No need to actually modify padding tensor # Get constant value for padding # If pad op takes a non default constant value (default = 0), it appears as a third input tensor to pad op, index 2 const_val = 0 if len(pad_op.inputs) > 2: const_val = sess.run(pad_op.inputs[2]) # Get mode # Mode can be 'CONSTANT', 'SYMMETRIC', or 'REFLECT'. 'CONSTANT' is default, and will not appear as a mode attribute # if it is the case. try: mode = pad_op.get_attr('mode') mode = mode.decode('utf-8') except ValueError: mode = 'CONSTANT' new_tensor = tf.pad(op_tensor_tuple[1][0], new_padding_tensor, constant_values=const_val, mode=mode, name=name) module = sess.graph.get_operation_by_name(name) return name, new_tensor.op, module
15,068
def parse_fastq(fh):
    """ Parse reads from a FASTQ filehandle.  For each read, we
        return a name, nucleotide-string, quality-string triple. """
    reads = []
    while True:
        first_line = fh.readline()
        if len(first_line) == 0:
            break  # end of file
        name = first_line[1:].rstrip()
        seq = fh.readline().rstrip()
        fh.readline()  # ignore line starting with +
        qual = fh.readline().rstrip()
        reads.append((name, seq, qual))
    return reads
15,069
def minhash_256(features):
    # type: (List[int]) -> bytes
    """
    Create 256-bit minimum hash digest.

    :param List[int] features: List of integer features
    :return: 256-bit binary from the least significant bits of the minhash values
    :rtype: bytes
    """
    return compress(minhash(features), 4)
15,070
def coalesce(*values):
    """Returns the first not-None argument or None"""
    return next((v for v in values if v is not None), None)
15,071
def comparison_scatter(Xexact,Xinferred,vmax=None,color='cornflowerblue',alpha=0.05,axes=None,y=0.8): """ This method is used to compare the inferred force components to the exact ones along the trajectory, in a graphical way. It assumes that the compute_accuracy method has been called before to provide the exact force components.""" # Flatten the data: Xe = np.array([ x.reshape(int(np.prod(x.shape))) for x in Xexact ]) Xi = np.array([ x.reshape(int(np.prod(x.shape))) for x in Xinferred ]) Xe = Xe.reshape(np.prod(Xe.shape)) Xi = Xi.reshape(np.prod(Xi.shape)) if vmax is None: vmax = max(abs(Xe).max(),abs(Xi).max()) range_vals=np.array([[-vmax,vmax],[-vmax,vmax]]) plt.scatter(Xe,Xi,alpha=alpha,linewidth=0,c=color) from scipy.stats import pearsonr (r,p) = pearsonr(Xe,Xi) plt.plot([-vmax,vmax],[-vmax,vmax],'k-') plt.grid(True) plt.axis('equal') plt.xlabel('exact') plt.ylabel('inferred') plt.title(r"$r="+str(round(r,2 if r<0.98 else 3 if r<0.999 else 4 if r<0.9999 else 5))+"$",loc='left',y=y,x=0.05,fontsize=10) plt.xticks([0.]) plt.yticks([0.]) plt.xlim(-vmax,vmax) plt.ylim(-vmax,vmax)
15,072
def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals): """Tests the OBJ & PLY loaders with combinations of vertex / face normals, presence and absence of UVs, etc. """ from mitsuba.core.xml import load_string def test(): shape = load_string(""" <shape type="{0}" version="2.0.0"> <string name="filename" value="resources/data/tests/{0}/rectangle_{1}.{0}" /> <boolean name="face_normals" value="{2}" /> </shape> """.format(mesh_format, features, str(face_normals).lower())) assert shape.has_vertex_normals() == (not face_normals) positions = shape.vertex_positions_buffer() normals = shape.vertex_normals_buffer() texcoords = shape.vertex_texcoords_buffer() faces = shape.faces_buffer() (v0, v2, v3) = [positions[i*3:(i+1)*3] for i in [0, 2, 3]] assert ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3) assert ek.allclose(v2, [ 2.85, 0.0, 0.599999], atol=1e-3) assert ek.allclose(v3, [ 2.85, 0.0, -7.600000], atol=1e-3) if 'uv' in features: assert shape.has_vertex_texcoords() (uv0, uv2, uv3) = [texcoords[i*2:(i+1)*2] for i in [0, 2, 3]] # For OBJs (and .serialized generated from OBJ), UV.y is flipped. if mesh_format in ['obj', 'serialized']: assert ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105, 1-0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3) else: assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3) if shape.has_vertex_normals(): for n in [normals[i*3:(i+1)*3] for i in [0, 2, 3]]: assert ek.allclose(n, [0.0, 1.0, 0.0]) return fresolver_append_path(test)()
15,073
def euc_reflection(x, a):
    """
    Euclidean reflection (also hyperbolic) of x
    Along the geodesic that goes through a and the origin
    (straight line)
    """
    xTa = torch.sum(x * a, dim=-1, keepdim=True)
    norm_a_sq = torch.sum(a ** 2, dim=-1, keepdim=True).clamp_min(MIN_NORM)
    proj = xTa * a / norm_a_sq
    return 2 * proj - x
15,074
def is_online():
    """Check if host is online"""
    conn = httplib.HTTPSConnection("www.google.com", timeout=1)
    try:
        conn.request("HEAD", "/")
        return True
    except Exception:
        return False
    finally:
        conn.close()
15,075
def show_digit(x):
    """
    Inputs:
        x: cluster center matrix (k, p), returned by kmeans.
    """
    w = 20
    h = 20
    col = 10
    row = (x.shape[0] + col - 1) // col
    plt.figure(figsize=(10, 10))
    padding = row * col - x.shape[0]
    if padding:
        print(x.shape, padding)
        x = np.vstack((x, np.zeros((padding, x.shape[1]))))
    x = x.reshape(row, col, w, h).transpose(0, 3, 1, 2).reshape(row * h, col * w)
    plt.imshow(x, cmap='gray')
15,076
def text_value(s): """Convert a raw Text property value to the string it represents. Returns an 8-bit string, in the encoding of the original SGF string. This interprets escape characters, and does whitespace mapping: - linebreak (LF, CR, LFCR, or CRLF) is converted to \n - any other whitespace character is replaced by a space - backslash followed by linebreak disappears - other backslashes disappear (but double-backslash -> single-backslash) """ s = _newline_re.sub(b"\n", s) s = s.translate(_whitespace_table) is_escaped = False result = [] for chunk in _chunk_re.findall(s): if is_escaped: if chunk != b"\n": result.append(chunk) is_escaped = False elif chunk == b"\\": is_escaped = True else: result.append(chunk) return b"".join(result)
15,077
def modifica_immobile_pw():
    """Receives the ID of the property (Immobile) to modify and updates one
    attribute chosen by the user."""
    s = input("Vuoi la lista degli immobili per scegliere il ID Immobile da modificare? (S/N)")
    if s == "S" or s == "s":
        stampa_immobili_pw()
    s = input("Dammi ID Immobile da modificare -")
    immo = Immobile.select().where(Immobile.id == int(s)).get()
    scel = input("Cosa vuoi modificare?\ni=ID proprietario -\nd=Indirizzo -\np=Prezzo -\nc=ClasseEnergetica ")
    if scel == "i":
        # check whether immo is a list (iterate it) or a single object (no iteration needed)
        id_cliente = (input("Dammi il nuovo ID Cliente del Proprietario -"))
        immo.cliente_id = int(id_cliente)
    elif scel == "d":
        new_indirizzo = input("Dammi il nuovo indirizzo dell'immobile -")
        immo.indirizzo = new_indirizzo
    elif scel == "p":
        new_prezzo = input("Dammi il nuovo prezzo dell'Immobile -")
        immo.prezzo = int(new_prezzo)
    elif scel == "c":
        new_classe = input("Dammi la nuova Classe Energetica dell'Immobile -")
        immo.classe_energ = new_classe
    immo.save()
    return True
15,078
def run(context, port):
    """
    Run the Webserver/SocketIO and app
    """
    global ctx
    ctx = context
    app.run(port=port)
15,079
def timer(function):
    """
    timer method for Euler problems
    prints the result and time taken in seconds
    """
    start = _timeit.default_timer()
    result = function()
    end = _timeit.default_timer()
    print(f'result: {result} ({(end-start):.2f}s)')
15,080
def hpat_pandas_series_len(self): """ Pandas Series operator :func:`len` implementation .. only:: developer Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_len Parameters ---------- series: :class:`pandas.Series` Returns ------- :obj:`int` number of items in the object """ _func_name = 'Operator len().' if not isinstance(self, SeriesType): raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self)) def hpat_pandas_series_len_impl(self): return len(self._data) return hpat_pandas_series_len_impl
15,081
def test_default(Class, default_in, default_out): """ test attribute default property """ attribute = Class("test", default=default_in) assert attribute.default == default_out
15,082
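A hedged sketch of how this test might be driven with pytest.mark.parametrize; the Attribute stand-in class and the default values shown are assumptions, not taken from the original test module.

import pytest

class Attribute:  # stand-in for the class under test
    def __init__(self, name, default=None):
        self.default = default

@pytest.mark.parametrize(
    "Class, default_in, default_out",
    [(Attribute, None, None), (Attribute, 5, 5), (Attribute, "x", "x")],
)
def test_default_sketch(Class, default_in, default_out):
    assert Class("test", default=default_in).default == default_out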
def arrays_not_same_size(inputs: List[np.ndarray]) -> bool:
    """Checks whether the input arrays differ in shape.

    Args:
        inputs (List[np.ndarray]): Input arrays to validate

    Returns:
        True if the arrays are not all the same shape, False if they all match
    """
    shapes = [i.shape for i in inputs]
    shp_first = shapes[0]
    shp_rest = shapes[1:]
    return not np.array_equiv(shp_first, shp_rest)
15,083
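A quick usage sketch for arrays_not_same_size above (note that it returns True when the shapes differ).

import numpy as np

a = np.zeros((2, 3))
b = np.ones((2, 3))
c = np.ones((4, 3))
arrays_not_same_size([a, b])     # False: all shapes match
arrays_not_same_size([a, b, c])  # True: one shape differs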
def GetReaderForFile(filename): """ Given a filename return a VTK reader that can read it """ r = vtkPNGReader() if not r.CanReadFile(filename): r = vtkPNMReader() if not r.CanReadFile(filename): r = vtkJPEGReader() if not r.CanReadFile(filename): r = vtkTIFFReader() if not r.CanReadFile(filename): return None r.SetFileName(filename) return r
15,084
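A usage sketch for GetReaderForFile above; "scan.png" is a hypothetical input path, and the VTK reader classes are assumed to be imported as in the function.

reader = GetReaderForFile("scan.png")
if reader is None:
    print("unsupported image format")
else:
    reader.Update()
    image_data = reader.GetOutput()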
def build_figure_nn(df, non_private, semantic): """ Dataframe with one semantic and one model """ l = df.query("epsilon > 0").sort_values(["train_size", "epsilon"]) naive, low, high = get_plot_bounds(df) fig = px.line( l, x="train_size", y="accuracy", range_y=[low, high], color="epsilon", hover_data=["n_blocks", "delta", "noise"], title=f"{list(l['task'])[0]} {list(l['model'])[0]} {semantic} accuracy", log_y=False, ).update_traces(mode="lines+markers") fig.add_trace( go.Scatter( x=non_private.sort_values("train_size")["train_size"], y=non_private.sort_values("train_size")["accuracy"], mode="lines+markers", name="Non private", ) ) fig.add_trace( go.Scatter( x=l["train_size"], y=[naive] * len(l), mode="lines", name="Naive baseline", ) ) return fig
15,085
def get_incomplete_sample_nrs(df): """ Returns sample nrs + topologies if at least 1 algorithm result is missing """ topology_incomplete_sample_nr_map = dict() n_samples = df.loc[df['sample_idx'].idxmax()]['sample_idx'] + 1 for ilp_method in np.unique(df['algorithm_complete']): dfx = df[df['algorithm_complete'] == ilp_method] dfg_tops = dfx.groupby(by='topology_name') for key, group in dfg_tops: if n_samples > group.shape[0]: if key not in topology_incomplete_sample_nr_map: topology_incomplete_sample_nr_map[key] = set() for s_nr in range(n_samples): if s_nr not in list(group['sample_idx']): topology_incomplete_sample_nr_map[key].add(s_nr) return topology_incomplete_sample_nr_map
15,086
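A small sketch of the expected input for get_incomplete_sample_nrs above; the column names are taken from the function body, the values are made up.

import pandas as pd

df = pd.DataFrame({
    "algorithm_complete": ["ilp_a", "ilp_a", "ilp_a", "ilp_b"],
    "topology_name": ["net1", "net1", "net2", "net1"],
    "sample_idx": [0, 1, 0, 0],
})
# n_samples is inferred from the largest sample_idx (here 2), so every
# topology/algorithm pair missing a sample index is reported.
print(get_incomplete_sample_nrs(df))  # {'net2': {1}, 'net1': {1}}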
def play(player1Factory, player2Factory): """Play a game of Battleships player1Factory and player2Factory should be functions capable of turning a Battleships instance into an Engine for their respective players """ game = Game() #Construct the two players from the given Engine factories p1 = player1Factory(game) p2 = player2Factory(game) #Get each engine to place down its ships p1.placeShips() p2.placeShips() while True: game.loud = p1.isTalkative() p1.attackShips() if game.score is not None: break #Player 1 win game.loud = p2.isTalkative() p2.attackShips() if game.score is not None: break #Player 2 win if not game.loud: print(f"Player {max(-game.player, 0) + 1} has won!") print(f"Game ended after {game.n_moves // 2} turns")
15,087
def _read_pos_at_ref_pos(rec: AlignedSegment,
                         ref_pos: int,
                         previous: Optional[bool] = None) -> Optional[int]:
    """
    Returns the read or query position at the reference position.

    If the reference position is not within the span of reference positions to which the
    read is aligned, an exception will be raised. If the reference position is within the span
    but is not aligned (i.e. it is deleted in the read), behavior is controlled by the
    "previous" argument.

    Args:
        rec: the AlignedSegment within which to find the read position
        ref_pos: the reference position to be found
        previous: Controls behavior when the reference position is not aligned to any
            read position. True indicates to return the previous read position, False
            indicates to return the next read position and None indicates to return None.

    Returns:
        The read position at the reference position, or None.
    """
    if ref_pos < rec.reference_start or ref_pos >= rec.reference_end:
        raise ValueError(f"{ref_pos} is not within the reference span for read {rec.query_name}")

    pairs = rec.get_aligned_pairs()
    index = 0
    read_pos = None
    for read, ref in pairs:
        if ref == ref_pos:
            read_pos = read
            break
        else:
            index += 1

    # Compare against None explicitly: read position 0 is a valid (but falsy) value.
    if read_pos is None and previous is not None:
        if previous:
            while read_pos is None and index > 0:
                index -= 1
                read_pos = pairs[index][0]
        else:
            while read_pos is None and index < len(pairs):
                read_pos = pairs[index][0]
                index += 1

    return read_pos
15,088
def load_gtis(fits_file, gtistring=None): """Load GTI from HDU EVENTS of file fits_file.""" from astropy.io import fits as pf import numpy as np gtistring = _assign_value_if_none(gtistring, 'GTI') logging.info("Loading GTIS from file %s" % fits_file) lchdulist = pf.open(fits_file, checksum=True) lchdulist.verify('warn') gtitable = lchdulist[gtistring].data gti_list = np.array([[a, b] for a, b in zip(gtitable.field('START'), gtitable.field('STOP'))], dtype=np.longdouble) lchdulist.close() return gti_list
15,089
def macro_australia_unemployment_rate(): """ 东方财富-经济数据-澳大利亚-失业率 http://data.eastmoney.com/cjsj/foreign_5_2.html :return: 失业率 :rtype: pandas.DataFrame """ url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx" params = { "type": "GJZB", "sty": "HKZB", "js": "({data:[(x)],pages:(pc)})", "p": "1", "ps": "2000", "mkt": "5", "stat": "2", "_": "1625474966006", } r = requests.get(url, params=params) data_text = r.text data_json = demjson.decode(data_text[1:-1]) temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]]) temp_df.columns = [ "时间", "前值", "现值", "发布日期", ] temp_df["前值"] = pd.to_numeric(temp_df["前值"]) temp_df["现值"] = pd.to_numeric(temp_df["现值"]) return temp_df
15,090
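A usage sketch; calling the function above performs a live request to the Eastmoney endpoint, so it needs network access.

macro_australia_unemployment_rate_df = macro_australia_unemployment_rate()
print(macro_australia_unemployment_rate_df.head())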
def test_masked_registration_random_masks():
    """masked_register_translation should be able to register
    translations between images even with random masks."""
    # Seed the random number generator for reproducible results
    np.random.seed(23)

    reference_image = cp.asarray(camera())
    shift = (-7, 12)
    shifted = np.real(
        fft.ifft2(fourier_shift(fft.fft2(reference_image), shift))
    )

    # Random masks with 75% of pixels being valid
    ref_mask = np.random.choice(
        [True, False], reference_image.shape, p=[3 / 4, 1 / 4]
    )
    shifted_mask = np.random.choice(
        [True, False], shifted.shape, p=[3 / 4, 1 / 4]
    )
    ref_mask = cp.asarray(ref_mask)
    shifted_mask = cp.asarray(shifted_mask)

    measured_shift = masked_register_translation(
        reference_image, shifted, ref_mask, shifted_mask
    )

    cp.testing.assert_array_equal(measured_shift, -cp.asarray(shift))
15,091
def _convert_flattened_paths(
    paths: List,
    quantization: float,
    scale_x: float,
    scale_y: float,
    offset_x: float,
    offset_y: float,
    simplify: bool,
) -> "LineCollection":
    """Convert a list of FlattenedPaths to a :class:`LineCollection`.

    Args:
        paths: list of FlattenedPaths
        quantization: maximum length of linear elements to approximate curve paths
        scale_x, scale_y: scale factor to apply
        offset_x, offset_y: offset to apply
        simplify: whether Shapely's simplify should be run

    Returns:
        new :class:`LineCollection` instance containing the converted geometries
    """
    lc = LineCollection()
    for result in paths:
        # Here we load the sub-parts of the path element. If such sub-parts are connected,
        # we merge them into a single line (e.g. a line string). If there are disconnections
        # in the path (e.g. multiple "M" commands), we create several lines
        sub_paths: List[List[complex]] = []
        for elem in result:
            if isinstance(elem, svg.Line):
                coords = [elem.start, elem.end]
            else:
                # This is a curved element that we approximate with small segments
                step = int(math.ceil(elem.length() / quantization))
                coords = [elem.start]
                coords.extend(elem.point((i + 1) / step) for i in range(step - 1))
                coords.append(elem.end)

            # merge to last sub path if first coordinates match
            if sub_paths:
                if sub_paths[-1][-1] == coords[0]:
                    sub_paths[-1].extend(coords[1:])
                else:
                    sub_paths.append(coords)
            else:
                sub_paths.append(coords)

        for sub_path in sub_paths:
            path = np.array(sub_path)

            # transform
            path += offset_x + 1j * offset_y
            path.real *= scale_x
            path.imag *= scale_y

            lc.append(path)

    if simplify:
        mls = lc.as_mls()
        lc = LineCollection(mls.simplify(tolerance=quantization))

    return lc
15,092
def _CustomSetAttr(self, sAttr, oValue): """ Our setattr replacement for DispatchBaseClass. """ try: return _g_dCOMForward['setattr'](self, ComifyName(sAttr), oValue) except AttributeError: return _g_dCOMForward['setattr'](self, sAttr, oValue)
15,093
def reduce_to_contemporaneous(ts): """ Simplify the ts to only the contemporaneous samples, and return the new ts + node map """ samples = ts.samples() contmpr_samples = samples[ts.tables.nodes.time[samples] == 0] return ts.simplify( contmpr_samples, map_nodes=True, keep_unary=True, filter_populations=False, filter_sites=False, record_provenance=False, filter_individuals=False, )
15,094
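A minimal sketch, assuming msprime/tskit are available and using msprime's legacy simulate API; two historical samples are included so the simplification actually drops something.

import msprime

samples = (
    [msprime.Sample(population=0, time=0)] * 4
    + [msprime.Sample(population=0, time=1.0)] * 2
)
ts = msprime.simulate(samples=samples, Ne=1e4, random_seed=1)
reduced_ts, node_map = reduce_to_contemporaneous(ts)
print(reduced_ts.num_samples)  # 4: only the time-0 samples remain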
def mk_inv_part_txt_file(filename):
    """Builds the URL of the inventory pdf file given by 'filename', records
    the current retrieval timestamp, and extracts the text of the local copy
    in the 'data' directory with pdftotext. (The actual download step is
    currently commented out.) Returns the URL, the timestamp and the
    extracted text.
    """
    url = ('http://www.pinakothek.de/sites/default/files/files/' + filename)
    print(url)
    # RequestObj = urllib.request.urlopen(url)
    now = datetime.datetime.utcnow().isoformat()
    print(now)
    # RequestObjRead = RequestObj.read()
    # with open('data/' + filename, 'w+b') as pdffile:
    #     pdffile.write(RequestObjRead)
    txtoutput = subprocess.check_output(['pdftotext', '-layout',
                                         'data/' + filename, '-'])
    # With PyPDF2 I got only blank lines ...
    txtoutput = txtoutput.decode('utf-8')
    return url, now, txtoutput
15,095
def scenario_map_fn(
        example,
        *,
        snr_range: tuple = (20, 30),
        sync_speech_source=True,
        add_speech_reverberation_early=True,
        add_speech_reverberation_tail=True,
        early_rir_samples: int = int(8000 * 0.05),  # 50 milli seconds
        details=False,
):
    """
    Takes care of the convolution with the RIRs and also generates noise.
    The random noise generator is fixed based on the example ID. It will
    therefore generate the same SNR and the same noise sequence the next
    time you use this DB.

    Args:
        example: Example dictionary.
        snr_range: required for noise generation
        sync_speech_source: pad and/or cut the source signal to match the
            length of the observations. Considers the offset.
        add_speech_reverberation_early:
            Calculate the speech_reverberation_early signal.
        add_speech_reverberation_tail:
            Calculate the speech_reverberation_tail signal.
        early_rir_samples:
            Number of samples after the propagation delay that count as the
            early part of the RIR.
        details:
            If True, also store the early/tail RIRs in the example.

    Returns:
        The example dictionary, extended with the generated audio signals.
    """
    h = example['audio_data']['rir']  # Shape (K, D, T)

    # Estimate start sample first, to make it independent of channel_mode
    rir_start_sample = np.array([get_rir_start_sample(h_k) for h_k in h])

    _, D, rir_length = h.shape

    # TODO: SAMPLE_RATE not defined
    # rir_stop_sample = rir_start_sample + int(SAMPLE_RATE * 0.05)
    # Use 50 milliseconds as early rir part, excluding the propagation delay
    # (i.e. "rir_start_sample")
    assert isinstance(early_rir_samples, int), (type(early_rir_samples), early_rir_samples)
    rir_stop_sample = rir_start_sample + early_rir_samples

    log_weights = example['log_weights']

    # The two sources have to be cut to same length
    K = example['num_speakers']
    T = example['num_samples']['observation']
    if 'original_source' not in example['audio_data']:
        # legacy code
        example['audio_data']['original_source'] = example['audio_data']['speech_source']
    if 'original_source' not in example['num_samples']:
        # legacy code
        example['num_samples']['original_source'] = example['num_samples']['speech_source']

    s = example['audio_data']['original_source']

    def get_convolved_signals(h):
        assert s.shape[0] == h.shape[0], (s.shape, h.shape)
        x = [fftconvolve(s_[..., None, :], h_, axes=-1)
             for s_, h_ in zip(s, h)]

        assert len(x) == len(example['num_samples']['original_source'])
        for x_, T_ in zip(x, example['num_samples']['original_source']):
            assert x_.shape == (D, T_ + rir_length - 1), (
                x_.shape, D, T_ + rir_length - 1)

        # This is Jahn's heuristic to be able to still use WSJ alignments.
        offset = [
            offset_ - rir_start_sample_
            for offset_, rir_start_sample_ in zip(
                example['offset'], rir_start_sample)
        ]

        assert len(x) == len(offset)
        x = [extract_piece(x_, offset_, T) for x_, offset_ in zip(x, offset)]

        x = np.stack(x, axis=0)
        assert x.shape == (K, D, T), x.shape
        return x

    x = get_convolved_signals(h)

    # Note: scale depends on channel mode
    std = np.maximum(
        np.std(x, axis=(-2, -1), keepdims=True),
        np.finfo(x.dtype).tiny,
    )

    # Rescale such that invasive SIR is as close as possible to `log_weights`.
scale = (10 ** (np.asarray(log_weights)[:, None, None] / 20)) / std # divide by 71 to ensure that all values are between -1 and 1 scale /= 71 x *= scale example['audio_data']['speech_image'] = x if add_speech_reverberation_early: h_early = h.copy() # Replace this with advanced indexing for i in range(h_early.shape[0]): h_early[i, ..., rir_stop_sample[i]:] = 0 x_early = get_convolved_signals(h_early) x_early *= scale example['audio_data']['speech_reverberation_early'] = x_early if details: example['audio_data']['rir_early'] = h_early if add_speech_reverberation_tail: h_tail = h.copy() for i in range(h_tail.shape[0]): h_tail[i, ..., :rir_stop_sample[i]] = 0 x_tail = get_convolved_signals(h_tail) x_tail *= scale example['audio_data']['speech_reverberation_tail'] = x_tail if details: example['audio_data']['rir_tail'] = h_tail if sync_speech_source: example['audio_data']['speech_source'] = synchronize_speech_source( example['audio_data']['original_source'], offset=example['offset'], T=T, ) else: # legacy code example['audio_data']['speech_source'] = \ example['audio_data']['original_source'] clean_mix = np.sum(x, axis=0) rng = _example_id_to_rng(example['example_id']) snr = rng.uniform(*snr_range) example["snr"] = snr rng = _example_id_to_rng(example['example_id']) n = get_white_noise_for_signal(clean_mix, snr=snr, rng_state=rng) example['audio_data']['noise_image'] = n example['audio_data']['observation'] = clean_mix + n return example
15,096
def init(): """ Initializes the local key-value store. """ STORAGE_DIR.mkdir(exist_ok=True)
15,097
def validate_checkpoint_type(checkpoint_type: str) -> None: """ Check that the passed `checkpoint_type` is valid. """ ALLOWED_CHECKPOINT_TYPES = ["state", "model"] assert checkpoint_type in ALLOWED_CHECKPOINT_TYPES, ( f"'checkpoint_type' ('{checkpoint_type}') not understood (likely " f"from the file name provided). It must be one of {ALLOWED_CHECKPOINT_TYPES}." )
15,098
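A quick usage note for validate_checkpoint_type above.

validate_checkpoint_type("state")    # passes silently
validate_checkpoint_type("weights")  # raises AssertionError with the message above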
def assert_same_shallow_tree(shallow, tree): """Asserts that `tree` has the same shallow structure as `shallow`.""" # Do a dummy multimap for the side-effect of verifying that the structures are # the same. This doesn't catch all the errors we actually care about, sadly. map_tree_up_to(shallow, lambda *args: (), tree)
15,099