content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def add_device(overlay_id) -> Union[str, Tuple[str, int]]: """ Add device to an overlay. """ manager = get_manager() api_key = header_api_key(request) if not manager.api_key_is_valid(api_key): return jsonify(error="Not authorized"), 403 if not request.data: return jsonify(error="Send device id to add to overlay in body"), 400 if "device_id" in request.json: return manager.add_device_to_overlay(overlay_id,request.get_json()['device_id']) return jsonify(error="Send device_id as JSON"), 400
18,200
def pvtol(t, x, u, params={}): """Reduced planar vertical takeoff and landing dynamics""" from math import sin, cos m = params.get('m', 4.) # kg, system mass J = params.get('J', 0.0475) # kg m^2, system inertia r = params.get('r', 0.25) # m, thrust offset g = params.get('g', 9.8) # m/s^2, gravitational constant c = params.get('c', 0.05) # N s/m, rotational damping l = params.get('l', 0.1) # m, pivot location return np.array([ x[3], -c/m * x[1] + 1/m * cos(x[0]) * u[0] - 1/m * sin(x[0]) * u[1], -g - c/m * x[2] + 1/m * sin(x[0]) * u[0] + 1/m * cos(x[0]) * u[1], -l/J * sin(x[0]) + r/J * u[0] ])
18,201
def write_bins(grp, req, buf, unit, res, chroms, bins, by_chr_bins, norm_info, show_warnings): """ Write the bins table, which has columns: chrom, start (in bp), end (in bp), and one column for each normalization type, named for the norm type. 'weight' is a reserved column for `cooler balance`. Chrom is an index which corresponds to the chroms table. Also write a dataset for each normalization vector within the hic file. """ n_chroms = len(chroms) n_bins = len(bins) idmap = dict(zip(chroms, range(n_chroms))) # Convert chrom names to enum chrom_ids = [idmap[chrom.encode('UTF-8')] for chrom in bins[:, 0]] starts = [int(val) for val in bins[:, 1]] ends = [int(val) for val in bins[:, 2]] enum_dtype = h5py.special_dtype(enum=(CHROMID_DTYPE, idmap)) # Store bins grp.create_dataset('chrom', shape=(n_bins,), dtype=enum_dtype, data=chrom_ids, **H5OPTS) grp.create_dataset('start', shape=(n_bins,), dtype=COORD_DTYPE, data=starts, **H5OPTS) grp.create_dataset('end', shape=(n_bins,), dtype=COORD_DTYPE, data=ends, **H5OPTS) # write columns for normalization vectors for norm in NORMS: norm_data = [] for chr_idx in by_chr_bins: chr_bin_end = by_chr_bins[chr_idx] try: norm_key = norm_info[norm, unit, res, chr_idx] except KeyError: WARN = True if show_warnings: print_stderr('!!! WARNING. Normalization vector %s does not exist for chr idx %s.' % (norm, chr_idx)) # add a vector of nan's for missing vectors norm_data.extend([np.nan]*chr_bin_end) continue norm_vector = read_normalization_vector(req, buf, norm_key) # NOTE: possible issue to look into # norm_vector returned by read_normalization_vector has an extra # entry at the end (0.0) that is never used and causes the # norm vectors to be longer than n_bins for a given chr # restrict the length of norm_vector to chr_bin_end for now. norm_data.extend(norm_vector[:chr_bin_end]) # we are no longer performing the inversion of hic weights, i.e. # from .hic2cool_updates import norm_convert # norm_data.extend(list(map(norm_convert, norm_vector[:chr_bin_end]))) if len(norm_data) != n_bins: error_str = ( '!!! ERROR. Length of normalization vector %s does not match the' ' number of bins.\nThis is likely a problem with the hic file' % (norm)) force_exit(error_str, req) grp.create_dataset( norm, shape=(len(norm_data),), dtype=NORM_DTYPE, data=np.array(norm_data, dtype=NORM_DTYPE), **H5OPTS)
18,202
def route( path: str, methods: List[str], **kwargs: Any ) -> Callable[[AnyCallable], AnyCallable]: """General purpose route definition. Requires you to pass an array of HTTP methods like GET, POST, PUT, etc. The remaining kwargs are exactly the same as for FastAPI's decorators like @get, @post, etc. Most users will probably want to use the shorter decorators like @get, @post, @put, etc. so they don't have to pass the list of methods. """ def marker(method: AnyCallable) -> AnyCallable: setattr( method, "_endpoint", EndpointDefinition( endpoint=method, args=RouteArgs(path=path, methods=methods, **kwargs) ), ) return method return marker
18,203
def canHaveGui(): """Return ``True`` if a display is available, ``False`` otherwise. """ # We cache this because calling the # IsDisplayAvailable function will cause the # application to steal focus under OSX! try: import wx return wx.App.IsDisplayAvailable() except ImportError: return False
18,204
def test_convert_json(): """ Test converting a JSON file to Parquet """ schema = pa.schema([ pa.field("foo", pa.int32()), pa.field("bar", pa.int64()) ]) input_path = "{}/tests/fixtures/simple_json.txt".format(os.getcwd()) expected_file = "{}/tests/fixtures/simple.parquet".format(os.getcwd()) with tempfile.NamedTemporaryFile() as f: output_file = f.name client.convert_json(input_path, output_file, schema) output = pq.ParquetFile(output_file) expected = pq.ParquetFile(expected_file) assert output.metadata.num_columns == expected.metadata.num_columns assert output.metadata.num_rows == expected.metadata.num_rows assert output.schema.equals(expected.schema) assert output.read_row_group(0).to_pydict() == expected.read_row_group(0).to_pydict()
18,205
def syntactic_analysis(input_fd): """ Performs lexical and syntactic analysis of a Tiger program. @type input_fd: C{file} @param input_fd: File descriptor of the Tiger program on which the syntactic analysis should be performed. @rtype: C{LanguageNode} @return: The result of the syntactic analysis is the abstract syntax tree corresponding to the Tiger program received as argument. The tree is returned through its root node. @raise SyntacticError: This exception is raised if a syntax error is found while analyzing the program. The exception contains information about the error, such as the line and/or column where the error was found. """ data = input_fd.read() ast = parser.parse(data) return ast
18,206
def create_fixxation_map(eye_x, eye_y, fixxation_classifier): """ :param eye_x: an indexable datastructure with the x eye coordinates :param eye_y: an indexable datastructure with the y eye coordinates :param fixxation_classifier: a list with values which indicate whether the move from the previous sample is a fixation. :return: a list of circles which bound the fixations (saccades are not bounded). The list is organized like this: [((circle1_x, circle1_y), circle1_radius, is_fixation), ...] """ # process into fixxation and saccade movements points_array = [] currently_fixxation = False current_points = [] for idx, classifier in enumerate(fixxation_classifier): if classifier == 1 and currently_fixxation == False: current_points = [(eye_x[idx], eye_y[idx])] currently_fixxation = True elif classifier == 1: current_points.append((eye_x[idx], eye_y[idx])) elif classifier == 0 and currently_fixxation == True: points_array.append((current_points.copy(), True)) current_points = [] currently_fixxation = False points_array.append(([(eye_x[idx], eye_y[idx])], False)) else: points_array.append(([(eye_x[idx], eye_y[idx])], True)) circles = [(make_circle(points), is_fixxation) for points, is_fixxation in points_array] circles = [((x, y), radius, is_fixxation) for ((x, y, radius), is_fixxation) in circles] return circles
18,207
def with_metadata(obj: T, key: str, value: Any) -> T: """ Adds meta-data to an object. :param obj: The object to add meta-data to. :param key: The key to store the meta-data under. :param value: The meta-data value to store. :return: obj. """ # Create the meta-data map if not hasattr(obj, META_DATA_KEY): try: setattr(obj, META_DATA_KEY, {}) except AttributeError as e: raise ValueError(f"Cannot set meta-data against objects of type {obj.__class__.__name__}") from e # Put this mapping in the map getattr(obj, META_DATA_KEY)[key] = value return obj
18,208
def checksum(uploaded_file: 'SimpleUploadedFile', **options): """ Function to calculate checksum for file, can be used to verify downloaded file integrity """ hash_type = options['type'] if hash_type == ChecksumType.MD5: hasher = hashlib.md5() elif hash_type == ChecksumType.SHA256: hasher = hashlib.sha256() else: raise ValueError(f'Hash type "{hash_type}" in "checksum" function is not valid') if uploaded_file.multiple_chunks(): for data in uploaded_file.chunks(HASH_CHUNK_SIZE): hasher.update(data) else: hasher.update(uploaded_file.read()) return { 'checksum': hasher.hexdigest() }
18,209
def get_page(url): """Get source of url Keyword Arguments: url : string """ logger.debug(f'Getting source for {url}') res = requests.get(url) logger.debug(f'Status code for {url} is {res.status_code}') if res.status_code < 200 or res.status_code >= 300: raise Exception(f''' Not successful retrieving {url}, the status code is {res.status_code} ''') logger.debug(f'Request successful, returning text') return res.text
18,210
def preceding_words(document: Document, position: types.Position) -> Optional[Tuple[str, str]]: """ Get the two words immediately preceding the cursor position, or None if the position is outside the document. """ lines = document.lines if position.line >= len(lines): return None row, col = position_from_utf16(lines, position) line = lines[row] try: word = line[:col].strip().split()[-2:] return word except ValueError: return None
18,211
def check_gpu(gpu, *args): """Move data in *args to GPU? gpu: options.gpu (None, or 0, 1, .. gpu index) """ if gpu == None: if isinstance(args[0], dict): d = args[0] #print(d.keys()) var_dict = {} for key in d: var_dict[key] = Variable(d[key]) if len(args) > 1: return [var_dict] + check_gpu(gpu, *args[1:]) else: return [var_dict] # it's a list of arguments if len(args) > 1: return [Variable(a) for a in args] else: # single argument, don't make a list return Variable(args[0]) else: if isinstance(args[0], dict): d = args[0] #print(d.keys()) var_dict = {} for key in d: var_dict[key] = Variable(d[key].cuda(gpu)) if len(args) > 1: return [var_dict] + check_gpu(gpu, *args[1:]) else: return [var_dict] # it's a list of arguments if len(args) > 1: return [Variable(a.cuda(gpu)) for a in args] else: # single argument, don't make a list return Variable(args[0].cuda(gpu))
18,212
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up the SleepIQ config entry.""" conf = entry.data email = conf[CONF_USERNAME] password = conf[CONF_PASSWORD] client_session = async_get_clientsession(hass) gateway = AsyncSleepIQ(client_session=client_session) try: await gateway.login(email, password) except SleepIQLoginException: _LOGGER.error("Could not authenticate with SleepIQ server") return False except SleepIQTimeoutException as err: raise ConfigEntryNotReady( str(err) or "Timed out during authentication" ) from err try: await gateway.init_beds() except SleepIQTimeoutException as err: raise ConfigEntryNotReady( str(err) or "Timed out during initialization" ) from err except SleepIQAPIException as err: raise ConfigEntryNotReady(str(err) or "Error reading from SleepIQ API") from err coordinator = SleepIQDataUpdateCoordinator(hass, gateway, email) # Call the SleepIQ API to refresh data await coordinator.async_config_entry_first_refresh() hass.data.setdefault(DOMAIN, {})[entry.entry_id] = coordinator hass.config_entries.async_setup_platforms(entry, PLATFORMS) return True
18,213
def print_sonar_alert(alert, console): """Print AUnit Alert as sonar message""" def print_alert(): for line in alert.details: console.printout(escape(line)) if alert.details and alert.stack: console.printout('') for frame in alert.stack: console.printout(escape(frame)) if alert.is_error: console.printout(f' <error message={quoteattr(alert.title)}>') print_alert() console.printout(' </error>') elif alert.is_warning: console.printout(f' <skipped message={quoteattr(alert.title)}>') print_alert() console.printout(' </skipped>')
18,214
def main(): # Main function for testing. """ Create binary tree. """ root = make_tree() """ All Traversals of the binary are as follows: """ print(f" In-order Traversal is {inorder(root)}") print(f" Pre-order Traversal is {preorder(root)}") print(f"Post-order Traversal is {postorder(root)}") print(f"Height of Tree is {height(root)}") print("Complete Level Order Traversal is : ") level_order_1(root) print("\nLevel-wise order Traversal is : ") for h in range(1, height(root) + 1): level_order_2(root, h) print("\nZigZag order Traversal is : ") zigzag(root) print()
18,215
def expansion(svsal,temp,pres,salt=None,dliq=None,dvap=None, chkvals=False,chktol=_CHKTOL,salt0=None,dliq0=None,dvap0=None, chkbnd=False,useext=False,mathargs=None): """Calculate seawater-vapour thermal expansion coefficient. Calculate the thermal expansion coefficient of a seawater-vapour parcel. :arg float svsal: Total sea-vapour salinity in kg/kg. :arg float temp: Temperature in K. :arg float pres: Pressure in Pa. :arg salt: Seawater salinity in kg/kg. If unknown, pass None (default) and it will be calculated. :type salt: float or None :arg dliq: Seawater liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. :type dliq: float or None :arg dvap: Water vapour density in kg/m3. If unknown, pass None (default) and it will be calculated. :type dvap: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg salt0: Initial guess for the seawater salinity in kg/kg. If None (default) then `_approx_tp` is used. :type salt0: float or None :arg dliq0: Initial guess for the seawater liquid water density in kg/m3. If None (default) then `flu3a._dliq_default` is used. :type dliq0: float or None :arg dvap0: Initial guess for the salinity in kg/kg. If None (default) then `flu3a._dvap_default` is used. :type dvap0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg bool useext: If False (default) then the salt contribution is calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT. :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Expansion coefficient in 1/K. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :raises RuntimeWarning: If the equilibrium seawater salinity is lower than the total parcel salinity. :Examples: >>> expansion(0.035,274.,610.) 0.4588634213 """ salt, dliq, dvap = eq_seavap(svsal,temp,pres,salt=salt,dliq=dliq, dvap=dvap,chkvals=chkvals,chktol=chktol,salt0=salt0,dliq0=dliq0, dvap0=dvap0,chkbnd=chkbnd,useext=useext,mathargs=mathargs) g_p = seavap_g(0,0,1,svsal,temp,pres,salt=salt,dliq=dliq,dvap=dvap, useext=useext) g_tp = seavap_g(0,1,1,svsal,temp,pres,salt=salt,dliq=dliq,dvap=dvap, useext=useext) alpha = g_tp / g_p return alpha
18,216
def git_push(branches_to_push: list, ask: bool = True, push: bool = False): """Push all changes.""" if ask: push = input("Do you want to push changes? (y)") in ("", "y", "yes",) cmd = ["git", "push", "--tags", "origin", "master", *branches_to_push] if push: command(cmd) else: print( f"Changes are not pushed. Command for manual pushing: {subprocess.list2cmdline(cmd)}" )
18,217
def yaml_to_dict(yaml_str=None, str_or_buffer=None): """ Load YAML from a string, file, or buffer (an object with a .read method). Parameters are mutually exclusive. Parameters ---------- yaml_str : str, optional A string of YAML. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- dict Conversion from YAML. """ if not yaml_str and not str_or_buffer: raise ValueError('One of yaml_str or str_or_buffer is required.') if yaml_str: d = yaml.load(yaml_str) elif isinstance(str_or_buffer, str): with open(str_or_buffer) as f: d = yaml.load(f) else: d = yaml.load(str_or_buffer) return d
18,218
def verify_block_arguments( net_part: str, block: Dict[str, Any], num_block: int, ) -> Tuple[int, int]: """Verify block arguments are valid. Args: net_part: Network part, either 'encoder' or 'decoder'. block: Block parameters. num_block: Block ID. Return: block_io: Input and output dimension of the block. """ block_type = block.get("type") if block_type is None: raise ValueError( "Block %d in %s doesn't have a type assigned." % (num_block, net_part) ) if block_type == "transformer": arguments = {"d_hidden", "d_ff", "heads"} elif block_type == "conformer": arguments = { "d_hidden", "d_ff", "heads", "macaron_style", "use_conv_mod", } if block.get("use_conv_mod", None) is True and "conv_mod_kernel" not in block: raise ValueError( "Block %d: 'use_conv_mod' is True but " " 'conv_mod_kernel' is not specified" % num_block ) elif block_type == "causal-conv1d": arguments = {"idim", "odim", "kernel_size"} if net_part == "encoder": raise ValueError("Encoder does not support 'causal-conv1d.'") elif block_type == "conv1d": arguments = {"idim", "odim", "kernel_size"} if net_part == "decoder": raise ValueError("Decoder does not support 'conv1d.'") else: raise NotImplementedError( "Wrong type. Currently supported: " "causal-conv1d, conformer, conv1d or transformer." ) if not arguments.issubset(block): raise ValueError( "%s in %s in position %d: Expected block arguments : %s." " See tutorial page for more information." % (block_type, net_part, num_block, arguments) ) if block_type in ("transformer", "conformer"): block_io = (block["d_hidden"], block["d_hidden"]) else: block_io = (block["idim"], block["odim"]) return block_io
18,219
def _colorama(*args, **kwargs): """Temporarily enable colorama.""" colorama.init(*args, **kwargs) try: yield finally: colorama.deinit()
18,220
def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments tp: True positives (nparray, nx1 or nx10). conf: Objectness value from 0-1 (nparray). pred_cls: Predicted object classes (nparray). target_cls: True object classes (nparray). plot: Plot precision-recall curve at mAP@0.5 save_dir: Plot save directory # Returns The average precision as computed in py-faster-rcnn. """ # Sort by objectness i = np.argsort(-conf) tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] # Find unique classes unique_classes = np.unique(target_cls) nc = unique_classes.shape[0] # number of classes, number of detections # Create Precision-Recall curve and compute AP for each class px, py = np.linspace(0, 1, 1000), [] # for plotting ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) for ci, c in enumerate(unique_classes): i = pred_cls == c n_l = (target_cls == c).sum() # number of labels n_p = i.sum() # number of predictions if n_p == 0 or n_l == 0: print("n_p: n_l:", n_p, n_l, flush=True) continue else: # Accumulate FPs and TPs fpc = (1 - tp[i]).cumsum(0) tpc = tp[i].cumsum(0) # Recall recall = tpc / (n_l + 1e-16) # recall curve #print("recall: ", recall, flush=True) #print("recall.shape: ", recall.shape, flush=True) r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases # Precision precision = tpc / (tpc + fpc) # precision curve #print("precision: ", precision, flush=True) #print("precision.shape: ", precision.shape, flush=True) p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score # AP from recall-precision curve for j in range(tp.shape[1]): ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) if plot and j == 0: py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 # Compute F1 (harmonic mean of precision and recall) f1 = 2 * p * r / (p + r + 1e-16) if plot: plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') i = f1.mean(0).argmax() # max F1 index return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')
18,221
def frame_aligned_point_error( pred_frames: r3.Rigids, target_frames: r3.Rigids, frames_mask: paddle.Tensor, pred_positions: r3.Vecs, target_positions: r3.Vecs, positions_mask: paddle.Tensor, length_scale: float, l1_clamp_distance: Optional[float] = None, epsilon=1e-4) -> paddle.Tensor: """Measure point error under different alignments. Jumper et al. (2021) Suppl. Alg. 28 "computeFAPE" Computes error between two structures with B points under A alignments derived from the given pairs of frames. Args: pred_frames: num_frames reference frames for 'pred_positions'. target_frames: num_frames reference frames for 'target_positions'. frames_mask: Mask for frame pairs to use. pred_positions: num_positions predicted positions of the structure. target_positions: num_positions target positions of the structure. positions_mask: Mask on which positions to score. length_scale: length scale to divide loss by. l1_clamp_distance: Distance cutoff on error beyond which gradients will be zero. epsilon: small value used to regularize denominator for masked average. Returns: Masked Frame Aligned Point Error. """ def unsqueeze_rigids(rigid, axis=-1): """add an axis in the axis of rot.xx and trans.x""" if axis < 0: axis_t = axis - 1 axis_r = axis - 2 else: axis_t = axis axis_r = axis rotation = paddle.unsqueeze(rigid.rot.rotation, axis=axis_r) translation = paddle.unsqueeze(rigid.trans.translation, axis=axis_t) return r3.Rigids(rot=r3.Rots(rotation), trans=r3.Vecs(translation)) def unsqueeze_vecs(vecs, axis=-1): """add an axis in the axis of rot.xx and trans.x""" if axis < 0: axis_t = axis - 1 else: axis_t = axis translation = paddle.unsqueeze(vecs.translation, axis=axis_t) return r3.Vecs(translation) # Compute array of predicted positions in the predicted frames. # r3.Vecs (num_frames, num_positions) local_pred_pos = r3.rigids_mul_vecs( unsqueeze_rigids(r3.invert_rigids(pred_frames)), unsqueeze_vecs(pred_positions, axis=1)) # Compute array of target positions in the target frames. # r3.Vecs (num_frames, num_positions) local_target_pos = r3.rigids_mul_vecs( unsqueeze_rigids(r3.invert_rigids(target_frames)), unsqueeze_vecs(target_positions, axis=1)) # Compute errors between the structures. # paddle.Tensor (num_frames, num_positions) error_dist = paddle.sqrt(r3.vecs_squared_distance(local_pred_pos, local_target_pos) + epsilon) if l1_clamp_distance: error_dist = paddle.clip(error_dist, min=0, max=l1_clamp_distance) normed_error = error_dist / length_scale normed_error *= paddle.unsqueeze(frames_mask, axis=-1) normed_error *= paddle.unsqueeze(positions_mask, axis=-2) normalization_factor = ( paddle.sum(frames_mask, axis=-1) * paddle.sum(positions_mask, axis=-1)) return (paddle.sum(normed_error, axis=[-2, -1]) / (epsilon + normalization_factor))
18,222
def remove_app_restriction_request(machine_id, comment): """Enable execution of any application on the machine. Args: machine_id (str): Machine ID comment (str): Comment to associate with the action Notes: Machine action is a collection of actions you can apply on the machine, for more info https://docs.microsoft.com/en-us/windows/security/threat-protection/microsoft-defender-atp/machineaction Returns: dict. Machine action """ cmd_url = '/machines/{}/unrestrictCodeExecution'.format(machine_id) json = { 'Comment': comment } response = http_request('POST', cmd_url, json=json) return response
18,223
def fix_bond_lengths( dist_mat: torch.Tensor, bond_lengths: torch.Tensor, delim: int = None, delim_value: float = ARBITRARILY_LARGE_VALUE) -> torch.Tensor: """ Replace one-offset diagonal entries with ideal bond lengths """ mat_len = dist_mat.shape[1] bond_lengths = torch.cat([bond_lengths] * (mat_len // 3))[:mat_len - 1] dist_mat[1:, :-1][torch.eye(mat_len - 1) == 1] = bond_lengths dist_mat[:-1, 1:][torch.eye(mat_len - 1) == 1] = bond_lengths # Set chain break distance to arbitrarily large value for replacement by F-W algorithm if delim is not None: dist_mat[delim * 3 + 2, (delim + 1) * 3] = delim_value dist_mat[(delim + 1) * 3, delim * 3 + 2] = delim_value return dist_mat
18,224
def add_problem(report, problem, description, ack): """Add problem dict to report""" report["Problems"][problem] = {} report["Problems"][problem]["Description"] = description report["Problems"][problem]["Acknowledged"] = ack
18,225
def CommandToString(command): """Returns quoted command that can be run in bash shell.""" return ' '.join(cmd_helper.SingleQuote(c) for c in command)
18,226
def sample_trilinear(t, coords, img_h=128, img_w=128): """ Samples noise octaves in one shot :param t: noise cube [n_octaves, noise_res, noise_res, noise_res] (same noise foreach sample in batch) :param coords: octave-transformed sampling positions [bs, n_octaves, 3, img_h*img_w] :param img_h: height of image to synthesize :param img_w: width of image to synthesize :return: sampled noise octaves [bs, n_octaves, img_h, img_w] """ # in- and output dimensions n_octaves, noise_res = t.get_shape().as_list()[:2] bs = coords.get_shape().as_list()[0] # all contributing source coordinates (interpolation endpoints) x0 = tf.floor(coords[:, :, 0, :]) x1 = x0 + 1 y0 = tf.floor(coords[:, :, 1, :]) y1 = y0 + 1 z0 = tf.floor(coords[:, :, 2, :]) z1 = z0 + 1 # interpolation weights w_x = coords[:, :, 0, :] - x0 w_y = coords[:, :, 1, :] - y0 w_z = coords[:, :, 2, :] - z0 # modulo for out-of-bound indices x0 = tf.floormod(x0, tf.ones_like(x0) * noise_res) x1 = tf.floormod(x1, tf.ones_like(x1) * noise_res) y0 = tf.floormod(y0, tf.ones_like(y0) * noise_res) y1 = tf.floormod(y1, tf.ones_like(y1) * noise_res) z0 = tf.floormod(z0, tf.ones_like(z0) * noise_res) z1 = tf.floormod(z1, tf.ones_like(z1) * noise_res) # for mem efficiency we flatten voxels s.t. we need only one index per element instead of 3 t = tf.reshape(t, [n_octaves, noise_res**3]) # index arrays (in flattened voxel array) idx_x0_y0_z0 = tf.cast(y0 * noise_res + x0 * noise_res**2 + z0, tf.int32) idx_x0_y0_z1 = tf.cast(y0 * noise_res + x0 * noise_res**2 + z1, tf.int32) idx_x0_y1_z0 = tf.cast(y1 * noise_res + x0 * noise_res**2 + z0, tf.int32) idx_x0_y1_z1 = tf.cast(y1 * noise_res + x0 * noise_res**2 + z1, tf.int32) idx_x1_y0_z0 = tf.cast(y0 * noise_res + x1 * noise_res**2 + z0, tf.int32) idx_x1_y0_z1 = tf.cast(y0 * noise_res + x1 * noise_res**2 + z1, tf.int32) idx_x1_y1_z0 = tf.cast(y1 * noise_res + x1 * noise_res**2 + z0, tf.int32) idx_x1_y1_z1 = tf.cast(y1 * noise_res + x1 * noise_res**2 + z1, tf.int32) def _gather(idx): # TODO: not quite efficient. ;) out = [] for i in range(n_octaves): g = tf.gather(t[i], idx[:, i, :], axis=0, batch_dims=1) out.append(tf.expand_dims(g, 1)) return tf.concat(out, 1) # gather contributing samples --> now 2D! val_x0_y0_z0 = _gather(idx_x0_y0_z0) val_x0_y0_z1 = _gather(idx_x0_y0_z1) val_x0_y1_z0 = _gather(idx_x0_y1_z0) val_x0_y1_z1 = _gather(idx_x0_y1_z1) val_x1_y0_z0 = _gather(idx_x1_y0_z0) val_x1_y0_z1 = _gather(idx_x1_y0_z1) val_x1_y1_z0 = _gather(idx_x1_y1_z0) val_x1_y1_z1 = _gather(idx_x1_y1_z1) # interpolate along z ... c_00 = val_x0_y0_z0 * (1.0 - w_z) + val_x0_y0_z1 * w_z c_01 = val_x0_y1_z0 * (1.0 - w_z) + val_x0_y1_z1 * w_z c_10 = val_x1_y0_z0 * (1.0 - w_z) + val_x1_y0_z1 * w_z c_11 = val_x1_y1_z0 * (1.0 - w_z) + val_x1_y1_z1 * w_z # ... along y ... c_0 = c_00 * (1.0 - w_y) + c_01 * w_y c_1 = c_10 * (1.0 - w_y) + c_11 * w_y # ... and along x c = c_0 * (1.0 - w_x) + c_1 * w_x # reshape c = tf.reshape(c, [bs, n_octaves, img_h, img_w]) return c
18,227
def file_capture(pcap_files, bpf, out_file, count=0, dump=False): """ :param dump: Dump to stdout if true, does not send packets :type dump: bool :type count: int :type out_file: str :type bpf: str :type pcap_files: List[str] """ # try: # es = None # if node is not None: # es = Elasticsearch(node) logging.debug('Loading packet capture file(s)') for pcap_file in pcap_files: logging.debug("Pcap file is: %s", pcap_file) capture = FileCapture(pcap_file, bpf, out_file) if not dump: decode_packets(capture, count) else: dump_packets(capture, count)
18,228
def version(output): """ `git --version` > git version 1.8.1.1 """ output = output.rstrip() words = re.split('\s+', output, 3) if not words or words[0] != 'git' or words[1] != 'version': raise WrongOutputError() version = words[2] parts = version.split('.') try: major = int(parts[0]) if len(parts) > 0 else None except ValueError: major = None try: minor = int(parts[1]) if len(parts) > 1 else None except ValueError: minor = None return Version(version, parts, major, minor)
18,229
def set_up_s3_encryption_configuration(kms_arn=None): """ Use the default SSE-S3 configuration for the journal export if a KMS key ARN was not given. :type kms_arn: str :param kms_arn: The Amazon Resource Name to encrypt. :rtype: dict :return: The encryption configuration for JournalS3Export. """ if kms_arn is None: return {'ObjectEncryptionType': 'SSE_S3'} return {'ObjectEncryptionType': {'S3ObjectEncryptionType': 'SSE_KMS', 'KmsKeyArn': kms_arn}}
18,230
def search(dataset, node, aoi, start_date, end_date, lng, lat, dist, lower_left, upper_right, where, geojson, extended, api_key): """ Search for images. """ node = get_node(dataset, node) if aoi == "-": src = click.open_file('-') if not src.isatty(): lines = src.readlines() if len(lines) > 0: aoi = json.loads(''.join([ line.strip() for line in lines ])) bbox = map(get_bbox, aoi.get('features') or [aoi])[0] lower_left = bbox[0:2] upper_right = bbox[2:4] if where: # Query the dataset fields endpoint for queryable fields resp = api.dataset_fields(dataset, node) def format_fieldname(s): return ''.join(c for c in s if c.isalnum()).lower() field_lut = { format_fieldname(field['name']): field['fieldId'] for field in resp['data'] } where = { field_lut[format_fieldname(k)]: v for k, v in where if format_fieldname(k) in field_lut } if lower_left: lower_left = dict(zip(['longitude', 'latitude'], lower_left)) upper_right = dict(zip(['longitude', 'latitude'], upper_right)) result = api.search(dataset, node, lat=lat, lng=lng, distance=dist, ll=lower_left, ur=upper_right, start_date=start_date, end_date=end_date, where=where, extended=extended, api_key=api_key) if geojson: result = to_geojson(result) print(json.dumps(result))
18,231
def _make_rnn_cell(spec: RNNSpec) -> Callable[[], tf.nn.rnn_cell.RNNCell]: """Return the graph template for creating RNN cells.""" return RNN_CELL_TYPES[spec.cell_type](spec.size)
18,232
def swap(b, h, k): """ Procedure that swaps b[h] and b[k] in b Parameter b: The list to modify Precondition: b is a mutable list Parameter h: The first position to swap Precondition: h is an int and a valid position in the list Parameter k: The second position to swap Precondition: k is an int and a valid position in the list """ temp = b[h] b[h] = b[k] b[k] = temp
18,233
def run_baselines(env, seed, log_dir): """Create baselines model and training. Replace the ppo and its training with the algorithm you want to run. Args: env (gym.Env): Environment of the task. seed (int): Random seed for the trial. log_dir (str): Log dir path. Returns: str: The log file path. """ seed = seed + 1000000 set_global_seeds(seed) env.seed(seed) # Set up logger for baselines configure(dir=log_dir, format_strs=['stdout', 'log', 'csv', 'tensorboard']) baselines_logger.info('seed={}, logdir={}'.format( seed, baselines_logger.get_dir())) env = DummyVecEnv([ lambda: bench.Monitor( env, baselines_logger.get_dir(), allow_early_resets=True) ]) ddpg.learn(network='mlp', env=env, nb_epochs=params['n_epochs'], nb_epoch_cycles=params['steps_per_epoch'], normalize_observations=False, critic_l2_reg=0, actor_lr=params['policy_lr'], critic_lr=params['qf_lr'], gamma=params['discount'], nb_train_steps=params['n_train_steps'], nb_rollout_steps=params['n_rollout_steps'], nb_eval_steps=100) return osp.join(log_dir, 'progress.csv')
18,234
def dataset_str2float(dataset, column): """ Converts a dataset column's values from string to float """ for row in dataset: row[column] = float(row[column].strip())
18,235
def SolveRcpsp(problem, proto_file, params): """Parse and solve a given RCPSP problem in proto format.""" PrintProblemStatistics(problem) # Create the model. model = cp_model.CpModel() num_tasks = len(problem.tasks) num_resources = len(problem.resources) all_active_tasks = range(1, num_tasks - 1) all_resources = range(num_resources) horizon = problem.deadline if problem.deadline != -1 else problem.horizon if FLAGS.horizon > 0: horizon = FLAGS.horizon if horizon == -1: # Naive computation. horizon = sum(max(r.duration for r in t.recipes) for t in problem.tasks) if problem.is_rcpsp_max: for t in problem.tasks: for sd in t.successor_delays: for rd in sd.recipe_delays: for d in rd.min_delays: horizon += abs(d) print(f' - horizon = {horizon}') # Containers used to build resources. intervals_per_resource = collections.defaultdict(list) demands_per_resource = collections.defaultdict(list) presences_per_resource = collections.defaultdict(list) starts_per_resource = collections.defaultdict(list) # Starts and ends for each task (shared between all alternatives) task_starts = {} task_ends = {} # Containers for per-recipe per task alternatives variables. presences_per_task = collections.defaultdict(list) durations_per_task = collections.defaultdict(list) one = model.NewConstant(1) # Create tasks variables. for t in all_active_tasks: task = problem.tasks[t] if len(task.recipes) == 1: # Create main and unique interval. recipe = task.recipes[0] task_starts[t] = model.NewIntVar(0, horizon, f'start_of_task_{t}') task_ends[t] = model.NewIntVar(0, horizon, f'end_of_task_{t}') interval = model.NewIntervalVar(task_starts[t], recipe.duration, task_ends[t], f'interval_{t}') # Store as a single alternative for later. presences_per_task[t].append(one) durations_per_task[t].append(recipe.duration) # Register the interval in resources specified by the demands. for i in range(len(recipe.demands)): demand = recipe.demands[i] res = recipe.resources[i] demands_per_resource[res].append(demand) if problem.resources[res].renewable: intervals_per_resource[res].append(interval) else: starts_per_resource[res].append(task_starts[t]) presences_per_resource[res].append(1) else: # Multiple alternative recipes. all_recipes = range(len(task.recipes)) start = model.NewIntVar(0, horizon, f'start_of_task_{t}') end = model.NewIntVar(0, horizon, f'end_of_task_{t}') # Store for precedences. task_starts[t] = start task_ends[t] = end # Create one optional interval per recipe. for r in all_recipes: recipe = task.recipes[r] is_present = model.NewBoolVar(f'is_present_{t}_{r}') interval = model.NewOptionalIntervalVar(start, recipe.duration, end, is_present, f'interval_{t}_{r}') # Store alternative variables. presences_per_task[t].append(is_present) durations_per_task[t].append(recipe.duration) # Register the interval in resources specified by the demands. for i in range(len(recipe.demands)): demand = recipe.demands[i] res = recipe.resources[i] demands_per_resource[res].append(demand) if problem.resources[res].renewable: intervals_per_resource[res].append(interval) else: starts_per_resource[res].append(start) presences_per_resource[res].append(is_present) # Exactly one alternative must be performed. model.Add(sum(presences_per_task[t]) == 1) # linear encoding of the duration. 
min_duration = min(durations_per_task[t]) max_duration = max(durations_per_task[t]) shifted = [x - min_duration for x in durations_per_task[t]] duration = model.NewIntVar(min_duration, max_duration, f'duration_of_task_{t}') model.Add( duration == min_duration + cp_model.LinearExpr.ScalProd(presences_per_task[t], shifted)) # We do not create a 'main' interval. Instead, we link start, end, and # duration. model.Add(start + duration == end) # Create makespan variable makespan = model.NewIntVar(0, horizon, 'makespan') interval_makespan = model.NewIntervalVar( makespan, model.NewIntVar(1, horizon, 'interval_makespan_size'), model.NewConstant(horizon + 1), 'interval_makespan') # Add precedences. if problem.is_rcpsp_max: # In RCPSP/Max problem, precedences are given and max delay (possible # negative) between the starts of two tasks. for task_id in all_active_tasks: task = problem.tasks[task_id] num_modes = len(task.recipes) for successor_index in range(len(task.successors)): next_id = task.successors[successor_index] delay_matrix = task.successor_delays[successor_index] num_next_modes = len(problem.tasks[next_id].recipes) for m1 in range(num_modes): s1 = task_starts[task_id] p1 = presences_per_task[task_id][m1] if next_id == num_tasks - 1: delay = delay_matrix.recipe_delays[m1].min_delays[0] model.Add(s1 + delay <= makespan).OnlyEnforceIf(p1) else: for m2 in range(num_next_modes): delay = delay_matrix.recipe_delays[m1].min_delays[ m2] s2 = task_starts[next_id] p2 = presences_per_task[next_id][m2] model.Add(s1 + delay <= s2).OnlyEnforceIf([p1, p2]) else: # Normal dependencies (task ends before the start of successors). for t in all_active_tasks: for n in problem.tasks[t].successors: if n == num_tasks - 1: model.Add(task_ends[t] <= makespan) else: model.Add(task_ends[t] <= task_starts[n]) # Containers for resource investment problems. capacities = [] # Capacity variables for all resources. max_cost = 0 # Upper bound on the investment cost. # Create resources. for r in all_resources: resource = problem.resources[r] c = resource.max_capacity if c == -1: c = sum(demands_per_resource[r]) if problem.is_resource_investment: # RIP problems have only renewable resources. capacity = model.NewIntVar(0, c, f'capacity_of_{r}') model.AddCumulative(intervals_per_resource[r], demands_per_resource[r], capacity) capacities.append(capacity) max_cost += c * resource.unit_cost elif resource.renewable: if intervals_per_resource[r]: if FLAGS.use_interval_makespan: model.AddCumulative( intervals_per_resource[r] + [interval_makespan], demands_per_resource[r] + [c], c) else: model.AddCumulative(intervals_per_resource[r], demands_per_resource[r], c) elif presences_per_resource[r]: # Non empty non renewable resource. if problem.is_consumer_producer: model.AddReservoirConstraint(starts_per_resource[r], demands_per_resource[r], resource.min_capacity, resource.max_capacity) else: model.Add( sum(presences_per_resource[r][i] * demands_per_resource[r][i] for i in range(len(presences_per_resource[r]))) <= c) # Objective. if problem.is_resource_investment: objective = model.NewIntVar(0, max_cost, 'capacity_costs') model.Add(objective == sum(problem.resources[i].unit_cost * capacities[i] for i in range(len(capacities)))) else: objective = makespan model.Minimize(objective) if proto_file: print(f'Writing proto to{proto_file}') with open(proto_file, 'w') as text_file: text_file.write(str(model)) # Solve model. 
solver = cp_model.CpSolver() if params: text_format.Parse(params, solver.parameters) solver.parameters.log_search_progress = True solver.Solve(model)
18,236
def load_stl(): """Loads the STL-10 dataset from config.STL10_PATH or downloads it if necessary. :return: (x_train, y_train), (x_test, y_test), min, max :rtype: tuple of numpy.ndarray), (tuple of numpy.ndarray), float, float """ from config import STL10_PATH min_, max_ = 0., 1. # Download and extract data if needed path = data_utils.get_file('stl10_binary', cache_subdir=STL10_PATH, untar=True, origin='https://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz') with open(os.path.join(path, 'train_X.bin'), 'rb') as f: x_train = np.fromfile(f, dtype=np.uint8) x_train = np.reshape(x_train, (-1, 3, 96, 96)) with open(os.path.join(path, 'test_X.bin'), 'rb') as f: x_test = np.fromfile(f, dtype=np.uint8) x_test = np.reshape(x_test, (-1, 3, 96, 96)) if k.image_data_format() == 'channels_last': x_train = x_train.transpose(0, 2, 3, 1) x_test = x_test.transpose(0, 2, 3, 1) with open(os.path.join(path, 'train_y.bin'), 'rb') as f: y_train = np.fromfile(f, dtype=np.uint8) y_train -= 1 with open(os.path.join(path, 'test_y.bin'), 'rb') as f: y_test = np.fromfile(f, dtype=np.uint8) y_test -= 1 x_train, y_train = preprocess(x_train, y_train) x_test, y_test = preprocess(x_test, y_test) return (x_train, y_train), (x_test, y_test), min_, max_
18,237
def PolyCurveCount(curve_id, segment_index=-1): """Returns the number of curve segments that make up a polycurve"""
18,238
def count_transitions(hypno): """ return the count for all possible transitions """ possible_transitions = [(0,1), (0,2), (0,4), # W -> S1, S2, REM (1,2), (1,0), (1,3), # S1 -> S2, W, SWS (2,0), (2,1), (2,3), (2,4), # S2 -> W, S1, SWS, REM (3,0), (3,2), # SWS -> W, S2 (4,0), (4,1), (4,2)] # REM -> W, S1, S2 counts = [] for trans in possible_transitions: counts += [transition_index(hypno, trans)] return counts
18,239
def mu_ref_normal_sampler_tridiag(loc=0.0, scale=1.0, beta=2, size=10, random_state=None): """Implementation of the tridiagonal model to sample from .. math:: \\Delta(x_{1}, \\dots, x_{N})^{\\beta} \\prod_{n=1}^{N} \\exp(-\\frac{(x_i-\\mu)^2}{2\\sigma^2} ) dx_i .. seealso:: :cite:`DuEd02` II-C """ rng = check_random_state(random_state) if not (beta > 0): raise ValueError('`beta` must be positive. Given: {}'.format(beta)) # beta/2*[N-1, N-2, ..., 1] b_2_Ni = 0.5 * beta * np.arange(size - 1, 0, step=-1) alpha_coef = rng.normal(loc=loc, scale=scale, size=size) beta_coef = rng.gamma(shape=b_2_Ni, scale=scale**2) return la.eigvalsh_tridiagonal(alpha_coef, np.sqrt(beta_coef))
18,240
def view_request_headers(client, args): """ View the headers of the request Usage: view_request_headers <reqid(s)> """ if not args: raise CommandError("Request id is required") reqs = load_reqlist(client, args[0], headers_only=True) for req in reqs: print('-- Request id=%s --------------------' % req.db_id) view_full_message(req, headers_only=True)
18,241
def get_ofi_info(hosts, supported=None, verbose=True): """Get the OFI provider information from the specified hosts. Args: hosts (NodeSet): hosts from which to gather the information supported (list, optional): list of supported providers when if provided will limit the inclusion to only those providers specified. Defaults to None. verbose (bool, optional): display command details. Defaults to True. Returns: dict: a dictionary of interface keys with a dictionary value of a comma-separated string of providers key with a NodeSet value where the providers where detected. """ task = run_task(hosts, "fi_info", verbose=verbose) if verbose: display_task(task) # Populate a dictionary of interfaces with a list of provider lists and NodSet of hosts on which # the providers were detected. providers = {} results = dict(task.iter_retcodes()) if 0 in results: for output, nodelist in task.iter_buffers(results[0]): output_lines = [line.decode("utf-8").rstrip(os.linesep) for line in output] nodeset = NodeSet.fromlist(nodelist) # Find all the provider and domain pairings. The fi_info output reports these on # separate lines when processing the re matches ensure each domain is preceded by a # provider. interface_providers = {} data = re.findall(r"(provider|domain):\s+([A-Za-z0-9;_+]+)", "\n".join(output_lines)) while data: provider = list(data.pop(0)) if provider[0] == "provider" and data[0][0] == "domain": provider.pop(0) domain = list(data.pop(0)) domain.pop(0) # A provider and domain must be specified if not provider or not domain: continue # Add 'ofi+' to the provider provider = ["+".join(["ofi", item]) for item in provider] # Only include supported providers if a supported list is provided if supported and provider[0] not in supported: continue if domain[0] not in interface_providers: interface_providers[domain[0]] = set() interface_providers[domain[0]].update(provider) for interface, provider_set in interface_providers.items(): if interface not in providers: providers[interface] = {} provider_key = ",".join(list(provider_set)) if provider_key not in providers[interface]: providers[interface][provider_key] = NodeSet() providers[interface][provider_key].update(nodeset) return providers
18,242
def get_pokemon(name:str) -> dict: """ Looks up the pokémon with the given name in the database and builds a dictionary with its basic information. Parameters: name(str): Name of the pokémon to look up Returns: Dictionary with the pokémon's basic information and its evolutions. """ try: p = Pokemon.objects.get(name=name) pokemon = { "name": p.name, "id": p.id, "weight": p.weight, "height": p.height, "stats": [], "evolutions": [] } stats = PokemonStat.objects.filter(pokemon_name=p) for stat in stats: pokemon["stats"].append({"stat": stat.stat_id, "base": stat.base}) evolutionChain = PokemonEvolution.objects.get(pokemon=p) evolutionId = evolutionChain.evolution_chain position = evolutionChain.position chain = PokemonEvolution.objects.filter(evolution_chain=evolutionId) for evolution in chain: if evolution.position > position: pokemon["evolutions"].append({"name": evolution.pokemon.name, "id": evolution.pokemon.id, "evolution_type": "Evolution"}) elif evolution.position < position: pokemon["evolutions"].append({"name": evolution.pokemon.name, "id": evolution.pokemon.id, "evolution_type": "Preevolution"}) return pokemon except ObjectDoesNotExist: return None
18,243
def run_and_check_command_in_hadoop(hadoop_node_config, command, cmd_work_dir=None, log_line_processor=None): """Run the given command on the Hadoop cluster. Args: hadoop_node_config - where to SSH to and run this command. command - Command to run. cmd_work_dir - Path on hadoop cluster to run from. log_line_processor - Processor to process the command stdoutput. Raises: CalledProcessError if it fails. """ if not log_line_processor: run_command_in_hadoop(hadoop_node_config, command, cmd_work_dir, check_output=True) else: command_output = run_command_in_hadoop(hadoop_node_config, command, cmd_work_dir) out_filter = output_filter.OutputFilter(log_line_processor, output=sys.stderr) out_filter.read_and_output(command_output)
18,244
def create_cluster(module, switch, name, node1, node2): """ Method to create a cluster between two switches. :param module: The Ansible module to fetch input parameters. :param switch: Name of the local switch. :param name: The name of the cluster to create. :param node1: First node of the cluster. :param node2: Second node of the cluster. :return: String describing if cluster got created or if it's already exists. """ global CHANGED_FLAG cli = pn_cli(module) clicopy = cli cli += ' switch %s cluster-show format name no-show-headers ' % node1 cluster_list = run_cli(module, cli).split() if name not in cluster_list: cli = clicopy cli += ' switch %s cluster-create name %s ' % (switch, name) cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2) if 'Success' in run_cli(module, cli): CHANGED_FLAG.append(True) return ' %s: %s created successfully \n' % (switch, name) else: return ' %s: %s already exists \n' % (switch, name)
18,245
def random_terminals_for_primitive( primitive_set: dict, primitive: Primitive ) -> List[Terminal]: """ Return a list with a random Terminal for each required input to Primitive. """ return [random.choice(primitive_set[term_type]) for term_type in primitive.input]
18,246
def _specie_is_intermediate_old( specie_id: str, specie_dict: dict = None, ) -> bool: """Detect whether a specie should be considered an intermediate compound. FIXME, this needs to be refined so that we don't rely on the specie ID. :param specie_id: specie ID :type: str :param specie_dict: dictionary about the specie :type specie_dict: dict :return: true if it is, otherwise false :rtype: bool """ if specie_id.startswith('CMPD_'): return True return False
18,247
def where_from_pos(text, pos): """ Format a textual representation of the given position in the text. """ return "%d:%d" % (line_from_pos(text, pos), col_from_pos(text, pos))
18,248
def test(ctx, unit=False, installed=False, style=False, cover=False): """run tests (unit, style)""" if not (unit or installed or style or cover): sys.exit("Test task needs --unit, --style or --cover") if unit: sys.exit(pytest_wrapper()) if style: flake8_wrapper() # exits on fail black_wrapper(False) # always exits if installed: for p in list(sys.path): if p in ("", "."): sys.path.remove(p) elif p == ROOT_DIR or p == os.path.dirname(ROOT_DIR): sys.path.remove(p) os.environ["IMAGEIO_NO_INTERNET"] = "1" sys.exit(pytest_wrapper()) if cover: res = pytest_wrapper(cov_report="html") if res: raise RuntimeError("Cannot show coverage, tests failed.") print("Launching browser.") fname = os.path.join(os.getcwd(), "htmlcov", "index.html") if not os.path.isfile(fname): raise IOError("Generated file not found: %s" % fname) webbrowser.open_new_tab(fname)
18,249
def generateHuffmanCodes (huffsize): """ Calculate the huffman code of each length. """ huffcode = [] k = 0 code = 0 # Magic for i in range (len (huffsize)): si = huffsize[i] for k in range (si): huffcode.append ((i + 1, code)) code += 1 code <<= 1 return huffcode
18,250
def test_ast_generation(): """ >>> test_ast_generation() """ m = load_testschema1() # Nonterminals assert issubclass(m.root, m.AST) assert issubclass(m.expr, m.AST) # Products assert issubclass(m.myproduct, m.AST) # Terminals assert issubclass(m.Ham, m.root) assert issubclass(m.Foo, m.root) assert issubclass(m.SomeExpr, m.expr) assert issubclass(m.Bar, m.expr)
18,251
def one_hot_df(df, cat_col_list): """ Make one hot encoding on categoric columns. Returns a dataframe for the categoric columns provided. ------------------------- inputs - df: original input DataFrame - cat_col_list: list of categorical columns to encode. outputs - df_hot: one hot encoded subset of the original DataFrame. """ df_hot = pd.DataFrame() for col in cat_col_list: encoded_matrix = col_encoding(df, col) df_ = pd.DataFrame(encoded_matrix, columns = [col+ ' ' + str(int(i))\ for i in range(encoded_matrix.shape[1])]) df_hot = pd.concat([df_hot, df_], axis = 1) return df_hot
18,252
async def load_users_by_id(user_ids: List[int]) -> List[Optional[User]]: """ Batch-loads users by their IDs. """ query = select(User).filter(User.id.in_(user_ids)) async with get_session() as session: result: Result = await session.execute(query) user_map: Dict[int, User] = {user.id: user for user in result.scalars()} return [user_map.get(user_id) for user_id in user_ids]
18,253
def create_timetravel_model(for_model): """ Returns the newly created timetravel model class for the model given. """ if for_model._meta.proxy: _tt_model = for_model._meta.concrete_model._tt_model for_model._tt_model = _tt_model for_model._meta._tt_model = _tt_model return opt = for_model._meta name = 'tt_%s' % opt.db_table class Meta: app_label = get_migration_app() db_table = name index_together = [[OK, VU]] verbose_name = name[:39] attrs = {'Meta': Meta, '_tt_is_timetravel_model': True, '__module__': for_model.__module__} fields = copy_fields(for_model) attrs.update(fields) for_model._tt_has_history = True ret = type(str(name), (Model,), attrs) for_model._tt_model = ret for_model._meta._tt_model = ret return ret
18,254
def public_route_server_has_read(server_id, user_id=None): """ check if current user has read access to the given server """ user = user_id and User.query.get_or_404(user_id) or current_user server = DockerServer.query.get_or_404(server_id) if server.has_group_read(user): return Response("read access", 200) abort(403)
18,255
def isValidPublicAddress(address: str) -> bool: """Check if address is a valid NEO address""" valid = False if len(address) == 34 and address[0] == 'A': try: base58.b58decode_check(address.encode()) valid = True except ValueError: # checksum mismatch valid = False return valid
18,256
def address_print(): """ Print the working directory :return:None """ print_line(70, "*") print("Where --> " + SOURCE_DIR) print_line(70, "*")
18,257
def cost_to_go_np(cost_seq, gamma_seq): """ Calculate (discounted) cost to go for given cost sequence """ # if np.any(gamma_seq == 0): # return cost_seq cost_seq = gamma_seq * cost_seq # discounted reward sequence cost_seq = np.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1, gamma, gamma**2 and so on]) cost_seq /= gamma_seq # un-scale it to get true discounted cost to go return cost_seq
18,258
def parse_args(): """ Parses the command line arguments. """ # Override epilog formatting OptionParser.format_epilog = lambda self, formatter: self.epilog parser = OptionParser(usage="usage: %prog -f secret.txt | --file secret.txt | --folder allmysecrets", epilog=EXAMPLES) parser.add_option("-p", "--password", dest="password", help="set password file for AES decryption") parser.add_option("-f", "--file", dest="file", help="encrypt/decrypt this file") parser.add_option("-F", "--folder", dest="folder", help="encrypt/decrypt all files in this folder") parser.add_option("--encrypt", action="store_true", dest="encrypt", help="encrypt file(s)") parser.add_option("--decrypt", action="store_true", dest="decrypt", help="decrypt file(s)") parser.add_option("--recursive", action="store_true", dest="recursive", help="encrypt/decrypt files in folder recursively") parser.formatter.store_option_strings(parser) parser.formatter.store_option_strings = lambda _: None for option, value in parser.formatter.option_strings.items(): value = re.sub(r"\A(-\w+) (\w+), (--[\w-]+=(\2))\Z", r"\g<1>/\g<3>", value) value = value.replace(", ", '/') if len(value) > MAX_HELP_OPTION_LENGTH: value = ("%%.%ds.." % (MAX_HELP_OPTION_LENGTH - parser.formatter.indent_increment)) % value parser.formatter.option_strings[option] = value args = parser.parse_args()[0] if not any((args.file, args.folder)): parser.error("Required argument is missing. Use '-h' for help.") if not any((args.encrypt, args.decrypt)): parser.error("Required argument is missing. Use '-h' for help.") if args.decrypt and not args.password: parser.error("Required password file is missing. Use '-h' for help.") return args
18,259
def x_to_ggsg(seq): """replace Xs with a Serine-Glycine linker (GGSG pattern) seq and return value are strings """ if "X" not in seq: return seq replacement = [] ggsg = _ggsg_generator() for aa in seq: if aa != "X": replacement.append(aa) # restart linker iterator for next stretch of Xs ggsg = _ggsg_generator() else: replacement.append(next(ggsg)) return "".join(replacement)
18,260
def gini_pairwise(idadf, target=None, features=None, ignore_indexer=True): """ Compute the conditional gini coefficients between a set of features and a set of target in an IdaDataFrame. Parameters ---------- idadf : IdaDataFrame target : str or list of str, optional A column or list of columns against to be used as target. Per default, consider all columns features : str or list of str, optional A column or list of columns to be used as features. Per default, consider all columns. ignore_indexer : bool, default: True Per default, ignore the column declared as indexer in idadf Returns ------- Pandas.DataFrame or Pandas.Series if only one target Notes ----- Input columns as target and features should be categorical, otherwise this measure does not make much sense. Examples -------- >>> idadf = IdaDataFrame(idadb, "IRIS") >>> gini_pairwise(idadf) """ # Check input target, features = _check_input(idadf, target, features, ignore_indexer) gini_dict = OrderedDict() length = len(idadf) for t in target: gini_dict[t] = OrderedDict() features_notarget = [x for x in features if (x != t)] for feature in features_notarget: if t not in gini_dict: gini_dict[t] = OrderedDict() query = ("SELECT SUM((POWER(c,2) - gini)/c)/%s FROM "+ "(SELECT SUM(POWER(count,2)) as gini, SUM(count) as c FROM "+ "(SELECT CAST(COUNT(*) AS FLOAT) AS count, \"%s\" FROM %s GROUP BY \"%s\",\"%s\") "+ "GROUP BY \"%s\")") query0 = query%(length, feature, idadf.name, t, feature, feature) gini_dict[t][feature] = idadf.ida_scalar_query(query0) result = pd.DataFrame(gini_dict).fillna(np.nan) if len(result.columns) > 1: order = [x for x in result.columns if x in features] + [x for x in features if x not in result.columns] result = result.reindex(order) result = result.dropna(axis=1, how="all") if len(result.columns) == 1: if len(result) == 1: result = result.iloc[0,0] else: result = result[result.columns[0]].copy() result.sort_values(ascending = True) else: result = result.fillna(0) return result
18,261
def init_isolated_80(): """ Real Name: b'init Isolated 80' Original Eqn: b'0' Units: b'person' Limits: (None, None) Type: constant b'' """ return 0
18,262
def xcom_api_setup(): """Instantiate api""" return XComApi(API_CLIENT)
18,263
def test_kl_normal_normal(): """Test Normal/Normal KL.""" dim = (5, 10) mu = np.zeros(dim, dtype=np.float32) std = 1.0 q = tf.distributions.Normal(mu, std) # Test 0 KL p = tf.distributions.Normal(mu, std) KL0 = kl_sum(q, p) # Test diff var std1 = 2.0 p = tf.distributions.Normal(mu, std1) KL1 = kl_sum(q, p) rKL1 = 0.5 * ((std / std1)**2 - 1 + np.log((std1 / std)**2)) * np.prod(dim) # Test diff mu mu1 = np.ones(dim, dtype=np.float32) p = tf.distributions.Normal(mu1, std) KL2 = kl_sum(q, p) rKL2 = 0.5 * (np.sum((mu1 - mu)**2) / std**2) tc = tf.test.TestCase() with tc.test_session(): kl0 = KL0.eval() assert np.isscalar(kl0) assert kl0 == 0. assert np.allclose(KL1.eval(), rKL1) assert np.allclose(KL2.eval(), rKL2)
18,264
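The reference values rKL1 and rKL2 above are instances of the closed-form Normal/Normal KL divergence, summed over the (5, 10) elements:

\mathrm{KL}\!\left(\mathcal{N}(\mu_0,\sigma_0^2)\,\middle\|\,\mathcal{N}(\mu_1,\sigma_1^2)\right)
    = \log\frac{\sigma_1}{\sigma_0}
    + \frac{\sigma_0^2 + (\mu_0 - \mu_1)^2}{2\sigma_1^2}
    - \frac{1}{2}

With equal means this reduces per element to the rKL1 expression, and with equal variances to the rKL2 expression.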
def zeros_tensor(*args, **kwargs):
    """Construct a tensor of a given shape with every entry equal to zero."""
    labels = kwargs.pop("labels", [])
    dtype = kwargs.pop("dtype", np.float)
    base_label = kwargs.pop("base_label", "i")
    return Tensor(np.zeros(*args, dtype=dtype), labels=labels,
                  base_label=base_label)
18,265
def reg_split_from(
    splitted_mappings: np.ndarray,
    splitted_sizes: np.ndarray,
    splitted_weights: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    When creating the regularization matrix of a source pixelization, this
    function assumes each source pixel has been split into a cross of four
    points (the size of which is based on the area of the source pixel). This
    cross of points represents points which together can evaluate the gradient
    of the pixelization's reconstructed values.

    This function takes each cross of points and determines the regularization
    weights of every point on the cross, to construct a regularization matrix
    based on the gradient of each pixel.

    The size of each cross depends on the Voronoi pixel area, thus this
    regularization scheme and its weights depend on the pixel area (there are
    larger weights for bigger pixels). This ensures that bigger pixels are
    regularized more.

    The number of pixel neighbors over which regularization is applied is
    4 * the total number of source pixels. This contrasts other regularization
    schemes, where the number of neighbors changes depending on, for example,
    the Voronoi mesh geometry. By having a fixed number of neighbors this
    removes stochasticity in the regularization that is applied to a solution.

    There are cases where a grid has over 100 neighbors, corresponding to
    extreme coordinate transformations. In such extreme cases, we raise a
    `exc.FitException`.

    Parameters
    ----------
    splitted_mappings
    splitted_sizes
    splitted_weights

    Returns
    -------
    """

    max_j = np.shape(splitted_weights)[1] - 1

    splitted_weights *= -1.0

    for i in range(len(splitted_mappings)):

        pixel_index = i // 4

        flag = 0

        for j in range(splitted_sizes[i]):

            if splitted_mappings[i][j] == pixel_index:
                splitted_weights[i][j] += 1.0
                flag = 1

            if j >= max_j:
                raise exc.PixelizationException(
                    "the number of Voronoi natural neighbours exceeds 100."
                )

        if flag == 0:
            splitted_mappings[i][j + 1] = pixel_index
            splitted_sizes[i] += 1
            splitted_weights[i][j + 1] = 1.0

    return splitted_mappings, splitted_sizes, splitted_weights
18,266
def get_initiator_IP(json_isessions):
    """ pull the IP from the host session """
    print("-" * 20 + " get_initiator started")
    for session in json_isessions['sessions']:
        session_array[session['initiatorIP']] = session['initiatorName']
    return session_array
18,267
def get_data_for_recent_jobs(recency_msec=DEFAULT_RECENCY_MSEC):
    """Get a list containing data about recent jobs.

    This list is arranged in descending order based on the time the job
    was enqueued. At most NUM_JOBS_IN_DASHBOARD_LIMIT job descriptions are
    returned.

    Args:
    - recency_msec: the threshold for a recent job, in milliseconds.
    """
    recent_job_models = job_models.JobModel.get_recent_jobs(
        NUM_JOBS_IN_DASHBOARD_LIMIT, recency_msec)
    return [_get_job_dict_from_job_model(model) for model in recent_job_models]
18,268
def test_installroot_sdist_with_extras(project: Project) -> None:
    """It installs the extra."""

    @nox.session
    def test(session: nox.sessions.Session) -> None:
        """Install the local package."""
        nox_poetry.installroot(
            session, distribution_format=nox_poetry.SDIST, extras=["pygments"]
        )

    run_nox_with_noxfile(project, [test], [nox, nox.sessions, nox_poetry])

    expected = [
        project.package,
        *project.dependencies,
        project.get_dependency("pygments"),
    ]
    packages = list_packages(project, test)

    assert set(expected) == set(packages)
18,269
def construct_outgoing_multicast_answers(answers: _AnswerWithAdditionalsType) -> DNSOutgoing:
    """Add answers and additionals to a DNSOutgoing."""
    out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, multicast=True)
    _add_answers_additionals(out, answers)
    return out
18,270
def load_analog_binary_v1(filename):
    """Load analog traces stored in the binary format by Logic 1.2.0+

    The format is documented at
    https://support.saleae.com/faq/technical-faq/data-export-format-analog-binary

    Returns (data, period) where data is a numpy array of 32-bit floats of
    shape (nchannels, nsamples) and period is the sampling period in seconds.
    """
    with open(filename, 'rb') as f:
        nsamples, nchannels, period = struct.unpack('<QId', f.read(20))
        if nchannels > 16:
            raise RuntimeError(f'Invalid nchannels={nchannels}. Are you sure this is binary analog data from v1.2.0+?')
        if period < 1 / 50e6 or period > 1:
            raise RuntimeError(f'Invalid period={period}. Are you sure this is binary analog data from v1.2.0+?')
        data = np.fromfile(f, dtype=np.dtype('<f'), count=nsamples * nchannels).reshape(nchannels, nsamples).astype('=f')
    return data, period
18,271
def setup_phantomjs():
    """Create and return a PhantomJS browser object."""
    try:
        # Setup capabilities for the PhantomJS browser
        phantomjs_capabilities = DesiredCapabilities.PHANTOMJS
        # Some basic creds to use against an HTTP Basic Auth prompt
        phantomjs_capabilities['phantomjs.page.settings.userName'] = 'none'
        phantomjs_capabilities['phantomjs.page.settings.password'] = 'none'
        # Flags to ignore SSL problems and get screenshots
        service_args = []
        service_args.append('--ignore-ssl-errors=true')
        service_args.append('--web-security=no')
        service_args.append('--ssl-protocol=any')
        # Create the PhantomJS browser and set the window size
        browser = webdriver.PhantomJS(desired_capabilities=phantomjs_capabilities, service_args=service_args)
        browser.set_window_size(1920, 1080)
    except Exception as error:
        click.secho("[!] Bad news: PhantomJS failed to load (not installed?), so activities \
requiring a web browser will be skipped.", fg="red")
        click.secho("L.. Details: {}".format(error), fg="red")
        browser = None
    return browser
18,272
def normal_pdf(x, mu, cov, log=True):
    """
    Calculate the probability density of Gaussian (Normal) distribution.

    Parameters
    ----------
    x : float, 1-D array_like (K, ), or 2-D array_like (K, N)
        The variable for calculating the probability density.
    mu : float or 1-D array_like, (K, )
        The mean of the Gaussian distribution.
    cov : float or 2-D array_like, (K, K)
        The variance or the covariance matrix of the Gaussian distribution.
    log : bool
        If true, the return value is at log scale.

    Returns
    -------
    pdf : numpy float
        The probability density of x.
        if N==1, return a float
        elif N>1, return an array
    """
    if len(np.array(mu).shape) == 0:
        x = np.array(x).reshape(-1, 1)
    elif len(np.array(x).shape) <= 1:
        x = np.array(x).reshape(1, -1)
    x = x - np.array(mu)
    N, K = x.shape
    if len(np.array(cov).shape) < 2:
        cov = np.array(cov).reshape(-1, 1)
    cov_inv = np.linalg.inv(cov)
    cov_det = np.linalg.det(cov)
    if cov_det <= 0:
        print("Warning: the det of covariance is not positive!")
        return None

    pdf_all = np.zeros(N)
    pdf_part1 = -(K * np.log(2 * np.pi) + np.log(cov_det)) / 2.0
    for i in range(N):
        pdf_all[i] = pdf_part1 - np.dot(np.dot(x[i, :], cov_inv), x[i, :]) / 2.0
    if log == False:
        pdf_all = np.exp(pdf_all)
    if N == 1:
        pdf_all = pdf_all[0]

    return pdf_all
18,273
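A quick sanity check of normal_pdf above (it only needs numpy imported as np, as the function itself does): the standard normal density at x = 0 is 1/sqrt(2*pi), roughly 0.3989.

p = normal_pdf(0.0, mu=0.0, cov=1.0, log=False)
print(round(float(p), 4))  # 0.3989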
def write_log(s, *args):
    """
    :undocumented: Writes to log.txt.
    """
    renpy.display.log.write(s, *args)
18,274
def aws() -> Generator[Mock, None, None]:
    """Mock our mqtt client wrapper"""
    with patch("edge.edge.CloudClient", autospec=True) as mock:
        yield mock.return_value
18,275
def get_quiz(id, user):
    """Get Quiz"""
    conn = sqlite3.connect(DBNAME)
    cursor = conn.cursor()
    if user == 'admin' or user == 'fabioja':
        cursor.execute(
            "SELECT id, release, expire, problem, tests, results, diagnosis, numb from QUIZ where id = {0}".format(id))
    else:
        cursor.execute("SELECT id, release, expire, problem, tests, results, diagnosis, numb from QUIZ where id = {0} and release < '{1}'".format(
            id, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    info = [reg for reg in cursor.fetchall()]
    conn.close()
    return info
18,276
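get_quiz above interpolates id and the timestamp directly into the SQL string, which is open to injection; a minimal sketch of the admin branch using a bound parameter instead, assuming the same DBNAME and QUIZ schema:

conn = sqlite3.connect(DBNAME)
cursor = conn.cursor()
cursor.execute(
    "SELECT id, release, expire, problem, tests, results, diagnosis, numb "
    "FROM QUIZ WHERE id = ?", (id,))  # sqlite3 substitutes the value safely
info = cursor.fetchall()
conn.close()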
def filesystem_entry(filesystem):
    """
    Filesystem tag {% filesystem_entry filesystem %} is used to display a single filesystem.

    Arguments
    ---------
    filesystem: filesystem object

    Returns
    -------
    A context which maps the filesystem object to filesystem.
    """
    return {'filesystem': filesystem}
18,277
def haversine(lat1, lon1, lat2, lon2, units='miles'):
    """
    Calculates arc length distance between two lat_lon points (must be in radians)
    lat2 and lon2 can be numpy arrays
    units can be 'miles' or 'km' (kilometers)
    """
    earth_radius = {'miles': 3959., 'km': 6371.}
    a = np.square(np.sin((lat2 - lat1)/2.)) + np.cos(lat1) * np.cos(lat2) * np.square(np.sin((lon2 - lon1)/2.))
    return 2 * earth_radius[units] * np.arcsin(np.sqrt(a))
18,278
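A small usage sketch for haversine above; the coordinates are converted to radians first, as the docstring requires (the New York/London figure is approximate):

import numpy as np

lat1, lon1 = np.radians([40.7128, -74.0060])  # New York
lat2, lon2 = np.radians([51.5074, -0.1278])   # London
print(haversine(lat1, lon1, lat2, lon2, units='km'))  # roughly 5570 km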
def OptionalDateField(description='', validators=None):
    """ A custom field that makes the DateField optional """
    # Copy rather than append to a shared mutable default list.
    validators = list(validators) if validators else []
    validators.append(Optional())
    field = DateField(description, validators)
    return field
18,279
def ShowAllPte(cmd_args=None):
    """ Prints out the physical address of the pte for all tasks """
    head_taskp = addressof(kern.globals.tasks)
    taskp = Cast(head_taskp.next, 'task *')
    while taskp != head_taskp:
        procp = Cast(taskp.bsd_info, 'proc *')
        out_str = "task = {:#x} pte = {:#x}\t".format(taskp, taskp.map.pmap.ttep)
        if procp != 0:
            out_str += "{:s}\n".format(procp.p_comm)
        else:
            out_str += "\n"
        print(out_str)
        taskp = Cast(taskp.tasks.next, 'struct task *')
18,280
def init_brats_metrics():
    """Initialize dict for BraTS Dice metrics"""
    metrics = {}
    metrics['ET'] = {'labels': [3]}
    metrics['TC'] = {'labels': [1, 3]}
    metrics['WT'] = {'labels': [1, 2, 3]}
    for _, value in metrics.items():
        value.update({'tp': 0, 'tot': 0})
    return metrics
18,281
def add_rse(rse, issuer, vo='def', deterministic=True, volatile=False, city=None, region_code=None,
            country_name=None, continent=None, time_zone=None, ISP=None,
            staging_area=False, rse_type=None, latitude=None, longitude=None, ASN=None,
            availability=None):
    """
    Creates a new Rucio Storage Element(RSE).

    :param rse: The RSE name.
    :param issuer: The issuer account.
    :param vo: The VO to act on.
    :param deterministic: Boolean to know if the pfn is generated deterministically.
    :param volatile: Boolean for RSE cache.
    :param city: City for the RSE.
    :param region_code: The region code for the RSE.
    :param country_name: The country.
    :param continent: The continent.
    :param time_zone: Timezone.
    :param staging_area: staging area.
    :param ISP: Internet service provider.
    :param rse_type: RSE type.
    :param latitude: Latitude coordinate of RSE.
    :param longitude: Longitude coordinate of RSE.
    :param ASN: Access service network.
    :param availability: Availability.
    """
    validate_schema(name='rse', obj=rse, vo=vo)
    kwargs = {'rse': rse}
    if not permission.has_permission(issuer=issuer, vo=vo, action='add_rse', kwargs=kwargs):
        raise exception.AccessDenied('Account %s can not add RSE' % (issuer))

    return rse_module.add_rse(rse, vo=vo, deterministic=deterministic, volatile=volatile, city=city,
                              region_code=region_code, country_name=country_name, staging_area=staging_area,
                              continent=continent, time_zone=time_zone, ISP=ISP, rse_type=rse_type,
                              latitude=latitude, longitude=longitude, ASN=ASN, availability=availability)
18,282
def multi_graph_partition(costs: Dict, probs: Dict, p_t: np.ndarray, idx2nodes: Dict, ot_hyperpara: Dict,
                          weights: Dict = None, predefine_barycenter: bool = False) -> \
        Tuple[List[Dict], List[Dict], List[Dict], Dict, np.ndarray]:
    """
    Achieve multi-graph partition via calculating Gromov-Wasserstein barycenter
    between the target graphs and a proposed one

    Args:
        costs: a dictionary of graphs {key: graph idx,
                                       value: (n_s, n_s) adjacency matrix of source graph}
        probs: a dictionary of graphs {key: graph idx,
                                       value: (n_s, 1) the distribution of source nodes}
        p_t: (n_t, 1) the distribution of target nodes
        idx2nodes: a dictionary of graphs {key: graph idx,
                                           value: a dictionary {key: idx of row in cost,
                                                                value: name of node}}
        ot_hyperpara: a dictionary of hyperparameters
        weights: a dictionary of graph {key: graph idx, value: the weight of the graph}
        predefine_barycenter: False: learn barycenter, True: use predefined barycenter

    Returns:
        sub_costs_cluster: a list (one entry per cluster) of dictionaries {key: graph idx, value: sub cost matrix}
        sub_probs_cluster: a list (one entry per cluster) of dictionaries {key: graph idx, value: sub distribution}
        sub_idx2nodes_cluster: a list (one entry per cluster) of dictionaries {key: graph idx,
                                                                               value: a dictionary mapping indices to nodes' names}
        trans: a dictionary {key: graph idx, value: an optimal transport between the graph and the barycenter}
        cost_t: the reference graph corresponding to the partition result
    """
    sub_costs_cluster = []
    sub_idx2nodes_cluster = []
    sub_probs_cluster = []

    sub_costs_all = {}
    sub_idx2nodes_all = {}
    sub_probs_all = {}

    if predefine_barycenter is True:
        cost_t = csr_matrix(np.diag(p_t[:, 0]))
        trans = {}
        for n in costs.keys():
            sub_costs_all[n], sub_probs_all[n], sub_idx2nodes_all[n], trans[n] = graph_partition(costs[n],
                                                                                                 probs[n],
                                                                                                 p_t,
                                                                                                 idx2nodes[n],
                                                                                                 ot_hyperpara)
    else:
        cost_t, trans, _ = Gwl.gromov_wasserstein_barycenter(costs, probs, p_t, ot_hyperpara, weights)
        for n in costs.keys():
            sub_costs, sub_probs, sub_idx2nodes = node_cluster_assignment(costs[n],
                                                                          trans[n],
                                                                          probs[n],
                                                                          p_t,
                                                                          idx2nodes[n])
            sub_costs_all[n] = sub_costs
            sub_idx2nodes_all[n] = sub_idx2nodes
            sub_probs_all[n] = sub_probs

    for i in range(p_t.shape[0]):
        sub_costs = {}
        sub_idx2nodes = {}
        sub_probs = {}
        for n in costs.keys():
            if i in sub_costs_all[n].keys():
                sub_costs[n] = sub_costs_all[n][i]
                sub_idx2nodes[n] = sub_idx2nodes_all[n][i]
                sub_probs[n] = sub_probs_all[n][i]
        sub_costs_cluster.append(sub_costs)
        sub_idx2nodes_cluster.append(sub_idx2nodes)
        sub_probs_cluster.append(sub_probs)

    return sub_costs_cluster, sub_probs_cluster, sub_idx2nodes_cluster, trans, cost_t
18,283
def TDataStd_BooleanArray_Set(*args):
    """
    * Finds or creates an attribute with the array.

    :param label:
    :type label: TDF_Label &
    :param lower:
    :type lower: int
    :param upper:
    :type upper: int
    :rtype: Handle_TDataStd_BooleanArray
    """
    return _TDataStd.TDataStd_BooleanArray_Set(*args)
18,284
def py_list_to_tcl_list(py_list):
    """ Convert Python list to Tcl list using Tcl interpreter.

    :param py_list: Python list.
    :type py_list: list
    :return: string representing the Tcl string equivalent to the Python list.
    """
    py_list_str = [str(s) for s in py_list]
    return tcl_str(tcl_interp_g.eval('split' + tcl_str('\t'.join(py_list_str)) + '\\t'))
18,285
def get_post_count(user):
    """
    Get number of posts published by the request user.

    Parameters
    ------------
    user: The request user

    Returns
    -------
    count: int
        The number of posts published by the request user.
    """
    count = Post.objects.filter(publisher=user).count()
    return count
18,286
def init_db():  # pragma: no cover
    """Initializes the DB"""
    database.init_db()
18,287
def test_dry_run(pytest_test_inst):
    """
    Test the dry_run() method returns the expected report skeleton.
    """
    result = pytest_test_inst.dry_run()
    report = result.report
    assert report == pytest_expected_data.EXPECTED_DRY_RUN_REPORT
18,288
def test_connect(ccid):
    """Check access (udev)."""
    power_off = ccid.power_off()[2]
    assert 0x81 == power_off[0]
    atr = ccid.power_on()
    assert 0x80 == atr[0]
    l = atr[1]
    assert [0x3B, 0x8C, 0x80, 0x01] == list(atr[10 : 10 + l])
18,289
def create_new_employee(employees):
    """ Create a new employee record with the employees dictionary

    Use the employee_sections dictionary template to create a new employee record.
    """
    subsidiary = input('Employee Subsidiary (SK, CZ):')
    employee_id = generate_employee_id(subsidiary, employees)
    employee = {}  # Storage for new employee
    print('Please, enter records for new employee ID: ' + employee_id)
    # Iterating over 'employee_sections'
    for section in employee_sections['<employee_id>']:
        # Inserting empty section
        employee[section] = {}
        for field in employee_sections['<employee_id>'][section]:
            _input = ''
            while not _input:
                _input = input(section + '/' + field + ': ')
                from config import employee_required_fields
                if not _input and field in employee_required_fields:
                    print('This field is required, please enter the value.')
                else:
                    employee[section][field] = _input
                    break
    print(employee)
    employees[employee_id] = employee
    print('Thank you, entry has been completed for ID: ' + employee_id)
    input('Press ENTER to continue')
    commit_changes(file_with_employees, str(employees))
    return employees
18,290
def gather_squares_triangles(p1, p2, depth):
    """ Draw Square and Right Triangle given 2 points,
        Recurse on new points

        args:
            p1,p2 (float,float) : absolute position on base vertices
            depth (int) : decrementing counter that terminates recursion

        return:
            squares [(float,float,float,float)...] : absolute positions of
                                                     vertices of squares
            triangles [(float,float,float)...] : absolute positions of
                                                 vertices of right triangles
    """
    # Break Recursion if depth is met
    if depth == 0:
        return [], []

    # Generate Points
    pd = (p2[0] - p1[0]), (p1[1] - p2[1])
    p3 = (p2[0] - pd[1]), (p2[1] - pd[0])
    p4 = (p1[0] - pd[1]), (p1[1] - pd[0])
    p5 = (p4[0] + (pd[0] - pd[1])/2), (p4[1] - (pd[0] + pd[1])/2)

    # Gather Points further down the tree
    squares_left, triangles_left = gather_squares_triangles(p4, p5, depth-1)
    squares_right, triangles_right = gather_squares_triangles(p5, p3, depth-1)

    # Merge and Return
    squares = [[p1, p2, p3, p4]] + squares_left + squares_right
    triangles = [[p3, p4, p5]] + triangles_left + triangles_right
    return squares, triangles
18,291
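Because every non-terminal call above emits exactly one square and one triangle and then recurses twice with depth-1, a call at depth d returns 2**d - 1 of each; a minimal check from a unit base edge:

squares, triangles = gather_squares_triangles((0.0, 0.0), (1.0, 0.0), 3)
print(len(squares), len(triangles))  # 7 7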
def send_email(subject, sender, recipients, html_body):
    """Email-sending helper.

    Takes the message subject, the sender, the recipients and the HTML body
    of the message as arguments. A Thread object is created so the message
    is sent in a separate thread.
    """
    app = current_app._get_current_object()
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.html = html_body
    Thread(target=send_async_email, args=(app, msg)).start()
18,292
def update_node(node_name, node_type, root=None):
    """
    ! Node is assumed to have only one input and one output port with a
    maximum of one connection for each.

    Returns:
        NodegraphAPI.Node: newly created node
    """
    new = NodegraphAPI.CreateNode(node_type, root or NodegraphAPI.GetRootNode())
    if new.getType() == "Group":
        new_in = new.addInputPort("in")
        new_out = new.addOutputPort("out")
    else:
        new_in = new.getInputPortByIndex(0)
        new_out = new.getOutputPortByIndex(0)

    existingn = NodegraphAPI.GetNode(node_name)
    if existingn:
        # we assume there is only 1 input/output port with only one connection
        in_port = existingn.getInputPorts()[0]
        in_port = in_port.getConnectedPort(0)
        out_port = existingn.getOutputPorts()[0]
        out_port = out_port.getConnectedPort(0)
        pos = NodegraphAPI.GetNodePosition(existingn)  # type: tuple

        existingn.delete()

        NodegraphAPI.SetNodePosition(new, pos)
        if in_port:
            in_port.connect(new_in)
        if out_port:
            out_port.connect(new_out)

        logger.info("[update_node] Found existing node, it has been updated.")

    new.setName(node_name)
    logger.info("[update_node] Finished for node <{}>".format(node_name))
    return new
18,293
def dan_acf(x, axis=0, fast=False):
    """
    Estimate the autocorrelation function of a time series using the FFT.

    Args:
        x (array): The time series. If multidimensional, set the time axis
            using the ``axis`` keyword argument and the function will be
            computed for every other axis.
        axis (Optional[int]): The time axis of ``x``. Assumed to be the first
            axis if not specified.
        fast (Optional[bool]): If ``True``, only use the largest ``2^n``
            entries for efficiency. (default: False)

    Returns:
        acf (array): The acf array.
    """
    x = np.atleast_1d(x)
    m = [slice(None), ] * len(x.shape)

    # For computational efficiency, crop the chain to the largest power of
    # two if requested.
    if fast:
        n = int(2**np.floor(np.log2(x.shape[axis])))
        m[axis] = slice(0, n)
        x = x[tuple(m)]
    else:
        n = x.shape[axis]

    # Compute the FFT and then (from that) the auto-correlation function.
    f = np.fft.fft(x - np.mean(x, axis=axis), n=2*n, axis=axis)
    m[axis] = slice(0, n)
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
    m[axis] = 0
    # Normalize by the lag-0 value.
    return acf / acf[tuple(m)]
18,294
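A short sanity check for dan_acf above (it only assumes numpy as np): after normalization the lag-0 value is exactly 1, and for white noise the later lags should sit near zero.

import numpy as np

rng = np.random.default_rng(0)
acf = dan_acf(rng.standard_normal(4096), fast=True)
print(acf[0])              # 1.0
print(abs(acf[1:]).max())  # small, since white noise has no correlation structure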
def schedule_decision():
    """Run the optimization and display the result."""
    # Show the top page (when a GET request comes in)
    if request.method == "GET":
        return render_template("scheduler/schedule_decision.html", solution_html=None)

    # Runs when the "run optimization" button is pressed (a POST request)
    # Check whether data has been uploaded; if it is not valid, go back to the original page
    if not check_request(request):
        return redirect(request.url)

    # Preprocessing (load the data)
    df_kagisime, df_gomisute = preprocess(request)

    # Run the optimization
    prob = KandGProblem(df_kagisime, df_gomisute)
    solution_df = prob.solve()
    L_gomisute_members = list(prob.L_gomisute_members)

    # If the user is logged in, store the decided schedule in the DB.
    if current_user.is_authenticated:
        yyyy, mm, _ = solution_df.index[0].split("/")
        user_id = session["_user_id"]
        print(user_id)
        print("currentuser:", current_user)
        is_new_schedule = not ScheduleLists.query.filter_by(
            user_id=user_id, yyyymm=yyyy + mm
        ).all()
        if is_new_schedule:
            schedule_list = ScheduleLists(user_id=user_id, yyyymm=yyyy + mm)
            db.session.add(schedule_list)
            db.session.commit()
        schedulelist_id = (
            ScheduleLists.query.filter_by(user_id=user_id, yyyymm=yyyy + mm)
            .group_by("id")
            .first()
        )
        print(schedulelist_id.id)
        for row in solution_df.itertuples():
            if not is_new_schedule:
                print(datetime.strptime(row[0], "%Y/%m/%d"))
                old_schedule = Schedules.query.filter_by(
                    schedulelist_id=schedulelist_id.id,
                    date=datetime.strptime(row[0], "%Y/%m/%d"),
                ).first()
                print(old_schedule)
                if old_schedule:
                    old_schedule.k_members = row[1]
                    old_schedule.g_members = row[2]
                    db.session.add(old_schedule)
                    db.session.commit()
            else:
                schedule = Schedules(
                    schedulelist_id=schedulelist_id.id,
                    date=datetime.strptime(row[0], "%Y/%m/%d"),
                    k_members=row[1],
                    g_members=row[2],
                )
                db.session.add(schedule)
                db.session.commit()

    # Postprocessing (turn the optimization result into a form that can be rendered as HTML)
    solution_html = postprocess(solution_df)
    return render_template(
        "scheduler/schedule_decision.html",
        solution_html=solution_html,
        solution_df=solution_df,
        L_gomisute_members=" ".join(L_gomisute_members),
    )
18,295
def process_mtl_data(sources_dir, processed_dir, date):
    """Processes new Sante Montreal data.

    Parameters
    ----------
    sources_dir : str
        Absolute path to source data dir.
    processed_dir : str
        Absolute path to processed data dir.
    date : str
        Date of data to append (yyyy-mm-dd).
    """
    # Append col to cases.csv
    append_mtl_cases_csv(sources_dir, processed_dir, date)

    # Update data_mtl_boroughs.csv
    update_mtl_boroughs_csv(processed_dir)

    # Append row to data_mtl_age.csv
    append_mtl_cases_by_age(sources_dir, processed_dir, date)

    update_mtl_vaccination_age_csv(sources_dir, processed_dir)
18,296
def infection_rate_asymptomatic_30x40():
    """
    Real Name: b'infection rate asymptomatic 30x40'
    Original Eqn: b'contact infectivity asymptomatic 30x40*(social distancing policy SWITCH self 40*social distancing policy 40\\\\ +(1-social distancing policy SWITCH self 40))*Infected asymptomatic 30x40*Susceptible 40\\\\ /non controlled pop 30x40'
    Units: b'person/Day'
    Limits: (None, None)
    Type: component

    b''
    """
    return contact_infectivity_asymptomatic_30x40() * (
        social_distancing_policy_switch_self_40() * social_distancing_policy_40()
        + (1 - social_distancing_policy_switch_self_40())
    ) * infected_asymptomatic_30x40() * susceptible_40() / non_controlled_pop_30x40()
18,297
def slug(hans, style=Style.NORMAL, heteronym=False, separator='-',
         errors='default', strict=True):
    """Convert Chinese characters to pinyin, then build a slug string.

    :param hans: A string of Chinese characters (``'你好吗'``) or a list
                 (``['你好', '吗']``).
                 You can segment the string with your favourite word-segmentation
                 module beforehand; simply pass in the segmented list of strings.
    :type hans: unicode string or list of strings
    :param style: The pinyin style to use, :py:attr:`~pypinyin.Style.NORMAL` by default.
                  See :class:`~pypinyin.Style` for the available styles.
    :param heteronym: Whether to enable heteronym (multi-pronunciation) support
    :param separator: The separator/connector placed between two pinyin syllables
    :param errors: How to handle characters without a pinyin reading,
                   see :py:func:`~pypinyin.pinyin` for details
    :param strict: Whether initial-only and final-only pinyin styles strictly follow
                   the Scheme for the Chinese Phonetic Alphabet when handling
                   initials and finals, see :ref:`strict`
    :return: The slug string.

    :raise AssertionError: Raised when the input string is not unicode

    ::

      >>> import pypinyin
      >>> from pypinyin import Style
      >>> pypinyin.slug('中国人')
      'zhong-guo-ren'
      >>> pypinyin.slug('中国人', separator=' ')
      'zhong guo ren'
      >>> pypinyin.slug('中国人', style=Style.FIRST_LETTER)
      'z-g-r'
      >>> pypinyin.slug('中国人', style=Style.CYRILLIC)
      'чжун1-го2-жэнь2'
    """
    return separator.join(
        chain(
            *_default_pinyin.pinyin(
                hans, style=style, heteronym=heteronym,
                errors=errors, strict=strict
            )
        )
    )
18,298
def A_weight(signal, fs):
    """
    Return the given signal after passing through an A-weighting filter

    signal : array_like
        Input signal
    fs : float
        Sampling frequency
    """
    b, a = A_weighting(fs)
    return lfilter(b, a, signal)
18,299
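A usage sketch for A_weight above, assuming the A_weighting(fs) helper it calls returns (b, a) IIR coefficients and that lfilter comes from scipy.signal in the same module:

import numpy as np

fs = 48000
noise = np.random.randn(fs)    # one second of white noise
weighted = A_weight(noise, fs) # same length, A-weighted
print(weighted.shape)          # (48000,)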