content
stringlengths
22
815k
id
int64
0
4.91M
def _generate_indexed(array: IndexedArray) -> str:
    """Generate an indexed Bash array literal from *array*.

    ``None`` entries are skipped; each remaining element is rendered as
    ``[index]=value`` with the value quoted via ``_generate_string``.
    """
    entries = [
        f"[{idx}]={_generate_string(item)}"
        for idx, item in enumerate(array)
        if item is not None
    ]
    return "(" + " ".join(entries) + ")"
5,330,600
def main(argv=None):
    """Main entry point.

    Reads every regular file in the configured metrics directory, parses
    each with ``read_metrics_file``, and prints one tab-separated line of
    (checkpoint, TPs+FNs_All, F1_All) per file.
    """
    known_args = parse_cmdline(argv)
    metrics_dir = known_args.metrics_dir
    # Only regular files; sub-directories in metrics_dir are skipped.
    metrics_files = [
        join(metrics_dir, f)
        for f in listdir(metrics_dir)
        if isfile(join(metrics_dir, f))
    ]
    metrics = [read_metrics_file(f) for f in metrics_files]
    for m in metrics:
        print('%s\t%s\t%s' % (m['checkpoint'], m['TPs+FNs_All'], m['F1_All']))
5,330,601
def up_date(dte, r_quant, str_unit, bln_post_colon):
    """Adjust a date in the light of a (quantity, unit) tuple, taking
    account of any recent colon.

    Parameters:
        dte: base datetime to adjust.
        r_quant: numeric quantity for the unit.
        str_unit: unit code — 'w'/'d'/'h'/'m' are relative offsets;
            'Y'/'y' set the year; 'H'/'M' set hour/minute; 'a'/'p' are
            am/pm markers; a 3+ char string found in STR_MONTHS sets
            month and day.
        bln_post_colon: True if the quantity followed a colon (minutes),
            which changes how 'a'/'p' are interpreted.

    Returns:
        The adjusted datetime.
    """
    if str_unit == 'w':
        dte += timedelta(weeks=r_quant)
    elif str_unit == 'd':
        dte += timedelta(days=r_quant)
    elif str_unit == 'h':
        dte += timedelta(hours=r_quant)
    elif str_unit == 'm':
        dte += timedelta(minutes=r_quant)
    elif str_unit in ('Y', 'y'):
        if r_quant > 500:  # jul 2019 vs jul 17
            r_year = r_quant
        else:
            r_year = datetime.now().year + r_quant
        try:
            dte = datetime.replace(dte, year=int(r_year))
        except ValueError:
            # e.g. Feb 29 in a non-leap target year: clamp to Feb 28.
            dte = datetime.replace(dte, day=28, month=2,
                                   year=int(datetime.now().year + r_quant))
    elif str_unit == 'H':
        dte = datetime.replace(dte, hour=int(r_quant), second=0, microsecond=0)
    elif str_unit == 'M':
        dte = datetime.replace(dte, minute=int(r_quant), second=0, microsecond=0)
    elif str_unit == 'a':
        if not bln_post_colon:
            dte = datetime.replace(dte, hour=int(r_quant), minute=0, second=0,
                                   microsecond=0)
    elif str_unit == 'p':
        if bln_post_colon:
            # adjust by 12 hours if necessary
            if dte.hour < 12:
                dte = datetime.replace(dte, hour=dte.hour+12)
        else:
            p_quant = r_quant
            if p_quant < 12:
                p_quant += 12
            dte = datetime.replace(dte, hour=int(p_quant), minute=0, second=0,
                                   microsecond=0)
    elif (len(str_unit) >= 3) and (STR_MONTHS.find(str_unit) != -1):
        # BUG FIX: '/' is true division in Python 3 and yields a float,
        # but datetime.replace requires an integer month — use '//'.
        dte = datetime.replace(dte, month=(STR_MONTHS.index(str_unit) + 3)//3,
                               day=int(r_quant), second=0, microsecond=0)
        # refers to this year or next year ? (assume not past)
        dte_today = datetime.today().replace(hour=0, minute=0,
                                             second=0, microsecond=0)
        if dte < dte_today:
            dte = dte.replace(year=(dte_today.year+1))
    return dte
5,330,602
def read_options() -> Options:
    """
    read command line arguments and options

    Returns:
        option class(Options)
    Raises:
        NotInspectableError: the file or the directory does not exists.
    """
    args: MutableMapping = docopt(__doc__)
    # Validate the <path> argument: it must resolve (via get_path) to an
    # existing file or directory.
    schema = Schema({
        "<path>": And(Use(get_path),
                      lambda path: path.is_file() or path.is_dir(),
                      error=f"The specified path {args['<path>']}"
                      " does not exist.\n")
    })
    try:
        args = schema.validate(args)
    except SchemaError as e:
        # Re-raise as the package's own error type with schema's message.
        raise NotInspectableError(e.args[0])
    return Options(args["<path>"])
5,330,603
def test_generator_single_input_2():
    """
    Feature: Test single str input
    Description: input str
    Expectation: success
    """
    # Generator-style source: yields 'a'..'~' one char at a time.
    def generator_str():
        for i in range(64):
            yield chr(ord('a') + i)

    # Random-access (indexable) source with the same 64 characters.
    class RandomAccessDatasetInner:
        def __init__(self):
            self.__data = [i for i in range(64)]

        def __getitem__(self, item):
            return chr(ord('a') + self.__data[item])

        def __len__(self):
            return 64

    # Sequential (iterator protocol) source with the same 64 characters.
    class SequentialAccessDataset:
        def __init__(self):
            self.__data = [i for i in range(64)]
            self.__index = 0

        def __next__(self):
            if self.__index >= 64:
                raise StopIteration
            item = chr(ord('a') + self.__data[self.__index])
            self.__index += 1
            return item

        def __iter__(self):
            self.__index = 0
            return self

        def __len__(self):
            return 64

    def assert_generator_single_input_2(data):
        # apply dataset operations
        data1 = ds.GeneratorDataset(data, ["data"], shuffle=False)
        i = 0
        for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
            # each data is a dictionary
            s = chr(ord('a') + i)
            golden = np.array(bytes(s, encoding='utf8'))
            np.testing.assert_array_equal(item["data"], golden)
            i = i + 1

    # All three source styles must produce identical output.
    assert_generator_single_input_2(generator_str)
    assert_generator_single_input_2(RandomAccessDatasetInner())
    assert_generator_single_input_2(SequentialAccessDataset())
5,330,604
def to_keys_values_bson_dict_json():
    """ Convert Document to generic python types. """
    class User(Document):
        _id = fields.IntField()
        name = fields.StringField()

    user = User(_id=1, name="Jack")
    # to tuple — note the library exposes '_id' as 'id' in _fields_ordered
    assert user._fields_ordered == ("id", "name")
    # to keys
    assert list(user)
    # to values
    assert [getattr(user, key) for key in user] == [1, "Jack"]
    # to bson
    son = user.to_mongo()
    # to dict, key is DB document field, not Class field
    d = son.to_dict()
    assert d["_id"] == 1 and d["name"] == "Jack"
    # to json
    assert user.to_json() == '{"_id": 1, "name": "Jack"}'
5,330,605
def SLINK(Dataset, d):
    """function to execute SLINK algo

    Args:
        Dataset(List) :- list of data points, who are also lists
        d(int) :- dimension of data points

    Returns:
        res(Iterables) :- list of triples sorted by the second element,
            first element is index of point, the other two are pointer
            representations of dendrograms noting the lowest level at which
            i is no longer the last point in his cluster and the last point
            in the cluster which i then joins
        Heights(Iterables) :- list of the second element of res' triples
    """
    n = len(Dataset)
    # A[i]: lowest merge height at which i stops being last in its cluster.
    # B[i]: the point i's cluster is merged into at that height.
    A = [inf for i in range(n)]
    B = [0 for i in range(n)]
    # initialisation
    A[0] = inf
    B[0] = 0
    # Sibson's incremental update: add point k and repair the pointer
    # representation using distances M[i] = d(point i, point k).
    for k in range(1, n):
        B[k] = k
        A[k] = inf
        M = [0 for i in range(k + 1)]
        for i in range(k):
            M[i] = metrics(Dataset[i], Dataset[k])
        for i in range(k):
            if(A[i] >= M[i]):
                M[B[i]] = min(M[B[i]], A[i])
                A[i] = M[i]
                B[i] = k
            if(A[i] < M[i]):
                M[B[i]] = min(M[B[i]], M[i])
        # Relabel: any pointer that now skips over a lower merge points to k.
        for i in range(k):
            if(A[i] >= A[B[i]]):
                B[i] = k
    res = [(index, i, j) for index, (i, j) in enumerate(zip(A, B))]
    res = sorted(res, key=lambda x: x[1])
    Heights = [triple[1] for triple in res]
    return(res, Heights)
5,330,606
def static_file(path='index.html'):
    """Serve a static file from the app's static folder.

    Defaults to index.html so the bare route serves the app shell.
    """
    return app.send_static_file(path)
5,330,607
def lazy_property(function):
    """Decorator that turns a method into a lazily-computed property.

    The wrapped function runs at most once per instance; its result is
    cached on the instance under ``_<name>`` and returned on every later
    access.
    """
    cache_name = '_' + function.__name__

    @property
    @wraps(function)
    def getter(self):
        if not hasattr(self, cache_name):
            setattr(self, cache_name, function(self))
        return getattr(self, cache_name)

    return getter
5,330,608
def basename(fname):
    """Split a file name into (directory, stem, extension).

    Examples
    --------
    >>> fname = '../test/data/FSI.txt.zip'
    >>> print('{}, {}, {}'.format(*basename(fname)))
    ../test/data, FSI.txt, .zip
    """
    # Promote plain strings to a path object; Path instances pass through.
    if not isinstance(fname, path_type):
        fname = Path(fname)
    return fname.parent, fname.stem, fname.suffix
5,330,609
def validate_lockstring(lockstring):
    """
    Validate so lockstring is on a valid form.

    Args:
        lockstring (str): Lockstring to validate.

    Returns:
        is_valid (bool): If the lockstring is valid or not.
        error (str or None): A string describing the error, or None
            if no error was found.
    """
    global _LOCK_HANDLER
    # Lazily create a module-level handler bound to a dummy object; it is
    # reused for all subsequent validations.
    if not _LOCK_HANDLER:
        _LOCK_HANDLER = LockHandler(_ObjDummy())
    return _LOCK_HANDLER.validate(lockstring)
5,330,610
def decode_geohash_collection(geohashes: Iterable[str]):
    """
    Return collection of geohashes decoded into location coordinates.

    Parameters
    ----------
    geohashes: Iterable[str]
        Collection of geohashes to be decoded

    Returns
    -------
    Iterable[Tuple[float, float]]
        Collection of location coordinates in Latitude/Longitude
    """
    # Decode each hash and keep only the (lat, lon) pair.
    return [
        (decoded[0], decoded[1])
        for decoded in (decode_geo_hash(gh) for gh in geohashes)
    ]
5,330,611
def renormalize_sparse(A: sp.spmatrix) -> sp.spmatrix:
    """Symmetrically normalize A: (D**-0.5) * A * (D**-0.5), with D the
    diagonal matrix of row sums. Rows summing to zero are left as zero."""
    mat = sp.coo_matrix(A)
    mat.eliminate_zeros()
    row_sums = np.array(mat.sum(1))
    assert np.all(row_sums >= 0)
    inv_sqrt = np.power(row_sums, -0.5).flatten()
    # Zero row sums produce inf under the negative power; zero them out.
    inv_sqrt[np.isinf(inv_sqrt)] = 0.0
    d_half = sp.diags(inv_sqrt)
    return d_half.dot(mat).dot(d_half)
5,330,612
def load_gloria( name: str = "gloria_resnet50", device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", ): """Load a GLoRIA model Parameters ---------- name : str A model name listed by `gloria.available_models()`, or the path to a model checkpoint containing the state_dict device : Union[str, torch.device] The device to put the loaded model Returns ------- gloria_model : torch.nn.Module The GLoRIA model """ # warnings if name in _MODELS: ckpt_path = _MODELS[name] elif os.path.isfile(name): ckpt_path = name else: raise RuntimeError( f"Model {name} not found; available models = {available_models()}" ) if not os.path.exists(ckpt_path): raise RuntimeError( f"Model {name} not found.\n" + "Make sure to download the pretrained weights from \n" + " https://stanfordmedicine.box.com/s/j5h7q99f3pfi7enc0dom73m4nsm6yzvh \n" + " and copy it to the ./pretrained folder." ) ckpt = torch.load(ckpt_path, map_location=device) cfg = ckpt["hyper_parameters"] ckpt_dict = ckpt["state_dict"] fixed_ckpt_dict = {} for k, v in ckpt_dict.items(): new_key = k.split("gloria.")[-1] fixed_ckpt_dict[new_key] = v ckpt_dict = fixed_ckpt_dict gloria_model = builder.build_gloria_model(cfg).to(device) gloria_model.load_state_dict(ckpt_dict) return gloria_model
5,330,613
def is_enabled():
    """
    Check if `ufw` is enabled

    :returns: True if ufw is enabled
    """
    # Force an English locale so the 'Status:' line is predictable.
    status_text = subprocess.check_output(
        ['ufw', 'status'],
        universal_newlines=True,
        env={'LANG': 'en_US', 'PATH': os.environ['PATH']})
    matches = re.findall(r'^Status: active\n', status_text, re.M)
    return len(matches) >= 1
5,330,614
def findExtraOverlayClasses(obj, clsList):
    """Determine the most appropriate class(es) for Chromium objects.
    This works similarly to L{NVDAObjects.NVDAObject.findOverlayClasses}
    except that it never calls any other findOverlayClasses method.
    """
    # A list item whose grandparent is a combo box is the combo's dropdown
    # entry and needs the specialised overlay class.
    if obj.role==controlTypes.ROLE_LISTITEM and obj.parent and obj.parent.parent and obj.parent.parent.role==controlTypes.ROLE_COMBOBOX:
        clsList.append(ComboboxListItem)
    # Delegate the rest of the classification to the shared IA2 web logic.
    ia2Web.findExtraOverlayClasses(obj, clsList, documentClass=Document)
5,330,615
def _fetch(
    self,
    targets=None,
    jobs=None,
    remote=None,
    all_branches=False,
    show_checksums=False,
    with_deps=False,
    all_tags=False,
    recursive=False,
):
    """Download data items from a cloud and imported repositories

    Returns:
        int: number of successfully downloaded files

    Raises:
        DownloadError: thrown when there are failed downloads, either
            during `cloud.pull` or trying to fetch imported files
        config.NoRemoteError: thrown when downloading only local files and no
            remote is configured
    """
    used = self.used_cache(
        targets,
        all_branches=all_branches,
        all_tags=all_tags,
        with_deps=with_deps,
        force=True,
        remote=remote,
        jobs=jobs,
        recursive=recursive,
    )
    downloaded = 0
    failed = 0
    try:
        downloaded += self.cloud.pull(
            used, jobs, remote=remote, show_checksums=show_checksums
        )
    except NoRemoteError:
        # Only fatal when local cache entries were requested; external
        # (imported) files can still be fetched below without a remote.
        if not used.external and used["local"]:
            raise
    except DownloadError as exc:
        failed += exc.amount
    # Fetch files imported from other repositories, keyed by (url, rev).
    for (repo_url, repo_rev), files in used.external.items():
        d, f = _fetch_external(self, repo_url, repo_rev, files)
        downloaded += d
        failed += f
    if failed:
        raise DownloadError(failed)
    return downloaded
5,330,616
def get_connected_input_geometry(blend_shape):
    """
    Return an array of blend_shape's input plugs that have an input
    connection. pm.listConnections should do this, but it has bugs when
    the input array is sparse.
    """
    results = []
    blend_shape_plug = _get_plug_from_node('%s.input' % blend_shape)
    # Iterate physical (existing) elements so sparse arrays are handled.
    num_input_elements = blend_shape_plug.evaluateNumElements()
    for idx in range(num_input_elements):
        input = blend_shape_plug.elementByPhysicalIndex(idx)
        # Locate this element's inputGeometry child plug and record it only
        # if something is connected into it (destination side).
        input_geometry_attr = OpenMaya.MFnDependencyNode(input.node()).attribute('inputGeometry')
        input_geometry_plug = input.child(input_geometry_attr)
        conns = OpenMaya.MPlugArray()
        input_geometry_plug.connectedTo(conns, True, False);
        if conns.length():
            results.append(input_geometry_plug.info())
    return results
5,330,617
async def test_monthly_config_flow(hass: HomeAssistant) -> None:
    """Test we get the form."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # Initialise Config Flow
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Check that the config flow shows the user form as the first step
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # If a user were to enter `weekly` for frequency
    # it would result in this function call
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        user_input={"name": "test", "frequency": "monthly"},
    )
    # Check that the config flow is complete and a new entry is created with
    # the input data
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "detail"
    assert result["errors"] == {}
    # ...add Wednesday
    with patch(
        "custom_components.garbage_collection.async_setup_entry", return_value=True
    ) as mock_setup_entry, patch(
        "custom_components.garbage_collection.async_setup",
        return_value=True,
    ) as mock_setup:
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            user_input={
                "collection_days": ["wed"],
                "weekday_order_number": ["1"],
                "period": 1,
            },
        )
    # Should create entry
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    # unique_id is random, so drop it before comparing the payload.
    del result["data"]["unique_id"]
    assert result["data"] == {
        "frequency": "monthly",
        "collection_days": ["wed"],
        "weekday_order_number": ["1"],
        "period": 1,
    }
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
5,330,618
def get_median_and_stdev(arr: torch.Tensor) -> Tuple[float, float]:
    """Return the median and (sample) standard deviation of *arr*
    as plain Python floats."""
    median_value = torch.median(arr)
    stdev_value = torch.std(arr)
    return median_value.item(), stdev_value.item()
5,330,619
def connect(user, host, port):
    """Create and return a new SSHClient connected to the given host.

    NOTE: this is Python 2 code (old-style `except X, e:` syntax). It loops
    until a connection succeeds, re-prompting for a password on auth
    failures, and aborts outright on host-key mismatch, timeouts, DNS
    failures, and low-level socket errors.
    """
    client = ssh.SSHClient()
    if not env.disable_known_hosts:
        client.load_system_host_keys()
    if not env.reject_unknown_hosts:
        client.set_missing_host_key_policy(ssh.AutoAddPolicy())
    connected = False
    password = get_password()
    while not connected:
        try:
            client.connect(
                hostname=host,
                port=int(port),
                username=user,
                password=password,
                key_filename=env.key_filename,
                timeout=10,
                allow_agent=not env.no_agent,
                look_for_keys=not env.no_keys
            )
            connected = True
            return client
        # BadHostKeyException corresponds to key mismatch, i.e. what on the
        # command line results in the big banner error about man-in-the-middle
        # attacks.
        except ssh.BadHostKeyException:
            abort("Host key for %s did not match pre-existing key! Server's key was changed recently, or possible man-in-the-middle attack." % env.host)
        # Prompt for new password to try on auth failure
        except (
            ssh.AuthenticationException,
            ssh.PasswordRequiredException,
            ssh.SSHException
        ), e:
            # For whatever reason, empty password + no ssh key or agent results
            # in an SSHException instead of an AuthenticationException. Since
            # it's difficult to do otherwise, we must assume empty password +
            # SSHException == auth exception. Conversely: if we get
            # SSHException and there *was* a password -- it is probably
            # something non auth related, and should be sent upwards.
            if e.__class__ is ssh.SSHException and password:
                abort(str(e))
            # Otherwise, assume an auth exception, and prompt for new/better
            # password.
            #
            # Paramiko doesn't handle prompting for locked private keys (i.e.
            # keys with a passphrase and not loaded into an agent) so we have
            # to detect this and tweak our prompt slightly. (Otherwise,
            # however, the logic flow is the same, because Paramiko's connect()
            # method overrides the password argument to be either the login
            # password OR the private key passphrase. Meh.)
            #
            # NOTE: This will come up if you normally use a
            # passphrase-protected private key with ssh-agent, and enter an
            # incorrect remote username, because Paramiko:
            #
            # * Tries the agent first, which will fail as you gave the wrong
            #   username, so obviously any loaded keys aren't gonna work for a
            #   nonexistent remote account;
            # * Then tries the on-disk key file, which is passphrased;
            # * Realizes there's no password to try unlocking that key with,
            #   because you didn't enter a password, because you're using
            #   ssh-agent;
            # * In this condition (trying a key file, password is None)
            #   Paramiko raises PasswordRequiredException.
            text = None
            if e.__class__ is ssh.PasswordRequiredException:
                # NOTE: we can't easily say WHICH key's passphrase is needed,
                # because Paramiko doesn't provide us with that info, and
                # env.key_filename may be a list of keys, so we can't know
                # which one raised the exception. Best not to try.
                prompt = "[%s] Passphrase for private key"
                text = prompt % env.host_string
            password = prompt_for_password(text, user=user)
            # Update env.password, env.passwords if empty
            set_password(password)
        # Ctrl-D / Ctrl-C for exit
        except (EOFError, TypeError):
            # Print a newline (in case user was sitting at prompt)
            print('')
            sys.exit(0)
        # Handle timeouts
        except timeout:
            abort('Timed out trying to connect to %s' % host)
        # Handle DNS error / name lookup failure
        except gaierror:
            abort('Name lookup failed for %s' % host)
        # Handle generic network-related errors
        # NOTE: In 2.6, socket.error subclasses IOError
        except socketerror, e:
            abort('Low level socket error connecting to host %s: %s' % (
                host, e[1])
            )
5,330,620
def _chf_to_pdf(t, x, chf, **chf_args):
    """
    Estimate by numerical integration, using ``scipy.integrate.quad``, of
    the probability distribution described by the given characteristic
    function. Integration errors are not reported/checked. Either ``t`` or
    ``x`` must be a scalar.
    """
    t = np.asarray(t)
    x = np.asarray(x)

    def f(u, t, x):
        # Fourier-inversion integrand: pdf(x) = (1/2pi) * Int e^{-iux} chf(t,u) du
        return np.real(
            exp(-1j*u*x) / (2*np.pi) * chf(t, u, **chf_args))

    # Vectorize over whichever of t / x is the non-scalar argument.
    if t.shape != ():
        pdf = np.empty(t.shape)
        for i in np.ndindex(t.shape):
            pdf[i] = scipy.integrate.quad(
                lambda u: f(u, t[i], x), -np.inf, np.inf)[0]
    else:
        pdf = np.empty(x.shape)
        for i in np.ndindex(x.shape):
            pdf[i] = scipy.integrate.quad(
                lambda u: f(u, t, x[i]), -np.inf, np.inf)[0]
    return pdf
5,330,621
def ParseTraceLocationLine(msg):
    """Parse the location line of a stack trace.
    If successfully parsed, returns (filename, line, method)."""
    match = re.match(kCodeLocationLine, msg)
    if match is None:
        return None
    try:
        return (match.group(1), match.group(2), match.group(3))
    except IndexError as e:
        # The pattern matched but had fewer capture groups than expected.
        logging.warning('RE matched "%s", but extracted wrong number of items: %r' % (msg, e))
        return None
5,330,622
def test_datasets(test_client):
    """The datasets route should respond with HTTP 200."""
    resp = test_client.get("datasets")
    assert resp.status_code == 200
5,330,623
def get_rl_params(num, similarity_name, reward_name):
    """
    Get RL model parameters (alpha and beta) based on <num>'s data.

    <similarity_name> is the name of the similarity metric you want to use,
    see fmri.catreward.roi.data.get_similarity_data() for details.

    <reward_name> is the name of the data to be used as rewards in the model.
    Options are 'acc' ({0,1}, i.e. behavioral accuracy or 'gl' ({-1,1},
    short for gain/lose)
    """
    # A missing metric is stored under the literal 'none' label in the table.
    if similarity_name is None:
        similarity_name = 'none'

    table = pandas.read_table(
        os.path.join(fmri.__path__[0], 'catreward', 'rl', '101_118_rl_params.txt'),
        sep=',')

    # Narrow down to this subject / reward scheme / similarity metric.
    stable = table[table['sub'] == num]
    stable_r = stable[stable['reward'] == reward_name]
    stable_r_s = stable_r[stable_r['sim'] == similarity_name]

    # .ix was removed from pandas; .iloc performs the same positional
    # column slice (first two columns: alpha and beta).
    return stable_r_s.iloc[:, 0:2].values[0].tolist()
5,330,624
def return_arg_type(at_position):
    """
    Decorator factory: cast the wrapped function's return value to the
    type of the positional argument found at *at_position*.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            target_type = type(args[at_position])
            return target_type(func(*args, **kwargs))
        return wrapper
    return decorator
5,330,625
def extend(s, var, val):
    """Copy dict s and extend it by setting var to val; return copy.

    The original eval()-based trick for Python 3.4/3.5 dict-unpacking
    compatibility is gone: a plain copy-and-assign is equivalent on every
    version, avoids eval entirely, and never mutates the input.
    """
    s2 = s.copy()
    s2[var] = val
    return s2
5,330,626
def rx_reduce(observable: Observable, accumulator: AccumulatorOperator, seed: Optional[Any] = None) -> Observable:
    """Create an observable which reduce source with accumulator and seed value.

    Args:
        observable (Observable): source
        accumulator (AccumulatorOperator): accumulator function (two argument,
            one result) async or sync.
        seed (Optional[Any]): optional seed value (default none)

    Returns:
        (Observable): a new observable

    """
    # Detect once whether the accumulator must be awaited.
    is_awaitable = iscoroutinefunction(accumulator)

    async def _subscribe(an_observer: Observer) -> Subscription:
        nonlocal is_awaitable
        # Running accumulation state; starts from the seed.
        _buffer = seed

        async def _on_next(item: Any):
            nonlocal _buffer
            _buffer = await accumulator(_buffer, item) if is_awaitable else accumulator(_buffer, item)

        async def _on_completed():
            nonlocal _buffer
            # Emit the single reduced value, then complete downstream.
            await an_observer.on_next(_buffer)
            await an_observer.on_completed()

        return await observable.subscribe(an_observer=rx_observer_from(observer=an_observer, on_next=_on_next, on_completed=_on_completed))

    return rx_create(subscribe=_subscribe)
5,330,627
def test_build_manifest_fail2():
    """Test recursive definition"""
    # $BASE and $TMP reference each other, so expansion can never terminate
    # and the builder is expected to raise.
    config_file = {'manifest': {
        '$BASE': '$TMP/share',
        '$TMP': '$BASE/share',
    }}
    with pytest.raises(Exception):
        cfg.__build_manifest(config_file)
5,330,628
def predict(image: Image.Image):
    """
    Take an image and run it through the inference model. This returns a
    ModelOutput object with all of the information that the model returns.
    Furthermore, bounding box coordinates are normalized.
    """
    logging.debug("Sending image to model for inference ...")
    width, height = image.size
    model = get_inference_model()
    model.eval()
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        model_input = image_to_model_input(image)
        model_output = model([model_input])[0]
    logging.debug(f"Model returned {len(model_output)} fields.")
    # Pull predictions back to the CPU as plain Python lists.
    bounding_boxes = model_output["instances"].get_fields()["pred_boxes"].to("cpu").tensor.tolist()
    confidences = model_output["instances"].get_fields()["scores"].to("cpu").tolist()
    classes = model_output["instances"].get_fields()["pred_classes"].to("cpu").tolist()
    # Map numeric class ids to their human-readable category names.
    classes = [CATEGORIES[num] for num in classes]
    # Normalize all the bounding box coordinates to between 0 and 1.
    normalized_bounding_boxes = []
    for box in bounding_boxes:
        normalized_box = (
            box[0] / float(width),
            box[1] / float(height),
            box[2] / float(width),
            box[3] / float(height))
        normalized_bounding_boxes.append(BoundingBox(upper_left_x=normalized_box[0],
                                                     upper_left_y=normalized_box[1],
                                                     lower_right_x=normalized_box[2],
                                                     lower_right_y=normalized_box[3]))
    return ModelOutput(bounding_boxes=normalized_bounding_boxes,
                       confidences=confidences,
                       classes=classes)
5,330,629
def _make_header():
    """Make vcf header for SNPs in miRs.

    NOTE(review): stub — no body is implemented yet, so this currently
    returns None.
    """
5,330,630
def minimum_filter(
    input,
    size=None,
    footprint=None,
    output=None,
    mode="reflect",
    cval=0.0,
    origin=0,
):
    """Multi-dimensional minimum filter.

    Args:
        input (cupy.ndarray): The input array.
        size (int or sequence of int): One of ``size`` or ``footprint`` must
            be provided. If ``footprint`` is given, ``size`` is ignored.
            Otherwise ``footprint = cupy.ones(size)`` with ``size``
            automatically made to match the number of dimensions in
            ``input``.
        footprint (cupy.ndarray): a boolean array which specifies which of
            the elements within this shape will get passed to the filter
            function.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int or sequence of int): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.minimum_filter`
    """
    # Thin wrapper: the shared min/max implementation does all the work.
    return _min_or_max_filter(
        input, size, footprint, None, output, mode, cval, origin, "min"
    )
5,330,631
def lowpass(data, cutoff=0.25, fs=30, order=2, nyq=0.75):
    """
    Butter low pass filter for a single or spectra or a list of them.

    :type data: list[float]
    :param data: List of vectors in line format (each line is a vector).
    :type cutoff: float
    :param cutoff: Desired cutoff frequency of the filter. The default is 0.25.
    :type fs: int
    :param fs: Sample rate in Hz. The default is 30.
    :type order: int
    :param order: Sin wave can be approx represented as quadratic. The default is 2.
    :type nyq: float
    :param nyq: Nyquist factor, 0.75 is a good value to start (used as nyq*fs).
    :returns: Filtered data
    :rtype: list[float]
    """
    # Work on a deep copy so the caller's data is never mutated.
    filtered = copy.deepcopy(data)
    normalized_cutoff = cutoff / (nyq * fs)
    b, a = butter(order, normalized_cutoff, btype='low', analog=False)
    if len(np.array(filtered).shape) > 1:
        # A batch of spectra: filter each row independently.
        for row in range(len(filtered)):
            filtered[row] = filtfilt(b, a, filtered[row])
    else:
        filtered = filtfilt(b, a, filtered)
    return filtered
5,330,632
def format(value, limit=LIMIT, code=True, offset=0, hard_stop=None, hard_end=0):
    """
    Recursively dereferences an address into string representation, or convert the list
    representation of address dereferences into string representation.

    Arguments:
        value(int|list): Either the starting address to be sent to get, or the result of get (a list)
        limit(int): Number of valid pointers
        code(bool): Hint that indicates the value may be an instruction
        offset(int): Offset into the address to get the next pointer
        hard_stop(int): Value to stop on
        hard_end: Value to append when hard_stop is reached: null, value of hard stop, a string.

    Returns:
        A string representing pointers of each address and reference
        Strings format: 0x0804a10 —▸ 0x08061000 ◂— 0x41414141
    """
    limit = int(limit)

    # Allow results from get function to be passed to format
    if isinstance(value, list):
        chain = value
    else:
        chain = get(value, limit, offset, hard_stop, hard_end)

    arrow_left = C.arrow(' %s ' % config_arrow_left)
    arrow_right = C.arrow(' %s ' % config_arrow_right)

    # Colorize the chain
    rest = []
    for link in chain:
        symbol = pwndbg.symbol.get(link) or None
        if symbol:
            symbol = '%#x (%s)' % (link, symbol)
        rest.append(M.get(link, symbol))

    # If the dereference limit is zero, skip any enhancements.
    if limit == 0:
        return rest[0]

    # Otherwise replace last element with the enhanced information.
    rest = rest[:-1]

    # Enhance the last entry
    # If there are no pointers (e.g. eax = 0x41414141), then enhance
    # the only element there is.
    if len(chain) == 1:
        enhanced = pwndbg.enhance.enhance(chain[-1], code=code)

    # Otherwise, the last element in the chain is the non-pointer value.
    # We want to enhance the last pointer value. If an offset was used
    # chain failed at that offset, so display that offset.
    elif len(chain) < limit + 1:
        enhanced = pwndbg.enhance.enhance(chain[-2] + offset, code=code)

    else:
        enhanced = C.contiguous('%s' % config_contiguous)

    if len(chain) == 1:
        return enhanced

    return arrow_right.join(rest) + arrow_left + enhanced
5,330,633
async def test_state_reporting(hass):
    """Test the state reporting.

    With "all": "false", the group is ON if ANY member is on, OFF when all
    members are off, and UNAVAILABLE only when every member is unavailable.
    """
    await async_setup_component(
        hass,
        SWITCH_DOMAIN,
        {
            SWITCH_DOMAIN: {
                "platform": DOMAIN,
                "entities": ["switch.test1", "switch.test2"],
                "all": "false",
            }
        },
    )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()

    # One member on, one unavailable -> group is on.
    hass.states.async_set("switch.test1", STATE_ON)
    hass.states.async_set("switch.test2", STATE_UNAVAILABLE)
    await hass.async_block_till_done()
    assert hass.states.get("switch.switch_group").state == STATE_ON

    # One member on, one off -> group is on.
    hass.states.async_set("switch.test1", STATE_ON)
    hass.states.async_set("switch.test2", STATE_OFF)
    await hass.async_block_till_done()
    assert hass.states.get("switch.switch_group").state == STATE_ON

    # Both members off -> group is off.
    hass.states.async_set("switch.test1", STATE_OFF)
    hass.states.async_set("switch.test2", STATE_OFF)
    await hass.async_block_till_done()
    assert hass.states.get("switch.switch_group").state == STATE_OFF

    # Both members unavailable -> group is unavailable.
    hass.states.async_set("switch.test1", STATE_UNAVAILABLE)
    hass.states.async_set("switch.test2", STATE_UNAVAILABLE)
    await hass.async_block_till_done()
    assert hass.states.get("switch.switch_group").state == STATE_UNAVAILABLE
5,330,634
def sogs_put(client, url, json, user):
    """
    PUTs a test `client` request to `url` with the given `json` as body and
    X-SOGS-* signature headers signing the request for `user`.
    """
    body = dumps(json).encode()
    signed_headers = x_sogs_for(user, "PUT", url, body)
    return client.put(
        url,
        data=body,
        content_type='application/json',
        headers=signed_headers,
    )
5,330,635
def get_results(heading):
    """Get all records under a given record heading from PubChem/
    Update results from those records.

    Pages through the PUG-View annotations API until TotalPages is
    exhausted, accumulating decoded records into `results`.
    """
    page = 1
    results = {}
    with tqdm(total=100) as pbar:
        while True:
            url = (f"https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/annotations/heading/"
                   f"JSON?heading_type=Compound&heading={heading}&page={page}")
            response = requests.get(url)
            records = response.json()
            update_results(records, results)
            totalPages = records['Annotations']['TotalPages']
            # The real page count is only known after the first response;
            # re-seed the progress bar with it.
            if page==1:
                pbar.reset(total=totalPages)
            pbar.set_description("%d CIDs described" % len(results))
            pbar.update()
            page += 1
            if page > totalPages:
                break
    return results
5,330,636
def create_dictionary(documents):
    """Creates word dictionary for given corpus.

    Parameters:
        documents (list of str): set of documents

    Returns:
        dictionary (gensim.corpora.Dictionary): gensim dicionary of words
            from dataset
    """
    vocabulary = Dictionary(documents)
    # Re-pack ids to remove gaps left by construction.
    vocabulary.compactify()
    return vocabulary
5,330,637
def clone_bitarray(other, src=None):
    """
    Fast clone of the bit array. The actual function used depends on the
    implementation.

    :param other: bit array to clone
    :param src: optional destination; reused in-place when the fast
        implementation is available
    :return: the cloned bit array
    """
    if src is not None and FAST_IMPL_PH4:
        # Fast path: copy directly into the caller-supplied destination.
        src.fast_copy(other)
        return src
    return to_bitarray(other)
5,330,638
def _create_graph(
    expressions: List[expression.Expression],
    options: calculate_options.Options,
    feed_dict: Optional[Dict[expression.Expression, prensor.Prensor]] = None
) -> "ExpressionGraph":
    """Create graph and calculate expressions.

    Builds the original expression graph, canonicalizes it (deduplicating
    equivalent subexpressions), evaluates it, and returns the canonical
    graph holding the computed values.
    """
    expression_graph = OriginalExpressionGraph(expressions)
    canonical_graph = CanonicalExpressionGraph(expression_graph)
    canonical_graph.calculate_values(options, feed_dict=feed_dict)
    return canonical_graph
5,330,639
def power_to_db(S, ref=1.0, amin=1e-10, top_db=80.0):
    """Convert a power spectrogram (amplitude squared) to decibel (dB) units.

    Computes the scaling ``10 * log10(S / ref)`` in a numerically stable way.

    Parameters
    ----------
    S : np.ndarray
        input power
    ref : scalar or callable
        reference power; if callable, the reference value is ``ref(S)``
    amin : float > 0
        minimum threshold for ``abs(S)`` and ``ref``
    top_db : float >= 0 or None
        threshold the output at ``top_db`` below the peak

    Returns
    -------
    S_db : np.ndarray
        ``S_db ~= 10 * log10(S) - 10 * log10(ref)``

    See Also
    --------
    perceptual_weighting
    db_to_power
    amplitude_to_db
    db_to_amplitude
    """
    S = np.asarray(S)

    if amin <= 0:
        raise ParameterError("amin must be strictly positive")

    # Complex input loses phase; warn, then work with the magnitude.
    if np.issubdtype(S.dtype, np.complexfloating):
        warnings.warn(
            "power_to_db was called on complex input so phase "
            "information will be discarded. To suppress this warning, "
            "call power_to_db(np.abs(D)**2) instead."
        )
        magnitude = np.abs(S)
    else:
        magnitude = S

    ref_value = ref(magnitude) if callable(ref) else np.abs(ref)

    # Clamp both operands at amin so log10 never sees zero.
    log_spec = 10.0 * np.log10(np.maximum(amin, magnitude))
    log_spec -= 10.0 * np.log10(np.maximum(amin, ref_value))

    if top_db is not None:
        if top_db < 0:
            raise ParameterError("top_db must be non-negative")
        log_spec = np.maximum(log_spec, log_spec.max() - top_db)

    return log_spec
5,330,640
def is__str__equal(obj):
    """Assert that evaluating ``str(obj)`` reproduces an object of the same type."""
    reconstructed = eval(str(obj))
    assert isinstance(reconstructed, type(obj))
5,330,641
def generate_arn(service, arn_suffix, region=None):
    """Build a formatted AWS ARN string.

    Keyword arguments:
    service -- the AWS service
    arn_suffix -- the majority of the arn after the initial common data
    region -- the region (can be None for region free arns)
    """
    # A falsy region (None or empty) leaves the region slot blank.
    region_part = region if region else ""
    return ":".join(["arn", "aws", service, region_part, arn_suffix])
5,330,642
def prod_list(lst):
    """Return the product of all numbers in *lst*.

    Raises
    ------
    ValueError
        If *lst* is empty.
    """
    # Guard clause: refuse empty input up front.
    if not lst:
        raise ValueError("List cannot be empty.")
    product = 1
    for value in lst:
        product *= value
    return product
5,330,643
def plot(x, f, gradient, Name):
    """Plot a function and its gradient over the sample points *x*.

    Parameters
    ----------
    x : array-like
        Sample points on the horizontal axis.
    f : callable
        Scalar function; vectorized internally with ``np.vectorize``.
    gradient : callable
        Gradient of *f*; assumed to accept the array *x* directly.
    Name : str
        Display name of the function, used in the title and legend.
    """
    plt.figure(figsize=(16, 9))
    y = np.vectorize(f)(x)
    grad = gradient(x)
    # The original called .legend() on the tuple returned by plt.axis()
    # (an AttributeError) and formatted the gradient *function object*
    # into the title; plot both curves and use the pyplot legend instead.
    plt.plot(x, y, label="$f(x) = $" + Name)
    plt.plot(x, grad, label="$Df(x)$")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.title("$f(x) = $" + Name)
    plt.legend()
    plt.show()
5,330,644
def _main(argv):
    """
    Handle arguments for the 'lumi-upload' command.

    Parses CLI options, connects to the Luminoso API, prompts
    interactively for a project name when none was given, uploads the
    JSON-lines documents, and prints the resulting project id and count.
    """
    parser = argparse.ArgumentParser(
        description=DESCRIPTION,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        '-b', '--base-url', default=URL_BASE,
        help='API root url, default: %s' % URL_BASE,
    )
    parser.add_argument(
        '-f', '--token-file',
        help='file where an API token was saved'
    )
    parser.add_argument(
        '-w', '--workspace-id', default=None,
        help='Workspace ID that should own the project, if not the default',
    )
    parser.add_argument(
        '-l', '--language', default='en',
        help='The language code for the language the text is in. Default: en',
    )
    parser.add_argument(
        'input_filename',
        help='The JSON-lines (.jsons) file of documents to upload',
    )
    parser.add_argument(
        'project_name', nargs='?', default=None,
        help='What the project should be called',
    )
    args = parser.parse_args(argv)
    client = LuminosoClient.connect(
        url=args.base_url, token_file=args.token_file,
        user_agent_suffix='lumi-upload'
    )
    name = args.project_name
    if name is None:
        # Interactive fallback when the positional name was omitted.
        name = input('Enter a name for the project: ')
    if not name:
        # Abort on an empty name rather than creating a nameless project.
        print('Aborting because no name was provided.')
        return
    result = upload_docs(
        client, args.input_filename, args.language, name,
        workspace=args.workspace_id, progress=True,
    )
    print(
        'Project {!r} created with {} documents'.format(
            result['project_id'], result['document_count']
        )
    )
5,330,645
def create_attribute(representation_uuid, attribute_name):
    """Create a representation of an attribute of a representation.

    Returns a JSON response carrying the new attribute's uuid on
    success, or a message/status pair derived from any raised exception.
    """
    try:
        attribute_uuid = get_bus().create_attribute(
            representation_uuid, attribute_name, public=True)
        return JsonResponse({'type': 'uuid', 'uuid': attribute_uuid})
    except Exception as exception:
        # Translate any failure into an HTTP error payload.
        message, status = handle_exception(exception)
        return JsonResponse({'message': message}, status=status)
5,330,646
def get_current_git_hash(raise_on_error: bool = False) -> Optional[str]:
    """
    Return git hash of the latest commit

    Parameters
    ----------
    raise_on_error: bool, optional
        If False (default), will return None, when it fails to obtain commit hash.
        If True, will raise, when it fails to obtain commit hash.

    Returns
    -------
    Short hash of the current HEAD or None.
    """
    try:
        git_hash = subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD']
        ).decode('utf-8').strip()
    except (subprocess.CalledProcessError, FileNotFoundError):
        # CalledProcessError: git failed (e.g. not inside a repository).
        # FileNotFoundError: the git executable itself is not installed --
        # previously this escaped uncaught, violating the documented
        # "return None on failure" contract.
        if raise_on_error:
            raise
        warnings.warn('Probably not in a git repo.')
        git_hash = None
    return git_hash
5,330,647
def _raise_if_posterror(response): """raise error for failed POST request""" if response.status_code not in [200, 201]: LOG.error(response.text) raise SimpleHTTPException(response)
5,330,648
def test_annot_sanitizing(tmpdir):
    """Test description sanitizing."""
    # Round-trip annotations whose description contains characters that
    # must be sanitized on save (';' and ':').
    annot = Annotations([0], [1], ['a;:b'])
    fname = str(tmpdir.join('custom-annot.fif'))
    annot.save(fname)
    annot_read = read_annotations(fname)
    _assert_annotations_equal(annot, annot_read)

    # make sure pytest raises error on char-sequence that is not allowed
    with pytest.raises(ValueError, match='in descriptions not supported'):
        Annotations([0], [1], ['a{COLON}b'])
5,330,649
def check_shift(start_time, end_time, final_minute, starting_minute, record):
    """Determine the call's tariff shift and compute its price.

    Day window (6h-22h): fixed charge of 0.36 plus 0.09 per minute.
    Night window: fixed charge of 0.36 only.  Calls crossing the boundary
    pay the day-rate minutes on the day side plus a second fixed charge.

    :param start_time: hour the call started
    :param end_time: hour the call ended
    :param final_minute: minute within the ending hour
    :param starting_minute: minute within the starting hour
    :param record: dict with 'start' and 'end' timestamps (seconds)
    :return value: price of the call
    """
    begin = start_time + (starting_minute / 60)
    finish = end_time + (final_minute / 60)
    minutes = (record['end'] - record['start']) // 60

    begins_by_day = 6 < begin < 22
    ends_by_day = 6 < finish < 22

    if begins_by_day and ends_by_day:
        # Entirely within the daytime tariff window.
        return 0.36 + minutes * 0.09
    if begins_by_day:
        # Started during the day, ended at night: bill the minutes up to
        # 22h at the day rate plus a second standing charge.
        return 0.36 + ((22 - begin) * 60) * 0.09 + 0.36
    if ends_by_day:
        # Started at night, ended during the day: bill from 6h onwards.
        return 0.36 + ((finish - 6) * 60) * 0.09 + 0.36
    # Entirely within the nighttime tariff window.
    return 0.36
5,330,650
def s3_avatar_represent(user_id, tablename="auth_user", gravatar=False, **attr):
    """
        Represent a User as their profile picture or Gravatar

        @param tablename: either "auth_user" or "pr_person" depending
                          on which table the 'user_id' refers to
        @param gravatar: fall back to Gravatar when no profile image exists
        @param attr: additional HTML attributes for the IMG(), such as _class
    """

    # Default avatar dimensions (pixels)
    size = (50, 50)

    if user_id:
        db = current.db
        s3db = current.s3db
        cache = s3db.cache

        table = s3db[tablename]

        email = None
        image = None

        if tablename == "auth_user":
            # Look up the user's email, then their profile image via the
            # person-entity link table.
            user = db(table.id == user_id).select(table.email,
                                                  cache = cache,
                                                  limitby = (0, 1),
                                                  ).first()
            if user:
                email = user.email.strip().lower()
            ltable = s3db.pr_person_user
            itable = s3db.pr_image
            query = (ltable.user_id == user_id) & \
                    (ltable.pe_id == itable.pe_id) & \
                    (itable.profile == True)
            image = db(query).select(itable.image,
                                     limitby = (0, 1),
                                     ).first()
            if image:
                image = image.image
        elif tablename == "pr_person":
            # Resolve the person's email from their EMAIL contact record,
            # then their profile image, via the person entity id.
            user = db(table.id == user_id).select(table.pe_id,
                                                  cache = cache,
                                                  limitby = (0, 1),
                                                  ).first()
            if user:
                ctable = s3db.pr_contact
                query = (ctable.pe_id == user.pe_id) & \
                        (ctable.contact_method == "EMAIL")
                email = db(query).select(ctable.value,
                                         cache = cache,
                                         limitby = (0, 1),
                                         ).first()
                if email:
                    email = email.value
                itable = s3db.pr_image
                query = (itable.pe_id == user.pe_id) & \
                        (itable.profile == True)
                image = db(query).select(itable.image,
                                         limitby = (0, 1),
                                         ).first()
                if image:
                    image = image.image

        if image:
            # Uploaded profile image: serve a resized copy via download
            image = s3db.pr_image_library_represent(image, size=size)
            size = s3db.pr_image_size(image, size)
            url = URL(c="default", f="download",
                      args=image)
        elif gravatar:
            if email:
                # If no Image uploaded, try Gravatar, which also provides a nice fallback identicon
                import hashlib
                # NOTE(review): hashlib.md5() requires bytes on Python 3;
                # this works unchanged only on Python 2 -- confirm runtime.
                email_hash = hashlib.md5(email).hexdigest()
                url = "//www.gravatar.com/avatar/%s?s=50&d=identicon" % email_hash
            else:
                # "mystery man" placeholder when no email is known
                url = "//www.gravatar.com/avatar/00000000000000000000000000000000?d=mm"
        else:
            url = URL(c="static", f="img", args="blank-user.gif")
    else:
        # No user id at all: generic placeholder
        url = URL(c="static", f="img", args="blank-user.gif")

    # Fill in default HTML attributes unless the caller supplied them.
    if "_class" not in attr:
        attr["_class"] = "avatar"
    if "_width" not in attr:
        attr["_width"] = size[0]
    if "_height" not in attr:
        attr["_height"] = size[1]
    return IMG(_src=url, **attr)
5,330,651
def _activation_summary(x): """Helper to create summaries for activations. Creates a summary that provides a histogram of activations. Creates a summary that measure the sparsity of activations. Args: x: Tensor Returns: nothing """ # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training # session. This helps the clarity of presentation on tensorboard. tf.histogram_summary(x.name + '/activations', x) tf.scalar_summary(x.name + '/sparsity', tf.nn.zero_fraction(x))
5,330,652
def json_loader(path_to_json_file: str) -> Dict[str, List[str]]:
    """Read a JSON file and return its content as a dictionary.

    Parameters
    ----------
    path_to_json_file: str
        The path to the JSON file.

    Returns
    -------
    Dict[str, List[str]]
        A dictionary of source codes with the corresponding lists of
        instrument symbols of interest for each source.
    """
    json_path = pathlib.Path(path_to_json_file)
    # json.load parses straight from the file handle.
    with json_path.open('r') as infile:
        return json.load(infile)
5,330,653
def writeJson(fname, data):
    """Serialize *data* as JSON into the file *fname*.

    Raises a generic Exception when the file cannot be opened for writing.
    """
    try:
        with open(fname, 'w') as handle:
            json.dump(data, handle)
    except IOError:
        raise Exception('Could not open {0!s} for writing'.format(fname))
5,330,654
def main( args ): """Main allows selection of the main subcommand (aka function). Each subcommand launches a separate function. The pydoc subcommand launches pydoc on this overall program file. :param args: the main command line arguments passed minus subcommand """ #print globals().keys() if len(args) == 0 or args[0] in ["h", "help", "-h", "--h", "--help","-help"] : verbosity= 'shortDesc' if args[0] in ["help" , "--help", "-help"]: verbosity = 'longDesc' program_name=os.path.basename(__file__) print "USAGE:",program_name, "[-h] subcommand [suboptions]" print "DESCRIPTION: A collection of tools to create and manipulate MIPs" print "SUBCOMMANDS:" #tw=TextWrap() for k in subcommands.keys(): text=subcommands[k][verbosity] text= textwrap.dedent(text) if text: text = "%s: %s " %(k, text ) print textwrap.fill(text,77, initial_indent='', subsequent_indent=' ') print "HELP:" print "pydoc detailed documentation of program structure" print "-h/-help short / long subcommand descriptions" print "For specific options:",program_name,"[subcommand] --help" elif args[0] == 'pydoc': os.system( "pydoc " + os.path.abspath(__file__) ) elif args[0] in subcommands.keys(): #execute sub command function globals()[args[0]](args[1:]) else: print "unknown subcommand (" + args[0] + ") use -h for list of subcommands!" sys.exit(-1) sys.exit(0)
5,330,655
def check(local_args):
    """Validate parsed command-line arguments.

    Raises AssertionError for out-of-range values and Exception for an
    unsupported architecture.
    """
    assert local_args.classes > 1
    assert local_args.zoom_factor in (1, 2, 4, 8)
    assert local_args.split in ('train', 'val', 'test')
    # Only the PSP architecture is supported; it additionally requires
    # crop sizes of the form 8k+1.
    if local_args.arch != 'psp':
        raise Exception('architecture not supported {} yet'.format(local_args.arch))
    assert (local_args.train_h - 1) % 8 == 0
    assert (local_args.train_w - 1) % 8 == 0
5,330,656
def test_manual_weights(tmpdir):
    """'regrid' will accept a SCRIP format weights file as the target"""
    # Minimal 2x2 lat/lon field with CF-style coordinate units.
    source = xarray.DataArray(
        [[0, 1], [2, 3]],
        name="var",
        dims=["lat", "lon"],
        coords={"lat": [-45, 45], "lon": [0, 180]},
    )
    source.lat.attrs["units"] = "degrees_north"
    source.lon.attrs["units"] = "degrees_east"
    source = source.astype("f4")

    # Generate weights from the source's own grid, then regrid with them.
    grid = identify_grid(source)
    weights = cdo_generate_weights(source, grid)

    result = regrid(source, weights=weights)
    assert result is not None
5,330,657
def get_region_solution_attribute(data, region_id, attribute, func, intervention):
    """Extract a single attribute from the case file's region solution.

    Scans the RegionSolution entries for the record matching *region_id*
    and *intervention* and returns ``func`` applied to *attribute*.
    Raises CasefileLookupError when no record matches.
    """
    region_solutions = (data.get('NEMSPDCaseFile')
                        .get('NemSpdOutputs')
                        .get('RegionSolution'))

    for solution in region_solutions:
        is_match = (solution['@RegionID'] == region_id
                    and solution['@Intervention'] == intervention)
        if is_match:
            return func(solution[attribute])

    message = f'Attribute not found: {region_id} {attribute} {intervention}'
    raise CasefileLookupError(message)
5,330,658
def initialize_channels():
    """Create the channel objects.

    Appends a single Channel with index 0 to the module-wide
    global_vars.CHANNEL_ARRAY.
    """
    # Only channel 0 is created here; the array grows by one per call.
    global_vars.CHANNEL_ARRAY.append(classes.Channel(0))
    return
5,330,659
def _reset_attributes(idaobject, attributes):
    """
    Delete an attribute or a list of attributes of an object, if the
    attribute exists.

    :param idaobject: object whose attributes should be removed
    :param attributes: a single attribute name (str) or an iterable of names
    """
    # Strings are iterable, so a bare string (or any non-iterable) is
    # treated as a single attribute name.  `or` replaces the original
    # bitwise `|`, which only worked by accident on booleans.
    if not hasattr(attributes, "__iter__") or isinstance(attributes, six.string_types):
        attributes = [attributes]
    for attribute in attributes:
        try:
            delattr(idaobject, attribute)
        except AttributeError:
            # Missing attributes are fine -- reset is best-effort.
            pass
5,330,660
def test_macsec_psk_shorter_ckn(dev, apdev, params):
    """MACsec PSK (shorter CKN)"""
    # Use a short 4-octet (8 hex digit) CKN on both peers instead of a
    # full-length one; always tear down MACsec state afterwards.
    try:
        ckn = "11223344"
        run_macsec_psk(dev, apdev, params, "macsec_psk_shorter_ckn",
                       ckn0=ckn, ckn1=ckn)
    finally:
        cleanup_macsec()
5,330,661
def conv1x1(in_planes: int, out_planes: int) -> nn.Conv2d:
    """Build a 1x1 convolution layer with a bias term."""
    return nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=1,
        bias=True,
    )
5,330,662
def stop_session_safely(spark_session):
    """
    Safely stops Spark session.

    This is no-op if running on Databricks - we shouldn't stop session
    there since it's managed by the platform, doing so fails the job.
    """
    spark_home = spark_session.conf.get("spark.home", "")
    if spark_home.startswith("/databricks"):
        # Managed session -- leave it alone.
        return
    spark_session.stop()
5,330,663
def test_no_mines_init():
    """Asserts that initializing with no mines works properly"""
    size = (30, 50)
    ms_game = MinesweeperEnv(size, 0)
    # Board dimensions and mine count must match the constructor args.
    assert size == ms_game.board_size
    assert ms_game.num_mines == 0
    # No moves have been made and every cell starts unknown.
    npt.assert_array_equal([], ms_game.hist)
    npt.assert_array_equal([[SPACE_UNKNOWN] * size[1]] * size[0], ms_game.board)
5,330,664
def make_ratio_map(amap, bmap):
    """Return the element-wise ratio of two PISA 2 style maps (amap/bmap)
    as another PISA 2 style map, suppressing divide-by-zero warnings."""
    validate_maps(amap, bmap)
    # Division by zero / NaN propagation is expected here; silence numpy.
    with np.errstate(divide='ignore', invalid='ignore'):
        ratio = amap['map'] / bmap['map']
    return {'ebins': amap['ebins'],
            'czbins': amap['czbins'],
            'map': ratio}
5,330,665
def _compose_duration(
    components_tags: Collection[Tags]) -> Mapping[str, Sequence[str]]:
    """Returns summed duration tags.

    Sums every component's DURATION_SECONDS value; if any component lacks
    the tag, there are no components, or a value fails to parse as float,
    an empty mapping is returned instead.
    """
    duration_values = [
        tags.one_or_none(DURATION_SECONDS) for tags in components_tags
    ]
    if not duration_values or None in duration_values:
        return {}
    try:
        total = math.fsum(map(float, duration_values))
    except ValueError:
        # Unparseable value: behave as if the duration were unknown.
        return {}
    return {DURATION_SECONDS.name: (str(total),)}
5,330,666
def test_process_aggregating_cubes_with_overlapping_frt(
    reliability_cube, overlapping_frt
):
    """Test that attempting to aggregate reliability calibration tables
    with overlapping forecast reference time bounds raises an exception.
    The presence of overlapping forecast reference time bounds indicates
    that the same forecast data has contributed to both tables, thus
    aggregating them would double count these contributions."""
    plugin = Plugin()
    # Overlap must be rejected, not silently aggregated.
    msg = "Reliability calibration tables have overlapping"
    with pytest.raises(ValueError, match=msg):
        plugin.process([reliability_cube, overlapping_frt])
5,330,667
def feature_spatial(fslDir, tempDir, aromaDir, melIC):
    """ This function extracts the spatial feature scores. For each IC it determines the fraction of the mixture modeled thresholded Z-maps respecitvely located within the CSF or at the brain edges, using predefined standardized masks.

    Parameters
    ---------------------------------------------------------------------------------
    fslDir:     Full path of the bin-directory of FSL
    tempDir:    Full path of a directory where temporary files can be stored (called 'temp_IC.nii.gz')
    aromaDir:   Full path of the ICA-AROMA directory, containing the mask-files (mask_edge.nii.gz, mask_csf.nii.gz & mask_out.nii.gz)
    melIC:      Full path of the nii.gz file containing mixture-modeled threholded (p>0.5) Z-maps, registered to the MNI152 2mm template

    Returns
    ---------------------------------------------------------------------------------
    edgeFract:  Array of the edge fraction feature scores for the components of the melIC file
    csfFract:   Array of the CSF fraction feature scores for the components of the melIC file"""

    # NOTE(review): Python 2 only -- uses the 'commands' module and print
    # statements; 'commands' was removed in Python 3 (use subprocess).

    # Get the number of ICs
    numICs = int(commands.getoutput('%sfslinfo %s | grep dim4 | head -n1 | awk \'{print $2}\'' % (fslDir, melIC) ))

    # Loop over ICs
    edgeFract=np.zeros(numICs)
    csfFract=np.zeros(numICs)
    for i in range(0,numICs):
        # Define temporary IC-file
        tempIC = os.path.join(tempDir,'temp_IC.nii.gz')

        # Extract IC from the merged melodic_IC_thr2MNI2mm file
        os.system(' '.join([os.path.join(fslDir,'fslroi'),
                            melIC,
                            tempIC,
                            str(i),
                            '1']))

        # Change to absolute Z-values
        os.system(' '.join([os.path.join(fslDir,'fslmaths'),
                            tempIC,
                            '-abs',
                            tempIC]))

        # Get sum of Z-values within the total Z-map (calculate via the mean and number of non-zero voxels)
        totVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                  tempIC,
                                                  '-V | awk \'{print $1}\''])))
        if not (totVox == 0):
            totMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                         tempIC,
                                                         '-M'])))
        else:
            print ' - The spatial map of component ' + str(i+1) + ' is empty. Please check!'
            totMean = 0
        totSum = totMean * totVox

        # Get sum of Z-values of the voxels located within the CSF (calculate via the mean and number of non-zero voxels)
        # NOTE(review): the mask paths below are relative ('mask_csf.nii.gz'
        # etc.) even though aromaDir is documented to contain them --
        # confirm the expected working directory, or join with aromaDir.
        csfVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                  tempIC,
                                                  '-k mask_csf.nii.gz',
                                                  '-V | awk \'{print $1}\''])))
        if not (csfVox == 0):
            csfMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                         tempIC,
                                                         '-k mask_csf.nii.gz',
                                                         '-M'])))
        else:
            csfMean = 0
        csfSum = csfMean * csfVox

        # Get sum of Z-values of the voxels located within the Edge (calculate via the mean and number of non-zero voxels)
        edgeVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                   tempIC,
                                                   '-k mask_edge.nii.gz',
                                                   '-V | awk \'{print $1}\''])))
        if not (edgeVox == 0):
            edgeMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                          tempIC,
                                                          '-k mask_edge.nii.gz',
                                                          '-M'])))
        else:
            edgeMean = 0
        edgeSum = edgeMean * edgeVox

        # Get sum of Z-values of the voxels located outside the brain (calculate via the mean and number of non-zero voxels)
        outVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                  tempIC,
                                                  '-k mask_out.nii.gz',
                                                  '-V | awk \'{print $1}\''])))
        if not (outVox == 0):
            outMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
                                                         tempIC,
                                                         '-k mask_out.nii.gz',
                                                         '-M'])))
        else:
            outMean = 0
        outSum = outMean * outVox

        # Determine edge and CSF fraction
        if not (totSum == 0):
            edgeFract[i] = (outSum + edgeSum)/(totSum - csfSum)
            csfFract[i] = csfSum / totSum
        else:
            # Empty component: both fractions default to zero.
            edgeFract[i]=0
            csfFract[i]=0

    # Remove the temporary IC-file
    os.remove(tempIC)

    # Return feature scores
    return edgeFract, csfFract
5,330,668
def p_translation_unit(p):
    """translation_unit : external_declaration | translation_unit external_declaration"""
    # NOTE: the docstring above IS the PLY/yacc grammar rule -- never edit it.
    if len(p) == 2:
        # Single external declaration: start a fresh list.
        p[0] = [p[1]]
    else:
        # Append the new declaration to the accumulated list.
        p[0] = p[1] + [p[2]]
5,330,669
def str_to_date(date_str, fmt=DATE_STR_FMT):
    """Parse *date_str* with *fmt* and return the corresponding date object."""
    parsed = datetime.datetime.strptime(date_str, fmt)
    return parsed.date()
5,330,670
def create_elasticsearch_domain(name, account_id, boto_session, lambda_role, cidr):
    """ Create Elastic Search Domain

    Creates a single-node t2.micro Elasticsearch 2.3 domain, retries
    applying an access policy (Lambda role + CIDR-restricted public
    access), then polls until the domain is ready.

    :param name: domain name
    :param account_id: AWS account id used to build the domain ARN
    :param boto_session: boto3 session used to create the 'es' client
    :param lambda_role: role ARN granted full es:* access
    :param cidr: source IP range granted es:* access
    :return: the domain endpoint once available
    """
    boto_elasticsearch = boto_session.client('es')
    total_time = 0
    # Region is hard-coded to ap-southeast-2 in the ARN below.
    resource = "arn:aws:es:ap-southeast-2:{0}:domain/{1}/*".format(account_id, name)
    # Two statements: unconditional access for the Lambda role, and
    # IP-restricted access for the provided CIDR.
    access_policy = {"Version": "2012-10-17",
                     "Statement": [
                         {"Effect": "Allow",
                          "Principal": {"AWS": str(lambda_role)},
                          "Action": "es:*",
                          "Resource": resource},
                         {"Effect": "Allow",
                          "Principal": {"AWS": "*"},
                          "Action": "es:*",
                          "Resource": resource,
                          "Condition": {"IpAddress": {"aws:SourceIp": "{0}".format(cidr)}}}
                     ]}
    endpoint = None
    time.sleep(5)
    try:
        print('Creating elasticsearch domain: {0}'.format(name))
        boto_elasticsearch.create_elasticsearch_domain(
            DomainName=name,
            ElasticsearchVersion='2.3',
            ElasticsearchClusterConfig={
                'InstanceType': 't2.micro.elasticsearch',
                'InstanceCount': 1,
                'DedicatedMasterEnabled': False,
                'ZoneAwarenessEnabled': False
            },
            EBSOptions={
                'EBSEnabled': True,
                'VolumeType': 'gp2',
                'VolumeSize': 20
            }
        )
        time.sleep(10)
        attempts = 1
        # Applying the policy can fail while the domain is still being
        # created, so retry up to 3 times with a short back-off.
        while True:
            print('Trying to apply access policies to elasticsearch domain: {0} (attempt: {1})'.format(name, attempts))
            try:
                boto_elasticsearch.update_elasticsearch_domain_config(
                    DomainName=name,
                    AccessPolicies=json.dumps(access_policy)
                )
                break
            except Exception as e:
                attempts += 1
                if attempts > 3:
                    print('Failed to apply access policies. Please run this script again with `-a delete -n {0}`'
                          'and wait approx 20 minutes before trying again'.format(name))
                    print('Full error was: {0}'.format(e))
                    exit(1)
                else:
                    time.sleep(2)
    except Exception as e:
        print('Could not create elasticsearch domain: {0}.'.format(name))
        print('Error was: {0}'.format(e))
        exit(1)
    # Poll until the domain finishes processing and exposes an endpoint.
    while True:
        try:
            es_status = boto_elasticsearch.describe_elasticsearch_domain(DomainName=name)
            processing = es_status['DomainStatus']['Processing']
            if not processing:
                endpoint = es_status['DomainStatus']['Endpoint']
                print('Domain: {0} has been created!'.format(name))
                break
            else:
                print('Domain: {0} is still processing. Waiting for 120 seconds before checking again'.format(name))
                time.sleep(120)
        except Exception:
            # describe may raise while the domain is not yet visible.
            # NOTE(review): only this exception path advances the
            # 30-minute timeout counter -- iterations through the
            # 'still processing' branch above never increment total_time;
            # confirm that is intended.
            print('Domain: {0} is still processing. Waiting for 120 seconds before checking again'.format(name))
            total_time += 120
            if total_time > 1800:
                print('Script has been running for over 30 minutes... This likely means that your elastic search domain'
                      ' has not created successfully. Please check the Elasticsearch Service dashboard in AWS console'
                      ' and delete the domain named {0} if it exists before trying again'.format(name))
                exit(1)
            time.sleep(120)
    return endpoint
5,330,671
def get_user(email):
    """Fetch a single user row by e-mail address.

    :param email: e-mail address to look up
    :return: tuple ``(row, error)`` where ``row`` is the fetched record
        (or None when no user matches) and ``error`` is ``'none'`` on
        success or the DB error code on failure; MySQL errors are handled
        by the try/except rather than propagated.
    """
    result = {}
    e = 'none'
    connection = _connect_to_db()
    try:
        with connection.cursor() as cursor:
            # Parameterized query: the original interpolated the raw
            # email into the SQL via an f-string, which was vulnerable
            # to SQL injection.
            sql = ("SELECT `id`, `name`, `last_name`, `email`, `password`, "
                   "`phone_number`, `address`, `profile_image_url`, "
                   "`city_id`, `account_type_id`, `lat_location`, "
                   "`long_location`, `created_at`, `updated_at`, `active` "
                   "FROM users WHERE users.email=%s")
            cursor.execute(sql, (email,))
            result = cursor.fetchone()
    except Exception as ex:
        # Surface only the error code to the caller.
        e = ex.args[0]
    finally:
        connection.close()
    return result, e
5,330,672
def index(request):
    """
    An example of a function-based view.

    method: get
    request: None
    response:
        type: html
    """
    return HttpResponse("Hello, world. You're at the polls index.")
5,330,673
def make_https_request(logger, url, jobs_manager, download=False, timeout_attempt=0):
    """
    Utility function for making HTTPs requests.

    Retries once on timeout, returns None on HTTP errors or non-200
    status, and records a job error then exits on fatal transport errors.

    :param logger: logger used for diagnostics
    :param url: URL to fetch
    :param jobs_manager: used to record job errors before exiting
    :param download: if True return the raw bytes, else the decoded text
    :param timeout_attempt: internal retry counter for timeouts
    """
    try:
        req = requests_retry_session().get(url, timeout=120)
        req.raise_for_status()
    except requests.exceptions.ConnectionError as c_err:
        logger.error("Connection Error while fetching the cert list")
        logger.error(str(c_err))
        jobs_manager.record_job_error()
        exit(1)
    except requests.exceptions.HTTPError as h_err:
        logger.warning("HTTP Error while fetching the cert list")
        logger.warning(str(h_err))
        return None
    except Timeout:
        # Must be caught BEFORE RequestException: Timeout is a subclass of
        # it, so in the original ordering this branch was unreachable and
        # the retry logic never ran.
        if timeout_attempt == 0:
            logger.warning("Timeout occurred. Attempting again...")
            return make_https_request(
                logger, url, jobs_manager, download, timeout_attempt=1
            )
        logger.error("Too many timeouts. Exiting")
        jobs_manager.record_job_error()
        exit(1)
    except requests.exceptions.RequestException as err:
        logger.error("Request exception while fetching the cert list")
        logger.error(str(err))
        jobs_manager.record_job_error()
        exit(1)
    except Exception as e:
        logger.error("UNKNOWN ERROR with the HTTP Request: " + str(e))
        jobs_manager.record_job_error()
        exit(1)

    if req.status_code != 200:
        logger.error("ERROR: Status code " + str(req.status_code))
        return None

    return req.content if download else req.text
5,330,674
async def _return_exported_sender(self: 'TelegramClient', sender):
    """
    Returns a borrowed exported sender. If all borrows have been
    returned, the sender is cleanly disconnected.
    """
    # Serialize against concurrent borrows/returns of the same DC sender.
    async with self._borrow_sender_lock:
        self._log[__name__].debug('Returning borrowed sender for dc_id %d',
                                  sender.dc_id)
        state, _ = self._borrowed_senders[sender.dc_id]
        # NOTE(review): only the borrow bookkeeping happens here; the
        # disconnect mentioned above is presumably driven elsewhere by the
        # state's counter -- confirm.
        state.add_return()
5,330,675
def info():
    """
    Root endpoint handler: serves the generic "about" response for each
    GET request on '/'.

    @TODO - Add index.html for the site of Ubik.

    :return: response for each get request on '/'.
    """
    return about_response()
5,330,676
def falshsort():
    """
    Do from here: https://en.wikipedia.org/wiki/Flashsort

    NOTE(review): unimplemented placeholder; the name looks like a typo
    for ``flashsort`` -- confirm no callers depend on it before renaming.

    :return: None
    """
    return None
5,330,677
def random_word(text, label, label_map, tokenizer, sel_prob):
    """Randomly select which sub-tokens keep their label for the loss.

    Each whitespace-separated word is sub-tokenized; only the first
    sub-token of a word may keep its mapped label (with probability
    *sel_prob* for "D"-labelled words, *sel_prob*/5 otherwise).  All
    other positions receive -1 and are ignored by the loss.

    :param text: str, whitespace-separated sentence
    :param label: per-word string labels such as ["D", "O", "O", "D"]
    :param label_map: per-word integer labels such as [0, 1, 1, 0]
    :param tokenizer: Tokenizer object providing ``tokenize``
    :param sel_prob: probability of computing the loss for a token
    :return: (list of str, list of int) -- sub-tokens and their labels
    """
    words = text.strip().split(" ")
    assert len(words) == len(label_map)
    assert len(words) == len(label)

    out_tokens = []
    out_labels = []
    for word, word_label, mapped_label in zip(words, label, label_map):
        pieces = tokenizer.tokenize(word)
        out_tokens.extend(pieces)

        # One random draw per word decides whether its label is kept;
        # "D" words keep theirs five times more often than the rest.
        draw = random.random()
        threshold = sel_prob if word_label == "D" else sel_prob / 5.0
        out_labels.append(mapped_label if draw < threshold else -1)

        # Continuation sub-tokens never carry a label.
        out_labels.extend([-1] * (len(pieces) - 1))

    assert len(out_labels) == len(out_tokens)
    return out_tokens, out_labels
5,330,678
def ITERATIVETEST_Error_Of_Input_Parameter_inZSubsample():
    """Tests occurrence of an error because of an invalid input value provided at field "inZSubsample"."""
    Logging.infoHTML( "<b>ToDo: Add more \"TestData\" objects to maximize variations!</b>" )
    # Map of named invalid variations; each entry is run against the
    # shared error-checking routine returned alongside them.
    testDataVariations = {
        "Invalid_Variation_1" : __getInvalidTestDataVariation1_inZSubsample(),
        "Invalid_Variation_2" : __getInvalidTestDataVariation2_inZSubsample(),
    }
    return ( testDataVariations, testError_inZSubsample )
5,330,679
def test_xclip_failure():
    """Check that (mocked) failure raises an exception."""
    # Simulate the xclip subprocess exiting non-zero with stderr 'bar';
    # both get() and put() must surface that stderr in a ClipboardError.
    with mock.patch('safe.clip.run') as run:
        run.return_value = 1, 'foo', 'bar'
        with pytest.raises(ClipboardError) as ei:
            Xclip().get()
        e = ei.value
        assert 'failed with stderr: bar' in str(e)
        with pytest.raises(ClipboardError) as ei:
            Xclip().put('hai')
        e = ei.value
        assert 'failed with stderr: bar' in str(e)
5,330,680
def _match_storm_objects(first_prediction_dict, second_prediction_dict,
                         top_match_dir_name):
    """Matches storm objects between first and second prediction files.

    F = number of storm objects in first prediction file

    :param first_prediction_dict: Dictionary returned by
        `prediction_io.read_ungridded_predictions` for first prediction file.
    :param second_prediction_dict: Same but for second prediction file.
    :param top_match_dir_name: See documentation at top of file.
    :return: first_prediction_dict: Same as input, but containing only storm
        objects matched with one in the second file.
    :return: second_prediction_dict: Same as input, but containing only storm
        objects matched with one in the first file.

    Both dictionaries have storm objects in the same order.
    """

    first_storm_times_unix_sec = first_prediction_dict[
        prediction_io.STORM_TIMES_KEY]
    first_unique_times_unix_sec = numpy.unique(first_storm_times_unix_sec)

    first_indices = numpy.array([], dtype=int)
    second_indices = numpy.array([], dtype=int)

    # Accumulate matched index pairs, reading one match file per unique
    # valid time in the first prediction file.
    for i in range(len(first_unique_times_unix_sec)):
        this_match_file_name = tracking_io.find_match_file(
            top_directory_name=top_match_dir_name,
            valid_time_unix_sec=first_unique_times_unix_sec[i],
            raise_error_if_missing=True)

        print('Reading data from: "{0:s}"...'.format(this_match_file_name))
        this_match_dict = tracking_io.read_matches(this_match_file_name)[0]

        these_first_indices, these_second_indices = (
            _match_storm_objects_one_time(
                first_prediction_dict=first_prediction_dict,
                second_prediction_dict=second_prediction_dict,
                match_dict=this_match_dict)
        )

        first_indices = numpy.concatenate((first_indices, these_first_indices))
        second_indices = numpy.concatenate((
            second_indices, these_second_indices
        ))

    # De-duplicate so each storm object appears at most once on either
    # side; both index arrays are filtered together to keep pairs aligned.
    _, unique_subindices = numpy.unique(first_indices, return_index=True)
    first_indices = first_indices[unique_subindices]
    second_indices = second_indices[unique_subindices]

    _, unique_subindices = numpy.unique(second_indices, return_index=True)
    first_indices = first_indices[unique_subindices]
    second_indices = second_indices[unique_subindices]

    # Subset both dictionaries down to the matched storm objects.
    first_prediction_dict = prediction_io.subset_ungridded_predictions(
        prediction_dict=first_prediction_dict,
        desired_storm_indices=first_indices)
    second_prediction_dict = prediction_io.subset_ungridded_predictions(
        prediction_dict=second_prediction_dict,
        desired_storm_indices=second_indices)

    return first_prediction_dict, second_prediction_dict
5,330,681
def get_default_branch(base_url: str, auth: Optional[AuthBase], ssl_verify: bool = True) -> dict:
    """Fetch the default reference (branch) from a Nessie server.

    :param base_url: base Nessie url
    :param auth: Authentication settings
    :param ssl_verify: ignore ssl errors if False
    :return: json Nessie branch
    """
    response = _get(base_url + "/trees/tree", auth, ssl_verify=ssl_verify)
    return cast(dict, response)
5,330,682
def make_source_thesaurus(source_thesaurus=SOURCE_THESAURUS_FILE):
    """
    Get dict mapping country name to `SourceObject` for the country.

    Parameters
    ----------
    source_thesaurus : str
        Filepath for the source thesaurus data.

    Returns
    -------
    Dict of {"Country": SourceObject} pairs.
    """
    # NOTE(review): Python 2 only -- 'rbU' mode and str.decode() below do
    # not work unchanged on Python 3.
    with open(source_thesaurus, 'rbU') as f:
        f.readline()  # skip headers
        # NOTE(review): DictReader also consumes a header row, so the
        # first line is skipped AND the second becomes the field names --
        # confirm the file really has two header lines.
        csvreader = csv.DictReader(f)
        source_thesaurus = {}
        for row in csvreader:
            source_name = row['Source Name'].decode(UNICODE_ENCODING)
            source_country = row['Country/Region'].decode(UNICODE_ENCODING)
            source_url = row['Link/File'].decode(UNICODE_ENCODING)
            source_priority = row['Prioritization'].decode(UNICODE_ENCODING)
            # TODO: get year info from other other file (download from Google Drive)
            source_thesaurus[source_name] = SourceObject(name=source_name,
                                                         country=source_country,
                                                         priority=source_priority,
                                                         url=source_url)
    return source_thesaurus
5,330,683
def extract_filter(s):
    """Extracts a filter from str `s`

    Parameters
    ----------
    s : str
        * A str that may or may not have a filter identified by ', that HUMAN VALUE'

    Returns
    -------
    s : str
        * str `s` without the parsed_filter included
    parsed_filter : dict
        * filter attributes mapped from filter from `s` if any found
    """
    # re.split()'s third positional argument is maxsplit, not flags:
    # passing re.IGNORECASE positionally silently meant maxsplit=2 with
    # case-SENSITIVE matching.  Pass it via the flags keyword instead.
    split_filter = re.split(pytan.constants.FILTER_RE, s, flags=re.IGNORECASE)
    # split_filter = ['Folder Name Search with RegEx Match', ' is:.*']

    parsed_filter = {}

    # if filter parsed out from s
    if len(split_filter) > 1:
        # get new s from index 0
        s = split_filter[0].strip()
        # s='Folder Name Search with RegEx Match'

        # get the filter string from index 1
        parsed_filter = split_filter[1].strip()
        # parsed_filter='is:.*'

        parsed_filter = map_filter(parsed_filter)
        if not parsed_filter:
            err = "Filter {!r} is not a valid filter!".format
            raise pytan.exceptions.HumanParserError(err(split_filter[1]))

    dbg = 'parsed new string to {!r} and filters to:\n{}'.format
    humanlog.debug(dbg(s, jsonify(parsed_filter)))

    return s, parsed_filter
5,330,684
def sigmoid(
        x,
        sigmoid_type: str = "tanh",
        normalization_range: Tuple[Union[float, int], Union[float, int]] = (0, 1)
):
    """
    A sigmoid function.

    From Wikipedia (https://en.wikipedia.org/wiki/Sigmoid_function):
        A sigmoid function is a mathematical function having a
        characteristic "S"-shaped curve or sigmoid curve.

    Args:
        x: The input

        sigmoid_type: Type of sigmoid function to use [str]. Can be one of:
            * "tanh" or "logistic" (same thing)
            * "arctan"
            * "polynomial"

        normalization_range: Range in which to normalize the sigmoid,
            given as a two-element tuple (min, max). After normalization:
            >>> sigmoid(-Inf) == normalization_range[0]
            >>> sigmoid(Inf) == normalization_range[1]

            In the special case of (0, 1): sigmoid(0) == 0.5.
            In the special case of (-1, 1): sigmoid(0) == 0.

    Returns: The value of the sigmoid.
    """
    ### Sigmoid equations given here under the (-1, 1) normalization:
    if sigmoid_type in ("tanh", "logistic"):
        # tanh(x) is a scaled, shifted logistic curve; after normalization
        # the two are identical.  The original compared against
        # ("tanh" or "logistic"), which evaluates to "tanh" only, so
        # "logistic" incorrectly raised ValueError despite being documented.
        s = np.tanh(x)
    elif sigmoid_type == "arctan":
        s = 2 / pi * np.arctan(pi / 2 * x)
    elif sigmoid_type == "polynomial":
        s = x / (1 + x ** 2) ** 0.5
    else:
        raise ValueError("Bad value of parameter 'type'!")

    ### Normalize from (-1, 1) to the requested range.
    # (renamed from min/max to avoid shadowing the builtins)
    lo = normalization_range[0]
    hi = normalization_range[1]
    s_normalized = s * (hi - lo) / 2 + (hi + lo) / 2

    return s_normalized
5,330,685
def test_get_nonexistent_public_key_fails(app, mock_get):
    """
    Looking up a key id that has no matching public key must raise JWTError.
    """
    mock_get()
    missing_kid = "nonsense"
    with pytest.raises(JWTError):
        get_public_key(kid=missing_kid)
5,330,686
def interp(x, x1, y1, x2, y2):
    """Linearly interpolate the y-value at ``x`` on the line through
    (x1, y1) and (x2, y2)."""
    weighted_sum = y1 * (x2 - x) + y2 * (x - x1)
    return weighted_sum / (x2 - x1)
5,330,687
def msm_distance_measure_getter(X):
    """
    Build the parameter-range dictionary for the MSM distance measure.

    :param X: dataset to derive parameter ranges from
    :return: dict holding the wrapped distance measure, a sampler for the
        dimension to use, and the candidate grid for the cost parameter ``c``
    """
    num_dimensions = 1  # todo use other dimensions
    c_grid = [
        0.01, 0.01375, 0.0175, 0.02125, 0.025, 0.02875, 0.0325, 0.03625,
        0.04, 0.04375, 0.0475, 0.05125, 0.055, 0.05875, 0.0625, 0.06625,
        0.07, 0.07375, 0.0775, 0.08125, 0.085, 0.08875, 0.0925, 0.09625,
        0.1, 0.136, 0.172, 0.208, 0.244, 0.28, 0.316, 0.352, 0.388, 0.424,
        0.46, 0.496, 0.532, 0.568, 0.604, 0.64, 0.676, 0.712, 0.748, 0.784,
        0.82, 0.856, 0.892, 0.928, 0.964, 1, 1.36, 1.72, 2.08, 2.44, 2.8,
        3.16, 3.52, 3.88, 4.24, 4.6, 4.96, 5.32, 5.68, 6.04, 6.4, 6.76,
        7.12, 7.48, 7.84, 8.2, 8.56, 8.92, 9.28, 9.64, 10, 13.6, 17.2,
        20.8, 24.4, 28, 31.6, 35.2, 38.8, 42.4, 46, 49.6, 53.2, 56.8,
        60.4, 64, 67.6, 71.2, 74.8, 78.4, 82, 85.6, 89.2, 92.8, 96.4, 100,
    ]
    return {
        "distance_measure": [cython_wrapper(msm_distance)],
        "dim_to_use": stats.randint(low=0, high=num_dimensions),
        "c": c_grid,
    }
5,330,688
def gen_cpmfgp_test_data_from_config_file(config_file_name, raw_func, num_tr_data,
                                          num_te_data):
    """ Generates datasets for CP Multi-fidelity GP fitting. """
    config = load_config_file(config_file_name)
    proc_func = get_processed_func_from_raw_func_via_config(raw_func, config)

    def _sample(num_pts):
        # Draw (z, x) points from the config space and evaluate proc_func on them.
        zx = sample_from_config_space(config, num_pts)
        yy = [proc_func(z, x) for (z, x) in zx]
        zz = get_idxs_from_list_of_lists(zx, 0)
        xx = get_idxs_from_list_of_lists(zx, 1)
        return zz, xx, yy, zx

    # Train split first, then test split (same order as before).
    ZZ_tr, XX_tr, YY_tr, ZX_tr = _sample(num_tr_data)
    ZZ_te, XX_te, YY_te, ZX_te = _sample(num_te_data)
    return Namespace(config_file_name=config_file_name, config=config,
                     raw_func=raw_func,
                     ZZ_train=ZZ_tr, XX_train=XX_tr, YY_train=YY_tr,
                     ZX_train=ZX_tr,
                     ZZ_test=ZZ_te, XX_test=XX_te, YY_test=YY_te,
                     ZX_test=ZX_te)
5,330,689
def csv_to_postgres(engine, file: str, table_name: str):
    """
    Load a *.csv file into a database table, replacing any existing table.

    :param engine: SQLAlchemy connection/engine for the target database
    :param file: Full filepath of the *.csv file
    :param table_name: Name of the table to be created
    :return: None
    """
    frame = pd.read_csv(file, index_col=False)
    # Postgres treats identifiers case-sensitively when quoted, so normalise
    # every column name to lowercase before writing.
    frame.columns = frame.columns.str.lower()
    # A csv exported with its index comes back as 'unnamed: 0'; name it 'id'.
    frame.rename(columns={'unnamed: 0': 'id'}, inplace=True)
    frame.to_sql(name=table_name, con=engine, if_exists='replace', index=False)
5,330,690
def keypair() -> None:
    """
    KeyPair administration commands.

    Intentionally empty: this appears to be a CLI command-group stub (e.g. a
    click group) whose subcommands are registered elsewhere -- NOTE(review):
    no decorator is visible in this chunk; confirm at the definition site.
    """
5,330,691
def find_best_ref_match_partial(trb: str, ref_df: pd.DataFrame):
    """Locate the row of ``ref_df`` best matching the given TRB ONLY.

    Placeholder -- always raises NotImplementedError.
    """
    raise NotImplementedError
5,330,692
def _custom_padd(a, min_power_of_2=1024, min_zero_padd=50, zero_padd_ratio=0.5):
    """ Private helper to make a zeros-mirror-zeros padd to the next power of
    two of a.

    Parameters
    ----------
    a : np.ndarray, the array to padd.

    min_power_of_2 : int (default=1024),
        min length (power of two) for the padded array.

    zero_padd_ratio : float (default=0.5),
        determine the ratio of the length of zero padds (either for the first
        or the second zero-padd) w.r.t the array length.

    min_zero_padd : int (default=50)
        min zero padd, either for the first or the second zero-padd.

    Note:
    -----
    Having a signal close to ~200 can make trouble.

    Results
    -------
    a : np.ndarray, the padded array.

    p_total : tuple of int, the applied padd
        (0, not a tuple, when no padding was needed -- see NOTE below).
    """
    if not np.log2(min_power_of_2).is_integer():
        raise ValueError("min_power_of_2 should be a power of two, "
                         "got {0}".format(min_power_of_2))
    # Target length: next power of two, but never below min_power_of_2.
    nextpow2 = int(np.power(2, np.ceil(np.log2(len(a)))))
    nextpow2 = min_power_of_2 if nextpow2 < min_power_of_2 else nextpow2
    diff = nextpow2 - len(a)
    # define the three possible padding
    zero_padd_len = int(zero_padd_ratio * len(a))
    too_short = zero_padd_len < min_zero_padd
    zero_padd_len = min_zero_padd if too_short else zero_padd_len
    p_zeros = (zero_padd_len, zero_padd_len)
    # Split the total padding between the two sides (extra sample goes right
    # when len(a) is odd, keeping left + right == diff).
    len_padd_left = int(diff / 2)
    len_padd_right = int(diff / 2) + (len(a) % 2)
    p_total = (len_padd_left, len_padd_right)
    if diff == 0:
        # [ s ]
        # NOTE(review): returns the scalar 0 instead of a (0, 0) tuple even
        # though the docstring promises a tuple; kept as-is for backward
        # compatibility with callers that may test `p == 0`.
        p_total = 0
        return a, p_total
    elif diff <= 2 * zero_padd_len:
        # [ /zeros | s | zeros/ ]
        # BUG FIX: '<=' instead of '<'. With strict '<', the exact boundary
        # diff == 2 * zero_padd_len matched no branch and fell through to the
        # final else, where the reflect widths became negative and np.pad
        # raised. The same fix is applied to the next branch.
        a = padd(a, p_total)
        return a, p_total
    elif diff <= 4 * zero_padd_len:
        # [ zeros | mirror-signal | s | mirror-signal | zeros ]
        len_reflect_padd_left = len_padd_left - zero_padd_len
        len_reflect_padd_right = len_padd_right - zero_padd_len
        p_reflect = (len_reflect_padd_left, len_reflect_padd_right)
        # padding
        a = np.pad(a, p_reflect, mode='reflect')
        a = padd(a, p_zeros)
        return a, p_total
    else:
        # [ zeros | mirror-signal | zeros | s | zeros | mirror-signal | zeros ]
        len_reflect_padd_left = len_padd_left - 2 * zero_padd_len
        len_reflect_padd_right = len_padd_right - 2 * zero_padd_len
        p_reflect = (len_reflect_padd_left, len_reflect_padd_right)
        # padding
        a = padd(a, p_zeros)
        a = np.pad(a, p_reflect, mode='reflect')
        a = padd(a, p_zeros)
        return a, p_total
5,330,693
def encode_multipart_formdata(fields, files):
    """
    Encode multipart data to be used in data import
    adapted from: http://code.activestate.com/recipes/146306/
    :param fields: sequence of (name, value) elements for regular form fields.
    :param files: sequence of (name, filename, value) elements for data to be
        uploaded as files
    :return: (content_type, body) ready for httplib.HTTP instance
    """
    boundary = '-------tHISiSsoMeMulTIFoRMbOUNDaRY---'
    parts = []
    # Plain form fields first.
    for key, value in fields:
        parts.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"' % key,
            '',
            value,
        ])
    # Then the file attachments, each with its guessed content type.
    for key, filename, value in files:
        parts.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"; filename="%s"'
            % (key, filename),
            'Content-Type: %s' % get_content_type(filename),
            '',
            value,
        ])
    parts.append('--' + boundary + '--')
    parts.append('')

    # Serialize every part as bytes terminated by CRLF.
    buf = BytesIO()
    for part in parts:
        buf.write(part if isinstance(part, bytes) else part.encode())
        buf.write(b"\r\n")
    content_type = 'multipart/form-data; boundary=%s' % boundary
    return content_type, buf.getvalue()
5,330,694
def is_recording():
    """Return the current state of the module-level key-macro ``recording``
    flag.

    Read-only accessor; the ``global`` declaration of the original is a no-op
    for reads and is omitted here.
    """
    return recording
5,330,695
def test_reward_is_given(complete_mazeworld_states):
    """Check the cumulative reward for finishing the maze is within 10% of
    EXPECTED_REWARD (tests Interval).

    Args:
        complete_mazeworld_states: list of every state from finishing mazeworld
    """
    total_reward = sum(reward for _, reward, _, _ in complete_mazeworld_states)
    lower, upper = EXPECTED_REWARD * 0.90, EXPECTED_REWARD * 1.10
    assert lower <= total_reward <= upper, \
        "The reward received was {} but we expected within 10% of {}".format(total_reward, EXPECTED_REWARD)
5,330,696
def download_image_urls(
    urls_filename: Union[Path, str], synsets: List[str], max_concurrent: int = 50, rewrite: bool = False
) -> Dict[str, Optional[List[str]]]:
    """Fetch image urls for every synset and save them as json at the given path.

    Args:
        urls_filename: a path to the file where to save the urls.
        synsets: a list of synsets for which to download urls.
        max_concurrent (optional): a maximum number of concurrent requests.
        rewrite (optional): if True, will download new urls even if file exists.
    """
    print("Downloading image urls.")
    download_coro = _download_image_urls(urls_filename, synsets, max_concurrent, rewrite)
    return asyncio.run(download_coro)
5,330,697
def generate_new_split(lines1, lines2, rng, cutoff=14937):
    """Combine ``lines1`` and ``lines2``, shuffle with ``rng``, and re-split at
    ``cutoff``. Useful for working with random splits of data."""
    combined = list(lines1)  # lines1 may be any iterable, not just a list
    combined.extend(lines2)
    order = rng.permutation(len(combined))
    shuffled = [combined[i] for i in order]
    first, second = shuffled[:cutoff], shuffled[cutoff:]
    m1 = confusion_matrix.get_embedding_matrix(first, normalize=True)
    m2 = confusion_matrix.get_embedding_matrix(second, normalize=True)
    return (first, m1, second, m2)
5,330,698
def ProcessPeopleData():
    """
    Process the person-relation dataset: count relations, clean, format, and
    split into train/dev sets.
    """
    processor = PeopleProcessor('./People/origin/all_data.txt')
    processor.relation_counts()
    processor.data_cleaning()
    processor.data_formatting()
    processor.divide_datasets(do_dev=True, balance_sample=False)
5,330,699