content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def sweep_gateDelay(qubit, sweepPts):
    """
    Sweep the gate delay associated with a qubit channel using a simple Id, Id, X90, X90 sequence.

    Parameters
    ----------
    qubit : Channels.LogicalChannel
        Qubit channel for which to create sequences
    sweepPts : int/float iterable
        Iterable to sweep the gate delay over (seconds)

    Returns
    -------
    None
        This function produces a set of files enumerating the sweepPts given in
        the parameters and returns nothing. It is currently not used and will be
        deprecated in the future.

    Examples
    --------
    >>> sweep_gateDelay(q1, np.linspace(20.0e-9, 220.0e-9, 101))
    """
    generator = qubit.phys_chan.generator
    oldDelay = generator.gateDelay

    for ct, delay in enumerate(sweepPts):
        seqs = [[Id(qubit, length=120e-9), Id(qubit), MEAS(qubit)],
                [Id(qubit, length=120e-9), MEAS(qubit)],
                [Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)],
                [Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)]]
        generator.gateDelay = delay
        compile_to_hardware(seqs, 'BlankingSweeps/GateDelay', suffix='_{}'.format(ct + 1))

    generator.gateDelay = oldDelay
20,300
def execute(args): """Run the server.""" os.environ['LOCAL_DEVELOPMENT'] = 'True' common.kill_leftover_emulators() if not args.skip_install_deps: common.install_dependencies() # Do this everytime as a past deployment might have changed these. appengine.symlink_dirs() # Deploy all yaml files from test project for basic appengine deployment and # local testing to work. This needs to be called on every iteration as a past # deployment might have overwritten or deleted these config files. yaml_paths = local_config.GAEConfig().get_absolute_path('deployment.prod') appengine.copy_yamls_and_preprocess(yaml_paths) # Build templates. appengine.build_templates() # Clean storage directory if needed. if args.bootstrap or args.clean: if os.path.exists(args.storage_path): print 'Clearing local datastore by removing %s.' % args.storage_path shutil.rmtree(args.storage_path) if not os.path.exists(args.storage_path): os.makedirs(args.storage_path) # Set up local GCS buckets and symlinks. bootstrap_gcs(args.storage_path) # Start pubsub emulator. pubsub_emulator = test_utils.start_cloud_emulator( 'pubsub', args=['--host-port=' + constants.PUBSUB_EMULATOR_HOST], data_dir=args.storage_path) test_utils.setup_pubsub(constants.TEST_APP_ID) # Start our custom GCS emulator. local_gcs = common.execute_async( 'bazel run //go/testing/gcs ' '--sandbox_writable_path=$(pwd)/../local/storage/local_gcs ' '-- -storage-path=$(pwd)/../local/storage/local_gcs', cwd='src') if args.bootstrap: bootstrap_db() start_cron_threads() try: common.execute( '{dev_appserver_path} -A {project} --skip_sdk_update_check=1 ' '--storage_path={storage_path} --port={appserver_port} ' '--admin_port={admin_port} ' '--datastore_emulator_port={datastore_emulator_port} ' '--require_indexes=true --log_level={log_level} ' '--dev_appserver_log_level={log_level} ' '--support_datastore_emulator=true ' '--env_var LOCAL_DEVELOPMENT=True ' '--env_var PUBSUB_EMULATOR_HOST={pubsub_emulator_host} ' '--env_var LOCAL_GCS_BUCKETS_PATH=local_gcs ' '--env_var LOCAL_GCS_SERVER_HOST={local_gcs_server_host} ' 'src/appengine src/appengine/cron-service.yaml'.format( dev_appserver_path=_dev_appserver_path(), project=constants.TEST_APP_ID, storage_path=args.storage_path, appserver_port=constants.DEV_APPSERVER_PORT, admin_port=constants.DEV_APPSERVER_ADMIN_PORT, datastore_emulator_port=constants.DATASTORE_EMULATOR_PORT, log_level=args.log_level, pubsub_emulator_host=constants.PUBSUB_EMULATOR_HOST, local_gcs_server_host=constants.LOCAL_GCS_SERVER_HOST)) except KeyboardInterrupt: print 'Server has been stopped. Exit.' pubsub_emulator.cleanup() local_gcs.terminate()
20,301
def add(n): """Add 1.""" return n + 1
20,302
def get_system_status(memory_total=False, memory_total_actual=False, memory_total_usage=False, memory_total_free=False, all_pids=False, swap_memory=False, pid=False):
    """
    Parameters
    ----------
    memory_total: bool
        return total physical memory
    memory_total_actual: bool
        return memory currently available
    memory_total_usage: bool
        return memory currently used
    memory_total_free: bool
        return memory currently free
    all_pids: bool
        return list of all process ids
    swap_memory: bool
        return swap memory statistics
    pid: bool
        return the id of the current process

    Note
    ----
    All memory is returned in `MiB`
    To calculate memory_percent:
        get_system_status(memory_total_usage=True) / get_system_status(memory_total=True) * 100
    """
    import os
    import psutil
    # ====== general system query ====== #
    if memory_total:
        return psutil.virtual_memory().total / float(2**20)
    if memory_total_actual:
        return psutil.virtual_memory().available / float(2**20)
    if memory_total_usage:
        return psutil.virtual_memory().used / float(2**20)
    if memory_total_free:
        return psutil.virtual_memory().free / float(2**20)
    if swap_memory:
        # psutil returns an immutable namedtuple, so build a rescaled copy
        # rather than assigning to its fields in place.
        tmp = psutil.swap_memory()
        scale = float(2**20)
        return tmp._replace(total=tmp.total / scale, used=tmp.used / scale,
                            free=tmp.free / scale, sin=tmp.sin / scale,
                            sout=tmp.sout / scale)
    if all_pids:
        return psutil.pids()
    if pid:
        return os.getpid()
20,303
def _area(x1, y1, x2, y2, x3, y3): """Heron's formula.""" a = np.sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2)) b = np.sqrt(pow(x3 - x2, 2) + pow(y3 - y2, 2)) c = np.sqrt(pow(x1 - x3, 2) + pow(y3 - y1, 2)) s = (a + b + c) / 2 return np.sqrt(s * (s - a) * (s - b) * (s - c))
20,304
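A quick worked check for the `_area` helper above, assuming that function (and the NumPy it relies on) is importable: for the right triangle with vertices (0, 0), (3, 0) and (0, 4) the side lengths are 3, 4 and 5, so s = 6 and Heron's formula gives sqrt(6 * 3 * 2 * 1) = 6.

import numpy as np

# Hypothetical check of _area from the entry above: legs 3 and 4 -> area 6.
area = _area(0.0, 0.0, 3.0, 0.0, 0.0, 4.0)
assert np.isclose(area, 6.0)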
def set_link_color(color='black', p_id=None, na_id=None): """ Set link's color in project(s) or join. :param color: 'black' / 'red', :param p_id: <Project.p_id>, optional :param na_id: '<NetworkApplication.na_id>, optional :type color: str :type p_id: int :type na_id: int """ session = g.session if p_id: nas = (session.query(db.NetworkApplication.na_id) .filter(db.NetworkApplication.p_id == p_id) .subquery()) (session.query(db.DF_Module) .filter(db.DF_Module.na_id.in_(nas)) .update({'color': color}, synchronize_session=False)) elif na_id: (session.query(db.DF_Module) .filter(db.DF_Module.na_id == na_id) .update({'color': color})) session.commit()
20,305
def test_ljmc_imported(): """Sample test, will always pass so long as import statement worked""" assert "ljmc" in sys.modules
20,306
def plot_coarray(array, ax=None, show_location_errors=False):
    """Visualizes the difference coarray of the input array.

    Args:
        array (~doatools.model.arrays.ArrayDesign): A sensor array.
        ax (~matplotlib.axes.Axes): Matplotlib axes used for the plot. If not
            specified, a new figure will be created. Default value is ``None``.
        show_location_errors (bool): If set to ``True``, will visualize the
            perturbed array if the input array has location errors.

    Returns:
        The axes object containing the plot.
    """
    return _plot_array_impl(array, ax, True, show_location_errors)
20,307
def get_publicKey(usrID): # TODO: from barbican """ Get the user's public key Returns: Public key from meta-container (Keys) in meta-tenant """ auth = v3.Password(auth_url=AUTH_URL,username=SWIFT_USER,password=SWIFT_PASS,project_name='demo',project_domain_id="Default",user_domain_name='Default') sess = session.Session(auth=auth) barbican = bc.Client(session=sess) keystone = kc.Client(session=sess) try: user = keystone.users.get(usrID) dict_keys = json.loads(user.description) ref = dict_keys.get('Public_Key','') ref = "%s/secrets/%s" %(BARBICAN_URL,ref) secret_node = barbican.secrets.get(ref) except Exception,err: return return secret_node.payload
20,308
def pad_sequence(sequences, batch_first=False, padding_value=0.0): """Pad a list of variable-length Variables. This method stacks a list of variable-length :obj:`nnabla.Variable` s with the padding_value. :math:`T_i` is the length of the :math:`i`-th Variable in the sequences. :math:`B` is the batch size equal to the length of the sequences. :math:`T` is the max of :math:`T_i` for all :math:`i`. :math:`*` is the remaining dimensions including none. .. note:: This function **must** be used the dynamic computation mode. Example: .. code-block:: python import numpy as np import nnabla as nn import nnabla.functions as F import nnabla.utils.rnn as rnn_utils nn.set_auto_forward(True) l2v = lambda ldata: nn.Variable.from_numpy_array(np.asarray(ldata)) a = l2v([1, 1, 1, 1]) b = l2v([2, 2, 2]) c = l2v([2, 2, 2]) d = l2v([3, 3]) e = l2v([3, 3]) sequences = [a, b, c, d, e] padded_sequence = rnn_utils.pad_sequence(sequences) print(padded_sequence.d) Args: sequences (list of :obj:`nnabla.Variable`): Sequence of the variable of (:math:`T_i`, :math:`*`) shape. batch_first (bool): If False, output is of (:math:`T`, :math:`B`, :math:`*`) shape, otherwise (:math:`B`, :math:`T`, :math:`*`). padding_value (float): Padding value. Returns: :obj:`nnabla.Variable` of (:math:`T`, :math:`B`, :math:`*`) or (:math:`B`, :math:`T`, :math:`*`) shape """ B = len(sequences) T = max([e.shape[0] for e in sequences]) shape0 = (B, T) if batch_first else (T, B) shape1 = sequences[0].shape[1:] padded_sequence = F.constant(padding_value, shape0 + shape1) for b, s in enumerate(sequences): l = s.shape[0] if batch_first: padded_sequence[b, :l, ...] = s else: padded_sequence[:l, b, ...] = s return padded_sequence
20,309
def _fetch_global_config(config_url, github_release_url, gh_token): """ Fetch the index_runner_spec configuration file from the Github release using either the direct URL to the file or by querying the repo's release info using the GITHUB API. """ if config_url: print('Fetching config from the direct url') # Fetch the config directly from config_url with urllib.request.urlopen(config_url) as res: # nosec return yaml.safe_load(res) # type: ignore else: print('Fetching config from the release info') # Fetch the config url from the release info if gh_token: headers = {'Authorization': f'token {gh_token}'} else: headers = {} tries = 0 # Sometimes Github returns usage errors and a retry will solve it while True: release_info = requests.get(github_release_url, headers=headers).json() if release_info.get('assets'): break if tries == _FETCH_CONFIG_RETRIES: raise RuntimeError(f"Cannot fetch config from {github_release_url}: {release_info}") tries += 1 for asset in release_info['assets']: if asset['name'] == 'config.yaml': download_url = asset['browser_download_url'] with urllib.request.urlopen(download_url) as res: # nosec return yaml.safe_load(res) raise RuntimeError("Unable to load the config.yaml file from index_runner_spec")
20,310
def parse_color(c, desc): """Check that a given value is a color.""" return c
20,311
def get_package_dir(): """ Gets directory where package is installed :return: """ return os.path.dirname(ndextcgaloader.__file__)
20,312
def __virtual__(): """Only load gnocchiv1 if requirements are available.""" if REQUIREMENTS_MET: return 'gnocchiv1' else: return False, ("The gnocchiv1 execution module cannot be loaded: " "os_client_config or keystoneauth are unavailable.")
20,313
def test_unconcretized_install(install_mockery, mock_fetch, mock_packages): """Test attempts to perform install phases with unconcretized spec.""" spec = Spec('trivial-install-test-package') with pytest.raises(ValueError, match='must have a concrete spec'): spec.package.do_install() with pytest.raises(ValueError, match="only patch concrete packages"): spec.package.do_patch()
20,314
def setup(): """Start headless Chrome in docker container.""" options = webdriver.ChromeOptions() options.add_argument('--no-sandbox') options.add_argument('--headless') options.add_argument('--disable-gpu') driver = webdriver.Chrome(options=options) driver.implicitly_wait(5) return driver
20,315
def lambdaResponse(statusCode, body, headers={}, isBase64Encoded=False): """ A utility to wrap the lambda function call returns with the right status code, body, and switches. """ # Make sure the body is a json object if not isinstance(body, str): body = json.dumps(body) # Make sure the content type is json header = headers header["Content-Type"] = "application/json" header["Access-Control-Allow-Origin"]= "*" response = { "isBase64Encoded": isBase64Encoded, "statusCode": statusCode, "headers": header, "body": body } return response
20,316
def singleton(class_): """ Specify that a class is a singleton :param class_: :return: """ instances = {} def getinstance(*args, **kwargs): if class_ not in instances: instances[class_] = class_(*args, **kwargs) return instances[class_] return getinstance
20,317
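A minimal usage sketch for the `singleton` decorator above; the `Config` class is made up for illustration and assumes the decorator is in scope. Every call to the decorated class returns the same cached instance.

# Hypothetical example class decorated with the singleton() decorator above.
@singleton
class Config:
    def __init__(self, env="dev"):
        self.env = env

a = Config(env="prod")
b = Config()          # constructor arguments are ignored after the first call
assert a is b and b.env == "prod"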
def is_datetime( value: Scalar, formats: Optional[Union[str, List[str]]] = None, typecast: Optional[bool] = True ) -> bool:
    """Test if a given string value can be converted into a datetime object for
    a given date format. The function accepts a single date format or a list of
    formats. If no format is given, ISO format is assumed as the default.

    Parameters
    ----------
    value: scalar
        Scalar value that is tested for being a date.
    formats: string or list(string)
        Date format string using Python strptime() format directives. This
        can be a list of date formats.
    typecast: bool, default=True
        Attempt to parse string values as dates if True.

    Returns
    -------
    bool
    """
    if isinstance(value, datetime):
        return True
    elif not typecast or not isinstance(value, str):
        return False
    # Try to convert the given string to a datetime object with the format
    # that was specified at object instantiation. This will raise an
    # exception if the value does not match the datetime format string.
    # Duplicate code depending on whether format is a list or a string.
    if formats is None:
        # Issue \#39: dateutil.parse (falsely?) identifies the following
        # strings as dates. For column profiling we want to exclude these:
        # 14A; 271 1/2; 41-05; 6-8
        #
        # As a work-around for now we expect a valid date to have at least six
        # characters (one for day, month, two for year and at least two
        # non-alphanumeric characters).
        #
        # An alternative solution was pointed out by @remram44:
        # https://gitlab.com/ViDA-NYU/datamart/datamart/-/blob/39462a5dca533a1e55596ddcbfc0ac7e98dce4de/lib_profiler/datamart_profiler/temporal.py#L63 # noqa: E501
        #
        # All solutions seem to suffer from the problem that values like
        # 152-12 are valid dates (e.g., 152-12-01 in this case) but also
        # valid house numbers, for example. There is no good solution here.
        # For now we go with the assumption that if someone wants to specify
        # a date it should have at least a day, month and year separated by
        # some special (non-alphanumeric) character.
        if len(value) >= 6 and has_two_spec_chars(value):
            try:
                parse(value, fuzzy=False)
                return True
            except (OverflowError, TypeError, ValueError):
                pass
    else:
        return to_datetime_format(value=value, formats=formats) is not None
    return False
20,318
def sortkey(d): """Split d on "_", reverse and return as a tuple.""" parts=d.split("_") parts.reverse() return tuple(parts)
20,319
def main(): """Program entry point.""" args = parse_args() if args.verbose is None: logging.basicConfig(level=logging.WARNING) elif args.verbose == 1: logging.basicConfig(level=logging.INFO) elif args.verbose >= 2: logging.basicConfig(level=logging.DEBUG) host_tag = get_host_tag_or_die() warn_unnecessary(args.arch, args.api, host_tag) check_ndk_or_die() lp32 = args.arch in ('arm', 'x86') min_api = 16 if lp32 else 21 api = args.api if api is None: logger().warning( 'Defaulting to target API %d (minimum supported target for %s)', min_api, args.arch) api = min_api elif api < min_api: sys.exit('{} is less than minimum platform for {} ({})'.format( api, args.arch, min_api)) triple = get_triple(args.arch) toolchain_path = get_toolchain_path_or_die(host_tag) if args.install_dir is not None: install_path = args.install_dir if os.path.exists(install_path): if args.force: logger().info('Cleaning installation directory %s', install_path) shutil.rmtree(install_path) else: sys.exit('Installation directory already exists. Use --force.') else: tempdir = tempfile.mkdtemp() atexit.register(shutil.rmtree, tempdir) install_path = os.path.join(tempdir, triple) create_toolchain(install_path, args.arch, api, toolchain_path, host_tag) if args.install_dir is None: if host_tag == 'windows-x86_64': package_format = 'zip' else: package_format = 'bztar' package_basename = os.path.join(args.package_dir, triple) shutil.make_archive( package_basename, package_format, root_dir=os.path.dirname(install_path), base_dir=os.path.basename(install_path))
20,320
def resolve_stream_name(streams, stream_name): """Returns the real stream name of a synonym.""" if stream_name in STREAM_SYNONYMS and stream_name in streams: for name, stream in streams.items(): if stream is streams[stream_name] and name not in STREAM_SYNONYMS: return name return stream_name
20,321
def get_split_cifar100_tasks(num_tasks, batch_size,run,paradigm,dataset): """ Returns data loaders for all tasks of split CIFAR-100 :param num_tasks: :param batch_size: :return: datasets = {} # convention: tasks starts from 1 not 0 ! # task_id = 1 (i.e., first task) => start_class = 0, end_class = 4 cifar_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),]) cifar_train = torchvision.datasets.CIFAR100('./data/', train=True, download=True, transform=cifar_transforms) cifar_test = torchvision.datasets.CIFAR100('./data/', train=False, download=True, transform=cifar_transforms) for task_id in range(1, num_tasks+1): train_loader, test_loader = get_split_cifar100(task_id, batch_size, cifar_train, cifar_test) datasets[task_id] = {'train': train_loader, 'test': test_loader} return datasets """ """ datasets = {} paradigm = 'class_iid' run = 0 dataset = core50( paradigm, run) for task_id in range(0, num_tasks): train_loader, val, test_loader = dataset.getNextClasses(task_id) #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test) datasets[task_id] = {'train': train_loader, 'test': test_loader} return datasets """ datasets = {} #paradigm = 'class_iid' #run = 0 #dataset = load_datasets( paradigm, run) if dataset == 'core50': for task_id in range(0, num_tasks): train_loader, test_loader = dataset_core50(task_id,batch_size,run,paradigm,dataset) #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test) datasets[task_id] = {'train': train_loader, 'test': test_loader} if dataset == 'toybox': for task_id in range(0, num_tasks): train_loader, test_loader = dataset_toybox(task_id,batch_size,run,paradigm,dataset) #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test) datasets[task_id] = {'train': train_loader, 'test': test_loader} if dataset == 'ilab': for task_id in range(0, num_tasks): train_loader, test_loader = dataset_ilab(task_id,batch_size,run,paradigm,dataset) #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test) datasets[task_id] = {'train': train_loader, 'test': test_loader} if dataset == 'cifar100': for task_id in range(0, num_tasks): train_loader, test_loader = dataset_cifar100(task_id,batch_size,run,paradigm,dataset) #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test) datasets[task_id] = {'train': train_loader, 'test': test_loader} return datasets
20,322
def is_point_in_triangle(pt, v1, v2, v3): """Returns True if the 2D point pt is within the triangle defined by v1-3. https://www.gamedev.net/forums/topic/295943-is-this-a-better-point-in-triangle-test-2d/ """ b1 = sign(pt, v1, v2) < 0.0 b2 = sign(pt, v2, v3) < 0.0 b3 = sign(pt, v3, v1) < 0.0 return ((b1 == b2) and (b2 == b3))
20,323
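The `is_point_in_triangle` routine above relies on a `sign` helper that is not shown in this entry. A common definition, given here only as an assumption and not as the original author's code, is the 2D cross product of the directed edge with the point, which makes the three half-plane tests consistent.

def sign(pt, v1, v2):
    # 2D cross product: its sign tells which side of the directed edge v1->v2
    # the point pt falls on. (Assumed helper, not taken from the source entry.)
    return (pt[0] - v2[0]) * (v1[1] - v2[1]) - (v1[0] - v2[0]) * (pt[1] - v2[1])

# Example, assuming is_point_in_triangle above is in scope:
# the centroid of a triangle lies inside it.
assert is_point_in_triangle((1.0, 1.0), (0.0, 0.0), (3.0, 0.0), (0.0, 3.0))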
def precomputed_aug_experiment( clf, auged_featurized_x_train, auged_featurized_y_train, auged_featurized_x_train_to_source_idxs, auged_featurized_x_test, auged_featurized_y_test, auged_featurized_x_test_to_source_idxs, aug_iter, train_idxs_scores, n_aug_sample_points, update_scores=False, weight_aug_samples=False, use_loss=False, stratified_sampling_x_train_ks=None, ): """ This is a precomputed version of the aug_experiment. Here, we expect training sets to be augmented and featurized up front. This function will index into the augmented set (with featurization) to get the input that would be fed into the classifier. @param clf The classifier to use (e.g., logistic regression) @param auged_featurized_x_train The augmented and featurized training set. @param auged_featurized_y_train The labels of the training set. @param auged_featurized_x_train_to_source_idxs A list of idxs corresponding to the source of augmented images from the original training set. -1 means that the point is an original point. @param auged_featurized_x_test The augmented and featurized test set. @param auged_featurized_y_test The labels of the test set. @param auged_featurized_x_test_to_source_idxs A list of idxs corresponding to the source of augmented images from the original test set. -1 means that the point is an original point. @param aug_iter The policy to use. @param train_idxs_scores The scores to use for the policies (e.g., LOO influence or loss). @param stratified_sampling_x_train_ks The population type of each train sample for stratified sampling. Sampling is round robin in numeric order. @return An list of accuracies on the test set and a list of the points that were chosen for augmentation. """ influence_acc = [] aug_iter_idxs = [] original_mask_train = auged_featurized_x_train_to_source_idxs < 0 original_x_train = auged_featurized_x_train[original_mask_train] original_y_train = auged_featurized_y_train[original_mask_train] auged_x_train = np.copy(original_x_train) auged_y_train = np.copy(original_y_train) n_aug_sample_points = set(n_aug_sample_points) if weight_aug_samples: sample_weight = np.ones(len(original_x_train)) else: sample_weight = None if stratified_sampling_x_train_ks is not None: aug_idxs = stratified_sampling_to_aug_idxs( train_idxs_scores, aug_iter, stratified_sampling_x_train_ks, ) else: aug_idxs = np.array(list(aug_iter(train_idxs_scores))).flatten() assert len(np.unique(aug_idxs)) == len(aug_idxs) already_auged = set() while len(already_auged) < len(original_x_train): assert len(train_idxs_scores) == len(original_x_train) next_idxs = [idx for idx in aug_idxs if idx not in already_auged] idx = next_idxs[0] already_auged.add(idx) aug_mask = auged_featurized_x_train_to_source_idxs == idx x_aug_ = auged_featurized_x_train[aug_mask] auged_x_train = np.concatenate( [ auged_x_train, x_aug_, ], axis=0) y_aug_ = auged_featurized_y_train[aug_mask] auged_y_train = np.concatenate( [ auged_y_train, y_aug_, ], axis=0) if weight_aug_samples: # We downweight all points from the original train point rescale_weight = 1.0 / (len(x_aug_) + 1) weight_aug_ = np.full(len(x_aug_), rescale_weight) sample_weight = np.concatenate([ sample_weight, weight_aug_, ], axis=0) sample_weight[idx] = rescale_weight if len(already_auged) in n_aug_sample_points: fit_params = {"logistic_reg__sample_weight": sample_weight} clf.fit(auged_x_train, auged_y_train, **fit_params) aug_train_poisoned_acc = clf.score( auged_featurized_x_test, auged_featurized_y_test) influence_acc.append(aug_train_poisoned_acc) 
aug_iter_idxs.append(idx) if update_scores: if isinstance(clf, sklearn.model_selection.GridSearchCV): if use_loss: train_idxs_scores = (clf .best_estimator_ .named_steps["logistic_reg"] .log_losses(L2_alpha=0.0)) else: train_idxs_scores = (clf .best_estimator_ .named_steps["logistic_reg"] .LOO_influence()) else: if use_loss: train_idxs_scores = (clf .named_steps["logistic_reg"] .log_losses(L2_alpha=0.0)) else: train_idxs_scores = (clf .named_steps["logistic_reg"] .LOO_influence()) train_idxs_scores = train_idxs_scores[:len(original_x_train)] if stratified_sampling_x_train_ks is not None: aug_idxs = stratified_sampling_to_aug_idxs( train_idxs_scores, aug_iter, stratified_sampling_x_train_ks, ) else: aug_idxs = np.array( list(aug_iter(train_idxs_scores)) ).flatten() return influence_acc, aug_iter_idxs
20,324
def get_yahoo_data(symbol, start_date, end_date): """Returns pricing data for a YAHOO stock symbol. Parameters ---------- symbol : str Symbol of the stock in the Yahoo. You can refer to this link: https://www.nasdaq.com/market-activity/stocks/screener?exchange=nasdaq. start_date : str Starting date (YYYY-MM-DD) of the period that you want to get data on end_date : str Ending date (YYYY-MM-DD) of the period you want to get data on Returns ------- pandas.DataFrame Stock data (in OHLCAV format) for the specified company and date range """ df = yf.download(symbol, start=start_date, end=end_date) df = df.reset_index() rename_dict = { "Date": "dt", "Open": "open", "High": "high", "Low": "low", "Close": "close", "Adj Close": "adj_close", "Volume": "volume", } rename_list = ["dt", "open", "high", "low", "close", "adj_close", "volume"] df = df.rename(columns=rename_dict)[rename_list].drop_duplicates() df["dt"] = pd.to_datetime(df.dt) return df.set_index("dt")
20,325
def first_n(m: dict, n: int): """Return first n items of dict""" return {k: m[k] for k in list(m.keys())[:n]}
20,326
def listdictnp_combine( lst: List, method: str = "concatenate", axis: int = 0, keep_nested: bool = False, allow_error: bool = False, ) -> Dict[str, Union[np.ndarray, List]]: """Concatenate or stack a list of dictionaries contains numpys along with error handling Parameters ---------- lst : list list of dicts containings np arrays method : str 'concatenate' or 'stack' axis : int axis to concat or stack over keep_nested : bool keep nested structure of list or not allow_error : bool allow for error handling. If op does not succes, list is provided Returns ------- np.array OR list of np.array in case of error """ for k in range(len(lst)): assert ( lst[0].keys() == lst[k].keys() ), "Dict keys do not match in listdictnp_combine fct" # get keys keys = lst[0].keys() output_dict = dict() for key in keys: # merge nested list if keep_nested: tmp = [None] * len(lst) for k in range(len(lst)): tmp[k] = lst[k][key] else: tmp = list() for k in range(len(lst)): tmp = [*tmp, *lst[k][key]] # convert to numpy if possible output_dict[key] = listnp_combine( tmp, method=method, axis=axis, allow_error=allow_error ) return output_dict
20,327
def main(): """Main routine""" setup ( name = "cmssh", version = "%s.%s" % (cmssh.__version__, cmssh.__revision__), description = "An interactive, programmable environment and shell for CMS", package_dir = {'cmssh':'src/cmssh'}, data_files = [('config',['src/config/cmssh_extension.py','src/config/ipython_config.py'])], packages = find_packages('src'), long_description = "cmssh -- an interactive shell for CMS", classifiers = [ 'Environment :: Console', "Intended Audience :: Developers", 'Intended Audience :: End Users/Desktop', 'Intended Audience :: System Administrators', "License :: OSI Approved :: GNU License", 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', "Programming Language :: Python", "Topic :: Database :: Front-Ends", ], requires = ['python (>=2.6)', 'ipython (>=0.11)'], author = "Valentin Kuznetsov", author_email = "vkuznet@gmail.com", url = "https://twiki.cern.ch/twiki/bin/view/CMS/", license = "GNU License", )
20,328
def pytest_configure(config): """Configure Pytest with Astropy. Parameters ---------- config : pytest configuration """ if ASTROPY_HEADER: config.option.astropy_header = True # Customize the following lines to add/remove entries from the list of # packages for which version numbers are displayed when running the tests. PYTEST_HEADER_MODULES.pop('Pandas', None) PYTEST_HEADER_MODULES['scikit-image'] = 'skimage' from . import __version__ packagename = os.path.basename(os.path.dirname(__file__)) TESTED_VERSIONS[packagename] = __version__
20,329
def find_pure_symbol(symbols, clauses): """Find a symbol and its value if it appears only as a positive literal (or only as a negative) in clauses. >>> find_pure_symbol([A, B, C], [A|~B,~B|~C,C|A]) (A, True) """ for s in symbols: found_pos, found_neg = False, False for c in clauses: if not found_pos and s in disjuncts(c): found_pos = True if not found_neg and ~s in disjuncts(c): found_neg = True if found_pos != found_neg: return s, found_pos return None, None
20,330
def update_field(states, js, beta, loops): """ Update loops random selected states in the Ising model. Performance boost using jit-numba with parallelization of the operations done by one loop. :param states: np.2darray = Field containing the ising model states :param js: np.3darray = 3d Array with shape [N, N, 3], N = states.shape[0], contains the coupling constants :param beta: float = inverse temperature for the ising model :param loops: int = number of states to check :return: void """ size = states.shape[0] for i in prange(loops): y = int(random.random() * (size - 2) + 1) n_x = fn(y) x = int(random.random() * n_x) h = js[y, x, 0] * (states[y, (x + 1) % n_x] + states[y, (x - 1 + n_x) % n_x]) + \ np.sum(np.multiply(np.multiply(geo_couplings(x, y, y + 1), states[y + 1, :fn(y + 1)]), js[y, x, 1])) + \ np.sum(np.multiply(np.multiply(geo_couplings(x, y, y - 1), js[y, x, 2]), states[y - 1, :fn(y - 1)])) states[y, x] = 1 if random.random() < 1 / (1 + np.exp(-2 * beta * h)) else -1
20,331
def cross_entropy_emphasized_loss(labels, predictions, corrupted_inds, axis=0, alpha=0.3, beta=0.7, regularizer=None): """ Compute cross entropy loss over training examples that have been corrupted along certain dimensions :param labels: tensor of training example with no corruption added :param predictions: output tensor of autoencoder :param corrupted_inds: indices of corrupted dimensions (if any) :param axis: axis along which components are taken :param alpha: weight for error on components that were corrupted :param beta: weight for error on components that were not corrupted :return: cross entropy loss, emphasized by corrupted component weight """ assert (labels.shape[axis] == predictions.shape[axis]) assert (labels.dtype == predictions.dtype) num_elems = labels.shape[axis].value * FLAGS.batch_size # corrupted features x_c = tf.boolean_mask(labels, corrupted_inds) z_c = tf.boolean_mask(predictions, corrupted_inds) # uncorrupted features x = tf.boolean_mask(labels, ~corrupted_inds) z = tf.boolean_mask(predictions, ~corrupted_inds) # if training on examples with corrupted indices if x_c is not None: lhs = alpha * (-tf.reduce_sum(tf.add(tf.multiply(x_c, tf.log(z_c)), tf.multiply(1.0 - x_c, tf.log(1.0 - z_c))))) rhs = beta * (-tf.reduce_sum(tf.add(tf.multiply(x, tf.log(z)), tf.multiply(1.0 - x, tf.log(1.0 - z))))) else: lhs = 0 rhs = -tf.reduce_sum(tf.add(tf.multiply(labels, tf.log(predictions)), tf.multiply(1.0 - labels, tf.log(1.0 - predictions)))) return tf.add(lhs, rhs) / num_elems
20,332
def print_array_info(ar): """ Print array shape and other basic information. """ ar = nm.asanyarray(ar) print(ar.shape, 'c_contiguous:', ar.flags.c_contiguous, \ 'f_contiguous:', ar.flags.f_contiguous) print('min:', ar.min(), 'mean:', ar.mean(), 'max:', ar.max())
20,333
def get_process_list(node: Node): """Analyse the process description and return the Actinia process chain and the name of the processing result :param node: The process node :return: (output_objects, actinia_process_list) """ input_objects, process_list = check_node_parents(node=node) output_objects = [] # First analyse the data entry if "id" not in node.arguments: raise Exception("Process %s requires parameter <id>" % PROCESS_NAME) input_object = DataObject.from_string(node.arguments["id"]) spatial_extent = None if "spatial_extent" in node.arguments: spatial_extent = node.arguments["spatial_extent"] temporal_extent = None if "temporal_extent" in node.arguments: temporal_extent = node.arguments["temporal_extent"] bands = None if "bands" in node.arguments: bands = node.arguments["bands"] if input_object.is_strds() and \ (temporal_extent is not None or bands is not None): output_object = DataObject( name=create_output_name(input_object.name, PROCESS_NAME), datatype=input_object.datatype) else: output_object = input_object output_objects.append(output_object) node.add_output(output_object) pc = create_process_chain_entry(input_object, spatial_extent, temporal_extent, bands, output_object) process_list.extend(pc) return output_objects, process_list
20,334
def _validate_image_formation(the_sicd): """ Validate the image formation. Parameters ---------- the_sicd : sarpy.io.complex.sicd_elements.SICD.SICDType Returns ------- bool """ if the_sicd.ImageFormation is None: the_sicd.log_validity_error( 'ImageFormation attribute is not populated, and ImageFormType is {}. This ' 'cannot be valid.'.format(the_sicd.ImageFormType)) return False # nothing more to be done. alg_types = [] for alg in ['RgAzComp', 'PFA', 'RMA']: if getattr(the_sicd, alg) is not None: alg_types.append(alg) if len(alg_types) > 1: the_sicd.log_validity_error( 'ImageFormation.ImageFormAlgo is set as {}, and multiple SICD image formation parameters {} are set.\n' 'Only one image formation algorithm should be set, and ImageFormation.ImageFormAlgo ' 'should match.'.format(the_sicd.ImageFormation.ImageFormAlgo, alg_types)) return False elif len(alg_types) == 0: if the_sicd.ImageFormation.ImageFormAlgo is None: the_sicd.log_validity_warning( 'ImageFormation.ImageFormAlgo is not set, and there is no corresponding\n' 'RgAzComp, PFA, or RMA SICD parameters set. Setting ImageFormAlgo ' 'to "OTHER".'.format(the_sicd.ImageFormation.ImageFormAlgo)) the_sicd.ImageFormation.ImageFormAlgo = 'OTHER' return True elif the_sicd.ImageFormation.ImageFormAlgo != 'OTHER': the_sicd.log_validity_error( 'No RgAzComp, PFA, or RMA SICD parameters populated, but ImageFormation.ImageFormAlgo ' 'is set as {}.'.format(the_sicd.ImageFormation.ImageFormAlgo)) return False return True # there is exactly one algorithm type populated return _validate_image_form_parameters(the_sicd, alg_types[0])
20,335
def handle_closet(player, level, reward_list): """ Handle a closet :param player: The player object for the player :param level: The level that the player is on :return reward: The reward given to the player """ # Print the dialogue for the closet print "You found a closet. It appears to be unlocked." print "Should you open it?" # Get the players move player_move = handle_options(player, ["Open the Closet!", "No! Its a trap!"]) reward = None if player_move == 1: # Decide what happens when the person opens the closet closet_outcome = randint(0, 5) if closet_outcome < level: # There is a rat inside the closet print "OH NO! There is a giant man eating rat in there!" handle_fight(player, 3, 10) else: # You get a helpful reward from the closet reward = reward_list[randint(0, len(reward_list)-1)] print "Congratulations! You found a " + reward + "!" print "This item increases your damage points by", 2 * level player.add_damage_points(2*level) return reward
20,336
def module_path_to_test_path(module): """Convert a module locator to a proper test filename. """ return "test_%s.py" % module_path_to_name(module)
20,337
def inspect_bom(filename): """Inspect file for bom.""" encoding = None try: with open(filename, "rb") as f: encoding = has_bom(f.read(4)) except Exception: # pragma: no cover # print(traceback.format_exc()) pass return encoding
20,338
def parse_requirement(text): """ Parse a requirement such as 'foo>=1.0'. Returns a (name, specifier) named tuple. """ from packaging.specifiers import SpecifierSet match = REQUIREMENT_RE.match(text) if not match: raise ValueError("Invalid requirement: %s" % text) name = match.group('name').strip() spec = SpecifierSet(match.group('specifier') or '') return Requirement(name, spec)
20,339
def H_split(k, N, eps): """Entropy of the split in binary search including overlap, specified by eps""" return (k / N) * (np.log(k) + H_epsilon(k, eps)) + ((N - k) / N) * (np.log(N - k) + H_epsilon(N - k, eps))
20,340
def date_features(inputs, features_slice, columns_index) -> tf.Tensor: """Return an input and output date tensors from the features tensor.""" date = features(inputs, features_slice, columns_index) date = tf.cast(date, tf.int32) date = tf.strings.as_string(date) return tf.strings.reduce_join(date, separator="-", axis=-1, keepdims=True)
20,341
def save_calib(filename, calib_params): """ Saves calibration parameters as '.pkl' file. Parameters ---------- filename : str Path to save file, must be '.pkl' extension calib_params : dict Calibration parameters to save Returns ------- saved : bool Saved successfully. """ if type(calib_params) != dict: raise TypeError("calib_params must be 'dict'") output = open(filename, 'wb') try: pickle.dump(calib_params, output) except: raise IOError("filename must be '.pkl' extension") output.close() saved = True return saved
20,342
def createWrap(cbName, line):
    """Process the callback in the Python wrapper code section."""
    # Generate the "on" callback section of the .h file
    #if 'OnRspError' in cbName:
        #on_line = 'virtual void on' + cbName[2:] + '(dict error, int id, bool last)\n'
        #override_line = '("on' + cbName[2:] + '")(error, id, last);\n'
    #elif 'OnRsp' in cbName:
        #on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last)\n'
        #override_line = '("on' + cbName[2:] + '")(data, error, id, last);\n'
    #elif 'OnRtn' in cbName:
        #on_line = 'virtual void on' + cbName[2:] + '(dict data)\n'
        #override_line = '("on' + cbName[2:] + '")(data);\n'
    #elif 'OnErrRtn' in cbName:
        #on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error)\n'
        #override_line = '("on' + cbName[2:] + '")(data, error);\n'
    #else:
        #on_line = ''

    if line.count('*') == 1:
        on_line = 'virtual void on' + cbName[2:] + '(dict data)\n'
        override_line = '("on' + cbName[2:] + '")(data);\n'
    elif line.count('*') == 2:
        on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, bool last)\n'
        override_line = '("on' + cbName[2:] + '")(data, error, last);\n'
    elif line.count('*') == 0:
        on_line = 'virtual void on' + cbName[2:] + '()\n'
        override_line = '("on' + cbName[2:] + '")();\n'
    else:
        on_line = ''

    if on_line != '':
        fwrap.write(on_line)
        fwrap.write('{\n')
        fwrap.write(' try\n')
        fwrap.write(' {\n')
        fwrap.write(' this->get_override'+override_line)
        fwrap.write(' }\n')
        fwrap.write(' catch (error_already_set const &)\n')
        fwrap.write(' {\n')
        fwrap.write(' PyErr_Print();\n')
        fwrap.write(' }\n')
        fwrap.write('};\n')
        fwrap.write('\n')
20,343
def context_to_dict(context): """convert a django context to a dict""" the_dict = {} for elt in context: the_dict.update(dict(elt)) return the_dict
20,344
def returnItemsWithMinSupport(itemSet, transactionList, minSupport, freqSet): """calculates the support for items in the itemSet and returns a subset of the itemSet each of whose elements satisfies the minimum support""" _itemSet = set() localSet = defaultdict(int) for item in itemSet: for transaction in transactionList: if item.issubset(transaction): freqSet[item] += 1 localSet[item] += 1 for item, count in list(localSet.items()): support = float(count)/len(transactionList) if support >= minSupport: _itemSet.add(item) return _itemSet
20,345
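A small illustrative run of `returnItemsWithMinSupport` above (an Apriori-style support filter), assuming the function and its `collections.defaultdict` import are available; the toy transactions are made up for the example.

from collections import defaultdict

transactionList = [frozenset(['bread', 'milk']),
                   frozenset(['bread', 'butter']),
                   frozenset(['milk'])]
itemSet = {frozenset(['bread']), frozenset(['milk']), frozenset(['butter'])}
freqSet = defaultdict(int)

# Keep only 1-itemsets appearing in at least 2 of the 3 transactions.
frequent = returnItemsWithMinSupport(itemSet, transactionList, 2.0 / 3.0, freqSet)
# frequent == {frozenset({'bread'}), frozenset({'milk'})}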
def create_P(P_δ, P_ζ, P_ι):
    """
    Combine `P_δ`, `P_ζ` and `P_ι` into a single matrix.

    Parameters
    ----------
    P_δ : ndarray(float, ndim=1)
        Probability distribution over the values of δ.
    P_ζ : ndarray(float, ndim=2)
        Markov transition matrix for ζ.
    P_ι : ndarray(float, ndim=1)
        Probability distribution over the values of ι.

    Returns
    -------
    P : ndarray(float, ndim=4)
        Joint probability distribution over the values of δ, ζ and ι.
        Probabilities vary by δ on the first axis, by the current and next
        values of ζ on the second and third axes, and by ι on the fourth axis.
    """
    P = \
        P_δ[:, None, None, None] * P_ζ[None, :, :, None] * \
        P_ι[None, None, None, :]

    return P
20,346
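A small shape check for `create_P` above, assuming NumPy is available and the function is in scope: with 2 values of δ, 3 states for ζ and 2 values of ι, the broadcast product has one axis for δ, two for the ζ transition (current and next state) and one for ι, and the total mass sums to the number of current ζ states because each row of `P_ζ` sums to one.

import numpy as np

P_delta = np.array([0.4, 0.6])                  # distribution over δ
P_zeta = np.array([[0.9, 0.1, 0.0],             # Markov matrix for ζ (rows sum to 1)
                   [0.2, 0.6, 0.2],
                   [0.0, 0.3, 0.7]])
P_iota = np.array([0.5, 0.5])                   # distribution over ι

P = create_P(P_delta, P_zeta, P_iota)
assert P.shape == (2, 3, 3, 2)                  # (δ, ζ today, ζ tomorrow, ι)
assert np.isclose(P.sum(), 3.0)                 # one unit of mass per current ζ state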
def sel_nearest( dset, lons, lats, tolerance=2.0, unique=False, exact=False, dset_lons=None, dset_lats=None, ): """Select sites from nearest distance. Args: dset (Dataset): Stations SpecDataset to select from. lons (array): Longitude of sites to interpolate spectra at. lats (array): Latitude of sites to interpolate spectra at. tolerance (float): Maximum distance to use site for interpolation. unique (bool): Only returns unique sites in case of repeated inexact matches. exact (bool): Require exact matches. dset_lons (array): Longitude of stations in dset. dset_lats (array): Latitude of stations in dset. Returns: Selected SpecDataset at locations defined by (lons, lats). Note: Args `dset_lons`, `dset_lats` are not required but can improve performance when `dset` is chunked with site=1 (expensive to access station coordinates) and improve precision if projected coordinates are provided at high latitudes. """ coords = Coordinates(dset, lons=lons, lats=lats, dset_lons=dset_lons, dset_lats=dset_lats) station_ids = [] for lon, lat in zip(coords.lons, coords.lats): closest_id, closest_dist = coords.nearest(lon, lat) if closest_dist > tolerance: raise AssertionError( f"Nearest site from (lat={lat}, lon={lon}) is {closest_dist:g} " f"deg away but tolerance is {tolerance:g} deg." ) if exact and closest_dist > 0: raise AssertionError( f"Exact match required but no site at (lat={lat}, lon={lon}), " f"nearest site is {closest_dist} deg away." ) station_ids.append(closest_id) if unique: station_ids = list(set(station_ids)) dsout = dset.isel(**{attrs.SITENAME: station_ids}) # Return longitudes in the convention provided if coords.consistent is False: dsout.lon.values = coords._swap_longitude_convention(dsout.lon.values) dsout = dsout.assign_coords({attrs.SITENAME: np.arange(len(station_ids))}) return dsout
20,347
def evaluate(dataset, predictions, gts, output_folder): """evaluate dataset using different methods based on dataset type. Args: dataset: Dataset object predictions(dict): each item in the list represents the prediction results for one image. gt(dict): Ground truth for each batch output_folder: output folder, to save evaluation files or results. Returns: evaluation result """ args = dict( predictions=predictions, gts=gts, output_folder=output_folder, ) if isinstance(dataset, datasets.MNIST): return do_mnist_evaluation(**args) elif isinstance(dataset, datasets.MWPose): return do_mwpose_evaluation(dataset=dataset, **args) elif isinstance(dataset, datasets.ModelNetHdf): return do_modelnet_evaluation(**args) else: dataset_name = dataset.__class__.__name__ raise NotImplementedError("Unsupported dataset type {}.".format(dataset_name))
20,348
def html_xml_save( s=None, possible_sc_link=None, table="htmlxml", course_presentation=None ): """Save the HTML and XML for a VLE page page.""" if not possible_sc_link: # should really raise error here print("need a link") if not s: if "learn2.open.ac.uk" in possible_sc_link: from vlescrapertools import getAuthedSession s = getAuthedSession() else: s = possible_sc_link typ, html_page_url, rawxml, html_src = get_sc_page(possible_sc_link, s) if typ: dbrowdict = { "possible_sc_link": possible_sc_link, "doctype": typ, "html_url": html_page_url, "xml": rawxml, "html_src": html_src, "course_presentation": course_presentation, "courseCode": "", "courseTitle": "", "itemTitle": "", } else: dbrowdict = {} # Get some metadata from the XML # Item/CourseCode # Item/CourseTitle # Item/ItemTitle if typ == "XML": root = etree.fromstring(rawxml.encode("utf-8")) # If the course code is contaminated by a presentation suffix, get rid of the presentation code dbrowdict["courseCode"] = flatten(root.find("CourseCode")).split("-")[0] dbrowdict["courseTitle"] = flatten(root.find("CourseTitle")) dbrowdict["itemTitle"] = flatten(root.find("ItemTitle")) if dbrowdict: DB[table].insert(dbrowdict) return typ, html_page_url, rawxml, html_src
20,349
def app(request):
    """An instance of the Flask app that points at a test database.

    If the TEST_DATABASE environment variable is set to "postgres", launch a
    temporary PostgreSQL server that gets torn down at the end of the test run.
    """
    database = os.environ.get('TEST_DATABASE', 'sqlite')
    if database == 'postgres':
        try:
            psql = request.getfixturevalue('postgresql_proc')
            uri = f'postgresql+psycopg2://{psql.user}:@{psql.host}:{psql.port}/'
        except pytest.FixtureLookupError as error:
            raise Exception(
                'TEST_DATABASE was set to "postgres" but pytest-postgresql was not installed'
            ) from error
    else:
        uri = 'sqlite://'
    main.app.app.config['SQLALCHEMY_DATABASE_URI'] = uri
    return main.app.app
20,350
def htmr(t,axis="z"): """ Calculate the homogeneous transformation matrix of a rotation respect to x,y or z axis. """ from sympy import sin,cos,tan if axis in ("z","Z",3): M = Matrix([[cos(t),-sin(t),0,0], [sin(t),cos(t),0,0], [0,0,1,0], [0,0,0,1]]) elif axis in ("y","Y",2): M = Matrix([[cos(t),0,sin(t),0], [0,1,0,0], [-sin(t),0,cos(t),0], [0,0,0,1]]) elif axis in ("x","X",1): M = Matrix([[1,0,0,0], [0,cos(t),-sin(t),0,], [0,sin(t),cos(t),0], [0,0,0,1]]) else: return eye(4) return M
20,351
def vt(n, gm, gsd, dmin=None, dmax=10.): """Evaluate the total volume of the particles between two diameters. The CDF of the lognormal distribution is calculated using equation 8.12 from Seinfeld and Pandis. Mathematically, it is represented as: .. math:: V_t=\\frac{π}{6}∫_{-∞}^{∞}D_p^3n_N^e(ln D_p)d lnD_p \\;\\;(\mu m^3 cm^{-3}) Parameters ---------- n : float Total aerosol number concentration in units of #/cc gm : float Median particle diameter (geometric mean) in units of :math:`\mu m`. gsd : float Geometric Standard Deviation of the distribution. dmin : float The minimum particle diameter in microns. Default value is 0 :math:`\mu m`. dmax : float The maximum particle diameter in microns. Default value is 10 :math:`\mu m`. Returns ------- Volume | float Returns the total volume of particles between :math:`D_{min}` and :math:`D_{max}` in units of :math:`\mu m^3 cm^{-3}` See Also -------- opcsim.equations.pdf.dv_ddp opcsim.equations.pdf.dv_dlndp opcsim.equations.pdf.dv_dlogdp Examples -------- Integrate a sample distribution between 0 and 2.5 microns: >>> d = opcsim.AerosolDistribution() >>> d.add_mode(1e3, 100, 1.5, "mode 1") >>> n = opcsim.equations.cdf.vt(1e3, 0.1, 1.5, dmax=2.5) """ res = (np.pi/12.)*n*(gm**3) * np.exp(9./2.*(np.log(gsd)**2)) * \ erfc((1.5*np.sqrt(2) * np.log(gsd)) - (np.log(dmax/gm) / (np.sqrt(2) * np.log(gsd)))) if dmin is not None and dmin > 0.0: res -= vt(n, gm, gsd, dmin=None, dmax=dmin) return res
20,352
def core_value_encode(origin):
    """
    Convert a UTF-8 string into the "core socialist values" encoding.
    :param origin:
    :return:
    """
    hex_str = str2hex(origin)
    twelve = hex2twelve(hex_str)
    core_value_iter = twelve_2_core_value(twelve)
    return ''.join(core_value_iter)
20,353
def generate_params_file(output_path, args) -> None: """ Generates parameter files. """ with open(output_path.joinpath(f'params-{time.strftime("%Y_%m_%d_%H%M%S", time.localtime(time.time()))}.txt'), 'w', encoding='UTF-8') as params_file: output_vars = [ f'input={args.fasta_file_path.resolve()}', f'output_dir={args.output_dir}', f'bis={args.bis}', f'strand={args.strand}', f'max_PCR_len={args.max}', f'n_subseq={args.sub}', f'n_tries={args.tries}', f'verbose={args.verbose}', f'seed={args.seed}'] params_file.write('\n'.join(output_vars))
20,354
def user_query_ahjs_is_ahj_official_of(self, request, queryset): """ Admin action for the User model. Redirects the admin to a change list of AHJs the selected users are AHJ officials of. """ model_name = 'ahj' field_key_pairs = [field_key_pair('AHJPK', 'AHJPK')] queryset = AHJUserMaintains.objects.filter(UserID__in=queryset, MaintainerStatus=True) return load_change_list_with_queryset(request, queryset, model_name, field_key_pairs)
20,355
def open_gui(root_path: str): """The main function. This opens the DataLight GUI. :param root_path: The path to the root of the RoboTA project metadata descriptions. """ app = QtWidgets.QApplication(sys.argv) datalight_ui = DatalightUIWindow(root_path) datalight_ui.ui_setup() datalight_ui.main_window.show() datalight_ui.set_window_position() connect_button_methods(datalight_ui) sys.exit(app.exec_())
20,356
def get_cache_node_count( cluster_id: str, configuration: Configuration = None, secrets: Secrets = None ) -> int: """Returns the number of cache nodes associated to the cluster :param cluster_id: str: the name of the cache cluster :param configuration: Configuration :param secrets: Secrets :example: { "type": "probe", "name": "validate cache node count", "tolerance": 3, "provider": { "type": "python", "module": "chaosaws.elasticache.probes", "func": "get_cache_node_count", "arguments": { "cluster_id": "MyTestCluster" } } } """ response = describe_cache_cluster( cluster_id, configuration=configuration, secrets=secrets ) return response["CacheClusters"][0].get("NumCacheNodes", 0)
20,357
def top_sentences(query, sentences, idfs, n): """ Given a `query` (a set of words), `sentences` (a dictionary mapping sentences to a list of their words), and `idfs` (a dictionary mapping words to their IDF values), return a list of the `n` top sentences that match the query, ranked according to idf. If there are ties, preference should be given to sentences that have a higher query term density. """ # Process query. query = set( [ word.lower() for word in query if word not in string.punctuation and word not in nltk.corpus.stopwords.words("english") ] ) # Create a list tuples (sentence, sum_idfs, qt_density) to sort the sentences. results = [] for sentence, words in sentences.items(): # Determine the total sum of IDF values and query term density for each # sentence. sum_idfs = 0 for word in query: if word in words: sum_idfs += idfs[word] qt_density = sum(words.count(word) for word in query) / len(words) results.append((sentence, sum_idfs, qt_density)) # Sort sentences by their total sum of IDF values and query term density. ranked_sentences = [ sentence for sentence, sum_idfs, qt_density in sorted( results, key=itemgetter(1, 2), reverse=True ) ] # Return the 'n' top sentences. return ranked_sentences[:n]
20,358
def print_defence_history(n:int, history:HistoryRecords, result:bool): """ print history. """ format_str = "[{0}] .... {1} ({2}, {3})" print("\n===== challenge history ======") for i in range(len(history.challenge)): print(format_str.format(i + 1, history.challenge[i], history.response[i][0], history.response[i][1]))
20,359
def load_distribution(label): """Load sample distributions as described by Seinfeld+Pandis Table 8.3. There are currently 7 options including: Urban, Marine, Rural, Remote continental, Free troposphere, Polar, and Desert. Parameters ---------- label : {'Urban' | 'Marine' | 'Rural' | 'Remote Continental' | 'Free Troposphere' | 'Polar' | 'Desert'} Choose which sample distribution to load. Returns ------- An instance of the AerosolDistribution class Examples -------- >>> d = opcsim.load_distribution("Urban") """ label = label.lower() if label not in DISTRIBUTION_DATA.keys(): raise ValueError("Invalid label.") _tmp = AerosolDistribution(label) for each in DISTRIBUTION_DATA[label]: _tmp.add_mode(each[0], each[1], 10**each[2], each[3]) return _tmp
20,360
def split_4d_itk(img_itk: sitk.Image) -> List[sitk.Image]:
    """
    Helper function to split a 4D itk image into multiple 3D images

    Args:
        img_itk: 4D input image

    Returns:
        List[sitk.Image]: 3D output images
    """
    img_npy = sitk.GetArrayFromImage(img_itk)
    spacing = img_itk.GetSpacing()
    origin = img_itk.GetOrigin()
    direction = np.array(img_itk.GetDirection()).reshape(4, 4)

    spacing = tuple(list(spacing[:-1]))
    assert len(spacing) == 3
    origin = tuple(list(origin[:-1]))
    assert len(origin) == 3
    direction = tuple(direction[:-1, :-1].reshape(-1))
    assert len(direction) == 9

    images_new = []
    for i, t in enumerate(range(img_npy.shape[0])):
        img = img_npy[t]
        images_new.append(
            create_itk_image_spatial_props(img, spacing, origin, direction))
    return images_new
20,361
def parse_results(html, keyword): """[summary] Arguments: html {str} -- google search engine html response keyword {str} -- search term Returns: pandas.DataFrame -- Dataframe with the following columns ['keyword', 'rank', 'title', 'link', 'domain'] """ soup = BeautifulSoup(html, 'html.parser') found_results = [] rank = 1 result_block = soup.find_all('div', attrs={'class': 'g'}) for result in result_block: link = result.find('a', href=True) title = result.find('h3') # description = result.find('span', attrs={'class': 'st'}) if link and title: link = link['href'] title = title.get_text() # if description: # description = description.get_text() if link != '#': domain = DOMAIN_RE.findall(link)[0] found_results.append( {'keyword': keyword, 'rank': rank, 'title': title, 'link': link, 'domain': domain}) rank += 1 return pd.DataFrame(found_results, columns=['keyword', 'rank', 'title', 'link', 'domain'])
20,362
def maybe_iter_configs_with_path(x, with_params=False): """ Like x.maybe_iter_configs_with_path(), but returns [(x, [{}])] or [(x, {}, [{}])] if x is just a config object and not a Tuner object. """ if is_tuner(x): return x.iter_configs_with_path(with_params=with_params) else: if with_params: return [(deepcopy(x), {}, [{}])] else: return [(deepcopy(x), {})]
20,363
def median(vals: typing.List[float]) -> float:
    """Calculate median value of `vals`

    Arguments:
        vals {typing.List[float]} -- list of values

    Returns:
        float -- median value
    """
    ordered = sorted(vals)
    mid = len(ordered) // 2
    if len(ordered) % 2:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2
20,364
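Two quick checks of the `median` helper above, assuming it is in scope: the middle element is returned for an odd-length list and the mean of the two middle elements for an even-length one.

assert median([3.0, 1.0, 2.0]) == 2.0
assert median([1.0, 2.0, 3.0, 4.0]) == 2.5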
def melody_mapper(notes):
    """
    Makes a map of a melody to be played

    each item in the list 'notes' should contain:
        note - the note to play
        duration - length in seconds the sound will be played
        sleep - time in seconds to pause

    (note, duration, sleep) example: [('A4', 1, 0.5), ('C3', 0.5, 0)]

    :param notes: List of notes
    :return: dict of melody map info
    """
    m_map = {}
    num_of_notes = 1
    for note_info in notes:
        note, duration, sleep = note_info
        m_map[str(num_of_notes)] = {'note': note,
                                    'frequency': get_note(note)[1],
                                    'duration': duration,
                                    'sleep': sleep}
        num_of_notes += 1
    return m_map
20,365
def sample_bounding_box_scale_balanced_black(landmarks): """ Samples a bounding box for cropping so that the distribution of scales in the training data is uniform. """ bb_min = 0.9 bb_old = image.get_bounding_box(landmarks) bb_old_shape = np.array((bb_old[2] - bb_old[0], bb_old[3] - bb_old[1])) bb_old_size = np.max(bb_old_shape) margin = (1 - bb_min) / 2 bb_old_min = np.round([bb_old[0] + bb_old_shape[0] * margin, bb_old[1] + bb_old_shape[1] * margin, bb_old[2] - bb_old_shape[0] * margin, bb_old[3] - bb_old_shape[1] * margin]) scale = np.random.random_sample() * 0.94 + 0.08 bb_crop_size = int(round(bb_old_size / scale)) bb_crop_start_x = np.random.random_integers(low=bb_old_min[2] - bb_crop_size, high=bb_old_min[0] + 1) bb_crop_start_y = np.random.random_integers(low=bb_old_min[3] - bb_crop_size, high=bb_old_min[1] + 1) bb_crop_end_x = bb_crop_start_x + bb_crop_size bb_crop_end_y = bb_crop_start_y + bb_crop_size bb_crop = [bb_crop_start_x, bb_crop_start_y, bb_crop_end_x, bb_crop_end_y] return np.array(bb_crop)
20,366
def get_files(data_path):
    """
    Collect the images in a directory and its immediate subdirectories.
    :param data_path:
    :return:
    """
    files = []
    exts = ['jpg', 'png', 'jpeg', 'JPG', 'bmp']
    for ext in exts:
        # glob.glob returns all matching file names,
        # both directly under data_path and one subdirectory level down.
        files.extend(glob.glob(os.path.join(data_path, '*.{}'.format(ext))))
        files.extend(glob.glob(os.path.join(data_path, '*', '*.{}'.format(ext))))
    return files
20,367
def get_trainable_layers(layers):
    """Returns a list of layers that have weights."""
    trainable_layers = []
    # Loop through all layers
    for l in layers:
        # If layer is a wrapper, find inner trainable layer
        l = find_trainable_layer(l)
        # Include layer if it has weights
        if l.get_weights():
            trainable_layers.append(l)
    return trainable_layers
20,368
def outcome_from_application_return_code(return_code: int) -> outcome.Outcome: """Create either an :class:`outcome.Value` in the case of a 0 `return_code` or an :class:`outcome.Error` with a :class:`ReturnCodeError` otherwise. Args: return_code: The return code to be processed. Returns: The outcome wrapping the passed in return code. """ if return_code == 0: return outcome.Value(return_code) return outcome.Error(qtrio.ReturnCodeError(return_code))
20,369
def group_by_scale(labels): """ Utility that groups attribute labels by time scale """ groups = defaultdict(list) # Extract scales from labels (assumes that the scale is given by the last numeral in a label) for s in labels: m = re.findall("\d+", s) if m: groups[m[-1]].append(s) else: print("Bad attribute: ", s) return list(groups.values())
20,370
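A short usage sketch for `group_by_scale` above, assuming the function and its `re`/`collections.defaultdict` imports are in scope; the attribute labels are invented for illustration.

labels = ["temp_mean_1", "humidity_max_1", "temp_mean_7", "humidity_max_7"]
groups = group_by_scale(labels)
# Labels are grouped by their trailing numeral (the time scale), e.g.
# [['temp_mean_1', 'humidity_max_1'], ['temp_mean_7', 'humidity_max_7']]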
def FontMapper_GetEncodingDescription(*args, **kwargs): """FontMapper_GetEncodingDescription(int encoding) -> String""" return _gdi_.FontMapper_GetEncodingDescription(*args, **kwargs)
20,371
def create_merged_ngram_dictionaries(indices, n): """Generate a single dictionary for the full batch. Args: indices: List of lists of indices. n: Degree of n-grams. Returns: Dictionary of hashed(n-gram tuples) to counts in the batch of indices. """ ngram_dicts = [] for ind in indices: ngrams = n_gram.find_all_ngrams(ind, n=n) ngram_counts = n_gram.construct_ngrams_dict(ngrams) ngram_dicts.append(ngram_counts) merged_gen_dict = Counter() for ngram_dict in ngram_dicts: merged_gen_dict += Counter(ngram_dict) return merged_gen_dict
20,372
def set_cipher(shared_key, nonce, key_name, hint): """Set shared key to the encryptor and decryptor Encryptor and Decryptor are created for each inter-node connection """ global encryptors, decryptors cipher = Cipher(algorithms.AES(bytes(shared_key)), modes.CTR(nonce), backend=default_backend()) encryptors[key_name] = [cipher.encryptor(), hint] decryptors[key_name] = cipher.decryptor()
20,373
def compute_hash_base64(*fields): """bytes -> base64 string""" value = compute_hash(*fields) return base64.b64encode(value).decode()
20,374
def triplet_margin_loss( anchor, positive, negative, margin=0.1, p=2, use_cosine=False, swap=False, eps=1e-6, scope='', reduction=tf.losses.Reduction.SUM ):
    """
    Computes the triplet margin loss

    Args:
        anchor: The tensor containing the anchor embeddings
        positive: The tensor containing the positive embeddings
        negative: The tensor containing the negative embeddings
            The shapes of anchor, positive and negative must all be equal
        margin: The margin in the triplet loss
        p: The norm degree for pairwise distances
            Options: 1, 2   Default: 2
        use_cosine: Should cosine distance be used?
        swap: Should we swap anchor and positive to get the harder negative?
        eps: A value used to prevent numerical instability
        reduction: The reduction method to use
    """
    assert anchor.shape == positive.shape == negative.shape
    assert p in {1, 2}

    if use_cosine:
        def dist_fn(labels, preds):
            return tf.losses.cosine_distance(
                labels, preds, axis=1, reduction=tf.losses.Reduction.NONE
            )
    elif p == 2:
        def dist_fn(labels, preds):
            return tf.losses.mean_squared_error(
                labels, preds, reduction=tf.losses.Reduction.NONE
            )
    elif p == 1:
        def dist_fn(labels, preds):
            return tf.losses.absolute_difference(
                labels, preds, reduction=tf.losses.Reduction.NONE
            )
    else:
        raise NotImplementedError()

    with tf.variable_scope(scope):
        pdist = dist_fn(anchor, positive)
        ndist = dist_fn(anchor, negative)

        if swap:
            # ndist_2 is the distance between positive and negative
            ndist_2 = dist_fn(positive, negative)
            ndist = tf.maximum(ndist, ndist_2)

        loss = tf.maximum(pdist - ndist + margin, 0)

        if reduction == tf.losses.Reduction.NONE:
            return loss
        elif reduction == tf.losses.Reduction.SUM:
            return tf.reduce_sum(loss)
        elif reduction == tf.losses.Reduction.MEAN:
            return tf.reduce_mean(loss)
        elif reduction == tf.losses.Reduction.SUM_OVER_BATCH_SIZE:
            return tf.reduce_sum(loss) / tf.cast(tf.shape(anchor)[0], loss.dtype)
        elif reduction == tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS:
            return tf.reduce_sum(loss) / tf.reduce_sum(
                tf.cast(tf.greater(loss, 0), loss.dtype))
        else:
            msg = '{} has not been implemented for triplet_margin_loss'.format(
                reduction)
            raise NotImplementedError(msg)
20,375
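# NumPy illustration of the triplet margin computation above (p=2, no swap);
# this is a sketch of the loss semantics only, not a drop-in replacement for
# the TensorFlow version.
import numpy as np

anchor = np.array([[0.0, 1.0], [1.0, 0.0]])
positive = np.array([[0.1, 0.9], [0.9, 0.1]])
negative = np.array([[1.0, 0.0], [0.0, 1.0]])
margin = 0.1

pdist = np.mean((anchor - positive) ** 2, axis=1)   # squared distance to positive
ndist = np.mean((anchor - negative) ** 2, axis=1)   # squared distance to negative
loss = np.maximum(pdist - ndist + margin, 0.0)      # hinge on the margin
print(loss.mean())   # the Reduction.MEAN case; 0.0 for these embeddings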
def f_raw(x, a, b): """ The raw function call; performs no checks on valid parameters. :return: a * x + b """ return a * x + b
20,376
def compute_barycentric_weights_1d(samples, interval_length=None, return_sequence=False, normalize_weights=False): """ Return barycentric weights for a sequence of samples, e.g. the sequence x0, x1, x2, where the order is the order in which the samples are added to the interpolant. Parameters ---------- return_sequence : boolean True - return [1],[1/(x0-x1),1/(x1-x0)], [1/((x0-x2)(x0-x1)),1/((x1-x2)(x1-x0)),1/((x2-x1)(x2-x0))] False- return [1/((x0-x2)(x0-x1)),1/((x1-x2)(x1-x0)),1/((x2-x1)(x2-x0))] Note ---- If the length of the interval [a,b] is 4C then the weights will grow or decay exponentially at C^{-n}, where n is the number of points, causing overflow or underflow. To minimize this effect multiply each x_j-x_k by C^{-1}. This has the effect of rescaling all weights by C^n. In rare situations where n is very large, randomize or use a Leja ordering of the samples before computing the weights. See "Barycentric Lagrange Interpolation" by Jean-Paul Berrut and Lloyd N. Trefethen, 2004. """ if interval_length is None: scaling_factor = 1. else: scaling_factor = interval_length/4. C_inv = 1/scaling_factor num_samples = samples.shape[0] try: from pyapprox.cython.barycentric_interpolation import \ compute_barycentric_weights_1d_pyx weights = compute_barycentric_weights_1d_pyx(samples, C_inv) except (ImportError, ModuleNotFoundError) as e: msg = 'compute_barycentric_weights_1d extension failed' trace_error_with_msg(msg, e) weights = np.empty((num_samples, num_samples), dtype=float) weights[0, 0] = 1. for jj in range(1, num_samples): weights[jj, :jj] = C_inv * \ (samples[:jj]-samples[jj])*weights[jj-1, :jj] weights[jj, jj] = np.prod(C_inv*(samples[jj]-samples[:jj])) weights[jj-1, :jj] = 1./weights[jj-1, :jj] weights[num_samples-1, :num_samples] =\ 1./weights[num_samples-1, :num_samples] if not return_sequence: result = weights[num_samples-1, :] # make sure the magnitude of the weights is approximately O(1) # useful for sample sets like Leja sequences for Gaussian variables # where the interval [a,b] is not very useful # print('max_weights',result.min(),result.max()) if normalize_weights: raise NotImplementedError('I do not think I want to support this option') result /= np.absolute(result).max() # result[I]=result else: result = weights assert np.all(np.isfinite(result)), (num_samples) return result
20,377
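# Self-contained check of the textbook barycentric weights
# w_j = 1 / prod_{k != j} (x_j - x_k), which is what the final row of the
# sequence above holds (up to the C^{-1} rescaling); does not require pyapprox.
import numpy as np

x = np.array([-1.0, 0.0, 1.0])
n = x.shape[0]
w = np.array([1.0 / np.prod(x[j] - np.delete(x, j)) for j in range(n)])
# For the nodes -1, 0, 1 this gives w = [0.5, -1.0, 0.5].

# Barycentric interpolation of f(x) = x**2 at a point not in the node set
# reproduces the quadratic exactly.
f = x ** 2
xq = 0.3
p = np.sum(w * f / (xq - x)) / np.sum(w / (xq - x))
assert np.isclose(p, xq ** 2)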
def _generate_conversions(): """ Generate conversions for unit systems. """ # conversions to inches to_inch = {'microinches': 1.0 / 1000.0, 'mils': 1.0 / 1000.0, 'inches': 1.00, 'feet': 12.0, 'yards': 36.0, 'miles': 63360, 'angstroms': 1.0 / 2.54e8, 'nanometers': 1.0 / 2.54e7, 'microns': 1.0 / 2.54e4, 'millimeters': 1.0 / 2.54e1, 'centimeters': 1.0 / 2.54e0, 'meters': 1.0 / 2.54e-2, 'kilometers': 1.0 / 2.54e-5, 'decimeters': 1.0 / 2.54e-1, 'decameters': 1.0 / 2.54e-3, 'hectometers': 1.0 / 2.54e-4, 'gigameters': 1.0 / 2.54e-11, 'AU': 5889679948818.897, 'light years': 3.72461748e17, 'parsecs': 1.21483369e18} # if a unit is known by other symbols, include them here synonyms = collections.defaultdict(list) synonyms.update({'millimeters': ['mm'], 'inches': ['in', '"'], 'feet': ["'"], 'meters': ['m']}) # add non- plural version of units to conversions # eg, millimeters -> millimeter for key in to_inch.keys(): if key[-2:] == 'es' and key != 'miles': synonyms[key].append(key[:-2]) elif key[-1] == 's': synonyms[key].append(key[:-1]) # update the dict with synonyms for key, new_keys in synonyms.items(): value = to_inch[key] for new_key in new_keys: to_inch[new_key] = value # convert back to regular dictionary and make keys all lower case to_inch = {k.strip().lower(): v for k, v in to_inch.items()} return to_inch
20,378
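# Usage sketch: the table maps unit names (and their synonyms) to inches, so a
# conversion between two units is a ratio of table entries. Assumes the
# _generate_conversions function above is in scope (it needs `import collections`).
to_inch = _generate_conversions()

assert to_inch['mm'] == to_inch['millimeters']         # synonyms share a value
inches = 2.0 * to_inch['m']                            # 2 meters in inches (~78.74)
meters = 100.0 * to_inch['in'] / to_inch['m']          # 100 inches back to meters (2.54)
print(inches, meters)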
def sleep(seconds, check=True): """ Sleep the specified seconds checking for abort button periodically :param seconds: float :param check: bool, check for abort """ if check: end_time = time.time() + seconds while time.time() < end_time: time.sleep(.1) check_abort() else: time.sleep(seconds)
20,379
def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf): """ Rather than attempting to merge files that were modified on both branches, it marks them as unresolved. The resolve command must be used to resolve these conflicts.""" return 1
20,380
def parser( text: str, *, field: str, pattern: Pattern[str], type_converter: Optional[Callable] = None, clean_up: Optional[Callable] = None, limit_size: Optional[int] = None, null_value: Optional[Union[str, int, bool, None]] = None, ) -> str: """ Returns text based on regex pattern and other provided conditions. :param text: Str. Text to parse. :param field: Str. Label for output info, eg 'charges', 'bail'. :param pattern: Pattern. Regex, compiled pattern used to search. :param type_converter: Callable. Optional. Set type for return value. Defaults to string converter. :param clean_up: Callable. Optional. Function that does any final formatting. :param limit_size: Int. Optional. Max number of chars in returned string. :param null_value: Any. Optional. Value to set when parse conditions aren't met. Default None. :return: Str. Desired pattern in text. """ # set default if no type converter func is provided if not type_converter: type_converter = str # parse logging.info(f"Attempting to extract {field} from text with Regex...") try: match = pattern.search(text) final_value = match.group(field) logging.info(f"{field.upper()}, FIRST PASS: {final_value}") # Options if clean_up: final_value = clean_up(final_value) if limit_size: final_value = final_value[0:limit_size] # Trim final_value = final_value.strip() # Type final_value = type_converter(final_value) except (AttributeError, ValueError) as e: logging.info( "Parsing failed or couldn't find target value - setting " "to None" ) final_value = null_value logging.info(f"{field.upper()}, FINAL: {final_value}") return final_value
20,381
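# Usage sketch for parser with a hypothetical docket line; the pattern, field
# name and clean-up lambda are illustrative, not taken from the original project.
import re

bail_pattern = re.compile(r"Bail Amount:\s*\$(?P<bail>[\d,]+)")

bail = parser(
    "Bail Amount: $25,000 set on 01/02/2020",
    field="bail",
    pattern=bail_pattern,
    type_converter=int,
    clean_up=lambda s: s.replace(",", ""),
)
print(bail)   # 25000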
def func4(): """ Let’s convert the item from RGB to grayscale, using the service we created: """
20,382
def checkpoint_metrics_path(checkpoint_path, eval_name, file_name=None): """Gets a path to the JSON of eval metrics for checkpoint in eval_name.""" checkpoint_dir = os.path.dirname(checkpoint_path) checkpoint_name = os.path.basename(checkpoint_path) if eval_name: # This bit of magic is defined by the estimator framework, and isn't easy # to change. We only get to specify the suffix. checkpoint_dir = os.path.join(checkpoint_dir, 'eval_' + eval_name) if not file_name: return os.path.join(checkpoint_dir, checkpoint_name + '.metrics') return os.path.join(checkpoint_dir, file_name)
20,383
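# Quick illustration of the directory layout the helper above produces; the
# paths are hypothetical and the function (plus `import os`) is assumed in scope.
print(checkpoint_metrics_path('/tmp/train/model.ckpt-100', 'validation'))
# /tmp/train/eval_validation/model.ckpt-100.metrics
print(checkpoint_metrics_path('/tmp/train/model.ckpt-100', '', file_name='best.json'))
# /tmp/train/best.json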
def colorize(text='', opts=(), **kwargs): """ Return your text, enclosed in ANSI graphics codes. Depends on the keyword arguments 'fg' and 'bg', and the contents of the opts tuple/list. Return the RESET code if no parameters are given. Valid colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' Valid options: 'bold' 'underscore' 'blink' 'reverse' 'conceal' 'noreset' - string will not be auto-terminated with the RESET code Examples: colorize('hello', fg='red', bg='blue', opts=('blink',)) colorize() colorize('goodbye', opts=('underscore',)) print(colorize('first line', fg='red', opts=('noreset',))) print('this should be red too') print(colorize('and so should this')) print('this should not be red') """ code_list = [] if text == '' and len(opts) == 1 and opts[0] == 'reset': return '\x1b[%sm' % RESET for k, v in kwargs.items(): if k == 'fg': code_list.append(foreground[v]) elif k == 'bg': code_list.append(background[v]) for o in opts: if o in opt_dict: code_list.append(opt_dict[o]) if 'noreset' not in opts: text = '%s\x1b[%sm' % (text or '', RESET) return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
20,384
def get_prediction(img_path, threshold): """ get_prediction parameters: - img_path - path of the input image - threshold - threshold value for prediction score method: - Image is obtained from the image path - the image is converted to image tensor using PyTorch's Transforms - image is passed through the model to get the predictions - class, box coordinates are obtained, but only prediction score > threshold are chosen. """ img = Image.open(img_path) transform = T.Compose([T.ToTensor()]) img = transform(img) pred = model([img]) pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())] pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().numpy())] pred_score = list(pred[0]['scores'].detach().numpy()) pred_t = [pred_score.index(x) for x in pred_score if x > threshold][-1] pred_boxes = pred_boxes[:pred_t + 1] pred_class = pred_class[:pred_t + 1] return pred_boxes, pred_class
20,385
def display_img(result): """ Display images fetched from any subreddit in your terminal. Args: result([list]): A list of urls which you want to display. """ lst = [] while True: url = random.choice(result) if url not in lst: subprocess.call("w3m -o ext_image_viewer=false -o confirm_qq=false {}".format(url), shell=True) subprocess.call("clear", shell=True) lst.append(url) print("%sPress 'e' to exit or any other key to continue....%s" % (green, reset)) key = getch() if key == "e": subprocess.call("clear", shell=True) sys.exit()
20,386
def make_fixed_size(protein, shape_schema, msa_cluster_size, extra_msa_size, num_res, num_templates=0): """Guess at the MSA and sequence dimensions to make fixed size.""" pad_size_map = { NUM_RES: num_res, NUM_MSA_SEQ: msa_cluster_size, NUM_EXTRA_SEQ: extra_msa_size, NUM_TEMPLATES: num_templates, } for k, v in protein.items(): if k == 'extra_cluster_assignment': continue shape = list(v.shape) schema = shape_schema[k] assert len(shape) == len(schema), \ f'Rank mismatch between shape and shape schema for {k}: ' \ f'{shape} vs {schema}' pad_size = [pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)] padding = [(0, p - v.shape[i]) for i, p in enumerate(pad_size)] if padding: protein[k] = np.pad(v, padding) # reshape returns a new array, so assign the result back protein[k] = protein[k].reshape(pad_size) return protein
20,387
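# Minimal NumPy illustration of the pad-to-fixed-size idea used above: pad each
# axis on the right until the array reaches its target shape. Shapes are made up.
import numpy as np

msa = np.ones((3, 7))            # e.g. 3 MSA sequences, 7 residues
target = (8, 10)                 # desired (NUM_MSA_SEQ, NUM_RES)

padding = [(0, t - s) for s, t in zip(msa.shape, target)]
padded = np.pad(msa, padding)    # zero-pad on the right of each axis
print(padded.shape)              # (8, 10)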
def plot_skymap_tract(skyMap, tract=0, title=None, ax=None): """ Plot a tract from a skyMap. Parameters ---------- skyMap: lsst.skyMap.SkyMap The SkyMap object containing the tract and patch information. tract: int [0] The tract id of the desired tract to plot. title: str [None] Title of the tract plot. If None, then use `tract <id>`. ax: matplotlib.axes._subplots.AxesSubplot [None] The subplot object to contain the tract plot. If None, then make a new one. Returns ------- matplotlib.axes._subplots.AxesSubplot: The subplot containing the tract plot. """ if title is None: title = 'tract {}'.format(tract) tractInfo = skyMap[tract] tractBox = afw_geom.Box2D(tractInfo.getBBox()) tractPosList = tractBox.getCorners() wcs = tractInfo.getWcs() xNum, yNum = tractInfo.getNumPatches() if ax is None: fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(111) tract_center = wcs.pixelToSky( tractBox.getCenter()).getPosition(afw_geom.degrees) ax.text(tract_center[0], tract_center[1], '%d' % tract, size=16, ha="center", va="center", color='blue') for x in range(xNum): for y in range(yNum): patchInfo = tractInfo.getPatchInfo([x, y]) patchBox = afw_geom.Box2D(patchInfo.getOuterBBox()) pixelPatchList = patchBox.getCorners() path = make_patch(pixelPatchList, wcs) patch = patches.PathPatch(path, alpha=0.1, lw=1) ax.add_patch(patch) center = wcs.pixelToSky( patchBox.getCenter()).getPosition(afw_geom.degrees) ax.text(center[0], center[1], '%d,%d' % (x, y), size=6, ha="center", va="center") skyPosList = [wcs.pixelToSky(pos).getPosition(afw_geom.degrees) for pos in tractPosList] ax.set_xlim(max(coord[0] for coord in skyPosList) + 1, min(coord[0] for coord in skyPosList) - 1) ax.set_ylim(min(coord[1] for coord in skyPosList) - 1, max(coord[1] for coord in skyPosList) + 1) ax.grid(ls=':', color='gray') ax.set_xlabel("RA (deg.)") ax.set_ylabel("Dec (deg.)") ax.set_title(title) return ax
20,388
def label(vertex): """ Graph vertex label in dot format """ label = f"{vertex.name} {vertex.state or ''}\n{vertex.traceback or ''}" label = json.dumps(label).replace("\\n", r"\l") return f"[label={label}]"
20,389
def compute_hashes_from_fileobj(fileobj, chunk_size=1024 * 1024): """Compute the linear and tree hash from a fileobj. This function will compute the linear/tree hash of a fileobj in a single pass through the fileobj. :param fileobj: A file like object. :param chunk_size: The size of the chunks to use for the tree hash. This is also the buffer size used to read from `fileobj`. :rtype: tuple :return: A tuple of (linear_hash, tree_hash). Both hashes are returned in hex. """ # Python 3+, not binary if six.PY3 and hasattr(fileobj, 'mode') and 'b' not in fileobj.mode: raise ValueError('File-like object must be opened in binary mode!') linear_hash = hashlib.sha256() chunks = [] chunk = fileobj.read(chunk_size) while chunk: # It's possible to get a file-like object that has no mode (checked # above) and returns something other than bytes (e.g. str). So here # we try to catch that and encode to bytes. if not isinstance(chunk, bytes): chunk = chunk.encode(getattr(fileobj, 'encoding', '') or 'utf-8') linear_hash.update(chunk) chunks.append(hashlib.sha256(chunk).digest()) chunk = fileobj.read(chunk_size) if not chunks: chunks = [hashlib.sha256(b'').digest()] return linear_hash.hexdigest(), bytes_to_hex(tree_hash(chunks))
20,390
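# Hedged sketch of the tree_hash helper the function above relies on: SHA-256
# digests of 1 MiB chunks are combined pairwise until one digest remains (the
# Amazon Glacier tree-hash scheme); the real helper may differ in detail.
import hashlib

def tree_hash_sketch(chunk_digests):
    hashes = list(chunk_digests)
    while len(hashes) > 1:
        paired = []
        for i in range(0, len(hashes), 2):
            if i + 1 < len(hashes):
                paired.append(hashlib.sha256(hashes[i] + hashes[i + 1]).digest())
            else:
                paired.append(hashes[i])   # an odd digest is carried up unchanged
        hashes = paired
    return hashes[0]

chunks = [hashlib.sha256(c).digest() for c in (b'a', b'b', b'c')]
print(tree_hash_sketch(chunks).hex())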
def imshow(axim, img, amp_range=None, extent=None, interpolation='nearest', aspect='auto', origin='upper', orientation='horizontal', cmap='jet'): """Display an image on the given axes. extent - list of four image physical limits for labeling cmap - colormap name, e.g. 'gray_r' """ # axim.cla() imsh = axim.imshow(img, interpolation=interpolation, aspect=aspect, origin=origin, extent=extent, cmap=cmap) if amp_range is not None: imsh.set_clim(amp_range[0], amp_range[1]) return imsh
20,391
def test_get_aws_client(mock_get_aws_client) -> None: """ Test that a baseclient is returned """ assert get_aws_client()
20,392
def test_duration_min_inclusive001_1114_duration_min_inclusive001_1114_v(mode, save_output, output_format): """ TEST :Facet Schemas for string : facet=minInclusive and value=P1Y1MT1H and document value=P1Y1MT1H """ assert_bindings( schema="msData/datatypes/Facets/duration/duration_minInclusive001.xsd", instance="msData/datatypes/Facets/duration/duration_minInclusive001.xml", class_name="Test", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
20,393
def get_total(): """ Return the rounded total as properly rounded string. Credits: https://github.com/dbrgn/coverage-badge """ cov = coverage.Coverage() cov.load() total = cov.report(file=Devnull()) class Precision(coverage.results.Numbers): """ A class for using the percentage rounding of the main coverage package, with any percentage. To get the string format of the percentage, use the ``pc_covered_str`` property. """ def __init__(self, percent): self.percent = percent @property def pc_covered(self): return self.percent return Precision(total).pc_covered_str
20,394
def _infer_subscript_list(context, index): """ Handles slices in subscript nodes. """ if index == ':': # Like array[:] return ValueSet([iterable.Slice(context, None, None, None)]) elif index.type == 'subscript' and not index.children[0] == '.': # subscript basically implies a slice operation, except for Python 2's # Ellipsis. # e.g. array[:3] result = [] for el in index.children: if el == ':': if not result: result.append(None) elif el.type == 'sliceop': if len(el.children) == 2: result.append(el.children[1]) else: result.append(el) result += [None] * (3 - len(result)) return ValueSet([iterable.Slice(context, *result)]) elif index.type == 'subscriptlist': return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)]) # No slices return context.infer_node(index)
20,395
def count_by_guess(dictionary, correctly=False): """ Count the number of correctly/incorrectly guessed images for a dataset :param dictionary: :param correctly: :return: """ guessed = 0 for response in dictionary: guessed = guessed + count_by_guess_user(response, correctly) return guessed
20,396
def get_game_by_index(statscursor, table, index): """ Fetch the row whose `num` column equals `index` from the given table. """ # The table name is interpolated directly into the query, so it must come # from a trusted source; only the index is passed as a bound parameter. query = "SELECT * FROM " + table + " WHERE num=:num" statscursor.execute(query, {'num': index}) return statscursor.fetchone()
20,397
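# Usage sketch against an in-memory SQLite database; the table layout is
# invented for illustration and the table name is assumed to be trusted.
import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute("CREATE TABLE games (num INTEGER, home TEXT, away TEXT)")
cur.execute("INSERT INTO games VALUES (1, 'Lions', 'Bears')")
conn.commit()

print(get_game_by_index(cur, 'games', 1))   # (1, 'Lions', 'Bears')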
def create_queue(): """Create the SQS queue and return the boto3 SQS client and queue object.""" conn = boto3.client('sqs', region_name=CONFIG['region']) queue_metadata = conn.create_queue(QueueName=QUEUE_NAME, Attributes={'VisibilityTimeout': '3600'}) if 'queue_tags' in CONFIG: conn.tag_queue(QueueUrl=queue_metadata['QueueUrl'], Tags=CONFIG['queue_tags']) # Get the SQS queue object from the queue URL sqs = boto3.resource('sqs', region_name=CONFIG['region']) queue = sqs.Queue(queue_metadata['QueueUrl']) return conn, queue
20,398
def db_describe(table, **args): """Return the list of columns for a database table (interface to `db.describe -c`). Example: >>> run_command('g.copy', vector='firestations,myfirestations') 0 >>> db_describe('myfirestations') # doctest: +ELLIPSIS {'nrows': 71, 'cols': [['cat', 'INTEGER', '20'], ... 'ncols': 22} >>> run_command('g.remove', flags='f', type='vector', name='myfirestations') 0 :param str table: table name :param list args: :return: parsed module output """ if 'database' in args and args['database'] == '': args.pop('database') if 'driver' in args and args['driver'] == '': args.pop('driver') s = read_command('db.describe', flags='c', table=table, **args) if not s: fatal(_("Unable to describe table <%s>") % table) cols = [] result = {} for l in s.splitlines(): f = l.split(':') key = f[0] f[1] = f[1].lstrip(' ') if key.startswith('Column '): n = int(key.split(' ')[1]) cols.insert(n, f[1:]) elif key in ['ncols', 'nrows']: result[key] = int(f[1]) else: result[key] = f[1:] result['cols'] = cols return result
20,399