Columns: content (string, lengths 22 to 815k), id (int64, values 0 to 4.91M)
def _b64urldec(input: str) -> bytes: """ Decode data from base64 urlsafe with stripped padding (as specified in the JWS RFC7515). """ # The input is stripped of padding '='. These are redundant when decoding (only relevant # for concatenated sequences of base64 encoded data) but the decoder checks for them. # Appending two padding '=' characters (the maximum number) is the easiest way to ensure it won't choke # on too little padding. return base64.urlsafe_b64decode(input + '==')
5,340,700
def get_sns_topic_arn(aws_creds, ec2_region): """ Retrieves the sns topic arn for the account """ rgt_client = ResourceGroupsTaggingClient(aws_creds, ec2_region, logger) sns_topic_arn = rgt_client.get_sns_topic_arn(SNS_TOPIC_TAG_KEY, SNS_TOPIC_TAG_VALUE) if not sns_topic_arn: raise SnsTopicNotFound(f"Account doesn't have the SNS topic tagged with " f"key: '{SNS_TOPIC_TAG_KEY}' and value: '{SNS_TOPIC_TAG_VALUE}'") return sns_topic_arn
5,340,701
def worker_task(instance_no, total_instances, bin_data_source_blob, logger=None): """ get the task for the worker arguments contains the various parameters that will be used by the machines to process the data like file numbers instance_no belongs to [0, total_instances - 1] """ if logger: log_info = logger.info else: log_info = print_alias BIN_DATA_STORAGE = os.path.expanduser('~/raw_data') PROCESSED_DATA_BLOB_NAME = "processed/" + bin_data_source_blob PROCESSED_DATA_STORAGE = os.path.expanduser('~/' + PROCESSED_DATA_BLOB_NAME) assigned_blobs = assign_files(instance_no=instance_no, total_instances=total_instances, bin_data_source_blob=bin_data_source_blob) log_info("Instance_no: {}".format(instance_no)) log_info('Blobs assigned: ' + str(assigned_blobs)) # downloading the files file_names = [] for blob in assigned_blobs: # downloading bin files rel_file_name = blob.name.replace(bin_data_source_blob + '/', '') filename = os.path.join(BIN_DATA_STORAGE, rel_file_name) make_dirs(os.path.dirname(filename)) blob.download_to_filename(filename) log_info('File {} downloaded to {}'.format(str(blob.name), filename)) file_names.append(filename) save_names = [] upload_names = [] for filename in file_names: # processing the file save_filename = filename.replace(BIN_DATA_STORAGE, PROCESSED_DATA_STORAGE).replace('.bin', '.json') make_dirs(os.path.dirname(save_filename)) log_parser.main(logger, filename=filename, save_filename=save_filename) save_names.append(save_filename) # uploading the file upload_name = save_filename.replace(os.path.expanduser('~/'), '') upload_blob(source_file_name=save_filename, destination_blob_name=upload_name) upload_names.append(upload_name) # print ("file_names: {}".format(file_names)) # print ("save_names: {}".format(save_names)) # print ("upload_names: {}".format(upload_names))
5,340,702
def get_universe_regions_region_id(*, language, region_id, accept_language='en-us', if_none_match=None): """ :param accept_language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag :param language: ['de', 'en-us', 'fr', 'ja', 'ru', 'zh'] Language to use in the response, takes precedence over Accept-Language :param region_id: region_id integer Get information on a region --- Alternate route: `/dev/universe/regions/{region_id}/` Alternate route: `/legacy/universe/regions/{region_id}/` Alternate route: `/v1/universe/regions/{region_id}/` --- This route expires daily at 11:05 """ ESI_request.request(accept_language=accept_language, if_none_match=if_none_match, language=language, region_id=region_id, data_source='tranquility', version='latest', HTTP_method='GET', path=f'/universe/regions/{region_id}/')
5,340,703
def p_cmdexpr_uniq(p): """cmdexpr : UNIQ"""
5,340,704
def create_bspline_basis(knots, spline_order, dt=0.02): """Create B-spline basis.""" # The repeated boundary knots are appended as required by the Cox-de Boor # recursive algorithm. See https://math.stackexchange.com/questions/2817170/ # what-is-the-purpose-of-having-repeated-knots-in-a-b-spline and the link # https://en.wikipedia.org/wiki/De_Boor%27s_algorithm. knots = list(knots) knots = [knots[0]] * spline_order + knots + [knots[-1]] * spline_order num_basis = len(knots) - spline_order - 1 # Query token is in format: [knots, basis coefficients, spline order] # See https://docs.scipy.org/doc/scipy/reference/generated/ # scipy.interpolate.splev.html query_token = [0, 0, spline_order] query_token[0] = np.array(knots) time_line = np.linspace(knots[0], knots[-1], int(np.round(knots[-1]/dt)) + 1) # Add column for the constant term. basis_matrix = np.zeros((len(time_line), num_basis + 1)) basis_matrix[:, -1] = np.ones(len(time_line)) # Constant term. for basis_index in range(num_basis): basis_coefficients = np.zeros(num_basis) basis_coefficients[basis_index] = 1.0 query_token[1] = basis_coefficients.tolist() base = scipy.interpolate.splev(time_line, query_token) basis_matrix[:, basis_index] = base return basis_matrix, time_line
5,340,705
def execute( device, commands, creds=None, incremental=None, with_errors=False, timeout=settings.DEFAULT_TIMEOUT, command_interval=0, force_cli=False ): """ Connect to a ``device`` and sequentially execute all the commands in the iterable ``commands``. Returns a Twisted ``Deferred`` object, whose callback will get a sequence of all the results after the connection is finished. ``commands`` is usually just a list, however, you can have also make it a generator, and have it and ``incremental`` share a closure to some state variables. This allows you to determine what commands to execute dynamically based on the results of previous commands. This implementation is experimental and it might be a better idea to have the ``incremental`` callback determine what command to execute next; it could then be a method of an object that keeps state. BEWARE: Your generator cannot block; you must immediately decide what next command to execute, if any. Any ``None`` in the command sequence will result in a ``None`` being placed in the output sequence, with no command issued to the device. If any command returns an error, the connection is dropped immediately and the errback will fire with the failed command. You may set ``with_errors`` to get the exception objects in the list instead. Connection failures will still fire the errback. `~trigger.exceptions.LoginTimeout` errors are always possible if the login process takes longer than expected and cannot be disabled. :param device: A `~trigger.netdevices.NetDevice` object :param commands: An iterable of commands to execute (without newlines). :param creds: (Optional) A 2-tuple of (username, password). If unset it will fetch it from ``.tacacsrc``. :param incremental: (Optional) A callback that will be called with an empty sequence upon connection and then called every time a result comes back from the device, with the list of all results. :param with_errors: (Optional) Return exceptions as results instead of raising them :param timeout: (Optional) Command response timeout in seconds. Set to ``None`` to disable. The default is in ``settings.DEFAULT_TIMEOUT``. `~trigger.exceptions.CommandTimeout` errors will result if a command seems to take longer to return than specified. :param command_interval: (Optional) Amount of time in seconds to wait between sending commands. :param force_cli: (Optional) Juniper-only: Force use of CLI instead of Junoscript. :returns: A Twisted ``Deferred`` object """ execute_func = _choose_execute(device, force_cli=force_cli) return execute_func(device=device, commands=commands, creds=creds, incremental=incremental, with_errors=with_errors, timeout=timeout, command_interval=command_interval)
5,340,706
def _attempt_get_hash_function(hash_name, hashlib_used=hashlib, sys_used=sys): """Wrapper used to try to initialize a hash function given. If successful, returns the name of the hash function back to the user. Otherwise returns None. """ try: _fetch_hash = getattr(hashlib_used, hash_name, None) if _fetch_hash is None: return None _attempt_init_of_python_3_9_hash_object(_fetch_hash, sys_used) return hash_name except ValueError: # if attempt_init_of_python_3_9 throws, this is typically due to FIPS being enabled # however, if we get to this point, the viable hash function check has either been # bypassed or otherwise failed to properly restrict the user to only the supported # functions. As such throw the UserError as an internal assertion-like error. return None
5,340,707
def get_proxy(usage: str): """ Fetch a proxy via the web API. :param usage: target site, corresponding to a key of WEB_AVAILABLE_PROXIES :return: an available proxy, or None """ url = API_SERVER + "/proxy?usage={}".format(usage) res = requests.get(url, timeout=5) try: if res.status_code == 200: return res.json().get("resource").get("proxy") else: return None except Exception: return None
5,340,708
def test_esnli_preprocessor(): """tests esnli preprocessor""" raw_data = {} for field in dataclasses.fields(RawESNLIExample): key = field.name raw_data[key] = [getattr(example, key) for example in RAW_EXAMPLES_1] dataset = datasets.Dataset.from_dict(raw_data) dataset = ESNLIBuilder.preprocess(dataset) preprocessed_data = dataset.to_dict(batch_size=1, batched=True) # return iterator preprocessed_examples = [] for batch in preprocessed_data: # unbatch preprocessed_example = PreprocessedESNLIExample( **{k: v[0] for k, v in batch.items()} ) preprocessed_examples.append(preprocessed_example) assert preprocessed_examples == PREPROCESSED_EXAMPLES_1
5,340,709
def _load_blocks_txt(): """Load block name from Blocks.txt.""" with open_unicode_data_file("Blocks.txt") as blocks_txt: block_ranges = _parse_code_ranges(blocks_txt.read()) for first, last, block_name in block_ranges: _block_names.append(block_name) _block_range[block_name] = (first, last) for character_code in range(first, last + 1): _block_data[character_code] = block_name
5,340,710
def test_wktext(): """Test +wktext parameter is preserved.""" proj4 = ('+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 ' '+x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext ' '+no_defs') assert 'wktext' in crs.from_string(proj4)
5,340,711
def column_names_get(subject: str) -> tuple: """ Returns column names. """ if subject == c.SUBJECT.PLANETS: return c.HEADERS.PLANETS elif subject == c.SUBJECT.STARSHIPS: return c.HEADERS.STARSHIPS elif subject == c.SUBJECT.VEHICLES: return c.HEADERS.VEHICLES elif subject == c.SUBJECT.PEOPLE: return c.HEADERS.PEOPLE else: raise ValueError(f'There are no column names for the {subject} subject.')
5,340,712
def is_morepath_template_auto_reload(): """ Returns True if auto reloading should be enabled. """ auto_reload = os.environ.get("MOREPATH_TEMPLATE_AUTO_RELOAD", "") return auto_reload.lower() in {"1", "yes", "true", "on"}
5,340,713
def handle_get_actor_report(self, handle, connection, match, data, hdr): """ GET /actor/{actor-id}/report Some actors store statistics on inputs and outputs; this reports them. Not always present. Response status code: OK or NOT_FOUND Response: Depends on actor """ self._actor_report(handle, connection, match, data, hdr)
5,340,714
def relabel_prometheus(job_config): """Get some prometheus configuration labels.""" relabel = { 'path': '__metrics_path__', 'scheme': '__scheme__', } labels = { relabel[key]: value for key, value in job_config.items() if key in relabel.keys() } # parse __param_ parameters for param, value in job_config.get('params', {}).items(): labels['__param_%s' % (param,)] = value return labels
5,340,715
def q_geography_capital(): """Ask what the capital of a given country is.""" question = QuestionGenerator() question.set_type('geography') # select country all_countries = facts.get_geography_countries_list() country = random.choice(all_countries) # formulate question question.ask(f"Was ist die Hauptstadt von {country}") # answer capital = facts.get_geography_capital(country) question.set_answer(capital) # other options other_capitals = [c for c in facts.get_geography_capitals_set() if c != capital] for c in other_capitals: question.add_wrong_option(c) return question.create(num_options=3)
5,340,716
def binary_stabilizer_to_pauli_stabilizer(stabilizer_tableau): """ Convert a stabilizer tableau to a list of PauliTerms :param stabilizer_tableau: Stabilizer tableau to turn into pauli terms :return: a list of PauliTerms representing the tableau :rtype: List of PauliTerms """ stabilizer_list = [] num_qubits = (stabilizer_tableau.shape[1] - 1) // 2 for nn in range(stabilizer_tableau.shape[0]): # iterate through the rows stabilizer_element = [] for ii in range(num_qubits): if stabilizer_tableau[nn, ii] == 1 and stabilizer_tableau[nn, ii + num_qubits] == 0: stabilizer_element.append(sX(ii)) elif stabilizer_tableau[nn, ii] == 0 and stabilizer_tableau[nn, ii + num_qubits] == 1: stabilizer_element.append(sZ(ii)) elif stabilizer_tableau[nn, ii] == 1 and stabilizer_tableau[nn, ii + num_qubits] == 1: stabilizer_element.append(sY(ii)) stabilizer_term = reduce(lambda x, y: x * y, stabilizer_element) * ((-1) ** stabilizer_tableau[nn, -1]) stabilizer_list.append(stabilizer_term) return stabilizer_list
5,340,717
def move_and_replace(src, dst): """ Helper function used to move files from one place to another, creating or replacing them if needed :param src: source directory :param dst: destination directory """ print('move %s to %s' % (src, dst)) src = os.path.abspath(src) dst = os.path.abspath(dst) for src_dir, _, files in os.walk(src): # using os walk to navigate through the directory tree # keep the dir structure by replacing the source root with # the destination on the walked path dst_dir = src_dir.replace(src, dst) if not os.path.exists(dst_dir): # so the move doesn't fail, create the missing dirs os.makedirs(dst_dir) for file_ in files: src_file = os.path.join(src_dir, file_) dst_file = os.path.join(dst_dir, file_) if os.path.exists(dst_file): os.remove(dst_file) # so the move doesn't fail, remove existing files shutil.move(src_file, dst_dir) # move the files shutil.rmtree(src)
5,340,718
def checkGroup(self, group, colls): """ Args: group: iterable of elements to look for. colls: collection to check membership against. Returns: the elements of group found in colls if every element is present, otherwise an empty list. """ cut = [] for elem in group: if elem in colls: cut.append(elem) if len(cut) == len(group): return cut else: return []
5,340,719
def get_node_shapes(input_graph_def, target_nodes): """Get shapes of target nodes from input_graph_def, shapes may be partial""" node_shapes = [] for target in target_nodes: for node in input_graph_def.node: if node.name == target: if not 'shape' in node.attr: print("Warning: Fail to get output shape of node: {}".format(node)) node_shapes.append( tensor_shape.as_shape(node.attr['shape'].shape).as_list()) return node_shapes
5,340,720
def calculate_cost(A3, Y): """ Compute the cost function value. Args: A3: output of forward propagation, of shape (output size, number of examples) Y: vector of true labels, same shape as A3 Return: cost: value of the cost function """ m = Y.shape[1] logprobs = np.multiply(-np.log(A3), Y) + np.multiply( -np.log(1 - A3), 1 - Y) cost = 1. / m * np.nansum(logprobs) return cost
5,340,721
def _replace_sysarg(match): """Return the substitution for the $<n> syntax, e.g. $1 for the first command line parameter. """ return sys.argv[int(match.group(1))]
5,340,722
def qsammobilenetv2(**kwargs): """Constructs a QSAMMobileNetv2 model. """ model = QSAMMobileNetV2(**kwargs) return model
5,340,723
def get_frame_labels_fields( sample_collection, frame_labels_field=None, frame_labels_prefix=None, frame_labels_dict=None, dataset_exporter=None, required=False, force_dict=False, ): """Gets the frame label field(s) of the sample collection matching the specified arguments. Provide one of ``frame_labels_field``, ``frame_labels_prefix``, ``frame_labels_dict``, or ``dataset_exporter``. Args: sample_collection: a :class:`SampleCollection` frame_labels_field (None): the name of the frame labels field to export frame_labels_prefix (None): a frame labels field prefix; the returned labels dict will contain all frame-level fields whose name starts with the given prefix frame_labels_dict (None): a dictionary mapping frame-level label field names to keys dataset_exporter (None): a :class:`fiftyone.utils.data.exporters.DatasetExporter` to use to choose appropriate frame label field(s) required (False): whether at least one matching frame field must be found force_dict (False): whether to always return a labels dict rather than an individual label field Returns: a frame label field or dict mapping frame label fields to keys """ if frame_labels_prefix is not None: frame_labels_dict = _get_frame_labels_dict_for_prefix( sample_collection, frame_labels_prefix ) if frame_labels_dict is not None: return frame_labels_dict if frame_labels_field is None and dataset_exporter is not None: frame_labels_field = _get_default_frame_label_fields_for_exporter( sample_collection, dataset_exporter, required=required ) if frame_labels_field is None and required: raise ValueError( "Unable to find any frame label fields matching the provided " "arguments" ) if ( force_dict and frame_labels_field is not None and not isinstance(frame_labels_field, dict) ): return {frame_labels_field: frame_labels_field} return frame_labels_field
5,340,724
def _rpc_code_to_error_code(rpc_code): """Maps an RPC code to a platform error code.""" return _RPC_CODE_TO_ERROR_CODE.get(rpc_code, exceptions.UNKNOWN)
5,340,725
def peak_values(dataframe_x, dataframe_y, param): """Outputs x (potentials) and y (currents) values from data indices given by peak_detection function. Parameters ---------- DataFrame_x : pd.DataFrame should be in the form of a pandas DataFrame column. For example, df['potentials'] could be input as the column of x data. DataFrame_y : pd.DataFrame should be in the form of a pandas DataFrame column. For example, df['currents'] could be input as the column of y data. param: dict Dictionary of parameters governing the CV run. Returns ------- peak_array : np.array Array of coordinates at peaks in the following order: potential of peak on top curve, current of peak on top curve, potential of peak on bottom curve, current of peak on bottom curve """ peak_values = [] potential_p, potential_n = split(dataframe_x, param) current_p, current_n = split(dataframe_y, param) peak_top_index = peak_detection(current_p, 'positive') peak_bottom_index = peak_detection(current_n, 'negative') # TOPX (bottom part of curve is peak_values.append(potential_p[(peak_top_index['peak_top'])]) # the first part of DataFrame) # TOPY peak_values.append(current_p[(peak_top_index['peak_top'])]) # BOTTOMX peak_values.append(potential_n[(peak_bottom_index['peak_bottom'])]) # BOTTOMY peak_values.append(current_n[(peak_bottom_index['peak_bottom'])]) peak_array = np.array(peak_values) return peak_array
5,340,726
def _infer_geometry(value): """Helper method that tries to infer the $geometry shape for a given value""" if isinstance(value, dict): if "$geometry" in value: return value elif 'coordinates' in value and 'type' in value: return {"$geometry": value} raise InvalidQueryError("Invalid $geometry dictionary should have " "type and coordinates keys") elif isinstance(value, (list, set)): # TODO: shouldn't we test value[0][0][0][0] to see if it is MultiPolygon? # TODO: should both TypeError and IndexError be alike interpreted? try: value[0][0][0] return {"$geometry": {"type": "Polygon", "coordinates": value}} except (TypeError, IndexError): pass try: value[0][0] return {"$geometry": {"type": "LineString", "coordinates": value}} except (TypeError, IndexError): pass try: value[0] return {"$geometry": {"type": "Point", "coordinates": value}} except (TypeError, IndexError): pass raise InvalidQueryError("Invalid $geometry data. Can be either a dictionary " "or (nested) lists of coordinate(s)")
5,340,727
def random_rotation_operator_tensor (operand_space_shape:typing.Tuple[int,...]) -> np.ndarray: """NOTE: Not a uniform distribution.""" if vorpy.tensor.dimension_of_shape(operand_space_shape) == 0: raise Exception(f'invalid dimension for vector space having rotation') A = random_antisymmetric_operator_tensor(np.pi, operand_space_shape) return scipy.linalg.expm(vorpy.tensor.as_linear_operator(A)).reshape(A.shape)
5,340,728
def get_system_details(backends=True): """Return a dictionary with information about the system """ buildno, builddate = platform.python_build() if sys.maxunicode == 65535: # UCS2 build (standard) unitype = 'UCS2' else: # UCS4 build (most recent Linux distros) unitype = 'UCS4' bits, linkage = platform.architecture() d = { 'platform': platform.platform(), 'processor': platform.processor(), 'executable': sys.executable, 'implementation': getattr(platform, 'python_implementation', lambda: 'n/a')(), 'python': platform.python_version(), 'compiler': platform.python_compiler(), 'buildno': buildno, 'builddate': builddate, 'unicode': unitype, 'bits': bits, 'pyvisa': __version__, 'backends': OrderedDict() } if backends: from . import highlevel for backend in highlevel.list_backends(): if backend.startswith('pyvisa-'): backend = backend[7:] try: cls = highlevel.get_wrapper_class(backend) except Exception as e: d['backends'][backend] = ['Could not instantiate backend', '-> %s' % str(e)] continue try: d['backends'][backend] = cls.get_debug_info() except Exception as e: d['backends'][backend] = ['Could not obtain debug info', '-> %s' % str(e)] return d
5,340,729
def train(data_path, hidden_layer_sizes=[784, 800, 10], im_size=(28, 28), dropout_fraction=0.2, random_seed=1, epochs=25, val_fraction=0.15, model_name='digit.keras'): """ Trains feedforward neural network. Stores model. :param data_path: Path to training data. :param hidden_layer_sizes: List of hidden layer neuron sizes. :param im_size: Tuple of image size. Images are resized to this. :param dropout_fraction: Dropout fraction. :param random_seed: Random seed to use for validation split. :param epochs: Number of training iterations. :param val_fraction: Validation split fraction :param model_name: Saved model name. Can also be a path """ # Get design and output matrices for training (X, Y, categories) = generate_matricies(data_path, im_size=im_size) # Explicitly split validation set. (train_X, validation_X, train_Y, validation_Y) = train_test_split(X, Y, test_size=val_fraction, random_state=random_seed) # Initialise model model = Sequential() # First layer model.add(Dense(hidden_layer_sizes[0], activation='relu', input_dim=train_X.shape[1])) model.add(BatchNormalization()) model.add(Dropout(dropout_fraction)) # Deeper layers if len(hidden_layer_sizes) > 1: for layerNum in hidden_layer_sizes[1:]: model.add(Dense(layerNum, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(dropout_fraction)) # Softmax layer model.add(Dense(train_Y.shape[1], activation='softmax')) model.compile(loss="binary_crossentropy", optimizer='sgd', metrics=["accuracy"]) # Early stopping callback early_stop = EarlyStopping(monitor='val_acc', min_delta=1e-3, patience=3, verbose=1, mode='auto') callbacks_list = [early_stop] # Model training model.fit(train_X, train_Y, validation_data=(validation_X, validation_Y), epochs=epochs, batch_size=128, verbose=2, callbacks=callbacks_list) # Save the model print("Saving model..") model.save(model_name) print('Done.')
5,340,730
def get_early_stopping(callback_config:dict): """ Get tf keras EarlyStopping callback. Args: callback_config: config info to build callback """ return keras.callbacks.EarlyStopping(**callback_config)
5,340,731
def test_daterange(): """Assert daterange generator.""" # case invalid range start = date(2021, 2, 1) end = date(2021, 1, 1) dates = [x for x in drifactorial.daterange(start, end)] assert len(dates) == 0 # case include end end = date(2021, 2, 28) dates = [x for x in drifactorial.daterange(start, end)] assert len(dates) == 28 # case exclude end dates = [x for x in drifactorial.daterange(start, end, include_end=False)] assert len(dates) == 27
5,340,732
def test_zernike_zero(): """Make sure same result is obtained for integer and float""" n, m = choose_random_nm() r = 0.5 theta = np.random.rand() * 2 * np.pi - np.pi assert_true( np.isfinite(zernike(r, theta, n, m)).all(), "r, theta, n, m = {}, {}, {}, {}".format(r, theta, n, m), )
5,340,733
def decode(rdf, hint=[]): """Decode ReDIF document.""" def decode(encoding): rslt = rdf.decode(encoding) if rslt.lower().find("template-type") == -1: raise RuntimeError("Decoding Error") return rslt encodings = hint + ["windows-1252", "utf-8", "utf-16", "latin-1"] if rdf[:3] == b"\xef\xbb\xbf": encodings = ["utf-8-sig"] + encodings for enc in encodings: try: return decode(enc) except Exception: continue raise RuntimeError("Decoding Error")
5,340,734
def pad_square(x): """ Pad image to meet square dimensions """ r,c = x.shape d = (c-r)/2 pl,pr,pt,pb = 0,0,0,0 if d>0: pt,pb = int(np.floor( d)),int(np.ceil( d)) else: pl,pr = int(np.floor(-d)),int(np.ceil(-d)) return np.pad(x, ((pt,pb),(pl,pr)), 'minimum')
5,340,735
def get_env_properties( env: Union[gym.Env, VecEnv], network: Union[str, Any] = "mlp" ) -> (Tuple[int]): """ Finds important properties of environment :param env: Environment that the agent is interacting with :type env: Gym Environment :param network: Type of network architecture, eg. "mlp", "cnn" :type network: str :returns: (State space dimensions, Action space dimensions, discreteness of action space and action limit (highest action value) :rtype: int, float, ...; int, float, ...; bool; int, float, ... """ if network == "cnn": state_dim = env.framestack elif network == "mlp": state_dim = env.observation_space.shape[0] elif isinstance(network, (BasePolicy, BaseValue)): state_dim = network.state_dim elif isinstance(network, BaseActorCritic): state_dim = network.actor.state_dim else: raise TypeError if isinstance(env.action_space, gym.spaces.Discrete): action_dim = env.action_space.n discrete = True action_lim = None elif isinstance(env.action_space, gym.spaces.Box): action_dim = env.action_space.shape[0] action_lim = env.action_space.high[0] discrete = False else: raise NotImplementedError return state_dim, action_dim, discrete, action_lim
5,340,736
def gen_abc_json(abc_gt_dir, abc_json_path, image_dir): """ 根据abcnet的gt标注生成coco格式的json标注 :param abc_gt_dir: :param abc_json_path: :param image_dir: :return: """ # Desktop Latin_embed. cV2 = [' ', '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_', '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~'] dataset = { 'licenses': [], 'info': {}, 'categories': [], 'images': [], 'annotations': [] } with open('./classes.txt') as f: classes = f.read().strip().split() for i, cls in enumerate(classes, 1): dataset['categories'].append({ 'id': i, 'name': cls, 'supercategory': 'beverage', 'keypoints': ['mean', 'xmin', 'x2', 'x3', 'xmax', 'ymin', 'y2', 'y3', 'ymax', 'cross'] # only for BDN }) def get_category_id(cls): for category in dataset['categories']: if category['name'] == cls: return category['id'] # 遍历abcnet txt 标注 indexes = sorted([f.split('.')[0] for f in os.listdir(abc_gt_dir)]) print(indexes) j = 1 # 标注边框id号 for index in indexes: # if int(index) >3: continue print('Processing: ' + index) im = cv2.imread(os.path.join(image_dir, '{}.jpg'.format(index))) height, width, _ = im.shape dataset['images'].append({ 'coco_url': '', 'date_captured': '', 'file_name': index + '.jpg', 'flickr_url': '', 'id': int(index.split('_')[-1]), # img_1 'license': 0, 'width': width, 'height': height }) anno_file = os.path.join(abc_gt_dir, '{}.txt'.format(index)) with open(anno_file) as f: lines = [line for line in f.readlines() if line.strip()] # 没有清晰的标注,跳过 if len(lines) == 0: continue for i, line in enumerate(lines): pttt = line.strip().split('||||') parts = pttt[0].split(',') ct = pttt[-1].strip() cls = 'text' segs = [float(kkpart) for kkpart in parts[:16]] xt = [segs[ikpart] for ikpart in range(0, len(segs), 2)] yt = [segs[ikpart] for ikpart in range(1, len(segs), 2)] xmin = min([xt[0], xt[3], xt[4], xt[7]]) ymin = min([yt[0], yt[3], yt[4], yt[7]]) xmax = max([xt[0], xt[3], xt[4], xt[7]]) ymax = max([yt[0], yt[3], yt[4], yt[7]]) width = max(0, xmax - xmin + 1) height = max(0, ymax - ymin + 1) if width == 0 or height == 0: continue max_len = 100 recs = [len(cV2) + 1 for ir in range(max_len)] ct = str(ct) print('rec', ct) for ix, ict in enumerate(ct): if ix >= max_len: continue if ict in cV2: recs[ix] = cV2.index(ict) else: recs[ix] = len(cV2) dataset['annotations'].append({ 'area': width * height, 'bbox': [xmin, ymin, width, height], 'category_id': get_category_id(cls), 'id': j, 'image_id': int(index.split('_')[-1]), # img_1 'iscrowd': 0, 'bezier_pts': segs, 'rec': recs }) j += 1 # 写入json文件 folder = os.path.dirname(abc_json_path) if not os.path.exists(folder): os.makedirs(folder) with open(abc_json_path, 'w') as f: json.dump(dataset, f)
5,340,737
def mahalanobis(data, produce=None): """ Calculate mahalanobis distance on a matrix of column vectors. Assumes that rows are observations and columns are features. Parameters ---------- data : numpy array or pandas dataframe The data to calculate distances on (columns are variables, rows are observations). produce : str, optional Variation of the output to produce, either `squared`, `leverage', or `sqrt` (None). The default is None. Returns ------- numpy array Array containing the distances. """ arr = np.array(data).reshape(data.shape[0], -1) cent = arr - arr.mean(axis=0) covmat = np.cov(cent, rowvar=False) invcov = None if arr.shape[1] == 1: invcov = 1/covmat else: try: invcov = np.linalg.inv(covmat) except np.linalg.LinAlgError: invcov = np.linalg.pinv(covmat) md2 = np.sum(cent.dot(invcov) * cent, axis=1) if produce == "squared": return md2 elif produce == "leverage": n = data.shape[0] return ((md2/(n - 1)) + (1/n)) else: return np.sqrt(md2)
5,340,738
def text(el): """ Helper to get the text content of a BeautifulSoup item """ return el.get_text().strip()
5,340,739
def stage(): """Run the stage group""" pass
5,340,740
def collect_DAC_pow(dig, IF_freq): """TODO: Describe what I, the function, do""" return external_ATS9870_CS_VNA.collect_amp(dig, IF_freq)
5,340,741
def com_google_fonts_check_render_own_name(ttFont): """Check font can render its own name.""" from fontbakery.utils import can_shape menu_name = ttFont["name"].getName( NameID.FONT_FAMILY_NAME, PlatformID.WINDOWS, WindowsEncodingID.UNICODE_BMP, WindowsLanguageID.ENGLISH_USA ).toUnicode() if can_shape(ttFont, menu_name): yield PASS, f'Font can successfully render its own name ({menu_name})' else: yield FAIL,\ Message("render-own-name", f'.notdef glyphs were found when attempting to render {menu_name}')
5,340,742
def psrpipe(eventfile,flags): """ Running psrpipe on the observation, to make more cuts! I decided not to put in pre-determined options for fully flexibility. Though standard flags would be ['--emin','0.3','--emax','12.0','--shrinkelvcut'], though there are others. Check out "psrpipe.py -h"! Also made sure that I moved $OBSID_pipe from the working directory to where NICERSOFT_DATADIR is, though I need to temporarily store the output folder in the working directory. eventfile - path to the event file. Will extract ObsID from this for the NICER files. flags - a LIST of input flags for psrpipe """ if type(eventfile) != str: raise TypeError("eventfile should be a string!") if type(flags) != list: raise TypeError("flags should be a list! Not even an array.") event = fits.open(eventfile) obsid = event[0].header['OBS_ID'] logfile = obsid + '_psrpipe.log' if os.path.isdir(Lv0_dirs.NICERSOFT_DATADIR+obsid+'_pipe'): #to prevent duplicate files ; not likely to be the case, but just in case... subprocess.run(['rm','-r',Lv0_dirs.NICERSOFT_DATADIR+obsid+'_pipe']) with open(logfile,'w') as logtextfile: command = ['psrpipe.py',Lv0_dirs.NICER_DATADIR+obsid] + flags output = subprocess.run(command,capture_output=True,text=True) logtextfile.write(output.stdout) logtextfile.write('*------------------------------* \n') logtextfile.write(output.stderr) logtextfile.close() subprocess.run(['mv',obsid+'_pipe/',Lv0_dirs.NICERSOFT_DATADIR+obsid+'_pipe/']) subprocess.run(['mv',obsid+'_psrpipe.log',Lv0_dirs.NICERSOFT_DATADIR+obsid+'_pipe/']) #done later because the log file was created BEFORE $OBSID_pipe is created return
5,340,743
def get_sha512_manifest(zfile): """ Get MANIFEST.MF from a bar file. :param zfile: Open (!!!) ZipFile instance. :type zfile: zipfile.ZipFile """ names = zfile.namelist() manifest = None for name in names: if name.endswith("MANIFEST.MF"): manifest = name break if manifest is None: raise SystemExit return manifest
5,340,744
def test__combine_lists__add(operand: List[object]) -> None: """Test that `+` operator works for result of `combine_lists()`.""" # Just make sure addition works assert (combine_lists('a', 'b') + operand) is not None assert (operand + combine_lists('a', 'b')) is not None
5,340,745
def plot_heatmap(filename, xdata, ydata, binx, biny, title = None, xlabel = None, ylabel = None, dpi = 150, figsize = (10,10), tfont = 17, lfont = 14): """ Present variables as a 2D heatmap to correlate magnitude and direction. """ def get_bin_id(mybins, vv): for ibin in range(len(mybins)-1): if vv >= mybins[ibin] and vv < mybins[ibin+1]: return ibin + 1 return 0 total = len(xdata) if total == 0: print('Not enough data to produce heatmap, exiting...') return nx, nxbins = np.histogram(xdata, bins = binx) ny, nybins = np.histogram(ydata, bins = biny) temp_x = np.zeros(total) temp_y = np.zeros(total) for ij in range(total): temp_x[ij] = get_bin_id(nxbins, xdata[ij]) temp_y[ij] = get_bin_id(nybins, ydata[ij]) table2d = np.zeros((len(nybins)-1,len(nxbins)-1)) for ij in range(len(temp_x)): table2d[int(temp_y[ij])-1, int(temp_x[ij])-1] += 1 x_labels = [] y_labels = [] for ij in range(len(nxbins)-1): x_labels.append('{:.2f}'.format(0.5*(nxbins[ij] + nxbins[ij+1]))) for ij in range(len(nybins)-1): y_labels.append('{:.1f}'.format(0.5*(nybins[ij] + nybins[ij+1]))) fig, ax = plt.subplots() fig.set_size_inches(figsize[0], figsize[1]) im = ax.imshow(table2d) # We want to show all ticks... ax.set_xticks(np.arange(len(x_labels))) ax.set_yticks(np.arange(len(y_labels))) # ... and label them with the respective list entries ax.set_xticklabels(x_labels) ax.set_yticklabels(y_labels) if title: ax.set_title(title, fontsize = tfont) if ylabel: ax.set_ylabel(ylabel, fontsize = lfont) if xlabel: ax.set_xlabel(xlabel, fontsize = lfont) ylims = ax.get_yticks() rr = ylims[1] - ylims[0] ax.set_ylim(ylims[0] - rr/2., ylims[-1] + rr/2.) cfont = max([8, lfont-2]) ax.tick_params(axis = 'both', which = 'major', labelsize = cfont) # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") for i in range(len(nxbins)-1): for j in range(len(nybins)-1): text = ax.text(i, j, int(100.0*table2d[j, i]/total), ha="center", va="center", color="w") fig.tight_layout() if isinstance(filename, list): for item in filename: fig.savefig(item, dpi = dpi) else: fig.savefig(filename, dpi = dpi) plt.close() return 0
5,340,746
def infer_motifs( adata: AnnData, dataset: str, cluster: Optional[str] = "louvain", n_top_genes: Optional[int] = 1000, max_cell_types: Optional[int] = 50, pfm: Optional[str] = None, min_annotated: Optional[int] = 50, num_enhancers: Optional[int] = 10000, maelstrom: Optional[bool] = False, indirect: Optional[bool] = True, n_sketch: Optional[int] = 2500, n_permutations: Optional[int] = 100000, ) -> None: """Infer motif ativity for single cell RNA-seq data. The adata object is modified with the following fields. **X_cell_types** : `adata.obsm` field Cell type coefficients. Parameters ---------- adata : :class:`~anndata.AnnData` Annotated data matrix. dataset : `str` Name of reference data set or directory with reference data. cluster : `str`, optional (default: "louvain") Name of the clustering, can be either louvain or leiden. n_top_genes : `int`, optional (default: 1000) Number of variable genes that is used. If `n_top_genes` is greater than the number of hypervariable genes in `adata` then all variable genes are used. max_cell_types : `int`, optional (default: 50) Maximum number of cell types to select. pfm : `str`, optional (default: None) Name of motif file in PFM format. The GimmeMotifs default is used if this parameter is not specified. This can be a filename, or a pfm name support by GimmeMotifs such as `JASPAR2018_vertebrates`. If a custom PFM file is specified, there should also be an associated `.motif2factors.txt` file. min_annotated : `int`, optional (default: 50) Cells that are annotated with cell types less than this number will be annotated as "other". num_enhancers : `int`, optional (default: 10000) Number of enhancers to use for motif activity analysis. maelstrom : `boolean`, optional (default: False) Use maelstrom instead of ridge regression for motif activity analysis. 
""" use_name = True validate_adata(adata) data = ScepiaDataset(dataset) if "scepia" not in adata.uns: adata.uns["scepia"] = {"version": __version__} # Annotate each cell with H3K27ac reference if "cell_annotation" not in adata.obs or "cluster_annotation" not in adata.obs: annotate_cells( adata, dataset=dataset, cluster=cluster, n_top_genes=n_top_genes, min_annotated=min_annotated, max_cell_types=max_cell_types, ) logger.info("Linking variable genes to differential enhancers.") gene_map_file = data.gene_mapping link_file = data.link_file link = pd.read_feather(link_file) if use_name: ens2name = pd.read_csv( gene_map_file, sep="\t", index_col=0, names=["identifier", "name"] ) link = link.join(ens2name, on="gene").dropna() link = link.set_index("name") link.index = link.index.str.upper() enh_genes = adata.var_names[ adata.var_names.str.upper().isin(link.index) ].str.upper() var_enhancers = change_region_size(link.loc[enh_genes, "loc"]).unique() enhancer_df = data.load_reference_data(reftype="enhancer") enhancer_df.index = change_region_size(enhancer_df.index) enhancer_df = enhancer_df.loc[var_enhancers, adata.uns["scepia"]["cell_types"]] enhancer_df = enhancer_df.groupby(enhancer_df.columns, axis=1).mean() enhancer_df.loc[:, :] = scale(enhancer_df) main_cell_types = pd.concat( ( adata.obs["cluster_annotation"].astype(str), adata.obs["cell_annotation"].astype(str), ) ) main_cell_types = [x for x in main_cell_types.unique() if x != "other"] # Select top most variable enhancers of the most important annotated cell types enhancer_df = enhancer_df.loc[ enhancer_df[main_cell_types].var(1).sort_values().tail(num_enhancers).index ] # Center by mean of the most import cell types # Here we chose the majority cell type per cluster mean_value = enhancer_df[main_cell_types].mean(1) enhancer_df = enhancer_df.sub(mean_value, axis=0) fname = NamedTemporaryFile(delete=False).name enhancer_df.to_csv(fname, sep="\t") logger.info("inferring motif activity") pfm = pfmfile_location(pfm) if maelstrom: with TemporaryDirectory() as tmpdir: run_maelstrom( fname, data.genome, tmpdir, center=False, filter_redundant=True, ) motif_act = pd.read_csv( os.path.join(tmpdir, "final.out.txt"), sep="\t", comment="#", index_col=0, ) motif_act.columns = motif_act.columns.str.replace(r"z-score\s+", "") pfm = pfmfile_location(os.path.join(tmpdir, "nonredundant.motifs.pfm")) else: logger.info(f"Activity based on genome {data.genome}") motif_act = moap( fname, scoring="score", genome=data.genome, method="bayesianridge", pfmfile=pfm, ncpus=12, ) adata.uns["scepia"]["pfm"] = pfm adata.uns["scepia"]["motif_activity"] = motif_act[adata.uns["scepia"]["cell_types"]] logger.info("calculating cell-specific motif activity") cell_motif_activity = ( adata.uns["scepia"]["motif_activity"] @ adata.obsm["X_cell_types"].T ).T cell_motif_activity.index = adata.obs_names adata.obs = adata.obs.drop( columns=cell_motif_activity.columns.intersection(adata.obs.columns) ) adata.obs = adata.obs.join(cell_motif_activity) correlate_tf_motifs( adata, indirect=indirect, n_sketch=n_sketch, n_permutations=n_permutations ) add_activity(adata) logger.info("Done with motif inference.")
5,340,747
def cmake_var_string(cmake_vars): """Converts a dictionary to an input suitable for expand_cmake_vars. Ideally we would just stringify in the expand_cmake_vars() rule, but select() interacts badly with genrules. TODO(phawkins): replace the genrule() with a native rule and delete this rule. Args: cmake_vars: a dictionary with string keys and values that are convertible to strings. """ return " ".join([_quote("{}={}".format(k, str(v))) for (k, v) in cmake_vars.items()])
5,340,748
def scattering_transform1d(n_classes, sequence_length): """ Scattering transform """ log_eps = 1e-6 x_in = layers.Input(shape=(sequence_length)) x = Scattering1D(8, 12)(x_in) x = layers.Lambda(lambda x: x[..., 1:, :])(x) x = layers.Lambda(lambda x: tf.math.log(tf.abs(x) + log_eps))(x) x = layers.GlobalAveragePooling1D(data_format='channels_first')(x) x = layers.BatchNormalization(axis=1)(x) x_out = layers.Dense(n_classes, activation='softmax')(x) model = tf.keras.models.Model(x_in, x_out) return model
5,340,749
def main(): """Runs the program.""" # Parse command-line args parser = argparse.ArgumentParser(prog='preprocess_data.py', description='Preprocess Clockify API data from files in an opinionated fashion.', epilog='Have fun preprocessing! :)') parser.add_argument('-p', '--projects-file', type=str, dest='proj_file', help='The name of the file to get projects information from. Defaults\n' + 'to \'projects.json\' if not specified.', required=False) parser.add_argument('-t', '--tasks-file', type=str, dest='tasks_file', help='The name of the file to get tasks information from. Defaults\n' + 'to \'tasks.json\' if not specified.', required=False) parser.add_argument('-e', '--entries-file', type=str, dest='entries_file', help='The name of the file to get time entry data from. Defaults\n' + 'to \'entries.json\' if not specified.', required=False) parser.add_argument('-d', '--data-file', type=str, dest='proc_data_file', help='The name of the file that will hold the preprocessed data.\n' + 'Defaults to \'preprocessed_data.json\'.') parser.add_argument('--csv', dest='export_csv', action='store_true', help='Specify this flag to indicate that the data should also be\n' + 'exported to CSV format. The CSV file will have the same name as\n' + 'the file specified by the -d/--data-file argument, or be named\n' + '\'preprocessed_data.csv\' if that argument is not given.', default=False) args = parser.parse_args() proj_file = 'projects.json' if args.proj_file is None else args.proj_file tasks_file = 'tasks.json' if args.tasks_file is None else args.tasks_file entries_file = 'entries.json' if args.entries_file is None else args.entries_file preproc_data = preprocess_data(proj_file, tasks_file, entries_file) preproc_data_file = \ 'preprocessed_data.json' if args.proc_data_file is None else args.proc_data_file if args.export_csv: preproc_csv_file = preproc_data_file.split('.')[0] + '.csv' export_to_csv(preproc_csv_file, preproc_data) if not preproc_data_file.endswith('.json'): preproc_data_file += '.json' dump_data(preproc_data, preproc_data_file)
5,340,750
def addFileContent(session, filepath, source_file_name, content_hash, encoding): """ Add the necessary file contents. If the file is already stored in the database then its ID is returned. If content_hash is None then this function calculates the content hash. If it is available at the caller and is provided then it will not be calculated again. This function must not be called between addCheckerRun() and finishCheckerRun() functions when an SQLite database is used! addCheckerRun() function opens a transaction which is closed by finishCheckerRun() and since SQLite doesn't support parallel transactions, this API call will wait until the other transactions finish. In the meantime the run adding transaction times out. """ source_file_content = None if not content_hash: source_file_content = get_file_content(source_file_name, encoding) hasher = sha256() hasher.update(source_file_content) content_hash = hasher.hexdigest() file_content = session.query(FileContent).get(content_hash) if not file_content: if not source_file_content: source_file_content = get_file_content(source_file_name, encoding) try: compressed_content = zlib.compress(source_file_content, zlib.Z_BEST_COMPRESSION) fc = FileContent(content_hash, compressed_content) session.add(fc) session.commit() except sqlalchemy.exc.IntegrityError: # Other transaction might have added the same content in # the meantime. session.rollback() file_record = session.query(File) \ .filter(File.content_hash == content_hash, File.filepath == filepath) \ .one_or_none() if not file_record: try: file_record = File(filepath, content_hash) session.add(file_record) session.commit() except sqlalchemy.exc.IntegrityError as ex: LOG.error(ex) # Other transaction might have added the same file in the # meantime. session.rollback() file_record = session.query(File) \ .filter(File.content_hash == content_hash, File.filepath == filepath) \ .one_or_none() return file_record.id
5,340,751
def test_simple_push_pull(): """ Test the 'happy path' push/pull interaction 1. Push a file to remote, pull (initially) to make sure we get it 2. Modify file & push to remote, pull to make sure we get update 3. Add new file to remote, pull to make sure we get it 4. Delete new file to remote, pull to make sure it is gone No modifications are done in the puller repo here, so we do not exercise any merging behavior. """ with Remote() as remote, Pusher(remote) as pusher: pusher.push_file('README.md', '1') with Puller(remote) as puller: assert puller.git('rev-parse', 'HEAD') == pusher.git('rev-parse', 'HEAD') assert puller.read_file('README.md') == pusher.read_file('README.md') == '1' pusher.push_file('README.md', '2') puller.pull_all() assert puller.git('rev-parse', 'HEAD') == pusher.git('rev-parse', 'HEAD') assert puller.read_file('README.md') == pusher.read_file('README.md') == '2' pusher.push_file('another-file', '3') puller.pull_all() assert puller.git('rev-parse', 'HEAD') == pusher.git('rev-parse', 'HEAD') assert puller.read_file('another-file') == pusher.read_file('another-file') == '3' pusher.git('rm', 'another-file') pusher.git('commit', '-m', 'Removing File') pusher.git('push', 'origin', 'master') puller.pull_all() assert puller.git('rev-parse', 'HEAD') == pusher.git('rev-parse', 'HEAD') assert not os.path.exists(os.path.join(puller.path, 'another-file'))
5,340,752
def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_predicting = (mode == tf.estimator.ModeKeys.PREDICT) # TRAIN and EVAL if not is_predicting: (loss, predicted_labels, log_probs) = create_model( is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels) train_op = bert.optimization.create_optimizer( loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False) # Calculate evaluation metrics. def metric_fn(label_ids, predicted_labels): accuracy = tf.metrics.accuracy(label_ids, predicted_labels) f1_score = tf.contrib.metrics.f1_score( label_ids, predicted_labels) auc = tf.metrics.auc( label_ids, predicted_labels) recall = tf.metrics.recall( label_ids, predicted_labels) precision = tf.metrics.precision( label_ids, predicted_labels) true_pos = tf.metrics.true_positives( label_ids, predicted_labels) true_neg = tf.metrics.true_negatives( label_ids, predicted_labels) false_pos = tf.metrics.false_positives( label_ids, predicted_labels) false_neg = tf.metrics.false_negatives( label_ids, predicted_labels) return { "eval_accuracy": accuracy, "f1_score": f1_score, "auc": auc, "precision": precision, "recall": recall, "true_positives": true_pos, "true_negatives": true_neg, "false_positives": false_pos, "false_negatives": false_neg } eval_metrics = metric_fn(label_ids, predicted_labels) if mode == tf.estimator.ModeKeys.TRAIN: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) else: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics) else: (predicted_labels, log_probs) = create_model( is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels) predictions = { 'probabilities': log_probs, 'labels': predicted_labels } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # Return the actual model function in the closure return model_fn
5,340,753
def test_cmd_run_has_list_input_save_dev_null(): """List input to run complex dict save all output to /dev/null.""" cmd1 = get_cmd('echo one', r'tests\testfiles\cmds\echo.bat one') cmd2 = get_cmd('echo two three', r'tests\testfiles\cmds\echo.bat "two three"') context = Context({ 'cmd': { 'run': [cmd1, cmd2], 'stdout': '/dev/null', 'stderr': '/dev/null'} }) pypyr.steps.cmd.run_step(context) assert 'cmdOut' not in context
5,340,754
def to_numeric_df(kdf): """ Takes a dataframe and turns it into a dataframe containing a single numerical vector of doubles. This dataframe has a single field called '_1'. TODO: index is not preserved currently :param df: :return: a pair of dataframe, list of strings (the name of the columns that were converted to numerical types) """ # TODO, it should be more robust. accepted_types = {np.dtype(dt) for dt in [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, np.bool_]} numeric_fields = [fname for fname in kdf._metadata.column_fields if kdf[fname].dtype in accepted_types] numeric_df = kdf._sdf.select(*numeric_fields) va = VectorAssembler(inputCols=numeric_fields, outputCol="_1") v = va.transform(numeric_df).select("_1") return v, numeric_fields
5,340,755
def FancyAnalyzer(expression=r"\s+", stoplist=STOP_WORDS, minsize=2, maxsize=None, gaps=True, splitwords=True, splitnums=True, mergewords=False, mergenums=False): """Composes a RegexTokenizer with an IntraWordFilter, LowercaseFilter, and StopFilter. >>> ana = FancyAnalyzer() >>> [token.text for token in ana(u"Should I call getInt or get_real?")] [u"should", u"call", u"getInt", u"get", u"int", u"get_real", u"get", u"real"] :param expression: The regular expression pattern to use to extract tokens. :param stoplist: A list of stop words. Set this to None to disable the stop word filter. :param minsize: Words smaller than this are removed from the stream. :param maxsize: Words longer that this are removed from the stream. :param gaps: If True, the tokenizer *splits* on the expression, rather than matching on the expression. """ ret = RegexTokenizer(expression=expression, gaps=gaps) iwf = IntraWordFilter(splitwords=splitwords, splitnums=splitnums, mergewords=mergewords, mergenums=mergenums) lcf = LowercaseFilter() swf = StopFilter(stoplist=stoplist, minsize=minsize) return ret | iwf | lcf | swf
5,340,756
def find_res_shift(x_min, x_max, y_min, y_max, z_min, z_max, target_id, my_sites, res_two_three_dict, my_mols, color_list, button_list): """Function to find the relavant residue shifts""" print "FINDING MAX SHIFTS" max_shift = [] # Get the delta value delta = 5.0 # Filter residues to the ones within 1.0 A of any molecule AND then sort by size tot_res = Residue.objects.filter(target_id=target_id) if x_max: criterion1 = Q(x_max__gte=x_max + delta) criterion2 = Q(x_max__gte=x_min + delta) near_res = tot_res.exclude(criterion1 & criterion2) criterion1 = Q(x_min__lte=x_max - delta) criterion2 = Q(x_min__lte=x_min - delta) near_res = near_res.exclude(criterion1 & criterion2) criterion1 = Q(y_max__gte=y_max + delta) criterion2 = Q(y_max__gte=y_min + delta) near_res = near_res.exclude(criterion1 & criterion2) # Now do y_min criterion1 = Q(y_min__lte=y_max - delta) criterion2 = Q(y_min__lte=y_min - delta) near_res = near_res.exclude(criterion1 & criterion2) # Now do Z # First Z_max criterion1 = Q(z_max__gte=z_max + delta) criterion2 = Q(z_max__gte=z_min + delta) near_res = near_res.exclude(criterion1 & criterion2) # Now Z min criterion1 = Q(z_min__lte=z_max - delta) criterion2 = Q(z_min__lte=z_min - delta) near_res = near_res.exclude(criterion1 & criterion2) near_res = set(near_res.filter().values_list("res_name", "res_num")) else: tot_near_res = [] tot_res_d = {} for my_site in my_sites: criterion1 = Q(x_max__gte=my_site.x_max + delta) criterion2 = Q(x_max__gte=my_site.x_min + delta) near_res = tot_res.exclude(criterion1 & criterion2) criterion1 = Q(x_min__lte=my_site.x_max - delta) criterion2 = Q(x_min__lte=my_site.x_min - delta) near_res = near_res.exclude(criterion1 & criterion2) criterion1 = Q(y_max__gte=my_site.y_max + delta) criterion2 = Q(y_max__gte=my_site.y_min + delta) near_res = near_res.exclude(criterion1 & criterion2) # Now do y_min criterion1 = Q(y_min__lte=my_site.y_max - delta) criterion2 = Q(y_min__lte=my_site.y_min - delta) near_res = near_res.exclude(criterion1 & criterion2) # Now do Z # First Z_max criterion1 = Q(z_max__gte=my_site.z_max + delta) criterion2 = Q(z_max__gte=my_site.z_min + delta) near_res = near_res.exclude(criterion1 & criterion2) # Now Z min criterion1 = Q(z_min__lte=my_site.z_max - delta) criterion2 = Q(z_min__lte=my_site.z_min - delta) near_res = near_res.exclude(criterion1 & criterion2) # Now we get the near res for this site near_res = set(near_res.filter().values_list("res_name", "res_num")) for res in near_res: if res in tot_res_d: tot_res_d[res].append(my_site.pk) else: tot_res_d[res] = [my_site.pk] tot_near_res.extend(list(near_res)) near_res = tot_near_res print "Getting clusters" my_res = ResShift.objects.filter(target_id=target_id, res_name__in=[x[0] for x in near_res], res_num__in=[x[1] for x in near_res]) # Only find those close to the BOX / main out_res_d = {} for i, val in enumerate(sorted(my_res.values_list("max_shift", "res_name", "pk", "res_num"),reverse=True)): my_mol = Molecule() # Define the site the residues are in res_hash = (val[1], val[3]) if res_hash in tot_res_d: my_mol.sites = " ".join(["SITE"+ str(x) for x in tot_res_d[res_hash]]) #my_mol.my_list = [(x[0]) for x in sorted(ResShift.objects.filter(target_id=target).values_list("max_shift"),reverse=True)[:5]] if val[1] in res_two_three_dict: this_res_name = res_two_three_dict[val[1]] else: this_res_name = "UNI" my_mol.res = "^" + this_res_name + str(val[3]) out_res_d[my_mol.res] = {} my_mol.my_name = val[1] + ": " + str(val[3]) my_mol.shift = val[0] my_mol.button = button_list[i % 
len(button_list)] my_mol.bg = color_list[i % len(color_list)] my_mol.res_cl = {} # Now get how the molecules rank on this residue move # instead we want to go trhrough molecules my_mol.my_list = [] # Now colour the clusters for item in my_mols: this_res = tot_res.filter(res_name=val[1], res_num=val[3], prot_id__molecule=item) if len(this_res) ==0: new_mol = Molecule() # Get the PK from here new_mol.pk = item.pk new_mol.shift = 0.0 new_mol.colour = "" out_res_d[my_mol.res][item.prot_id.code] = "" my_mol.my_list.append(new_mol) elif len(this_res) == 1: this_res = this_res[0] new_mol = Molecule() # Get the PK from here new_mol.pk = item.pk new_mol.shift = this_res.max_shift new_mol.clus_id = "RESCL" + str(this_res.clust_id) + "_" + val[1] + "_" + str(val[3]) my_mol.res_cl["RESCL" + str(this_res.clust_id) + "_" + val[1] + "_" + str(val[3])] = [color_list[this_res.clust_id % len(color_list)], button_list[this_res.clust_id % len(button_list)]] new_mol.colour = color_list[this_res.clust_id % len(color_list)] out_res_d[my_mol.res][this_res.prot_id.code] = button_list[this_res.clust_id % len(button_list)] my_mol.my_list.append(new_mol) else: print "ERROR MORE THAN ONE MOLS" # Now append this guy to the list max_shift.append(my_mol) return json.dumps(out_res_d), max_shift
5,340,757
def lookup(*getters): """Find data by provided parameters and group by type respectively""" getters = list(reversed(getters)) def wrap(struct): while getters: _type, getter = getters.pop() if _type == G_TYPE_KEY: struct = getter(struct) continue if _type == G_TYPE_ARR: n_getters = list(reversed(getters)) return [lookup(*n_getters)(elem) for elem in getter(struct)] return struct return wrap
5,340,758
def test_toggle_off(basic_app): """Check if title is correctly displayed if the toggle is turned off""" basic_app.settings = {"time_in_menu_bar": False} update_title_bar(basic_app) assert basic_app.title is None
5,340,759
def compute_one(t, lhs, rhs, **kwargs): """ Join two pandas data frames on arbitrary columns The approach taken here could probably be improved. To join on two columns we force each column to be the index of the dataframe, perform the join, and then reset the index back to the left side's original index. """ result = pd.merge(lhs, rhs, left_on=t.on_left, right_on=t.on_right, how=t.how) return result.reset_index()[t.columns]
5,340,760
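The dispatcher above is a thin wrapper around pandas.merge; the sketch below runs the same join with the parameters written out literally instead of read from the expression node t.

import pandas as pd

lhs = pd.DataFrame({"id": [1, 2, 3], "name": ["a", "b", "c"]})
rhs = pd.DataFrame({"key": [2, 3, 4], "value": [20, 30, 40]})

# Equivalent of t.on_left="id", t.on_right="key", t.how="inner",
# t.columns=["name", "value"]
result = pd.merge(lhs, rhs, left_on="id", right_on="key", how="inner")
print(result.reset_index()[["name", "value"]])
#   name  value
# 0    b     20
# 1    c     30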
def simulate(mat, det, e0=20.0, dose=defaultDose, withPoisson=True, nTraj=defaultNumTraj, sf=defaultCharFluor, bf=defaultBremFluor, xtraParams=defaultXtraParams): """simulate(mat,det,[e0=20.0],[withPoisson=True],[nTraj=defaultNumTraj],[dose=defaultDose],[sf=defaultCharFluor],[bf=defaultBremFluor],[xtraParams=defaultXtraParams]) Simulate a bulk spectrum for the material mat on the detector det at beam energy e0 (in keV). If \ sf then simulate characteristic secondary fluorescence. If bf then simulate bremsstrahlung secondary \ fluorescence. nTraj specifies the number of electron trajectories. dose is in nA*sec.""" mat = dtsa2.material(mat) if not isinstance(mat, epq.Material): print u"Please provide a material with a density - %s" % mat tmp = u"MC simulation of bulk %s at %0.1f keV%s%s" % (mat, e0, (" + CSF" if sf else ""), (" + BSF" if bf else "")) print tmp res = base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildBulk, { "Material" : mat }, xtraParams) res.getProperties().setCompositionProperty(epq.SpectrumProperties.StandardComposition, mat) return res
5,340,761
def link_match_family(link, family_name):
    """Checks whether a link can be used in a given family.

    When this function is used with built-in family names, it tests whether the link
    name can be used with the given built-in family. If the family name is not known,
    we return True because the user is working with a custom ``Family`` object.

    Which links can work with which families are taken from statsmodels.
    """
    if family_name in FAMILY_LINKS:
        return link in FAMILY_LINKS[family_name]
    # Custom family, we don't know what link functions can be used
    return True
5,340,762
def test_naked_domain(create_user): """Test user against naked domain pattern (e.g google.com) """ emails = ["harold@bar.com"] patterns = ["bar.com"] assert create_user.preprocess_pattern(emails, patterns) == True fail_emails = ["harold@help.bar.com"] assert create_user.preprocess_pattern(fail_emails, patterns) == False
5,340,763
def store_abu_result_tuple(abu_result_tuple, n_folds=None, store_type=EStoreAbu.E_STORE_NORMAL, custom_name=None):
    """
    Save the AbuResultTuple backtest result returned by abu.run_loop_back. The n_folds and
    store_type arguments only determine the name of the stored file; the call is passed
    through to ABuStore.store_abu_result_tuple, which performs the actual write.

    :param abu_result_tuple: the AbuResultTuple object to store
    :param n_folds: how many years the backtest covered; only affects the stored file name
    :param store_type: EStoreAbu storage type of the backtest; only affects the stored file name
    :param custom_name: custom file name, required when store_type=EStoreAbu.E_STORE_CUSTOM_NAME
    """
    ABuStore.store_abu_result_tuple(abu_result_tuple, n_folds, store_type=store_type, custom_name=custom_name)
5,340,764
def auc(test_set, user_factors, subreddit_factors, subreddits, users): """ Returns the auc score on a test data set """ num_users = len(test_set) total = 0 # treat the signal as 1 as per the implicit bpr paper for subreddit, user, signal in tqdm.tqdm_notebook(test_set): # outer summation # inner summation # TODO: try to parallelize u = users_index[user] i = subreddits_index[subreddit] x_ui = user_factors[u].dot(subreddit_factors[i]) js = [] for j in range(0, num_subreddits): if j != i and j not in E_u[u]: js.append(j) total += np.sum(np.heaviside(x_ui - user_factors[u].dot(subreddit_factors[js].T), 0)) / len(js) # for j in range(0, subreddits): # numel = 0 # total_user = 0 # if j != i and j not in E_u[u]: # numel += 1 # x_uj = user_factors[u].dot(subreddit_factors[j]) # total_user += heaviside(x_ui - x_uj) # total += (total_user * 1.0 / numel) return total / num_users
5,340,765
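A toy numpy illustration of the per-user term accumulated in the loop above: the fraction of unobserved items j scored below the observed item i, measured with the Heaviside step of the score difference. The factor values are made up.

import numpy as np

user = np.array([0.5, -0.2, 0.1])
items = np.array([
    [0.4, 0.1, 0.0],    # i: the observed (held-out) item
    [0.1, 0.9, 0.3],    # j1: unobserved
    [-0.2, 0.4, 0.5],   # j2: unobserved
])
x_ui = user.dot(items[0])
x_uj = items[1:].dot(user)
auc_u = np.mean(np.heaviside(x_ui - x_uj, 0))
print(auc_u)  # 1.0 -> both unobserved items are ranked below i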
def insert_internal_bgp_peering(peering, service_node): """ Creates/updates the relationship and nodes needed to express the internal peerings. """ pass
5,340,766
def convert_AST_to_expr(ast): """Creates expression from the AST.""" converter = ASTToInstrBlockConverter() instrs = converter.my_visit(ast) return instrs[0]
5,340,767
def then_app_running_stage(context): """Check that the app is deployed and running on stage.""" result = context.result result | should.equal('Success').desc("Application is reachable in the Stage stage.")
5,340,768
def testAgentSpecValidation_whenDefinitionIsCorrect_noRaise(): """Unit test to check the validity of the Agent json-schema. Case where the Agent definition is valid. """ valid_yaml_data = """ kind: Agent name: "agent" version : 1.1.0 description: "Agent1 Description should be here" image: "some/path/to/the/image" source: "https://github.com/" durability: "development" restrictions: - "restriction1" - "restriction2" in_selectors: - "in_selector1" - "in_selector2" out_selectors: - "out_selector1" - "out_selector2" restart_policy: "any" mem_limit: 4096 portmap: port_src: 50001 port_dst: 50200 docker_file_path: "some/path/to/Dockerfile" docker_build_root: "/" agent_runner: "theAgentRunner" agent_path: "some/path/to/agent.py" agenArgument: - name: "agentArgumentName1" type: ["string", "number", "boolean"] description: "agentArgumentDescription1" default_value: "agentArgumentDefaultValue1" - name: "agentArgumentName2" type: ["string", "number", "boolean"] description: "agentArgumentDescription2" default_value: 42 """ yaml_data_file = io.StringIO(valid_yaml_data) data = loader.load_agent_yaml(yaml_data_file) assert data['name'] == 'agent' assert data['version'] == '1.1.0' assert data['agenArgument'][1]['default_value'] == 42
5,340,769
def linting(session): """Launch linting locally.""" session = base_install(session) session.run("pre-commit", "run", "-a")
5,340,770
def add_data_from_api(service, repo, variable_type, keys): """Retrieves Github API data. Utilizes the function from github_api/github.py to do so. This function adds the retrieved variables directly to the data dictionary. Args: service (Service): Service object with API connection and metadata vars repo (Repo) : Repository variables bundled together variable_type (string): which type of variable should be retrieved. Supported are: contributors, languages, readmes keys (list): A list of the keys for the retrieved data Returns: boolean: Whether the request was successful or not. In case of unsuccessful request, skip repository """ # for nested data only, otherwise key can be directly used if variable_type in ("contributors", "languages"): data[variable_type] = [] retrieved_data = get_data_from_api(service, repo, variable_type, verbose=False) if retrieved_data is not None: if variable_type in ("contributors", "languages"): for entry in retrieved_data: data[variable_type].append(dict(zip(keys, entry[1:]))) elif variable_type == "readmes": data[keys[0]] = retrieved_data[1] else: return False time.sleep(2) return True
5,340,771
def heads(context, directory='migrations', verbose=False, resolve_dependencies=False): """Show current available heads in the script directory""" if alembic_version >= (0, 7, 0): config = _get_config(directory) command.heads(config, verbose=verbose, resolve_dependencies=resolve_dependencies) else: raise RuntimeError('Alembic 0.7.0 or greater is required')
5,340,772
def part1(data): """ >>> part1(read_input()) 0 """ return data
5,340,773
def get_rectanguloid_mask(y, fat=1): """Get a rectanguloid mask of the data""" M = y.nonzero().max(0)[0].tolist() m = y.nonzero().min(0)[0].tolist() M = [min(M[i] + fat, y.shape[i] - 1) for i in range(3)] m = [max(v - fat, 0) for v in m] mask = torch.zeros_like(y) mask[m[0] : M[0], m[1] : M[1], m[2] : M[2]] = 1 return mask
5,340,774
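A quick usage sketch for the mask helper above, on a small volume with a single nonzero block (it assumes get_rectanguloid_mask and torch are in scope).

import torch

y = torch.zeros(8, 8, 8)
y[3:5, 2:4, 4:6] = 1.0

mask = get_rectanguloid_mask(y, fat=1)
# The mask covers the nonzero block padded by `fat` voxels on each side.
print(mask.nonzero().min(0)[0])  # tensor([2, 1, 3])
print(mask.nonzero().max(0)[0])  # tensor([4, 3, 5])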
def create_schema(engine=None): """Create schema from models, without a migration.""" base = models.BASE if engine is None: engine = get_engine() base.metadata.create_all(engine)
5,340,775
def colorize_output(output): """Add HTML colors to the output.""" # Task status color_output = re.sub(r'(ok: [-\w\d\[\]]+)', r'<font color="green">\g<1></font>', output) color_output = re.sub(r'(changed: [-\w\d\[\]]+)', r'<font color="orange">\g<1></font>', color_output) if not re.search(r'failed: 0', color_output): color_output = re.sub(r'(failed: [-\w\d\[\]]+)', r'<font color="red">\g<1></font>', color_output) color_output = re.sub(r'(fatal: [-\w\d\[\]]+):', r'<font color="red">\g<1></font>', color_output) # Play recap color_output = re.sub(r'(ok=[\d]+)', r'<font color="green">\g<1></font>', color_output) color_output = re.sub(r'(changed=[\d]+)', r'<font color="orange">\g<1></font>', color_output) color_output = re.sub(r'(failed=[1-9][0-9]*)', r'<font color="red">\g<1></font>', color_output) return color_output
5,340,776
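Example of running the colouriser over a typical play-recap line (it assumes colorize_output, and the re module it relies on, are in scope).

recap = "node1 : ok=12 changed=3 unreachable=0 failed=1"
print(colorize_output(recap))
# -> node1 : <font color="green">ok=12</font> <font color="orange">changed=3</font>
#    unreachable=0 <font color="red">failed=1</font>   (printed as one line)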
async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the weenect device_trackers.""" platform = entity_platform.async_get_current_platform() coordinator = hass.data[DOMAIN][entry.entry_id] @callback def async_add_device_trackers( added: List[int], ) -> None: """Add device_trackers callback.""" trackers: list = [] for tracker_id in added: trackers.append( WeenectDeviceTracker( coordinator, tracker_id, ) ) async_add_entities(trackers, True) unsub_dispatcher = async_dispatcher_connect( hass, f"{entry.entry_id}_{TRACKER_ADDED}", async_add_device_trackers, ) coordinator.unsub_dispatchers.append(unsub_dispatcher) if len(coordinator.data) > 0: async_add_device_trackers(coordinator.data.keys()) async def async_call_service(service_call: ServiceCall) -> None: """Handle dispatched services.""" assert platform is not None # nosec entities = await platform.async_extract_from_service(service_call) tracker_ids = [] for entity in entities: assert isinstance(entity, WeenectEntity) # nosec tracker_ids.append(entity.id) for tracker_id in set(tracker_ids): if service_call.service == SERVICE_SET_UPDATE_INTERVAL: await async_set_update_interval( hass, tracker_id, service_call.data[UPDATE_INTERVAL] ) if service_call.service == SERVICE_ACTIVATE_SUPER_LIVE: await async_activate_super_live(hass, tracker_id) if service_call.service == SERVICE_REFRESH_LOCATION: await async_refresh_location(hass, tracker_id) if service_call.service == SERVICE_RING: await async_ring(hass, tracker_id) if service_call.service == SERVICE_VIBRATE: await async_vibrate(hass, tracker_id) hass.services.async_register( DOMAIN, SERVICE_SET_UPDATE_INTERVAL, async_call_service, schema=SERVICE_SET_UPDATE_INTERVAL_SCHEMA, ) hass.services.async_register( DOMAIN, SERVICE_ACTIVATE_SUPER_LIVE, async_call_service, schema=SERVICE_SCHEMA, ) hass.services.async_register( DOMAIN, SERVICE_REFRESH_LOCATION, async_call_service, schema=SERVICE_SCHEMA, ) hass.services.async_register( DOMAIN, SERVICE_RING, async_call_service, schema=SERVICE_SCHEMA, ) hass.services.async_register( DOMAIN, SERVICE_VIBRATE, async_call_service, schema=SERVICE_SCHEMA, )
5,340,777
def parse_args(): """Parse command-line arguments""" parser = argparse.ArgumentParser( description='Stop a subjective evaluation without ' + 'destroying resources') parser.add_argument('--aws_api_key', help='The public API key for AWS') parser.add_argument( '--aws_api_secret_key', help='The private API key for AWS') parser.add_argument('--heroku_api_key', help='The API key for Heroku') parser.add_argument( '--mysql_local_user', help='The username of the local MySQL database') parser.add_argument( '--mysql_local_password', help='The corresponding password of the local MySQL database') return parser.parse_args()
5,340,778
def parse_user_date(usr_date: str) -> date: """ Parses a user's date input, prompts the user to input useful date data if user's date was invalid Args: usr_date : str, user input of date info. Should be in <yyyy/mm/dd> format Returns: valid datetime.date() object """ expected_len = len("yyyy/mm/dd") if usr_date is None: return prompt_user_date() try: dt_list = usr_date[0:expected_len].split("/") # Ensure right number of fields if len(dt_list) >= 3: try: # Ensure year is long enough to be useful if len(dt_list[0]) == 4: year = int(dt_list[0]) else: raise BreakoutError() # set rest of info month = int(dt_list[1]) day = int(dt_list[2]) # deal with bad user characters except ValueError: raise BreakoutError() # create date if user isn't a dingus calendar_date = date(year, month, day) else: raise BreakoutError() except BreakoutError: # Make user give us a useful date if they are a dingus calendar_date = prompt_user_date() return calendar_date
5,340,779
def up_sampling_block(x, n_filter, kernel_size, name, activation='relu', up_size=(2, 2)):
    """Upsampling block: UpSampling2D -> optional ReLU -> separable conv block."""
    x = layers.UpSampling2D(size=up_size, name=name+'up')(x)
    if activation:
        x = layers.Activation('relu', name=name+'_act')(x)
    x = sepconv_bn_relu(x, n_filter, kernel_size, padding='same', activation=None, name=name+'_sepconv1')
    return x
5,340,780
def connected_components(graph):
    """
    Connected components.

    @attention: Identification of connected components is meaningful only for non-directed graphs.

    @type graph: graph
    @param graph: Graph.

    @rtype: dictionary
    @return: Pairing that associates each node to its connected component.
    """
    visited = {}
    count = 1

    # For 'each' node not found to belong to a connected component, find its connected component.
    for each in graph:
        if (each not in visited):
            _dfs(graph, visited, count, each)
            count = count + 1

    return visited
5,340,781
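Since the _dfs helper used above is not shown, here is a self-contained sketch of the same idea for a plain adjacency-dict graph, using an explicit queue instead of recursion.

from collections import deque

def connected_components_dict(graph):
    visited, count = {}, 1
    for start in graph:
        if start in visited:
            continue
        # Flood-fill everything reachable from `start` with the same label.
        queue = deque([start])
        visited[start] = count
        while queue:
            node = queue.popleft()
            for neigh in graph[node]:
                if neigh not in visited:
                    visited[neigh] = count
                    queue.append(neigh)
        count += 1
    return visited

print(connected_components_dict({1: [2], 2: [1], 3: []}))
# {1: 1, 2: 1, 3: 2}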
def format_keyvals( entries: typing.Iterable[typing.Tuple[str, typing.Union[None, str, urwid.Widget]]], key_format: str = "key", value_format: str = "text", indent: int = 0 ) -> typing.List[urwid.Columns]: """ Format a list of (key, value) tuples. Args: entries: The list to format. keys must be strings, values can also be None or urwid widgets. The latter makes it possible to use the result of format_keyvals() as a value. key_format: The display attribute for the key. value_format: The display attribute for the value. indent: Additional indent to apply. """ max_key_len = max((len(k) for k, v in entries if k is not None), default=0) max_key_len = min(max_key_len, KEY_MAX) if indent > 2: indent -= 2 # We use dividechars=2 below, which already adds two empty spaces ret = [] for k, v in entries: if v is None: v = urwid.Text("") elif not isinstance(v, urwid.Widget): v = urwid.Text([(value_format, v)]) ret.append( urwid.Columns( [ ("fixed", indent, urwid.Text("")), ( "fixed", max_key_len, urwid.Text([(key_format, k)]) ), v ], dividechars=2 ) ) return ret
5,340,782
def get_item_workdays(scorecard): """ Gets the number of days in this period""" supplier = frappe.get_doc('Supplier', scorecard.supplier) total_item_days = frappe.db.sql(""" SELECT SUM(DATEDIFF( %(end_date)s, po_item.schedule_date) * (po_item.qty)) FROM `tabPurchase Order Item` po_item, `tabPurchase Order` po WHERE po.supplier = %(supplier)s AND po_item.received_qty < po_item.qty AND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s AND po_item.parent = po.name""", {"supplier": supplier.name, "start_date": scorecard.start_date, "end_date": scorecard.end_date}, as_dict=0)[0][0] if not total_item_days: total_item_days = 0 return total_item_days
5,340,783
def load_data(ticker='SNAP', barSizeSetting='3 mins', what='TRADES'): """ loads historical tick data """ if what == 'TRADES': folder = '/home/nate/Dropbox/data/ib_full_adj/data/' elif what == 'ADJUSTED_LAST': folder = '/home/nate/Dropbox/data/ib_split_adj_only/data/' bss = barSizeSetting.replace(' ', '_') trades = pd.read_hdf(folder + ticker + '_trades_' + bss + '.h5') # fill 0 volume with 1 trades.at[trades['volume'] == 0, 'volume'] = 1 bid = pd.read_hdf(folder + ticker + '_bid_' + bss + '.h5') ask = pd.read_hdf(folder + ticker + '_ask_' + bss + '.h5') opt_vol = pd.read_hdf(folder + ticker + '_opt_vol_' + bss + '.h5') # drop duplicates just in case...dupes throw off concat trades.drop_duplicates(inplace=True) bid.drop_duplicates(inplace=True) ask.drop_duplicates(inplace=True) opt_vol.drop_duplicates(inplace=True) # sometimes with dupes, index is no longer sorted trades.sort_index(inplace=True) bid.sort_index(inplace=True) ask.sort_index(inplace=True) opt_vol.sort_index(inplace=True) # TODO: find opt_vol and other files with problems # e.g. found BOX opt_vol file had some price data in it # look for outliers or matches within other DFs, then delete messed up DFs # rename columns so can join to one big dataframe bid.columns = ['bid_' + c for c in bid.columns] ask.columns = ['ask_' + c for c in ask.columns] opt_vol.columns = ['opt_vol_' + c for c in opt_vol.columns] # inner join should drop na's but just to be safe # opt_vol has missing values at the end of each day for some reason... # so cant do inner join or dropna full_df = pd.concat([trades, bid, ask, opt_vol], axis=1)#, join='inner').dropna() full_df.index = full_df.index.tz_localize('America/New_York') return full_df
5,340,784
def time_human(x): """ Gets time as human readable """ # Round time x = round(x, 2) for number, unit in [(60, "s"), (60, "min"), (24, "h"), (365, "days")]: if abs(x) < number: return f"{x:.2f} {unit}" x /= number return f"{x:.2f} years"
5,340,785
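A few sample conversions for the helper above (each unit in the table divides the running value, and the first unit the value fits under wins); assumes time_human is in scope.

print(time_human(42))               # 42.00 s
print(time_human(90))               # 1.50 min
print(time_human(3661))             # 1.02 h
print(time_human(400 * 24 * 3600))  # 1.10 years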
def transaction_json_to_binary_codec_form( dictionary: Dict[str, XRPL_VALUE_TYPE] ) -> Dict[str, XRPL_VALUE_TYPE]: """ Returns a new dictionary in which the keys have been formatted as CamelCase and standardized to be serialized by the binary codec. Args: dictionary: The dictionary to be reformatted. Returns: A new dictionary object that has been reformatted. """ # This method should be made private when it is removed from `xrpl.transactions` return { _key_to_tx_json(key): _value_to_tx_json(value) for (key, value) in dictionary.items() }
5,340,786
def test_eval_frac(): """ Test procedure for the function eval_frac """ print('Testing eval_frac') # Start with a good test result = frac.eval_frac('2/10') introcs.assert_floats_equal(0.2, result) # Now a lot of error tests # Slash in bad places result = frac.eval_frac('12/') introcs.assert_equals(None, result) result = frac.eval_frac('/12') introcs.assert_equals(None, result) # Non numbers around slash result = frac.eval_frac('12/a') introcs.assert_equals(None, result) result = frac.eval_frac('a/12') introcs.assert_equals(None, result) # Non-ints around slash result = frac.eval_frac('12/13.5') introcs.assert_equals(None, result) result = frac.eval_frac('13.5/12') introcs.assert_equals(None, result) # Division by 0 result = frac.eval_frac('0/12') introcs.assert_floats_equal(0.0, result) result = frac.eval_frac('12/0') introcs.assert_equals(None, result)
5,340,787
def conv3x3(in_planes, out_planes, stride=1, groups=1):
    """1-D convolution with kernel size 7 and padding 3 (note: not a 3x3
    convolution, despite the name)."""
    return nn.Conv1d(in_planes, out_planes, kernel_size=7, stride=stride, padding=3, bias=False, groups=groups)
5,340,788
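A quick shape check for the helper above (assumes conv3x3 and torch are in scope): with kernel size 7 and padding 3, the sequence length is preserved at stride 1.

import torch

layer = conv3x3(in_planes=16, out_planes=32)
x = torch.randn(4, 16, 100)   # (batch, channels, length)
print(layer(x).shape)         # torch.Size([4, 32, 100])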
def limit_sub_bbox(bbox, sub_bbox): """ >>> limit_sub_bbox((0, 1, 10, 11), (-1, -1, 9, 8)) (0, 1, 9, 8) >>> limit_sub_bbox((0, 0, 10, 10), (5, 2, 18, 18)) (5, 2, 10, 10) """ minx = max(bbox[0], sub_bbox[0]) miny = max(bbox[1], sub_bbox[1]) maxx = min(bbox[2], sub_bbox[2]) maxy = min(bbox[3], sub_bbox[3]) return minx, miny, maxx, maxy
5,340,789
def _load_taxa_incorp_list(inFile, config):
    """Loading list of taxa that incorporate isotope.

    Parameters
    ----------
    inFile : str
        File name of taxon list
    config : config object

    Returns
    -------
    {library:[taxon1, ...]}
    """
    taxa = {}
    # open in text mode so the lines can be split with a str tab separator
    with open(inFile) as inFH:
        for line in inFH:
            line = line.rstrip().split('\t')
            # if 1 column, using config-defined libraries
            if len(line) == 1:
                line = [[x, line[0]] for x in config.keys()]
            else:
                line = [line]
            for x in line:
                try:
                    taxa[x[0]].append(x[1])
                except KeyError:
                    taxa[x[0]] = [x[1]]
    return taxa
5,340,790
def get_subsections(config: Config) -> t.List[t.Tuple[str, t.Dict]]: """Collect parameter subsections from main configuration. If the `parameters` section contains subsections (e.g. '[parameters.1]', '[parameters.2]'), collect the subsection key-value pairs. Otherwise, return an empty dictionary (i.e. there are no subsections). This is useful for specifying multiple API keys for your configuration. For example: ``` [parameters.alice] api_key=KKKKK1 api_url=UUUUU1 [parameters.bob] api_key=KKKKK2 api_url=UUUUU2 [parameters.eve] api_key=KKKKK3 api_url=UUUUU3 ``` """ return [(name, params) for name, params in config['parameters'].items() if isinstance(params, dict)] or [('default', {})]
5,340,791
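A minimal illustration with a plain nested dict standing in for the Config object, since only config['parameters'].items() is accessed; assumes get_subsections is in scope.

config = {
    'parameters': {
        'alice': {'api_key': 'KKKKK1'},
        'bob': {'api_key': 'KKKKK2'},
        'project': 'demo',   # scalar values are not subsections and are skipped
    }
}
print(get_subsections(config))
# [('alice', {'api_key': 'KKKKK1'}), ('bob', {'api_key': 'KKKKK2'})]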
def team_m2m_changed(sender, instance, action, reverse, model, pk_set, **kwargs): """Called when a Team's members list is changed""" teams = None users = None # If actions is pre_clear or post_clear, pk_set will be None. If # this is the case, just set pk_set to the empty list. pk_set = [] if pk_set is None else pk_set if reverse: # The query is "reversed" if the members are modified by # making a call to "user.team_set". In this case, pk_set is a # list of Teams, and instance is a User object teams = [Team.objects.get(pk=pk) for pk in pk_set] users = [instance] else: # If the query isn't reversed, the members are being modified # by making a call to "team.members". In this case, pk_set is # a list of Users, and instance is a Team object. teams = [instance] users = [User.objects.get(pk=pk) for pk in pk_set] # If we're adding a new user... if action == "pre_add": if len(teams) != 1: logger.warning("Trying to add user to more than one team") for team in teams: for user in users: # If the team is full, throw an exception if team.members.count() >= team.competition.max_num_team_members: logger.error("%s has too many members on it!", team.name) # Remove the user from any old teams they might # already be on for this competition old_teams = user.team_set.filter(competition=team.competition) for old_team in old_teams: logger.debug("Removing %s from %s", user.username, old_team.name) user.team_set.remove(old_team) # Add the user to the team's auth.group user.groups.add(team.get_group()) if action == "post_remove": for team in teams: # Remove the user from the team's auth.group for user in users: user.groups.remove(team.get_group()) # If there aren't any members left on the team, delete it. if team.members.count() == 0: logger.info("%s has no more team members. Deleting it.", team.name) team.delete()
5,340,792
def print_as_table(data: dict, *, capitalize: bool = False): """ Prints the data of a dictionary as a simple table. """ # Get the largest key size = 0 for key in data.keys(): if len(key) > size: size = len(key) # Now, time to start printing for key, value in data.items(): key = str(key) value = str(value) if capitalize: key = key[0].upper() + key[1:] print(key + ":" + (" " * (size - len(key) + 3)) + " " + value)
5,340,793
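Example call for the helper above; the key column is padded so the values line up.

print_as_table({"name": "Ada Lovelace", "language": "Python"}, capitalize=True)
# Name:        Ada Lovelace
# Language:    Python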
def calc_psi(B, rev=False): """Calc Flux function (only valid in 2d) Parameters: B (VectorField): magnetic field, should only have two spatial dimensions so we can infer the symmetry dimension rev (bool): since this integration doesn't like going through undefined regions (like within 1 earth radius of the origin for openggcm), you can use this to start integrating from the opposite corner. Returns: ScalarField: 2-D scalar flux function Raises: ValueError: If B has <> 2 spatial dimensions """ # TODO: if this is painfully slow, i bet just putting this exact # code in a cython module would make it a bunch faster, the problem # being that the loops are in python instead of some broadcasting # numpy type thing B = B.slice_reduce(":") # try to guess if a dim of a 3D field is invariant reduced_axes = [] if B.nr_sdims > 2: slcs = [slice(None)] * B.nr_sdims for i, nxi in enumerate(B.sshape): if nxi <= 2: slcs[i] = 0 reduced_axes.append(B.crds.axes[i]) slcs.insert(B.nr_comp, slice(None)) B = B[slcs] # ok, so the above didn't work... just nip out the smallest dim? if B.nr_sdims == 3: slcs = [slice(None)] * B.nr_sdims i = np.argmin(B.sshape) slcs[i] = 0 reduced_axes.append(B.crds.axes[i]) logger.warning("Tried to get the flux function of a 3D field. " "I can't do that, so I'm\njust ignoring the {0} " "dimension".format(reduced_axes[-1])) slcs.insert(B.nr_comp, slice(None)) B = B[slcs] if B.nr_sdims != 2: raise ValueError("flux function only implemented for 2D fields") comps = "" for comp in "xyz": if comp in B.crds.axes: comps += comp # ex: comps = "yz", comp_inds = [1, 2] comp_inds = [dict(x=0, y=1, z=2)[comp] for comp in comps] # Note: what follows says y, z, but it has been generalized # to any two directions, so hy isn't necessarily hy, but it's # easier to see at a glance if it's correct using a specific # example ycc, zcc = B.get_crds(comps) comp_views = B.component_views() hy, hz = comp_views[comp_inds[0]], comp_views[comp_inds[1]] dy = ycc[1:] - ycc[:-1] dz = zcc[1:] - zcc[:-1] ny, nz = len(ycc), len(zcc) A = np.empty((ny, nz), dtype=B.dtype) if rev: A[-1, -1] = 0.0 for i in range(ny - 2, -1, -1): A[i, -1] = A[i + 1, -1] - dy[i] * 0.5 * (hz[i, -1] + hz[i + 1, -1]) for j in range(nz - 2, -1, -1): A[:, j] = A[:, j + 1] + dz[j] * 0.5 * (hy[:, j + 1] + hy[:, j]) else: A[0, 0] = 0.0 for i in range(1, ny): A[i, 0] = A[i - 1, 0] + dy[i - 1] * 0.5 * (hz[i, 0] + hz[i - 1, 0]) for j in range(1, nz): A[:, j] = A[:, j - 1] - dz[j - 1] * 0.5 * (hy[:, j - 1] + hy[:, j]) psi = field.wrap_field(A, B.crds, name="psi", center=B.center, pretty_name=r"$\psi$", parents=[B]) if reduced_axes: slc = "..., " + ", ".join("{0}=None".format(ax) for ax in reduced_axes) psi = psi[slc] return psi
5,340,794
def batch(iterable, batch_size=5): """Wraps around an iterable or generator and yields results in batches. Example: gen = (letter for letter in string.ascii_uppercase) for n in batch(gen, 8): print(''.join(n)) Will produce: ABCDEFGH IJKLMNOP QRSTUVWX YZ """ if isinstance(iterable, types.GeneratorType): cont = True while cont: result = [] for n in range(batch_size): try: result.append(next(iterable)) except StopIteration: cont = False break yield result else: l = len(iterable) for ndx in range(0, l, batch_size): yield iterable[ndx:min(ndx + batch_size, l)]
5,340,795
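The two branches above behave slightly differently (sequences are sliced, generators are drained batch by batch); a quick demonstration of both, assuming batch is in scope.

print(list(batch([1, 2, 3, 4, 5, 6, 7], batch_size=3)))
# [[1, 2, 3], [4, 5, 6], [7]]

print(list(batch((n for n in range(5)), batch_size=2)))
# [[0, 1], [2, 3], [4]]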
def log_uncertain_unfollowed_pool(login, unfollowed, logger, logfolder):
    """Prints and logs the uncertain unfollowed to a separate file"""
    try:
        with open('{0}{1}_uncertain_unfollowedPool.csv'.format(logfolder, login), 'a+') as followPool:
            with interruption_handler():
                followPool.write('{},\n'.format(unfollowed))
    except BaseException as e:
        logger.error("log_uncertain_unfollowed_pool error {}".format(str(e)))
5,340,796
def test_ap_hs20_deauth_req_from_radius(dev, apdev): """Hotspot 2.0 connection and deauthentication request from RADIUS""" bssid = apdev[0]['bssid'] params = hs20_ap_params() params['nai_realm'] = [ "0,example.com,21[2:4]" ] params['hs20_deauth_req_timeout'] = "2" hostapd.add_ap(apdev[0]['ifname'], params) dev[0].request("SET pmf 2") dev[0].hs20_enable() dev[0].add_cred_values({ 'realm': "example.com", 'username': "hs20-deauth-test", 'password': "password" }) interworking_select(dev[0], bssid, freq="2412") interworking_connect(dev[0], bssid, "TTLS") ev = dev[0].wait_event(["HS20-DEAUTH-IMMINENT-NOTICE"], timeout=5) if ev is None: raise Exception("Timeout on deauth imminent notice") if " 1 100" not in ev: raise Exception("Unexpected deauth imminent contents") ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=3) if ev is None: raise Exception("Timeout on disconnection")
5,340,797
def infer_printed_type(t): """Infer the types that should be printed. The algorithm is as follows: 1. Replace all constant types with None. 2. Apply type-inference on the resulting type. 3. For the first internal type variable that appears, find a constant whose type contains that variable, set that constant to print_type. 4. Repeat until no internal type variables appear. """ from logic.context import Context def clear_const_type(t): if t.is_const() and not hasattr(t, "print_type"): t.backupT = t.T t.T = None elif t.is_comb(): clear_const_type(t.fun) clear_const_type(t.arg) elif t.is_abs(): if not hasattr(t, "print_type"): t.backup_var_T = t.var_T t.var_T = None clear_const_type(t.body) def recover_const_type(t): if t.is_const(): t.T = t.backupT elif t.is_comb(): recover_const_type(t.fun) recover_const_type(t.arg) elif t.is_abs(): t.var_T = t.backup_var_T recover_const_type(t.body) for i in range(100): clear_const_type(t) type_infer(t, forbid_internal=False) def has_internalT(T): return any(is_internal_type(subT) for subT in T.get_tsubs()) to_replace, to_replaceT = None, None def find_to_replace(t): nonlocal to_replace, to_replaceT if (t.is_zero() or t.is_one() or \ (t.is_comb('of_nat', 1) and t.arg.is_binary() and t.arg.dest_binary() >= 2)) and \ has_internalT(t.get_type()): replT = t.get_type() if t.is_comb(): t = t.fun if to_replace is None or replT.size() < to_replaceT.size(): to_replace = t to_replaceT = replT elif t.is_const() and has_internalT(t.T): if to_replace is None or t.T.size() < to_replaceT.size(): to_replace = t to_replaceT = t.T elif t.is_abs(): if has_internalT(t.var_T): if to_replace is None or t.var_T.size() < to_replaceT.size(): to_replace = t to_replaceT = t.var_T find_to_replace(t.body) elif t.is_comb(): find_to_replace(t.fun) find_to_replace(t.arg) find_to_replace(t) recover_const_type(t) if to_replace is None: break to_replace.print_type = True assert i != 99, "infer_printed_type: infinite loop." return None
5,340,798
def bias_variable(shape):
    """
    Return a bias variable of the given shape, initialised to zeros.
    :param shape: shape of the bias tensor
    :return: a tf.Variable initialised with zeros
    """
    b = tf.Variable(tf.constant(0.0, shape=shape))
    return b
5,340,799