content: string (lengths 22 – 815k)
id: int64 (0 – 4.91M)
def splinter_session_scoped_browser():
    """Make it test scoped."""
    return False
30,100
def sample_mask(source, freq_vocab, threshold=1e-3, min_freq=0, seed=None, name=None):
    """Generates random mask for downsampling high frequency items.

    Args:
      source: string `Tensor` of any shape, items to be sampled.
      freq_vocab: `Counter` with frequencies vocabulary.
      threshold: `float`, items occurrence threshold.
      min_freq: `int`, items below that frequency will be treated as unique.
      seed: `int`, used to create a random seed (optional).
        See @{tf.random.set_seed} for behavior.
      name: `string`, a name for the operation (optional).

    Returns:
      A boolean `Tensor` of same shape as source: "keep" flags.
    """
    with tf.name_scope(name or 'sample_mask'):
        source = tf.convert_to_tensor(source, dtype=tf.string, name='source')
        seed1, seed2 = random_seed.get_seed(seed)

        if not isinstance(freq_vocab, Counter):
            raise ValueError('Frequency vocabulary should be a Counter instance')

        keys, freqs = zip(*freq_vocab.most_common())

        return tfmiss_ops.miss_sample_mask(
            source=source,
            keys=keys,
            freqs=freqs,
            threshold=threshold,
            min_freq=min_freq,
            seed=seed1,
            seed2=seed2
        )
30,101
def _xList(l):
    """ """
    if l is None:
        return []
    return l
30,102
def test_grid_length(grid):
    """
    Grid:
        [
            ["a", "b", "c"],
            ["m", "o", "y"]
        ]
    """
    assert len(grid) == 2
30,103
def main() -> None:
    """ Standard main function. """
    print(fetch_url("https://python.org"))
    print("")
    print(fetch_url("https://python.org"))
    time.sleep(11)
    print(fetch_url("https://python.org"))
30,104
def IABN2Float(module: nn.Module) -> nn.Module:
    """If `module` is IABN don't use half precision."""
    if isinstance(module, InplaceAbn):
        module.float()
    for child in module.children():
        IABN2Float(child)
    return module
30,105
def check_heartbeat() -> None:
    """
    Check the agent's heartbeat by verifying heartbeat file has been recently modified
    """
    current_timestamp = pendulum.now().timestamp()
    last_modified_timestamp = path.getmtime("{}/heartbeat".format(AGENT_DIRECTORY))

    # If file has not been modified in the last 40 seconds then raise an exit code of 1
    if current_timestamp - last_modified_timestamp > 40:
        sys.exit(1)
30,106
def start_of_day(val):
    """
    Return a new datetime.datetime object with values that represent
    a start of a day.

    :param val: Date to ...
    :type val: datetime.datetime | datetime.date
    :rtype: datetime.datetime
    """
    if type(val) == date:
        val = datetime.fromordinal(val.toordinal())
    return val.replace(hour=0, minute=0, second=0, microsecond=0)
30,107
def test_init_asana(asana):
    """Tests Asana initialization."""
    assert asana
    asana.client.options['client_name'] = 'brewbot'
    me = asana.client.users.me()
    assert me['workspaces'][0]['name'] == 'lakeannebrewhouse.com'
30,108
def setup():
    """ Setup """
    size(100, 100)
30,109
def pending_mediated_transfer(app_chain, token_network_identifier, amount, identifier):
    """ Nice to read shortcut to make a LockedTransfer where the secret is _not_ revealed.

    While the secret is not revealed all apps will be synchronized, meaning
    they are all going to receive the LockedTransfer message.

    Returns:
        The secret used to generate the LockedTransfer
    """
    # pylint: disable=too-many-locals

    if len(app_chain) < 2:
        raise ValueError('Cannot make a LockedTransfer with less than two apps')

    target = app_chain[-1].raiden.address

    # Generate a secret
    initiator_channel = views.get_channelstate_by_token_network_and_partner(
        views.state_from_app(app_chain[0]),
        token_network_identifier,
        app_chain[1].raiden.address,
    )
    address = initiator_channel.identifier
    nonce_int = channel.get_next_nonce(initiator_channel.our_state)
    nonce_bytes = nonce_int.to_bytes(2, 'big')
    secret = sha3(address + nonce_bytes)

    initiator_app = app_chain[0]
    init_initiator_statechange = initiator_init(
        initiator_app.raiden,
        identifier,
        amount,
        secret,
        token_network_identifier,
        target,
    )
    events = initiator_app.raiden.wal.log_and_dispatch(
        init_initiator_statechange,
        initiator_app.raiden.get_block_number(),
    )
    send_transfermessage = must_contain_entry(events, SendLockedTransfer, {})
    transfermessage = LockedTransfer.from_event(send_transfermessage)
    initiator_app.raiden.sign(transfermessage)

    for mediator_app in app_chain[1:-1]:
        mediator_init_statechange = mediator_init(mediator_app.raiden, transfermessage)
        events = mediator_app.raiden.wal.log_and_dispatch(
            mediator_init_statechange,
            mediator_app.raiden.get_block_number(),
        )
        send_transfermessage = must_contain_entry(events, SendLockedTransfer, {})
        transfermessage = LockedTransfer.from_event(send_transfermessage)
        mediator_app.raiden.sign(transfermessage)

    target_app = app_chain[-1]
    mediator_init_statechange = target_init(transfermessage)
    events = target_app.raiden.wal.log_and_dispatch(
        mediator_init_statechange,
        target_app.raiden.get_block_number(),
    )
    return secret
30,110
def get_comments(post, sort_mode='hot', max_depth=5, max_breadth=5):
    """
    Retrieves comments for a post.

    :param post: The unique id of a Post from which Comments will be returned.
    :type post: `str` or :ref:`Post`
    :param str sort_mode: The order that the Posts will be sorted by. Options are:
        "top" (ranked by upvotes minus downvotes), "best" (similar to top, except
        that it uses a more complicated algorithm to have good posts jump to the
        top and stay there, and bad comments to work their way down, see
        http://blog.reddit.com/2009/10/reddits-new-comment-sorting-system.html),
        "hot" (similar to "top", but weighted by time so that recent, popular
        posts are put near the top), "new" (posts will be sorted by creation time).
    :param int max_depth: The maximum depth that comments will be retrieved from
        (i.e., how many descendants from the topmost comment). To go down
        infinitely, use None.
    :param int max_breadth: The maximum breadth that comments will be retrieved
        from (i.e., how many siblings from the topmost comment). Note that this
        breadth applies at every subtree - in effect, it is the branching factor.
        To get all siblings, use None.
    :returns: list of Comment
    """
    if sort_mode not in SORT_MODES:
        raise RedditException("Unknown sort mode: {}".format(sort_mode))
    if isinstance(post, Post):
        post = post.id
    elif not isinstance(post, str):
        raise RedditException("The post parameter should be a String or a Post")
    result = _get_comments_string(post, sort_mode, max_depth, max_breadth)
    if result:
        try:
            json_result = _from_json(result)[1]['data']['children']
        except ValueError:
            raise RedditException("The response from the server didn't make any sense.")
        if "error" in json_result:
            raise RedditException("Error from Reddit: {}".format(
                json_result.get("error", "Unknown error.")))
        if max_breadth is None:
            return [Comment._from_json(r, post, max_depth=max_depth - 1)
                    for r in json_result]
        else:
            return [Comment._from_json(r, post, max_depth=max_depth - 1,
                                       max_breadth=max_breadth)
                    for r in json_result[:max_breadth]]
    else:
        if _CONNECTED:
            raise RedditException("No response from the server.")
        else:
            raise RedditException("No data was in the cache for this comment.")
30,111
def neighbor_json(json):
    """Read neighbor game from json"""
    utils.check(
        json['type'].split('.', 1)[0] == 'neighbor', 'incorrect type')
    return _NeighborDeviationGame(
        gamereader.loadj(json['model']),
        num_neighbors=json.get('neighbors', json.get('devs', None)))
30,112
def get_b16_config():
    """Returns the ViT-B/16 configuration."""
    config = ml_collections.ConfigDict()
    config.name = 'ViT-B_16'
    config.half_precision = True

    config.encoder = ml_collections.ConfigDict()
    config.encoder.patches = ml_collections.ConfigDict({'size': (16, 16)})
    config.encoder.hidden_size = 768
    config.encoder.mlp_dim = 3072
    config.encoder.num_heads = 12
    config.encoder.num_layers = 12
    config.encoder.attention_dropout_rate = 0.0
    config.encoder.dropout_rate = 0.0
    config.encoder.drop_path_rate = 0.0

    config.decoder = ml_collections.ConfigDict()
    config.decoder.hidden_size = 384
    config.decoder.mlp_dim = 1536
    config.decoder.num_heads = 6
    config.decoder.num_layers = 4
    config.decoder.attention_dropout_rate = 0.0
    config.decoder.dropout_rate = 0.0
    config.decoder.drop_path_rate = 0.0
    config.decoder.out_dim = 768
    return config
30,113
def build_container_hierarchy(dct):
    """Create a hierarchy of Containers based on the contents of a nested dict.
    There will always be a single top level scoping Container regardless of the
    contents of dct.
    """
    top = Container()
    for key, val in dct.items():
        if isinstance(val, dict):
            # it's a dict, so this is a Container
            top.add(key, build_container_hierarchy(val))
        else:
            setattr(top, key, val)
    return top
30,114
def occ_frac(stop_rec_range, bin_size_minutes, edge_bins=1):
    """
    Computes fractional occupancy in inbin and outbin.

    Parameters
    ----------
    stop_rec_range: list consisting of [intime, outtime]
    bin_size_minutes: bin size in minutes
    edge_bins: 1=fractional, 2=whole bin

    Returns
    -------
    [inbin frac, outbin frac] where each is a real number in [0.0, 1.0]
    """
    intime = stop_rec_range[0]
    outtime = stop_rec_range[1]

    bin_freq_str = '{}T'.format(int(bin_size_minutes))
    indtbin = intime.floor(bin_freq_str)
    outdtbin = outtime.floor(bin_freq_str)

    # inbin occupancy
    if edge_bins == 1:
        right_edge = min(indtbin + timedelta(minutes=bin_size_minutes), outtime)
        inbin_occ_secs = (right_edge - intime).total_seconds()
        inbin_occ_frac = inbin_occ_secs / (bin_size_minutes * 60.0)
    else:
        inbin_occ_frac = 1.0

    # outbin occupancy
    if indtbin == outdtbin:
        outbin_occ_frac = 0.0  # Use inbin_occ_frac
    else:
        if edge_bins == 1:
            left_edge = max(outdtbin, intime)
            outbin_occ_secs = (outtime - left_edge).total_seconds()
            outbin_occ_frac = outbin_occ_secs / (bin_size_minutes * 60.0)
        else:
            outbin_occ_frac = 1.0

    assert 1.0 >= inbin_occ_frac >= 0.0, \
        "bad inbin_occ_frac={:.3f} in={} out={}".format(inbin_occ_frac, intime, outtime)
    assert 1.0 >= outbin_occ_frac >= 0.0, \
        "bad outbin_occ_frac={:.3f} in={} out={}".format(outbin_occ_frac, intime, outtime)

    return [inbin_occ_frac, outbin_occ_frac]
30,115
def geomprogr_mesh(N=None, a=0, L=None, Delta0=None, ratio=None):
    """Compute a sequence of values according to a geometric progression.

    Different options are possible with the input number of intervals in the
    sequence N, the length of the first interval Delta0, the total length L and
    the ratio of the sought geometric progression. Three of them are requested
    in input to find a valid sequence. The sequence is drawn within the points
    a and b."""
    if list(locals().values()).count(None) > 1:
        raise ValueError('Insufficient number of input data for a sequence')
    if ratio is not None:
        if (ratio < 0):
            raise ValueError('negative ratio is not valid')
    if L is not None:
        if (L < 0):
            raise ValueError('negative total length is not valid')
    if Delta0 is not None:
        if (Delta0 < 0):
            raise ValueError('negative length of the 1st interval is not valid')
    if N is not None:
        if (N < 0):
            raise ValueError('negative number of intervals is not valid')

    if N is None:
        if ratio < 1:
            N = np.log(1 - L / Delta0 * (1 - ratio)) / np.log(ratio)
        else:
            N = np.log(1 + L / Delta0 * (ratio - 1)) / np.log(ratio)
    elif L is None:
        if ratio < 1:
            L = Delta0 * (1 - ratio**N) / (1 - ratio)
        else:
            L = Delta0 * (ratio**N - 1) / (ratio - 1)
    elif Delta0 is None:
        if not np.isclose(ratio, 1):
            Delta0 = L * (1 - ratio) / (1 - ratio**N)
        else:
            Delta0 = L / float(N)
    elif ratio is None:
        f = lambda q: q**N - L / Delta0 * q + L / Delta0 - 1
        x = L / float(N)
        if Delta0 > x:
            ratio = brentq(f, 0, 1 - 1.e-6)
        elif Delta0 < x:
            ratio = brentq(f, 1 + 1.e-6, 20)
        else:
            ratio = 1

    if np.isclose(ratio, 1):
        r = np.linspace(0, L, N + 1)
    else:
        r = np.insert(np.full(N - 1, ratio), 0, 1)
        r = np.cumprod(r) * Delta0
        r = np.insert(np.cumsum(r), 0, 0)
    return r + a
30,116
def list_subclasses(package, base_class):
    """
    Dynamically import all modules in a package and scan for all subclasses of
    a base class.

    `package`: the package to import
    `base_class`: the base class to scan for subclasses

    return: a dictionary of possible subclasses with class name as key and
        class type information as value
    """
    import_modules(package)
    subclasses = all_subclasses(base_class)
    return dict(zip(map(lambda c: c.__name__, subclasses), subclasses))
30,117
def maxima_in_range(r, g_r, r_min, r_max):
    """Find the maxima in a range of r, g_r values"""
    idx = np.where(np.logical_and(np.greater_equal(r, r_min),
                                  np.greater_equal(r_max, r)))
    g_r_slice = g_r[idx]
    g_r_max = g_r_slice[g_r_slice.argmax()]
    idx_max, _ = find_nearest(g_r, g_r_max)
    return r[idx_max], g_r[idx_max]
30,118
def shared_fit_preprocessing(fit_class):
    """
    Shared preprocessing to get X, y, class_order, and row_weights.
    Used by _materialize method for both python and R fitting.

    :param fit_class: PythonFit or RFit class
    :return:
        X: pd.DataFrame of features to use in fit
        y: pd.Series of target to use in fit
        class_order: array specifying class order, or None
        row_weights: pd.Series of row weights, or None
    """
    # read in data
    if fit_class.input_filename.endswith(".mtx"):
        colnames = None
        if fit_class.sparse_column_file:
            colnames = [column.strip() for column in open(fit_class.sparse_column_file).readlines()]
        df = pd.DataFrame.sparse.from_spmatrix(mmread(fit_class.input_filename), columns=colnames)
    else:
        df = pd.read_csv(fit_class.input_filename)

    # get num rows to use
    if fit_class.num_rows == "ALL":
        fit_class.num_rows = len(df)
    else:
        if fit_class.num_rows > len(df):
            raise DrumCommonException(
                "Requested number of rows greater than data length {} > {}".format(
                    fit_class.num_rows, len(df)
                )
            )
        fit_class.num_rows = int(fit_class.num_rows)

    # get target and features, resample and modify nrows if needed
    if fit_class.target_filename or fit_class.target_name:
        if fit_class.target_filename:
            y_unsampled = pd.read_csv(fit_class.target_filename, index_col=False)
            assert (
                len(y_unsampled.columns) == 1
            ), "Your target dataset at path {} has {} columns named {}".format(
                fit_class.target_filename, len(y_unsampled.columns), y_unsampled.columns
            )
            assert len(df) == len(
                y_unsampled
            ), "Your input data has {} entries, but your target data has {} entries".format(
                len(df), len(y_unsampled)
            )
            if y_unsampled.columns[0] in df.columns:
                y_unsampled.columns = ["__target__"]
            df = df.merge(y_unsampled, left_index=True, right_index=True)
            assert len(y_unsampled.columns.values) == 1
            fit_class.target_name = y_unsampled.columns.values[0]
        df = df.dropna(subset=[fit_class.target_name])
        X = df.drop(fit_class.target_name, axis=1).sample(fit_class.num_rows, random_state=1)
        y = df[fit_class.target_name].sample(fit_class.num_rows, random_state=1)
    else:
        X = df.sample(fit_class.num_rows, random_state=1)
        y = None

    row_weights = extract_weights(X, fit_class)
    class_order = extract_class_order(fit_class)
    return X, y, class_order, row_weights
30,119
def webhook():
    """
    Triggers on each GET and POST request.
    Handles GET and POST requests using this function.
    :return: Return status code acknowledge for the GET and POST request
    """
    if request.method == 'POST':
        data = request.get_json(force=True)
        # you may not want to log every incoming message in production, but it's good for testing
        log(json.dumps(data))
        if data["object"] == "page":
            for entry in data["entry"]:
                for event in entry["messaging"]:
                    sender_id = event["sender"]["id"]
                    if 'message' in event and 'text' in event['message']:
                        message_text = event["message"]["text"]
                        if event.get("message").get("quick_reply"):
                            feedback_payload = event["message"]["quick_reply"]["payload"]
                            handle_message(feedback_payload, sender_id, message_type="feedback")
                        else:
                            handle_message(message_text, sender_id)
                    if 'postback' in event and 'payload' in event['postback']:
                        postback_payload = event['postback']['payload']
                        log(postback_payload)
                        handle_message(postback_payload, sender_id, message_type="feedback")
                    if event.get("delivery"):
                        pass
                    if event.get("optin"):
                        pass
        return "ok", 200
    elif request.method == 'GET':  # Verification
        if request.args.get("hub.verify_token") == VERIFY_TOKEN:
            return request.args.get('hub.challenge'), 200
        else:
            return 'Error, wrong validation token', 403
30,120
def extract_winner(state: 'TicTacToeState') -> str:
    """
    Return the winner of the game, or announce if the game resulted in a tie.
    """
    winner = 'No one'
    tictactoe = TicTacToeGame(True)
    tictactoe.current_state = state
    if tictactoe.is_winner('O'):
        winner = 'O'
    elif tictactoe.is_winner('X'):
        winner = 'X'
    return winner
30,121
def _prensor_value_fetch(prensor_tree: prensor.Prensor):
    """Fetch function for PrensorValue. See the document in session_lib."""
    # pylint: disable=protected-access
    type_spec = prensor_tree._type_spec
    components = type_spec._to_components(prensor_tree)

    def _construct_prensor_value(component_values):
        return _prensor_value_from_type_spec_and_component_values(
            type_spec, iter(component_values))

    return components, _construct_prensor_value
30,122
def test_can_parse_a_unary_array_from_single_step():
    """
    It should extract a single ordinary step correctly into an array of steps
    """
    steps = parse_steps(I_HAVE_TASTY_BEVERAGES)
    assert len(steps) == 1
    assert isinstance(steps[0], Step)
    assert steps[0].sentence == first_line_of(I_HAVE_TASTY_BEVERAGES)
30,123
def start_workers_with_fabric():
    """ testing spinning up workers using fabric """
    tmp_file = open(settings.AUTOSCALE_TMP_FILE, 'w')
    tmp_file.write('running')
    tmp_file.close()
    subprocess.call("/usr/local/bin/fab \
                    -f /opt/codebase/auto-scale/fabfile.py \
                    create_multiple_workers", shell=True)
    return True
30,124
def test_glob_list(mock_glob):
    """Multiple paths ok."""
    context = Context({
        'ok1': 'ov1',
        'glob': ['./arb/x', './arb/y', './arb/z']})

    mock_glob.return_value = [
        './f1.1',
        './f2.1', './f2.2', './f2.3',
    ]

    with patch_logger('pypyr.steps.glob', logging.INFO) as mock_logger_info:
        glob_step.run_step(context)

    mock_logger_info.assert_called_once_with(
        'glob checked 3 globs and saved 4 paths to globOut')

    assert context, "context shouldn't be None"
    assert len(context) == 3, "context should have 3 items"
    assert context['ok1'] == 'ov1'
    assert context['glob'] == ['./arb/x', './arb/y', './arb/z']
    assert context["globOut"] == [
        './f1.1',
        './f2.1', './f2.2', './f2.3',
    ]

    mock_glob.assert_called_once_with(['./arb/x', './arb/y', './arb/z'])
30,125
def request_validation_error(error):
    """Handles Value Errors from bad data"""
    message = str(error)
    app.logger.error(message)
    return {
        'status_code': status.HTTP_400_BAD_REQUEST,
        'error': 'Bad Request',
        'message': message
    }, status.HTTP_400_BAD_REQUEST
30,126
def all(request):
    """Handle places list page."""
    places = Place.objects.all()
    context = {'places': places}
    return render(request, 'rental/list_place.html', context)
30,127
def get_key_by_value(dictionary, search_value):
    """
    Searches for a value in a dictionary and returns the key of the first occurrence.

    :param dictionary: dictionary to search in
    :param search_value: value to search for
    """
    for key, value in dictionary.items():
        if value == search_value:
            return ugettext(key)
30,128
def _subtract_ten(x):
    """Subtracts 10 from x using control flow ops.

    This function is equivalent to "x - 10" but uses a tf.while_loop, in order
    to test the use of functions that involve control flow ops.

    Args:
      x: A tensor of integral type.

    Returns:
      A tensor representing x - 10.
    """
    def stop_condition(counter, x_minus_counter):
        del x_minus_counter  # unused
        return tf.less(counter, 10)

    def iteration(counter, x_minus_counter):
        return tf.add(counter, 1), tf.add(x_minus_counter, -1)

    initial_values = [tf.constant(0), x]
    return tf.while_loop(stop_condition, iteration, initial_values)[1]
30,129
def load_fortune_file(f: str) -> list:
    """ load fortunes from a file and return it as list """
    saved = []
    try:
        with open(f, 'r') as datfile:
            text = datfile.read()
            for line in text.split('%'):
                if len(line.strip()) > 0:
                    saved.append(line)
    except OSError:
        app.logger.warning('fail to process file: {}'.format(f))
        pass
    else:
        return saved
30,130
def maskStats(wins, last_win, mask, maxLen):
    """
    Return a three-element list with the first element being the total
    proportion of the window that is masked, the second element being a list
    of masked positions that are relative to the window start=0 and the window
    end = window length, and the third being the last window before breaking
    to expedite the next loop.
    """
    chrom = wins[0].split(":")[0]
    a = wins[1]
    L = wins[2]
    b = a + L
    prop = [0.0, [], 0]
    try:
        for i in range(last_win, len(mask[chrom])):
            x, y = mask[chrom][i][0], mask[chrom][i][1]
            if y < a:
                continue
            if b < x:
                return prop
            else:
                # i.e. [a--b] and [x--y] overlap
                if a >= x and b <= y:
                    return [1.0, [[0, maxLen]], i]
                elif a >= x and b > y:
                    win_prop = (y - a) / float(b - a)
                    prop[0] += win_prop
                    prop[1].append([0, int(win_prop * maxLen)])
                    prop[2] = i
                elif b <= y and a < x:
                    win_prop = (b - x) / float(b - a)
                    prop[0] += win_prop
                    prop[1].append([int((1 - win_prop) * maxLen), maxLen])
                    prop[2] = i
                else:
                    win_prop = (y - x) / float(b - a)
                    prop[0] += win_prop
                    prop[1].append([int(((x - a) / float(b - a)) * maxLen),
                                    int(((y - a) / float(b - a)) * maxLen)])
                    prop[2] = i
        return prop
    except KeyError:
        return prop
30,131
def dsoftmax(Z):
    """Given a (m,n) matrix, returns a (m,n,n) jacobian matrix"""
    m, n = np.shape(Z)
    softZ = softmax(Z)
    prodtensor = np.einsum("ij,ik->ijk", softZ, softZ)
    diagtensor = np.einsum('ij,jk->ijk', softZ, np.eye(n, n))
    return diagtensor - prodtensor
30,132
async def vbd_unplug(cluster_id: str, vbd_uuid: str):
    """Unplug from VBD"""
    try:
        session = create_session(
            _id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
        )
        vbd: VBD = VBD.get_by_uuid(session=session, uuid=vbd_uuid)

        if vbd is not None:
            ret = dict(success=vbd.unplug())
        else:
            ret = dict(success=False)

        session.xenapi.session.logout()
        return ret
    except Failure as xenapi_error:
        raise HTTPException(
            status_code=500, detail=xenapi_failure_jsonify(xenapi_error)
        )
    except Fault as xml_rpc_error:
        raise HTTPException(
            status_code=int(xml_rpc_error.faultCode),
            detail=xml_rpc_error.faultString,
        )
    except RemoteDisconnected as rd_error:
        raise HTTPException(status_code=500, detail=rd_error.strerror)
30,133
def scrape_sentence(file_path: str):
    """Scrape list of sentence in txtfile, separated by newline."""
    root_dump = Sentence.ROOT
    root_dump.mkdir(parents=True, exist_ok=True)
    scraper(Sentence, _load_words(file_path), root_dump)
30,134
def write_file_latest(data: List[Any], file_path: str) -> None:
    """writes the most recent file as -latest.md"""
    logging.debug("writing file -latest: %s", file_path)
    table = tabulate(data, headers="keys", showindex="always", tablefmt="github")
    last_char_index = file_path.rfind("/")
    latest_file_path = file_path[:last_char_index] + "/-latest.md"
    with open(
        latest_file_path, "w+", newline="", encoding=constants.DEFAULT_FILE_ENCODING
    ) as _file:
        _file.write(table)
30,135
def read_ignore_patterns(f: BinaryIO) -> Iterable[bytes]:
    """Read a git ignore file.

    Args:
      f: File-like object to read from
    Returns:
      List of patterns
    """
    for line in f:
        line = line.rstrip(b"\r\n")

        # Ignore blank lines, they're used for readability.
        if not line:
            continue

        if line.startswith(b'#'):
            # Comment
            continue

        # Trailing spaces are ignored unless they are quoted with a backslash.
        while line.endswith(b' ') and not line.endswith(b'\\ '):
            line = line[:-1]

        line = line.replace(b'\\ ', b' ')

        yield line
30,136
def calculate_age(created, now):
    """
    Prepare a Docker CLI-like output of image age.

    After researching `datetime`, `dateutil` and other libraries I decided to
    do this manually to get as close as possible to Docker CLI output.

    `created` and `now` are both datetime.datetime objects.
    """
    age = {}
    rdelta = relativedelta.relativedelta(now, created)
    difference = now - created
    if rdelta.years > 0:
        age['number'] = rdelta.years
        age['unit'] = 'years'
    elif rdelta.years == 0 and difference >= timedelta(days=60):
        age['number'] = rdelta.months
        age['unit'] = 'months'
    elif rdelta.years == 0 and difference < timedelta(days=60) and difference >= timedelta(days=14):
        days = 0
        if rdelta.months == 1:
            days = 30
        days += rdelta.days
        weeks = round(days / 7)
        age['number'] = weeks
        age['unit'] = 'weeks'
    elif rdelta.years == 0 and difference < timedelta(days=14) and difference >= timedelta(days=1):
        age['number'] = rdelta.days
        age['unit'] = 'days'
    elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours >= 1:
        age['number'] = rdelta.hours
        age['unit'] = 'hours'
    elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours < 1 and rdelta.minutes > 0:
        age['number'] = rdelta.minutes
        age['unit'] = 'minutes'
    elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours < 1 and rdelta.minutes <= 0 and rdelta.seconds > 0:
        age['number'] = rdelta.seconds
        age['unit'] = 'seconds'
    elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours < 1 and rdelta.minutes <= 0 and rdelta.seconds <= 0:
        age['number'] = 1
        age['unit'] = 'second'
    else:
        raise DkrlsError(f'Encountered age of an image which this CLI can\'t handle: {rdelta}')
    return age
30,137
def Maxout(x, num_unit):
    """
    Maxout as in the paper `Maxout Networks <http://arxiv.org/abs/1302.4389>`_.

    Args:
        x (tf.Tensor): a NHWC or NC tensor. Channel has to be known.
        num_unit (int): a int. Must be divisible by C.

    Returns:
        tf.Tensor: of shape NHW(C/num_unit) named ``output``.
    """
    input_shape = x.get_shape().as_list()
    ndim = len(input_shape)
    assert ndim == 4 or ndim == 2
    ch = input_shape[-1]
    assert ch is not None and ch % num_unit == 0
    # use integer division so the reshape target dimension is an int, not a float
    if ndim == 4:
        x = tf.reshape(x, [-1, input_shape[1], input_shape[2], ch // num_unit, num_unit])
    else:
        x = tf.reshape(x, [-1, ch // num_unit, num_unit])
    return tf.reduce_max(x, ndim, name='output')
30,138
def is_youtube_url(url: str) -> bool:
    """Checks if a string is a youtube url

    Args:
        url (str): youtube url

    Returns:
        bool: true or false
    """
    match = re.match(r"^(https?\:\/\/)?(www\.youtube\.com|youtu\.be)\/.+$", url)
    return bool(match)
30,139
def time_nanosleep():
    """ Delay for a number of seconds and nanoseconds"""
    raise NotImplementedError()
30,140
def get_regions(positions, genome_file, base=0, count=7):
    """Return a list of regions surrounding a position.

    Will loop through each chromosome and search all positions in that
    chromosome in one batch. Lookup is serial per chromosome.

    Args:
        positions (dict):  Dictionary of {chrom->positions}
        genome_file (str): Location of a genome fasta file or directory of
                           files. If directory, file names must be
                           <chrom_name>.fa[.gz]. Gzipped OK.
        base (int):        Either 0 or 1, base of positions in your list
        count (int):       Distance + and - the position to extract

    Returns:
        dict: {chrom->{position->sequence}}
    """
    # If genome file is a directory, use recursion! Because why not.
    if os.path.isdir(genome_file):
        chroms = positions.keys()
        files = []
        for chrom in chroms:
            files.append(get_fasta_file(genome_file, chrom))
        final = {}
        for chrom, fl in zip(chroms, files):
            final.update(
                get_dinucleotides({chrom: positions[chrom]}, fl, base, count)
            )
        return final

    done = []
    results = {}
    with open_zipped(genome_file) as fasta_file:
        for chrom in seqio.parse(fasta_file, 'fasta'):
            if chrom.id not in positions:
                continue
            else:
                done.append(chrom.id)
            results[chrom.id] = {}
            for pos in positions[chrom.id]:
                ps = pos - base  # Correct base-1 positions here
                region = seq(chrom[ps - count:ps + count + 1])
                results[chrom.id][pos] = region

    if len(done) != len(positions.keys()):
        print('The following chromosomes were not in files: {}'
              .format([i for i in positions if i not in done]))

    return results
30,141
def test_evaluate_sets_all_inputs_clean(clear_default_graph):
    """After the evaluation, the inputs are considered clean."""
    node = SquareNode()
    node.inputs['in1'].value = 2
    node.inputs['compound_in']['0'].value = 0
    assert node.is_dirty
    node.evaluate()
    assert not node.is_dirty
30,142
def set_pin_connection(
        conn, write_cur, pin_graph_node_pkey, forward, graph_node_pkey, tracks
):
    """ Sets pin connection box location canonical location.

    Tracks that are a part of the pinfeed also get this location.
    """
    cur = conn.cursor()
    cur2 = conn.cursor()

    cur.execute(
        """SELECT node_pkey, graph_node_type FROM graph_node WHERE pkey = ?""",
        (pin_graph_node_pkey, )
    )
    pin_node_pkey, graph_node_type = cur.fetchone()

    source_wires = []
    cur.execute(
        """SELECT pkey FROM wire WHERE node_pkey = (
            SELECT node_pkey FROM graph_node WHERE pkey = ?
            )""", (graph_node_pkey, )
    )
    for (wire_pkey, ) in cur:
        if forward:
            cur2.execute(
                """SELECT count() FROM pip_in_tile WHERE src_wire_in_tile_pkey = (
                    SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?
                    )""", (wire_pkey, )
            )
        else:
            cur2.execute(
                """SELECT count() FROM pip_in_tile WHERE dest_wire_in_tile_pkey = (
                    SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?
                    )""", (wire_pkey, )
            )

        has_pip = cur2.fetchone()[0]
        if has_pip:
            source_wires.append(wire_pkey)

    assert len(source_wires) <= 1

    if len(source_wires) == 1:
        cur.execute(
            "SELECT phy_tile_pkey FROM wire WHERE pkey = ?", (source_wires[0], )
        )
        phy_tile_pkey = cur.fetchone()[0]

        for track_pkey in tracks:
            write_cur.execute(
                "UPDATE track SET canon_phy_tile_pkey = ? WHERE pkey = ?", (
                    phy_tile_pkey,
                    track_pkey,
                )
            )

        if not forward:
            assert NodeType(graph_node_type) == NodeType.IPIN
            source_wire_pkey = source_wires[0]

            write_cur.execute(
                """
                UPDATE graph_node SET connection_box_wire_pkey = ? WHERE pkey = ?
                """, (
                    source_wire_pkey,
                    pin_graph_node_pkey,
                )
            )
30,143
def render_series_fragment(site_config):
    """
    Adds "other posts in this series" fragment to series posts.
    """
    series_fragment = open("_includes/posts_in_series.html", "r").read()

    for post_object in site_config["series_posts"]:
        print("Generating 'Other posts in this series' fragment for " + post_object[1])

        category, post_name, page_url = post_object

        loader = jinja2.FileSystemLoader(searchpath="./")
        template = jinja2.Environment(loader=loader)
        rendered_series_text = template.from_string(series_fragment)

        posts_to_show = site_config["categories"].get(category)
        see_more_link = False

        if len(posts_to_show) > 10:
            see_more_link = True

        category_slug = (
            category.replace(" ", "-").lower().replace("(", "").replace(")", "")
        )

        rendered_series_text = rendered_series_text.render(
            posts_in_series=posts_to_show[:10],
            see_more_link=see_more_link,
            site=site_config,
            category_slug=category_slug,
            page={"url": page_url},
        )

        year_month_date = "/".join(post_name.split("-")[:3]) + "/"

        post_name = (
            "-".join(post_name.split("-")[3:]).replace(".md", "").replace(".html", "")
        )

        with open(OUTPUT + year_month_date + post_name + "/index.html", "r") as file:
            file_content = file.read()

        file_content = file_content.replace(
            "<!--- posts_in_series -->", rendered_series_text
        )

        with open(OUTPUT + year_month_date + post_name + "/index.html", "w") as file:
            file.write(file_content)

    return series_fragment
30,144
def create_barplot_orthologues_by_species(df, path, title, colormap, genes, species):
    """
    The function creates a bar plot using seaborn.

    :param df: pandas.DataFrame object
    :param path: The CSV file path.
    :param title: Title for the plot.
    :param colormap: Colormap
    :param genes: Ordered list of genes.
    :param species: Ordered list of species.
    :return:
    """
    print("Creating barplot by species for {}".format(path))
    output_path = os.path.dirname(path)
    output = join_folder(output_path, "%s_barplot_byspecies.png" % title)
    fig = plt.figure(figsize=(16, 10), dpi=180)
    sns.barplot(x='Species', y='Orthologues', hue='Gene Name', data=df,
                order=species, hue_order=genes, palette=colormap)
    plt.ylabel("#Orthologues")
    plt.xlabel("Species")
    plt.ylim(0, )
    # plt.suptitle(title, fontsize=16)
    plt.yticks(fontsize=10)
    plt.xticks(fontsize=10)
    plt.savefig(output)
    plt.close()
    return output
30,145
def import_tracks(postgres_pwd):
    """ Imports tracks and labels tables to Postgres database """
    if os.path.exists('config/tracks.csv') and os.path.exists('config/labels/labels.csv'):
        print('Importing tables to Postgres database..')
        time.sleep(2)
        connection_error = False
        try:
            pg = create_engine(f'postgresql://postgres:{postgres_pwd}@0.0.0.0:5555/postgres')  # try to connect to pg
        except:
            print('Connection failed. Trying again in 10 seconds')
            time.sleep(10)
            try:
                pg = create_engine(f'postgresql://postgres:{postgres_pwd}@0.0.0.0:5555/postgres')  # try to connect to pg
            except:
                connection_error = True
        if connection_error:
            print('Connection failed. You can try again later using the csvimport.py script in the /config folder.')
        else:
            tracksimport = pd.read_csv('config/tracks.csv', delimiter=',', decimal='.')
            tracksimport.iloc[:, :91] = tracksimport.iloc[:, :91].apply(pd.to_numeric, errors='coerce', downcast='float')
            tracksimport[['trackid', 'year', 'labelid']] = tracksimport[['trackid', 'year', 'labelid']].apply(pd.to_numeric, errors='coerce', downcast='integer')
            tracksimport.to_sql('tracks', pg, if_exists='replace', method='multi', index=False, chunksize=1000)  # import to postgres
            labelsimport = pd.read_csv('config/labels/labels.csv', delimiter=',', decimal='.')
            labelsimport[['labelid', 'numtracks']] = labelsimport[['labelid', 'numtracks']].apply(pd.to_numeric, errors='coerce', downcast='integer')
            labelsimport.to_sql('labels', pg, if_exists='replace', method='multi', index=False, chunksize=1000)  # import to postgres
            pg.dispose()
            print("Database import complete.")
    else:
        print('Error: could not find one or any of these files: config/tracks.csv; config/labels/labels.csv')
30,146
def get_class_by_name(name):
    """Gets a class object by its name, e.g. sklearn.linear_model.LogisticRegression"""
    if name.startswith('cid.analytics'):
        # We changed package names in March 2017. This preserves compatibility with old models.
        name = name.replace('cid.analytics', 'analytics.core')
    elif name.startswith('cid.'):
        name = name.replace('cid.', 'analytics.')
    module, class_name = name.rsplit('.', 1)
    return getattr(import_module(module), class_name)
30,147
def numero_22():
    """numero_22"""
    check50.run("python3 numeros_introescos.py") \
        .stdin("11031103\n11031130", prompt=False) \
        .stdout("11031103\n11031104\n11031105\n11031106\n11031107\n11031108\n11031109\n11031110\n11031111\n11031112\n11031113\n11031114\n11031115\n11031116\n11031117\n11031118\n11031119\n11031120\n11031121\n11031122\n11031123\n11031124\n11031125\n11031126\n11031127\n11031128\n11031129\n11031130\n28", regex=False) \
        .exit(0)
30,148
def parse_additive(token):
    """ Parse token type - Additive """
    track(token, False)
    if hasattr(token, '_fields'):
        for f in token._fields:
            track(f, False)
            if is_tainted(getattr(token, f)):
                common.logger.debug("TAINTED: " + str(token))
            else:
                if list_checker(token, f):
                    common.logger.debug("TAINTED: " + str(token))
    return
30,149
def _single_optimize(
    direction,
    criterion,
    criterion_kwargs,
    params,
    algorithm,
    constraints,
    algo_options,
    derivative,
    derivative_kwargs,
    criterion_and_derivative,
    criterion_and_derivative_kwargs,
    numdiff_options,
    logging,
    log_options,
    error_handling,
    error_penalty,
    cache_size,
    scaling_options,
):
    """Minimize or maximize *criterion* using *algorithm* subject to *constraints*.

    See the docstring of ``_optimize`` for an explanation of all arguments.

    Returns:
        dict: The optimization result.
    """
    # store all arguments in a dictionary to save them in the database later
    problem_data = {
        "direction": direction,
        # "criterion": criterion,
        "criterion_kwargs": criterion_kwargs,
        "algorithm": algorithm,
        "constraints": constraints,
        "algo_options": algo_options,
        # "derivative": derivative,
        "derivative_kwargs": derivative_kwargs,
        # "criterion_and_derivative": criterion_and_derivative,
        "criterion_and_derivative_kwargs": criterion_and_derivative_kwargs,
        "numdiff_options": numdiff_options,
        "log_options": log_options,
        "error_handling": error_handling,
        "error_penalty": error_penalty,
        "cache_size": int(cache_size),
    }

    # partial the kwargs into corresponding functions
    criterion = functools.partial(criterion, **criterion_kwargs)
    if derivative is not None:
        derivative = functools.partial(derivative, **derivative_kwargs)
    if criterion_and_derivative is not None:
        criterion_and_derivative = functools.partial(
            criterion_and_derivative, **criterion_and_derivative_kwargs
        )

    # process params and constraints
    params = add_default_bounds_to_params(params)
    for col in ["value", "lower_bound", "upper_bound"]:
        params[col] = params[col].astype(float)
    check_params_are_valid(params)

    # calculate scaling factor and offset
    if scaling_options not in (None, {}):
        scaling_factor, scaling_offset = calculate_scaling_factor_and_offset(
            params=params,
            constraints=constraints,
            criterion=criterion,
            **scaling_options,
        )
    else:
        scaling_factor, scaling_offset = None, None

    # name and group column are needed in the dashboard but could lead to problems
    # if present anywhere else
    params_with_name_and_group = _add_name_and_group_columns_to_params(params)
    problem_data["params"] = params_with_name_and_group

    params_to_internal, params_from_internal = get_reparametrize_functions(
        params=params,
        constraints=constraints,
        scaling_factor=scaling_factor,
        scaling_offset=scaling_offset,
    )

    # get internal parameters and bounds
    x = params_to_internal(params["value"].to_numpy())
    lower_bounds, upper_bounds = get_internal_bounds(
        params=params,
        constraints=constraints,
        scaling_factor=scaling_factor,
        scaling_offset=scaling_offset,
    )

    # process algorithm and algo_options
    if isinstance(algorithm, str):
        algo_name = algorithm
    else:
        algo_name = getattr(algorithm, "name", "your algorithm")

    if isinstance(algorithm, str):
        try:
            algorithm = AVAILABLE_ALGORITHMS[algorithm]
        except KeyError:
            proposed = propose_algorithms(algorithm, list(AVAILABLE_ALGORITHMS))
            raise ValueError(
                f"Invalid algorithm: {algorithm}. Did you mean {proposed}?"
            ) from None

    algo_options = _adjust_options_to_algorithms(
        algo_options, lower_bounds, upper_bounds, algorithm, algo_name
    )

    # get convert derivative
    convert_derivative = get_derivative_conversion_function(
        params=params,
        constraints=constraints,
        scaling_factor=scaling_factor,
        scaling_offset=scaling_offset,
    )

    # do first function evaluation
    first_eval = {
        "internal_params": x,
        "external_params": params,
        "output": criterion(params),
    }

    # fill numdiff_options with defaults
    numdiff_options = _fill_numdiff_options_with_defaults(
        numdiff_options, lower_bounds, upper_bounds
    )

    # create and initialize the database
    if not logging:
        database = False
    else:
        database = _create_and_initialize_database(
            logging, log_options, first_eval, problem_data
        )

    # set default error penalty
    error_penalty = _fill_error_penalty_with_defaults(
        error_penalty, first_eval, direction
    )

    # create cache
    x_hash = hash_array(x)
    cache = {x_hash: {"criterion": first_eval["output"]}}

    # partial the internal_criterion_and_derivative_template
    internal_criterion_and_derivative = functools.partial(
        internal_criterion_and_derivative_template,
        direction=direction,
        criterion=criterion,
        params=params,
        reparametrize_from_internal=params_from_internal,
        convert_derivative=convert_derivative,
        derivative=derivative,
        criterion_and_derivative=criterion_and_derivative,
        numdiff_options=numdiff_options,
        database=database,
        database_path=logging,
        log_options=log_options,
        error_handling=error_handling,
        error_penalty=error_penalty,
        first_criterion_evaluation=first_eval,
        cache=cache,
        cache_size=cache_size,
    )

    res = algorithm(internal_criterion_and_derivative, x, **algo_options)

    p = params.copy()
    p["value"] = params_from_internal(res["solution_x"])
    res["solution_params"] = p

    if "solution_criterion" not in res:
        res["solution_criterion"] = criterion(p)

    if direction == "maximize":
        res["solution_criterion"] = -res["solution_criterion"]

    # in the long run we can get some of those from the database if logging was used.
    optional_entries = [
        "solution_derivative",
        "solution_hessian",
        "n_criterion_evaluations",
        "n_derivative_evaluations",
        "n_iterations",
        "success",
        "reached_convergence_criterion",
        "message",
    ]

    for entry in optional_entries:
        res[entry] = res.get(entry, f"Not reported by {algo_name}")

    if logging:
        _log_final_status(res, database, logging, log_options)

    return res
30,150
def item_len(item):
    """return length of the string format of item"""
    return len(str(item))
30,151
def main():
    """ main """
    with open('input.txt') as fp:
        data = sorted([int(line.strip()) for line in fp.readlines()])
    data.insert(0, 0)
    print('part 1 answer:', part1(data))
30,152
def get_progress_logger():
    """Returns the swift progress logger"""
    return progress_logger
30,153
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
    """Retry calling the decorated function using an exponential backoff.

    http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
    original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry

    :param ExceptionToCheck: the exception to check. may be a tuple of
        exceptions to check
    :type ExceptionToCheck: Exception or tuple
    :param tries: number of times to try (not retry) before giving up
    :type tries: int
    :param delay: initial delay between retries in seconds
    :type delay: int
    :param backoff: backoff multiplier e.g. value of 2 will double the delay
        each retry
    :type backoff: int
    :param logger: logger to use. If None, print
    :type logger: logging.Logger instance
    """
    def deco_retry(f):

        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck as e:
                    msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
                    if logger:
                        logger.warning(msg)
                    else:
                        sys.stderr.write(msg + '\n')
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            try:
                return f(*args, **kwargs)
            except ExceptionToCheck as e:
                msg = "Failed last attempt %s, %s %s" % (str(e), str(args), str(kwargs))
                if logger:
                    logger.warning(msg)
                else:
                    sys.stderr.write(msg + "\n")
                raise

        return f_retry  # true decorator

    return deco_retry
30,154
def instantiate_me(spec2d_files, spectrograph, **kwargs):
    """
    Instantiate the CoAdd2d subclass appropriate for the provided spectrograph.

    The class must be subclassed from Reduce.  See :class:`Reduce` for the
    description of the valid keyword arguments.

    Args:
        spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
            The instrument used to collect the data to be reduced.
        tslits_dict: dict
            dictionary containing slit/order boundary information
        tilts (np.ndarray):

    Returns:
        :class:`PypeIt`: One of the classes with :class:`PypeIt` as its base.
    """
    indx = [
        c.__name__ == (spectrograph.pypeline + 'Coadd2d')
        for c in Coadd2d.__subclasses__()
    ]
    if not np.any(indx):
        msgs.error('Pipeline {0} is not defined!'.format(spectrograph.pypeline))
    return Coadd2d.__subclasses__()[np.where(indx)[0][0]](spec2d_files, spectrograph, **kwargs)
30,155
def quoteattr(s, table=ESCAPE_ATTR_TABLE):
    """Escape and quote an attribute value."""
    for c, r in table:
        if c in s:
            s = s.replace(c, r)
    return '"%s"' % s
30,156
def is_numeric(array):
    """Return False if any value in the array or list is not numeric
    Note boolean values are taken as numeric"""
    for i in array:
        try:
            float(i)
        except ValueError:
            return False
    return True
30,157
def reductions_right(collection, callback=None, accumulator=None):
    """This method is like :func:`reductions` except that it iterates over
    elements of a `collection` from right to left.

    Args:
        collection (list|dict): Collection to iterate over.
        callback (mixed): Callback applied per iteration.
        accumulator (mixed, optional): Initial value of aggregator. Default is
            to use the result of the first iteration.

    Returns:
        list: Results of each reduction operation.

    Example:

        >>> reductions_right([1, 2, 3, 4], lambda total, x: total ** x)
        [64, 4096, 4096]

    Note:
        The last element of the returned list would be the result of using
        :func:`reduce_`.

    .. versionadded:: 2.0.0
    """
    return reductions(collection, callback, accumulator, from_right=True)
30,158
def pelt_settling_time(margin=1, init=0, final=PELT_SCALE, window=PELT_WINDOW,
                       half_life=PELT_HALF_LIFE, scale=PELT_SCALE):
    """
    Compute an approximation of the PELT settling time.

    :param margin: How close to the final value we want to get, in PELT units.
    :type margin_pct: float

    :param init: Initial PELT value.
    :type init: float

    :param final: Final PELT value.
    :type final: float

    :param window: PELT window in seconds.
    :type window: float

    :param half_life: PELT half life, in number of windows.
    :type half_life: int

    :param scale: PELT scale.
    :type scale: float

    .. note:: The PELT signal is approximated as a first order filter. This
        does not take into account the averaging inside a window, but the
        window is small enough in practice for that effect to be negligible.
    """
    tau = _pelt_tau(half_life, window)

    # Response of a first order low pass filter:
    # y(t) = u(t) * (1 - exp(-t/tau))
    # We want to find `t` such as the output y(t) is as close as we want from
    # the input u(t):
    # A * u(t) = u(t) * (1 - exp(-t/tau))
    # A is how close from u(t) we want the output to get after a time `t`
    # From which follows:
    # A = (1 - exp(-t/tau))
    # t = -tau * log(1-A)

    # Since the equation we have is for a step response, i.e. from 0 to a final value
    delta = abs(final - init)
    # Since margin and delta are in the same unit, we don't have to normalize
    # them to `scale` first.
    relative_margin = (margin / delta)
    A = 1 - relative_margin

    settling_time = - tau * math.log(1 - A)

    return settling_time
30,159
def affichage_graphiques(v, a, t, EpSim, EkSim):
    """plot and shows physics of the track, such as: velocity, acceleration,
    potential energy, and kinetic energy, all according to time"""
    # display speed and acceleration as a function of time
    # print("\n", v, "\n\n\n", a)
    energy_total_list = []
    for i in range(len(EpSim)):
        energy_total_list.append(EkSim[i] + EpSim[i])

    plt.figure()
    plt.plot(t, v, 'b-', label='Vs (m/s)')
    plt.plot(t, a, 'r-', label='a (m/s**2)')
    # plt.plot(t, EkSim+EpSim, 'k-', label='E/m')
    plt.legend()
    plt.ylabel('Speed and acceleration according to time')
    plt.xlabel('t [s]')
    plt.title("Speed and acceleration")
    plt.show()

    # plot energies as a function of time
    plt.figure()
    plt.plot(t, EpSim, 'b-', label='Ep/m')
    plt.plot(t, EkSim, 'r-', label='Ek/m')
    plt.plot(t, energy_total_list, 'k-', label='E/m')
    plt.legend()
    plt.ylabel('Energy/mass [J/kg]')
    plt.xlabel('t [s]')
    plt.title("Energy according to time")
    plt.show()
30,160
def get_file_content(url, comes_from=None):
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL.  Returns (location, content).  Content is unicode."""
    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        if (scheme == 'file' and comes_from
                and comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme == 'file':
            path = url.split(':', 1)[1]
            path = path.replace('\\', '/')
            match = _url_slash_drive_re.match(path)
            if match:
                path = match.group(1) + ':' + path.split('|', 1)[1]
            path = urllib.unquote(path)
            if path.startswith('/'):
                path = '/' + path.lstrip('/')
            url = path
        else:
            ## FIXME: catch some errors
            resp = urlopen(url)
            encoding = get_http_message_param(resp.headers, 'charset', 'utf-8')
            return geturl(resp), resp.read().decode(encoding)
    try:
        f = open(url)
        content = f.read()
    except IOError:
        e = sys.exc_info()[1]
        raise InstallationError('Could not open requirements file: %s' % str(e))
    else:
        f.close()
    return url, content
30,161
def InstancesOverlap(instanceList, instance):
    """Returns True if instance contains a vertex that is contained in an
    instance of the given instanceList."""
    for instance2 in instanceList:
        if InstanceOverlap(instance, instance2):
            return True
    return False
30,162
def git_push(ctx):
    """
    Push new version and corresponding tag to origin
    :return:
    """
    # get current version
    new_version = version.__version__
    values = list(map(lambda x: int(x), new_version.split('.')))

    # Push to origin new version and corresponding tag:
    #   * commit new version
    #   * create tag
    #   * push version,tag to origin
    local(ctx, f'git add {project_name}/version.py version.py')
    local(ctx, 'git commit -m "updated version"')
    local(ctx, f'git tag {values[0]}.{values[1]}.{values[2]}')
    local(ctx, 'git push origin --tags')
    local(ctx, 'git push')
30,163
def extract_relative_directory(archive, member_path, dest_dir):
    """ Extracts all members from the archive that match the path specified

    Will strip the specified path from the member before copying to the destination
    """
    if not member_path.endswith('/'):
        member_path += '/'

    offset = len(member_path)
    filtered_members = [copy.copy(member) for member in archive.getmembers()
                        if member.name.startswith(member_path)]
    for member in filtered_members:
        member.name = member.name[offset:]

    archive.extractall(dest_dir, filtered_members)
30,164
def pprint(strings) -> None:
    """
    Pretty prints string arrays

    :param strings: An array of strings
    :type strings: list[str]
    :return: None
    :rtype: None
    """
    for i in range(len(strings)):
        print(strings[i])
30,165
def calc_qm_lea(p_zone_ref, temp_zone, temp_ext, u_wind_site, dict_props_nat_vent):
    """
    Calculation of leakage infiltration and exfiltration air mass flow as a
    function of zone indoor reference pressure

    :param p_zone_ref: zone reference pressure (Pa)
    :param temp_zone: air temperature in ventilation zone (°C)
    :param temp_ext: exterior air temperature (°C)
    :param u_wind_site: wind velocity (m/s)
    :param dict_props_nat_vent: dictionary containing natural ventilation properties of zone

    :returns:
        - qm_lea_in : air mass flow rate into zone through leakages (kg/h)
        - qm_lea_out : air mass flow rate out of zone through leakages (kg/h)
    """
    # get default leakage paths from locals
    coeff_lea_path = dict_props_nat_vent['coeff_lea_path']
    height_lea_path = dict_props_nat_vent['height_lea_path']

    # lookup wind pressure coefficients for leakage paths from locals
    coeff_wind_pressure_path = dict_props_nat_vent['coeff_wind_pressure_path_lea']

    # calculation of pressure difference at leakage path
    delta_p_path = calc_delta_p_path(p_zone_ref, height_lea_path, temp_zone,
                                     coeff_wind_pressure_path, u_wind_site, temp_ext)

    # calculation of leakage air volume flow at path
    qv_lea_path = calc_qv_lea_path(coeff_lea_path, delta_p_path)

    # Eq. (65) in [1], infiltration is sum of air flows greater zero
    qv_lea_in = qv_lea_path[np.where(qv_lea_path > 0)].sum()

    # Eq. (66) in [1], exfiltration is sum of air flows smaller zero
    qv_lea_out = qv_lea_path[np.where(qv_lea_path < 0)].sum()

    # conversion to air mass flows according to 6.4.3.8 in [1]
    # Eq. (67) in [1]
    qm_lea_in = qv_lea_in * calc_rho_air(temp_ext)
    # Eq. (68) in [1]
    qm_lea_out = qv_lea_out * calc_rho_air(temp_zone)

    return qm_lea_in, qm_lea_out
30,166
def test_collect_missing_module():
    """Assert error is raised for missing modules."""
    handler = get_handler(theme="material")
    with pytest.raises(CollectionError):
        handler.collect("aaaaaaaa", {})
30,167
def adjust_learning_rate(learning_rate, weight_decay, optimizer, epoch, lr_steps):
    """Sets the learning rate to the initial LR decayed by 10 every 20 or 30 epochs"""
    decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
    lr = learning_rate * decay
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr * param_group['lr_mult']
        param_group['weight_decay'] = weight_decay * param_group['decay_mult']
30,168
def folder2Outline(folder, node, filter=None):
    """Create an outline from a folder.

    For each file or folder create a node with properties.
    For folders recursively create children.
    """
    defaults = NSUserDefaults.standardUserDefaults()
    ignoredot = False
    try:
        ignoredot = bool(defaults.objectForKey_(u'optIgnoreDotFiles'))
    except:
        pass

    root, directories, files = os.walk(folder).next()
    if ignoredot:
        files[:] = [f for f in files if not f.startswith('.')]
        directories[:] = [f for f in directories if not f.startswith('.')]

    result = []
    items = files[:]
    items.extend(directories)
    items.sort()
    for item in items:
        path = os.path.join(root, item)
        typ, currnode = makeFilePropertiesNode(path)
        if typ == "folder":
            folder2Outline(path, currnode)
        node['children'].append(currnode)
30,169
def update_hits_at_k(
        hits_at_k_values: Dict[int, List[float]],
        rank_of_positive_subject_based: int,
        rank_of_positive_object_based: int
) -> None:
    """Update the Hits@K dictionary for two values."""
    for k, values in hits_at_k_values.items():
        if rank_of_positive_subject_based < k:
            values.append(1.0)
        else:
            values.append(0.0)

        if rank_of_positive_object_based < k:
            values.append(1.0)
        else:
            values.append(0.0)
30,170
def kill():
    """ Kill / stop module execution """
    global STOP
    STOP = True
    util.printit("\n\n\n\n\n\n\n")
30,171
def start_monitor():
    """Define and start scheduled monitoring service."""
    monitor_enabled = config_json[env]['MONITOR_ENABLED']
    monitor_trigger_interval_s = int(
        config_json[env]['MONITOR_TRIGGER_INTERVAL_S']
    )

    # IF SCHEDULE IS ENABLED IN CONFIG:
    if monitor_enabled == "1":
        print("\nSpace Weather Service Monitor: ENABLED (running every %s seconds)"
              % monitor_trigger_interval_s)

        # RUN INITIAL CHECK SPACE WEATHER
        processes.process_check_space_weather()

        # CREATE SCHEDULER W/ INTERVAL TRIGGER AND START
        scheduler = BackgroundScheduler()
        scheduler.add_job(
            func=processes.process_check_space_weather,
            trigger=IntervalTrigger(seconds=monitor_trigger_interval_s),
            id='check_space_weather',
            name='Checking Space Weather Every 30 Seconds')
        scheduler.start()
        atexit.register(lambda: scheduler.shutdown())
    else:
        print("\nSpace Weather Service Monitor: DISABLED")
30,172
def exit_missing_credentials():
    """ Exit the application with missing credentials error. """
    logging.error('>> Please enter the credentials to the config.ini first!')
    exit()
30,173
def http_400_view(request):
    """Test view for 400"""
    raise SuspiciousOperation
30,174
def set_topics():
    """
    Adds topics to repositories in the open-contracting-extensions organization.

    - ocds-extension
    - ocds-core-extension
    - ocds-community-extension
    - ocds-profile
    - european-union
    - public-private-partnerships
    """
    format_string = 'https://raw.githubusercontent.com/open-contracting-extensions/{}/{}/docs/extension_versions.json'

    profiles = defaultdict(list)
    for profile, branch in (('european-union', 'latest'), ('public-private-partnerships', '1.0-dev')):
        extension_versions = requests.get(format_string.format(profile, branch)).json()
        for extension_id in extension_versions.keys():
            profiles[extension_id].append(profile)

    registry = ExtensionRegistry(extension_versions_url, extensions_url)

    repos = requests.get('https://api.github.com/orgs/open-contracting-extensions/repos?per_page=100').json()
    for repo in repos:
        topics = []

        if repo['name'].endswith('_extension'):
            topics.append('ocds-extension')
        else:
            topics.append('ocds-profile')

        for version in registry:
            if '/{}/'.format(repo['full_name']) in version.base_url:
                if version.core:
                    topics.append('ocds-core-extension')
                else:
                    topics.append('ocds-community-extension')
                topics.extend(profiles[version.id])
                break
        else:
            if 'ocds-profile' not in topics:
                print('{} is not registered'.format(repo['name']))

        response = requests.put('https://api.github.com/repos/{}/topics'.format(repo['full_name']),
                                data=json.dumps({'names': topics}),
                                headers={'accept': 'application/vnd.github.mercy-preview+json'})
        response.raise_for_status()
30,175
def test_logger(monkeypatch):
    """Test logger function returns valid loggers."""
    _my_logger = tolog.logger("tolkein")
    assert isinstance(_my_logger, logging.Logger)
    assert _my_logger.name == "tolkein"
    assert _my_logger.level == logging.INFO
    monkeypatch.setenv("DEBUG", "true")
    _debug_logger = tolog.logger("debug")
    assert isinstance(_debug_logger, logging.Logger)
    assert _debug_logger.name == "debug"
    assert _debug_logger.level == logging.DEBUG
30,176
async def ban(bon):
    """ For .ban command, bans the replied/tagged person """
    # Here laying the sanity check
    chat = await bon.get_chat()
    admin = chat.admin_rights
    creator = chat.creator

    # Well
    if not (admin or creator):
        return await bon.edit(NO_ADMIN)

    user, reason = await get_user_from_event(bon)
    if not user:
        return

    # Announce that we're going to whack the pest
    await bon.edit("**Banindo...**")

    try:
        await bon.client(EditBannedRequest(bon.chat_id, user.id, BANNED_RIGHTS))
    except BadRequestError:
        return await bon.edit(NO_PERM)

    # Helps ban group join spammers more easily
    try:
        reply = await bon.get_reply_message()
        if reply:
            await reply.delete()
    except BadRequestError:
        return await bon.edit(
            "**Não tenho direitos de excluir mensagens, mas o usuário foi banido!**"
        )

    # Delete message and then tell that the command
    # is done gracefully
    # Shout out the ID, so that fedadmins can fban later
    if reason:
        await bon.edit(f"**{str(user.id)}** foi banido!\nMotivo: {reason}")
    else:
        await bon.edit(f"**{str(user.id)}** foi banido!")

    # Announce to the logging group if we have banned the person
    # successfully!
    if BOTLOG:
        await bon.client.send_message(
            BOTLOG_CHATID,
            "#BAN\n"
            f"USUÁRIO: [{user.first_name}](tg://user?id={user.id})\n"
            f"CHAT: {bon.chat.title}(`{bon.chat_id}`)",
        )
30,177
def read_offset(rt_info):
    """
    Get the offsets of all partitions.

    :param rt_info: detailed information of the rt
    :return: offset_msgs and offset_info
    """
    rt_id = rt_info[RESULT_TABLE_ID]
    task_config = get_task_base_conf_by_name(f"{HDFS}-table_{rt_id}")
    if not task_config:
        return {}

    try:
        partition_num = task_config[TASKS_MAX]
        webhdfs_addr = _get_webhdfs_addr_by_rt(rt_info)
        offset_dir = get_offset_dir(
            webhdfs_addr, task_config[GROUP_ID], task_config[NAME], task_config[TOPICS_DIR], partition_num
        )
        offset_msgs = {}
        if offset_dir:
            for p in range(partition_num):
                files = _get_hdfs_dir_files(webhdfs_addr, f"{offset_dir}/{p}")
                offset = get_max_offset(files) if files else "-1"
                topic_partition = f"table_{rt_id}-{p}"
                offset_msgs[topic_partition] = offset

        logger.info(f"rt {rt_id} get offset_msgs from hdfs offset dir: {offset_msgs}")
        return offset_msgs
    except Exception:
        logger.warning(f"failed to get offset_msgs for rt {rt_id}", exc_info=True)
        return {}
30,178
def _CalculateElementMaxNCharge(mol,AtomicNum=6): """ ################################################################# **Internal used only** Most negative charge on atom with atomic number equal to n ################################################################# """ Hmol=Chem.AddHs(mol) GMCharge.ComputeGasteigerCharges(Hmol,iter_step) res=[] for atom in Hmol.GetAtoms(): if atom.GetAtomicNum()==AtomicNum: res.append(float(atom.GetProp('_GasteigerCharge'))) if res==[]: return 0 else: return min(res)
30,179
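# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Assumes RDKit is installed and that the module above defines GMCharge
# (RDKit's Gasteiger partial-charge module) and iter_step (an iteration count)
# at module level, as the function body implies.
from rdkit import Chem

mol = Chem.MolFromSmiles('CC(=O)O')                   # acetic acid
print(_CalculateElementMaxNCharge(mol, AtomicNum=6))  # most negative charge on a carbon atom
print(_CalculateElementMaxNCharge(mol, AtomicNum=8))  # most negative charge on an oxygen atom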
def get_task_metrics_dir(
        model="spatiotemporal_mean", submodel=None, gt_id="contest_tmp2m",
        horizon="34w", target_dates=None):
    """Returns the directory in which evaluation metrics for a given submodel or model are stored

    Args:
      model: string model name
      submodel: string submodel name or None; if None, returns metrics directory
        associated with selected submodel or returns None if no submodel selected
      gt_id: contest_tmp2m or contest_precip
      horizon: 34w or 56w
      target_dates: target dates specifier passed on to submodel selection when
        submodel is None
    """
    if submodel is None:
        submodel = get_selected_submodel_name(model=model, gt_id=gt_id, horizon=horizon, target_dates=target_dates)
        if submodel is None:
            return None
    return os.path.join(
        "eval", "metrics", model, "submodel_forecasts", submodel, f"{gt_id}_{horizon}"
    )
30,180
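# --- Hedged usage sketch (editorial addition) ---
# With an explicit (hypothetical) submodel name the function is a pure path
# builder, so no submodel-selection helpers are needed for this call.
path = get_task_metrics_dir(
    model="spatiotemporal_mean",
    submodel="spatiotemporal_mean-1981_2010",   # hypothetical submodel name
    gt_id="contest_tmp2m", horizon="34w")
# On POSIX: eval/metrics/spatiotemporal_mean/submodel_forecasts/spatiotemporal_mean-1981_2010/contest_tmp2m_34w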
def check_stability(lambda0, W, mu, tau, dt_max): """Check if the model is stable for given parameter estimates.""" N, _ = W.shape model = NetworkPoisson(N=N, dt_max=dt_max) model.lamb = lambda0 model.W = W model.mu = mu model.tau = tau return model.check_stability(return_value=True)
30,181
def pid2id(pid):
    """Convert a pid to its Slurm job id."""
    with open('/proc/%s/cgroup' % pid) as f:
        for line in f:
            m = re.search(r'.*slurm/uid_.*/job_(\d+)/.*', line)
            if m:
                return m.group(1)
    return None
30,182
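# --- Hedged usage sketch (editorial addition) ---
# Only meaningful on a node where Slurm confines tasks in cgroups under
# .../slurm/uid_<uid>/job_<jobid>/; outside such a cgroup it returns None.
import os
print(pid2id(os.getpid()))   # Slurm job id of the current process, or None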
def get_develop_directory(): """ Return the develop directory """ if platform.system() == "Windows": return os.path.dirname(os.path.realpath(__file__)) + "\\qibullet" else: return os.path.dirname(os.path.realpath(__file__)) + "/qibullet"
30,183
def multiaxis_scatterplot(xdata, ydata, *, axes_loc, xlabel='', ylabel='', title='', num_cols=1, num_rows=1, saveas='mscatterplot', **kwargs): """ Create a scatter plot with multiple axes. :param xdata: list of arraylikes, passed on to the plotting functions for each axis (x-axis) :param ydata: list of arraylikes, passed on to the plotting functions for each axis (y-axis) :param axes_loc: list of tuples of two integers, location of each axis :param xlabel: str or list of str, labels for the x axis :param ylabel: str or list of str, labels for the y-axis :param title: str or list of str, titles for the subplots :param num_rows: int, how many rows of axis are created :param num_cols: int, how many columns of axis are created :param saveas: str filename of the saved file Special Kwargs: :param subplot_params: dict with integer keys, can contain all valid kwargs for :py:func:`multiple_scatterplots()` with the integer key denoting to which subplot the changes are applied :param axes_kwargs: dict with integer keys, additional arguments to pass on to `subplot2grid` for the creation of each axis (e.g colspan, rowspan) Other Kwargs will be passed on to all :py:func:`multiple_scatterplots()` calls (If they are not overwritten by parameters in `subplot_params`). """ #convert parameters to list of parameters for subplots subplot_params = kwargs.pop('subplot_params', {}) axes_kwargs = kwargs.pop('axes_kwargs', {}) param_list = [None] * len(axes_loc) for indx, val in enumerate(param_list): if indx in subplot_params: param_list[indx] = subplot_params[indx] else: param_list[indx] = {} if indx in axes_kwargs: param_list[indx]['axes_kwargs'] = axes_kwargs[indx] if not isinstance(xlabel, list): param_list[indx]['xlabel'] = xlabel else: param_list[indx]['xlabel'] = xlabel[indx] if not isinstance(ylabel, list): param_list[indx]['ylabel'] = ylabel else: param_list[indx]['ylabel'] = ylabel[indx] if not isinstance(title, list): param_list[indx]['title'] = title else: param_list[indx]['title'] = title[indx] general_keys = {'figure_kwargs', 'show', 'save_plots'} general_info = {key: val for key, val in kwargs.items() if key in general_keys} kwargs = {key: val for key, val in kwargs.items() if key not in general_keys} plot_params.set_parameters(**general_info) #figsize is automatically scaled with the shape of the plot plot_shape = (num_cols, num_rows) plot_params['figure_kwargs'] = { 'figsize': ([plot_shape[indx] * size for indx, size in enumerate(plot_params['figure_kwargs']['figsize'])]) } plot_shape = tuple(reversed(plot_shape)) fig = plt.figure(**plot_params['figure_kwargs']) axis = [] for indx, subplot_data in enumerate(zip(axes_loc, xdata, ydata, param_list)): location, x, y, params = subplot_data subplot_kwargs = copy.deepcopy(kwargs) subplot_kwargs.update(params) ax = plt.subplot2grid(plot_shape, location, fig=fig, **subplot_kwargs.pop('axes_kwargs', {})) with NestedPlotParameters(plot_params): ax = multiple_scatterplots(x, y, axis=ax, **subplot_kwargs, save_plots=False, show=False) axis.append(ax) plot_params.save_plot(saveas) return axis
30,184
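# --- Hedged usage sketch (editorial addition) ---
# Assumes the surrounding plotting module (masci-tools style) is loaded, i.e.
# plot_params, multiple_scatterplots and NestedPlotParameters exist as used above.
# Two axes side by side; each axis receives its own list of scatter data sets.
import numpy as np
x = np.linspace(0, 10, 50)
axes = multiaxis_scatterplot(
    [[x], [x]], [[np.sin(x)], [np.cos(x)]],
    axes_loc=[(0, 0), (0, 1)],
    xlabel='x', ylabel=['sin(x)', 'cos(x)'], title=['sine', 'cosine'],
    num_cols=2, num_rows=1,
    saveas='sin_cos_scatter')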
def log1p_mse_loss(estimate: torch.Tensor, target: torch.Tensor, reduce: str = 'sum'):
    """
    Computes the log1p-mse loss between `estimate` and `target` as defined in
    [1], eq. 4. The `reduce` argument only affects the speaker dimension; the
    time dimension is always reduced by a mean operation as in [1].

    It has the advantage of not going to negative infinity in case of perfect
    reconstruction while keeping the logarithmic nature.

    The log1p-mse loss is defined as [1]:

    .. math::

        L^{\\text{T-L1PMSE}} = \\log_{10} \\left(1 + \\frac{1}{T}\\sum_t |x(t) - y(t)|^2\\right)

    Args:
        estimate (... x T): The estimated signal
        target (... x T, same as estimate): The target signal
        reduce: Reduction applied over the remaining (e.g. speaker) dimensions,
            e.g. 'sum' or 'mean'; passed on to the module-level `_reduce` helper

    Returns:
        The log1p-mse error between `estimate` and `target`

    References:
        [1] Thilo von Neumann, Christoph Boeddeker, Lukas Drude, Keisuke Kinoshita,
            Marc Delcroix, Tomohiro Nakatani, and Reinhold Haeb-Umbach.
            "Multi-talker ASR for an unknown number of sources: Joint training of
            source counting, separation and ASR". http://arxiv.org/abs/2006.02786.
    """
    # Use the PyTorch implementation for MSE, should be the fastest
    return _reduce(
        torch.log10(
            1 + F.mse_loss(estimate, target, reduction='none').mean(dim=-1)),
        reduce=reduce
    )
30,185
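# --- Hedged usage sketch (editorial addition) ---
# Assumes the module context of the function above: torch, torch.nn.functional
# as F, and the module-level _reduce helper accepting 'sum' or 'mean'.
import torch
estimate = torch.randn(2, 16000)                        # e.g. 2 speakers x 16000 samples
target = estimate + 0.01 * torch.randn_like(estimate)
loss = log1p_mse_loss(estimate, target, reduce='sum')
print(loss)                                             # small positive scalar; 0 for a perfect estimate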
def quaternion_inverse(quaternion: np.ndarray) -> np.ndarray: """Return inverse of quaternion.""" return quaternion_conjugate(quaternion) / np.dot(quaternion, quaternion)
30,186
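# --- Hedged worked example (editorial addition) ---
# Assumes (w, x, y, z) component ordering and a quaternion_conjugate that
# negates the vector part; both are redefined below only to make the sketch
# self-contained, and the module's own versions are assumed equivalent.
# The check: q multiplied by its inverse gives the identity quaternion (1, 0, 0, 0).
import numpy as np

def quaternion_conjugate(q):
    w, x, y, z = q
    return np.array([w, -x, -y, -z])

def quaternion_multiply(q1, q2):
    # Hamilton product, written out component-wise
    w1, x1, y1, z1 = q1
    w2, x2, y2, z2 = q2
    return np.array([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ])

q = np.array([1.0, 2.0, 3.0, 4.0])
print(quaternion_multiply(q, quaternion_inverse(q)))   # ~ [1, 0, 0, 0]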
def set_lockto_collid(id): """Lock to a collection. """ cherrypy.response.cookie['lockto'] = id.encode('utf8') cherrypy.response.cookie['lockto']['path'] = '/' cherrypy.response.cookie['lockto']['version'] = '1'
30,187
def find_pgfortran(conf): """Find the PGI fortran compiler (will look in the environment variable 'FC')""" fc = conf.find_program(["pgfortran", "pgf95", "pgf90"], var="FC") conf.get_pgfortran_version(fc) conf.env.FC_NAME = "PGFC"
30,188
def _make_indexable(iterable): """Ensure iterable supports indexing or convert to an indexable variant. Convert sparse matrices to csr and other non-indexable iterable to arrays. Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged. Parameters ---------- iterable : {list, dataframe, array, sparse} or None Object to be converted to an indexable iterable. """ if issparse(iterable): return mt.tensor(iterable) elif hasattr(iterable, "iloc"): if iterable.ndim == 1: return md.Series(iterable) else: return md.DataFrame(iterable) elif hasattr(iterable, "__getitem__"): return mt.tensor(iterable) elif iterable is None: return iterable return mt.tensor(iterable)
30,189
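# --- Hedged usage sketch (editorial addition) ---
# Assumes the Mars-learn module context referenced above: mars.tensor as mt,
# mars.dataframe as md, and an issparse check.
import pandas as pd
print(type(_make_indexable([1, 2, 3])))             # Mars tensor
print(type(_make_indexable(pd.Series([1, 2, 3]))))  # Mars Series
print(_make_indexable(None))                        # None is passed through unchanged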
def _soft_validate_additional_properties(validator,
                                         additional_properties_value,
                                         instance,
                                         schema):
    """This validator function is used for legacy v2 compatible mode in v2.1.

    This will skip all the additional-properties checking but keeps checking
    'patternProperties'. 'patternProperties' is used for the metadata API.

    If there are no properties on the instance that are not specified in the
    schema, this will return without any effect. If there are any such extra
    properties, they will be handled as follows:

    - if the instance passed to the method is not of type "object", this
      method will return without any effect.
    - if the 'additional_properties_value' parameter is True, this method
      will return without any effect.
    - if the schema has an additionalProperties value of True, the extra
      properties on the instance will not be touched.
    - if the schema has an additionalProperties value of False and there are
      no patternProperties specified, the extra properties will be stripped
      from the instance.
    - if the schema has an additionalProperties value of False and there are
      patternProperties specified, the extra properties will not be touched
      and a validation error is raised for any property that does not match
      the pattern.
    """
    if (not validator.is_type(instance, "object") or
            additional_properties_value):
        return

    properties = schema.get("properties", {})
    patterns = "|".join(schema.get("patternProperties", {}))
    extra_properties = set()
    for prop in instance:
        if prop not in properties:
            if patterns:
                if not re.search(patterns, prop):
                    extra_properties.add(prop)
            else:
                extra_properties.add(prop)

    if not extra_properties:
        return

    if patterns:
        error = "Additional properties are not allowed (%s %s unexpected)"
        if len(extra_properties) == 1:
            verb = "was"
        else:
            verb = "were"
        yield jsonschema_exc.ValidationError(
            error % (", ".join(repr(extra) for extra in extra_properties),
                     verb))
    else:
        for prop in extra_properties:
            del instance[prop]
30,190
def read_document(collection, document_id): """Return the contents of the document containing document_id""" print("Found a document with _id {}: {}".format(document_id, collection.find_one({"_id": document_id})))
30,191
def batchnorm_forward(x, gamma, beta, bn_param): """ Forward pass for batch normalization. During training the sample mean and (uncorrected) sample variance are computed from minibatch statistics and used to normalize the incoming data. During training we also keep an exponentially decaying running mean of the mean and variance of each feature, and these averages are used to normalize data at test-time. At each timestep we update the running averages for mean and variance using an exponential decay based on the momentum parameter: running_mean = momentum * running_mean + (1 - momentum) * sample_mean running_var = momentum * running_var + (1 - momentum) * sample_var Note that the batch normalization paper suggests a different test-time behavior: they compute sample mean and variance for each feature using a large number of training images rather than using a running average. For this implementation we have chosen to use running averages instead since they do not require an additional estimation step; the torch7 implementation of batch normalization also uses running averages. Input: - x: Data of shape (N, D) - gamma: Scale parameter of shape (D,) - beta: Shift paremeter of shape (D,) - bn_param: Dictionary with the following keys: - mode: 'train' or 'test'; required - eps: Constant for numeric stability - momentum: Constant for running mean / variance. - running_mean: Array of shape (D,) giving running mean of features - running_var Array of shape (D,) giving running variance of features Returns a tuple of: - out: of shape (N, D) - cache: A tuple of values needed in the backward pass """ mode = bn_param['mode'] eps = bn_param.get('eps', 1e-5) momentum = bn_param.get('momentum', 0.9) N, D = x.shape running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype)) running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype)) out, cache = None, None if mode == 'train': # Forward pass # Step 1 - shape of mu (D,) mu = 1 / float(N) * np.sum(x, axis=0) # Step 2 - shape of var (N,D) xmu = x - mu # Step 3 - shape of carre (N,D) carre = xmu**2 # Step 4 - shape of var (D,) var = 1 / float(N) * np.sum(carre, axis=0) # Step 5 - Shape sqrtvar (D,) sqrtvar = np.sqrt(var + eps) # Step 6 - Shape invvar (D,) invvar = 1. / sqrtvar # Step 7 - Shape va2 (N,D) va2 = xmu * invvar # Step 8 - Shape va3 (N,D) va3 = gamma * va2 # Step 9 - Shape out (N,D) out = va3 + beta running_mean = momentum * running_mean + (1.0 - momentum) * mu running_var = momentum * running_var + (1.0 - momentum) * var cache = (mu, xmu, carre, var, sqrtvar, invvar, va2, va3, gamma, beta, x, bn_param) elif mode == 'test': mu = running_mean var = running_var xhat = (x - mu) / np.sqrt(var + eps) out = gamma * xhat + beta cache = (mu, var, gamma, beta, bn_param) else: raise ValueError('Invalid forward batchnorm mode "%s"' % mode) # Store the updated running means back into bn_param bn_param['running_mean'] = running_mean bn_param['running_var'] = running_var return out, cache
30,192
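# --- Hedged usage sketch (editorial addition) ---
# Quick sanity check: in train mode each feature of the output is normalized to
# approximately zero mean and unit variance before gamma and beta are applied,
# so with gamma=1, beta=0 the output statistics are ~N(0, 1).
import numpy as np
np.random.seed(0)
x = 5.0 + 3.0 * np.random.randn(200, 4)
gamma, beta = np.ones(4), np.zeros(4)
bn_param = {'mode': 'train'}
out, _ = batchnorm_forward(x, gamma, beta, bn_param)
print(out.mean(axis=0))    # ~0 per feature
print(out.std(axis=0))     # ~1 per feature
bn_param['mode'] = 'test'  # running averages accumulated above are reused here
out_test, _ = batchnorm_forward(x, gamma, beta, bn_param)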
def rectangles_of_one(base): """ Given a base array, generate list of all (point, rectangle) where the rectangle is all ones. """ stack = [] for p, slice in array_to_point_slices(base): stack.append((p, slice)) while stack: p, slice = stack.pop() if all_ones(slice): yield p, slice if can_extend_right(base, p, slice): stack.append((p, extend_right(base, p, slice))) if can_extend_down(base, p, slice): stack.append((p, extend_down(base, p, slice))) if can_extend_right(base, p, slice) and can_extend_down(base, p, slice): stack.append((p, extend_diagonally(base, p, slice)))
30,193
def mkdir_p(path): """ Creates directory with parent directory as needed (similar to 'mkdir -p ${path}'). Does not raise an error if directory exists Inputs: ------- path: type(str) """ try: os.makedirs(path) except OSError as err: if err.errno == errno.EEXIST and os.path.isdir(path): pass else: raise err return
30,194
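# --- Editorial note (hedged) ---
# On Python >= 3.2 the same "create if missing, ignore if present" behavior is
# available directly from the standard library; mkdir_p is mainly useful for
# Python 2 compatibility.
import os
os.makedirs('path/to/nested/dir', exist_ok=True)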
def chunking():
    """ Transforms dataframe of full texts into a list of chunked texts of 2000 tokens each """
    word_list = []
    chunk_list = []
    text_chunks = []

    # split every text in the dataframe into a list of words
    for entry in range(len(df)):
        word_list.append(df.text[entry].split())

    # create chunks of 2000 words
    for entry in word_list:
        chunk_list.append(list(divide_chunks(entry, 2000)))

    # flatten chunk list from a nested list to a flat list
    text_chunks = [item for l in chunk_list for item in l]

    print("Texts have been divided into chunks of 2000 tokens each for easier preprocessing")

    return text_chunks
30,195
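# --- Hedged sketch (editorial addition) ---
# chunking() relies on a divide_chunks helper that is not shown in this
# snippet; a minimal implementation consistent with its usage above would be:
def divide_chunks(tokens, n):
    """Yield successive n-sized chunks from a list of tokens."""
    for i in range(0, len(tokens), n):
        yield tokens[i:i + n]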
def generate_random_string():
    """Create a random 8-character string of lowercase letters and digits for users."""
    chars = ascii_lowercase + digits
    return ''.join(choice(chars) for _ in range(8))
30,196
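# --- Editorial note (hedged) ---
# If the generated string is security-sensitive (tokens, temporary passwords),
# the secrets module is the preferred source of randomness:
from secrets import choice as secure_choice
from string import ascii_lowercase, digits
token = ''.join(secure_choice(ascii_lowercase + digits) for _ in range(8))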
def contains_message(response, message):
    """
    Inspired by django's self.assertRaisesMessage
    Useful for confirming the response contains the provided message.
    """
    if len(response.context['messages']) != 1:
        return False

    full_message = str(list(response.context['messages'])[0])

    return message in full_message
30,197
def definition(): """To be used by UI.""" sql = f""" SELECT c.course_id, c.curriculum_id, cs.course_session_id, description + ' year ' +CAST(session as varchar(2)) as description, CASE WHEN conf.course_id IS NULL THEN 0 ELSE 1 END as linked, 0 as changed FROM ({select_all_and_default(Course)}) as c LEFT JOIN c_course_session cs ON cs.curriculum_id = c.curriculum_id LEFT JOIN c_course_config conf ON conf.course_id = c.course_id AND conf.course_session_id = cs.course_session_id""" return sql
30,198
def exec_psql_cmd(command, host, port, db="template1", tuples_only=True):
    """
    Sets up the execution environment and runs the HAWQ queries
    """
    src_cmd = "export PGPORT={0} && source {1}".format(port, hawq_constants.hawq_greenplum_path_file)
    # psql's -t flag suppresses headers/footers, i.e. prints tuples only
    if tuples_only:
        cmd = src_cmd + " && psql -t -d {0} -c \\\\\\\"{1};\\\\\\\"".format(db, command)
    else:
        cmd = src_cmd + " && psql -d {0} -c \\\\\\\"{1};\\\\\\\"".format(db, command)
    retcode, out, err = exec_ssh_cmd(host, cmd)
    if retcode:
        Logger.error("SQL command execution failed: {0}\nReturncode: {1}\nStdout: {2}\nStderr: {3}".format(cmd, retcode, out, err))
        raise Fail("SQL command execution failed.")
    Logger.info("Output:\n{0}".format(out))
    return retcode, out, err
30,199