content
stringlengths
22
815k
id
int64
0
4.91M
def client():
    """Client fixture: build and return a ``Client`` bound to the module-level BASE_URL."""
    return Client(base_url=BASE_URL)
36,000
def encode_one_hot(s):
    """One-hot encode all characters of the given string.

    Returns a list with one numpy vector of length INPUT_VOCAB_SIZE per
    character of *s*, each vector having a single 1 at the character's
    index (looked up in the module-level ``char_indices`` mapping).
    """
    # Renamed the accumulator from `all`, which shadowed the builtin of the
    # same name for the rest of the function body.
    encoded = []
    for c in s:
        x = np.zeros((INPUT_VOCAB_SIZE))
        index = char_indices[c]
        x[index] = 1
        encoded.append(x)
    return encoded
36,001
def date_to_datetime(date, time_choice='min'):
    """
    Convert date to datetime.

    :param date: date to convert
    :param time_choice: max or min
    :return: timezone-aware datetime at the start ('min') or end ('max') of the day
    """
    # Pick the boundary time of day explicitly instead of via getattr().
    if time_choice == 'min':
        boundary = datetime.datetime.min.time()
    else:
        boundary = datetime.datetime.max.time()
    combined = datetime.datetime.combine(date, boundary)
    return timezone.make_aware(combined, timezone.get_current_timezone())
36,002
def standardize_cell(atoms, cell_type):
    """ Standardize the cell of the atomic structure.

    Parameters:

    atoms: `ase.Atoms`
        Atomic structure.

    cell_type: { 'standard', 'standard_no_symmetries', 'primitive', None}
        Starting from the input cell, creates a standard cell according to same standards
        before the supercell generation.
        `cell_type` = 'standard' creates a standard conventional cell
        (see :py:mod:`ai4materials.utils.utils_crystals.get_conventional_std_cell`);
        'standard_no_symmetries' does the same without using symmetries
        (see :py:mod:`ai4materials.utils.utils_crystals.get_conventional_std_cell_no_sym`);
        'primitive' creates a standard primitive cell
        (see :py:mod:`ai4materials.utils.utils_crystals.get_primitive_std_cell`);
        `None` uses the unit cell as-is.

    Returns:

    `ase.Atoms`
        Atomic structure in the standard cell of the selected type.

    .. codeauthor:: Angelo Ziletti <angelo.ziletti@gmail.com>
    """
    # Guard-clause dispatch: return as soon as the requested cell is built.
    if cell_type is None:
        return atoms
    if cell_type == 'standard':
        return get_conventional_std_cell(atoms)
    if cell_type == 'standard_no_symmetries':
        return get_conventional_std_cell_no_sym(atoms)
    if cell_type == 'primitive':
        return get_primitive_std_cell(atoms)
    raise ValueError("Unrecognized cell_type value.")
36,003
def test_store(benchmark):
    """Benchmark for creating and storing a node, via the full ORM mechanism. """
    # `benchmark` runs get_data_node repeatedly and returns its last result.
    _, node_dict = benchmark(get_data_node)
    # The dict itself is the assertion message, so a failure shows the node state.
    assert node_dict['node'].is_stored, node_dict
36,004
def get_biggan_stats():
    """Return precomputed BigGAN statistics.

    Returns a (center_of_mass, object_size) pair, each given as [y, x]
    pixel coordinates normalized to [0, 1] by dividing by 255.
    """
    scale = 255.
    center_of_mass = [137 / scale, 127 / scale]
    object_size = [213 / scale, 210 / scale]
    return center_of_mass, object_size
36,005
def get_input_var_value(soup, var_id):
    """Get the value from text input variables.

    Use when you see this HTML format:
        <input id="wired_config_var" ... value="value">

    Args:
        soup (soup): soup pagetext that will be searched.
        var_id (string): The id of a var, used to find its value.

    Returns:
        (string): The value of the variable.

    Raises:
        LookupError: If no <input> element with the given id exists.
    """
    try:
        return soup.find('input', {'id': var_id}).get('value')
    except AttributeError as exc:
        # soup.find() returned None, so .get() failed.  Keep the noisy page
        # dump for interactive debugging, but raise with a real message and
        # chain the original cause instead of a bare `raise LookupError`.
        print('\nERROR: <' + var_id + '> not found!\nPagesoup:\n\n', soup)
        raise LookupError('input element with id %r not found' % var_id) from exc
36,006
def _create_file(path): """Opens file in write mode. It also creates intermediate directories if necessary. """ dirname = os.path.dirname(path) if not os.path.exists(dirname): os.makedirs(dirname) return open(path, 'w')
36,007
def get_top(metric: str, limit: int) -> List[List[Any]]:
    """Get top stocks based on metric from sentimentinvestor [Source: sentimentinvestor]

    Parameters
    ----------
    metric : str
        Metric to get top tickers for
    limit : int
        Number of tickes to get

    Returns
    -------
    List[List[Any]]
        List of tickers and scores
    """
    data = sentipy.sort(metric, limit)

    table: List[List[Any]] = []
    for index, stock in enumerate(data):
        if not hasattr(stock, "symbol") or not hasattr(stock, metric):
            # Keep an empty row so downstream ranks stay 1-based and aligned.
            logging.warning("data for stock %s is incomplete, ignoring", index + 1)
            table.append([])
        else:
            # getattr() replaces the non-idiomatic stock.__getattribute__(metric).
            table.append([index + 1, stock.symbol, getattr(stock, metric)])
    return table
36,008
def check_realm_emoji_update(var_name: str, event: Dict[str, object]) -> None:
    """
    The way we send realm emojis is kinda clumsy--we send a dict mapping
    the emoji id to a sub_dict with the fields (including the id). Ideally
    we can streamline this and just send a list of dicts. The clients can
    make a Map as needed.
    """
    _check_realm_emoji_update(var_name, event)
    # Sanity check the redundant encoding: each sub-dict's 'id' must equal
    # the key it is stored under.
    assert isinstance(event["realm_emoji"], dict)
    for k, v in event["realm_emoji"].items():
        assert v["id"] == k
36,009
def test_add_single_entities(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
) -> None:
    """Test the addition of SEPTs"""
    # Two identical tensors: the sum's bounds should simply double.
    tensor1 = SEPT(
        child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
    )
    tensor2 = SEPT(
        child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
    )

    result = tensor2 + tensor1
    assert isinstance(result, SEPT), "Addition of two SEPTs is wrong type"
    assert (
        result.max_vals == 2 * upper_bound
    ).all(), "Addition of two SEPTs results in incorrect max_val"
    assert (
        result.min_vals == 2 * lower_bound
    ).all(), "Addition of two SEPTs results in incorrect min_val"

    # Try with negative values
    tensor3 = SEPT(
        child=reference_data * -1.5,
        entity=ishan,
        max_vals=upper_bound,
        min_vals=lower_bound,
    )

    result = tensor3 + tensor1
    assert isinstance(result, SEPT), "Addition of two SEPTs is wrong type"
    # Bounds of a sum are the elementwise sums of the operands' bounds.
    assert (
        result.max_vals == tensor3.max_vals + tensor1.max_vals
    ).all(), "SEPT + SEPT results in incorrect max_val"
    assert (
        result.min_vals == tensor3.min_vals + tensor1.min_vals
    ).all(), "SEPT + SEPT results in incorrect min_val"
    return None
36,010
def prob_get_expected_after_certain_turn(turns_later: int, turns_remain: int, tiles_expect: int) -> float:
    """The probability of get expected tile after `turns_later` set of turns.

    :param turns_later: Get the expected tile after `turns_after` set of turns
    :param turns_remain: The remaining turns
    :param tiles_expect: The number of expected tiles
    :return: Probability
    """
    # Mahjong-style pool size: 4 tiles per remaining turn plus a 14-tile hand.
    tiles_remain = 4 * turns_remain + 14
    # Order the two falling-factorial bounds so `less` <= `greater`.
    if tiles_expect > turns_later:
        greater = tiles_remain - turns_later
        less = tiles_remain - tiles_expect
    else:
        greater = tiles_remain - tiles_expect
        less = tiles_remain - turns_later
    numerator, denominator = 1, 1
    i, j = less, greater
    # Falling factorial of `less` down to tiles_remain - turns_later - tiles_expect.
    while i > tiles_remain - turns_later - tiles_expect:
        numerator = numerator * i
        i = i - 1
    # NOTE(review): `j` starts at `greater`, so `j > greater` is False on entry
    # and this loop never executes -- `denominator` stays 1 and the result can
    # exceed 1.  Confirm the intended lower bound (e.g. `j > less`?).
    while j > greater:
        denominator = denominator * j
        j = j - 1
    return numerator / denominator
36,011
def do_1D(g=GerryMander(algorithm="brute-force"), rounds=5):
    """Sweep 1-D model configurations and append average scores to results_1D.txt.

    NOTE(review): the default `g` is constructed once at import time and shared
    across all calls -- confirm GerryMander is stateless before relying on it.
    """
    for n_dists in [3, 9, 27]:
        for units_in_dist in [3, 5, 9, 27]:
            for unit_size in [1, 10, 100]:
                m = Model(n_dims=1, unit_size=unit_size, n_dists=n_dists,
                          units_in_dist=units_in_dist)
                avg = run_1D_2D(m, g, D=1, rounds=rounds)
                # Two-part line: configuration, then the averaged score.
                line1 = f"1D dists {n_dists} units_in_dist {units_in_dist} "
                line2 = f"unit_size {unit_size} avg_score {avg}"
                write_to(line1+line2, "results_1D.txt")
36,012
def build(gpu, cudnn, opencv, openmp, force, root):
    """Build darknet."""
    # Thin CLI wrapper: construct a Darknet rooted at `root` and delegate the
    # build with the requested feature flags.
    darknet = pydarknet2.Darknet(root=root)
    darknet.build(gpu=gpu, cudnn=cudnn, opencv=opencv, openmp=openmp, force=force)
36,013
def get_gpcr_calpha_distances(pdb, xtc, gpcr_name, res_dbnum, first_frame=0, last_frame=-1, step=1): """ Load distances between all selected atoms. Parameters ---------- pdb : str File name for the reference file (PDB or GRO format). xtc : str File name for the trajectory (xtc format). gpcr_name : str Name of the GPCR as in the GPCRdb. res_dbnum : list Relative GPCR residue numbers. first_frame : int, default=0 First frame to return of the features. Zero-based. last_frame : int, default=-1 Last frame to return of the features. Zero-based. step : int, default=1 Subsampling step width when reading the frames. Returns ------- feature_names : list of str Names of all C-alpha distances. feature_labels : list of str Labels containing GPCRdb numbering of the residues. features_data : numpy array Data for all C-alpha distances [Å]. """ # Select residues from relative residue numbers resnums, reslabels = select_gpcr_residues(gpcr_name, res_dbnum) # Create the selection string selection = 'name CA and resid' for rn in resnums: selection += ' %i'%rn # Create the GPCRdb distance labels distlabels = [] k = -1 for i in range(len(reslabels)): for j in range(i + 1, len(reslabels)): k += 1 _dl = 'CA DIST: %s - %s'%(reslabels[i], reslabels[j]) distlabels.append(_dl) # Calculate the distances and get the sequential names names, data = get_atom_self_distances(pdb, xtc, selection=selection, first_frame=first_frame, last_frame=last_frame, step=step) return names, distlabels, data
36,014
def main(yumrepomap=None, **kwargs):
    """
    Checks the distribution version and installs yum repo definition files
    that are specific to that distribution.

    :param yumrepomap: list of dicts, each dict contains two or three keys.
        'url': the url to the yum repo definition file
        'dist': the linux distribution to which the repo should be installed.
                one of 'amazon', 'redhat', 'centos', or 'all'. 'all' is a
                special keyword that maps to all distributions.
        'epel_version': optional. match the major version of the epel-release
                        that applies to the system. one of '6' or '7'. if not
                        specified, the repo is installed to all systems.
        Example:
            [ { 'url' : 'url/to/the/yum/repo/definition.repo',
                'dist' : 'amazon' or 'redhat' or 'centos' or 'all',
                'epel_version' : '6' or '7', }, ]
    """
    scriptname = __file__
    print('+' * 80)
    print('Entering script -- {0}'.format(scriptname))
    print('Printing parameters...')
    print(' yumrepomap = {0}'.format(yumrepomap))
    if not yumrepomap:
        print('`yumrepomap` is empty. Nothing to do!')
        return None
    if not isinstance(yumrepomap, list):
        raise SystemError('`yumrepomap` must be a list!')
    # Read first line from /etc/system-release
    release = None
    try:
        with open(name='/etc/system-release', mode='rb') as f:
            release = f.readline().strip()
    except Exception as exc:
        raise SystemError('Could not read /etc/system-release. '
                          'Error: {0}'.format(exc))
    # Search the release file for a match against _supported_dists
    # NOTE(review): the file is opened in 'rb', so `release` is bytes on
    # Python 3 while the regex below expects str -- this module appears to
    # target Python 2; confirm before porting.
    m = _match_supported_dist.search(release.lower())
    if m is None:
        # Release not supported, exit with error
        raise SystemError('Unsupported OS distribution. '
                          'OS must be one of: '
                          '{0}.'.format(', '.join(_supported_dists)))
    # Assign dist,version from the match groups tuple, removing any spaces
    # NOTE(review): str.translate(None, ' ') is the Python 2 signature.
    dist,version = (x.translate(None, ' ') for x in m.groups())
    # Determine epel_version
    epel_version = None
    if 'amazon' == dist:
        epel_version = _amazon_epel_versions.get(version, None)
    else:
        epel_version = version.split('.')[0]
    if epel_version is None:
        raise SystemError('Unsupported OS version! dist = {0}, version = {1}.'
                          .format(dist, version))
    for repo in yumrepomap:
        # Test whether this repo should be installed to this system
        if repo['dist'] in [dist, 'all'] and repo.get('epel_version', 'all') \
                in [epel_version, 'all']:
            # Download the yum repo definition to /etc/yum.repos.d/
            url = repo['url']
            repofile = '/etc/yum.repos.d/{0}'.format(url.split('/')[-1])
            download_file(url, repofile)
    print('{0} complete!'.format(scriptname))
    print('-' * 80)
36,015
def schedule_fetch():
    """Enqueues tasks to fetch instances."""
    # Only managers that already have a URL get a fetch task enqueued.
    for instance_group_manager in models.InstanceGroupManager.query():
        if instance_group_manager.url:
            utilities.enqueue_task('fetch-instances', instance_group_manager.key)
36,016
def detectRegions(image, er_filter1, er_filter2):
    """
    detectRegions(image, er_filter1, er_filter2) -> regions
    """
    # Auto-generated documentation stub -- the real implementation lives in a
    # native extension; this Python body is intentionally empty.
    pass
36,017
def zero_pad1d(inputs, padding=0):
    """Zero padding for 1d tensor

    Args:
    -----------------------------
    inputs : tvm.te.tensor.Tensor
        shape [batch, channel, length]
    padding: (optional:0) int or tuple
    -----------------------------

    Returns:
    -----------------------------
    tvm.te.tensor.Tensor
        shape [batch, channel, padded_length]
    -----------------------------
    """
    # Normalize a scalar pad amount into a (before, after) pair.
    padding = (padding, padding) if isinstance(padding, (int, tvm.tir.IntImm)) else padding
    assert_print(isinstance(padding, tuple), "type(padding)={}".format(type(padding)))
    assert_print(len(padding) == 2)
    padding_zero = tvm.tir.expr.const(0, inputs.dtype)
    batch_size, in_channel, in_len = inputs.shape
    # Output length grows by both pads; interior cells copy the shifted input,
    # border cells take the constant zero.
    return tvm.te.compute(
        (batch_size, in_channel, in_len + padding[0] + padding[1]),
        lambda b, c, l: tvm.te.if_then_else(
            tvm.te.all(l >= padding[0], l < in_len + padding[0]),
            inputs[b, c, l - padding[0]],
            padding_zero
        )
    )
36,018
def gelu(x):
    """Gaussian Error Linear Unit activation (exact erf form), as copied from
    pytorch-pretrained-BERT: x * Phi(x), where Phi is the standard normal CDF."""
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
36,019
def section11():
    """
    # Show Annotations Over Image
    After uploading items and annotations with their metadata, you might want to see some of them and perform visual validation.
    To see only the annotations, use the annotation type *show* option.
    """
    # NOTE(review): intentionally empty -- the docstring doubles as markdown
    # content, presumably extracted by a tutorial/doc generator; confirm before
    # changing its wording.
36,020
def stock_fund_stock_holder(stock: str = "600004") -> pd.DataFrame:
    """
    新浪财经-股本股东-基金持股
    https://vip.stock.finance.sina.com.cn/corp/go.php/vCI_FundStockHolder/stockid/600004.phtml
    :param stock: 股票代码
    :type stock: str
    :return: 新浪财经-股本股东-基金持股
    :rtype: pandas.DataFrame
    """
    # NOTE(review): the docstring links vCI_FundStockHolder but the URL below
    # uses vCI_StockStructure -- confirm which endpoint is intended.
    url = f"https://vip.stock.finance.sina.com.cn/corp/go.php/vCI_StockStructure/stockid/{stock}.phtml"
    r = requests.get(url)
    temp_df = pd.read_html(r.text)[13].iloc[:, :5]
    temp_df.columns = [*range(5)]
    big_df = pd.DataFrame()
    # Rows starting with "截止日期" delimit one report block each; append a
    # sentinel index so the last block is included.
    need_range = temp_df[temp_df.iloc[:, 0].str.find("截止日期") == 0].index.tolist() + [len(temp_df)]
    for i in range(len(need_range) - 1):
        truncated_df = temp_df.iloc[need_range[i]: need_range[i + 1], :]
        truncated_df = truncated_df.dropna(how="all")
        temp_truncated = truncated_df.iloc[2:, :]
        temp_truncated.reset_index(inplace=True, drop=True)
        concat_df = pd.concat([temp_truncated, truncated_df.iloc[0, 1:]], axis=1)
        concat_df.columns = truncated_df.iloc[1, :].tolist() + ["截止日期"]
        # .ffill()/.bfill() replace fillna(method=...), which is deprecated
        # (removed in pandas >= 2.x); behavior is identical.
        concat_df["截止日期"] = concat_df["截止日期"].ffill()
        concat_df["截止日期"] = concat_df["截止日期"].bfill()
        big_df = pd.concat([big_df, concat_df], axis=0, ignore_index=True)
    big_df.dropna(inplace=True)
    big_df.reset_index(inplace=True, drop=True)
    return big_df
36,021
def set_to_available(request, slug, version):
    """
    Updates the video status. Sets the version already encoded to available.
    """
    video = get_object_or_404(Video, slug=slug)
    status, created = VideoStatus.objects.get_or_create(video_slug=slug)
    # Mark the matching rendition available; anything other than
    # 'web'/'cocreate' is treated as the mobile rendition.
    if version == 'web':
        status.web_available = True
    elif version == 'cocreate':
        status.cocreate_available = True
    else:
        status.mobile_available = True
    status.is_encoding = False
    status.encode_duration = Decimal(str(status.encode_duration))
    status.save()
    # If the video is part of a cocreate project, auto-compile the cocreate project.
    try:
        if video.section and video.section.cocreate:
            cocreate_obj = video.section.cocreate
            init_cocreate(cocreate_obj, generate_slug)
    except Section.DoesNotExist:
        pass
    return HttpResponse("OK")
36,022
def run_test(test):
    """ Make the request """
    # Runs one HTTP fixture test: reads <test>/test.ini, serves the fixture
    # file via test_setup(), requests it, and compares status code and body.
    # Returns {file_name: "PASSED"|"FAILED"|"IGNORED"}.
    print(bcolors.HEADER + "Running test: "+ test + bcolors.ENDC)
    results = dict()
    with open(pathlib.Path(test,"test.ini"), "r") as testini:
        testini_json = json.loads(testini.read())
    # An "IGNORE" key in test.ini skips the test entirely.
    if "IGNORE" in testini_json.keys():
        results[testini_json["file_name"]] = "IGNORED"
        return results
    expected_result = (testini_json["result"],testini_json["body"])
    with test_setup(pathlib.Path(test,testini_json["file_name"]), testini_json["file_name"]):
        try:
            r = requests.get("http://127.0.0.1:8080/"+testini_json["test_name"])
        except requests.exceptions.ConnectionError:
            # Server unreachable counts as a failure.
            print ( "Test: " + bcolors.BOLD + test + bcolors.ENDC + " " + bcolors.FAIL + "FAILED" + bcolors.ENDC )
            results[testini_json["file_name"]] = "FAILED"
            if ( "STOPONFAIL" in sys.argv ):
                exit(1)
            return results
        print("Status Code:" + str(r.status_code))
        print("Body:" + r.text)
        print("Expected Status code: "+ str(expected_result[0]))
        print("Expected text: "+ expected_result[1])
        try:
            assert r.status_code == expected_result[0]
            assert r.text == expected_result[1]
            results[testini_json["file_name"]] = "PASSED"
        except AssertionError:
            print ( "Test: " + bcolors.BOLD + test + bcolors.ENDC + " " + bcolors.FAIL + "FAILED" + bcolors.ENDC )
            results[testini_json["file_name"]] = "FAILED"
            if ( "STOPONFAIL" in sys.argv ):
                exit(1)
            return results
    print ( "Test: " + bcolors.BOLD + test + bcolors.ENDC + " " + bcolors.OKGREEN + "PASSED" + bcolors.ENDC )
    return results
36,023
def datetime_to_str(dct, attr_name):
    """Convert datetime object in dict to string."""
    # In-place: only touch the entry when it exists and is not already a string.
    value = dct.get(attr_name)
    if value is not None and not isinstance(value, six.string_types):
        dct[attr_name] = value.isoformat(' ')
36,024
def new_reps_reminder():
    """Send email to reps-mentors listing new subscribers the past month."""
    prev = go_back_n_months(now().date())
    prev_date = prev.strftime('%B %Y')
    reps = UserProfile.objects
    reps_num = reps.count()
    # Reps whose join month equals last month (the year is not checked here).
    new_reps = reps.filter(date_joined_program__month=prev.month)
    email_template = 'emails/new_reps_monthly_reminder.jinja'
    subject = '[Info] New Reps for %s' % prev_date
    recipient = settings.REPS_MENTORS_LIST
    data = {'reps': new_reps, 'date': prev_date, 'reps_num': reps_num}
    # Delivered asynchronously via the celery task.
    send_generic_mail.delay([recipient], subject, email_template, data)
36,025
def generate_solve_c():
    """Generate C source string for the recursive solve() function."""
    piece_letters = 'filnptuvwxyz'
    stack = []        # board-cell offsets of the currently open placement tests
    lines = []
    add = lines.append
    add('#define X_PIECE_NUM {}'.format(piece_letters.index('x')))
    add(""" void solve(char* board, int pos, unsigned int used) { if (used == (1 << NUM_PIECES) - 1) { display_solution(board); return; } while (board[pos]) { pos++; } """)
    indent = ' ' * 4
    # ORIENTATIONS is a compact script: '.' closes a nested if, a lowercase
    # letter emits the "place piece / recurse / unplace" body, and any other
    # character opens a cell-empty test at an encoded board offset.
    for c in ORIENTATIONS:
        if c == '.':
            indent = indent[:-4]
            add(indent + '}')
            stack.pop()
        elif c > 'a':
            # Found a piece that fits: if it's not yet used, place it and
            # solve rest of board recursively
            piece_num = piece_letters.index(c)
            add(indent + 'if ((used & (1<<{})) == 0) {{'.format(piece_num))
            add(indent + ' _num_tries++;')
            add(indent + ' used ^= 1<<{};'.format(piece_num))
            for offset in stack:
                add(indent + ' board[pos + {}] = {!r};'.format(offset, c))
            add(indent + ' solve(board, pos, used);')
            for offset in stack:
                add(indent + ' board[pos + {}] = 0;'.format(offset))
            add(indent + ' used ^= 1<<{};'.format(piece_num))
            add(indent + '}')
            indent = indent[:-4]
            add(indent + '}')
            stack.pop()
        else:
            # Decode the character into an (x, y) board offset relative to pos.
            i = ord(c) - ord('A') + 3
            x, y = i % 8, i // 8
            offset = y * TOTAL_WIDTH + x - 3
            add(indent + 'if (board[pos + {}] == 0) {{'.format(offset))
            indent += ' ' * 4
            stack.append(offset)
    add('}')
    return '\n'.join(lines)
36,026
def process_ax_data(user, ax_data):
    """ Process OpenID AX data. """
    import django_openidconsumer.config
    emails = ax_data.get(django_openidconsumer.config.URI_GROUPS.get('email').get('type_uri', ''), '')
    display_names = ax_data.get(django_openidconsumer.config.URI_GROUPS.get('alias').get('type_uri', ''), '')
    # Only backfill the email when the account has none yet.
    if emails and not user.email.strip():
        user.email = emails[0]
        user.save()
    # Replace an empty or auto-generated display name with the AX alias,
    # falling back to the local part of the email address.
    if not user.profile.display_name.strip() or is_random(user.profile.display_name):
        if display_names:
            user.profile.display_name = display_names[0]
        elif emails:
            user.profile.display_name = emails[0].split('@')[0]
        user.profile.save()
36,027
def get_permission_info(room):
    """
    Fetches permissions about the room, like ban info etc.

    # Return Value
    dict of session_id to current permissions, a dict containing the name
    of the permission mapped to a boolean value.
    """
    # Each raw permission record is enriched before being serialized.
    return jsonify({k: addExtraPermInfo(v) for k, v in room.permissions.items()})
36,028
def ravel(m):
    """ravel(m) returns a 1d array corresponding to all the elements of
    it's argument.
    """
    # Delegates to reshape; (-1,) makes reshape infer the flattened length.
    return reshape(m, (-1,))
36,029
async def test_if_fires_on_zone_appear(hass, calls):
    """Test for firing if entity appears in zone."""
    # Automation fires on geo_location entities from `test_source` entering
    # zone.test; the template records trigger fields joined by " - ".
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "geo_location",
                    "source": "test_source",
                    "zone": "zone.test",
                    "event": "enter",
                },
                "action": {
                    "service": "test.automation",
                    "data_template": {
                        "some": "{{ trigger.%s }}"
                        % "}} - {{ trigger.".join(
                            (
                                "platform",
                                "entity_id",
                                "from_state.state",
                                "to_state.state",
                                "zone.name",
                            )
                        )
                    },
                },
            }
        },
    )
    # Entity appears in zone without previously existing outside the zone.
    context = Context()
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.880586, "longitude": -117.237564, "source": "test_source"},
        context=context,
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].context.parent_id == context.id
    # from_state.state is empty because the entity had no prior state.
    assert (
        calls[0].data["some"]
        == "geo_location - geo_location.entity - - hello - test"
    )
36,030
def parse_index_file(filename):
    """Parse index file.

    Reads *filename*, which contains one integer per line, and returns the
    integers as a list.

    Args:
        filename: Path of the index file.

    Returns:
        list[int]: One entry per line, in file order.
    """
    index = []
    # `with` closes the handle (the original leaked it), and the stray
    # "My additions" debug print has been removed.
    with open(filename) as fh:
        for line in fh:
            index.append(int(line.strip()))
    return index
36,031
def init():
    """Top level command handler."""
    # Builds and returns the click command; options are bound at import time.
    @click.command()
    @click.option('--port', type=int, help='Port to listen.', default=0)
    @click.option('--tun-dev', type=str, required=True,
                  help='Device to use when establishing tunnels.')
    @click.option('--tun-addr', type=str, required=False,
                  help='Local IP address to use when establishing tunnels.')
    @click.option('--tun-cidrs', type=cli.LIST, required=True,
                  help='CIDRs block assigned to the tunnels.')
    @click.option('--policies-dir', type=str, required=True,
                  help='Directory where to look for policies')
    @click.option('--state-dir', type=str, required=False,
                  default='/var/run/warpgate',
                  help='Directory where running state is kept')
    def warpgate_policy_server(port, tun_dev, tun_addr, tun_cidrs,
                               policies_dir, state_dir):
        """Run warpgate policy server."""
        myhostname = socket.getfqdn()
        policy_server.run_server(
            admin_address=myhostname,
            admin_port=port,
            tun_devname=tun_dev,
            # Fall back to resolving our own FQDN when no address is given.
            tun_address=(
                tun_addr if tun_addr else socket.gethostbyname(myhostname)
            ),
            tun_cidrs=tun_cidrs,
            policies_dir=policies_dir,
            state_dir=state_dir
        )
    return warpgate_policy_server
36,032
def check_collision(bird_rect: object, pipes: list, collide_sound: object):
    """
    Return False on a collision with any pipe (playing the collide sound)
    or with the base; True when the bird is still airborne and clear.
    """
    for obstacle in pipes:
        if not bird_rect.colliderect(obstacle):
            continue
        collide_sound.play()
        return False
    # No pipe hit -- the bird survives only while above the base line.
    return bird_rect.bottom < gv.BASE_TOP
36,033
def compute_ranking_scores(ranking_scores, global_ranks_to_save, rank_per_query):
    """
    Compute ranking scores (MRR and MAP) and a bunch of interesting ranks to save
    to file from a list of ranks.

    Args:
        ranking_scores: Ranking scores previously computed
        global_ranks_to_save: Global interesting ranks to save to file
        rank_per_query: List of ranks computed by the model evaluation procedure
    Returns:
        ranking scores (in a dict) and a dict of global interesting ranks to save to file
    """
    # compute binarized (0/1) relevance scores
    rs = [np.asarray([i == rank['ground_truth_label'] for i in rank['rank_labels']], dtype=np.dtype(int))
          for rank in rank_per_query]
    # compute and log MRR and MAP scores
    ranking_scores['MRR'].append(mean_reciprocal_rank(rs))
    ranking_scores['MAP'].append(mean_average_precision(rs))
    # compute a bunch of indexes for interesting queries to save in csv files as examples
    max_rr, max_rr_idx = max_reciprocal_rank(rs)
    min_rr, min_rr_idx = min_reciprocal_rank(rs)
    max_ap, max_ap_idx = max_average_precision(rs)
    min_ap, min_ap_idx = min_average_precision(rs)
    # save indexes (and values) just computed to a dict
    queries_indexes = {
        'max_rr': {'value': max_rr, 'index': max_rr_idx},
        'min_rr': {'value': min_rr, 'index': min_rr_idx},
        'max_ap': {'value': max_ap, 'index': max_ap_idx},
        'min_ap': {'value': min_ap, 'index': min_ap_idx}
    }
    # get interesting queries
    ranks_to_save = {
        key: {
            'value': scores['value'],
            'rank': rank_per_query[scores['index']]
        } for key, scores in queries_indexes.items()
    }
    # if the global ranks to save dict is none set it to the current ranks to save
    if global_ranks_to_save is None:
        global_ranks_to_save = ranks_to_save
    else:
        # otherwise select from the current ranks to save the ones that are more 'interesting' than those
        # already in the global ranks to save dict
        # (max_* entries keep the larger value, min_* entries the smaller).
        if ranks_to_save['max_rr']['value'] > global_ranks_to_save['max_rr']['value']:
            global_ranks_to_save['max_rr']['value'] = ranks_to_save['max_rr']['value']
            global_ranks_to_save['max_rr']['rank'] = ranks_to_save['max_rr']['rank']
        if ranks_to_save['min_rr']['value'] < global_ranks_to_save['min_rr']['value']:
            global_ranks_to_save['min_rr']['value'] = ranks_to_save['min_rr']['value']
            global_ranks_to_save['min_rr']['rank'] = ranks_to_save['min_rr']['rank']
        if ranks_to_save['max_ap']['value'] > global_ranks_to_save['max_ap']['value']:
            global_ranks_to_save['max_ap']['value'] = ranks_to_save['max_ap']['value']
            global_ranks_to_save['max_ap']['rank'] = ranks_to_save['max_ap']['rank']
        if ranks_to_save['min_ap']['value'] < global_ranks_to_save['min_ap']['value']:
            global_ranks_to_save['min_ap']['value'] = ranks_to_save['min_ap']['value']
            global_ranks_to_save['min_ap']['rank'] = ranks_to_save['min_ap']['rank']
    # return computed ranking scores and global ranks to save dict
    return ranking_scores, global_ranks_to_save
36,034
def AirAbsorptionRelaxationFrequencies(T,p,H,T0, p_r):
    """
    Calculates the relaxation frequencies for air absorption conforming to
    ISO 9613-1. Called by :any:`AirAbsorptionCoefficient`.

    Parameters
    ----------
    T : float
        Temperature in K.
    p : float
        Pressure in Pa.
    H : float
        Humidity as molar conentration in percent.
    T0 : float
        Reference temperature in K, 293.15 K.
    p_r : float
        Reference sound pressure in Pa, 101.325*10³ Pa.

    Returns
    -------
    f_rO : float
        Relaxation frequency of oxygen.
    f_rN : float
        Relaxation frequency of nitrogen.
    """
    pressure_ratio = p / p_r
    # Oxygen relaxation (ISO 9613-1 eq. for f_rO).
    f_rO = pressure_ratio * (24 + 4.04 * 10**4 * H * (0.02+H) / (0.391+H))
    # Nitrogen relaxation (ISO 9613-1 eq. for f_rN).
    temperature_term = (T/T0)**(-0.5)
    nitrogen_factor = 9+280*H*np.exp(-4.17*((T/T0)**(-1/3)-1))
    f_rN = pressure_ratio * temperature_term * nitrogen_factor
    return f_rO, f_rN
36,035
def test_plot_raw_ssp_interaction(raw, browser_backend):
    """Test SSP projector UI of plot_raw()."""
    with raw.info._unlock():
        raw.info['lowpass'] = 10.  # allow heavy decim during plotting
    # apply some (not all) projs to test our proj UI (greyed out applied projs)
    projs = raw.info['projs'][-2:]
    raw.del_proj([-2, -1])
    raw.apply_proj()
    raw.add_proj(projs)
    fig = raw.plot()
    # open SSP window
    fig._fake_keypress('j')
    assert browser_backend._get_n_figs() == 2
    ssp_fig = fig.mne.fig_proj
    assert _proj_status(ssp_fig, browser_backend) == [True, True, True]
    # this should have no effect (proj 0 is already applied)
    assert _proj_label(ssp_fig, browser_backend)[0].endswith('(already applied)')
    _proj_click(0, fig, browser_backend)
    assert _proj_status(ssp_fig, browser_backend) == [True, True, True]
    # this should work (proj 1 not applied)
    _proj_click(1, fig, browser_backend)
    assert _proj_status(ssp_fig, browser_backend) == [True, False, True]
    # turn it back on
    _proj_click(1, fig, browser_backend)
    assert _proj_status(ssp_fig, browser_backend) == [True, True, True]
    # toggle all off (button axes need both press and release)
    _proj_click_all(fig, browser_backend)
    assert _proj_status(ssp_fig, browser_backend) == [True, False, False]
    # 'J' toggles the non-applied projectors together.
    fig._fake_keypress('J')
    assert _proj_status(ssp_fig, browser_backend) == [True, True, True]
    fig._fake_keypress('J')
    assert _proj_status(ssp_fig, browser_backend) == [True, False, False]
    # turn all on
    _proj_click_all(fig, browser_backend)
    assert fig.mne.projector is not None  # on
    assert _proj_status(ssp_fig, browser_backend) == [True, True, True]
36,036
def set_edit_mode(request, state):
    """
    Enable the edit mode; placeholders and plugins will be wrapped in a
    ``<div>`` that exposes metadata for frontend editing.
    """
    # Stored as a private flag on the request object, coerced to bool.
    request._fluent_contents_edit_mode = bool(state)
36,037
def table_from_bool(ind1, ind2):
    """
    Given two boolean arrays, return the 2x2 contingency table

    ind1, ind2 : array-like
        Arrays of the same length
    """
    # Cell order: [both true, only ind1, only ind2, neither].
    both = sum(ind1 & ind2)
    only_first = sum(ind1 & ~ind2)
    only_second = sum(~ind1 & ind2)
    neither = sum(~ind1 & ~ind2)
    return [both, only_first, only_second, neither]
36,038
def test(device):
    """
    Test if get_inlet_pressure_for_gain_correction() returns the value
    previously set with set_inlet_pressure_for_gain_correction().
    """
    # The setter is expected to return nothing; the value round-trips as float.
    result = device.set_inlet_pressure_for_gain_correction(2.345)
    assert result is None
    result = device.get_inlet_pressure_for_gain_correction()
    assert type(result) is float
    assert result == pytest.approx(2.345, abs=0.0001)
    # An integer input should round-trip as the exact float 1.0.
    result = device.set_inlet_pressure_for_gain_correction(1)
    assert result is None
    result = device.get_inlet_pressure_for_gain_correction()
    assert type(result) is float
    assert result == 1.0
36,039
def getHWBeatLEDState(*args, **kwargs):
    """ get HWBeatLEDState """
    # Auto-generated API stub; the implementation is provided elsewhere
    # (native/binding layer), so this body is intentionally empty.
    pass
36,040
def preprocess_skills(month_kpi_skills: pd.DataFrame, quarter_kpi_skills: pd.DataFrame) -> pd.DataFrame:
    """Merge monthly and quarterly KPI tables for FEA ("ВЭД") employees.

    Takes two DataFrames:
    - KPI data for FEA employees for the last month
    - KPI data for FEA employees for the last quarter

    Returns a single DataFrame merged over both tables, with extra features:
    the ratios of work actually done to each employee's norms.

    :param month_kpi_skills: pd.DataFrame
    :param quarter_kpi_skills: pd.DataFrame
    :return: pd.DataFrame
    """
    month_kpi_skills.fillna(0, inplace=True)
    quarter_kpi_skills.fillna(0, inplace=True)
    # Bring the monthly skill data under the shared column naming.
    month_kpi_skills.columns = month_skills_columns
    quarter_kpi_skills.columns = quarter_skills_columns
    # Both tables must cover the same set of employees.
    assert sorted(month_kpi_skills['ВЭД'].unique()) == sorted(quarter_kpi_skills['ВЭД'].unique()), 'В таблицах KPI за месяц из за квартал содержатся разные ВЭД'
    kpi_skills = month_kpi_skills.merge(quarter_kpi_skills, on='ВЭД', how='inner')
    # Ratios between the 3-month results and the corresponding norms.
    kpi_skills['Звонки / Норма'] = kpi_skills['Звонки (3 мес)'] / kpi_skills['Звонки норма (3 мес)']
    kpi_skills['Обработанные заявки / Норма'] = kpi_skills['Обработанные заявки (3 мес)'] / kpi_skills['Норма 88% (3 мес)']
    kpi_skills['48 часов / Норма'] = kpi_skills['Обработка не позднее 48 часов (3 мес)'] / kpi_skills['Норма 85% (3 мес)']
    kpi_skills['Полнота сбора / Норма'] = kpi_skills['Полнота сбора (3 мес)'] / kpi_skills['Норма 95% (3 мес)']
    kpi_skills['Встречи / Норма'] = kpi_skills['Встречи (3 мес)'] / kpi_skills['Встречи норма (3 мес)']
    kpi_skills.fillna(0.0, inplace=True)  # Fill the NaNs produced by division by zero.
    kpi_skills.drop(['Звонки норма', 'Встречи норма', 'Звонки норма (3 мес)', 'Встречи норма (3 мес)'], axis=1, inplace=True)
    kpi_skills = kpi_skills.reindex(columns=skills_final_columns)
    return kpi_skills
36,041
def bias_col_spline(im, overscan, dymin=5, dymax=2, statistic=np.mean, **kwargs):
    """Compute the offset by fitting a spline to the mean of each row in the
    serial overscan region.

    Args:
        im: A masked (lsst.afw.image.imageLib.MaskedImageF) or unmasked
            (lsst.afw.image.imageLib.ImageF) afw image.
        overscan: A bounding box for the parallel overscan region.
        dymin: The number of rows to skip at the beginning of the parallel
            overscan region.
        dymax: The number of rows to skip at the end of the parallel
            overscan region.
        statistic: The statistic to use to calculate the offset for each
            columns.

    Keyword Arguments:
        k: The degree of the spline fit. The default is: 3.
        s: The amount of smoothing to be applied to the fit. The default
            is: 18000.
        t: The number of knots. If None, finds the number of knots to use
            for a given smoothing factor, s. The default is: None.

    Returns:
        A tuple (t,c,k) containing the vector of knots, the B-spline
        coefficients, and the degree of the spline.
    """
    try:
        imarr = im.Factory(im, overscan).getArray()
    except AttributeError:
        # Dealing with a MaskedImage
        imarr = im.Factory(im, overscan).getImage().getArray()
    ny, nx = imarr.shape
    cols = np.arange(nx)
    # Per-column statistic over the trimmed overscan rows.
    values = np.array([statistic(imarr[dymin:-dymax,j]) for j in cols])
    rms = 7  # Expected read noise per pixel
    weights = np.ones(nx) * (rms / np.sqrt(nx))
    return interpolate.splrep(cols, values, w=1/weights, k=kwargs.get('k', 3),
                              s=kwargs.get('s', 18000), t=kwargs.get('t', None))
36,042
def read_config():
    """
    Returns the decoded config data in 'db_config.json'

    Will return the decoded config file if 'db_config.json' exists and is a
    valid JSON format. Otherwise, it will return a False.
    """
    # Check if file exists
    if not os.path.isfile('db_config.json'):
        return False
    # Check if file is a valid JSON format.
    try:
        with open('db_config.json') as json_data:
            config = json.load(json_data)
    except ValueError:
        # Use print as a function so the module also runs on Python 3
        # (the original used the Python 2 print statement).
        print('[WARN] Error Decoding config.json')
        return False
    return config
36,043
def get_sub_title_from_series(ser: pandas.Series, decimals: int = 3) -> str:
    """Build a subtitle string with the mean, standard deviation and count of ``ser``.

    Args:
        ser: series to summarize.
        decimals: number of decimal places for the rounded statistics.

    Returns:
        A string of the form ``"μ=<mean>, σ=<std>, N=<count>"``.
    """
    mean = round(ser.mean(), decimals)
    std = round(ser.std(), decimals)
    # Fixed: 'σ' (not 'α') is the conventional symbol for standard deviation,
    # matching the docstring's description.
    sub_title = f"μ={mean}, σ={std}, N={len(ser)}"
    return sub_title
36,044
def pk_init():
    """Initialize the scheduled jobs for every configured PK battle.

    For each PK entry from settings that has not already been started,
    schedule the periodic broadcast job; for 'increase'-type battles,
    additionally schedule snapshot jobs that record the fund amount at the
    configured time spots.
    """
    for pk_data in setting.pk_datas():
        if pk_data['title'] in pk_mission_started:
            continue
        if pk_data['battle_config']['type'] == 'increase':
            if time.mktime(time.strptime(pk_data['start_time'], '%Y-%m-%d %H:%M:%S')) > time.time():
                # Battle has not started yet: store the zero-state snapshot first.
                fund.pk.cache_pk_amount(pk_data)
            # Time spots at which increments are measured.
            time_list = pk_data['battle_config']['time_spot']
            for time_spot in time_list:
                sched.add_job(fund.pk.cache_pk_amount, 'date', run_date=time_spot, args=[pk_data])
        pk_interval = int(setting.read_config('pk', 'interval'))
        logger.info('对%s项目的PK播报将于%s启动,每%d秒钟一次', pk_data['title'], pk_data['start_time'], pk_interval)
        # Periodic broadcast between start and end of the battle.
        sched.add_job(
            send_pk_message, 'interval', seconds=pk_interval,
            start_date=pk_data['start_time'], end_date=pk_data['end_time'], args=[pk_data]
        )
        # Remember the title so the jobs are not scheduled twice.
        pk_mission_started.append(pk_data['title'])
36,045
def deploy(branch='release', path='/readux.io/readux'):
    """Run the full remote deployment pipeline.

    :param branch: Git branch to clone, defaults to 'release'
    :type branch: str, optional
    :param path: root deployment path on the remote host
    :type path: str, optional
    """
    version = datetime.now().strftime("%Y%m%d%H%M%S")
    options = {
        'REPO_URL': 'https://github.com/ecds/readux.git',
        'ROOT_PATH': path,
        'VENV_PATH': f'{path}/venv',
        'RELEASE_PATH': f'{path}/releases',
        'VERSION': version,
    }
    version_folder = f"{options['RELEASE_PATH']}/{version}"
    run(f'mkdir -p {version_folder}')
    with cd(version_folder):
        _get_latest_source(branch, options)
        _update_virtualenv(options)
        _link_settings(options)
        _create_static_media_symlinks(options)
        _update_static_files(options)
        _update_database(options)
        _update_symlink(options)
        _restart_webserver()
        _restart_background_tasks(options)
        _clean_old_builds(options)
36,046
def write_pptables(f, dimension, captionStringFormat):
    """Write the HTML section with pptables images for one dimension.

    Args:
        f: writable file-like object receiving the HTML.
        dimension: problem dimension the tables refer to.
        captionStringFormat: format string with one ``%s`` placeholder for
            the caption text.
    """
    # Fixed: the two concatenated literals previously ran together as
    # "divided bythe best ERT" (missing trailing space).
    headerERT = 'Table showing the ERT in number of function evaluations divided by ' \
                'the best ERT measured during BBOB-2009 for dimension %d' % dimension
    f.write("\n<H2> %s </H2>\n" % headerERT)
    for ifun in range(1, 25):
        f.write("\n<!--pptablesf%03d%02dDHtml-->\n" % (ifun, dimension))
    if genericsettings.isTab:
        # Pick the legend variant matching the expensive-setting flag.
        key = 'bbobpptablesmanylegendexpensive' if genericsettings.isExpensive else 'bbobpptablesmanylegend'
        f.write(captionStringFormat % htmldesc.getValue('##' + key + str(dimension) + '##'))
36,047
def atand2(delta_y: ArrayLike, delta_x: ArrayLike) -> ArrayLike:
    """Two-argument arctangent, returned in degrees.

    Parameters
    ----------
    delta_y, delta_x
        Ordinate and abscissa offsets, as accepted by ``numpy.arctan2``.

    Returns
    -------
    float
        The angle atan2(delta_y, delta_x), in degrees.
    """
    angle_rad = numpy.arctan2(delta_y, delta_x)
    return numpy.degrees(angle_rad)
36,048
def adjust_doy_calendar(
    source: xr.DataArray, target: Union[xr.DataArray, xr.Dataset]
) -> xr.DataArray:
    """Re-map a `dayofyear`-indexed array onto another calendar's range.

    Interpolates an array defined over one `dayofyear` range (e.g. 1 to 360)
    onto the `dayofyear` range implied by the target's calendar (e.g. 1 to
    365). If the ranges already match, the source is returned unchanged.

    Parameters
    ----------
    source : xr.DataArray
        Array with a `dayofyear` coordinate.
    target : xr.DataArray or xr.Dataset
        Array with a `time` coordinate whose calendar defines the new range.

    Returns
    -------
    xr.DataArray
        Source array interpolated over the target `dayofyear` range.
    """
    source_max = source.dayofyear.max()
    target_max = max_doy[get_calendar(target)]
    if source_max != target_max:
        return _interpolate_doy_calendar(source, target_max)
    return source
36,049
def _maintainer_change(change_list: ChangeList, old: Data, new: Data): """ Appends a summary of a change to a dataset's maintainer field between two versions (old and new) to change_list. """ # if the old dataset had a maintainer if old.get("maintainer") and new.get("maintainer"): change_list.append( { "type": "maintainer", "method": "change", "pkg_id": new.get("id"), "title": new.get("title"), "new_maintainer": new["maintainer"], "old_maintainer": old["maintainer"], } ) # if they removed the maintainer elif not new.get("maintainer"): change_list.append( { "type": "maintainer", "pkg_id": new.get("id"), "title": new.get("title"), "method": "remove", } ) # if there wasn't one there before else: change_list.append( { "type": "maintainer", "pkg_id": new.get("id"), "title": new.get("title"), "new_maintainer": new.get("maintainer"), "method": "add", } )
36,050
def iou(box_a, box_b):
    """Return intersection area / union area for two boxes given as two corner points."""
    assert area(box_a) > 0
    assert area(box_b) > 0
    upper_left = [max(box_a[0][0], box_b[0][0]), max(box_a[0][1], box_b[0][1])]
    lower_right = [min(box_a[1][0], box_b[1][0]), min(box_a[1][1], box_b[1][1])]
    overlap = area(np.array([upper_left, lower_right]))
    union = area(box_a) + area(box_b) - overlap
    return overlap / union
36,051
def view_routes_asa() -> None:
    """Print every non-empty routing table from the module-level database.

    Iterates the tables discovered by ``get_tables_names()`` (populating the
    module-level ``route_tables``), pretty-prints each row via the shared
    ``cursor``, and ends each table with its route count.
    """
    get_tables_names()
    for table in route_tables:
        # Table names come from get_tables_names(), not user input; str.format
        # is used because sqlite placeholders cannot bind identifiers.
        get_table_rows = [row for row in cursor.execute('SELECT count(*) FROM {}'.format(table))]
        if get_table_rows[0][0] == 0:
            # Skip empty tables entirely.
            continue
        else:
            print("\nRouting Table: " + table + "\n")
            print("__________________" + "\n")
            query = cursor.execute('SELECT * FROM {}'.format(table))
            for row in query:
                print(
                    "Context: {}\nPrefix: {}\nProtocol: {}\nAdmin-Distance: {}\nHop(s): {}\nOut-Interface(s): {}\n"
                    "Metric(s): {}\nTag: {}\n"
                    .format(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7]))
            print("Total Routes: %s" % get_table_rows[0][0])
36,052
def poll():
    """Flush the send buffer and process outstanding CA background activity.

    .. note:: equivalent to ``pend_event(1e-12)``
    """
    return ECA(libca.ca_pend_event(1e-12))
36,053
def TopLevelWindow_GetDefaultSize(*args):
    """TopLevelWindow_GetDefaultSize() -> Size

    SWIG-generated module-level wrapper that forwards to the C extension
    implementation in ``_windows_``. Left byte-identical because this
    binding code is auto-generated.
    """
    return _windows_.TopLevelWindow_GetDefaultSize(*args)
36,054
def human_size(numbytes):
    """Render a byte count as a human-readable string, e.g. ``"1.500KiB"``."""
    KB = 1024
    MB = 1024 * KB
    GB = 1024 * MB
    TB = 1024 * GB
    # Walk the units from largest to smallest and use the first that fits.
    for threshold, unit in ((TB, "TiB"), (GB, "GiB"), (MB, "MiB"), (KB, "KiB")):
        if numbytes >= threshold:
            return "%.3f%s" % (numbytes / threshold, unit)
    return "%.3f%s" % (numbytes, "B")
36,055
def parse_args():
    """
    Parse command-line arguments to train and evaluate a multimodal network
    for activity recognition on MM-Fit.
    :return: Populated namespace.
    """
    parser = argparse.ArgumentParser(description='MM-Fit Demo')
    # Dataset location and evaluation split.
    parser.add_argument('--data', type=str, default='mm-fit/',
                        help='location of the dataset')
    parser.add_argument('--unseen_test_set', default=False, action='store_true',
                        help='if set to true the unseen test set is used for evaluation')
    # Optimization schedule.
    parser.add_argument('--epochs', type=int, default=25,
                        help='number of training epochs')
    parser.add_argument('--lr', type=float, default=1e-3,
                        help='learning rate')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='batch size')
    parser.add_argument('--eval_every', type=int, default=1,
                        help='how often to eval model (in epochs)')
    parser.add_argument('--early_stop', type=int, default=20,
                        help='stop after this number of epoch if the validation loss did not improve')
    parser.add_argument('--checkpoint', type=int, default=10,
                        help='how often to checkpoint model parameters (epochs)')
    # Optional pretrained weights.
    parser.add_argument('--multimodal_ae_wp', type=str, default='',
                        help='file path for the weights of the multimodal autoencoder part of the model')
    parser.add_argument('--model_wp', type=str, default='',
                        help='file path for weights of the full model')
    # Windowing / resampling of the input signals.
    parser.add_argument('--window_length', type=int, default=5,
                        help='length of data window in seconds')
    parser.add_argument('--window_stride', type=float, default=0.2,
                        help='length of window stride in seconds')
    parser.add_argument('--target_sensor_sampling_rate', type=float, default=50,
                        help='Sampling rate of sensor input signal (Hz)')
    parser.add_argument('--skeleton_sampling_rate', type=float, default=30,
                        help='sampling rate of input skeleton data (Hz)')
    # Network architecture.
    parser.add_argument('--layers', type=int, default=3,
                        help='number of FC layers')
    parser.add_argument('--hidden_units', type=int, default=200,
                        help='number of hidden units')
    parser.add_argument('--ae_layers', type=int, default=3,
                        help='number of autoencoder FC layers')
    parser.add_argument('--ae_hidden_units', type=int, default=200,
                        help='number of autoencoder hidden units')
    parser.add_argument('--embedding_units', type=int, default=100,
                        help='number of hidden units')
    parser.add_argument('--dropout', type=float, default=0.0,
                        help='dropout percentage')
    parser.add_argument('--ae_dropout', type=float, default=0.0,
                        help='multimodal autoencoder dropout percentage')
    parser.add_argument('--num_classes', type=int, default=None,
                        help='number of output classes')
    # Experiment bookkeeping.
    parser.add_argument('--name', type=str, default='mmfit_demo_' + str(int(time.time())),
                        help='name of experiment')
    parser.add_argument('--output', type=str, default='output/',
                        help='path to output folder')
    return parser.parse_args()
36,056
def get_argument_defaults(node: ast.arguments) -> typing.Iterable:
    """Yield a default value (or ``inspect.Parameter.empty``) for every
    argument of an ``ast.arguments`` node, in declaration order."""
    n_positional = len(node.posonlyargs) + len(node.args)
    # Positional defaults are right-aligned; pad the list to full length.
    for default in pad_defaults_list(node.defaults, n_positional):
        yield get_default_value(default)
    if node.vararg is not None:
        # *args never carries a default.
        yield inspect.Parameter.empty
    for default in node.kw_defaults:
        yield get_default_value(default)
    if node.kwarg is not None:
        # **kwargs never carries a default.
        yield inspect.Parameter.empty
36,057
def node_definitions(
    id_fetcher: Callable[[str, GraphQLResolveInfo], Any],
    type_resolver: GraphQLTypeResolver = None,
) -> GraphQLNodeDefinitions:
    """
    Given a function to map from an ID to an underlying object, and a function
    to map from an underlying object to the concrete GraphQLObjectType it
    corresponds to, constructs a `Node` interface that objects can implement,
    and a field object to be used as a `node` root field.

    If the type_resolver is omitted, object resolution on the interface will be
    handled with the `is_type_of` method on object types, as with any GraphQL
    interface without a provided `resolve_type` method.
    """
    # The shared Node interface: anything carrying a globally unique ID.
    node_interface = GraphQLInterfaceType(
        "Node",
        description="An object with an ID",
        # Lazy fields mapping, so the type may be forward-referenced.
        fields=lambda: {
            "id": GraphQLField(
                GraphQLNonNull(GraphQLID), description="The id of the object."
            )
        },
        resolve_type=type_resolver,
    )

    # The `id` lambda parameter intentionally shadows the builtin.
    # noinspection PyShadowingBuiltins
    node_field = GraphQLField(
        node_interface,
        description="Fetches an object given its ID",
        args={
            "id": GraphQLArgument(
                GraphQLNonNull(GraphQLID), description="The ID of an object"
            )
        },
        # Delegate the actual lookup to the caller-supplied id_fetcher.
        resolve=lambda _obj, info, id: id_fetcher(id, info),
    )

    # Plural variant: resolves a list of IDs, preserving their order.
    nodes_field = GraphQLField(
        GraphQLNonNull(GraphQLList(node_interface)),
        args={
            "ids": GraphQLArgument(
                GraphQLNonNull(GraphQLList(GraphQLNonNull(GraphQLID))),
                description="The IDs of objects",
            )
        },
        resolve=lambda _obj, info, ids: [id_fetcher(id_, info) for id_ in ids],
    )

    return GraphQLNodeDefinitions(node_interface, node_field, nodes_field)
36,058
def create_viewer_node(scene, preceeding_node_name, preceeding_channel_name):
    """
    Debug helper: attach a compositor Viewer node to the named output channel
    of an existing node so intermediate results can be visualized.

    :param scene: Blender scene whose compositor node tree is edited.
    :param preceeding_node_name: name of the node to tap.
    :param preceeding_channel_name: name of that node's output channel.
    :return: None
    """
    logger.info('create_viewer_node: ...')
    tree = scene.node_tree
    source_node = tree.nodes.get(preceeding_node_name)
    viewer = tree.nodes.new('CompositorNodeViewer')
    tree.links.new(source_node.outputs[preceeding_channel_name], viewer.inputs['Image'])
    logger.info('create_viewer_node: Done')
36,059
def get_image_from_request(request):
    """
    Extract an image from a GET or POST request.

    POST requests may carry the image as multipart form-data (field
    ``image``) or as JSON with an ``imageUrl`` or ``imageB64`` entry.
    GET requests must provide an ``imageUrl`` query parameter.

    Returns a tuple ``(ok: bool, error: str, image or None)``.
    """
    if request.method == 'POST':
        content_type = parse_content_type(request)
        if content_type == "multipart/form-data":
            if 'image' not in request.files:
                return (False, "No image provided in form-data request", None)
            try:
                image = read_image_from_stream(request.files['image'])
                return (True, '', image)
            except Exception:  # Fixed: bare except also caught SystemExit/KeyboardInterrupt
                return (False, "Unable to read uploaded file", None)
        elif content_type == 'application/json':
            try:
                input_params = request.get_json(True)
            except Exception:
                return (False, 'No valid JSON present', None)
            if 'imageUrl' in input_params:
                try:
                    image = read_image_from_url(input_params['imageUrl'])
                    return (True, '', image)
                except Exception:
                    return (False, 'Unable to read image from url', None)
            elif 'imageB64' in input_params:
                try:
                    image = read_image_b64(input_params['imageB64'])
                    return (True, '', image)
                except Exception:
                    return (False, 'Unable to read base 64 image', None)
            else:
                return (False, 'Image url or base 64 string not informed', None)
        else:
            # Fixed: previously fell through and returned None implicitly,
            # breaking the documented (ok, error, image) contract.
            return (False, 'Unsupported content type', None)
    elif request.method == 'GET':
        image_url = request.args.get('imageUrl')
        if image_url is None:  # Fixed: was '== None'
            return (False, 'Image url not informed', None)
        try:
            image = read_image_from_url(image_url)
            return (True, '', image)
        except Exception:
            return (False, 'Unable to read image from url', None)
    # Fixed: previously returned None implicitly for other HTTP methods.
    return (False, 'Unsupported HTTP method', None)
36,060
def con_external():
    """Connection fixture for the external OmniSciDB server.

    Returns
    -------
    ibis.omniscidb.OmniSciDBClient
    """
    # Connection parameters come from the module-level EXT_OMNISCIDB_* constants.
    return ibis.omniscidb.connect(
        user=EXT_OMNISCIDB_USER,
        password=EXT_OMNISCIDB_PASSWORD,
        host=EXT_OMNISCIDB_HOST,
        port=EXT_OMNISCIDB_PORT,
        database=EXT_OMNISCIDB_DATABASE,
        protocol=EXT_OMNISCIDB_PROTOCOL,
    )
36,061
def parse_risk(data_byte_d):
    """Parse and arrange risk lists.

    Parameters
    ----------
    data_byte_d : object
        Decoded StringIO object.

    Returns
    -------
    neocc_lst : *pandas.Series* or *pandas.DataFrame*
        Data frame with risk list data parsed.
    """
    # Read data as csv
    neocc_lst = pd.read_csv(data_byte_d, sep='|', skiprows=[3],
                            header=2)
    # Remove redundant white spaces
    neocc_lst.columns = neocc_lst.columns.str.strip()
    neocc_lst = neocc_lst.replace(r'\s+', ' ', regex=True)
    df_obj = neocc_lst.select_dtypes(['object'])
    neocc_lst[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())
    # Rename columns
    col_dict = {"Num/des. Name": 'Object Name',
                "m": 'Diameter in m',
                "Vel km/s": 'Vel in km/s'}
    neocc_lst.rename(columns=col_dict, inplace=True)
    # Remove last column
    neocc_lst = neocc_lst.drop(neocc_lst.columns[-1], axis=1)
    # Convert column with date to datetime variable
    neocc_lst['Date/Time'] = pd.to_datetime(neocc_lst['Date/Time'])
    # Split Years into 2 columns to avoid dashed between integers
    # Check dataframe is not empty (for special list)
    if len(neocc_lst.index.values) != 0:
        neocc_lst[['First year', 'Last year']] = neocc_lst['Years']\
            .str.split("-", expand=True)\
            .astype(int)
        # Drop split column
        neocc_lst = neocc_lst.drop(['Years'], axis=1)
        # Reorder columns
        neocc_lst = neocc_lst[['Object Name', 'Diameter in m', '*=Y',
                               'Date/Time', 'IP max', 'PS max', 'TS',
                               'Vel in km/s', 'First year', 'Last year',
                               'IP cum', 'PS cum']]
    # Adding metadata
    # NOTE(review): attaching an ad-hoc ``.help`` attribute to a DataFrame is
    # not preserved across pandas operations/copies — verify downstream use.
    neocc_lst.help = ('Risk lists contain a data frame with the '
                      'following information:\n'
                      '-Object Name: name of the NEA\n'
                      '-Diamater in m: approximate diameter in meters\n'
                      '-*=Y: recording an asterisk if the value has '
                      'been estimated from the absolute magnitude\n'
                      '-Date/Time: predicted impact date in datetime '
                      'format\n'
                      '-IP max: Maximum Impact Probability\n'
                      '-PS max: Palermo scale rating\n'
                      '-Vel in km/s: Impact velocity at atmospheric entry'
                      ' in km/s\n'
                      '-First year: first year of possible impacts\n'
                      '-Last year: last year of possible impacts\n'
                      '-IP cum: Cumulative Impact Probability\n'
                      '-PS cum: Cumulative Palermo Scale')
    return neocc_lst
36,062
def prime_site_stats_cache():
    """
    Collect stats for site and prime the cache.

    Run this as a scheduled task to improve performance: each cache key is
    recomputed from the corresponding model count so request handlers can
    read the numbers without hitting the database.
    """
    logging.debug("Starting scheduled cache priming...")
    cache.set("site_total_communities", social_models.GamerCommunity.objects.count())
    cache.set("site_total_gamers", social_models.GamerProfile.objects.count())
    # Cancelled and closed games are not counted as "active".
    cache.set(
        "site_total_active_games",
        game_models.GamePosting.objects.exclude(
            status__in=["cancel", "closed"]
        ).count(),
    )
    cache.set(
        "site_total_completed_sessions",
        game_models.GameSession.objects.filter(status="complete").count(),
    )
    cache.set("site_total_systems", catalog_models.GameSystem.objects.count())
    cache.set("site_total_tracked_editions", catalog_models.GameEdition.objects.count())
    cache.set("site_total_publishers", catalog_models.GamePublisher.objects.count())
    cache.set("site_total_modules", catalog_models.PublishedModule.objects.count())
    cache.set("site_total_sourcebooks", catalog_models.SourceBook.objects.count())
    # Also refresh the cached Discord community links.
    fetch_or_set_discord_comm_links()
    logging.debug("Finished cache priming.")
36,063
def plot_step_w_variable_station_filters(df, df_stations=None, options=None):
    """Build a PlotStepWithControls chart for ``df`` and return its plot.

    :param df: primary data frame to plot.
    :param df_stations: optional station data frame used for the filters.
    :param options: optional plot configuration.
    """
    return PlotStepWithControls(df, df_stations, options).plot()
36,064
def pick_random_element(count):
    """
    Sample the next state from empirical transition counts.

    Parameters
    ----------
    count: {string: int}
        Mapping from each candidate next state to its observed count.

    Returns
    -------
    A key of ``count``, drawn with probability proportional to its count.
    """
    states = list(count.keys())
    cumulative = np.cumsum(np.array(list(count.values())))
    # Uniform draw over [0, total), located in the cumulative histogram.
    draw = np.random.rand() * cumulative[-1]
    return states[np.searchsorted(cumulative, draw)]
36,065
def do_flake8() -> str:
    """Run flake8 with the project config and return a success message.

    Propagates whatever ``execute`` raises if the linter fails.
    """
    check_command_exists("flake8")
    cmd = prepinform_simple(f"flake8 --config {settings.CONFIG_FOLDER}/.flake8")
    execute(*cmd.split(" "))
    return "flake 8 succeeded"
36,066
def _download_repo(repo_path):
    """
    Download Google's repo launcher to ``repo_path`` and mark it executable.
    """
    logger.info('Fetching repo')
    repo_url = CONSTANTS['repo']['url']
    response = requests.get(repo_url)
    if response.status_code != 200:
        raise CommandError('Unable to download repo from %s' % repo_url)

    with open(repo_path, 'wb') as fh:
        fh.write(response.content)

    logger.success('Fetched repo')

    # Add the owner execute bit so the launcher can be run directly.
    mode = os.stat(repo_path).st_mode
    os.chmod(repo_path, mode | stat.S_IEXEC)
36,067
def counter_current_heat_exchange(s0_in, s1_in, s0_out, s1_out, dT, T_lim0=None, T_lim1=None, phase0=None, phase1=None, H_lim0=None, H_lim1=None):
    """
    Allow outlet streams to exchange heat until either the given temperature
    limits or the pinch temperature and return the total heat transfer
    [Q; in kJ/hr].

    The hotter of ``s0_in``/``s1_in`` is treated as the hot stream; ``dT`` is
    the minimum temperature approach defining the pinch on each side.
    ``T_lim*``/``H_lim*`` optionally cap the exchange per stream, and
    ``phase*`` (when set) skips the VLE flash for that stream's outlet.
    Outlet streams are modified in place.
    """
    # Counter current heat exchange setup:
    # First find the hot inlet, cold inlet, hot outlet and cold outlet streams
    # along with the maximum temperature approaches for the hotside and the
    # cold side.
    if s0_in.T > s1_in.T:
        s_hot_in = s0_in
        s_cold_in = s1_in
        s_hot_out = s0_out
        s_cold_out = s1_out
        T_lim_coldside = T_lim0
        T_lim_hotside = T_lim1
        H_lim_coldside = H_lim0
        H_lim_hotside = H_lim1
        phase_coldside = phase0
        phase_hotside = phase1
    else:
        s_cold_in = s0_in
        s_hot_in = s1_in
        s_cold_out = s0_out
        s_hot_out = s1_out
        T_lim_hotside = T_lim0
        T_lim_coldside = T_lim1
        H_lim_hotside = H_lim0
        H_lim_coldside = H_lim1
        phase_hotside = phase0
        phase_coldside = phase1
    if (s_hot_in.T - s_cold_in.T) <= dT:
        return 0.  # No heat exchange
    # Tightest reachable temperature on the cold side (hot stream cooling down).
    T_pinch_coldside = s_cold_in.T + dT
    if T_lim_coldside:
        if T_lim_coldside > s_hot_in.T:
            return 0.  # No heat exchange
        else:
            T_lim_coldside = max(T_pinch_coldside, T_lim_coldside)
    else:
        T_lim_coldside = T_pinch_coldside
    # Tightest reachable temperature on the hot side (cold stream heating up).
    T_pinch_hotside = s_hot_in.T - dT
    if T_lim_hotside:
        if T_lim_hotside < s_cold_in.T:
            return 0.  # No heat exchange
        else:
            T_lim_hotside = min(T_pinch_hotside, T_lim_hotside)
    else:
        T_lim_hotside = T_pinch_hotside
    # Find which side reaches the pinch first by selecting the side that needs
    # the least heat transfer to reach the pinch.
    # Pinch on the cold side
    Q_hot_stream = heat_exchange_to_condition(s_hot_in, s_hot_out, T_lim_coldside, phase_coldside, H_lim_coldside, heating=False)
    # Pinch on the hot side
    Q_cold_stream = heat_exchange_to_condition(s_cold_in, s_cold_out, T_lim_hotside, phase_hotside, H_lim_hotside, heating=True)
    if Q_hot_stream == Q_cold_stream == 0.:
        s0_out.copy_like(s0_in)
        # NOTE(review): asymmetric copy direction (s1_in <- s1_out) next to
        # s0_out <- s0_in looks suspicious — confirm this is intended and not
        # s1_out.copy_like(s1_in).
        s1_in.copy_like(s1_out)
        return 0.
    if Q_hot_stream > 0 or Q_cold_stream < 0:
        # Sanity check: the hot stream should release heat (Q <= 0) and the
        # cold stream absorb it (Q >= 0); tiny violations are treated as zero.
        if Q_hot_stream / s_hot_in.C < 0.1 or Q_cold_stream / s_cold_in.C > -0.1:
            s0_out.copy_like(s0_in)
            s1_in.copy_like(s1_out)
            return 0.
        raise RuntimeError('inlet stream not in vapor-liquid equilibrium')
    if Q_cold_stream < -Q_hot_stream:
        # Pinch on the hot side
        Q = Q_cold_stream
        if phase_coldside:
            s_hot_out.H = s_hot_in.H - Q
        else:
            # Re-flash the hot outlet at the adjusted enthalpy.
            s_hot_out.vle(H=s_hot_in.H - Q, P=s_hot_out.P)
    else:
        # Pinch on the cold side
        Q = Q_hot_stream
        if phase_hotside:
            s_cold_out.H = s_cold_in.H - Q
        else:
            # Re-flash the cold outlet at the adjusted enthalpy.
            s_cold_out.vle(H=s_cold_in.H - Q, P=s_cold_out.P)
    return abs(Q)
36,068
def ReadCOSx1dsumSpectrum(filename):
    """Load a COS x1dsum spectrum from a whitespace-delimited text file.

    Parameters
    ----------
    filename : str
        Full path to the file; columns 0, 1, 4 and 5 are read as wavelength,
        flux, and the plus/minus flux uncertainties.

    Returns
    -------
    numpy.ndarray of shape (4, n): [wave, flux, dfp, dfm].
    """
    wave, flux, dfp, dfm = np.loadtxt(filename, unpack=True, usecols=[0, 1, 4, 5])
    spectrum = np.array([wave, flux, dfp, dfm])
    return spectrum
36,069
def parse_args():
    """Collect the evaluation options from the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', '-t',
                        choices=['seg', 'det', 'drivable', 'det-tracking'])
    parser.add_argument('--gt', '-g', help='path to ground truth')
    parser.add_argument('--result', '-r',
                        help='path to results to be evaluated')
    parser.add_argument('--categories', '-c', nargs='+',
                        help='categories to keep')
    return parser.parse_args()
36,070
def op_atanh(x):
    """Inverse hyperbolic tangent, applied element-wise to nested lists and
    dispatched to ``cmath`` for complex inputs."""
    if isinstance(x, list):
        return [op_atanh(item) for item in x]
    if isinstance(x, complex):
        return cmath.atanh(x)
    return math.atanh(x)
36,071
def find_intersections(
    solutions: Mapping[Any, Callable],
    ray_direction: Array,
    target_center: Array,
) -> dict:
    """
    Evaluate each precomputed ray/sphere solution at the given ray direction
    and target center (``solutions`` as produced by
    ``solutions.make_ray_sphere_lambdas``).

    Returns a dict mapping each coordinate to its solution value.
    """
    # suppress irrelevant warnings about imaginary values
    # Fixed: was `with warnings.catch_warnings:` (missing call parentheses),
    # which raises at runtime instead of entering the context manager.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return {
            coordinate: solution(*ray_direction, *target_center)
            for coordinate, solution in solutions.items()
        }
36,072
def _add_url(id_or_url: str, new_url: str):
    """Attach an additional url to the selected novel.

    :param id_or_url: id or existing url identifying the novel.
    :param new_url: url to associate with that novel.
    """
    controllers.add_url(id_or_url, new_url)
36,073
def Circum_O_R(vertex_pos, tol):
    """
    Compute the circumcenter and circumradius of a tetrahedron.

    Reference: Fiedler, Miroslav. Matrices and graphs in geometry. No. 139.
    Cambridge University Press, 2011.

    Parameters
    ----------
    vertex_pos : The position of vertices of a tetrahedron
    tol : Tolerance defined to identify co-planar tetrahedrons

    Returns
    -------
    circum_center : The center of the circum-sphere ([0, 0, 0] if degenerate)
    circum_rad : The radius of the circum-sphere (0 if degenerate)
    """
    sq_dists = np.power(pdist(vertex_pos, 'euclidean'), 2)
    s12, s13, s14, s23, s24, s34 = sq_dists
    # Bordered (Cayley-Menger style) matrix of squared pairwise distances.
    cm = np.array([[0, 1, 1, 1, 1],
                   [1, 0, s12, s13, s14],
                   [1, s12, 0, s23, s24],
                   [1, s13, s23, 0, s34],
                   [1, s14, s24, s34, 0]])
    if np.linalg.det(cm) < tol:
        # (Near-)coplanar vertices: no well-defined circumsphere.
        return [0, 0, 0], 0
    m = -2 * np.linalg.inv(cm)
    # The first row of m yields barycentric-style weights for the center
    # and the squared diameter in its leading entry.
    weights = m[0, 1:5]
    circum_center = (weights @ vertex_pos) / weights.sum()
    circum_rad = np.sqrt(m[0, 0]) / 2
    return circum_center, circum_rad
36,074
def reverse_one_hot(image):
    """
    Collapse a one-hot encoded image (last axis = num_classes) to a 2D array
    of class indices.

    # Arguments
        image: The one-hot format image

    # Returns
        An array with the same width and height as the input but a single
        channel, where each pixel value is the classified class key.
    """
    return np.argmax(image, axis=-1)
36,075
def get_feature_clusters(x: torch.Tensor, output_size: int, clusters: int = 8):
    """
    Applies KMeans across feature maps of an input activations tensor.

    The activations are bilinearly upsampled until the spatial side reaches
    ``output_size``, flattened to (pixels, channels), clustered, and the
    per-pixel cluster labels are returned as an
    (output_size, output_size) array.

    NOTE(review): KMeans initialization is random, so labels are not
    deterministic across calls.
    """
    if not isinstance(x, torch.Tensor):
        raise NotImplementedError(f"Function supports torch input tensors only, but got ({type(x)})")
    if x.ndim == 3:
        # Add a batch dimension for interpolate().
        x = x.unsqueeze(0)
    b, c, h, w = x.shape
    assert h == w, f"image should be square, but got h = {h} and w = {w}"
    # Upsample so that scale_factor * h >= output_size.
    scale_factor = int(np.ceil(output_size / h))
    x = interpolate(x, scale_factor=scale_factor, mode='bilinear', align_corners=True)
    # torch2np is a project helper converting the tensor to a numpy array.
    # NOTE(review): the reshape assumes scale_factor * h == output_size
    # exactly; it fails when h does not divide output_size — verify callers.
    x = torch2np(x, squeeze=True).reshape((output_size * output_size), c)
    x = KMeans(n_clusters=clusters).fit_predict(x).reshape(output_size, output_size)
    return x
36,076
def not_enough_params(user: server.UserConnection, command: str) -> None:
    """Queue an ERR_NEEDMOREPARAMS (461) reply for a command sent with too
    few arguments."""
    reply = f"461 {user.nick} {command} :Not enough parameters"
    user.send_que.put((reply, "mantatail"))
36,077
def get_phone_operator(phonenumber):
    """
    Classify a phone number by mobile operator.

    >>> get_phone_operator('+959262624625')
    <Operator.Mpt: 'MPT'>
    >>> get_phone_operator('09970000234')
    <Operator.Ooredoo: 'Ooredoo'>
    >>> get_phone_operator('123456789')
    <Operator.Unknown: 'Unknown'>
    """
    number = str(phonenumber).strip()
    # First matching operator pattern wins; same order as before.
    for pattern, operator in ((mpt_re, Operator.Mpt),
                              (ooredoo_re, Operator.Ooredoo),
                              (telenor_re, Operator.Telenor),
                              (mytel_re, Operator.Mytel)):
        if pattern.match(number):
            return operator
    return Operator.Unknown
36,078
def projectpoints(P, X):
    """
    Apply full projection matrix P to 3D points X in cartesian coordinates.

    Args:
        P: projection matrix
        X: 3d points in cartesian coordinates

    Returns:
        x: 2d points in cartesian coordinates
    """
    # Lift to homogeneous coordinates, project, then drop back to cartesian.
    projected = P.dot(cart2hom(X))
    return hom2cart(projected)
36,079
def getDMI():
    """
    Read hardware information from DMI.

    Attempts to read each file named in DMI_FIELDS from /sys/class/dmi/id/.
    Fields whose files are missing or unreadable are omitted, so the function
    is best-effort and never raises for individual fields.

    Returns:
        dict: fields such as bios_version and product_serial mapped to the
        stripped file contents.
    """
    dmi = dict()
    for field in DMI_FIELDS:
        path = os.path.join("/sys/class/dmi/id", field)
        try:
            with open(path, 'r') as source:
                dmi[field] = source.read().strip()
        except (OSError, UnicodeDecodeError):
            # Fixed: the bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; only I/O and decode failures should be ignored.
            pass
    return dmi
36,080
def mdot(a,b):
    """
    Computes a contraction of two tensors/vectors.

    Assumes the following structure: tensor[m,n,i,j,k] OR vector[m,i,j,k],
    where i,j,k are spatial indices and m,n are variable indices. The
    contraction sums over the adjacent variable indices of the two operands.
    """
    if (a.ndim == 3 and b.ndim == 3) or (a.ndim == 4 and b.ndim == 4):
        # scalar-field * scalar-field or vector . vector:
        # elementwise product summed over the leading variable axis.
        c = (a*b).sum(0)
    elif a.ndim == 5 and b.ndim == 4:
        # tensor . vector -> vector
        c = np.empty(np.maximum(a[:,0,:,:,:].shape,b.shape),dtype=b.dtype)
        for i in range(a.shape[0]):
            c[i,:,:,:] = (a[i,:,:,:,:]*b).sum(0)
    elif a.ndim == 4 and b.ndim == 5:
        # vector . tensor -> vector
        c = np.empty(np.maximum(b[0,:,:,:,:].shape,a.shape),dtype=a.dtype)
        for i in range(b.shape[1]):
            c[i,:,:,:] = (a*b[:,i,:,:,:]).sum(0)
    elif a.ndim == 5 and b.ndim == 5:
        # tensor . tensor -> tensor
        c = np.empty((a.shape[0],b.shape[1],a.shape[2],a.shape[3],max(a.shape[4],b.shape[4])),dtype=a.dtype)
        for i in range(c.shape[0]):
            for j in range(c.shape[1]):
                c[i,j,:,:,:] = (a[i,:,:,:,:]*b[:,j,:,:,:]).sum(0)
    elif a.ndim == 5 and b.ndim == 6:
        # tensor . two-index tensor field -> two-index tensor field
        c = np.empty((a.shape[0],b.shape[1],b.shape[2],max(a.shape[2],b.shape[3]),max(a.shape[3],b.shape[4]),max(a.shape[4],b.shape[5])),dtype=a.dtype)
        for mu in range(c.shape[0]):
            for k in range(c.shape[1]):
                for l in range(c.shape[2]):
                    c[mu,k,l,:,:,:] = (a[mu,:,:,:,:]*b[:,k,l,:,:,:]).sum(0)
    else:
        # Any other dimensionality combination is unsupported.
        raise Exception('mdot', 'wrong dimensions')
    return c
36,081
def GetDeviceProtocolController(protocol): # real signature unknown; restored from __doc__
    """
    GetDeviceProtocolController(protocol)

    Creates a :class:`DeviceProtocolController` that provides device-specific
    controls. This interface is intended to allow closer integration with
    remote devices.

    .. note::
      Note that the use of scripting with Android is explicitly **not supported** due to the
      inherent fragility and unreliability of the Android platform. This interface is designed
      primarily for internal use and no support will be provided for Android-specific problems
      encountered using this.

    This function will not block, however the protocol may still be initialising when it is
    returned so immediate use of it may block.

    :param str protocol: The protocol to fetch a controller for.
    :return: A handle to the protocol controller, or ``None`` if something went wrong such as
      an unsupported protocol being specified.
    :rtype: DeviceProtocolController
    """
    # Generated stub: the real implementation is provided by the native
    # extension module at runtime.
    pass
36,082
def sample_duration(sample):
    """Return the duration of the sample, in seconds.

    :param sample: object exposing a ``duration`` attribute
    :return: number
    """
    return sample.duration
36,083
def test_get_temperature_conv_errors(caplog): """Test errors when requesting temperature conversion""" # ValueError should be raised if you try to convert a unit to itself with pytest.raises(ValueError): utils.get_temperature_conversion('degK', 'K') assert 'Cannot convert unit to itself' in caplog.text # Converting between units other than kelvin with pytest.raises(ValueError): utils.get_temperature_conversion('C', 'F') assert 'Only convert to/from Kelvin' in caplog.text
36,084
def fetch_commons_memberships(from_date=np.NaN, to_date=np.NaN, on_date=np.NaN): """Fetch Commons memberships for all MPs. fetch_commons_memberships fetches data from the data platform showing Commons memberships for each MP. The memberships are processed to impose consistent rules on the start and end dates for memberships. The from_date and to_date arguments can be used to filter the memberships returned. The on_date argument is a convenience that sets the from_date and to_date to the same given date. The on_date has priority: if the on_date is set, the from_date and to_date are ignored. The filtering is inclusive: a membership is returned if any part of it falls within the period specified with the from and to dates. Note that a membership with a NaN end date is still open. Parameters ---------- from_date : str or date or NaN, optional A string or datetime.date representing a date. If a string is used it should specify the date in ISO 8601 date format e.g. '2000-12-31'. The default value is numpy.NaN, which means no records are excluded on the basis of the from_date. to_date : str or date or NaN, optional A string or datetime.date representing a date. If a string is used it should specify the date in ISO 8601 date format e.g. '2000-12-31'. The default value is np.NaN, which means no records are excluded on the basis of the to_date. on_date : str or date or NaN, optional A string or datetime.date representing a date. If a string is used it should specify the date in ISO 8601 date format e.g. '2000-12-31'. The default value is np.NaN, which means no records are excluded on the basis of the on_date. Returns ------- out : DataFrame A pandas dataframe of Commons memberships for each MP, with one row per Commons membership. 
""" # Set from_date and to_date to on_date if set if not pd.isna(on_date): from_date = on_date to_date = on_date # Fetch the Commons memberships commons_memberships = fetch_commons_memberships_raw() # Get elections and fix the end dates of memberships end_dates = commons_memberships['seat_incumbency_end_date'].values general_elections = elections.get_general_elections().values general_elections_count = len(general_elections) # If the end date for a membership falls after dissolution adjust it for i in range(len(end_dates)): date = end_dates[i] if pd.isna(date): continue for j in range(general_elections_count): dissolution = general_elections[j, 1] election = general_elections[j, 2] if date > dissolution and date <= election: end_dates[i] = dissolution continue commons_memberships['seat_incumbency_end_date'] = end_dates # Filter on dates if requested if not pd.isna(from_date) or not pd.isna(to_date): commons_memberships = filter.filter_dates( commons_memberships, start_col='seat_incumbency_start_date', end_col='seat_incumbency_end_date', from_date=from_date, to_date=to_date) # Tidy up and return commons_memberships.sort_values( by=['family_name', 'seat_incumbency_start_date'], inplace=True) commons_memberships.reset_index(drop=True, inplace=True) return commons_memberships
36,085
def async_push_message(send_to, content):
    """Simulate pushing a message asynchronously.

    :param send_to: recipient of the message
    :param content: message body
    :return: None
    """
    messages = (
        '模拟异步推送消息',
        'send_to: {}'.format(send_to),
        'content: {}'.format(content),
    )
    for message in messages:
        logging.info(message)
    # Simulate slow delivery work.
    sleep(10)
36,086
def ensure_dir_empty(dirpath):
    """Ensure *dirpath* exists and contains no regular files.

    Creates the directory (and any missing parent directories) if needed,
    then deletes every regular file directly inside it. Subdirectories and
    their contents are left untouched.

    :param dirpath: path of the directory to create/empty
    """
    # makedirs with exist_ok avoids a race between the existence check and
    # creation, and (unlike os.mkdir) also creates missing parents.
    os.makedirs(dirpath, exist_ok=True)
    for fname in os.listdir(dirpath):
        fpath = os.path.join(dirpath, fname)
        if os.path.isfile(fpath):
            os.remove(fpath)
36,087
def check_single_row(row, msg):
    """Validate that *row* contains exactly one row.

    Args:
        row: the list of rows (anything exposing a ``shape`` attribute).
        msg: the error message to raise if there are no rows found.

    Raises:
        ValueError: if no rows are found.
        RuntimeError: if more than one row is found.
    """
    n_rows = row.shape[0]
    if n_rows < 1:
        raise ValueError(msg)
    # More than one row should be impossible unless /data was tampered with.
    if n_rows > 1:
        raise RuntimeError("Module data has been corrupted")
36,088
def excel_col_w_fitting(excel_path, sheet_name_list):
    """Auto-fit all column widths of an Excel file to their content.

    :param excel_path: The Excel file's path.
    :param sheet_name_list: The sheet names of the Excel file.
    :return: None; the file is saved with its column widths auto-fitted.
    """
    import win32com.client as win32
    excel = win32.gencache.EnsureDispatch('Excel.Application')
    try:
        work_book = excel.Workbooks.Open(excel_path)
        for sheet_name in sheet_name_list:
            work_sheet = work_book.Worksheets(sheet_name)
            work_sheet.Columns.AutoFit()
        work_book.Save()
    finally:
        # Always quit Excel, even if opening a sheet or saving fails,
        # so no orphaned EXCEL.EXE process is left running.
        excel.Application.Quit()
    return None
36,089
def load_meetings(root=public_meetings.data_root, ext=public_meetings.file_ext):
    """Load every meeting file under `root` that ends with `ext`

    Args:
        root(str): root meeting directory
        ext(str): file extension

    Returns:
        meetings(dict): a dictionnary {hash: meeting_data}
    """
    # The key is the filename with the extension stripped out.
    return {
        fname.replace(ext, ''): load_meeting(os.path.join(root, fname))
        for fname in os.listdir(root)
        if fname.endswith(ext)
    }
36,090
def example_alter_configs(a, args):
    """ Alter configs atomically, replacing non-specified configuration properties with their default values. """
    # args is a flat sequence of (resource_type, resource_name,
    # "key=val,key=val") triples.
    resources = []
    for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
        resource = ConfigResource(restype, resname)
        for conf in configs.split(','):
            key, value = conf.split('=')
            resource.set_config(key, value)
        resources.append(resource)

    futures = a.alter_configs(resources)

    # Block until every per-resource operation completes.
    for resource, future in futures.items():
        try:
            future.result()  # empty, but raises exception on failure
            print("{} configuration successfully altered".format(resource))
        except Exception:
            raise
36,091
def test_edge_init_no_direction(db_3_vertices):
    """Undirected edges should sort their vertices by place."""
    db, v1, v2, v3 = db_3_vertices

    # Endpoints already in order are kept as given.
    edge_fwd = Edge(v1, v2, has_direction=False)
    assert edge_fwd.start is v1
    assert edge_fwd.end is v2

    # Endpoints out of order are swapped so start precedes end.
    edge_rev = Edge(v3, v2, has_direction=False)
    assert edge_rev.start is v2
    assert edge_rev.end is v3
36,092
def padding_reflect(image, pad_size):
    """
    Padding with reflection to image by boarder

    Parameters
    ----------
    image: NDArray
        Image to padding. Only support 2D(gray) or 3D(color)
    pad_size: tuple
        Padding size for height adn width axis respectively

    Returns
    -------
    ret: NDArray
        Image after padding, as a float array of shape
        (H + 2*h, W + 2*w[, C]).
    """
    shape = image.shape
    assert len(shape) in [2, 3], 'image must be 2D or 3D'
    h, w = pad_size
    # The original per-pixel loops used Python-2 `xrange` (NameError on
    # Python 3) and implemented exactly numpy's 'symmetric' pad mode
    # (the border pixel itself is repeated), not 'reflect'.
    # Cast to float to match the previous np.zeros-based output dtype.
    padded = np.asarray(image, dtype=float)
    if padded.ndim == 2:
        return np.pad(padded, ((h, h), (w, w)), mode='symmetric')
    # Color image: pad only the spatial axes, never the channel axis.
    return np.pad(padded, ((h, h), (w, w), (0, 0)), mode='symmetric')
36,093
def check_if_all_elements_have_geometry(geodataframes_list):
    """Return True iff every GeoDataFrame in the list carries geometry info.

    Parameters
    ----------
    geodataframes_list : A list object
        A list object that contains one or more geopandas.GeoDataFrame objects

    Returns
    -------
    bool
        True when every element has geometry information associated with it,
        False when at least one element does not.

    Raises
    ------
    ValueError
        If the argument is not a list, or if any element is not a
        geopandas.GeoDataFrame.
    """
    if not isinstance(geodataframes_list, list):
        raise ValueError(
            "geodataframes_list must be of list type. Got {}".format(type(geodataframes_list)))

    if check_if_all_elements_are_gdf(geodataframes_list) is False:
        raise ValueError(
            "Elements of the list should be of type geopandas.GeoDataFrame. Got at least one value that is not.")

    # Mirror the original's explicit `is False` test rather than truthiness.
    return all(has_geometry(geodataframe) is not False
               for geodataframe in geodataframes_list)
36,094
def conference_schedule(parser, token):
    """
    {% conference_schedule conference schedule as var %}
    """
    contents = token.split_contents()
    tag_name = contents[0]
    try:
        # contents[3] is the literal "as" keyword and is skipped.
        conference = contents[1]
        schedule = contents[2]
        var_name = contents[4]
    except IndexError:
        raise template.TemplateSyntaxError("%r tag had invalid arguments" % tag_name)

    class ScheduleNode(TNode):
        def __init__(self, conference, schedule, var_name):
            self.var_name = var_name
            self.conference = self._set_var(conference)
            self.schedule = self._set_var(schedule)

        def render(self, context):
            # Resolve the template variables against the current context
            # and expose the schedule under the requested name.
            resolved = models.Schedule.objects.get(
                conference=self._get_var(self.conference, context),
                slug=self._get_var(self.schedule, context),
            )
            context[self.var_name] = schedule_context(resolved)
            return ''

    return ScheduleNode(conference, schedule, var_name)
36,095
def test_trainer_loggers_property():
    """Test for correct initialization of loggers in Trainer."""
    log_a = CustomLogger()
    log_b = CustomLogger()

    # A list of loggers is copied verbatim into trainer.loggers.
    trainer = Trainer(logger=[log_a, log_b])
    assert trainer.loggers == [log_a, log_b]

    # A single logger is wrapped in a one-element list.
    trainer = Trainer(logger=log_a)
    assert trainer.loggers == [log_a]

    # logger=False disables logging entirely.
    trainer = Trainer(logger=False)
    assert trainer.loggers == []

    # logger=True falls back to the default TensorBoardLogger.
    trainer = Trainer(logger=True)
    assert trainer.loggers == [trainer.logger]
    assert type(trainer.loggers[0]) == TensorBoardLogger
36,096
def make_index(genome_fasta, output_dir, cores, cg, chg, chh, cwg, triplet_seq, seq_context) -> None:
    """Create augmented index files

    Can add triplet seq and more general sequence context information.
    """
    if chg and cwg:
        raise ValueError("--chg and --cwg are mutually exclusive!")

    # Collect the motifs whose flag was set, in a fixed order.
    flag_to_motif = [(cg, 'CG'), (chg, 'CHG'), (chh, 'CHH'), (cwg, 'CWG')]
    motifs = [motif for flag, motif in flag_to_motif if flag]
    if not motifs:
        raise ValueError('You have to select at least one motif.')

    annotations = OrderedDict([('triplet_seq', triplet_seq),
                               ('seq_context', seq_context)])
    start_parallel_index_generation(genome_fasta=genome_fasta,
                                    index_output_dir=output_dir,
                                    motifs=motifs,
                                    annotations=annotations,
                                    cores=cores)
36,097
def run_delete_process() -> Tuple[str, http.HTTPStatus]:
    """Handle deleting tasks pushed from the Task Queue.

    Returns:
        The response body and HTTP status produced by the shared
        process runner.
    """
    operation = constants.Operation.DELETE
    return _run_process(operation)
36,098
def annotate(f, expr, ctxt):
    """Compute the type of *expr*, record it on the node, and return it.

    f: typing function, called as ``f(expr, ctxt)``
    expr: expression node; its ``type`` attribute is set as a side effect
    ctxt: context passed through to ``f``
    :returns: type of expr
    """
    inferred = f(expr, ctxt)
    expr.type = inferred
    return inferred
36,099