Dataset columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def buy_and_hold_manager_factory(mgr, j: int, y, s: dict, e=1000):
    """ Ignores manager preference except every j data points

        For this to make any sense, 'y' must be changes in log prices.
        For this to be efficient, the manager must respect the "e" convention. That is,
        the manager must do little work when e<0

        :param mgr:   underlying portfolio manager
        :param j:     only use the manager every j data points
        :param y:     changes in log prices
        :param s:     State
        :param e:     computation-effort hint passed through to the manager
        :return: w    Portfolio weights
    """
    if j == 1:
        # Special case: just use the manager
        # This is the only time the user's e parameter is passed on.
        s_mgr = s['s_mgr']
        w, s_mgr = mgr(y=y, s=s_mgr, e=e)
        s['s_mgr'] = s_mgr
        return w, s
    else:
        if s.get('w') is None:
            # Initialization
            s['count'] = 0
            s_mgr = {}
            w, s_mgr = mgr(y=y, s=s_mgr, e=1000)
            s['s_mgr'] = s_mgr
            s['w'] = w
            return w, s
        else:
            s['count'] = s['count'] + 1
            if s['count'] % j == 0:
                # Sporadically use the manager
                s_mgr = s['s_mgr']
                w, s_mgr = mgr(y=y, s=s_mgr, e=1000)
                s['s_mgr'] = s_mgr
                s['w'] = w
                return w, s
            else:
                # Tell the manager not to worry too much about this data point, as the weights won't be used ...
                s_mgr = s['s_mgr']
                _ignore_w, s_mgr = mgr(y=y, s=s_mgr, e=-1)
                s['s_mgr'] = s_mgr
                # ... instead we let it ride
                w_prev = s['w']
                w = normalize([wi * math.exp(yi) for wi, yi in zip(w_prev, y)])
                s['w'] = w
                return w, s
32,200
def explicit_wait_visibility_of_element_located(browser, xpath, timeout=35):
    """Explicitly wait until the element is visible."""
    locator = (By.XPATH, xpath)
    condition = expected_conditions.visibility_of_element_located(locator)
    try:
        wait = WebDriverWait(browser, timeout)
        result = wait.until(condition)
    except TimeoutException:
        print("Timeout Exception in explicit wait")
        return False
    return result
32,201
def list_all_resources():
    """Return a list of all known resources.

    :param start_timestamp: Limits resources by last update time >= this value.
        (optional)
    :type start_timestamp: ISO date in UTC
    :param end_timestamp: Limits resources by last update time < this value.
        (optional)
    :type end_timestamp: ISO date in UTC
    :param metadata.<key>: match on the metadata within the resource.
        (optional)
    """
    return _list_resources(
        project=acl.get_limited_to_project(flask.request.headers))
32,202
def get_completed_exploration_ids(user_id, collection_id):
    """Returns a list of explorations the user has completed within the
    context of the provided collection.

    Args:
        user_id: str. ID of the given user.
        collection_id: str. ID of the collection.

    Returns:
        list(str). A list of exploration ids that the user with the given
        user id has completed within the context of the provided collection
        with the given collection id. The list is empty if the user has not
        yet completed any explorations within the collection, or if either
        the collection and/or user do not exist.

        A progress model isn't added until the first exploration of a
        collection is completed, so, if a model is missing, there isn't
        enough information to infer whether that means the collection
        doesn't exist, the user doesn't exist, or if they just haven't made
        any progress in that collection yet. Thus, we just assume the user
        and collection exist for the sake of this call, so it returns an
        empty list, indicating that no progress has yet been made.
    """
    progress_model = user_models.CollectionProgressModel.get(
        user_id, collection_id)
    return progress_model.completed_explorations if progress_model else []
32,203
def test_parallellize_with_args(mocker):
    """Test that parallelize decorator passes arguments to the target function."""
    mock = mocker.Mock()
    name = mock.__qualname__ = 'mock'
    args = 'can dank memes', 'melt steel beams?'
    kwargs = dict(tests='pls pass')

    parallel_func = parallelize(mock)
    parallel_func(*args, **kwargs)

    assert mock.called_once_with(
        target=mock,
        name=name,
        args=args,
        kwargs=kwargs,
    )
    assert mock.start.called_once()
32,204
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\?", " ? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
32,205
def test_good_cases(task, good_cases):
    """Used to test properly formatted configs. Prints feedback from the task.

    Args:
        task: A task dictionary mapping 'type' to the task name (e.g. 'ssh').
        good_cases: A list of tuples of the form (config_name, config).
            config should be properly formatted.
    """
    sim = usersim.UserSim(True)

    for config_name, config in good_cases:
        task['config'] = config
        api.new_task(task)

        print('Correctly accepted %s' % config_name)

        result = sim.cycle()
        if result:
            print('    Feedback from task:')
            print('    %s' % str(result))
32,206
def transitive_closure(graph):
    """
    Compute the transitive closure of the graph
    :param graph: a graph (list of directed pairs)
    :return: the transitive closure of the graph
    """
    closure = set(graph)
    while True:
        new_relations = set((x, w) for x, y in closure for q, w in closure if q == y)

        closure_until_now = closure | new_relations

        if closure_until_now == closure:
            break

        closure = closure_until_now

    closure_no_doubles = [(x, y) for (x, y) in closure if not x == y]
    return closure_no_doubles
32,207
def call_function(func_name, func_args, params, system):
    """
    func_args : list of values (int or string)
    return str or None if fail
    return ROPChain if success
    """
    if (system == Systems.TargetSystem.Linux and curr_arch_type() == ArchType.ARCH_X86):
        return call_function_linux_x86(func_name, func_args, params)
    elif (system == Systems.TargetSystem.Linux and curr_arch_type() == ArchType.ARCH_X64):
        return call_function_linux_x64(func_name, func_args, params)
    elif (system == Systems.TargetSystem.Linux and curr_arch_type() == ArchType.ARCH_ARM32):
        return call_function_linux_arm32(func_name, func_args, params)
    else:
        return "Not implemented yet for this system/arch"
32,208
def error_handler(update, context):
    """Log Errors caused by Updates."""
    log.error(
        'with user: "%s (%s)"\nmessage: "%s"\ntraceback: %s',
        update.effective_user,
        update.effective_user.id,
        context.error,
        traceback.format_exc()
    )
    return ConversationHandler.END
32,209
def jac(w, centred_img_patches, F, NUM_MODES): """ The Jacobian of the numerical search procedure. Parameters ---------- w : numpy array (floats) Column vector of model weights, used to construct mapping. centred_img_patches : numpy array (floats) The mean-centred {p x NUM_PATCHES} array of p-elements image patches. F : numpy array (floats) Column vector of all errors. NUM_MODES : int Number of independent modes into which the image will be decomposed. Returns ------- J : numpy array (floats) The Jacobian for the current error vector and set of weights. """ # Initialise numerical perturbation and Jacobian array PERT = 1e-15 num_var = w.size num_err = F.size J = np.zeros([num_err, num_var]) # Iterate over all weights and populate Jacobian for i in range(num_var): w_pert = w.copy() w_pert[i] = w[i] + PERT inverse_mapping_pert = generate_inverse_mapping(w_pert, centred_img_patches, NUM_MODES) sources_pert = map_patches_to_sources(inverse_mapping_pert, centred_img_patches) source_cov_pert = cov(sources_pert) dF = err(sources_pert, source_cov_pert) - F J[:,[i]] = dF/PERT return J
32,210
def test_boolean_constraint_deprecated_int():
    """Check that validate_params raises a deprecation warning but still passes
    validation when using an int for a parameter accepting a boolean.
    """

    @validate_params({"param": ["boolean"]})
    def f(param):
        pass

    # True/False and np.bool_(True/False) are valid params
    f(True)
    f(np.bool_(False))

    # an int is also valid but deprecated
    with pytest.warns(
        FutureWarning, match="Passing an int for a boolean parameter is deprecated"
    ):
        f(1)
32,211
def get_session():
    """Return a Session instance, used to manipulate the db."""
    return Session(engine)
32,212
def activity_horizontal_bar_chart(stock_and_mileage_df: pd.DataFrame.groupby, output_folder): """ Horizontal bar chart representing mean activity and other activities per unique categorization :param stock_and_mileage_df: Dataframe of the vehicles registration list :param output_folder: output folder name where to store resulting chart :return: an html file containing the horizontal bar chart of the mean activity """ data = stock_and_mileage_df.copy() # Delete off road data data = data[data['Category'] != 'Off Road'] # Create single column classification data['segmentation'] = data.apply(lambda row: all_categories_grouping(row), axis=1) horizontal_plot = go.Figure() # Add Activity statistics and stock traces horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Max_Activity'], mode='markers', name='Activitat màxima', marker_color='rgb(288, 26, 28)' )) horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Min_Activity'], mode='markers', name='Activitat mínima', marker_color='rgb(229, 196, 148)' )) horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Std_Activity'], mode='markers', name="Desviació standard de l'activitat", marker=dict( color='rgb(800, 800, 800)', opacity=0) )) horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Stock'], mode='markers', name="Estoc", marker=dict( color='rgb(800, 800, 800)', opacity=0) )) horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Mean_Lifetime_Activity'], mode='markers', name="Lifetime cumulative activity mitja", marker=dict( color='rgb(800, 800, 800)', opacity=0) )) # For each category add the mean activity bar chart (to diferenciate by same colors as Stock distribution Pie Chart) for category in CATEGORIES: horizontal_plot.add_trace(go.Bar( y=data[data['Category'] == category]['segmentation'], x=data[data['Category'] == category]['Mean_Activity'], orientation='h', marker_color=COLOR_DISCRETE_MAP[category], name=f'Activitat mitjana {category}' )) # Update plot information horizontal_plot.update_layout( title="Activitat mitjana anual segons classificació del parc de vehicles d'Andorra", title_x=0.5, height=4000, width=1500, template='plotly_white', xaxis_title='Activitat mitja (km/any)', yaxis_title='Tipologia de vehicle', hovermode="y unified", hoverlabel=dict(namelength=100), xaxis_range=[0, stock_and_mileage_df['Max_Activity'].max()*1.05], xaxis=dict( tickmode='array', tickvals=[0, 5000, 15000, 25000, 50000, 100000, 150000, 200000], ticktext=['0', '5k', '15k', '25k', '50k', '100k', '150k', '200k']) ) horizontal_plot.update_xaxes(showgrid=True, zeroline=True) horizontal_plot.show() # Save plot to html file filename = output_folder + "Activitat mitjana anual segons classificació del parc de vehicles d'Andorra.html" horizontal_plot.write_html(filename)
32,213
def sdm_ecart(f):
    """
    Compute the ecart of ``f``.

    This is defined to be the difference of the total degree of `f` and the
    total degree of the leading monomial of `f` [SCA, defn 2.3.7].

    Invalid if f is zero.

    Examples
    ========

    >>> from sympy.polys.distributedmodules import sdm_ecart
    >>> sdm_ecart([((1, 2, 3), 1), ((1, 0, 1), 1)])
    0
    >>> sdm_ecart([((2, 2, 1), 1), ((1, 5, 1), 1)])
    3
    """
    return sdm_deg(f) - sdm_monomial_deg(sdm_LM(f))
32,214
async def test_if_fires_on_mqtt_message_with_device(
    hass, device_reg, mqtt_mock, tag_mock
):
    """Test tag scanning, with device."""
    config = copy.deepcopy(DEFAULT_CONFIG_DEVICE)

    async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})

    # Fake tag scan.
    async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
    await hass.async_block_till_done()

    tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
32,215
def draw_gif_frame(image, bbox, frame_no):
    """Draw a rectangle with given bbox info.
    Input:
        - image: Frame to draw on
        - bbox: A list containing one rectangle's info -> frame id x y w h
        - frame_no: Frame number to print on the image
    Output: Frame that has been drawn on"""
    obj_id = bbox[1]
    bbox_left = int(bbox[2])
    bbox_top = int(bbox[3])
    bbox_right = bbox_left + int(bbox[4])
    bbox_bottom = bbox_top + int(bbox[5])

    # Set up params
    left_top_pt = (bbox_left, bbox_top)
    right_bottom_pt = (bbox_right, bbox_bottom)
    color = (0, 0, 255)
    thickness = 8
    org = (bbox_left, bbox_top - 5)
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 1
    thicknes_id = 3
    line_type = cv2.LINE_4

    cv2.rectangle(image, left_top_pt, right_bottom_pt, color, thickness)
    cv2.putText(image, str(obj_id), org, font, font_scale, color, thicknes_id, line_type)
    put_text(image, str(frame_no))
    return image
32,216
def build_response(session_attributes, speechlet_response):
    """ Build the Alexa response """
    # Log
    debug_print("build_response")

    return {
        'version': '1.0',
        'sessionAttributes': session_attributes,
        'response': speechlet_response
    }
32,217
def test_raises_correct_count2(
    assert_errors,
    parse_ast_tree,
    context,
    first,
    second,
    third,
    default_options,
    mode,
):
    """Testing that raises are counted correctly."""
    test_instance = context.format(first, second, third)
    tree = parse_ast_tree(mode(test_instance))

    visitor = FunctionComplexityVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [])
32,218
def test_build(): """ Test build of a small filter """ nz = 8 # 128 points equally spaced in theta (z=cos(theta)) nphi = 16 # I usually use nphi=2*nz so regular pixels at equator # GRF with C_lambda = 1/(1.0 + 10^-4 (lambda(lambda+1))^2) coeffs = (1.0, 0.0, 1e-4) # this has length scale around l=10 sf = build_filter(nz=nz, nphi=nphi, coeffs=coeffs, dtype=np.float64) # Build the filter coefficients sf.create_realisation() trans, innovs = sf._trans, sf._jumps file_obj = io.BytesIO() # buffer to save in sf.save(file_obj) file_obj.seek(0) # go back to start... sf = load_filter(file_obj) print(trans.shape) assert(sf._nz==nz) assert(sf._nphi==nphi) assert(sf.coeffs==coeffs) assert(np.all(np.equal(trans, sf._trans))) assert(np.all(np.equal(innovs, sf._jumps))) print(sf)
32,219
def modify_branch(sbox, branch, number, conflicting=False): """Commit a modification to branch BRANCH. The actual modification depends on NUMBER. If CONFLICTING=True, the change will be of a kind that conflicts with any other change that has CONFLICTING=True. We don't modify (properties on) the branch root node itself, to make it easier for the tests to distinguish mergeinfo changes from these mods.""" uniq = branch + str(number) # something like 'A1' or 'B2' if conflicting: sbox.simple_propset('conflict', uniq, branch + '/C') else: # Make some changes. We add a property, which we will read later in # logical_changes_in_branch() to check that the correct logical # changes were merged. We add a file, so that we will notice if # Subversion tries to merge this same logical change into a branch # that already has it (it will raise a tree conflict). sbox.simple_propset('prop-' + uniq, uniq, branch + '/D') sbox.simple_copy(branch + '/mu', branch + '/mu-' + uniq) sbox.simple_commit()
32,220
def create_host(api_client, orig_host_name, orig_host_uid, cloned_host_name, cloned_host_ip): """ Create a new host object with 'new_host_name' as its name and 'new_host_ip_address' as its IP-address. The new host's color and comments will be copied from the the "orig_host" object. :param api_client: Api client of the domain :param orig_host_uid: original host uid :param cloned_host_name: cloned host name :param cloned_host_ip: cloned host IP :return: the cloned host uid on success, otherwise None """ # get details of the original host object log("\n\tGathering information for host {}".format(orig_host_name)) res = api_client.api_call("show-host", {"uid": orig_host_uid}) if res.success is False: discard_write_to_log_file(api_client, "Failed to open existing host: {}. Aborting.".format(res.error_message)) return None # copy the color and comments from the original host color = res.data["color"] comments = res.data["comments"] # create a new host object log("\n\tCreating a new host {}".format(cloned_host_name)) res = api_client.api_call("add-host", {"name": cloned_host_name, "ip-address": cloned_host_ip, "color": color, "comments": comments}) if res.success is False: discard_write_to_log_file(api_client, "Failed to create the new host: {}. Aborting.".format(res.error_message)) return None return res.data["uid"]
32,221
def parenthesize(x):
    """Return a copy of x surrounded by open and close parentheses"""
    cast = type(x)
    if cast is deque:
        return deque(['('] + list(x) + [')'])
    return cast('(') + x + cast(')')
32,222
def get_logo_color():
    """Return color of logo used in application main menu.

    RGB format (0-255, 0-255, 0-255). Orange applied.
    """
    return (255, 128, 0)
32,223
def hello(to_print):
    """ This function prints the parameter """
    print('Hello ' + to_print)
32,224
def DG(p, t, Ep=10):
    """ Training by Gradient Descent """
    # m is the number of training patterns (examples) and
    # n is the number of elements in the feature vector.
    m, n = p.shape
    a = 0.5
    # --- Initial weights ---
    w = np.random.uniform(-0.25, 0.25, 2)
    b = np.random.uniform(-0.25, 0.25)
    # -----------------------
    for N in range(Ep):        # Iterate over the number of epochs
        for ti in range(m):    # Iterate over the number of patterns
            # ---- Output ----
            net = np.dot(w, p[ti]) + b
            y = logsig(net)
            # ----------------
            # --- Delta rule ---
            err = t[ti] - y
            Delta = 2 * err * df(net) * p[ti]
            w = w + a * Delta
            b = b + a * 2 * err * df(net)
            # ------------------
    return w, b
32,225
def batch_name_change():
    """Change the format of pano file name from `rid_order_pid_heading` to `pid_heading`"""
    for fn in tqdm(os.listdir(PANO_FOLFER)):
        if 'jpg' not in fn:
            continue

        old_name = os.path.join(PANO_FOLFER, fn)
        new_name = os.path.join(PANO_FOLFER, "_".join(fn.split("_")[2:]))
        os.rename(old_name, new_name)

    return
32,226
def cancel_transfer(transfertool_obj, transfer_id):
    """
    Cancel a transfer based on external transfer id.

    :param transfertool_obj: Transfertool object to be used for cancellation.
    :param transfer_id: External-ID as a 32 character hex string.
    """
    record_counter('core.request.cancel_request_external_id')
    try:
        transfertool_obj.cancel(transfer_ids=[transfer_id])
    except Exception:
        raise RucioException('Could not cancel FTS3 transfer %s on %s: %s'
                             % (transfer_id, transfertool_obj, traceback.format_exc()))
32,227
def plot_tc_errors(rec, legend=True, ax=None, per_stim=False, ylim=(0, 200)): """ Plot tuning curve (TC) sMAPE. .. WARNING:: Untested! .. TODO:: Test or remove `plot_tc_errors`. Parameters ---------- rec : `.GANRecords` """ if ax is None: _, ax = pyplot.subplots() import matplotlib.patheffects as pe epoch = rec.TC_mean['epoch'] model = rec.TC_mean['gen'].as_matrix() true = rec.TC_mean['data'].as_matrix() total_error = smape(model, true) total_error_lines = ax.plot( epoch, total_error, path_effects=[pe.Stroke(linewidth=5, foreground='white'), pe.Normal()]) if per_stim: per_stim_error = 200 * abs((model - true) / (model + true)) per_stim_lines = ax.plot(epoch, per_stim_error, alpha=0.4) else: per_stim_error = per_stim_lines = None if legend: if per_stim: leg = ax.legend( total_error_lines + per_stim_lines, ['TC sMAPE'] + list(range(len(per_stim_lines))), loc='center left') else: leg = ax.legend( total_error_lines, ['TC sMAPE'], loc='upper left') leg.set_frame_on(True) leg.get_frame().set_facecolor('white') if ylim: ax.set_ylim(ylim) return Namespace( ax=ax, per_stim_error=per_stim_error, per_stim_lines=per_stim_lines, total_error=total_error, total_error_lines=total_error_lines, )
32,228
def test_token_authenticator_noauth(app): """Create a token for a user relying on Authenticator.authenticate and no auth header""" name = 'user' data = { 'auth': { 'username': name, 'password': name, }, } r = yield api_request(app, 'users', name, 'tokens', method='post', data=json.dumps(data) if data else None, noauth=True, ) assert r.status_code == 200 reply = r.json() assert 'token' in reply r = yield api_request(app, 'authorizations', 'token', reply['token']) r.raise_for_status() reply = r.json() assert reply['name'] == name
32,229
def delete_video_db(video_id):
    """Delete a video reference from the database."""
    connection = connect_db()
    connection.cursor().execute('DELETE FROM Content WHERE contentID=%s', (video_id,))
    connection.commit()
    close_db(connection)
    return True
32,230
def int_to_float_fn(inputs, out_dtype): """Create a Numba function that converts integer and boolean ``ndarray``s to floats.""" if any(i.type.numpy_dtype.kind in "ib" for i in inputs): args_dtype = np.dtype(f"f{out_dtype.itemsize}") @numba.njit(inline="always") def inputs_cast(x): return x.astype(args_dtype) else: args_dtype_sz = max([_arg.type.numpy_dtype.itemsize for _arg in inputs]) args_dtype = np.dtype(f"f{args_dtype_sz}") @numba.njit(inline="always") def inputs_cast(x): return x.astype(args_dtype) return inputs_cast
32,231
def test_authenticate_password(HTTPConnection): """Test rpcpassword/rpcuser authentication""" destination_address = 'mynHfTyTWyGGB76NBFbfUrTnn8YWQkTJVs' args = [ '--mnemonic-file={}'.format(datafile('mnemonic_6.txt')), '--rpcuser=abc', '--rpcpassword=abc', '2of3', '--recovery-mnemonic-file={}'.format(datafile('mnemonic_7.txt')), '--rescan', '--key-search-depth={}'.format(key_depth), '--search-subaccounts={}'.format(sub_depth), '--destination-address={}'.format(destination_address), ] check_http_auth(HTTPConnection, args, '127.0.0.1', 18332, 3600, b'abc:abc')
32,232
def _media_item_post_save_handler(*args, sender, instance, created, raw, **kwargs):
    """
    A post_save handler for :py:class:`~.MediaItem` which creates blank view and edit
    permissions if they don't exist.
    """
    # If this is a "raw" update (e.g. from a test fixture) or was not the creation of the
    # item, don't try to create objects.
    if raw or not created:
        return

    if not hasattr(instance, 'view_permission'):
        Permission.objects.create(allows_view_item=instance)
32,233
def get_sorted_features(available_features: Iterable[Feature] = None):
    """
    Register default features and setuptools entrypoint 'ddb_features' inside the features
    registry. Features are registered with a topological sort so that each feature's
    dependencies are registered first. Within a command phase, actions are executed in the
    order of their feature registration.
    """
    if available_features is None:
        available_features = _available_features

    entrypoint_features = {f.name: f for f in available_features}
    for entry_point in pkg_resources.iter_entry_points('ddb_features'):
        feature = entry_point.load()()
        entrypoint_features[feature.name] = feature

    required_dependencies, toposort_data = _prepare_dependencies_data(entrypoint_features)
    _check_missing_dependencies(entrypoint_features, required_dependencies)

    dependencies = config.data.get('dependencies')
    if dependencies:
        for feat, feat_dependencies in dependencies.items():
            if feat not in toposort_data:
                toposort_data[feat] = set()
            for feat_dependency in feat_dependencies:
                toposort_data[feat].add(feat_dependency)

    sorted_feature_names = toposort_flatten(toposort_data, sort=True)

    for feature_name in sorted_feature_names:
        feature = entrypoint_features.get(feature_name)
        if feature:
            yield feature
32,234
def vel_gradient(**kwargs):
    """
    Calculates velocity gradient across surface object in supersonic flow
    (from stagnation point) based upon either of two input variable sets.

    First method:
    vel_gradient(R_n = Object radius (or equivalent radius, for shapes that
                       are not axisymmetric),
                 p_0 = flow stagnation pressure,
                 p_inf = flow freestream static pressure,
                 rho = flow density)

    Second method:
    vel_gradient(R_n = Object radius (or equivalent radius, for shapes that
                       are not axisymmetric),
                 delta = Shock stand-off distance (from object stagnation point),
                 U_s = Flow velocity immediately behind shock)
    """
    if ('R_n' in kwargs) and ('p_0' in kwargs) and ('p_inf' in kwargs) and \
            ('rho' in kwargs):
        from numpy import sqrt
        vel_gradient = (1 / kwargs['R_n']) * sqrt(
            (2 * (kwargs['p_0'] - kwargs['p_inf'])) / kwargs['rho'])
    elif ('R_n' in kwargs) and ('U_s' in kwargs) and ('delta' in kwargs):
        b = kwargs['delta'] + kwargs['R_n']
        vel_gradient = (kwargs['U_s'] / kwargs['R_n']) * (
            1 + ((2 + ((b**3) / (kwargs['R_n']**3)))
                 / (2 * (((b**3) / (kwargs['R_n']**3)) - 1))))
    else:
        raise KeyError('Incorrect variable assignment')

    return vel_gradient
32,235
def lcm(numbers):
    """
    Get the least common multiple of a list of numbers
    ------------------------------------------------------------------------
    input:  numbers  [1,2,6]   list of integers
    output: 6                  integer
    """
    return reduce(lambda x, y: int((x * y) / gcd(x, y)), numbers, 1)
32,236
def parseBracketed(idxst,pos): """parse an identifier in curly brackets. Here are some examples: >>> def test(st,pos): ... idxst= IndexedString(st) ... (a,b)= parseBracketed(idxst,pos) ... print(st[a:b]) ... >>> test(r'{abc}',0) {abc} >>> test(r'{ab8c}',0) {ab8c} >>> test(r'{c}',0) {c} >>> test(r'{}',0) Traceback (most recent call last): ... ParseException: command enclosed in curly brackets at line 1, col 1 >>> test(r'{abc',0) Traceback (most recent call last): ... ParseException: command enclosed in curly brackets at line 1, col 1 >>> test(r'x{ab8c}',1) {ab8c} """ if not isinstance(idxst, IndexedString): raise TypeError("idxst par wrong: %s" % repr(idxst)) st= idxst.st() m= rx_bracketed.match(st,pos) if m is None: raise ParseException("command enclosed in curly brackets at", rowcol= idxst.rowcol(pos)) return(pos,m.end())
32,237
def compute_agg_tiv(tiv_df, agg_key, bi_tiv_col, loc_num): """ compute the agg tiv depending on the agg_key""" agg_tiv_df = (tiv_df.drop_duplicates(agg_key + [loc_num], keep='first')[list(set(agg_key + ['tiv', 'tiv_sum', bi_tiv_col]))] .groupby(agg_key, observed=True).sum().reset_index()) if 'is_bi_coverage' in agg_key: # we need to separate bi coverage from the other tiv agg_tiv_df.loc[agg_tiv_df['is_bi_coverage']==False, 'agg_tiv'] = agg_tiv_df['tiv_sum'] - agg_tiv_df[bi_tiv_col] agg_tiv_df.loc[agg_tiv_df['is_bi_coverage']==True, 'agg_tiv'] = agg_tiv_df[bi_tiv_col] else: agg_tiv_df['agg_tiv'] = agg_tiv_df['tiv_sum'] return agg_tiv_df[agg_key + ['agg_tiv']]
32,238
def convert_configurations_to_array(configs: List[Configuration]) -> np.ndarray: """Impute inactive hyperparameters in configurations with their default. Necessary to apply an EPM to the data. Parameters ---------- configs : List[Configuration] List of configuration objects. Returns np.ndarray Array with configuration hyperparameters. Inactive values are imputed with their default value. """ configs_array = np.array([config.get_array() for config in configs], dtype=np.float64) configuration_space = configs[0].configuration_space for hp in configuration_space.get_hyperparameters(): default = hp._inverse_transform(hp.default) idx = configuration_space.get_idx_by_hyperparameter_name(hp.name) # Create a mask which is True for all non-finite entries in column idx! column_mask = np.zeros(configs_array.shape, dtype=np.bool) column_mask[:, idx] = True nonfinite_mask = ~np.isfinite(configs_array) mask = column_mask & nonfinite_mask configs_array[mask] = default return configs_array
32,239
def annual_mean( start: Optional[datetime] = None, end: Optional[datetime] = None ) -> dict: """Get the annual mean data ---------------------------- Data from March 1958 through April 1974 have been obtained by C. David Keeling of the Scripps Institution of Oceanography (SIO) and were obtained from the Scripps website (scrippsco2.ucsd.edu). The estimated uncertainty in the annual mean is the standard deviation of the differences of annual mean values determined independently by NOAA/ESRL and the Scripps Institution of Oceanography. NOTE: In general, the data presented for the last year are subject to change, depending on recalibration of the reference gas mixtures used, and other quality control procedures. Occasionally, earlier years may also be changed for the same reasons. Usually these changes are minor. CO2 expressed as a mole fraction in dry air, micromol/mol, abbreviated as ppm """ if start and not isinstance(start, datetime): raise TypeError("Start must be a datetime object") if end and not isinstance(end, datetime): raise TypeError("End must be a datetime object") url = 'https://www.esrl.noaa.gov/gmd/webdata/ccgg/trends/co2/co2_annmean_mlo.txt' res = requests.get(url) raw = res.content.decode("utf-8") lines = raw.splitlines() _license = "\n".join(lines[:41]) description = "\n".join(lines[41:56]) headers = lines[56] mean = { "url": url, "license": _license, "description": description, "headers": headers, "raw": raw, "data": { "yr": [], "mean (ppm)": [], "unc": [], }, } # Parse data for row in lines[57:]: yr, ppm, unc = row.split() date = datetime(year=int(yr), month=1, day=1) if start and start > date: continue if end and end < date: break mean["data"]["yr"].append(yr) mean["data"]["mean (ppm)"].append(ppm) mean["data"]["unc"].append(unc) return mean
32,240
def test_exit_ok(insights_config, insights_client):
    """
    Support collection replaces the normal client run.
    """
    with raises(SystemExit) as exc_info:
        post_update()
    assert exc_info.value.code == 0
32,241
def exception(logger,extraLog=None): """ A decorator that wraps the passed in function and logs exceptions should one occur @param logger: The logging object """ print logger def decorator(func): print "call decorator" def wrapper(*args, **kwargs): print "call exception decor" print args print kwargs try: print "-----: normal" return func(*args, **kwargs) except: # log the exception err = "There was an exception in " err += func.__name__ #logger.exception(err) print "-----: except" logger.exception(err,extra=extraLog) # re-raise the exception raise return wrapper return decorator
32,242
def test_extract_licenses_wrong_file(mocked_function):
    """Test the function extract_licenses()."""
    # make sure the LRU cache is clear
    get_license_synonyms.cache.clear()
    get_license_synonyms()
    result = extract_licenses(["this-is-not-a-file"])
    assert not result
32,243
def read_external_sources(service_name):
    """
    Try to get config from external sources, with the following priority:
    1. Credentials file (ibm-credentials.env)
    2. Environment variables
    3. VCAP Services (Cloud Foundry)
    :param service_name: The service name
    :return: dict
    """
    config = {}

    config = read_from_credential_file(service_name)

    if not config:
        config = read_from_env_variables(service_name)

    if not config:
        config = read_from_vcap_services(service_name)

    return config
32,244
def test_multi_mws_failing_2(client: FlaskClient):
    """test Router.route with multiple middlewares"""
    headers = {'content-type': 'application/json'}
    resp = client.post('/multi-mws', headers=headers)

    assert resp.status_code == 401
    assert not resp.json.get('success')
    assert resp.json.get('message'), 'unauthorized access'
32,245
def test_validate_custom_integration_manifest(integration: Integration):
    """Test validate custom integration manifest."""

    with pytest.raises(vol.Invalid):
        integration.manifest["version"] = "lorem_ipsum"
        CUSTOM_INTEGRATION_MANIFEST_SCHEMA(integration.manifest)

    with pytest.raises(vol.Invalid):
        integration.manifest["version"] = None
        CUSTOM_INTEGRATION_MANIFEST_SCHEMA(integration.manifest)

    integration.manifest["version"] = "1"
    schema = CUSTOM_INTEGRATION_MANIFEST_SCHEMA(integration.manifest)
    assert schema["version"] == "1"
32,246
def intersect(box_a, box_b):
    """ We resize both tensors to [A,B,2] without new malloc:
    [A,2] -> [A,1,2] -> [A,B,2]
    [B,2] -> [1,B,2] -> [A,B,2]
    Then we compute the area of intersect between box_a and box_b.
    Args:
      box_a: (tensor) bounding boxes, Shape: [A,4].
      box_b: (tensor) bounding boxes, Shape: [B,4].
    Return:
      (tensor) intersection area, Shape: [A,B].
    """
    A = box_a.size(0)
    B = box_b.size(0)
    max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
                       box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
    min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
                       box_b[:, :2].unsqueeze(0).expand(A, B, 2))
    inter = torch.clamp((max_xy - min_xy), min=0)
    return inter[:, :, 0] * inter[:, :, 1]
32,247
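A brief check of the intersect snippet above on two hand-made boxes in (x1, y1, x2, y2) form; it assumes torch is imported and the function is in scope.

import torch

box_a = torch.tensor([[0., 0., 2., 2.]])   # one 2x2 box
box_b = torch.tensor([[1., 1., 3., 3.],    # overlaps box_a in a 1x1 square
                      [5., 5., 6., 6.]])   # no overlap
print(intersect(box_a, box_b))             # tensor([[1., 0.]])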
def tokenize(text):
    """
    tokenize text messages
    Input: text messages
    Output: list of tokens
    """
    # find urls and replace them with 'urlplaceholder'
    url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    text = re.sub(url_regex, 'urlplaceholder', text)

    # normalize case and remove punctuation
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())

    # tokenize text
    tokens = word_tokenize(text)

    # lemmatize and remove stop words
    stop_words = stopwords.words("english")
    lemmatizer = WordNetLemmatizer()
    tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]

    return tokens
32,248
def peakFindBottom(x, y, peaks, fig=None, verbose=1): """ Find the left bottom of a detected peak Args: x (array): independent variable data y (array): signal data peaks (list): list of detected peaks fig (None or int): if integer, then plot results verbose (int): verbosity level """ kk = np.ones(3) / 3. ys = scipy.ndimage.filters.correlate1d(y, kk, mode='nearest') peaks = copy.deepcopy(peaks) dy = np.diff(ys, n=1) dy = np.hstack((dy, [0])) kernel_size= [int(np.max([2, dy.size / 100])), ] dy = qtt.algorithms.generic.boxcar_filter(dy, kernel_size = kernel_size) for ii, peak in enumerate(peaks): if verbose: print('peakFindBottom: peak %d' % ii) if not peak['valid']: continue ind = range(peak['phalf0']) left_of_peak = 0 * y.copy() left_of_peak[ind] = 1 r = range(y.size) left_of_peak_and_decreasing = left_of_peak * (dy < 0) # set w to zero where the scan is increasing left_of_peak_and_decreasing[0] = 1 # make sure to stop at the left end of the scan... ww = left_of_peak_and_decreasing.nonzero()[0] if verbose >= 2: print(' peakFindBottom: size of decreasing area %d' % ww.size) if ww.size == 0: if peak['valid']: peak['valid'] = 0 peak['validreason'] = 'peakFindBottom' if verbose >= 2: print('peakFindBottom: invalid peak') print(ind) print(dy) continue bidx = ww[-1] peak['pbottomlow'] = bidx w = left_of_peak * (dy > 0) # we need to be rising # we need to be above 10% of absolute low value w = w * (ys < ys[bidx] + .1 * (ys[peak['p']] - ys[bidx])) w = w * (r >= peak['pbottomlow']) ww = w.nonzero()[0] if ww.size == 0: if peak['valid']: peak['valid'] = 0 peak['validreason'] = 'peakFindBottom' if verbose >= 2: print('peakFindBottom: invalid peak (%s)' % ('rising part ww.size == 0',)) print(w) print(ys) continue bidx = ww[-1] peak['pbottom'] = bidx peak['pbottoml'] = bidx peak['xbottom'] = x[bidx] peak['xbottoml'] = x[bidx] peak['vbottom'] = y[bidx] # legacy peak['ybottoml'] = y[bidx] if verbose >= 3: plt.figure(53) plt.clf() plt.plot(x[ind], 0 * np.array(ind) + 1, '.b', label='ind') plt.plot(x[range(y.size)], w, 'or', label='w') plt.plot(x[range(y.size)], dy < 0, 'dg', markersize=12, label='dy<0') pgeometry.enlargelims() pgeometry.plot2Dline([-1, 0, peak['x']], '--c', label='x') pgeometry.plot2Dline([-1, 0, x[peak['phalf0']]], '--y', label='phalf0') pgeometry.plot2Dline([-1, 0, x[peak['pbottomlow']]], ':k', label='pbottomlow') pgeometry.plot2Dline([-1, 0, peak['xbottoml']], '--y', label='xbottoml') plt.legend(loc=0) return peaks
32,249
def test_parallel_one_process() -> None: """ Tests that a `ParallelRunner` runs on a graph with only one process. """ runner = ParallelRunner( graph=MyLocalGraph( config=MySinkConfig(output_filename=PARALLEL_ONE_PROCESS_FILENAME) ) ) runner.run() remaining_numbers = {str(i) for i in range(NUM_MESSAGES)} with open(PARALLEL_ONE_PROCESS_FILENAME, "r") as output_file: lines = output_file.readlines() assert len(lines) == NUM_MESSAGES for line in lines: message = MyMessage2.fromdict(json.loads(line)) assert message.str_field in remaining_numbers remaining_numbers.remove(message.str_field) assert len(remaining_numbers) == 0 os.remove(PARALLEL_ONE_PROCESS_FILENAME)
32,250
def replacelast(string, old, new, count=1):
    """Replace the last occurrences of a string"""
    return new.join(string.rsplit(old, count))
32,251
def parse_config(config):
    """Parse the config dictionary for common objects.

    Currently only parses the following:
        * `directories` for relative path names.

    Args:
        config (dict): Config items.

    Returns:
        dict: Config items but with objects.
    """
    # Prepend the base directory to relative dirs
    if 'directories' in config:
        base_dir = os.getenv('PANDIR')
        for dir_name, rel_dir in config['directories'].items():
            abs_dir = os.path.normpath(os.path.join(base_dir, rel_dir))
            if abs_dir != rel_dir:
                config['directories'][dir_name] = abs_dir

    return config
32,252
def _safe_read_img(img):
    """ Read in tiff image if a path is given instead of np object. """
    img = imread(img) if isinstance(img, str) else np.array(img)
    return np.nan_to_num(img)
32,253
def max_version(*modules: Module) -> str:
    """Maximum version number of a sequence of modules/version strings

    See `get_version` for how version numbers are extracted. They are
    compared as `packaging.version.Version` objects.
    """
    return str(max(get_version(x) for x in modules))
32,254
def afire(self): """ Fire signal asynchronously """ self.logger.debug('Fired %r', self) for cls in self.__class__.__mro__: if hasattr(cls, '__handlers__'): self.logger.debug('Propagate on %r', cls) for handler in cls.__handlers__: try: self.logger.debug('Call %r', handler) result = handler(self) if asyncio.iscoroutine(result): yield from result except: self.logger.error('Failed on processing %r by %r', self, handler, exc_info=True)
32,255
def load_secret(name, default=None):
    """Check for and load a secret value mounted by Docker in /run/secrets."""
    try:
        with open(f"/run/secrets/{name}") as f:
            return f.read().strip()
    except Exception:
        return default
32,256
def snake_case(x): """ Converts a string to snake case """ # Disclaimer: This method is annoyingly complex, and i'm sure there is a much better way to do this. # The idea is to iterate through the characters # in the string, checking for specific cases and handling them accordingly. One note, # the built it isupper() and islower() methods will consider an underscore False. # The process looks like this: # First, we will check if the current character is uppercase, if its not, we simply insert # that character into the new string as is. # Second, we need to see if it's the first character of the string. if it is, we will need # to check if it is part of an acronym that should stay capitalized, even in snake case(e.g. XML, JSON, HTML). # We do this by looking at the next character and checking if it is also capitalized. If it # is, we will insert the character in capital form, if not, we will lowercase it and insert it. # If the current character is NOT the first character of the string, we still need to determine # if it is part of an acronym. The same process is applied except now we also look at the previous # character to see if it is capitalized. If it is, we can assume this is part of an acronym. # If the next character is uppercase, but the previous one isn't, than we assume it is part of # an acronym and insert it in uppercase form. now, when checking if the previous character is lowercase during our acronym check, # it is possible that islower() will return False because the character before it is an underscore. This means # We have to handle both possibilities. x = sub('\s+', '_', x) # First, we go ahead and replace any consecutive spaces with underscores out = '' for i, char in enumerate(x): if char.isupper(): # Get the next and previous characters for later use next_char = x[i + 1] previous_char = x[i - 1] if not i == 0: # Check if we are not at the first character if previous_char.islower(): out += '_' if next_char.islower() or next_char == '_': out += char.lower() continue elif previous_char == '_': if next_char.islower() or next_char == '_': out += char.lower() continue elif next_char.isupper(): out += char continue else: out += char.lower() continue elif not char == '_' and x[i - 1].isupper() and x[i - 2].isupper(): # This could be a lowercased word following an acronym without any spaces out += '_' # We will insert an underscore to break this character into its own word elif char == '_' and x[i - 1] == '_': continue out += char if out.endswith('_'): out = out[:len(out) - 1] return out
32,257
async def process_manga(data_list: list[dict], image_path: str) -> str:
    """Whiten out and typeset the text regions of a single image.

    Args:
        data_list (list[dict]): text recognized by OCR, re-wrapped into dicts
        image_path (str): path the image was downloaded to (also used as the
            final path the result is saved over)

    Returns:
        str: the saved path
    """
    image = Image.open(image_path).convert("RGB")
    for i in data_list:
        image = await draw_white(image, i)
        if i["is_vertical"]:
            image = await add_text_for_manga(image, i)
        else:
            image = await add_text(image, i)
    image.save(image_path)
    return image_path
32,258
def separate(a, start=0, n=None):
    """In-place decimation-in-time: evens->first half, odd->second half

    `start` and `n` (length) specify the view into `a`: this function will
    only modify the `a[start:start + n]` sub-section of `a`.
    """
    n = n or len(a)
    b = a[(start + 1):(start + n):2]
    a[start:(start + n // 2)] = a[start:(start + n):2]
    a[(start + n // 2):(start + n)] = b[:]
32,259
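A short sketch showing the in-place reordering performed by separate above on a plain list.

a = [0, 1, 2, 3, 4, 5, 6, 7]
separate(a)
print(a)   # [0, 2, 4, 6, 1, 3, 5, 7]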
def padded_nd_indices(is_valid, shuffle=False, seed=None): """Pads the invalid entries by valid ones and returns the nd_indices. For example, when we have a batch_size = 1 and list_size = 3. Only the first 2 entries are valid. We have: ``` is_valid = [[True, True, False]] nd_indices, mask = padded_nd_indices(is_valid) ``` nd_indices has a shape [1, 3, 2] and mask has a shape [1, 3]. ``` nd_indices = [[[0, 0], [0, 1], [0, 0]]] mask = [[True, True, False]] ``` nd_indices can be used by gather_nd on a Tensor t ``` padded_t = tf.gather_nd(t, nd_indices) ``` and get the following Tensor with first 2 dims are [1, 3]: ``` padded_t = [[t(0, 0), t(0, 1), t(0, 0)]] ``` Args: is_valid: A boolean `Tensor` for entry validity with shape [batch_size, list_size]. shuffle: A boolean that indicates whether valid indices should be shuffled. seed: Random seed for shuffle. Returns: A tuple of Tensors (nd_indices, mask). The first has shape [batch_size, list_size, 2] and it can be used in gather_nd or scatter_nd. The second has the shape of [batch_size, list_size] with value True for valid indices. """ with tf.compat.v1.name_scope(name='nd_indices_with_padding'): is_valid = tf.convert_to_tensor(value=is_valid) list_size = tf.shape(input=is_valid)[1] num_valid_entries = tf.reduce_sum( input_tensor=tf.cast(is_valid, dtype=tf.int32), axis=1) indices, mask = _circular_indices(list_size, num_valid_entries) # Valid indices of the tensor are shuffled and put on the top. # [batch_size, list_size, 2]. shuffled_indices = organize_valid_indices( is_valid, shuffle=shuffle, seed=seed) # Construct indices for gather_nd [batch_size, list_size, 2]. nd_indices = _to_nd_indices(indices) nd_indices = tf.gather_nd(shuffled_indices, nd_indices) return nd_indices, mask
32,260
def test_hourlies():
    """ BDD Scenario. """
32,261
def part_two(data: str) -> int:
    """The smallest number leading to an md5 hash with six leading zeros for data."""
    return smallest_number_satisfying(data, starts_with_six_zeros)
32,262
def _read_stream(fd, fn):
    """Reads bytes from a file descriptor, utf-8 decodes them, and passes them
    to the provided callback function on the next IOLoop tick.

    Assumes fd.read will block and should be used in a thread.
    """
    while True:
        # Specify a max read size so the read doesn't block indefinitely
        # Using a value less than the typical default max pipe size
        # and greater than a single system page.
        buff = fd.read(8192)
        if buff:
            fn(buff.decode('utf-8'))
32,263
def showlatesttag(context, mapping):
    """List of strings. The global tags on the most recent globally tagged
    ancestor of this changeset. If no such tags exist, the list consists of
    the single string "null".
    """
    return showlatesttags(context, mapping, None)
32,264
def cat(file_path, encoding="utf-8", errors="strict"): """ .. code: ^-^ (-.-) |.| / \\ | | _/ | || | | \_||_/_/ :param file_path: Path to file to read :param encoding: defaults to utf-8 to decode as, will fail on binary :param errors: Decoding errors: 'strict', 'ignore' or 'replace' """ with open(file_path, "rb") as f: if python_version >= (2, 7): print(f.read().decode(encoding, errors=errors)) else: print(f.read().decode(encoding))
32,265
def list_filters():
    """ List all filters """
    filters = [_serialize_filter(imgfilter) for imgfilter in FILTERS.values()]
    return response_list(filters)
32,266
def test_compliant_imei(mocked_imei_data):
    """Tests compliant IMEI"""
    compliant_imei_response = mocked_imei_data['compliant']
    response = CommonResources.compliance_status(compliant_imei_response, 'basic')
    assert "Compliant" in response['compliant']['status']
32,267
def len_adecuada(palabra, desde, hasta):
    """
    (str, int, int) -> str

    Checks whether the length of the word is within the desired range.
    The returned message is in Spanish, as produced by the format string below.

    >>> len_adecuada('hola', 0, 100)
    'La longitud de hola, está entre 0 y 100'

    >>> len_adecuada('hola', 1, 2)
    'La longitud de hola, no está entre 1 y 2'

    :param palabra: the word to check
    :param desde: lower bound of the range
    :param hasta: upper bound of the range
    :return: message stating whether the length is in range
    """
    return 'La longitud de {0}, {1}está entre {2} y {3}'\
        .format(palabra,
                "" if desde <= len(palabra) <= hasta else "no ",
                desde,
                hasta)
32,268
def test_alpha_path(scale_predictors, fit_intercept, P1):
    """Test regularization path."""
    if scale_predictors and not fit_intercept:
        return
    np.random.seed(1234)
    y = np.random.choice([1, 2, 3, 4], size=100)
    X = np.random.randn(100, 5) * np.array([1, 5, 10, 25, 100])

    model = GeneralizedLinearRegressor(
        family="poisson",
        alpha_search=True,
        l1_ratio=1,
        n_alphas=10,
        scale_predictors=scale_predictors,
        fit_intercept=fit_intercept,
        P1=P1,
    )
    model.fit(X=X, y=y)

    # the maximum alpha results in all-zero coefficients
    np.testing.assert_almost_equal(model.coef_path_[0], 0)
    # the next alpha gives at least one non-zero coefficient
    assert np.any(model.coef_path_[1] > 0)
32,269
def ECEF_from_ENU(enu, latitude, longitude, altitude): """ Calculate ECEF coordinates from local ENU (east, north, up) coordinates. Args: enu: numpy array, shape (Npts, 3), with local ENU coordinates latitude: latitude of center of ENU coordinates in radians longitude: longitude of center of ENU coordinates in radians Returns: numpy array, shape (Npts, 3), with ECEF x,y,z coordinates """ enu = np.array(enu) if enu.ndim > 1 and enu.shape[1] != 3: if enu.shape[0] == 3: warnings.warn('The expected shape of the ENU array is (Npts, 3). ' 'Support for arrays shaped (3, Npts) will go away in a ' 'future version.', PendingDeprecationWarning) enu_use = enu.T transpose = True else: raise ValueError('The expected shape of the ENU array array is (Npts, 3).') else: enu_use = enu transpose = False if enu.shape == (3, 3): warnings.warn('The enu array in ECEF_from_ENU is being ' 'interpreted as (Npts, 3). Historically this function ' 'has supported (3, Npts) arrays, please verify that ' 'array ordering is as expected.', PendingDeprecationWarning) if enu_use.ndim == 1: enu_use = enu_use[np.newaxis, :] xyz = np.zeros_like(enu_use) xyz[:, 0] = (-np.sin(latitude) * np.cos(longitude) * enu_use[:, 1] - np.sin(longitude) * enu_use[:, 0] + np.cos(latitude) * np.cos(longitude) * enu_use[:, 2]) xyz[:, 1] = (-np.sin(latitude) * np.sin(longitude) * enu_use[:, 1] + np.cos(longitude) * enu_use[:, 0] + np.cos(latitude) * np.sin(longitude) * enu_use[:, 2]) xyz[:, 2] = (np.cos(latitude) * enu_use[:, 1] + np.sin(latitude) * enu_use[:, 2]) xyz_center = XYZ_from_LatLonAlt(latitude, longitude, altitude) xyz[:, 0] = xyz[:, 0] + xyz_center[0] xyz[:, 1] = xyz[:, 1] + xyz_center[1] xyz[:, 2] = xyz[:, 2] + xyz_center[2] if len(enu.shape) == 1: xyz = np.squeeze(xyz) elif transpose: return xyz.T return xyz
32,270
def showresults(options=''): """ Generate and plot results from a kima run. The argument `options` should be a string with the same options as for the kima-showresults script. """ # force correct CLI arguments args = _parse_args(options) plots = [] if args.rv: plots.append('6') if args.planets: plots.append('1') if args.orbital: plots.append('2'); plots.append('3') if args.gp: plots.append('4'); plots.append('5') if args.extra: plots.append('7') for number in args.plot_number: plots.append(number) try: evidence, H, logx_samples = postprocess(plot=args.diagnostic) except IOError as e: print(e) sys.exit(1) res = KimaResults(list(set(plots))) show() # render the plots # __main__.__file__ doesn't exist in the interactive interpreter if not hasattr(__main__, '__file__'): return res
32,271
def convert_as_number(symbol: str) -> float:
    """
    handle cases:
        ' ' or ''       -> 0
        '10.95%'        -> 10.95
        '$404,691,250'  -> 404691250
        '$8105.52'      -> 8105.52
    :param symbol: string
    :return: float
    """
    result = symbol.strip()
    if len(result) == 0:
        return 0
    result = re.sub('[%$, *]', '', result)
    return float(result)
32,272
def mgus(path):
    """Monoclonal gammopathy data

    Natural history of 241 subjects with monoclonal gammopathy of
    undetermined significance (MGUS).

    mgus: A data frame with 241 observations on the following 12 variables.

    id: subject id
    age: age in years at the detection of MGUS
    sex: `male` or `female`
    dxyr: year of diagnosis
    pcdx: for subjects who progress to a plasma cell malignancy the subtype of
        malignancy: multiple myeloma (MM) is the most common, followed by
        amyloidosis (AM), macroglobulinemia (MA), and other
        lymphoproliferative disorders (LP)
    pctime: days from MGUS until diagnosis of a plasma cell malignancy
    futime: days from diagnosis to last follow-up
    death: 1= follow-up is until death
    alb: albumin level at MGUS diagnosis
    creat: creatinine at MGUS diagnosis
    hgb: hemoglobin at MGUS diagnosis
    mspike: size of the monoclonal protein spike at diagnosis

    mgus1: The same data set in start,stop format. Contains the id, age, sex,
    and laboratory variable described above along with

    +----------------+----------------------------------------------------+
    | start, stop:   | sequential intervals of time for each subject      |
    +----------------+----------------------------------------------------+
    | status:        | =1 if the interval ends in an event                |
    +----------------+----------------------------------------------------+
    | event:         | a factor containing the event type: censor, death, |
    |                | or plasma cell malignancy                          |
    +----------------+----------------------------------------------------+
    | enum:          | event number for each subject: 1 or 2              |
    +----------------+----------------------------------------------------+

    Mayo Clinic data courtesy of Dr. Robert Kyle.

    Args:
        path: str.
            Path to directory which either stores file or otherwise file will
            be downloaded and extracted there.
            Filename is `mgus.csv`.

    Returns:
        Tuple of np.ndarray `x_train` with 241 rows and 12 columns and
        dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd

    path = os.path.expanduser(path)
    filename = 'mgus.csv'
    if not os.path.exists(os.path.join(path, filename)):
        url = 'http://dustintran.com/data/r/survival/mgus.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='mgus.csv',
                                   resume=False)

    data = pd.read_csv(os.path.join(path, filename), index_col=0,
                       parse_dates=True)
    x_train = data.values
    metadata = {'columns': data.columns}
    return x_train, metadata
32,273
def smiles_dict(): """Store SMILES for compounds used in test cases here.""" smiles = { "ATP": "Nc1ncnc2c1ncn2[C@@H]1O[C@H](COP(=O)(O)OP(=O)(O)OP(=O)(O)O)[C" + "@@H](O)[C@H]1O", "ADP": "Nc1ncnc2c1ncn2[C@@H]1O[C@H](COP(=O)(O)OP(=O)(O)O)[C@@H](O)[C" + "@H]1O", "meh": "CCC(=O)C(=O)O", "l_ala": "C[C@H](N)C(=O)O", "d_ala": "C[C@@H](N)C(=O)O", "FADH": "Cc1cc2c(cc1C)N(CC(O)C(O)C(O)COP(=O)(O)OP(=O)(O)OCC1OC(n3cnc" + "4c(N)ncnc43)C(O)C1O)c1nc(O)nc(O)c1N2", "S-Adenosylmethionine": "C[S+](CC[C@H](N)C(=O)O)C[C@H]1O[C@@H](n2cnc" + "3c(N)ncnc32)[C@H](O)[C@@H]1O", } return smiles
32,274
def prepare_parser() -> ArgumentParser: """Create all CLI parsers/subparsers.""" # Handle core parser args parser = ArgumentParser( description="Learning (Hopefully) Safe Agents in Gridworlds" ) handle_parser_args({"core": parser}, "core", core_parser_configs) # Handle environment subparser args env_subparsers = parser.add_subparsers( help="Types of gridworld environments", dest="env_alias" ) env_subparsers.required = True env_parsers = {} for env_name in ENV_MAP: env_parsers[env_name] = env_subparsers.add_parser(env_name) handle_parser_args(env_parsers, env_name, env_parser_configs) # Handle agent subparser args agent_subparsers = {} for env_name, env_parser in env_subparsers.choices.items(): agent_parser_configs = deepcopy(stashed_agent_parser_configs) agent_subparsers[env_name] = env_parser.add_subparsers( help="Types of agents", dest="agent_alias" ) agent_subparsers[env_name].required = True agent_parsers = {} for agent_name in AGENT_MAP: agent_parsers[agent_name] = agent_subparsers[env_name].add_parser( agent_name ) handle_parser_args(agent_parsers, agent_name, agent_parser_configs) return parser
32,275
def import_google(authsub_token, user): """ Uses the given AuthSub token to retrieve Google Contacts and import the entries with an email address into the contacts of the given user. Returns a tuple of (number imported, total number of entries). """ contacts_service = gdata.contacts.service.ContactsService() contacts_service.auth_token = authsub_token contacts_service.UpgradeToSessionToken() entries = [] feed = contacts_service.GetContactsFeed() entries.extend(feed.entry) next_link = feed.GetNextLink() while next_link: feed = contacts_service.GetContactsFeed(uri=next_link.href) entries.extend(feed.entry) next_link = feed.GetNextLink() total = 0 imported = 0 for entry in entries: name = entry.title.text for e in entry.email: email = e.address total += 1 try: Contact.objects.get(user=user, email=email) except Contact.DoesNotExist: Contact(user=user, name=name, email=email).save() imported += 1 return imported, total
32,276
def level_6(nothing_value):
    """Given a starting value for the 'nothing' value, follow the chain."""
    filename = nothing_value + '.txt'
    comments = []
    with zipfile.ZipFile(ZIPFILE) as zip:
        while True:
            print(" [*] Contents of {}:".format(filename))
            with zip.open(filename) as f:
                contents = f.read()
            print(" [+] {}".format(contents))
            comments.append(zip.getinfo(filename).comment)
            if not contents.startswith(b'Next nothing is'):
                break
            filename = contents.decode().split()[-1] + '.txt'
    print(b''.join(comments).decode())
32,277
def getGpsTime(dt):
    """getGpsTime returns gps time (seconds since midnight Sat/Sun) for a datetime
    """
    total = 0
    days = (dt.weekday() + 1) % 7  # this makes Sunday = 0, Monday = 1, etc.
    total += days * 3600 * 24
    total += dt.hour * 3600
    total += dt.minute * 60
    total += dt.second
    return total
32,278
def class_logger(module_logger, attribute="logger"):
    """
    Class decorator to add a class-level Logger object as a class attribute.
    This allows control of debugging messages at the class level rather than
    just the module level. This decorator takes the module logger as an
    argument.
    """
    def decorator(cls):
        setattr(cls, attribute, module_logger.getChild(cls.__name__))
        return cls
    return decorator
32,279
def validate_date(date, flash_errors=True):
    """
    Validates date string. Format should be YYYY-MM-DD.
    Flashes errors if flash_errors is True.
    """
    try:
        datetime.datetime.strptime(date, '%Y-%m-%d')
    except ValueError:
        if flash_errors:
            flask.flash('Invalid date provided. Make sure dates are in YYYY-MM-DD format.')
        return False
    return True
32,280
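A couple of hypothetical calls for validate_date above, with flashing disabled so no Flask app context is needed.

validate_date('2021-03-14', flash_errors=False)   # True
validate_date('14/03/2021', flash_errors=False)   # False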
def makeImg(qr: object, filename: str) -> None:
    """
    Convert the qr code into '*.png' format and save it under 'qr_img'
    """
    mat = _magnify(qr.get_qr_matrix_with_margins())
    img = Image.fromarray((mat * 255).astype(np.uint8), 'L')
    _saveImg(img, filename)
32,281
def generate_error_map(image, losses, box_lenght):
    """
    Function to overlay an error map on an image.

    Args:
        image: input image
        losses: list of losses, one for each masked part of the flow.

    Returns:
        error_map: overlapped error_heatmap and image.
    """
    box_lenght = int(box_lenght)
    # Assert that everything is correct
    num_boxes = int(image.shape[0] / box_lenght) * int(image.shape[1] / box_lenght)
    assert(num_boxes == len(losses))

    img_width = int(np.floor(image.shape[1] / box_lenght) * box_lenght)
    img_height = int(np.floor(image.shape[0] / box_lenght) * box_lenght)

    image = image[:img_height, :img_width]
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    heatmap = np.ones_like(image[:, :, 0])
    res_heatmap = np.reshape(heatmap, (box_lenght, box_lenght, num_boxes))
    res_heatmap = res_heatmap * np.array(losses)

    heatmap = np.zeros((img_height, img_width))
    # ugly for loop, unable to solve atm
    i = 0
    for y in np.arange(0, img_height, step=box_lenght):
        for x in np.arange(0, img_width, step=box_lenght):
            # convert to x,y coordinates
            heatmap[y: y + box_lenght, x: x + box_lenght] = res_heatmap[:, :, i]
            i += 1

    heatmap = np.asarray(heatmap / np.max(heatmap) * 255, dtype=np.uint8)
    heatmap_img = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
    final = cv2.addWeighted(heatmap_img, 0.5, postprocess_image(image), 0.5, 0)
    return final
32,282
def get_order(oid):  # noqa: E501
    """Gets an existing order by order id  # noqa: E501

    :param oid:
    :type oid: str

    :rtype: Order
    """
    oid = int(oid)
    msg = "error retrieving order"
    ret_code = 400

    if oid in orders:
        msg = {"status": f"order retrieved", "order": orders[oid], "oid": oid}
        ret_code = 200
    else:
        msg = f"Order: {oid} could not be found"
        ret_code = 404

    return msg, ret_code
32,283
def remove_page(page_id):
    """Remove a page from the list of pages to be scraped"""
    if isfile(PAGES_F_PATH) and getsize(PAGES_F_PATH) != 0:
        with open(PAGES_F_PATH, 'r+') as pages_file:
            try:
                pages = json.load(pages_file)
                # Filter the list instead of removing items from it while iterating over it
                kept_pages = [page for page in pages if page['id'] != page_id]
                if len(kept_pages) != len(pages):
                    page_cache_f_path = "%s/%s.html" % (CACHE_DIR, page_id)
                    try:
                        os.remove(page_cache_f_path)
                    except OSError:
                        pass
                try:
                    pages_file.seek(0)
                    pages_file.truncate(0)
                    json.dump(kept_pages, pages_file)
                except Exception as ex:
                    print("ERROR: %s" % ex)
            except JSONDecodeError:
                print(messages.CORRUPTED_PAGES_F_ERR)
            except Exception as ex:
                print("ERROR: %s" % ex)
    else:
        print(messages.NO_PAGES)
32,284
def run_with_lightning(model, data_loader, experiment: str, hp: Params, gpus: int, cpus: int, max_epochs: int,
                       best_model_callback, out_data: List, run=1, monitor='val_loss', stop_patience=15):
    """
    Function to run with lightning with each combo - for hp tuning.
    1. Everything is logged through tensorboard.
    2. EarlyStopping will stop a run at a default patience of 15 (configurable via stop_patience).
    3. Saves the best model found per run according to the monitored value. Not sure how to do more than
       val_loss at this time.
    4. Writes a JSON file with all output data and hp inputs for IAE
    5. Writes a txt file to get run number with hyperparameters.
    :return:
    """
    early_stop_callback = EarlyStopping(monitor='val_loss', min_delta=0.00, patience=stop_patience, verbose=True,
                                        mode='min')
    per_permute_model = ModelCheckpoint(
        monitor=monitor,
        dirpath=experiment+'/chk/'+str(run),
        filename='best_hp-{epoch:02d}-{val_loss:.4f}',
        save_top_k=1,
        mode='min',
    )
    # Note: without default_hp_metric=False, hyperparameter metrics will NOT actually be logged.
    tb_logger = pl_loggers.TensorBoardLogger(experiment+'/logs/'+str(run), default_hp_metric=False)
    trainer = Trainer(logger=tb_logger,
                      callbacks=[best_model_callback, early_stop_callback, per_permute_model],
                      max_epochs=max_epochs, min_epochs=1, auto_lr_find=False, auto_scale_batch_size=False,
                      gpus=gpus, num_processes=cpus, progress_bar_refresh_rate=1, num_sanity_val_steps=0)
    trainer.fit(model, data_loader)
    print("Done fitting for run", run, "- evaluating best model.")
    best_model = per_permute_model.best_model_path
    print('best_model', best_model)
    # Add optional model path. If we do that, we need to also set up the dataloader.
    trainer.test(ckpt_path=best_model)

    # Write resultant data to a json for easy analysis in IAE
    out_df_data = dict()
    out_df_data.update(trainer.model.out_data)
    out_df_data.update(hp.data)
    out_data.append(out_df_data)
    out_df = pandas.DataFrame.from_dict(out_data)
    out_df.to_json(experiment+'/out_data.json', orient='records', lines=True)

    # Write hyperparameters to a text file for quick lookup.
    key_file = experiment+'/run_keys.txt'
    mode = 'w' if not os.path.exists(key_file) else 'a'
    with open(key_file, mode) as exp_keys:
        out = []
        for key in hp.data:
            out.append(key + "=" + str(hp[key]))
        exp_keys.write(str(run) + " : " + " ".join(out) + '\n\n')
    print("Done hp run", run)
32,285
def delete_object(client, args): """ Removes a file from a bucket """ parser = argparse.ArgumentParser(PLUGIN_BASE+' del') parser.add_argument('bucket', metavar='BUCKET', type=str, help="The bucket to delete from.") parser.add_argument('file', metavar='OBJECT', type=str, help="The object to remove.") parsed = parser.parse_args(args) # get the key to delete try: bucket = client.get_bucket(parsed.bucket) except S3ResponseError: print('No bucket named '+parsed.bucket) sys.exit(2) k = bucket.get_key(parsed.file) if k is None: print('No {} in {}'.format(parsed.file, parsed.bucket)) sys.exit(2) # delete the key k.delete() print('{} removed from {}'.format(parsed.file, parsed.bucket))
32,286
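# Hedged usage sketch for delete_object above: `client` is assumed to be a boto S3
# connection and the bucket/object names below are placeholders.
#
#   delete_object(client, ['my-bucket', 'reports/old.csv'])
#   # prints "reports/old.csv removed from my-bucket" on success, exits with code 2 otherwise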
def test_atomic_integer_enumeration_nistxml_sv_iv_atomic_integer_enumeration_1_2(mode, save_output, output_format): """ Type atomic/integer is restricted by facet enumeration. """ assert_bindings( schema="nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-enumeration-1.xsd", instance="nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-enumeration-1-2.xml", class_name="NistschemaSvIvAtomicIntegerEnumeration1", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
32,287
def most_recent_assembly(assembly_list): """Based on assembly summaries find the one submitted the most recently""" if assembly_list: return sorted(assembly_list, key=operator.itemgetter('submissiondate'))[-1]
32,288
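# Small sketch for most_recent_assembly above, using made-up assembly summaries:
# sorting on 'submissiondate' means the last element is the newest submission.
assemblies = [
    {"name": "asm_old", "submissiondate": "2019/03/01"},
    {"name": "asm_new", "submissiondate": "2021/07/15"},
]
assert most_recent_assembly(assemblies)["name"] == "asm_new"
assert most_recent_assembly([]) is None  # empty input falls through and returns None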
def dict_from_xml_text(xml_text, fix_ampersands=False):
    """
    Convert an xml string to a dictionary of values
    :param xml_text: valid xml string
    :param fix_ampersands: additionally replace bare '&' characters with '&amp;' before parsing to etree
    :return: dictionary of data
    """
    if fix_ampersands:
        # Only escape ampersands that do not already start an entity reference,
        # so existing entities such as '&amp;' are not double-encoded.
        xml_text = re.sub(r'&(?![A-Za-z]+;|#[0-9]+;|#x[0-9A-Fa-f]+;)', '&amp;', xml_text)
    root = Etree.fromstring(xml_text)
    return dict_from_etree(root)
32,289
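# Hedged sketch for dict_from_xml_text above: the exact output shape depends on the
# accompanying dict_from_etree helper, so only the call pattern is shown here.
#
#   dict_from_xml_text('<person><name>Tom &amp; Jerry</name></person>')
#   dict_from_xml_text('<person><name>Tom & Jerry</name></person>', fix_ampersands=True)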
def find_encryption_key(loop_size, subject_number): """Find encryption key from the subject_number and loop_size.""" value = 1 for _ in range(loop_size): value = transform_value(value, subject_number) return value
32,290
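# Sketch for find_encryption_key above, assuming transform_value implements the usual
# Advent of Code 2020 day 25 step (value = value * subject_number % 20201227).
# With the published example values, a card loop size of 8 and the door's public key
# 17807724 yield the encryption key 14897079.
#
#   find_encryption_key(8, 17807724)  # -> 14897079 (under the assumption above)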
def test_status_string(): """status_string should be the string version of the status code.""" assert gx.GrpcException().status_string == "UNKNOWN" assert ( gx.GrpcException(status_code=grpc.StatusCode.NOT_FOUND).status_string == "NOT_FOUND" ) assert gx.NotFound().status_string == "NOT_FOUND"
32,291
def carla_rotation_to_RPY(carla_rotation): """ Convert a carla rotation to a roll, pitch, yaw tuple Considers the conversion from left-handed system (unreal) to right-handed system (ROS). Considers the conversion from degrees (carla) to radians (ROS). :param carla_rotation: the carla rotation :type carla_rotation: carla.Rotation :return: a tuple with 3 elements (roll, pitch, yaw) :rtype: tuple """ roll = -math.radians(carla_rotation.roll) pitch = -math.radians(carla_rotation.pitch) yaw = -math.radians(carla_rotation.yaw) return (roll, pitch, yaw)
32,292
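# Hedged sketch for carla_rotation_to_RPY above (requires the carla package): a
# 90 degree left-handed yaw in carla becomes about -pi/2 radians in the right-handed
# ROS convention.
#
#   rotation = carla.Rotation(pitch=0.0, yaw=90.0, roll=0.0)
#   roll, pitch, yaw = carla_rotation_to_RPY(rotation)
#   # roll == -0.0, pitch == -0.0, yaw == -1.5707963...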
def compute_p1_curl_transformation(space, quadrature_order): """ Compute the transformation of P1 space coefficients to surface curl values. Returns two lists, curl_transforms and curl_transforms_transpose. The jth matrix in curl_transforms is the map from P1 function space coefficients (or extended space built upon P1 type spaces) to the jth component of the surface curl evaluated at the quadrature points, multiplied with the quadrature weights and integration element. The list curl_transforms_transpose contains the transpose of these matrices. """ from bempp.api.integration.triangle_gauss import rule from scipy.sparse import coo_matrix from scipy.sparse.linalg import aslinearoperator grid_data = space.grid.data("double") number_of_elements = space.grid.number_of_elements quad_points, weights = rule(quadrature_order) npoints = len(weights) dof_count = space.localised_space.grid_dof_count data, iind, jind = compute_p1_curl_transformation_impl( grid_data, space.support_elements, space.normal_multipliers, quad_points, weights, ) curl_transforms = [] curl_transforms_transpose = [] for index in range(3): curl_transforms.append( aslinearoperator( coo_matrix( (data[index, :], (iind, jind)), shape=(npoints * number_of_elements, dof_count), ).tocsr() ) @ aslinearoperator(space.map_to_localised_space) @ aslinearoperator(space.dof_transformation) ) curl_transforms_transpose.append( aslinearoperator(space.dof_transformation.T) @ aslinearoperator(space.map_to_localised_space.T) @ aslinearoperator( coo_matrix( (data[index, :], (jind, iind)), shape=(dof_count, npoints * number_of_elements), ).tocsr() ) ) return curl_transforms, curl_transforms_transpose
32,293
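# Hedged usage sketch for compute_p1_curl_transformation above (assumes bempp-cl is
# installed; the grid and space construction below is for illustration only).
#
#   import bempp.api
#   grid = bempp.api.shapes.regular_sphere(2)
#   space = bempp.api.function_space(grid, "P", 1)
#   curl_ops, curl_ops_t = compute_p1_curl_transformation(space, quadrature_order=4)
#   # curl_ops[j] maps P1 coefficients to the j-th component of the surface curl at the
#   # quadrature points, weighted by the quadrature weights and integration element.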
def _get_individual_id(individual) -> str:
    """
    Returns a unique identifier as string for the given individual.

    :param individual: The individual to get the ID for.
    :return: A string representing the ID.
    """
    if hasattr(individual, "identifier"):
        identifier = individual.identifier
        # A non-empty list of int/str identifiers: use the first entry.
        if isinstance(identifier, list) and len(identifier) > 0 and type(identifier[0]) in [int, str]:
            return str(identifier[0])
        # A plain int/str identifier: use it directly.
        if type(identifier) in [int, str]:
            return str(identifier)
    return str(individual)
32,294
def collate(root, collators=DEFAULT_COLLATORS):
    """
    ScrollNode collator. Modifies tree in place.

    Takes two arguments, a root node, and a dict of collation functions.
    Each key in this dict is a node kind, and each value is a string, function pair:
    the string is the joining string (see str.join) and the function turns a given
    node into a string.
    """
    nodegen = iter(root.nodes)
    root.nodes = []
    node = next(nodegen, None)
    while node is not None:
        root.nodes.append(node)
        if node.kind in collators:
            adjns = [node]
            next_node = collect_nodes(node.kind, adjns, nodegen)
            jc, fn = collators[node.kind]
            node.value = jc.join(fn(n.value) for n in adjns)
        else:
            next_node = next(nodegen, None)
        node = next_node
32,295
def get_self_url(d):
    """Returns the URL of a resource: html_url for a PullRequest object, otherwise the
    'self' link of a raw Stash resource dict."""
    return d.html_url if isinstance(d, PullRequest) else d["links"]["self"][0]["href"]
32,296
def shift(arr, *args):
    """
    **WARNING** The ``Si`` arguments can be either a single array containing the shift
    parameters for each dimension, or a sequence of up to eight scalar shift values.
    For arrays of more than one dimension, the parameter ``Sn`` specifies the shift
    applied to the n-th dimension.

    While this implementation supports lists as the ``arr`` argument (to match the style
    of IDL), the IDLpy bridge does *not* support lists, and returns them *unchanged*!

    If ``SHIFT`` is used in combination with ``FFT``, maybe you should look at
    ``np.fft.fftshift``.
    """
    arr = np.asarray(arr)  # accept list (see note above)
    if arr.ndim == 1:
        if len(args) == 1:
            return np.roll(arr, _int_list(args))
    elif arr.ndim == 2:
        if len(args) == 1:
            return np.roll(arr, _int_list(args))
        if len(args) == 2:
            return np.roll(arr, _int_list(args)[::-1], axis=(0, 1))
    elif arr.ndim == 3:
        if len(args) == 1:
            return np.roll(arr, _int_list(args))
        if len(args) == 3:
            return np.roll(arr, _int_list(args)[::-1], axis=(0, 1, 2))
        raise IDLException("Incorrect number of arguments.")
    raise NotImplementedError("shift does only work for 1D, 2D and 3D arrays.")
32,297
def ConvertToMeaningfulConstant(pset): """ Gets the flux constant, and quotes it above some energy minimum Emin """ # Units: IF TOBS were in yr, it would be smaller, and raw const greater. # also converts per Mpcs into per Gpc3 units=1e9*365.25 const = (10**pset[7])*units # to cubic Gpc and days to year Eref=1e40 #erg per Hz Emin=10**pset[0] Emax=10**pset[1] gamma=pset[3] factor=(Eref/Emin)**gamma - (Emax/Emin)**gamma const *= factor return const
32,298
def stop_child_processes() -> None:
    """Stops sub processes (for meetings and events) triggered by child processes."""
    with db.connection:
        cursor_ = db.connection.cursor()
        children = cursor_.execute("SELECT meetings, events FROM children").fetchone()
        for pid in children:
            if not pid:
                continue
            try:
                proc = psutil.Process(pid)
            except psutil.NoSuchProcess as error:
                logger.error(error)
                continue
            if proc.is_running():
                logger.info(f"Sending [SIGTERM] to child process with PID: {pid}")
                proc.terminate()
            if proc.is_running():
                logger.info(f"Sending [SIGKILL] to child process with PID: {pid}")
                proc.kill()
32,299