Columns: content (string, lengths 22 to 815k), id (int64, values 0 to 4.91M).
def _async_untrack_devices(hass: HomeAssistant, entry: ConfigEntry) -> None: """Remove tracking for devices owned by this config entry.""" devices = hass.data[DOMAIN][NMAP_TRACKED_DEVICES] remove_mac_addresses = [ mac_address for mac_address, entry_id in devices.config_entry_owner.items() if entry_id == entry.entry_id ] for mac_address in remove_mac_addresses: if device := devices.tracked.pop(mac_address, None): devices.ipv4_last_mac.pop(device.ipv4, None) del devices.config_entry_owner[mac_address]
10,800
def test_greyscale_display(): """ SSD1362 OLED screen can draw and display a greyscale image. """ device = ssd1362(serial, mode="RGB", framebuffer=full_frame()) serial.reset_mock() # Use the same drawing primitives as the demo with canvas(device) as draw: primitives(device, draw) # Initial command to reset the display serial.command.assert_called_once_with(21, 0, 127, 117, 0, 63) # To regenerate test data, uncomment the following (remember not to commit though) # ================================================================================ # from baseline_data import save_reference_data # save_reference_data("demo_ssd1362_greyscale", serial.data.call_args.args[0]) # Next 4096 bytes are data representing the drawn image serial.data.assert_called_once_with(get_reference_data('demo_ssd1362_greyscale'))
10,801
def fix_random_seed(seed: int = 42) -> None: """ Fix the random seed. Parameters ---------- seed : int Random seed. """ os.environ['PYTHONHASHSEED'] = str(seed) random.seed(seed) np.random.seed(seed)
10,802
def ensure_dict(value): """Convert None to empty dict.""" if value is None: return {} return value
10,803
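A minimal usage sketch for the `ensure_dict` row above (illustrative; assumes the function is in scope):

```python
# Normalize an optional options argument.
options = ensure_dict(None)       # {}
options = ensure_dict({"a": 1})   # {"a": 1} (returned unchanged)
```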
def mutSet(individual): """Mutation that pops or add an element.""" if random.random() < 0.5: if len(individual) > 0: # We cannot pop from an empty set individual.remove(random.choice(sorted(tuple(individual)))) else: individual.add(random.randrange(param.NBR_ITEMS)) return individual,
10,804
def test_get_redirect(test_case, page): """ Test whether the page returns a redirection. :param test_case: test class, must be an instance of unittest.TestCase :param page: str with the page of the flask_monitoringdashboard """ with test_case.app.test_client() as c: test_case.assertEqual(302, c.get('dashboard/{}'.format(page)).status_code)
10,805
def validate_tweet(tweet: str) -> bool: """It validates a tweet. Args: tweet (str): The text to tweet. Raises: ValueError: Raises if tweet length is more than 280 unicode characters. Returns: bool: True if validation holds. """ str_len = len(tweet) if str_len > 280: raise ValueError(f"tweet is more than 280 unicode characters\n {tweet}") else: return True
10,806
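A hedged usage sketch for `validate_tweet` above (assumes the function is in scope; the sample strings are made up):

```python
# Valid tweet: returns True.
validate_tweet("short status update")

# Overlong tweet: raises ValueError.
try:
    validate_tweet("x" * 281)
except ValueError as exc:
    print(exc)  # "tweet is more than 280 unicode characters ..."
```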
def put_dir(src, dest): """Tar the src directory, upload it to the current active host, untar it, and perform renaming if necessary. src: str directory to be copied to remote host dest: str pathname of directory on remote host """ tmpdir = tempfile.mkdtemp() tarpath = tar_dir(src, os.path.basename(src), tmpdir) remote_dir = os.path.dirname(dest) put_untar(tarpath, remote_dir) shutil.rmtree(tmpdir, onerror=onerror)
10,807
def feed(data, batch_size, reuse=True): """Feed data in batches""" if type(data) in (list, tuple) and len(data) == 2: data_seqs, data_vals = data yield_vals = True else: data_seqs = data yield_vals = False num_batches = len(data_seqs) // batch_size if num_batches == 0: raise Exception("Dataset not large enough to accommodate batch size") while True: for ctr in range(num_batches): out = data_seqs[ctr * batch_size : (ctr + 1) * batch_size] if yield_vals: out = (out, data_vals[ctr * batch_size : (ctr + 1) * batch_size]) yield out if not reuse and ctr == num_batches - 1: yield None
10,808
def train_and_eval(model, model_dir, train_input_fn, eval_input_fn, steps_per_epoch, epochs, eval_steps): """Train and evaluate.""" train_dataset = train_input_fn() eval_dataset = eval_input_fn() callbacks = get_callbacks(model, model_dir) history = model.fit( x=train_dataset, validation_data=eval_dataset, steps_per_epoch=steps_per_epoch, epochs=epochs, validation_steps=eval_steps, callbacks=callbacks) tf.get_logger().info(history) return model
10,809
def open_website(url): """ Open website and return a class ready to work on """ headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36' } page = requests.get(url, headers=headers) source = html.fromstring(page.content) return source
10,810
def train(): """Trains the model""" if not request.is_json: return jsonify(error='Request must be json'), 400 try: frame = data_uri_to_cv2_img(request.json['frame']) except: # pylint: disable=bare-except e_type, value, _ = sys.exc_info() print(e_type) print(value) return jsonify(error='Could not decode frame'), 400 model_id = request.json['model_id'] coordinates = request.json['coord_x'], request.json['coord_y'] if model_id not in frames: frames[model_id] = list() frames[model_id].append((frame, coordinates)) if len(frames[model_id]) >= N_TRAINING_SAMPLES: models[model_id] = GazeTrackingModel(frames[model_id]) remaining_frames = N_TRAINING_SAMPLES - len(frames[model_id]) return jsonify(remaining=remaining_frames)
10,811
def euclidean3d(v1, v2): """Faster implementation of euclidean distance for the 3D case.""" if not (len(v1) == 3 and len(v2) == 3): print("Vectors are not in 3D space. Returning None.") return None return np.sqrt((v1[0] - v2[0]) ** 2 + (v1[1] - v2[1]) ** 2 + (v1[2] - v2[2]) ** 2)
10,812
def blast_seqs(seqs, blast_constructor, blast_db=None, blast_mat_root=None, params={}, add_seq_names=True, out_filename=None, WorkingDir=None, SuppressStderr=None, SuppressStdout=None, input_handler=None, HALT_EXEC=False ): """Blast list of sequences. seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. WARNING: DECISION RULES FOR INPUT HANDLING HAVE CHANGED. Decision rules for data are as follows. If it's s list, treat as lines, unless add_seq_names is true (in which case treat as list of seqs). If it's a string, test whether it has newlines. If it doesn't have newlines, assume it's a filename. If it does have newlines, it can't be a filename, so assume it's a multiline string containing sequences. If you want to skip the detection and force a specific type of input handler, use input_handler='your_favorite_handler'. add_seq_names: boolean. if True, sequence names are inserted in the list of sequences. if False, it assumes seqs is a list of lines of some proper format that the program can handle """ # set num keep if blast_db: params["-d"] = blast_db if out_filename: params["-o"] = out_filename ih = input_handler or guess_input_handler(seqs, add_seq_names) blast_app = blast_constructor( params=params, blast_mat_root=blast_mat_root, InputHandler=ih, WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout, HALT_EXEC=HALT_EXEC) return blast_app(seqs)
10,813
def merge_dicts(dicts, handle_duplicate=None): """Merge a list of dictionaries. Invoke handle_duplicate(key, val1, val2) when two dicts maps the same key to different values val1 and val2, maybe logging the duplication. """ if not dicts: return {} if len(dicts) == 1: return dicts[0] if handle_duplicate is None: return {key: val for dict_ in dicts for key, val in dict_.items()} result = {} for dict_ in dicts: for key, val in dict_.items(): if key in result and val != result[key]: handle_duplicate(key, result[key], val) continue result[key] = val return result
10,814
def _timestamp(line: str) -> Timestamp: """Returns the report timestamp from the first line""" start = line.find("GUIDANCE") + 11 text = line[start : start + 16].strip() timestamp = datetime.strptime(text, r"%m/%d/%Y %H%M") return Timestamp(text, timestamp.replace(tzinfo=timezone.utc))
10,815
def spread(ctx, source_repo): """Spread an issue with ISSUE_ID from SOURCE_REPO to the rest of the repos.""" ghm = GitHubMux(ctx.obj['organization'], ctx.obj['token'], ctx.obj['exclude']) ghm.spread_issue(ctx.obj['issue_id'], source_repo)
10,816
def start_qpsworkers(languages, worker_hosts): """Starts QPS workers as background jobs.""" if not worker_hosts: # run two workers locally (for each language) workers=[(None, 10000), (None, 10010)] elif len(worker_hosts) == 1: # run two workers on the remote host (for each language) workers=[(worker_hosts[0], 10000), (worker_hosts[0], 10010)] else: # run one worker per each remote host (for each language) workers=[(worker_host, 10000) for worker_host in worker_hosts] return [create_qpsworker_job(language, shortname= 'qps_worker_%s_%s' % (language, worker_idx), port=worker[1] + language.worker_port_offset(), remote_host=worker[0]) for language in languages for worker_idx, worker in enumerate(workers)]
10,817
def is_source_ext(filename): """ Tells if filename (filepath) is a source file. For our purposes "sources" are any files that can #include and can be included. """ _, ext = os.path.splitext(filename) return ext in [".h", ".hh", ".hpp", ".inc", ".c", ".cc", ".cxx", ".cpp", ".f", ".F"]
10,818
def MCE(conf, pred, true, bin_size = 0.1): """ Maximal Calibration Error Args: conf (numpy.ndarray): list of confidences pred (numpy.ndarray): list of predictions true (numpy.ndarray): list of true labels bin_size: (float): size of one bin (0,1) # TODO should convert to number of bins? Returns: mce: maximum calibration error """ upper_bounds = np.arange(bin_size, 1+bin_size, bin_size) cal_errors = [] for conf_thresh in upper_bounds: acc, avg_conf, _ = compute_acc_bin(conf_thresh-bin_size, conf_thresh, conf, pred, true) cal_errors.append(np.abs(acc-avg_conf)) return max(cal_errors)
10,819
def validate_frame_range(shots, start_time, end_time, sequence_time=False): """ Verify if the given frame range is overlapping existing shots timeline range. If it is overlapping any shot tail, it redefine the start frame at the end of it. If it is overlapping any shot head, it will push back all shots (and animation) behind the range to ensure the space is free to insert new shot. :param list[str] shots: Maya shot node names. :param int start_time: :param int end_time: :param bool sequence_time: Operate on Camera Sequencer's timeline instead of Maya timeline. :rtype: tuple[int, int] :return: Free range. """ start_attribute = "sequenceStartFrame" if sequence_time else "startFrame" end_attribute = "sequenceEndFrame" if sequence_time else "endFrame" length = end_time - start_time # Offset start_time to ensure it is not overlapping any shot tail. for shot in shots: shot_start = cmds.getAttr(shot + "." + start_attribute) shot_end = cmds.getAttr(shot + "." + end_attribute) # Ensure the time is not in the middle of a shot. if shot_start <= start_time <= shot_end: start_time = shot_end + 1 break # Detect overlapping shots from heads. end_time = start_time + length overlapping_shots = filter_shots_from_range( shots=shots, start_frame=start_time, end_frame=end_time, sequence_time=sequence_time) if not overlapping_shots: return start_time, end_time # Push back overlapping shots. offset = max( end_time - cmds.getAttr(shot + "." + start_attribute) + 1 for shot in overlapping_shots) if sequence_time: # Operating on the camera sequencer timeline don't need to adapt # animation. shift_shots_in_sequencer(shots, offset, after=end_time - offset) return start_time, end_time shift_shots(shots, offset, after=end_time - offset) curves = cmds.ls(type=ANIMATION_CURVES_TYPES) if curves: hold_animation_curves(curves, end_time - offset, offset) return start_time, end_time
10,820
def sparse_add(sv1, sv2): """dict, dict -> dict Returns a new dictionary that is the sum of the other two. >>>sparse_add(sv1, sv2) {0: 5, 1: 6, 2: 9} """ newdict = {} keys = set(sv1.keys()) | set(sv2.keys()) for key in keys: x = sv1.get(key, 0) + sv2.get(key, 0) newdict[key] = x return (newdict)
10,821
def return_flagger(video_ID): """ In GET request - Returns the username of the user that flagged the video with the corresponding video ID from the FLAGS table. """ if request.method == 'GET': return str(db.get_flagger(video_ID))
10,822
def distanceModulus(sc, d2dm=True, **kw): """Distance Modulus. DM = 5 log10(d / 10) + A Parameters ---------- sc: SkyCoord d2dm: bool if true: distance -> DM else: DM -> distance Returns ------- DM or distance: scalar, array TODO ---- A, obs """ if d2dm: return distanceModulus_magnitude(sc) else: return distanceModulus_distance(sc)
10,823
def get_all_hits(): """Retrieves all hits. """ hits = [ i for i in get_connection().get_all_hits()] pn = 1 total_pages = 1 while pn < total_pages: pn = pn + 1 print "Request hits page %i" % pn temp_hits = get_connection().get_all_hits(page_number=pn) hits.extend(temp_hits) return hits
10,824
def create_socket( host: str = "", port: int = 14443, anidb_server: str = "", anidb_port: int = 0 ) -> socket.socket: """Create a socket to be use to communicate with the server. This function is called internally, so you only have to call it if you want to change the default parameters. :param host: local host to bind the socket to, defaults to "" (which I think is any. Read the docs.) :type host: str, optional :param port: local port to bind the socket to, defaults to 14443 :type port: int, optional :param anidb_server: aniDB server name, defaults to environment ANIDB_SERVER :type anidb_server: str, optional :param anidb_port: anidb port, default to environment ANIDB_PORT :type anidb_port: int, optional :return: The created socket. :rtype: socket.socket """ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((host, port)) anidb_server = value_or_error("ANIDB_SERVER", anidb_server) anidb_port = value_or_error("ANIDB_PORT", anidb_port) s.connect((anidb_server, anidb_port)) logger.info( f"Created socket on UDP %s:%d => %s:%d", host, port, anidb_server, anidb_port ) global _conn _conn = s return s
10,825
def aten_eq(mapper, graph, node): """ 构造判断数值是否相等的PaddleLayer。 TorchScript示例: %125 : bool = aten::eq(%124, %123) 参数含义: %125 (bool): 对比后结果。 %124 (-): 需对比的输入1。 %123 (-): 需对比的输入2。 """ scope_name = mapper.normalize_scope_name(node) output_name = mapper._get_outputs_name(node)[0] layer_outputs = [output_name] layer_inputs = {} inputs_name, inputs_node = mapper._get_inputs_name(node) # 获取当前节点输出的list current_outputs = [output_name] # 处理输入0,即%124 mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name) layer_inputs["x"] = inputs_name[0] x_value = list(node.inputs())[0] x_type = x_value.type() # 处理输入1,即%123 mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name) layer_inputs["y"] = inputs_name[1] y_value = list(node.inputs())[1] y_type = y_value.type() # 获取当前节点输入的list current_inputs = list(layer_inputs.values()) graph.add_layer("prim.eq", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name) return current_inputs, current_outputs
10,826
def flip_es_aliases(): """Flip elasticsearch aliases to the latest version""" _require_target() with cd(env.code_root): sudo('%(virtualenv_root)s/bin/python manage.py ptop_es_manage --flip_all_aliases' % env)
10,827
def _create_simple_tf1_conv_model( use_variable_for_filter=False) -> Tuple[core.Tensor, core.Tensor]: """Creates a basic convolution model. This is intended to be used for TF1 (graph mode) tests. Args: use_variable_for_filter: Setting this to `True` makes the filter for the conv operation a `tf.Variable`. Returns: in_placeholder: Input tensor placeholder. output_tensor: The resulting tensor of the convolution operation. """ in_placeholder = array_ops.placeholder(dtypes.float32, shape=[1, 3, 4, 3]) filters = random_ops.random_uniform(shape=(2, 3, 3, 2), minval=-1., maxval=1.) if use_variable_for_filter: filters = variables.Variable(filters) output_tensor = nn_ops.conv2d( in_placeholder, filters, strides=[1, 1, 2, 1], dilations=[1, 1, 1, 1], padding='SAME', data_format='NHWC') return in_placeholder, output_tensor
10,828
def plot_diversity_bootstrapped(diversity_df): """Plots the result of bootstrapped diversity""" div_lines = ( alt.Chart() .mark_line() .encode( x="year:O", y=alt.Y("mean(score)", scale=alt.Scale(zero=False)), color="parametre_set", ) ) div_bands = ( alt.Chart() .mark_errorband(extent="ci") .encode( x="year:O", y=alt.Y("score", scale=alt.Scale(zero=False)), color="parametre_set", ) ) out = alt.layer( div_lines, div_bands, data=diversity_df, height=150, width=400 ).facet(row="diversity_metric", column="test") return out
10,829
def test_atomic_unsigned_int_total_digits_nistxml_sv_iv_atomic_unsigned_int_total_digits_1_2(mode, save_output, output_format): """ Type atomic/unsignedInt is restricted by facet totalDigits with value 1. """ assert_bindings( schema="nistData/atomic/unsignedInt/Schema+Instance/NISTSchema-SV-IV-atomic-unsignedInt-totalDigits-1.xsd", instance="nistData/atomic/unsignedInt/Schema+Instance/NISTXML-SV-IV-atomic-unsignedInt-totalDigits-1-2.xml", class_name="NistschemaSvIvAtomicUnsignedIntTotalDigits1", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
10,830
def escape(s): """ Returns the given string with ampersands, quotes and carets encoded. >>> escape('<b>oh hai</b>') '&lt;b&gt;oh hai&lt;/b&gt;' >>> escape("Quote's Test") 'Quote&#39;s Test' """ mapping = ( ('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'), ('"', '&quot;'), ("'", '&#39;'), ) for tup in mapping: s = s.replace(tup[0], tup[1]) return s
10,831
def _get_db_columns_for_model(model): """ Return list of columns names for passed model. """ return [field.column for field in model._meta._fields()]
10,832
def downgrade(): """Migrations for the downgrade.""" raise NotImplementedError('Downgrade of 535039300e4a.')
10,833
def mvkth(val): """ Moves the kth motor in the Kappa chamber """ name="kth" Move_Motor_vs_Branch(name,val)
10,834
def get_UV(filename): """ Input: filename (including path) Output: (wave_leftedges, wav_rightedges, surface radiance) in units of (nm, nm, photons/cm2/sec/nm) """ wav_leftedges, wav_rightedges, wav, toa_intensity, surface_flux, SS,surface_intensity, surface_intensity_diffuse, surface_intensity_direct=np.genfromtxt(filename, skip_header=1, skip_footer=0, usecols=(0, 1, 2,3,4,5,6,7,8), unpack=True) surface_intensity_photons=surface_intensity*(wav/(hc)) return wav_leftedges, wav_rightedges, surface_intensity_photons
10,835
def flip_robot_elbow(*args): """ Toggles Inverse Kinematic Solution 2 Boolean :param args: :return: """ robots = get_robot_roots() if not robots: pm.warning('Nothing Selected; Select a valid robot') return for robot in robots: target_ctrl_attr = get_target_ctrl_path(robot) + '.ikSolution2' ik_sol = pm.getAttr(target_ctrl_attr) pm.setAttr(target_ctrl_attr, not ik_sol)
10,836
def load_chunks(chunk_file_location, chunk_ids): """Load patch paths from specified chunks in chunk file Parameters ---------- chunks : list of int The IDs of chunks to retrieve patch paths from Returns ------- list of str Patch paths from the chunks """ patch_paths = [] with open(chunk_file_location) as f: data = json.load(f) chunks = data['chunks'] for chunk in data['chunks']: if chunk['id'] in chunk_ids: patch_paths.extend([[x,chunk['id']] for x in chunk['imgs']]) if len(patch_paths) == 0: raise ValueError( f"chunks {tuple(chunk_ids)} not found in {chunk_file_location}") return patch_paths
10,837
def set_arguments() -> argparse.Namespace: """Setting the arguments to run from CMD :return: arguments """ # Adding main description parser = argparse.ArgumentParser( description=f'{m.__description__}', epilog=f'{m.__copyright__}\n | Versioon: {m.__version__}') # Postional arguments parser.add_argument('Excel', help='Exceli täielik asukoht, näiteks "./docs/examples/sisend_test.xlsx"') parser.add_argument('Kinnistu', help='kinnistu numbrite veeru nimi, näiteks "Kinnistu reg.osa"') parser.add_argument('Out', help='väljundi nimi, salvestatakse samasse kausta run.py-ga, näiteks kinnistu_tulemused') # Optional arguments parser.add_argument('-i', '--intermediate', help='Vahetulemused. (Default: %(default)s).', action='store_true') parser.add_argument('-l', '--logi', help='Logimiseks. (Default: %(default)s).', action='store_true') args = parser.parse_args() # collection return args
10,838
def get_model_input(batch, input_id=None): """ Get model input from batch batch: batch of model input samples """ if isinstance(batch, dict) or isinstance(batch, list): assert input_id is not None return batch[input_id] else: return batch
10,839
def issym(b3): """test if a list has equal number of positive and negative values; zeros belong to both. """ npos = 0; nneg = 0 for item in b3: if (item >= 0): npos +=1 if (item <= 0): nneg +=1 if (npos==nneg): return True else: return False
10,840
def get_token(user, pwd, expires_in=3600, expire_on=None, device=None): """ Get the JWT Token :param user: The user in ctx :param pwd: Pwd to auth :param expires_in: number of seconds till expiry :param expire_on: yyyy-mm-dd HH:mm:ss to specify the expiry (deprecated) :param device: The device in ctx """ if not frappe.db.exists("User", user): raise frappe.ValidationError(_("Invalide User")) from frappe.sessions import clear_sessions login = LoginManager() login.check_if_enabled(user) if not check_password(user, pwd): login.fail('Incorrect password', user=user) login.login_as(user) login.resume = False login.run_trigger('on_session_creation') _expires_in = 3600 if cint(expires_in): _expires_in = cint(expires_in) elif expire_on: _expires_in = (get_datetime(expire_on) - get_datetime()).total_seconds() token = get_bearer_token( user=user, expires_in=_expires_in ) frappe.local.response["token"] = token["access_token"] frappe.local.response.update(token)
10,841
def predictOneVsAll(all_theta, X): """will return a vector of predictions for each example in the matrix X. Note that X contains the examples in rows. all_theta is a matrix where the i-th row is a trained logistic regression theta vector for the i-th class. You should set p to a vector of values from 1..K (e.g., p = [1 3 1 2] predicts classes 1, 3, 1, 2 for 4 examples) """ m = X.shape[0] # You need to return the following variables correctly p = np.zeros((m, 1)) # probs = np.zeros((all_theta.shape[0], X.shape[0])) # ====================== YOUR CODE HERE ====================== # Instructions: Complete the following code to make predictions using # your learned logistic regression parameters (one-vs-all). # You should set p to a vector of predictions (from 1 to # num_labels). # # Hint: This code can be done all vectorized using the max function. # In particular, the np.argmax function can return the index of the max # element, for more information see 'numpy.argmax' on the numpy website. # If your examples are in rows, then, you can use # np.argmax(probs, axis=1) to obtain the max for each row. # p = np.argmax(sigmoid(np.dot(all_theta, X.T)), axis=0) + 1 # for i in range(all_theta.shape[0]): # probs[i,:] = sigmoid(X @ all_theta[i,:]) # p = (np.argmax(probs, axis=0) + 1) # ========================================================================= return p
10,842
def lecture_produit(ligne : str) -> Tuple[str, int, float]: """Precondition: the text line describes a product order. Returns the product order (name, quantity, unit price). """ lmots : List[str] = decoupage_mots(ligne) nom_produit : str = lmots[0] quantite : int = int(lmots[1]) prix_unitaire : float = float(lmots[2]) return (nom_produit, quantite, prix_unitaire)
10,843
def print_curation_topic_tree(menu_topics, slugs=[]): """ Print the `menu_topics` obtained from `get_ka_learn_menu_topics` in the form of a dict tree structure suitable for inclusion in `curaiton.py`. The output of the function can be added to `TOPIC_TREE_REPLACMENTS_PER_LANG` in `curation.py` to obtain restructuring operations of the KA API results to make the Kolibri channel topic tree look like the KA website. """ print('[') for top_menu in menu_topics: if top_menu['slug'] in slugs: line = ' {' line += '"slug": "' + top_menu['slug'] + '", ' line += '"translatedTitle": "' + top_menu['translatedTitle'] + '", ' line += '"children": [' print(line) for menu in top_menu['children']: subline = ' {' subline += '"slug": "' + menu['slug'] + '", ' subline += '"translatedTitle": "' + menu['translatedTitle'] + '"},' print(subline) print(' ]},') print(']')
10,844
def affinity_matrix(test_specs): """Generate a random user/item affinity matrix. By increasing the likehood of 0 elements we simulate a typical recommending situation where the input matrix is highly sparse. Args: users (int): number of users (rows). items (int): number of items (columns). ratings (int): rating scale, e.g. 5 meaning rates are from 1 to 5. spars: probability of obtaining zero. This roughly corresponds to the sparseness. of the generated matrix. If spars = 0 then the affinity matrix is dense. Returns: np.array: sparse user/affinity matrix of integers. """ np.random.seed(test_specs["seed"]) # uniform probability for the 5 ratings s = [(1 - test_specs["spars"]) / test_specs["ratings"]] * test_specs["ratings"] s.append(test_specs["spars"]) P = s[::-1] # generates the user/item affinity matrix. Ratings are from 1 to 5, with 0s denoting unrated items X = np.random.choice( test_specs["ratings"] + 1, (test_specs["users"], test_specs["items"]), p=P ) Xtr, Xtst = numpy_stratified_split( X, ratio=test_specs["ratio"], seed=test_specs["seed"] ) return (Xtr, Xtst)
10,845
def get_wm_desktop(window): """ Get the desktop index of the window. :param window: A window identifier. :return: The window's virtual desktop index. :rtype: util.PropertyCookieSingle (CARDINAL/32) """ return util.PropertyCookieSingle(util.get_property(window, '_NET_WM_DESKTOP'))
10,846
def get_parents(tech_id, model_config): """ Returns the full inheritance tree from which ``tech`` descends, ending with its base technology group. To get the base technology group, use ``get_parents(...)[-1]``. Parameters ---------- tech : str model_config : AttrDict """ tech = model_config.techs[tech_id].essentials.parent parents = [tech] while True: tech = model_config.tech_groups[tech].essentials.parent if tech is None: break # We have reached the top of the chain parents.append(tech) return parents
10,847
def load_dynamic_configuration( config: Configuration, secrets: Secrets = None ) -> Configuration: """ This is for loading a dynamic configuration if exists. The dynamic config is a regular activity (probe) in the configuration section. If there's a use-case for setting a configuration dynamically right before the experiment is starting. It executes the probe, and then the return value of this probe will be the config you wish to set. The dictionary needs to have a key named `type` and as a value `probe`, alongside the rest of the probe props. (No need for the `tolerance` key). For example: ```json "some_dynamic_config": { "name": "some config probe", "type": "probe", "provider": { "type": "python", "module": "src.probes", "func": "config_probe", "arguments": { "arg1":"arg1" } } } ``` `some_dynamic_config` will be set with the return value of the function config_probe. Side Note: the probe type can be the same as a regular probe can be, python, process or http. The config argument contains all the configurations of the experiment including the raw config_probe configuration that can be dynamically injected. The configurations contain as well all the env vars after they are set in `load_configuration`. The `secrets` argument contains all the secrets of the experiment. For `process` probes, the stdout value (stripped of endlines) is stored into the configuration. For `http` probes, the `body` value is stored. For `python` probes, the output of the function will be stored. We do not stop on errors but log a debug message and do not include the key into the result dictionary. """ # we delay this so that the configuration module can be imported leanly # from elsewhere from chaoslib.activity import run_activity conf = {} secrets = secrets or {} had_errors = False logger.debug("Loading dynamic configuration...") for (key, value) in config.items(): if not (isinstance(value, dict) and value.get("type") == "probe"): conf[key] = config.get(key, value) continue # we have a dynamic config name = value.get("name") provider_type = value["provider"]["type"] value["provider"]["secrets"] = deepcopy(secrets) try: output = run_activity(value, conf, secrets) except Exception: had_errors = True logger.debug(f"Failed to load configuration '{name}'", exc_info=True) continue if provider_type == "python": conf[key] = output elif provider_type == "process": if output["status"] != 0: had_errors = True logger.debug( f"Failed to load configuration dynamically " f"from probe '{name}': {output['stderr']}" ) else: conf[key] = output.get("stdout", "").strip() elif provider_type == "http": conf[key] = output.get("body") if had_errors: logger.warning( "Some of the dynamic configuration failed to be loaded." "Please review the log file for understanding what happened." ) return conf
10,848
def _download_all_example_data(verbose=True): """Download all datasets used in examples and tutorials.""" # This function is designed primarily to be used by CircleCI. It has # verbose=True by default so we get nice status messages # Consider adding datasets from here to CircleCI for PR-auto-build from . import (sample, testing, misc, spm_face, somato, brainstorm, megsim, eegbci, multimodal, mtrf, fieldtrip_cmc) sample.data_path() testing.data_path() misc.data_path() spm_face.data_path() somato.data_path() multimodal.data_path() mtrf.data_path() fieldtrip_cmc.data_path() sys.argv += ['--accept-brainstorm-license'] try: brainstorm.bst_raw.data_path() brainstorm.bst_auditory.data_path() brainstorm.bst_phantom_elekta.data_path() brainstorm.bst_phantom_ctf.data_path() finally: sys.argv.pop(-1) megsim.load_data(condition='visual', data_format='single-trial', data_type='simulation', update_path=True) megsim.load_data(condition='visual', data_format='raw', data_type='experimental', update_path=True) megsim.load_data(condition='visual', data_format='evoked', data_type='simulation', update_path=True) eegbci.load_data(1, [6, 10, 14], update_path=True) sys.argv += ['--accept-hcpmmp-license'] try: fetch_hcp_mmp_parcellation() finally: sys.argv.pop(-1)
10,849
def main(argv=None): """Main function: Parse, process, print""" if argv is None: argv = sys.argv args = parse_args(argv) if not args: exit(1) original_tags = copy.deepcopy(tags.load(args["config"])) with io.open(args["src"], "r", encoding="utf-8", errors="ignore") as fin: lines = fin.readlines() transacs_orig = qifile.parse_lines(lines, options=args) try: transacs = process_transactions(transacs_orig, options=args) except EOFError: # exit on Ctrl + D: restore original tags tags.save(args["config"], original_tags) return 1 res = qifile.dump_to_buffer(transacs + transacs_orig[len(transacs) :]) if not args.get("dry-run"): with io.open(args["dest"], "w", encoding="utf-8") as dest: dest.write(res) if args["batch"] or args["dry-run"]: print("\n" + res) return 0 if len(transacs) == len(transacs_orig) else 1
10,850
async def guess(botti, message, botData): """ Für alle ausführbar Dieser Befehl schätzt eine Zahl zwischen 1 und 100 mit Einsatz. !guess {ZAHL} {EINSATZ} {ZAHL} Ganze positive Zahl <= 100 {EINSATZ} Ganze Zahl >= 0, "allin" [Setzt alles] !guess 50 1000 """ try: guessValue = message.content.split(" ")[1] betValue = message.content.split(" ")[2] if ((not betValue.isdigit()) and (not (betValue == "allin"))) or (not guessValue.isdigit()): raise IndexError() guessValue = int(guessValue) if guessValue > 100: raise IndexError() if betValue == "allin": betValue = _getBalance(botData, message.author.id) else: betValue = int(betValue) checkBal = _checkBalance(botData, message.author.id, betValue) if checkBal == -1: await modules.bottiHelper._sendMessagePingAuthor(message, ":x: Sieht so aus, als hättest du wohl noch kein Konto. Verwende `!balance`, um eins anzulegen!") return elif checkBal == 0: await modules.bottiHelper._sendMessagePingAuthor(message, ":x: Dafür reicht dein Kontostand nicht aus!") return randomint = randint(0, 100) difference = abs(guessValue - randomint) if difference == 0: win = int(13 * betValue) elif difference < 2: win = int(6 * betValue) elif difference < 4: win = int(4 * betValue) elif difference < 8: win = int(2 * betValue) elif difference < 16: win = int(1 * betValue) else: win = -betValue winMessage = (modules.bottiHelper._spaceIntToString(int(win)) + botData.botCurrency["emoji"]) if (difference in range(0, 16)) else "leider nichts" _addBalance(botData, message.author.id, win) await modules.bottiHelper._sendMessagePingAuthor(message, ":game_die: Zufallszahl ist **{random}**. Abstand von deinem Tipp ist **{difference}**! Damit hast du **{win}** gewonnen!".format(random = randomint, difference = difference, win = winMessage)) except IndexError: await modules.bottiHelper._sendMessagePingAuthor(message, modules.bottiHelper._invalidParams(botData, "guess")) return
10,851
def profile_avatar(user, size=200): """Return a URL to the user's avatar.""" try: # This is mostly for tests. profile = user.profile except (Profile.DoesNotExist, AttributeError): avatar = settings.STATIC_URL + settings.DEFAULT_AVATAR profile = None else: if profile.is_fxa_migrated: avatar = profile.fxa_avatar elif profile.avatar: avatar = profile.avatar.url else: avatar = settings.STATIC_URL + settings.DEFAULT_AVATAR if avatar.startswith("//"): avatar = "https:%s" % avatar if user and hasattr(user, "email"): email_hash = hashlib.md5(force_bytes(user.email.lower())).hexdigest() else: email_hash = "00000000000000000000000000000000" url = "https://secure.gravatar.com/avatar/%s?s=%s" % (email_hash, size) # If the url doesn't start with http (local dev), don't pass it to # to gravatar because it can't use it. if avatar.startswith("https") and profile and profile.is_fxa_migrated: url = avatar elif avatar.startswith("http"): url = url + "&d=%s" % urllib.parse.quote(avatar) return url
10,852
def szz_reverse_blame(ss_path, sha_to_blame_on, buggy_line_num, buggy_file_path_in_ss, buggy_SHA): """Reverse-blames `buggy_line_num` (added in `buggy_SHA`) onto `sha_to_blame_on`.""" ss_repo = Repo(ss_path) ss_name = pathLeaf(ss_path) try: # If buggy_SHA equals sha_to_blame_on, then git-blame-reverse fails. # In our code buggy_SHA and sha_to_blame_on are never equal, but just to be safe... if sha_to_blame_on != buggy_SHA: curr_blame_info = ss_repo.git.blame('--reverse', '-w', '-n', '-f', '--abbrev=40', \ '-L' + buggy_line_num + ',+1', \ '--', buggy_file_path_in_ss, \ buggy_SHA + '..' + sha_to_blame_on, stdout_as_string = False) curr_buggy_line_num = curr_blame_info.split('(')[0].split()[-1] curr_buggy_file_path_in_ss = ' '.join(curr_blame_info.split('(')[0].split()[1:-1]) return [ss_name, curr_buggy_file_path_in_ss, sha_to_blame_on, curr_buggy_line_num] else: return [ss_name, buggy_file_path_in_ss, sha_to_blame_on, buggy_line_num] except Exception as e: sys.stderr.write("\nError in reverse-blame! Continuing with next line_num...\n" + str(e)) return None
10,853
def union(graphs, use_tqdm: bool = False): """Take the union over a collection of graphs into a new graph. Assumes iterator is longer than 2, but not infinite. :param iter[BELGraph] graphs: An iterator over BEL graphs. Can't be infinite. :param use_tqdm: Should a progress bar be displayed? :return: A merged graph :rtype: BELGraph Example usage: >>> import pybel >>> g = pybel.from_bel_script('...') >>> h = pybel.from_bel_script('...') >>> k = pybel.from_bel_script('...') >>> merged = union([g, h, k]) """ it = iter(graphs) if use_tqdm: it = tqdm(it, desc='taking union') try: target = next(it) except StopIteration as e: raise ValueError('no graphs given') from e try: graph = next(it) except StopIteration: return target else: target = target.copy() left_full_join(target, graph) for graph in it: left_full_join(target, graph) return target
10,854
def merge_ondisk(trained_index: faiss.Index, shard_fnames: List[str], ivfdata_fname: str) -> None: """ Add the contents of the indexes stored in shard_fnames into the index trained_index. The on-disk data is stored in ivfdata_fname """ # merge the images into an on-disk index # first load the inverted lists ivfs = [] for fname in shard_fnames: # the IO_FLAG_MMAP is to avoid actually loading the data thus # the total size of the inverted lists can exceed the # available RAM #LOG.info("read " + fname) index = faiss.read_index(fname, faiss.IO_FLAG_MMAP) index_ivf = faiss.extract_index_ivf(index) ivfs.append(index_ivf.invlists) # avoid that the invlists get deallocated with the index index_ivf.own_invlists = False # construct the output index index = trained_index index_ivf = faiss.extract_index_ivf(index) assert index.ntotal == 0, "works only on empty index" # prepare the output inverted lists. They will be written # to merged_index.ivfdata invlists = faiss.OnDiskInvertedLists( index_ivf.nlist, index_ivf.code_size, ivfdata_fname) # merge all the inverted lists ivf_vector = faiss.InvertedListsPtrVector() for ivf in ivfs: ivf_vector.push_back(ivf) #LOG.info("merge %d inverted lists " % ivf_vector.size()) ntotal = invlists.merge_from(ivf_vector.data(), ivf_vector.size()) # now replace the inverted lists in the output index index.ntotal = index_ivf.ntotal = ntotal index_ivf.replace_invlists(invlists, True) invlists.this.disown()
10,855
def get_month_n_days_from_cumulative(monthly_cumulative_days): """ Transform consecutive number of days in monthly data to actual number of days. EnergyPlus monthly results report a cumulative (consecutive) number of days for each month. Raw data reports the table as 31, 59, ...; this function calculates and returns the actual number of days for each month: 31, 28, ... """ old_num = monthly_cumulative_days.pop(0) m_actual_days = [old_num] for num in monthly_cumulative_days: new_num = num - old_num m_actual_days.append(new_num) old_num += new_num return m_actual_days
10,856
def check_split_window(start, stop, lookup, transitions): """Check all possible splits""" for split in range(start + 1, stop): assert start < split < stop m1, first = lookup.get((start, split), (None, None)) m2, second = lookup.get((split, stop), (None, None)) if first and second: subseq = first + second if subseq in transitions: lookup[(start, stop)] = m1 + m2 + 1, transitions[subseq] else: concatenate_ytterbium(start, stop, first, second, m1, m2, lookup)
10,857
def get_text(name): """Returns some text""" return "Hello " + name
10,858
def apply_template(assets): """ Processes the template. Used for overwrite ``docutils.writers._html_base.Writer.apply_template`` method. ``apply_template(<assets>)`` ``assets`` (dictionary) Assets to add at the template, see ``ntdocutils.writer.Writer.assets``. returns function - Template processor. Example ======= .. code:: python apply_template({ "before_styles": '<link rel="stylesheet" href="styles.css" />', "scripts": '<script src="script.js"></script>' '<script src="other_script.js"></script>' }) """ def apply_template(self): template_file = open(self.document.settings.template, "rb") template = str(template_file.read(), "utf-8") template_file.close() # Escape ``%`` that don't are special fields pattern = r"%(?!\((" + "|".join(self.visitor_attributes) + r")\)s)" template = re.subn(pattern, "%%", template)[0] subs = self.interpolation_dict() return template.format(**assets) % subs return apply_template
10,859
def test_easom_bound_fail(outbound): """Test easom bound exception""" with pytest.raises(ValueError): x = outbound(b["easom"].low, b["easom"].high, size=(3, 2)) fx.easom(x)
10,860
def harvest(post): """ Filter the post data for just the funding allocation formset data. """ data = {k: post[k] for k in post if k.startswith("fundingallocation")} return data
10,861
def get_credentials(): """Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential. """ home_dir = os.path.expanduser('~') credential_dir = os.path.join(home_dir, '.credentials') if not os.path.exists(credential_dir): os.makedirs(credential_dir) credential_path = os.path.join(credential_dir, 'calendar-python-quickstart.json') store = Storage(credential_path) credentials = store.get() if not credentials or credentials.invalid: flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES) flow.user_agent = APPLICATION_NAME if flags: credentials = tools.run_flow(flow, store, flags) else: # Needed only for compatibility with Python 2.6 credentials = tools.run(flow, store) #print('Storing credentials to ' + credential_path) return credentials
10,862
def model2(x, input_size, output_size): """! Fully connected model [InSize]x800x[OutSize] Implementation of a [InSize]x800x[OutSize] fully connected model. Parameters ---------- @param x : placeholder for input data @param input_size : size of input data @param output_size : size of output data Returns ------- @retval logits : output @retval logits_dup : a copy of output @retval w_list : trainable parameters @retval w_list_dup : a copy of trainable parameters """ #================================================================================================================== ## model definition mu = 0 sigma = 0.2 weights = { 'wfc': tf.Variable(tf.truncated_normal(shape=(input_size,800), mean = mu, stddev = sigma, seed = 1)), 'out': tf.Variable(tf.truncated_normal(shape=(800,output_size), mean = mu, stddev = sigma, seed = 1)) } biases = { 'bfc': tf.Variable(tf.zeros(800)), 'out': tf.Variable(tf.zeros(output_size)) } # Flatten input. c_flat = flatten(x) # Layer 1: Fully Connected. Input = input_size. Output = 800. # Activation. fc = fc_relu(c_flat, weights['wfc'], biases['bfc']) # Layer 2: Fully Connected. Input = 800. Output = output_size. logits = tf.add(tf.matmul(fc, weights['out']), biases['out']) w_list = [] for w,b in zip(weights, biases): w_list.append(weights[w]) w_list.append(biases[b]) #================================================================================================================== ## duplicate the model used in ProxSVRG weights_dup = { 'wfc': tf.Variable(tf.truncated_normal(shape=(input_size,800), mean = mu, stddev = sigma, seed = 1)), 'out': tf.Variable(tf.truncated_normal(shape=(800,output_size), mean = mu, stddev = sigma, seed = 1)) } biases_dup = { 'bfc': tf.Variable(tf.zeros(800)), 'out': tf.Variable(tf.zeros(output_size)) } # Flatten input. c_flat_dup = flatten(x) # Layer 1: Fully Connected. Input = input_size. Output = 800. # Activation. fc_dup = fc_relu(c_flat_dup, weights_dup['wfc'], biases_dup['bfc']) # Layer 2: Fully Connected. Input = 800. Output = output_size. logits_dup = tf.add(tf.matmul(fc_dup, weights_dup['out']), biases_dup['out']) w_list_dup = [] for w,b in zip(weights_dup, biases_dup): w_list_dup.append(weights_dup[w]) w_list_dup.append(biases_dup[b]) return logits, logits_dup, w_list, w_list_dup
10,863
def test_optional_step_matching(env_boston, feature_engineer): """Tests that a Space containing `optional` `Categorical` Feature Engineering steps matches with the expected saved Experiments. This regression test is focused on issues that arise when `EngineerStep`s other than the last one in the `FeatureEngineer` are `optional`. The simplified version of this test below, :func:`test_limited_optional_step_matching`, demonstrates that result matching works properly when only the final `EngineerStep` is `optional`""" opt_0 = DummyOptPro(iterations=20, random_state=32) opt_0.forge_experiment(XGBRegressor, feature_engineer=feature_engineer) opt_0.go() opt_1 = ExtraTreesOptPro(iterations=20, random_state=32) opt_1.forge_experiment(XGBRegressor, feature_engineer=feature_engineer) opt_1.get_ready() # Assert `opt_1` matched with all Experiments executed by `opt_0` assert len(opt_1.similar_experiments) == opt_0.successful_iterations
10,864
def init_db(): """For use on command line for setting up the database. """ db.drop_all() db.create_all()
10,865
def cut_bin_depths( dataset: xr.Dataset, depth_range: tp.Union[int, float, list] = None ) -> xr.Dataset: """ Return dataset with cut bin depths if the depth_range are not outside the depth span. Parameters ---------- dataset : depth_range : min or (min, max) to be included in the dataset. Bin depths outside this range will be cut. Returns ------- dataset with depths cut. """ if depth_range: if not isinstance(depth_range, (list, tuple)): if depth_range > dataset.depth.max(): l.log( "depth_range value is greater than the maximum bin depth. Depth slicing aborded." ) else: dataset = dataset.sel(depth=slice(depth_range, None)) l.log(f"Bin of depth inferior to {depth_range} m were cut.") elif len(depth_range) == 2: if dataset.depth[0] > dataset.depth[-1]: depth_range.reverse() if depth_range[0] > dataset.depth.max() or depth_range[1] < dataset.depth.min(): l.log( "depth_range values are outside the actual depth range. Depth slicing aborted." ) else: dataset = dataset.sel(depth=slice(*depth_range)) l.log( f"Bin of depth inferior to {depth_range[0]} m and superior to {depth_range[1]} m were cut." ) else: l.log( f"depth_range expects a maximum of 2 values but {len(depth_range)} were given. Depth slicing aborted." ) return dataset
10,866
async def test_if_action_before_sunrise_no_offset_kotzebue(hass, hass_ws_client, calls): """ Test if action was before sunrise. Local timezone: Alaska time Location: Kotzebue, which has a very skewed local timezone with sunrise at 7 AM and sunset at 3AM during summer After sunrise is true from sunrise until midnight, local time. """ tz = dt_util.get_time_zone("America/Anchorage") dt_util.set_default_time_zone(tz) hass.config.latitude = 66.5 hass.config.longitude = 162.4 await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "id": "sun", "trigger": {"platform": "event", "event_type": "test_event"}, "condition": {"condition": "sun", "before": SUN_EVENT_SUNRISE}, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-07-24 07:21:12 local, sunset: 2015-07-25 03:13:33 local # sunrise: 2015-07-24 15:21:12 UTC, sunset: 2015-07-25 11:13:33 UTC # now = sunrise + 1s -> 'before sunrise' not true now = datetime(2015, 7, 24, 15, 21, 13, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 await assert_automation_condition_trace( hass_ws_client, "sun", {"result": False, "wanted_time_before": "2015-07-24T15:16:46.975735+00:00"}, ) # now = sunrise - 1h -> 'before sunrise' true now = datetime(2015, 7, 24, 14, 21, 12, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 await assert_automation_condition_trace( hass_ws_client, "sun", {"result": True, "wanted_time_before": "2015-07-24T15:16:46.975735+00:00"}, ) # now = local midnight -> 'before sunrise' true now = datetime(2015, 7, 24, 8, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 await assert_automation_condition_trace( hass_ws_client, "sun", {"result": True, "wanted_time_before": "2015-07-24T15:16:46.975735+00:00"}, ) # now = local midnight - 1s -> 'before sunrise' not true now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 await assert_automation_condition_trace( hass_ws_client, "sun", {"result": False, "wanted_time_before": "2015-07-23T15:12:19.155123+00:00"}, )
10,867
def create_edgelist(file, df): """ creates an edgelist based on genre info """ # load edges from the (sub)genres themselves df1 = (pd .read_csv(file, dtype='str')) # get edges from the book descriptions df df2 = (df[['title', 'subclass']] .rename(columns={'title':'Edge_From', 'subclass':'Edge_To'}) .sort_values(by='Edge_To')) # combine the two dfs df3 = (df1 .append(df2, ignore_index=True)) # consistently assign categories df4 = (df3 .stack() .astype('category') .unstack()) # make the categorical values explicit for later convenience for name in df4.columns: df4['N' + name] = (df4[name] .cat .codes) return df4
10,868
def CodeRange(code1, code2): """ CodeRange(code1, code2) is an RE which matches any character with a code |c| in the range |code1| <= |c| < |code2|. """ if code1 <= nl_code < code2: return Alt(RawCodeRange(code1, nl_code), RawNewline, RawCodeRange(nl_code + 1, code2)) else: return RawCodeRange(code1, code2)
10,869
def test_wrong_data_path(): """ Check that the error is thrown if the connector is a part of the signature, but this particular data path (input or output) is already hidden by a previously connected transformation. """ N = 200 coeff_dtype = numpy.float32 arr_type = Type(numpy.complex64, (N, N)) d = DummyAdvanced(arr_type, coeff_dtype) identity = tr_identity(d.parameter.C) d.parameter.C.connect(identity, identity.o1, C_in=identity.i1) d.parameter.D.connect(identity, identity.i1, D_out=identity.o1) assert list(d.signature.parameters.values()) == [ Parameter('C', Annotation(arr_type, 'o')), Parameter('C_in', Annotation(arr_type, 'i')), Parameter('D_out', Annotation(arr_type, 'o')), Parameter('D', Annotation(arr_type, 'i')), Parameter('coeff1', Annotation(coeff_dtype)), Parameter('coeff2', Annotation(coeff_dtype))] # Now input to C is hidden by the previously connected transformation with pytest.raises(ValueError): d.parameter.C.connect(identity, identity.o1, C_in_prime=identity.i1) # Same goes for D with pytest.raises(ValueError): d.parameter.D.connect(identity, identity.i1, D_out_prime=identity.o1) # Also we cannot make one of the transformation outputs an existing output parameter with pytest.raises(ValueError): d.parameter.C.connect(identity, identity.i1, D_out=identity.o1) # Output of C is still available though d.parameter.C.connect(identity, identity.i1, C_out=identity.o1) assert list(d.signature.parameters.values()) == [ Parameter('C_out', Annotation(arr_type, 'o')), Parameter('C_in', Annotation(arr_type, 'i')), Parameter('D_out', Annotation(arr_type, 'o')), Parameter('D', Annotation(arr_type, 'i')), Parameter('coeff1', Annotation(coeff_dtype)), Parameter('coeff2', Annotation(coeff_dtype))]
10,870
def build_raw_mint(fee, txhash, txix, out_addr1, out_addr2, in_lovelace, policy_id, tokens, script, metadata, out_file = "matx.raw", burn_tokens = []): """ Generates the raw transaction for sending newly minted ingredients. Always sends along 2 Ada, to be on the safe side for the minimal transfer amount Can also burn tokens """ for i, token in enumerate(tokens): if i == 0: mint = f"1 {policy_id}.{token}" else: mint = mint + f" +1 {policy_id}.{token}" command =[CARDANO_CLI_PATH, "transaction", "build-raw", '--fee', str(fee), '--tx-in', f"{txhash}#{txix}", '--tx-out', f"{out_addr1}+{in_lovelace - 4000000}", '--tx-out', f"{out_addr2}+{4000000 - fee}+{mint}", '--minting-script-file', script, '--metadata-json-file', metadata, '--invalid-hereafter', "48716321", '--out-file', out_file] if len(burn_tokens) > 0: for btoken in burn_tokens: mint = mint + f" +-1 {policy_id}.{btoken}" command.append(f"--mint={mint}") _ = subprocess.check_output(command)
10,871
def sort_by_date(data): """ The sort_by_date function sorts the lists by their datetime object :param data: the list of lists containing parsed UA data :return: the sorted date list of lists """ # Supply the reverse option to sort by descending order return [x[0:6:4] for x in sorted(data, key=itemgetter(4), reverse=True)]
10,872
def get_menu_option(): """ Function to display menu options and asking the user to choose one. """ print("1. View their next 5 fixtures...") print("2. View their last 5 fixtures...") print("3. View their entire current season...") print("4. View their position in the table...") print("5. View the club roster...") print("6. View season statistics...") print("7. View team information...") print("8. Sign up to your club's weekly newsletter...") print("9. Calculate odds on next game...") print() return input("CHOOSE AN OPTION BELOW BY ENTERING THE MENU NUMBER OR ENTER 'DONE' ONCE YOU ARE FINISHED: ")
10,873
def pdf(mu_no): """ the probability distribution function which the number of fibers per MU should follow """ return pdf_unscaled(mu_no) / scaling_factor_pdf
10,874
def get_weather_by_key(key): """ Returns weather information for a given database key Args: key (string) -- database key for weather information Returns: None or Dict """ url = "%s/weather/%s.json" % (settings.FIREBASE_URL, key) r = requests.get(url) if r.status_code != 200: return None return r.json()
10,875
def source_receiver_midpoints(survey, **kwargs): """ Calculate source receiver midpoints. Input: :param SimPEG.electromagnetics.static.resistivity.Survey survey: DC survey object Output: :return numpy.ndarray midx: midpoints x location :return numpy.ndarray midz: midpoints z location """ if not isinstance(survey, dc.Survey): raise ValueError("Input must be of type {}".format(dc.Survey)) if len(kwargs) > 0: warnings.warn( "The keyword arguments of this function have been deprecated." " All of the necessary information is now in the DC survey class", DeprecationWarning, ) # Pre-allocate midxy = [] midz = [] for ii, source in enumerate(survey.source_list): tx_locs = source.location if isinstance(tx_locs, list): Cmid = (tx_locs[0][:-1] + tx_locs[1][:-1]) / 2 zsrc = (tx_locs[0][-1] + tx_locs[1][-1]) / 2 tx_sep = np.linalg.norm((tx_locs[0][:-1] - tx_locs[1][:-1])) else: Cmid = tx_locs[:-1] zsrc = tx_locs[-1] Pmids = [] for receiver in source.receiver_list: rx_locs = receiver.locations if isinstance(rx_locs, list): Pmid = (rx_locs[0][:, :-1] + rx_locs[1][:, :-1]) / 2 else: Pmid = rx_locs[:, :-1] Pmids.append(Pmid) Pmid = np.vstack(Pmids) midxy.append((Cmid + Pmid) / 2) diffs = np.linalg.norm((Cmid - Pmid), axis=1) if np.allclose(diffs, 0.0): # likely a wenner type survey. midz = zsrc - tx_sep / 2 * np.ones_like(diffs) else: midz.append(zsrc - diffs / 2) return np.vstack(midxy), np.hstack(midz)
10,876
def set_logger_class() -> None: """ Override python's logging logger class. This should be called as soon as possible. """ if logging.getLoggerClass() is not MCookBookLoggingClass: logging.setLoggerClass(MCookBookLoggingClass) logging.setLogRecordFactory(LogRecord) logging.root.addHandler(LOGGING_TEMP_HANDLER)
10,877
def test_mean_radial_velocity_vs_r4(): """ Brute force comparison of <Vr> calculation to pure python implementation, with PBCs turned on, and cross-correlation is tested """ npts1, npts2 = 150, 151 with NumpyRNGContext(fixed_seed): sample1 = np.random.random((npts1, 3)) sample2 = np.random.random((npts2, 3)) velocities1 = np.random.uniform(-100, 100, npts1*3).reshape((npts1, 3)) velocities2 = np.random.uniform(-100, 100, npts2*3).reshape((npts2, 3)) rbins = np.array([0, 0.05, 0.2, 0.3]) cython_result_pbc = mean_radial_velocity_vs_r(sample1, velocities1, rbins_absolute=rbins, sample2=sample2, velocities2=velocities2, period=1.) for i, rmin, rmax in zip(range(len(rbins)), rbins[:-1], rbins[1:]): python_result_no_pbc = pure_python_mean_radial_velocity_vs_r( sample1, velocities1, sample2, velocities2, rmin, rmax, Lbox=1) assert np.allclose(cython_result_pbc[i], python_result_no_pbc)
10,878
def fix_source_scale( transformer, output_std: float = 1, n_samples: int = 1000, use_copy: bool = True, ) -> float: """ Adjust the scale for a data source to fix the output variance of a transformer. The transformer's data source must have a `scale` parameter. Parameters ---------- transformer Transformer whose output variance is optimized. This should behave like `Arma`: it needs to have a `transform` method that can be called like `transformer.transform(U=source)`; and it needs an attribute called `default_source`. output_std Value to which to fix the transformer's output standard deviation. n_samples Number of samples to generate for each optimization iteration. use_copy If true, a deep copy of the data source is made for the optimization, so that the source's random generator is unaffected by this procedure. Returns the final value for the scale. """ output_var = output_std ** 2 source = transformer.default_source if use_copy: source_copy = copy.deepcopy(source) else: source_copy = source def objective(scale: float): source_copy.scale = np.abs(scale) samples = transformer.transform(n_samples, X=source_copy) return np.var(samples) / output_var - 1 soln = optimize.root_scalar( objective, x0=np.sqrt(output_var / 2), x1=np.sqrt(2 * output_var), maxiter=100, ) source.scale = np.abs(soln.root) return source.scale
10,879
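A stand-alone illustration of the same idea (fixing an output standard deviation by solving for a scale with scipy.optimize.root_scalar), using plain white noise rather than the transformer/Arma objects assumed above:

import numpy as np
from scipy import optimize

rng = np.random.default_rng(0)
target_std = 2.5
base = rng.normal(size=10_000)            # unit-variance stand-in for the data source

def objective(scale):
    samples = np.abs(scale) * base        # stand-in for transformer.transform(...)
    return np.var(samples) / target_std ** 2 - 1

sol = optimize.root_scalar(objective, x0=target_std / 2, x1=2 * target_std, maxiter=100)
print(abs(sol.root), np.std(np.abs(sol.root) * base))   # both close to 2.5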
def samplePinDuringCapture(f, pin, clock):
    """\
    Configure Arduino to enable sampling of a particular light sensor or audio signal input pin.
    Only enabled pins are read when capture() is subsequently called.

    :param f: file handle for the serial connection to the Arduino Due
    :param pin: The pin to enable.
    :param clock: a :class:`dvbcss.clock` clock object

    Values for the pin parameter:

    * 0 enables reading of light sensor 0 (on Arduino analogue pin 0).
    * 1 enables reading of audio input 0  (on Arduino analogue pin 1).
    * 2 enables reading of light sensor 1 (on Arduino analogue pin 2).
    * 3 enables reading of audio input 1  (on Arduino analogue pin 3).

    :returns: round-trip timings (t1, t2, t3, t4) measured against the specified clock object and the Arduino clock, as per :func:`writeCmdAndTimeRoundTrip`

    See :func:`writeCmdAndTimeRoundTrip` for details of the meaning of the returned round-trip timing data
    """
    CMD = CMDS_ENABLE_PIN[pin]
    return writeCmdAndTimeRoundTrip(f, clock, CMD)
10,880
def year_filter(year = None):
    """
    Determine whether the input year range collapses to a single year.

    Parameters
    ----------
    year :
        A two-element sequence (start year, end year).

    Returns
    -------
    boolean
        True if the start and end years are identical, i.e. a single year.
    """
    single_year = year[0] == year[1]
    return single_year
10,881
def distance(p1, p2):
    """ Return the Euclidean distance between two QPointF objects.

    Computed via Pythagoras' theorem using Qt's linear algebra objects:
    the QPointF difference is converted to a QVector2D and its length() taken.
    """
    if not (isinstance(p1, QPointF) and isinstance(p2, QPointF)):
        raise ValueError('distance(): p1 and p2 must both be of type QPointF')
    return toVector(p2 - p1).length()
10,882
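A small usage sketch, assuming PyQt5 is installed and that the toVector helper (defined elsewhere in the source) simply wraps QVector2D:

from PyQt5.QtCore import QPointF
from PyQt5.QtGui import QVector2D

def toVector(p):             # assumed helper: QPointF -> QVector2D
    return QVector2D(p)

p1, p2 = QPointF(0.0, 0.0), QPointF(3.0, 4.0)
print(distance(p1, p2))      # 5.0, the 3-4-5 triangle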
def make_primarybeammap(gps, delays, frequency, model, extension='png', plottype='beamsky',
                        figsize=14, directory=None, resolution=1000, zenithnorm=True,
                        b_add_sources=False):
    """
    Plot the MWA primary beam, sky and beam-weighted sky for one observation, and return
    the beam/sky sums and antenna temperatures for the XX and YY polarisations.
    """
    print("Output beam file resolution = %d , output directory = %s" % (resolution, directory))

    # (az_grid, za_grid) = beam_tools.makeAZZA(resolution,'ZEA') #Get grids in radians
    (az_grid, za_grid, n_total, dOMEGA) = beam_tools.makeAZZA_dOMEGA(resolution, 'ZEA')  # TEST SIN vs. ZEA
    az_grid = az_grid * 180 / math.pi
    za_grid = za_grid * 180 / math.pi
    # az_grid+=180.0
    alt_grid = 90 - (za_grid)
    obstime = su.time2tai(gps)

    # first go from altitude to zenith angle
    theta = (90 - alt_grid) * math.pi / 180
    phi = az_grid * math.pi / 180
    beams = {}
    # this is the response for XX and YY
    if model == 'analytic' or model == '2014':
        # Handles theta and phi as floats, 1D, or 2D arrays (and probably higher dimensions)
        beams['XX'], beams['YY'] = primary_beam.MWA_Tile_analytic(theta, phi,
                                                                  freq=frequency, delays=delays,
                                                                  zenithnorm=zenithnorm, power=True)
    elif model == 'avg_EE' or model == 'advanced' or model == '2015' or model == 'AEE':
        beams['XX'], beams['YY'] = primary_beam.MWA_Tile_advanced(theta, phi,
                                                                  freq=frequency, delays=delays,
                                                                  power=True)
    elif model == 'full_EE' or model == '2016' or model == 'FEE' or model == 'Full_EE':
        # model_ver = '02'
        # h5filepath = 'MWA_embedded_element_pattern_V' + model_ver + '.h5'
        beams['XX'], beams['YY'] = primary_beam.MWA_Tile_full_EE(theta, phi,
                                                                 freq=frequency, delays=delays,
                                                                 zenithnorm=zenithnorm, power=True)
    # elif model == 'full_EE_AAVS05':
    #     # h5filepath='/Users/230255E/Temp/_1508_Aug/embedded_element/h5/AAVS05_embedded_element_02_rev0.h5'
    #     # h5filepath = 'AAVS05_embedded_element_02_rev0.h5'
    #     beams['XX'], beams['YY'] = primary_beam.MWA_Tile_full_EE(theta, phi,
    #                                                              freq=frequency, delays=delays,
    #                                                              zenithnorm=zenithnorm, power=True)

    pols = ['XX', 'YY']

    # Get Haslam and interpolate onto grid
    my_map = get_Haslam(frequency)
    mask = numpy.isnan(za_grid)
    za_grid[numpy.isnan(za_grid)] = 90.0  # Replace nans as they break the interpolation

    sky_grid = map_sky(my_map['skymap'], my_map['RA'], my_map['dec'], gps, az_grid, za_grid)
    sky_grid[mask] = numpy.nan  # Remask beyond the horizon

    # test:
    # delays1 = numpy.array([[6, 6, 6, 6,
    #                         4, 4, 4, 4,
    #                         2, 2, 2, 2,
    #                         0, 0, 0, 0],
    #                        [6, 6, 6, 6,
    #                         4, 4, 4, 4,
    #                         2, 2, 2, 2,
    #                         0, 0, 0, 0]],
    #                       dtype=numpy.float32)
    # za_delays = {'0': delays1 * 0, '14': delays1, '28': delays1 * 2}
    # tile = mwa_tile.get_AA_Cached()
    # za_delay = '0'
    # (ax0, ay0) = tile.getArrayFactor(az_grid, za_grid, frequency, za_delays[za_delay])
    # val = numpy.abs(ax0)
    # val_max = numpy.nanmax(val)
    # print "VALUE : %.8f %.8f %.8f" % (frequency, val_max[0], val[resolution / 2, resolution / 2])

    beamsky_sum_XX = 0
    beam_sum_XX = 0
    Tant_XX = 0
    beam_dOMEGA_sum_XX = 0
    beamsky_sum_YY = 0
    beam_sum_YY = 0
    Tant_YY = 0
    beam_dOMEGA_sum_YY = 0

    for pol in pols:
        # Get gridded sky
        print('frequency=%.2f , polarisation=%s' % (frequency, pol))

        beam = beams[pol]
        beamsky = beam * sky_grid
        beam_dOMEGA = beam * dOMEGA
        print('sum(beam)', numpy.nansum(beam))
        print('sum(beamsky)', numpy.nansum(beamsky))
        beamsky_sum = numpy.nansum(beamsky)
        beam_sum = numpy.nansum(beam)
        beam_dOMEGA_sum = numpy.nansum(beam_dOMEGA)
        Tant = numpy.nansum(beamsky) / numpy.nansum(beam)
        print('Tant=sum(beamsky)/sum(beam)=', Tant)

        if pol == 'XX':
            beamsky_sum_XX = beamsky_sum
            beam_sum_XX = beam_sum
            Tant_XX = Tant
            beam_dOMEGA_sum_XX = beam_dOMEGA_sum

        if pol == 'YY':
            beamsky_sum_YY = beamsky_sum
            beam_sum_YY = beam_sum
            Tant_YY = Tant
            beam_dOMEGA_sum_YY = beam_dOMEGA_sum

        filename = '%s_%.2fMHz_%s_%s' % (gps, frequency / 1.0e6, pol, model)
        fstring = "%.2f" % (frequency / 1.0e6)

        if plottype == 'all':
            plottypes = ['beam', 'sky', 'beamsky', 'beamsky_scaled']
        else:
            plottypes = [plottype]

        for pt in plottypes:
            if pt == 'beamsky':
                textlabel = 'Beam x sky %s (LST %.2f hr), %s MHz, %s-pol, Tant=%.1f K' % (gps, get_LST(gps), fstring, pol, Tant)
                plot_beamsky(beamsky, frequency, textlabel, filename, extension,
                             obstime=obstime, figsize=figsize, directory=directory)
            elif pt == 'beamsky_scaled':
                textlabel = 'Beam x sky (scaled) %s (LST %.2f hr), %s MHz, %s-pol, Tant=%.1f K (max T=%.1f K)' % (gps, get_LST(gps), fstring, pol, Tant, float(numpy.nanmax(beamsky)))
                plot_beamsky(beamsky, frequency, textlabel, filename + '_scaled', extension,
                             obstime=obstime, figsize=figsize, vmax=numpy.nanmax(beamsky) * 0.4, directory=directory)
            elif pt == 'beam':
                textlabel = 'Beam for %s, %s MHz, %s-pol' % (gps, fstring, pol)
                plot_beamsky(beam, frequency, textlabel, filename + '_beam', extension,
                             obstime=obstime, figsize=figsize, cbar_label='', directory=directory,
                             b_add_sources=b_add_sources, az_grid=az_grid, za_grid=za_grid)
            elif pt == 'sky':
                textlabel = 'Sky for %s (LST %.2f hr), %s MHz, %s-pol' % (gps, get_LST(gps), fstring, pol)
                plot_beamsky(sky_grid, frequency, textlabel, filename + '_sky', extension,
                             obstime=obstime, figsize=figsize, directory=directory,
                             b_add_sources=b_add_sources, az_grid=az_grid, za_grid=za_grid)

    return (beamsky_sum_XX, beam_sum_XX, Tant_XX, beam_dOMEGA_sum_XX,
            beamsky_sum_YY, beam_sum_YY, Tant_YY, beam_dOMEGA_sum_YY)
10,883
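The core figure of merit computed above is the beam-weighted antenna temperature, Tant = sum(beam * sky) / sum(beam). A tiny NumPy illustration on synthetic 2x2 grids (not MWA data):

import numpy as np

beam = np.array([[1.0, 0.5], [0.5, 0.1]])           # normalised beam response on a toy grid
sky = np.array([[300.0, 400.0], [350.0, 1000.0]])   # sky brightness temperature in K

beamsky = beam * sky
t_ant = np.nansum(beamsky) / np.nansum(beam)        # beam-weighted average sky temperature
print(t_ant)                                        # ~369 K, dominated by the high-gain pixels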
def view_page(request, content_id=None):
    """Displays the content in a more detailed way"""

    if request.method == "GET":
        content_comments = []

        if content_id and content_id.isdigit():
            try:
                # Get the contents details
                content_data = Content.objects.get(pk=int(content_id))
                content_data.fire = int(content_data.contents_history.all().aggregate(Avg("vote"))["vote__avg"] * 10) if content_data.contents_history.all().aggregate(Avg("vote"))["vote__avg"] else 0

                try:
                    # Get all the available comments of this particular content
                    comment_data = content_data.content_comments.all()
                    if comment_data:
                        # Convert Data to JSON list
                        comment_list = json.loads(comment_data[0].comment)
                        for a in comment_list:
                            try:
                                user = User.objects.get(pk=a["user_id"])
                                content_comments.append({
                                    "id": a["id"],
                                    "content_id": a["content_id"],
                                    "profile_picture": (user.profile.profile_picture.url).replace("&export=download", "") if user.profile.profile_picture.url else "/static/teeker/assets/default_img/avatar/avataaars.png",
                                    "username": user.username,
                                    "user_id": user.pk,
                                    "comment": a["comment"],
                                    "date": a["date"]
                                })
                            except User.DoesNotExist:
                                print("Broken Comment...")
                except json.JSONDecodeError:
                    content_comments = []

                # Check if the content isn't suspended
                if content_data.suspended and not request.user.is_staff:
                    content_data = {
                        "title": "CONTENT UNAVAILABLE"
                    }
                else:
                    # Check if the content is in the logged in user's recommended list
                    content_data.recommended = False
                    if request.user.is_authenticated:
                        try:
                            if int(content_id) in json.loads(request.user.profile.recommended):
                                content_data.recommended = True
                        except json.JSONDecodeError:
                            content_data.recommended = False

            except Content.DoesNotExist:
                content_data = {
                    "title": "CONTENT UNAVAILABLE"
                }
        else:
            content_data = {
                "title": "CONTENT UNAVAILABLE"
            }

        html_content = {
            "content_data": content_data,
            "content_comments": content_comments
        }

        return render(request, "teeker/site_templates/view.html", html_content)
10,884
def run_feat_model(fsf_file):
    """ runs FSL's feat_model which uses the fsf file to generate files
    necessary to run film_gls to fit design matrix to timeseries"""
    # str.strip('.fsf') removes characters, not a suffix, so drop the extension explicitly
    clean_fsf = fsf_file.replace('.fsf', '')
    cmd = 'feat_model %s' % (clean_fsf)
    out = CommandLine(cmd).run()
    if not out.runtime.returncode == 0:
        return None, out.runtime.stderr
    mat = fsf_file.replace('.fsf', '.mat')
    return mat, cmd
10,885
def test_list_ncname_length_1_nistxml_sv_iv_list_ncname_length_2_2(mode, save_output, output_format): """ Type list/NCName is restricted by facet length with value 6. """ assert_bindings( schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-length-2.xsd", instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-length-2-2.xml", class_name="NistschemaSvIvListNcnameLength2", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
10,886
def exportDSV(input, delimiter = ',', textQualifier = '"', quoteall = 0, newline = '\n'):
    """
    PROTOTYPE:
      exportDSV(input, delimiter = ',', textQualifier = '\"', quoteall = 0)
    DESCRIPTION:
      Exports to DSV (delimiter-separated values) format.
    ARGUMENTS:
      - input is a list of lists of data (as returned by importDSV)
      - delimiter is the character used to delimit columns
      - textQualifier is the character used to delimit ambiguous data
      - quoteall is a boolean specifying whether to quote all data or only data that requires it
    RETURNS:
      data as string
    """
    if not delimiter or type(delimiter) != type(''):
        raise InvalidDelimiter
    if not textQualifier or type(textQualifier) != type(''):
        raise InvalidTextQualifier

    q, d = textQualifier, delimiter

    # double-up all text qualifiers in data (i.e. can't becomes can''t)
    data = [[str(cell).replace(q, q * 2) for cell in row] for row in input]

    if quoteall:
        # quote every data value
        data = [[q + cell + q for cell in row] for row in data]
    else:
        # quote only the values that contain qualifiers, delimiters or newlines
        data = [[(q + cell + q) if (q in cell or d in cell or '\n' in cell) else cell
                 for cell in row] for row in data]

    # assemble each line with delimiters, then all lines separated by newlines
    lines = [delimiter.join(row) for row in data]
    return newline.join(lines)
10,887
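A usage sketch with the default comma delimiter; values containing a delimiter or a quote are qualified and internal quotes doubled (output derived from the code above):

rows = [["name", "note"], ["ada", "said \"hi\", twice"]]
print(exportDSV(rows))
# name,note
# ada,"said ""hi"", twice"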
def DatasetSplit(X, y):
    """
    Split the frames X and their targets y into training and validation sets.

    To create the validation set, we need to make sure that the distribution of
    each class is similar in both training and validation sets; stratify=y (the
    class or tag of each frame) keeps a similar class distribution in both splits.
    """
    # creating the training and validation set
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2, stratify=y)

    # creating dummies of target variable for train and validation set
    y_train = pd.get_dummies(y_train)
    y_test = pd.get_dummies(y_test)

    return X_train, X_test, y_train, y_test
10,888
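A self-contained sketch of the same stratified split on toy data (class ratios preserved in both pieces), assuming scikit-learn and pandas are available:

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

X = np.arange(20).reshape(10, 2)
y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=42, test_size=0.2, stratify=y)
print(np.bincount(y_tr), np.bincount(y_te))   # [4 4] and [1 1] - same class balance
print(pd.get_dummies(y_tr))                   # one-hot targets, as in DatasetSplit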
def test_good_input1() -> None: """ Works on good input """ rv, out = getstatusoutput(f'{RUN} {SAMPLE1}') assert rv == 0 assert out == 'Rosalind_0808 60.919540'
10,889
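The expected output has the shape of the Rosalind GC-content exercise (a sequence id plus a GC percentage). A minimal computation of that statistic for an arbitrary sequence; the FASTA parsing done by the script under test is omitted:

def gc_content(seq: str) -> float:
    seq = seq.upper()
    return 100 * sum(base in "GC" for base in seq) / len(seq)

print(f"{gc_content('AGCTATAG'):.6f}")   # 37.500000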
def x_distance2(mesh):
    """Signed distances in the triangle planes from the opposite edge towards
    the node, for all evaluation points in R.
    """
    # TODO: with gradient, needs mesh info
    pass
10,890
def parse_children(root):
    """
    Recursively collect attribute values and dotted words from an XML tree.

    :param root: root tag of the .xml file
    :return: set of attribute values and words that contain a '.'
    """
    attrib_list = set()
    for child in root:
        text = child.text
        if text:
            text = text.strip(' \n\t\r')
            attrib_list = attrib_list | get_words_with_point(text)
        attrib_list = attrib_list | parse_children(child)
        for attribute_name, attribute_value in child.attrib.items():
            if '.' in attribute_value:
                attrib_list.add(attribute_value)
    return attrib_list
10,891
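A stand-alone illustration of the same recursive walk with xml.etree.ElementTree, using a simplified stand-in for the get_words_with_point helper (the real helper is defined elsewhere in the source):

import xml.etree.ElementTree as ET

def get_words_with_point(text):          # simplified stand-in for the real helper
    return {w for w in text.split() if "." in w}

def collect_dotted(root):
    found = set()
    for child in root:
        if child.text:
            found |= get_words_with_point(child.text.strip(" \n\t\r"))
        found |= collect_dotted(child)
        for value in child.attrib.values():
            if "." in value:
                found.add(value)
    return found

root = ET.fromstring('<a><b ref="pkg.mod">uses pkg.mod.func here</b><c>plain</c></a>')
print(collect_dotted(root))              # {'pkg.mod', 'pkg.mod.func'}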
def load_data(ETF):
    """
    Load the ETF data from a CSV file, drop NaN values and set the Date column as index.

    Parameters
    ----------
    ETF : str
        Path to the CSV file (Date in column 0, Close in column 4).

    Returns
    -------
    pandas.DataFrame
        A single 'Close' column indexed by the parsed dates.
    """
    data = pd.read_csv(ETF, usecols=[0, 4], parse_dates=[0], header=0)
    data.dropna(subset=['Close', 'Date'], inplace=True)
    data_close = pd.DataFrame(data['Close'])
    data_close.index = pd.to_datetime(data['Date'])
    return data_close
10,892
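A runnable usage sketch that writes a tiny five-column demo CSV (usecols=[0, 4] above assumes Date in column 0 and Close in column 4, the usual OHLC layout) and loads it back:

import pandas as pd

csv_text = (
    "Date,Open,High,Low,Close\n"
    "2021-01-04,100,102,99,101\n"
    "2021-01-05,101,103,100,102\n"
)
with open("demo_etf.csv", "w") as fh:   # throwaway demo file in the working directory
    fh.write(csv_text)

prices = load_data("demo_etf.csv")
print(prices)                           # 'Close' column indexed by the parsed 'Date'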
def update_disambiguation_report(authors, publication_uri):
    """
    Given the authors structure and the publication_uri, add to the report
    if any of the authors need to be disambiguated
    """
    for value in authors.values():
        if value[8] == "Disambig":
            if publication_uri in disambiguation_report:
                result = disambiguation_report[publication_uri]
                result[len(result.keys()) + 1] = value
                disambiguation_report[publication_uri] = result
            else:
                disambiguation_report[publication_uri] = {1: value}
    return
10,893
def preprocess_LLIL_GOTO(bv, llil_instruction): """ Replaces integer addresses of llil instructions with hex addresses of assembly """ func = get_function_at(bv, llil_instruction.address) # We have to use the lifted IL since the LLIL ignores comparisons and tests lifted_instruction = list( [k for k in find_lifted_il(func, llil_instruction.address) if k.operation == LowLevelILOperation.LLIL_GOTO] )[0] lifted_il = func.lifted_il llil_instruction.dest = hex(lifted_il[lifted_instruction.dest].address).replace("L", "") return llil_instruction
10,894
def _print_config() -> None:
    """print config"""
    config = {
        "Label": needs_response_label,
        "Minimum Response Time": minimum_response_time,
        "Exempt User List": exempt_user_list,
        "Exempt Labels": exempt_labels,
        "Exempt Authors": exempt_authors,
        "Repo": repo.name,
        "Debug Mode": debug_mode,
    }
    print(f"{line_break}\nWorkflow Settings:\n")
    # plain loop instead of a list comprehension used only for its side effects
    for setting, value in config.items():
        print(f"{setting}: {value}")
    print(f"{line_break}\n")
10,895
async def get_limited_f_result(request, task_id): """ This endpoint accepts the task_id and returns the result if ready. """ task_result = AsyncResult(task_id) result = { "task_id": task_id, "task_status": task_result.status, "task_result": task_result.result } return json(result)
10,896
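A hedged sketch of how a task id might reach this endpoint, assuming a Celery task named create_task defined elsewhere and a hypothetical route URL; only standard Celery (.delay(), .id) and requests calls are used:

# Producer side (hypothetical task module):
#   from tasks import create_task
#   result = create_task.delay(payload)    # enqueue the job
#   task_id = result.id                    # pass this id to the endpoint above

# Client side, polling the Sanic route that wraps get_limited_f_result:
import time
import requests

def wait_for(task_id, url="http://localhost:8000/result"):    # hypothetical URL
    while True:
        body = requests.get(f"{url}/{task_id}").json()
        if body["task_status"] in ("SUCCESS", "FAILURE"):
            return body
        time.sleep(0.5)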
async def main() -> None:
    """Create the aiohttp session and run the example."""
    async with ClientSession() as session:
        logging.basicConfig(level=logging.DEBUG)
        try:
            simplisafe = await API.async_from_refresh_token(
                SIMPLISAFE_REFRESH_TOKEN, session=session
            )

            try:
                await simplisafe.websocket.async_connect()
            except CannotConnectError as err:
                _LOGGER.error(
                    "There was an error while connecting to the server: %s", err
                )

            await simplisafe.websocket.async_listen()
        except SimplipyError as err:
            _LOGGER.error(err)
        except KeyboardInterrupt:
            pass
10,897
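Entry-point usage is the standard asyncio pattern; SIMPLISAFE_REFRESH_TOKEN and the other module-level names are assumed to be defined in the surrounding example script:

import asyncio

if __name__ == "__main__":
    asyncio.run(main())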
def join_synset(pred_matrix, pb_defns, csvout, reject_non_english, use_model, synset): """ Dumps a (frame, synset) relation to CSVOUT by joining predicate matrix with FinnPropBank. """ # Load mapping from English PropBank senses to English WordNet senses mapping = get_eng_pb_wn_map(pred_matrix, reject_non_english) # Join with mapping from Finnish to English PropBank propbank = get_reader(pb_defns) csvout = get_writer(csvout) for row in propbank: pb_finn = "{}.{:0>2}".format(row['base'], row['number']) if use_model: match = MODEL_RE.match(row['note']) if match: pb = match.group(1) else: pb = None else: pb = row['link_original'] if pb == 'none.01': pb = None if pb is not None and pb in mapping: for wn in mapping[pb]: if synset: wn = lemma_id_to_synset_id(wn) csvout.writerow((pb_finn, wn))
10,898
def _test_qrcode():
    """Recognize the QR code when the image is uploaded"""
    file_name = 'test_object_sdk_qrcode.file'
    with open(file_name, 'rb') as fp:  # validate using the file handle fp
        opts = '{"is_pic_info":1,"rules":[{"fileid":"format.jpg","rule":"QRcode/cover/1"}]}'
        response, data = client.ci_put_object_from_local_file_and_get_qrcode(
            Bucket=test_bucket,
            LocalFilePath=file_name,
            Key=file_name,
            EnableMD5=False,
            PicOperations=opts
        )
        print(response, data)

    """Recognize the QR code when the image is downloaded"""
    response, data = client.ci_get_object_qrcode(
        Bucket=test_bucket,
        Key=file_name,
        Cover=0
    )
    print(response, data)
10,899