Dataset columns: content (string, 22 to 815k characters) and id (int64, 0 to 4.91M).
def get_dataframe_from_table(table_name, con):
    """ put table into DataFrame """
    df = pd.read_sql_table(table_name, con)
    return df
8,800
def linkcard():
    """ Linking bank card to refill """
    print("Linking card...")
    pass
8,801
def qemu_pos_add(target_name, nw_name, mac_addr, ipv4_addr, ipv6_addr, consoles = None, disk_size = "30G", mr_partsizes = "1:4:5:5", sd_iftype = 'virtio', extra_cmdline = "", # pylint: disable = unused-argument ram_megs = 2048): """Add a QEMU virtual machine capable of booting over Provisioning OS. This target supports a serial console (*ttyS0*) and a single hard drive that gets fully reinitialized every time the server is restarted. Note this target uses a UEFI bios *and* defines UEFI storage space; this is needed so the right boot order is maintained. Add to a server configuration file ``/etc/ttbd-*/conf_*.py`` >>> target = qemu_pos_add("qemu-x86-64-05a" >>> "nwa", >>> mac_addr = "02:61:00:00:00:05", >>> ipv4_addr = "192.168.95.5", >>> ipv6_addr = "fc00::61x:05") Extra paramenters can be added by using the *extra_cmdline* arguments, such as for example, to add VNC display: >>> extra_cmdline = "-display vnc=0.0.0.0:0", Adding to other networks: >>> ttbl.config.targets['nuc-43'].add_to_interconnect( >>> 'nwb', dict( >>> mac_addr = "02:62:00:00:00:05", >>> ipv4_addr = "192.168.98.5", ipv4_prefix_len = 24, >>> ipv6_addr = "fc00::62:05", ipv6_prefix_len = 112) :param str target_name: name of the target to create :param str nw_name: name of the network to which this target will be connected that provides Provisioning OS services. :param str mac_addr: MAC address for this target (fake one). Will be given to the virtual device created and can't be the same as any other MAC address in the system or the networks. It is recommended to be in the format: >>> 02:HX:00:00:00:HY where HX and HY are two hex digits :param str disk_size: (optional) size specification for the target's hard drive, as understood by QEMU's qemu-img create program. :param list(str) consoles: serial consoles to create (defaults to just one, which is also the minimum). :param int ram_megs: (optional) size of memory in megabytes :param str mr_partsizes: (optional) specification for partition sizes for the multiroot Provisoning OS environment. FIXME: document link :param str extra_cmdline: a string with extra command line to add; %(FIELD)s supported (target tags). 
""" if consoles == None or consoles == []: consoles = [ 'ttyS0' ] assert isinstance(target_name, basestring) assert isinstance(consoles, list) \ and all([ isinstance(console, basestring) for console in consoles ]) assert len(consoles) >= 1 assert ram_megs > 0 if sd_iftype == 'virtio': pos_boot_dev = 'vda' elif sd_iftype == 'scsi': pos_boot_dev = 'sda' elif sd_iftype == 'ide': pos_boot_dev = 'sda' else: raise AssertionError("Don't know dev name for disk iftype %s" % sd_iftype) target = ttbl.tt_qemu2.tt_qemu( target_name, """\ /usr/bin/qemu-system-x86_64 \ -enable-kvm \ -drive if=pflash,format=raw,readonly,file=/usr/share/edk2/ovmf/OVMF_CODE.fd \ -drive if=pflash,format=raw,file=%%(path)s/OVMF_VARS.fd \ -m %(ram_megs)s \ -drive file=%%(path)s/hd.qcow2,if=%(sd_iftype)s,aio=threads \ -boot order=nc \ %(extra_cmdline)s \ """ % locals(), consoles = consoles, _tags = dict( bsp_models = { 'x86_64': None }, bsps = dict( x86_64 = dict(console = 'x86_64', linux = True), ), consoles = list(consoles), ssh_client = True, pos_capable = dict( boot_to_pos = 'pxe', boot_config = 'uefi', boot_to_normal = 'pxe', mount_fs = 'multiroot', ), pos_boot_interconnect = nw_name, pos_boot_dev = pos_boot_dev, pos_partsizes = mr_partsizes, linux_serial_console_default = consoles[0], ) ) # set up the consoles target.power_on_pre_fns.append(target._power_on_pre_consoles) # Setup the network hookups (requires vlan_pci) target.power_on_pre_fns.append(target._power_on_pre_nw) # tell QEMU to start the VM once we have it all setup target.power_on_post_fns.append(target._qmp_start) target.power_off_post_fns.append(target._power_off_post_nw) target.power_on_pre_fns.append(ttbl.dhcp.power_on_pre_pos_setup) # Create an HD for this guy -- we do it after creating the # target so the state path is created -- double check if the # drive already exists so not to override it? nah, screw # it--it is supposed to be all throwaway subprocess.check_call([ "qemu-img", "create", "-q", "-f", "qcow2", "%s/hd.qcow2" % (target.state_dir), disk_size ]) # reinitialize also the EFI vars storage shutil.copy("/usr/share/OVMF/OVMF_VARS.fd", target.state_dir) ttbl.config.target_add(target, target_type = "qemu-uefi-x86_64") target.add_to_interconnect( nw_name, dict( ipv4_addr = ipv4_addr, ipv4_prefix_len = 24, ipv6_addr = ipv6_addr, ipv6_prefix_len = 112, mac_addr = mac_addr, ) )
8,802
def _find_links_in_headers(*, headers, target_headers: List[str]) -> Dict[str, Dict[str, str]]:
    """Return a dictionary { rel: { url: 'url', mime_type: 'mime_type' } } containing the target headers."""
    found: Dict[str, Dict[str, str]] = {}
    links = headers.get("link")
    if links:
        # [{'url': 'https://micropub.jamesg.blog/micropub', 'rel': 'micropub'} ]
        parsed_link_headers: List[Dict[str, str]] = requests.utils.parse_header_links(links)
    else:
        return found
    for header in parsed_link_headers:
        url = header.get("url", "")
        rel = header.get("rel", "")
        mime_type = header.get("type", "")
        if _is_http_url(url) and rel in target_headers:
            found[rel] = {
                "url": url,
                "mime_type": mime_type,
            }
    # Add check for x-pingback header
    if "x-pingback" in target_headers:
        pingback_url = headers.get("x-pingback")
        if _is_http_url(pingback_url):
            # assign as "pingback" key in dictionary, using the validated pingback URL
            found["pingback"] = {
                "url": pingback_url,
                "mime_type": "",
            }
    return found
8,803
def calc_word_frequency(my_string, my_word):
    """Calculate the number of occurrences of a given word in a given string.

    Args:
        my_string (str): String to search
        my_word (str): The word to search for

    Returns:
        int: The number of occurrences of the given word in the given string.
    """
    # Remove all non alphanumeric characters from the string
    filtered_string = re.sub(r'[^A-Za-z0-9 ]+', '', my_string)
    # Return the number of occurrences of my_word in the filtered string
    return filtered_string.split().count(my_word)
8,804
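A quick usage sketch for calc_word_frequency above; it assumes re is imported at module level, and matching is case-sensitive:

import re

# Punctuation is stripped before counting; matching is case-sensitive.
print(calc_word_frequency("The cat sat on the cat mat, cat!", "cat"))  # -> 3
print(calc_word_frequency("The cat sat on the cat mat, cat!", "The"))  # -> 1 (lowercase "the" is not counted)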
def asyn_lpa_communities(G, weight=None, seed=None): """Returns communities in `G` as detected by asynchronous label propagation. The asynchronous label propagation algorithm is described in [1]_. The algorithm is probabilistic and the found communities may vary on different executions. The algorithm proceeds as follows. After initializing each node with a unique label, the algorithm repeatedly sets the label of a node to be the label that appears most frequently among that nodes neighbors. The algorithm halts when each node has the label that appears most frequently among its neighbors. The algorithm is asynchronous because each node is updated without waiting for updates on the remaining nodes. This generalized version of the algorithm in [1]_ accepts edge weights. Parameters ---------- G : Graph weight : string The edge attribute representing the weight of an edge. If None, each edge is assumed to have weight one. In this algorithm, the weight of an edge is used in determining the frequency with which a label appears among the neighbors of a node: a higher weight means the label appears more often. seed : integer, random_state, or None (default) Indicator of random number generation state. See :ref:`Randomness<randomness>`. Returns ------- communities : iterable Iterable of communities given as sets of nodes. Notes ------ Edge weight attributes must be numerical. References ---------- .. [1] Raghavan, Usha Nandini, Réka Albert, and Soundar Kumara. "Near linear time algorithm to detect community structures in large-scale networks." Physical Review E 76.3 (2007): 036106. """ labels = {n: i for i, n in enumerate(G)} cont = True while cont: cont = False nodes = list(G) seed.shuffle(nodes) # Calculate the label for each node for node in nodes: if len(G[node]) < 1: continue # Get label frequencies. Depending on the order they are processed # in some nodes with be in t and others in t-1, making the # algorithm asynchronous. label_freq = Counter() for v in G[node]: label_freq.update({labels[v]: G.edges[v, node][weight] if weight else 1}) # Choose the label with the highest frecuency. If more than 1 label # has the highest frecuency choose one randomly. max_freq = max(label_freq.values()) best_labels = [label for label, freq in label_freq.items() if freq == max_freq] new_label = seed.choice(best_labels) labels[node] = new_label # Continue until all nodes have a label that is better than other # neighbour labels (only one label has max_freq for each node). cont = cont or len(best_labels) > 1 # TODO In Python 3.3 or later, this should be `yield from ...`. return iter(groups(labels).values())
8,805
def _make_warmstart_dict_env():
    """Warm-start VecNormalize by stepping through BitFlippingEnv"""
    venv = DummyVecEnv([make_dict_env])
    venv = VecNormalize(venv)
    venv.reset()
    venv.get_original_obs()
    for _ in range(100):
        actions = [venv.action_space.sample()]
        venv.step(actions)
    return venv
8,806
def explore(graph: UndirectedGraph, reporter: Reporter) -> None:
    """Naive Bron-Kerbosch algorithm, optimized"""
    if candidates := graph.connected_vertices():
        visit(graph=graph, reporter=reporter,
              candidates=candidates, excluded=set(), clique=[])
8,807
def train_eval( root_dir, env_name='CartPole-v0', num_iterations=1000, # TODO(b/127576522): rename to policy_fc_layers. actor_fc_layers=(100,), value_net_fc_layers=(100,), use_value_network=False, # Params for collect collect_episodes_per_iteration=2, replay_buffer_capacity=2000, # Params for train learning_rate=1e-3, gamma=0.9, gradient_clipping=None, normalize_returns=True, value_estimation_loss_coef=0.2, # Params for eval num_eval_episodes=10, eval_interval=100, # Params for checkpoints, summaries, and logging train_checkpoint_interval=100, policy_checkpoint_interval=100, rb_checkpoint_interval=200, log_interval=100, summary_interval=100, summaries_flush_secs=1, debug_summaries=True, summarize_grads_and_vars=False, eval_metrics_callback=None): """A simple train and eval for Reinforce.""" root_dir = os.path.expanduser(root_dir) train_dir = os.path.join(root_dir, 'train') eval_dir = os.path.join(root_dir, 'eval') train_summary_writer = tf.compat.v2.summary.create_file_writer( train_dir, flush_millis=summaries_flush_secs * 1000) train_summary_writer.set_as_default() eval_summary_writer = tf.compat.v2.summary.create_file_writer( eval_dir, flush_millis=summaries_flush_secs * 1000) eval_metrics = [ py_metrics.AverageReturnMetric(buffer_size=num_eval_episodes), py_metrics.AverageEpisodeLengthMetric(buffer_size=num_eval_episodes), ] global_step = tf.compat.v1.train.get_or_create_global_step() with tf.compat.v2.summary.record_if( lambda: tf.math.equal(global_step % summary_interval, 0)): eval_py_env = suite_gym.load(env_name) tf_env = tf_py_environment.TFPyEnvironment(suite_gym.load(env_name)) # TODO(b/127870767): Handle distributions without gin. actor_net = actor_distribution_network.ActorDistributionNetwork( tf_env.time_step_spec().observation, tf_env.action_spec(), fc_layer_params=actor_fc_layers) if use_value_network: value_net = value_network.ValueNetwork( tf_env.time_step_spec().observation, fc_layer_params=value_net_fc_layers) tf_agent = reinforce_agent.ReinforceAgent( tf_env.time_step_spec(), tf_env.action_spec(), actor_network=actor_net, value_network=value_net if use_value_network else None, value_estimation_loss_coef=value_estimation_loss_coef, gamma=gamma, optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate), normalize_returns=normalize_returns, gradient_clipping=gradient_clipping, debug_summaries=debug_summaries, summarize_grads_and_vars=summarize_grads_and_vars, train_step_counter=global_step) replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( tf_agent.collect_data_spec, batch_size=tf_env.batch_size, max_length=replay_buffer_capacity) eval_py_policy = py_tf_policy.PyTFPolicy(tf_agent.policy) train_metrics = [ tf_metrics.NumberOfEpisodes(), tf_metrics.EnvironmentSteps(), tf_metrics.AverageReturnMetric(), tf_metrics.AverageEpisodeLengthMetric(), ] collect_policy = tf_agent.collect_policy collect_op = dynamic_episode_driver.DynamicEpisodeDriver( tf_env, collect_policy, observers=[replay_buffer.add_batch] + train_metrics, num_episodes=collect_episodes_per_iteration).run() experience = replay_buffer.gather_all() train_op = tf_agent.train(experience) clear_rb_op = replay_buffer.clear() train_checkpointer = common.Checkpointer( ckpt_dir=train_dir, agent=tf_agent, global_step=global_step, metrics=metric_utils.MetricsGroup(train_metrics, 'train_metrics')) policy_checkpointer = common.Checkpointer( ckpt_dir=os.path.join(train_dir, 'policy'), policy=tf_agent.policy, global_step=global_step) rb_checkpointer = common.Checkpointer( 
ckpt_dir=os.path.join(train_dir, 'replay_buffer'), max_to_keep=1, replay_buffer=replay_buffer) summary_ops = [] for train_metric in train_metrics: summary_ops.append(train_metric.tf_summaries( train_step=global_step, step_metrics=train_metrics[:2])) with eval_summary_writer.as_default(), \ tf.compat.v2.summary.record_if(True): for eval_metric in eval_metrics: eval_metric.tf_summaries(train_step=global_step) init_agent_op = tf_agent.initialize() with tf.compat.v1.Session() as sess: # Initialize the graph. train_checkpointer.initialize_or_restore(sess) rb_checkpointer.initialize_or_restore(sess) # TODO(b/126239733): Remove once Periodically can be saved. common.initialize_uninitialized_variables(sess) sess.run(init_agent_op) sess.run(train_summary_writer.init()) sess.run(eval_summary_writer.init()) # Compute evaluation metrics. global_step_call = sess.make_callable(global_step) global_step_val = global_step_call() metric_utils.compute_summaries( eval_metrics, eval_py_env, eval_py_policy, num_episodes=num_eval_episodes, global_step=global_step_val, callback=eval_metrics_callback, ) collect_call = sess.make_callable(collect_op) train_step_call = sess.make_callable([train_op, summary_ops]) clear_rb_call = sess.make_callable(clear_rb_op) timed_at_step = global_step_call() time_acc = 0 steps_per_second_ph = tf.compat.v1.placeholder( tf.float32, shape=(), name='steps_per_sec_ph') steps_per_second_summary = tf.compat.v2.summary.scalar( name='global_steps_per_sec', data=steps_per_second_ph, step=global_step) for _ in range(num_iterations): start_time = time.time() collect_call() total_loss, _ = train_step_call() clear_rb_call() time_acc += time.time() - start_time global_step_val = global_step_call() if global_step_val % log_interval == 0: logging.info('step = %d, loss = %f', global_step_val, total_loss.loss) steps_per_sec = (global_step_val - timed_at_step) / time_acc logging.info('%.3f steps/sec', steps_per_sec) sess.run( steps_per_second_summary, feed_dict={steps_per_second_ph: steps_per_sec}) timed_at_step = global_step_val time_acc = 0 if global_step_val % train_checkpoint_interval == 0: train_checkpointer.save(global_step=global_step_val) if global_step_val % policy_checkpoint_interval == 0: policy_checkpointer.save(global_step=global_step_val) if global_step_val % rb_checkpoint_interval == 0: rb_checkpointer.save(global_step=global_step_val) if global_step_val % eval_interval == 0: metric_utils.compute_summaries( eval_metrics, eval_py_env, eval_py_policy, num_episodes=num_eval_episodes, global_step=global_step_val, callback=eval_metrics_callback, )
8,808
def record_attendance(lesson_id): """ Record attendance for a lesson. """ # Get the UserLessonAssociation for the current and # the given lesson id. (So we can also display attendance etc.) lesson = Lesson.query.filter(Lesson.lesson_id == lesson_id).first() # Ensure the lesson id/association object is found. if not lesson: abort(404) record_single_attendance_form = RecordSingleAttendanceForm() if request.method == 'POST' and record_single_attendance_form.validate_on_submit(): assoc = UserLessonAssociation.query.filter( UserLessonAssociation.lesson_id == lesson_id ).filter( UserLessonAssociation.user_id == int(record_single_attendance_form.user_id.data) ).first() if assoc: assoc.attendance_code = record_single_attendance_form.attendance_code.data flash("Successfully updated lesson attendance.") else: abort(500) # We only want to send updates if they we're late or not there. if assoc.attendance_code == 'L' or assoc.attendance_code == 'N': # Send an email update. html = 'Attendance for your lesson on: ' + assoc.lesson.get_lesson_date() \ + ' has been updated. Your attendance is now recorded as: ' + \ assoc.get_lesson_attendance_str() # Send a lesson update. send_lesson_update( assoc.user, html, url_for( 'student.view_lesson', lesson_id=lesson_id, _external=True ), parent=True ) if check_attendance_complete(lesson): # The attendance is complete. lesson.update_lesson_details(attendance_recorded=True) else: lesson.update_lesson_details(attendance_recorded=False) # Save Changes db.session.commit() # Refresh return redirect(url_for('staff.record_attendance', lesson_id=lesson_id)) # Render the view lesson template and pass in the association and the lesson object. return render_template( 'staff/record_attendance.html', lesson=lesson, record_single_attendance_form=record_single_attendance_form )
8,809
async def test_hassio_discovery_dont_update_configuration(hass):
    """Test that Hass.io discovery of an already configured bridge aborts without updating the entry."""
    await setup_deconz_integration(hass)

    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        data={
            CONF_HOST: "1.2.3.4",
            CONF_PORT: 80,
            CONF_API_KEY: API_KEY,
            CONF_SERIAL: BRIDGEID,
        },
        context={"source": "hassio"},
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
8,810
async def websocket_device_automation_get_condition_capabilities(hass, connection, msg):
    """Handle request for device condition capabilities."""
    condition = msg["condition"]
    capabilities = await _async_get_device_automation_capabilities(
        hass, DeviceAutomationType.CONDITION, condition
    )
    connection.send_result(msg["id"], capabilities)
8,811
def add_action(action):
    """Register an action and schedule its opening when applicable.

    Args:
        action (.bdd.Action): the action to register
    """
    if not action.active:
        action.active = True
    action.add()
    # Add the opening task
    if action.base.trigger_debut == ActionTrigger.temporel:
        # Temporal trigger: schedule the opening
        Tache(timestamp=tools.next_occurence(action.base.heure_debut),
              commande=f"!open {action.id}",
              action=action).add()
    elif action.base.trigger_debut == ActionTrigger.perma:
        # Permanent trigger: open immediately
        Tache(timestamp=datetime.datetime.now(),
              commande=f"!open {action.id}",
              action=action).add()
8,812
def ts_to_datestr(ts, fmt="%Y-%m-%d %H:%M"):
    """Format a timestamp as a human-readable date string."""
    return ts_to_datetime(ts).strftime(fmt)
8,813
def _ComplexAbsGrad(op, grad):
    """Returns the gradient of ComplexAbs."""
    # TODO(b/27786104): The cast to complex could be removed once arithmetic
    # supports mixtures of complex64 and real values.
    return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
            math_ops.sign(op.inputs[0]))
8,814
def so3_rotate(batch_data):
    """ Randomly rotate the point clouds to augment the dataset.
        A random SO(3) rotation (about all three axes) is applied per shape.
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        rotation_angle_A = np.random.uniform() * 2 * np.pi
        rotation_angle_B = np.random.uniform() * 2 * np.pi
        rotation_angle_C = np.random.uniform() * 2 * np.pi
        cosval_A = np.cos(rotation_angle_A)
        sinval_A = np.sin(rotation_angle_A)
        cosval_B = np.cos(rotation_angle_B)
        sinval_B = np.sin(rotation_angle_B)
        cosval_C = np.cos(rotation_angle_C)
        sinval_C = np.sin(rotation_angle_C)
        # Combined rotation matrix for the three random Euler angles
        rotation_matrix = np.array(
            [[cosval_B*cosval_C, -cosval_B*sinval_C, sinval_B],
             [sinval_A*sinval_B*cosval_C+cosval_A*sinval_C, -sinval_A*sinval_B*sinval_C+cosval_A*cosval_C, -sinval_A*cosval_B],
             [-cosval_A*sinval_B*cosval_C+sinval_A*sinval_C, cosval_A*sinval_B*sinval_C+sinval_A*cosval_C, cosval_A*cosval_B]])
        shape_pc = batch_data[k, ...]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
    return rotated_data
8,815
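A quick sanity check for so3_rotate above, assuming NumPy is imported as np:

import numpy as np

batch = np.random.randn(4, 1024, 3).astype(np.float32)  # 4 point clouds of 1024 points
rotated = so3_rotate(batch)
print(rotated.shape)  # (4, 1024, 3)
# A rotation preserves each point's distance from the origin.
print(np.allclose(np.linalg.norm(batch[0], axis=1),
                  np.linalg.norm(rotated[0], axis=1), atol=1e-4))  # True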
def intmd5(source: str, nbytes=4) -> int:
    """
    Generate a deterministic pseudo-random integer of nbytes*8 bits derived from a source string.

    :param source: seed string used to generate the integer.
    :param nbytes: size of the integer in bytes.
    """
    hashobj = hashlib.md5(source.encode())
    return int.from_bytes(hashobj.digest()[:nbytes], byteorder="big", signed=False)
8,816
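Example use of intmd5 above; hashlib comes from the standard library and the output is stable across runs, so it can serve as a reproducible ID:

import hashlib

print(intmd5("example"))                       # same value every run (32-bit unsigned)
print(intmd5("example", nbytes=8))             # widen to a 64-bit unsigned integer
print(intmd5("example") == intmd5("example"))  # True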
def trisolve(a, b, c, y, inplace=False): """ The tridiagonal matrix (Thomas) algorithm for solving tridiagonal systems of equations: a_{i}x_{i-1} + b_{i}x_{i} + c_{i}x_{i+1} = y_{i} in matrix form: Mx = y TDMA is O(n), whereas standard Gaussian elimination is O(n^3). Arguments: ----------- a: (n - 1,) vector the lower diagonal of M b: (n,) vector the main diagonal of M c: (n - 1,) vector the upper diagonal of M y: (n,) vector the result of Mx inplace: if True, and if b and y are both float64 vectors, they will be modified in place (may be faster) Returns: ----------- x: (n,) vector the solution to Mx = y References: ----------- http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm http://www.netlib.org/lapack/explore-html/d1/db3/dgtsv_8f.html """ if (a.shape[0] != c.shape[0] or a.shape[0] >= b.shape[0] or b.shape[0] != y.shape[0]): raise ValueError('Invalid diagonal shapes') yshape_in = y.shape if y.ndim == 1: # needs to be (ldb, nrhs) y = y[:, None] rtype = np.result_type(a, b, c, y) if not inplace: # force a copy a = np.array(a, dtype=rtype, copy=True, order='C') b = np.array(b, dtype=rtype, copy=True, order='C') c = np.array(c, dtype=rtype, copy=True, order='C') y = np.array(y, dtype=rtype, copy=True, order='C') # this may also force copies if arrays have inconsistent types / incorrect # order a, b, c, y = (np.array(v, dtype=rtype, copy=False, order='C') for v in (a, b, c, y)) # y will now be modified in place to give the result if rtype == np.float32: _fnndeconv.TDMAs_lapacke(a, b, c, y) elif rtype == np.float64: _fnndeconv.TDMAd_lapacke(a, b, c, y) else: raise ValueError('Unsupported result type: %s' %rtype) return y.reshape(yshape_in)
8,817
def calc_mean_pred(df: pd.DataFrame): """ Make a prediction based on the average of the predictions of phones in the same collection. from https://www.kaggle.com/t88take/gsdc-phones-mean-prediction """ lerp_df = make_lerp_data(df=df) add_lerp = pd.concat([df, lerp_df]) # each time step == only one row, average over all phone latDeg, # lanDeg at each time step # eg. mean(original Deg Pixel4 and interpolated Deg 4XLModded with `make_lerp_data`) mean_pred_result = ( add_lerp.groupby(["collectionName", "millisSinceGpsEpoch"])[ ["latDeg", "lngDeg"] ] .mean() .reset_index() ) base_cols = ["collectionName", "phoneName", "phone", "millisSinceGpsEpoch"] try: mean_pred_df = df[base_cols + ["latDeg_gt", "lngDeg_gt", "speedMps"]].copy() except Exception: mean_pred_df = df[base_cols].copy() mean_pred_df = mean_pred_df.merge( mean_pred_result[["collectionName", "millisSinceGpsEpoch", "latDeg", "lngDeg"]], on=["collectionName", "millisSinceGpsEpoch"], how="left", ) return mean_pred_df
8,818
def get_meals(v2_response, venue_id): """ Extract meals into old format from a DiningV2 JSON response """ result_data = v2_response["result_data"] meals = [] day_parts = result_data["days"][0]["cafes"][venue_id]["dayparts"][0] for meal in day_parts: stations = [] for station in meal["stations"]: items = [] for item_id in station["items"]: item = result_data["items"][item_id] new_item = {} new_item["txtTitle"] = item["label"] new_item["txtPrice"] = "" new_item["txtNutritionInfo"] = "" new_item["txtDescription"] = item["description"] new_item["tblSide"] = "" new_item["tblFarmToFork"] = "" attrs = [{"description": item["cor_icon"][attr]} for attr in item["cor_icon"]] if len(attrs) == 1: new_item["tblAttributes"] = {"txtAttribute": attrs[0]} elif len(attrs) > 1: new_item["tblAttributes"] = {"txtAttribute": attrs} else: new_item["tblAttributes"] = "" if isinstance(item["options"], list): item["options"] = {} if "values" in item["options"]: for side in item["options"]["values"]: new_item["tblSide"] = {"txtSideName": side["label"]} items.append(new_item) stations.append({"tblItem": items, "txtStationDescription": station["label"]}) meals.append({"tblStation": stations, "txtDayPartDescription": meal["label"]}) return meals
8,819
def replace_control_curves(control_names, control_type='circle', controls_path=None, keep_color=True, **kwargs):
    """
    :param control_names:
    :param control_type:
    :param controls_path:
    :param keep_color:
    :return:
    """
    raise NotImplementedError('Function set_shape not implemented for current DCC!')
8,820
async def async_setup(hass, config_entry):
    """ Disallow configuration via YAML """
    return True
8,821
def blend(im1, im2, mask):
    """
    Blends and shows the given images according to mask
    :param im1: first image
    :param im2: second image
    :param mask: binary mask
    :return: result blend
    """
    res = []
    for i in range(3):
        res.append(pyramid_blending(im1[:, :, i], im2[:, :, i], mask, 7, 5, 5))
    res = np.dstack(res)
    fig, a = plt.subplots(nrows=2, ncols=2)
    a[0][0].imshow(im1, cmap='gray')
    a[0][1].imshow(im2, cmap='gray')
    a[1][0].imshow(mask, cmap='gray')
    a[1][1].imshow(res, cmap='gray')
    plt.show()
    return res
8,822
def parse(fileName):
    """
    Pull the EXIF info from a photo and sanitize it for sending as JSON
    by converting values to strings.
    """
    with open(fileName, 'rb') as f:
        exif = exifread.process_file(f, details=False)
    parsed = {}
    for key, value in exif.items():
        parsed[key] = str(value)
    return parsed
8,823
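A usage sketch for the EXIF parse function above; the image path is hypothetical and exifread must be installed:

tags = parse("photos/IMG_0001.jpg")       # hypothetical file path
print(tags.get("EXIF DateTimeOriginal"))  # e.g. the capture timestamp, already stringified
print(len(tags))                          # number of EXIF tags found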
def get_scenario():
    """
    Get scenario
    """
    try:
        scenario = os.environ['DEPLOY_SCENARIO']
    except KeyError:
        logger.error("Impossible to retrieve the scenario")
        scenario = "Unknown_scenario"
    return scenario
8,824
def test_MultiDAE(): """Test the MultiDAE class """ net = MultiDAE_net([1, 2], [2, 1], dropout=.1) model = MultiDAE(net) assert hasattr(model, "network"), "model should have the attribute newtork" assert hasattr(model, "device"), "model should have the attribute device" assert hasattr(model, "learning_rate"), "model should have the attribute learning_rate" assert hasattr(model, "optimizer"), "model should have the attribute optimizer" assert hasattr(model, "lam"), "model should have the attribute lam" assert model.learning_rate == 1e-3, "the learning rate should be 1e-3" assert model.network == net, "the network should be the same as the parameter" assert model.device == torch.device("cpu"), "the device should be cpu" assert model.lam == .2, "lambda should be .2" assert isinstance(model.optimizer, torch.optim.Adam), "optimizer should be of Adam type" assert str(model) == repr(model), "repr and str should have the same effect" gt = torch.FloatTensor([[1, 1], [2, 1]]) pred = torch.FloatTensor([[1, 1], [1, 1]]) torch.manual_seed(12345) assert model.loss_function(pred, gt) != torch.FloatTensor([.0]),\ "the loss should not be 0" values = np.array([1., 1., 1.]) rows = np.array([0, 0, 1]) cols = np.array([0, 1, 1]) train = csr_matrix((values, (rows, cols))) sampler = DataSampler(train, batch_size=1, shuffle=False) x = torch.FloatTensor([[1, 1], [2, 2]]) model.predict(x, True) torch.manual_seed(12345) out_1 = model.predict(x, False)[0] model.train(sampler, num_epochs=10, verbose=4) torch.manual_seed(12345) out_2 = model.predict(x, False)[0] assert not torch.all(out_1.eq(out_2)), "the outputs should be different" tmp = tempfile.NamedTemporaryFile() model.save_model(tmp.name, 1) net = MultiDAE_net([1, 2], [2, 1], dropout=.1) model2 = MultiDAE(net) model2.load_model(tmp.name) torch.manual_seed(12345) out_1 = model.predict(x, False)[0] torch.manual_seed(12345) out_2 = model2.predict(x, False)[0] assert torch.all(out_1.eq(out_2)), "the outputs should be the same"
8,825
def create_data_locker_adapter(form, url, notify):
    """
    Creates a Web Services Adapter in the form (self) and configures
    it to use the Data Locker located at `url`
    """
    data_locker = api.content.create(
        type='FormWebServiceAdapter',
        title='Data Locker',
        url=url,
        extraData=extra_data.keys(),
        failSilently=True,
        notifyOnFailure=notify,
        runDisabledAdapters=True,
        container=form
    )
    print("Data Locker created at: %s" % data_locker.absolute_url())
8,826
def make_request(
    endpoint: str, method: str = "get", data: Optional[dict] = None, timeout: int = 15
) -> Response:
    """Makes a request to the given endpoint and maps the response to a Response class"""
    method = method.lower()
    request_method: Callable = getattr(requests, method)
    if method not in SAFE_METHODS and data is None:
        raise ValueError("Data must be provided for POST, PUT and PATCH requests.")
    r: RequestsResponse
    if method not in SAFE_METHODS:
        r = request_method(endpoint, json=data, timeout=timeout)
    else:
        r = request_method(endpoint, timeout=timeout)
    return Response(status_code=r.status_code, data=r.json())
8,827
def _is_rpc_timeout(e):
    """ Check whether an exception is an individual RPC timeout. """
    # connection caused socket timeout is being re-raised as
    # ThriftConnectionTimeoutError now
    return isinstance(e, socket.timeout)
8,828
def fake_quantize_with_min_max(inputs, f_min, f_max, bit_width, quant_zero=True): """The fake quantization operation kernel. Args: inputs: a tensor containing values to be quantized. f_min: the minimum input value f_max: the maximum input value bit_width: the bit width Returns: a tensor containing quantized values. """ @tf.function def forward(inputs, f_min, f_max, bit_width, quant_zero): with tf.name_scope("FakeQuantizeWithMinMax"): float_bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width") bound = tf.math.pow(2.0, float_bit_width - 1) q_min = tf.math.negative(bound, name="q_min") q_max = tf.math.subtract(bound, 1, name="q_max") scale = get_scale(f_min, f_max, q_min, q_max) if quant_zero: q_zero_point, new_f_min, new_f_max = quantize_zero_point( scale, f_min, f_max, q_min, q_max) shift = new_f_min if quant_zero else f_min quantized = quantize(inputs, scale, shift, q_min, q_max) dequantized = dequantize(quantized, scale, shift, q_min, q_max) return dequantized @tf.function def grad_fn(dy): float_bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width") bound = tf.math.pow(2.0, float_bit_width - 1) q_min = tf.math.negative(bound, name="q_min") q_max = tf.math.subtract(bound, 1, name="q_max") scale = get_scale(f_min, f_max, q_min, q_max) if quant_zero: q_zero_point, new_f_min, new_f_max = quantize_zero_point( scale, f_min, f_max, q_min, q_max) between_min_max = (inputs >= new_f_min) & (inputs <= new_f_max) below_min = (inputs <= new_f_min) above_max = (inputs >= new_f_max) else: between_min_max = (inputs >= f_min) & (inputs <= f_max) below_min = (inputs <= f_min) above_max = (inputs >= f_max) ones = tf.ones_like(dy) zeros = tf.zeros_like(dy) grad_wrt_inputs = dy * tf.where(between_min_max, ones, zeros) grad_wrt_f_min = tf.reduce_sum(dy * tf.where(below_min, ones, zeros)) grad_wrt_f_max = tf.reduce_sum(dy * tf.where(above_max, ones, zeros)) return grad_wrt_inputs, grad_wrt_f_min, grad_wrt_f_max, None results = forward(inputs, f_min, f_max, bit_width, quant_zero) return results, grad_fn
8,829
def lorentzian(coordinates, center, fwhm): """ Unit integral Lorenzian function. Parameters ---------- coordinates : array-like Can be either a list of ndarrays, as a meshgrid coordinates list, or a single ndarray for 1D computation center : array-like Center of the lorentzian. Should be the same shape as `coordinates.ndim`. fwhm : float Full-width at half-max of the function. Returns ------- out : ndarray Lorentzian function of unit integral. Notes ----- The functional form of the Lorentzian is given by: .. math:: L(x) = \\frac{1}{\pi} \\frac{(\gamma/2)}{(x-c)^2 + (\gamma/2)^2} where :math:`\gamma` is the full-width at half-maximum, and :math:`c` is the center. For n dimensions, the functional form of the Lorentzian is given by: .. math:: L(x_1, ..., x_n) = \\frac{1}{n \pi} \\frac{(\gamma/2)}{(\sum_i{(x_i - c_i)^2} + (\gamma/2)^2)^{\\frac{1+n}{2}}} Example ------- >>> import numpy as np >>> from skued import lorentzian >>> >>> span = np.arange(-10, 10, 0.1) >>> xx, yy = np.meshgrid(span, span) >>> center = [0,0] >>> l = lorentzian( coordinates = [xx,yy], center = [0,0], fwhm = 1) >>> l.shape == xx.shape #True >>> np.sum(l)*0.1**2 #Integral should be unity (spacing = 0.1) """ width = 0.5 * fwhm # 1D is a special case, as coordinates are not given as a list of arrays if not isinstance(coordinates, (list, tuple)): # iterable but not ndarray return (width / pi) / ((coordinates - center) ** 2 + width ** 2) dim = len(coordinates) core = width / ( (sum([(x - c) ** 2 for x, c in zip(coordinates, center)]) + width ** 2) ) ** ((dim + 1) / 2) factor = 1 / (dim * pi) return factor * core
8,830
async def gspider(gspdr): """For .gmute command, globally mutes the replied/tagged person""" # Admin or creator check chat = await gspdr.get_chat() admin = chat.admin_rights creator = chat.creator # If not admin and not creator, return if not admin and not creator: await gspdr.edit(NO_ADMIN) return # Check if the function running under SQL mode try: from userbot.modules.sql_helper.gmute_sql import gmute except AttributeError: await gspdr.edit(NO_SQL) return user, reason = await get_user_from_event(gspdr) if not user: return # If pass, inform and start gmuting await gspdr.edit("`Grabs a huge, sticky duct tape!`") if gmute(user.id) is False: await gspdr.edit("`Error! User probably already gmuted.\nRe-rolls the tape.`") else: if reason: await gspdr.edit(f"`Globally taped!`\nReason: {reason}") else: await gspdr.edit("`Globally taped!`") if BOTLOG: await gspdr.client.send_message( BOTLOG_CHATID, "#GMUTE\n" f"USER: [{user.first_name}](tg://user?id={user.id})\n" f"CHAT: {gspdr.chat.title}(`{gspdr.chat_id}`)", )
8,831
def _to_gzip_base64(self, **kwargs):
    """ Reads the file as text, then turns to gzip+base64"""
    data = self.read_text(**kwargs)
    return Base.b64_gzip_encode(data)
8,832
def HTunigramX1(outf, grammar_spec):
    """hierarchical topic-based unigram model that requires at most one
    occurrence of a topic word per sentence (all words share a common base
    Word distribution)"""
    prefixes, topics, segments = grammar_spec
    outf.write("1 1 Sentence --> Words\n")
    outf.write("1 1 Words --> Words Word\n")
    for prefix in prefixes:
        outf.write("1 1 Words --> T_%s\n" % prefix)
    for topic in topics:
        if topic != "None":
            outf.write("1 1 Words --> Topic_%s Word_%s\n" % (topic, topic))
            outf.write("1 1 Topic_%s --> Topic_%s Word\n" % (topic, topic))
            outf.write("Word_%s --> BaseWord\n" % topic)
            for prefix in prefixes:
                if topic in prefix.split('|'):
                    outf.write("1 1 Topic_%s --> T_%s\n" % (topic, prefix))
    outf.write("Word --> BaseWord\n")
    outf.write("BaseWord --> Segments\n")
    outf.write("1 1 Segments --> Segment\n")
    outf.write("1 1 Segments --> Segments Segment\n")
    for segment in segments:
        outf.write("1 1 Segment --> %s\n" % segment)
8,833
def get_recipe_data(published=False, complete_data=False): """Return published or unpublished recipe data.""" try: Changed = User.alias() recipes = recipemodel.Recipe.select( recipemodel.Recipe, storedmodel.Stored, pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist") ).where( recipemodel.Recipe.published == published ).join( storedmodel.Stored, pw.JOIN.LEFT_OUTER, on=(storedmodel.Stored.recipeID == recipemodel.Recipe.id) ).join( tagmodel.RecipeTags, pw.JOIN.LEFT_OUTER, on=(tagmodel.RecipeTags.recipeID == recipemodel.Recipe.id) ).join( tagmodel.Tag, pw.JOIN.LEFT_OUTER, on=(tagmodel.Tag.id == tagmodel.RecipeTags.tagID) ).group_by( recipemodel.Recipe.id) if complete_data: # Load in User table recipes = recipes.select( User, Changed, recipemodel.Recipe, storedmodel.Stored, pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist") ).switch( recipemodel.Recipe ).join( User, pw.JOIN.LEFT_OUTER, on=(User.id == recipemodel.Recipe.created_by).alias("a") ).switch( recipemodel.Recipe ).join( Changed, pw.JOIN.LEFT_OUTER, on=(Changed.id == recipemodel.Recipe.changed_by).alias("b")) data = recipemodel.get_recipes(recipes, complete_data=complete_data) return utils.success_response(msg="Data loaded", data=data, hits=len(data)) except Exception as e: current_app.logger.error(traceback.format_exc()) return utils.error_response(f"Failed to load data: {e}")
8,834
def html_anchor_navigation(base_dir, experiment_dir, modules): """Build header of an experiment with links to all modules used for rendering. :param base_dir: parent folder in which to look for an experiment folders :param experiment_dir: experiment folder :param modules: list of all loaded modules :return: str """ return "\n".join(( """<header class="w3-container w3-dark-grey"> <h5><a href='#'>{folder}</a></h5> </header>""".format(folder=experiment_dir), "\n".join(""" <div style='white-space: nowrap;'> <div class=\"show toggle-cookie padding-right\" data-toggle='toggle-{id}-all' data-class-off='no-show'>&nbsp;</div> <a class='' href='#{module_title}'>{module_title}</a> </div>""".format( folder=experiment_dir, module_title=module.title, id=module.id) for module in modules), "<hr />" ))
8,835
def connect_and_play(player, name, channel, host, port, logfilename=None, out_function=None, print_state=True, use_debugboard=False, use_colour=False, use_unicode=False): """ Connect to and coordinate a game with a server, return a string describing the result. """ # Configure behaviour of this function depending on parameters: out = out_function if out_function else (lambda *_, **__: None) # no-op if print_state: def display_state(players_str, game): out("displaying game info:") out(players_str, depth=1) out(game, depth=1) else: def display_state(players, game): pass # Set up a connection with the server out("connecting to battleground", depth=-1) out("attempting to connect to the server...") server = Server.from_address(host, port) out("connection established!") # Wait for some matching players out("looking for a game", depth=-1) channel_str = f"channel '{channel}'" if channel else "open channel" out(f"submitting game request as '{name}' in {channel_str}...") server.send(M.PLAY, name=name, channel=channel) server.recv(M.OKAY) out("game request submitted.") out(f"waiting for opponents in {channel_str}...") out("(press ^C to stop waiting)") # (wait through some OKAY-OKAY msg exchanges until a GAME message comes--- # the server is asking if we are still here waiting, or have disconnected) gamemsg = server.recv(M.OKAY|M.GAME) while gamemsg['mtype'] is not M.GAME: server.send(M.OKAY) gamemsg = server.recv(M.OKAY|M.GAME) # when we get a game message, it's time to play! out("setting up game", depth=-1, clear=True) out("opponents found!") out("white player:", gamemsg['white']) out("black player:", gamemsg['black']) # Initialise the player out("initialising player", depth=-1) out("waiting for colour assignment...") initmsg = server.recv(M.INIT) out("playing as", initmsg['colour'], depth=1) out("initialising your player class...") player.init(initmsg['colour']) out("ready to play!") server.send(M.OKAY) # Set up a new game and display the initial state and players out("game start", depth=-1) players_str = format_players_str(gamemsg, player.colour) game = Game(logfilename=logfilename, debugboard=use_debugboard, colourboard=use_colour, unicodeboard=use_unicode) display_state(players_str, game) # Now wait for messages from the sever and respond accordingly while True: msg = server.recv(M.TURN|M.UPD8|M.OVER|M.ERRO) if msg['mtype'] is M.TURN: # it's our turn! out("your turn!", depth=-1, clear=True) display_state(players_str, game) # decide on action and submit it to server action = player.action() server.send(M.ACTN, action=action) elif msg['mtype'] is M.UPD8: # someone made a move! colour = msg['colour'] action = msg['action'] # update our local state, out("receiving update", depth=-1, clear=True) game.update(colour, action) display_state(players_str, game) player.update(colour, action) # then notify server we are ready to continue: server.send(M.OKAY) elif msg['mtype'] is M.OVER: # the game ended! return msg['result'] elif msg['mtype'] is M.ERRO: # seems like the server encountered an error, but not # with our connection raise ServerEncounteredError(msg['reason'])
8,836
def _to_int_and_fraction(d: Decimal) -> typing.Tuple[int, str]:
    """convert absolute decimal value into integer and decimal (<1)"""
    t = d.as_tuple()
    stringified = ''.join(map(str, t.digits))
    fraction = ''
    if t.exponent < 0:
        int_, fraction = stringified[:t.exponent], stringified[t.exponent:]
        fraction = fraction.rjust(-t.exponent, '0')
    else:
        int_ = stringified + t.exponent * '0'
    return int(int_ or 0), fraction
8,837
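Example behaviour of _to_int_and_fraction above, using Decimal from the standard library:

from decimal import Decimal

print(_to_int_and_fraction(Decimal("12.345")))  # (12, '345')
print(_to_int_and_fraction(Decimal("0.05")))    # (0, '05')
print(_to_int_and_fraction(Decimal("3e2")))     # (300, '')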
def json_to_obj(my_class_instance):
    """
    Takes a JSON representation as input and returns an object of class MyClass.

    >>> a = MyClass('me', 'my_surname', True)
    >>> json_dict = get_json(a)
    >>> json_to_obj(json_dict)
    <__main__.MyClass object at 0x7fd8e9634510>
    """
    some_dict = json.loads(my_class_instance)
    return MyClass(**some_dict)
8,838
def all_tags(path) -> {str: str}:
    """Method to return Exif tags"""
    file = open(path, "rb")
    tags = exifread.process_file(file, details=False)
    return tags
8,839
def static(request):
    """
    Backport django.core.context_processors.static to Django 1.2.
    """
    return {'STATIC_URL': djangoSettings.STATIC_URL}
8,840
def genBoard():
    """
    Generates an empty board.

    >>> genBoard()
    ["A", "B", "C", "D", "E", "F", "G", "H", "I"]
    """
    # Empty board
    empty = ["A", "B", "C", "D", "E", "F", "G", "H", "I"]
    # Return it
    return empty
8,841
def delete_evaluation(EvaluationId=None): """ Assigns the DELETED status to an Evaluation , rendering it unusable. After invoking the DeleteEvaluation operation, you can use the GetEvaluation operation to verify that the status of the Evaluation changed to DELETED . The results of the DeleteEvaluation operation are irreversible. See also: AWS API Documentation :example: response = client.delete_evaluation( EvaluationId='string' ) :type EvaluationId: string :param EvaluationId: [REQUIRED] A user-supplied ID that uniquely identifies the Evaluation to delete. :rtype: dict :return: { 'EvaluationId': 'string' } """ pass
8,842
def extract_features(segment_paths, output_dir, workers=1, sample_size=None, old_segment_format=True, resample_frequency=None, normalize_signal=False, only_missing_files=True, feature_length_seconds=60, window_size=5): """ Performs feature extraction of the segment files found in *segment_paths*. The features are written to csv files in *output_dir*. See :py:function`feature_extractor.extract` for more info. :param segment_paths: :param output_dir: :param workers: :param sample_size: :param old_segment_format: :param resample_frequency: :param normalize_signal: :param only_missing_files: :param feature_length_seconds: :param window_size: :return: """ feature_extractor.extract(segment_paths, extract_features_for_segment, # Arguments for feature_extractor.extract output_dir=output_dir, workers=workers, sample_size=sample_size, old_segment_format=old_segment_format, resample_frequency=resample_frequency, normalize_signal=normalize_signal, only_missing_files=only_missing_files, # Worker function kwargs: feature_length_seconds=feature_length_seconds, window_size=window_size)
8,843
def snmp_set_via_cli(oid, value, type):
    """
    Sets an SNMP variable using the snmpset command.

    :param oid: the OID to update
    :param value: the new value to set the OID to
    :param type: a single character type as required by the snmpset command
                 (i: INTEGER, u: unsigned INTEGER, t: TIMETICKS, a: IPADDRESS
                  o: OBJID, s: STRING, x: HEX STRING, d: DECIMAL STRING, b: BITS
                  U: unsigned int64, I: signed int64, F: float, D: double)
    """
    dev_null = open(os.devnull)
    process = subprocess.Popen([
        'snmpset', '-v2c', '-c', 'public', 'localhost:11161',
        oid, type, value
    ], stdout=dev_null, stderr=dev_null)
    process.communicate()
    if process.returncode != 0:
        raise SNMPSetCLIError(
            'failed to set {0} to {1} (type {2})'.format(oid, value, type)
        )
8,844
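A hedged usage sketch for snmp_set_via_cli above; the OID is the standard sysContact.0, and the call only succeeds against the local test agent on port 11161 that the function hard-codes:

# Set sysContact.0 to a new string value; raises SNMPSetCLIError if snmpset fails.
snmp_set_via_cli('1.3.6.1.2.1.1.4.0', 'admin@example.com', 's')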
def _resize_sample(source_file_path, target_file_path, _new_dim='256x256'):
    """ Resizes a sample to _new_dim.
    This method uses ResampleImage from the ANTs library, so you need to have
    it in your path environment.
    Note: The method only works with two-dimensional images.
    """
    from subprocess import call
    r_code = call(["ResampleImage", '2', source_file_path, target_file_path,
                   _new_dim, "1", "0"])
    # Check if everything went ok
    if r_code != 0:
        exit('Something went wrong with ResampleImage')
8,845
def _partition_at_level(dendrogram, level):
    """Return the partition of the nodes at the given level

    A dendrogram is a tree and each level is a partition of the graph nodes.
    Level 0 is the first partition, which contains the smallest
    snapshot_affiliations, and the best is len(dendrogram) - 1.
    The higher the level is, the bigger are the snapshot_affiliations
    """
    partition = dendrogram[0].copy()
    for index in range(1, level + 1):
        for node, community in partition.items():
            partition[node] = dendrogram[index][community]
    return partition
8,846
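A toy illustration of _partition_at_level above, with a hand-built two-level dendrogram:

# Level 0 assigns nodes to fine communities; level 1 merges those communities.
dendrogram = [
    {'a': 0, 'b': 0, 'c': 1, 'd': 2},  # 4 nodes -> 3 communities
    {0: 0, 1: 0, 2: 1},                # 3 communities -> 2 communities
]
print(_partition_at_level(dendrogram, 0))  # {'a': 0, 'b': 0, 'c': 1, 'd': 2}
print(_partition_at_level(dendrogram, 1))  # {'a': 0, 'b': 0, 'c': 0, 'd': 1}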
def filter_multi_copy_clusters(idx): """ {cluster_id : {taxonomy : {genomeid : [gene_uuid,...]}}} """ logging.info('Filtering out multi-copy genes...') clust_cnt = collections.defaultdict(dict) to_remove = [] for cluster_id,v in idx.items(): per_genome_copy = {} for tax,vv in v.items(): for genome_id,x in vv.items(): per_genome_copy[genome_id] = len(set(x['gene_ids'])) # any multi-copy? if any([x > 1 for x in per_genome_copy.values()]): to_remove.append(cluster_id) for cluster_id in to_remove: idx.pop(cluster_id, None) # status logging.info(' Number of multi-copy clusters removed: {}'.format(len(to_remove))) logging.info(' Number of single-copy clusters remaining: {}'.format(len(idx.keys()))) if len(idx.keys()) < 1: logging.info('Exiting due to a lack of clusters') sys.exit(0) metadata_summary(idx) return idx
8,847
def generate_ab_data(): """ Generate data for a second order reaction A + B -> P d[A]/dt = -k[A][B] d[B]/dt = -k[A][B] d[P]/dt = k[A][B] [P] = ([B]0 - [A]0 h(t)) / (1 - h(t)) where h(t) = ([B]0 / [A]0) e^(kt ([B]0 - [A]0)) Data printed in a .csv file """ times = np.linspace(0, 10, num=100) # s a0 = 0.6 # mol dm^-3 b0 = 0.5 # mol dm^-3 k = 1.7 # mol^-1 dm^3 s^-1 with open('ab.csv', 'w') as data_file: print('Data for A + B -> P where v = k[A][B]', file=data_file) for i, t in enumerate(times): h = (b0 / a0) * np.exp(k * t * (b0 - a0)) p = (b0 - a0 * h) / (1.0 - h) a = a0 - p b = b0 - p # Time, [A], [B], [P] print(f'{t:.6f},{a:.6f},{b:.6f},{p:.6f}', file=data_file) return None
8,848
def autocomplete(segment: str, line: str, parts: typing.List[str]): """ :param segment: :param line: :param parts: :return: """ if parts[-1].startswith('-'): return autocompletion.match_flags( segment=segment, value=parts[-1], shorts=['f', 'a', 'd'], longs=['force', 'append', 'directory'] ) if len(parts) == 1: return autocompletion.match_path(segment, parts[0]) return []
8,849
def main(): """ main execution sequence """ from sklearn import __version__ as version print('sklearn version:', version) # setup DmimData instance (using MQNs) mqn = DMD('DMIM_v1.0.db', SEED) mqn.featurize('mqn') mqn.train_test_split() mqn.center_and_scale() print('training SVR using MQNs ...') mqn_svr = train_svr(mqn.X_train_ss_, mqn.y_train_) print('training set:') print(all_metrics(mqn.y_train_, mqn_svr.predict(mqn.X_train_ss_))) print('test set:') print(all_metrics(mqn.y_test_, mqn_svr.predict(mqn.X_test_ss_))) print() # save the fitted SVR with open('mqn_svr_seed420.pickle', 'wb') as pf: dump(mqn_svr, pf) # and save the scaler with open('mqn_scaler_seed420.pickle', 'wb') as pf: dump(mqn.SScaler_, pf) # setup DmimData instance (using MD3Ds) md3d = DMD('DMIM_v1.0.db', SEED) md3d.featurize('md3d') md3d.train_test_split() md3d.center_and_scale() print('training SVR using MD3Ds ...') md3d_svr = train_svr(md3d.X_train_ss_, md3d.y_train_) print('training set:') print(all_metrics(md3d.y_train_, md3d_svr.predict(md3d.X_train_ss_))) print('test set:') print(all_metrics(md3d.y_test_, md3d_svr.predict(md3d.X_test_ss_))) print() # save the fitted SVR with open('md3d_svr_seed420.pickle', 'wb') as pf: dump(md3d_svr, pf) # and save the scaler with open('md3d_scaler_seed420.pickle', 'wb') as pf: dump(md3d.SScaler_, pf) """ # setup DmimData instance (using combined MQNs and MD3Ds) comb = DMD('DMIM_v1.0.db', SEED) comb.featurize('combined') comb.train_test_split() comb.center_and_scale() print('training SVR using combined MQNs and MD3Ds ...') comb_svr = train_svr(comb.X_train_ss_, comb.y_train_) print('training set:') print(all_metrics(comb.y_train_, comb_svr.predict(comb.X_train_ss_))) print('test set:') print(all_metrics(comb.y_test_, comb_svr.predict(comb.X_test_ss_))) print() # save the fitted SVR with open('comb_svr_seed420.pickle', 'wb') as pf: dump(comb_svr, pf) """ # setup DmimData instance (using combined MQNs and MD3Ds) cust = DMD('DMIM_v1.0.db', SEED) cust.featurize('custom', custom_mqns=['hac', 'c', 'adb', 'asv', 'ctv', 'hbam', 'hbd'], custom_md3ds=['pmi1', 'pmi2', 'pmi3', 'rmd02']) cust.train_test_split() cust.center_and_scale() print('training SVR using custom MQNs and MD3Ds ...') cust_svr = train_svr(cust.X_train_ss_, cust.y_train_) print('training set:') print(all_metrics(cust.y_train_, cust_svr.predict(cust.X_train_ss_))) print('test set:') print(all_metrics(cust.y_test_, cust_svr.predict(cust.X_test_ss_))) print() # save the fitted SVR with open('cust_svr_seed420.pickle', 'wb') as pf: dump(cust_svr, pf) # and save the scaler with open('cust_scaler_seed420.pickle', 'wb') as pf: dump(mqn.SScaler_, pf)
8,850
def elast_tri3(coord, params): """Triangular element with 3 nodes Parameters ---------- coord : ndarray Coordinates for the nodes of the element (3, 2). params : tuple Material parameters in the following order: young : float Young modulus (>0). poisson : float Poisson coefficient (-1, 0.5). dens : float, optional Density (>0). Returns ------- stiff_mat : ndarray Local stiffness matrix for the element (6, 6). mass_mat : ndarray Local mass matrix for the element (6, 6). Examples -------- >>> coord = np.array([ ... [0, 0], ... [1, 0], ... [0, 1]]) >>> params = [8/3, 1/3] >>> stiff, mass = uel3ntrian(coord, params) >>> stiff_ex = 1/2 * np.array([ ... [4, 2, -3, -1, -1, -1], ... [2, 4, -1, -1, -1, -3], ... [-3, -1, 3, 0, 0, 1], ... [-1, -1, 0, 1, 1, 0], ... [-1, -1, 0, 1, 1, 0], ... [-1, -3, 1, 0, 0, 3]]) >>> np.allclose(stiff, stiff_ex) True """ stiff_mat = np.zeros([6, 6]) mass_mat = np.zeros([6, 6]) C = fem.umat(params[:2]) if len(params) == 2: dens = 1 else: dens = params[-1] gpts, gwts = gau.gauss_tri(order=2) for cont in range(gpts.shape[0]): r, s = gpts[cont, :] H, B, det = fem.elast_diff_2d(r, s, coord, fem.shape_tri3) factor = det * gwts[cont] stiff_mat += 0.5 * factor * (B.T @ C @ B) mass_mat += 0.5 * dens * factor * (H.T @ H) return stiff_mat, mass_mat
8,851
def make_offgrid_patches_xcenter_xincrement(n_increments:int, n_centers:int, min_l:float, patch_dim:float, device): """ for each random point in the image and for each increments, make a square patch return: I x C x P x P x 2 """ patches_xcenter = make_offgrid_patches_xcenter(n_centers, min_l, patch_dim, device) # C x P x P x 2 increments = min_l * torch.arange(0,n_increments,device=patches_xcenter.device) # expand patches for each increments size = (n_increments, *patches_xcenter.shape) patches_xcenter_xincrement = patches_xcenter.unsqueeze(0).expand(size) assert torch.allclose(patches_xcenter_xincrement[0,:,:], patches_xcenter) assert torch.allclose(patches_xcenter_xincrement[1,:,:], patches_xcenter) patches_xcenter_xincrement = patches_xcenter_xincrement + increments[:,None,None,None,None] # some checks assert len(patches_xcenter_xincrement.shape) == 5 assert patches_xcenter_xincrement.shape[-1] == 2 assert patches_xcenter_xincrement.shape[0] == n_increments assert patches_xcenter_xincrement.shape[1] == n_centers assert patches_xcenter_xincrement.shape[2] == patches_xcenter_xincrement.shape[3] == patch_dim*2 return patches_xcenter_xincrement
8,852
def load_gazes_from_xml(filepath: str) -> pd.DataFrame:
    """loads data from the gaze XML file output by itrace.
    Returns the responses as a pandas DataFrame

    Parameters
    ----------
    filepath : str
        path to XML

    Returns
    -------
    pd.DataFrame
        Gazes contained in the xml file
    """
    root = ET.parse(filepath)
    return pd.DataFrame(list(map(lambda e: e.attrib, root.findall("./gazes/response"))))
8,853
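Usage sketch for load_gazes_from_xml above; the file name is hypothetical, and the function relies on xml.etree.ElementTree imported as ET and pandas as pd:

df = load_gazes_from_xml("session_gazes.xml")  # hypothetical iTrace export
print(len(df))              # number of <response> elements under <gazes>
print(df.columns.tolist())  # one column per XML attribute on the response elements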
def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
    """Rotate points angle (in deg) about an axis."""
    axis = axis.lower()

    # Copy original array if not inplace
    if not inplace:
        points = points.copy()

    # Convert angle to radians
    if deg:
        angle *= np.pi / 180

    if axis == 'x':
        y = points[:, 1] * np.cos(angle) - points[:, 2] * np.sin(angle)
        z = points[:, 1] * np.sin(angle) + points[:, 2] * np.cos(angle)
        points[:, 1] = y
        points[:, 2] = z
    elif axis == 'y':
        x = points[:, 0] * np.cos(angle) + points[:, 2] * np.sin(angle)
        z = - points[:, 0] * np.sin(angle) + points[:, 2] * np.cos(angle)
        points[:, 0] = x
        points[:, 2] = z
    elif axis == 'z':
        x = points[:, 0] * np.cos(angle) - points[:, 1] * np.sin(angle)
        y = points[:, 0] * np.sin(angle) + points[:, 1] * np.cos(angle)
        points[:, 0] = x
        points[:, 1] = y
    else:
        raise ValueError('invalid axis. Must be either "x", "y", or "z"')

    if not inplace:
        return points
8,854
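A quick check of axis_rotation above (NumPy assumed as np); results are approximate because of floating-point cosines:

import numpy as np

pts = np.array([[1.0, 0.0, 0.0]])
print(axis_rotation(pts, 90, axis='z'))  # approximately [[0., 1., 0.]]
print(axis_rotation(pts, 90, axis='y'))  # approximately [[0., 0., -1.]]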
async def unhandled_exception(request: Request, exc: UnhandledException):
    """Return a 400 JSON response describing an UnhandledException."""
    return JSONResponse(
        status_code=400,
        content={"message": "Something bad happened"
                 f" Internal Error: {exc.message!r}"},
    )
8,855
def register_view(request):
    """Render HTML registration page"""
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()
            user = form.cleaned_data.get('username')
            messages.success(request, "Account was created for " + user)
            return redirect('loginPage')
    context = {'form': form}
    return render(request, 'register.html', {'data': context})
8,856
def test_e_mail(provider: str, user: str, password: str): """Function that performs test on all command of EmailMessage and EmailServer class""" # Test create EmailMessage object message = EmailMessage('Test send email using python') # Test add message message.add_message('Test message') # Test add file message.add_file('NAP.jpg') # Test create EmailServer object server = EMailServer(provider, user, password) print('Test send email via normal procedure') if server.connect() == 'Success': print('Connection success') if server.send_mail(user, message) == 'Success': print('Send email success') else: print('Send email failed') print(server.disconnect()) else: print('Connection failed') print('Test send quick email') print(server.quick_mail(user, message))
8,857
def luv2rgb(luv, *, channel_axis=-1): """Luv to RGB color space conversion. Parameters ---------- luv : (..., 3, ...) array_like The image in CIE Luv format. By default, the final dimension denotes channels. Returns ------- out : (..., 3, ...) ndarray The image in RGB format. Same dimensions as input. Raises ------ ValueError If `luv` is not at least 2-D with shape (..., 3, ...). Notes ----- This function uses luv2xyz and xyz2rgb. """ return xyz2rgb(luv2xyz(luv))
8,858
def deposit_fetcher(record_uuid, data):
    """Fetch a deposit identifier.

    :param record_uuid: Record UUID.
    :param data: Record content.
    :returns: A :class:`invenio_pidstore.fetchers.FetchedPID` that contains
        data['_deposit']['id'] as pid_value.
    """
    return FetchedPID(
        provider=DepositProvider,
        pid_type=DepositProvider.pid_type,
        pid_value=str(data['_deposit']['id']),
    )
8,859
def make_filename():
    """This function creates a unique filename."""
    unique_filename = time.strftime("%Y%m%d-%H%M%S")
    #unique_filename = str(uuid.uuid1())
    #unique_filename = str(uuid.uuid1().hex[0:7])
    save_name = 'capture_ferhat_{}.png'.format(unique_filename)
    return save_name
8,860
def compareLists(sentenceList, majorCharacters):
    """
    Compares the list of sentences with the character names and returns
    sentences that include names.
    """
    characterSentences = defaultdict(list)
    for sentence in sentenceList:
        for name in majorCharacters:
            if re.search(r"\b(?=\w)%s\b(?!\w)" % re.escape(name),
                         sentence, re.IGNORECASE):
                characterSentences[name].append(sentence)
    return characterSentences
8,861
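Example for compareLists above; it assumes re and collections.defaultdict are imported at module level:

sentences = ["Alice met Bob.", "Bob left early.", "Nobody stayed."]
result = compareLists(sentences, ["Alice", "Bob"])
print(dict(result))
# {'Alice': ['Alice met Bob.'], 'Bob': ['Alice met Bob.', 'Bob left early.']}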
def is_live_site(url):
    """Ensure that the tool is not used on the production Isaac website.

    Use of this tool or any part of it on Isaac Physics and related websites
    is a violation of our terms of use: https://isaacphysics.org/terms
    """
    if re.search(r"http(s)?://isaac(physics|chemistry|maths|biology|science)\.org", url):
        return True
    else:
        return False
8,862
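How is_live_site above distinguishes a production URL from a local development URL (URLs are illustrative):

print(is_live_site("https://isaacphysics.org/questions/example"))  # True -> refuse to run
print(is_live_site("http://localhost:8000/questions/example"))     # False -> safe to test against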
def generate_train_patch(image_path, mask_path, crop_size=(3000, 3000), steps=(2000, 2000), save_dir=None): """ """ fileid = image_path.split("/")[-1].split(".")[0] save_path = os.path.join(save_dir, fileid) os.makedirs(save_path, exist_ok=True) raw_img = image_read(image_path) raw_msk = image_read(mask_path) h, w, _ = raw_img.shape coord_lst = generate_coord(h, w, crop_size, steps) for i in coord_lst: patch_img = raw_img[i[0]:i[1], i[2]:i[3], :] patch_msk = raw_msk[i[0]:i[1], i[2]:i[3], :] save_name = "{}_{}_{}_{}".format(str(i[0]), str(i[1]), str(i[2]), str(i[3])) cv2.imwrite(os.path.join(save_path, save_name+".png"), patch_img) cv2.imwrite(os.path.join(save_path, save_name+".jpg"), patch_msk)
8,863
def create_test_list(root): """ Create test list. """ fw = open("test_list.txt", 'w') for aoi in os.listdir(root): if not os.path.isdir(os.path.join(root, aoi)): continue img_path = os.path.join(root, aoi, "images_masked_3x_divide") for img_file in os.listdir(img_path): fw.write( os.path.join(aoi, "images_masked_3x_divide", img_file) + " dummy.tif\n") fw.close()
8,864
def score_false(e, sel): """Return scores for internal-terminal nodes""" return e*(~sel).sum()
8,865
def Select_multi_items(list_item,mode='multiple', fact=2, win_widthmm=80, win_heightmm=100, font_size=16): """interactive selection of items among the list list_item Args: list_item (list): list of items used for the selection Returns: val (list): list of selected items without duplicate """ # Standard library imports import os import tkinter as tk import tkinter.font as TkFont # Local imports from .BiblioSys import DISPLAYS,GUI_DISP global val def selected_item(): global val val = [listbox.get(i) for i in listbox.curselection()] if os.name == 'nt': window.destroy() # Getting the ppi of the selected prime display. ppi = DISPLAYS[GUI_DISP]['ppi'] # Setting the window title if mode == 'single': title = 'Single item selection' else: title = 'Multiple items selection' # Creating the gui window window = tk.Tk() # Setting the window geometry parameters font_title = TkFont.Font(family='arial', size=font_size, weight='bold') title_widthmm,_ = _str_size_mm(title, font_title, ppi) win_widthmm = max(title_widthmm*fact,win_widthmm) win_widthpx = str(_mm_to_px(win_widthmm,ppi)) win_heightpx = str(_mm_to_px(win_heightmm,ppi)) #win_heightpx = '500' win_xpx = str(int(DISPLAYS[GUI_DISP]['x']) + 50) win_ypx = str(int(DISPLAYS[GUI_DISP]['y']) + 50) window.geometry(f'{win_widthpx}x{win_heightpx}+{win_xpx}+{win_ypx}') window.attributes("-topmost", True) window.title(title) yscrollbar = tk.Scrollbar(window) yscrollbar.pack(side = tk.RIGHT, fill = tk.Y) selectmode = tk.MULTIPLE if mode == 'single':selectmode = tk.SINGLE listbox = tk.Listbox(window, width=40, height=10, selectmode=selectmode, yscrollcommand = yscrollbar.set) x = list_item for idx,item in enumerate(x): listbox.insert(idx, item) listbox.itemconfig(idx, bg = "white" if idx % 2 == 0 else "white") btn = tk.Button(window, text='OK', command=selected_item) btn.pack(side='bottom') listbox.pack(padx = 10, pady = 10,expand = tk.YES, fill = "both") yscrollbar.config(command = listbox.yview) window.mainloop() return val
8,866
def retrieveXS(filePath, evMin=None, evMax=None): """Open an ENDF file and return the scattering XS""" logging.info('Retrieving scattering cross sections from file {}' .format(filePath)) energies = [] crossSections = [] with open(filePath) as fp: line = fp.readline() while line[0] == '#': line = fp.readline() while line != '' and '#END' not in line: ev, xs = [float(xx) for xx in line.split()[:2]] energies.append(ev) crossSections.append(xs) line = fp.readline() logging.info('Done') energies = numpy.array(energies) crossSections = numpy.array(crossSections) bounds = energies.min(), energies.max() if evMin is None: evMin = bounds[0] else: if bounds[0] > evMin: logging.warning('Could not find requested minimum energy ' '{:.4E} eV in cross section file {}. ' 'Using minimum found: {:.4E} eV' .format(evMin, filePath, bounds[0])) evMin = bounds[0] indices = numpy.where(energies >= evMin) energies = energies[indices] crossSections = crossSections[indices] if evMax is None: evMax = bounds[1] else: if bounds[1] < evMax: logging.warning('Could not find requested maximum energy ' '{:.4E} eV in cross section file {}. ' 'Using maximum found: {:.4E} eV' .format(evMax, filePath, bounds[1])) evMax = bounds[1] indices = numpy.where(energies <= evMax) energies = energies[indices] crossSections = crossSections[indices] return energies, crossSections
8,867
def _parse_java_simple_date_format(fmt): """ Split a SimpleDateFormat into literal strings and format codes with counts. Examples -------- >>> _parse_java_simple_date_format("'Date:' EEEEE, MMM dd, ''yy") ['Date: ', ('E', 5), ', ', ('M', 3), ' ', ('d', 2), ", '", ('y', 2)] """ out = [] quoted = False prev_c = None prev_count = 0 literal_text = '' k = 0 while k < len(fmt): c = fmt[k] k += 1 if not quoted and c == "'" and k < len(fmt) and fmt[k] == "'": # Repeated single quote. if prev_c is not None: out.append((prev_c, prev_count)) prev_c = None prev_count = 0 literal_text += c k += 1 continue if c == "'": if not quoted: if prev_c is not None: out.append((prev_c, prev_count)) prev_c = None prev_count = 0 if literal_text: out.append(literal_text) literal_text = '' quoted = not quoted continue if quoted: literal_text += c continue if c not in string.ascii_letters: if prev_c is not None: out.append((prev_c, prev_count)) prev_c = None prev_count = 0 literal_text += c continue if c not in 'GyMdhHmsSEDFwWakKzZ': raise ValueError(f"unknown format character {c}") if literal_text != '': out.append(literal_text) literal_text = '' if prev_c is not None and c != prev_c: out.append((prev_c, prev_count)) prev_count = 0 prev_c = c prev_count += 1 else: if quoted: raise ValueError("missing closing quote; input ends " f"with '{literal_text}") if literal_text != '': out.append(literal_text) elif prev_c is not None: out.append((prev_c, prev_count)) return out
8,868
def rekey_by_sample(ht): """Re-key table by sample id to make subsequent ht.filter(ht.S == sample_id) steps 100x faster""" ht = ht.key_by(ht.locus) ht = ht.transmute( ref=ht.alleles[0], alt=ht.alleles[1], het_or_hom_or_hemi=ht.samples.het_or_hom_or_hemi, #GQ=ht.samples.GQ, HL=ht.samples.HL, S=ht.samples.S, ) ht = ht.key_by(ht.S) ht = ht.transmute( chrom=ht.locus.contig.replace("chr", ""), pos=ht.locus.position ) logging.info("Schema after re-key by sample:") ht.describe() return ht
8,869
async def test_coil_switch(hass, regs, expected): """Run test for given config.""" switch_name = "modbus_test_switch" state = await base_test( hass, { CONF_NAME: switch_name, CALL_TYPE_COIL: 1234, CONF_SLAVE: 1, }, switch_name, SWITCH_DOMAIN, None, CONF_COILS, regs, expected, method_discovery=False, scan_interval=5, ) assert state == expected
8,870
def _sample_prior_fixed_model(formula_like, data=None, a_tau=1.0, b_tau=1.0, nu_sq=1.0, n_iter=2000, generate_prior_predictive=False, random_state=None): """Sample from prior for a fixed model.""" rng = check_random_state(random_state) y, X = patsy.dmatrices(formula_like, data=data) y, X = _check_design_matrices(y, X) outcome_names = y.design_info.column_names coef_names = [rdu.get_default_coefficient_name(n) for n in X.design_info.column_names] n_coefs = len(coef_names) beta, tau_sq, lp = _sample_parameters_conjugate_priors( n_coefs, a_tau=a_tau, b_tau=b_tau, nu_sq=nu_sq, size=n_iter, random_state=rng) chains = collections.OrderedDict({'tau_sq': tau_sq}) for j, t in enumerate(coef_names): chains[t] = beta[:, j] chains['lp__'] = lp outcome_chains = None if generate_prior_predictive: sampled_outcomes, _ = _sample_outcomes( X, beta, tau_sq, random_state=rng) outcome_chains = collections.OrderedDict( {n: sampled_outcomes[..., i] for i, n in enumerate(outcome_names)}) args = {'random_state': random_state, 'n_iter': n_iter} results = {'chains': chains, 'args': args, 'acceptance': 1.0, 'accept_stat': np.ones((n_iter,), dtype=float), 'mean_lp__': np.mean(chains['lp__'])} prior_predictive = None if generate_prior_predictive: prior_predictive = { 'chains': outcome_chains, 'args': args, 'acceptance': 1.0, 'accept_stat': np.ones((n_iter,), dtype=float) } return results, prior_predictive
8,871
def init_keyspace():
    """Creates a `test_keyspace` keyspace with a sharding key."""
    utils.run_vtctl(['CreateKeyspace',
                     '-sharding_column_name', 'keyspace_id',
                     '-sharding_column_type', KEYSPACE_ID_TYPE,
                     'test_keyspace'])
8,872
def TestSConscript(scons_globals): """Test SConscript file. Args: scons_globals: Global variables dict from the SConscript file. """ # Get globals from SCons scons_globals['Import']('env') env = scons_globals['env'] # Build an object a_obj = env.ComponentObject('a.cpp') # Build a static library env.ComponentLibrary('b', 'b.cpp', COMPONENT_STATIC=True) # Build a shared library env.ComponentLibrary('g', 'g.cpp', COMPONENT_STATIC=False) # Build a program env.Append(LIBS=['b', 'g']) env.ComponentProgram('d', ['d.cpp', a_obj]) # Build a test program env.ComponentTestProgram('e', ['e.cpp', a_obj])
8,873
async def validate_input( hass: core.HomeAssistant, data: dict[str, Any] ) -> dict[str, str]: """Validate the user input allows us to connect. Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user. """ zeroconf_instance = await zeroconf.async_get_instance(hass) async_client = get_async_client(hass) device = Device(data[CONF_IP_ADDRESS], zeroconf_instance=zeroconf_instance) await device.async_connect(session_instance=async_client) await device.async_disconnect() return { SERIAL_NUMBER: str(device.serial_number), TITLE: device.hostname.split(".")[0], }
8,874
def closeWindow(plotterInstance=None): """Close the current or the input rendering window.""" if not plotterInstance: plotterInstance = settings.plotter_instance if not plotterInstance: return if plotterInstance.interactor: plotterInstance.interactor.ExitCallback() plotterInstance.closeWindow() return plotterInstance
8,875
def _format_stages_summary(stage_results): """ stage_results (list of (tuples of (success:boolean, stage_name:string, status_msg:string))) returns a string of a report, one line per stage. Something like: Stage: <stage x> :: SUCCESS Stage: <stage y> :: FAILED Stage: <stage z> :: SUCCESS """ #find the longest stage name to pad report lines max_name_len = 0 for entry in stage_results: x, stage_name, y = entry name_len = len(stage_name) if name_len > max_name_len: max_name_len = name_len summary = "" for entry in stage_results: x, stage_name, status_msg = entry summary += 'Stage: ' + stage_name.ljust(max_name_len) + ":: " summary += status_msg + '\n' return summary
8,876
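A small worked example for _format_stages_summary with made-up stage results; the expected report follows from the padding logic above.

stage_results = [
    (True, "checkout", "SUCCESS"),
    (False, "unit-tests", "FAILED"),
    (True, "deploy", "SUCCESS"),
]
print(_format_stages_summary(stage_results))
# Stage: checkout  :: SUCCESS
# Stage: unit-tests:: FAILED
# Stage: deploy    :: SUCCESS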
def pack(envelope, pack_info): """Pack envelope into a byte buffer. Parameters ---------- envelope : data structure pack_info : packing information Returns ------- packet : bytes """ ptype = pack_info.ptype packer = packers[ptype] payload = packer.pack(envelope) hdr = dict(packer=packer.kind, ver=packer.version, nbytes=len(payload)) hdr_buf = json.dumps(hdr).encode() packet = hdr_buf + partition + payload return packet
8,877
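A hedged sketch of the matching unpack step for pack above. The module-level partition delimiter and the packers registry are not shown in this entry, so PARTITION and the packers_by_kind argument below are illustrative assumptions only.

import json

PARTITION = b"\n\n"  # assumption: stand-in for the module-level `partition` delimiter

def unpack_sketch(packet, packers_by_kind):
    """Split a packet produced by pack() back into header and envelope (illustrative only)."""
    hdr_buf, _, payload = packet.partition(PARTITION)   # bytes.partition on the delimiter
    hdr = json.loads(hdr_buf.decode())                  # JSON header written by pack()
    assert len(payload) == hdr["nbytes"]                # sanity check against the declared size
    packer = packers_by_kind[hdr["packer"]]             # look up the packer by its kind
    return hdr, packer.unpack(payload)                  # assumes packers expose an unpack()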
def initMP3(): """ Initialize the MP3 player """ mpc.stop() # stop what's playing mpc.clear() # clear any existing playlists mpc.repeat(True)
8,878
def test_add_file_success(monkeypatch, capsys): """ Test successful adding of file """ db.initialize_database() test_mount_1 = __make_temp_directory() monkeypatch.setattr(utility, "get_device_serial", lambda path: "test-serial-1") patch_input(monkeypatch, library, lambda message: "test-device-1") assert library.add_device(test_mount_1), "Making test device should succeed" monkeypatch.setattr( utility, "get_file_security", lambda path: { "permissions": "644", "owner": "test-owner", "group": "test-group", }, ) monkeypatch.setattr( library, "__get_device_with_space", lambda size, mount=None, checked=False: ("test-device-1", test_mount_1), ) test_file, test_checksum = __make_temp_file() # Use actual sizes/checksum, since that will be fine # If temp runs out of space, that would be an actual issue for system stability # so don't mock that monkeypatch.setattr( utility, "create_backup_name", lambda file_path: path.basename(file_path) ) assert library.add_file(test_file), "Test file should be added" files = db.get_files() assert len(files) == 1, "Exactly one file should be in the database" hashlib.md5() expected = File() expected.set_properties(path.basename(test_file), test_file, test_checksum) expected.set_security("644", "test-owner", "test-group") expected.device_name = "test-device-1" assert files == [expected], "Only one file is added so far" test_output_path = path.join(test_mount_1, test_file) assert path.isfile(test_output_path), "Output path should be a file" assert test_checksum == utility.checksum_file( test_output_path ), "Output checksum should match input" remove(test_file) assert db.remove_file(test_file), "Test file should be removed"
8,879
def tmpnam_s(): """Implementation of POSIX tmpnam() in scalar context""" ntf = tempfile.NamedTemporaryFile(delete=False) result = ntf.name ntf.close() return result
8,880
def timezone_lookup():
    """Force a timezone lookup right now"""
    TZPP = NSBundle.bundleWithPath_("/System/Library/PreferencePanes/"
                                    "DateAndTime.prefPane/Contents/"
                                    "Resources/TimeZone.prefPane")
    TimeZonePref = TZPP.classNamed_('TimeZonePref')
    ATZAdminPrefererences = TZPP.classNamed_('ATZAdminPrefererences')
    atzap = ATZAdminPrefererences.defaultPreferences()
    pref = TimeZonePref.alloc().init()
    atzap.addObserver_forKeyPath_options_context_(pref, "enabled", 0, 0)
    result = pref._startAutoTimeZoneDaemon_(0x1)
    # If this is not set to 1 then AutoTimezone still isn't enabled.
    # This additional preference check makes this script work with 10.12
    if pref.isTimeZoneAutomatic() != 1:
        return False
    return True
8,881
def rollingCPM(dynNetSN:DynGraphSN,k=3): """ This method is based on Palla et al[1]. It first computes overlapping snapshot_communities in each snapshot based on the clique percolation algorithm, and then match snapshot_communities in successive steps using a method based on the union graph. [1] Palla, G., Barabási, A. L., & Vicsek, T. (2007). Quantifying social group evolution. Nature, 446(7136), 664. :param dynNetSN: a dynamic network (DynGraphSN) :param k: the size of cliques used as snapshot_communities building blocks :return: DynCommunitiesSN """ DynCom = DynCommunitiesSN() old_communities = None old_graph = nx.Graph() graphs=dynNetSN.snapshots() for (date, graph) in graphs.items(): communitiesAtT = list(_get_percolated_cliques(graph, k)) #get the percolated cliques (snapshot_affiliations) as a list of set of nodes for c in communitiesAtT: DynCom.add_community(date, c) if old_communities == None: #if first snapshot old_graph = graph dateOld=date old_communities = communitiesAtT else: if len(communitiesAtT)>0: #if there is at least one community union_graph = nx.compose(old_graph, graph) #create the union graph of the current and the previous communities_union = list(_get_percolated_cliques(union_graph, k)) #get the snapshot_affiliations of the union graph jaccardBeforeAndUnion = _included(old_communities, communities_union) #we only care if the value is above 0 jaccardUnionAndAfter = _included(communitiesAtT,communities_union) #we only care if the value is above 0 for c in jaccardBeforeAndUnion: #for each community in the union graph matched = [] born = [] killed = [] allJaccards = set() for oldC in jaccardBeforeAndUnion[c]: for newC in jaccardUnionAndAfter[c]: allJaccards.add(((oldC,newC),_singleJaccard(oldC,newC))) #compute jaccard between candidates before and after allJaccards = sorted(allJaccards, key=itemgetter(1), reverse=True) sortedMatches = [k[0] for k in allJaccards] oldCToMatch = dict(jaccardBeforeAndUnion[c]) #get all coms before newCToMatch = dict(jaccardUnionAndAfter[c]) #get all new coms while len(sortedMatches)>0: #as long as there are couples of unmatched snapshot_affiliations matchedKeys = sortedMatches[0] #pair of snapshot_affiliations of highest jaccard matched.append(matchedKeys) #this pair will be matched del oldCToMatch[matchedKeys[0]] #delete chosen com from possible to match del newCToMatch[matchedKeys[1]] sortedMatches = [k for k in sortedMatches if len(set(matchedKeys) & set(k))==0] #keep only pairs of unmatched snapshot_affiliations if len(oldCToMatch)>0: killed.append(list(oldCToMatch.keys())[0]) if len(newCToMatch)>0: born.append(list(newCToMatch.keys())[0]) for aMatch in matched: DynCom.events.add_event((dateOld, DynCom._com_ID(dateOld, aMatch[0])), (date, DynCom._com_ID(date, aMatch[1])), dateOld, date, "continue") for kil in killed:#these are actual merge (unmatched snapshot_affiliations are "merged" to new ones) for com in jaccardUnionAndAfter[c]: DynCom.events.add_event((dateOld, DynCom._com_ID(dateOld, kil)), (date, DynCom._com_ID(date, com)), dateOld, date, "merged") for b in born:#these are actual merge (unmatched snapshot_affiliations are "merged" to new ones) for com in jaccardBeforeAndUnion[c]: DynCom.events.add_event((dateOld, DynCom._com_ID(dateOld, com)), (date, DynCom._com_ID(date, b)), dateOld, date, "split") old_graph = graph dateOld=date old_communities = communitiesAtT print(DynCom.snapshots) print(DynCom.events.nodes) DynCom._relabel_coms_from_continue_events() return(DynCom)
8,882
def penalty_eqn(s_m, Dt):
    """
    Description:
        Simple function for calculating the penalty for late submission of a project.

    Args:
        :in (1): maximum possible score
        :in (2): difference between the date of deadline and the date of
                 assignment of the project (in hours)
        :out (1): rounded result of the calculation
    """
    # fixed penalty offset equal to 10 % of the maximum possible score
    delta_p = s_m/10
    # main equation of penalty for late submission
    p_s = abs((Dt/24)*np.exp(0.5)) + delta_p

    return round(s_m - p_s)
8,883
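A few worked calls to penalty_eqn, with values computed from the formula above for a maximum score of 10; note that the fixed s_m/10 offset alone costs about one point even with no delay.

penalty_eqn(10, 0)    # -> 9
penalty_eqn(10, 24)   # -> 7   (one day late)
penalty_eqn(10, 48)   # -> 6   (two days late)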
def stop(name=None, id=None): """ Stop (terminate) the VM identified by the given id or name. When both a name and id are provided, the id is ignored. name: Name of the defined VM. id: VM id. CLI Example: .. code-block:: bash salt '*' vmctl.stop name=alpine """ ret = {} cmd = ["vmctl", "stop"] if not (name or id): raise SaltInvocationError('Must provide either "name" or "id"') elif name: cmd.append(name) else: cmd.append(id) result = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False) if result["retcode"] == 0: if re.match("^vmctl: sent request to terminate vm.*", result["stderr"]): ret["changes"] = True else: ret["changes"] = False else: raise CommandExecutionError( "Problem encountered running vmctl", info={"errors": [result["stderr"]], "changes": ret}, ) return ret
8,884
def ky_att(xs, b, Mach, k0, Att=-20): """ Returns the spanwise gust wavenumber 'ky_att' with response at 'xs' attenuated by 'Att' decibels Parameters ---------- xs : float Chordwise coordinate of reference point, defined in interval (-b, +b]. b : float Airfoil semi chord. Mach : float Mean flow Mach number. k0 : float Acoustic wavenumber 'k0'. Can be obtained from the temporal frequency 'f' [in Hz] and the speed of sound 'c0' [in m/s] as 'k0 = 2*pi*f/c0'. Att : float, optional Level of attenuation of the surface pressure at point 'xs', in decibels. Defaults to -20 dB. Returns ------- ky_att : float Subcritical gust spanwise wavenumber 'ky_att' such that the aerofoil response at point 'xs' is 'Att' dB reduced. """ beta = np.sqrt(1-Mach**2) # critical gust spanwise wavenumber ky_crit = k0/beta term1 = -(beta**2)*np.log(10**(Att/20))/(k0*(xs + b)) return ky_crit*np.sqrt(term1**2 + 1)
8,885
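A brief usage sketch for ky_att; the flow and geometry values below are placeholders chosen for illustration, not taken from any particular case.

import numpy as np

b = 0.075                    # airfoil semi chord [m] (illustrative)
c0 = 340.0                   # speed of sound [m/s]
f = 5000.0                   # frequency [Hz]
k0 = 2 * np.pi * f / c0      # acoustic wavenumber, as described in the docstring
Mach = 0.3

# Spanwise wavenumber at which the response at the trailing edge (xs = +b)
# is attenuated by the default -20 dB; it is always >= the critical
# wavenumber k0 / sqrt(1 - Mach**2).
ky_20dB = ky_att(xs=b, b=b, Mach=Mach, k0=k0)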
def print_losses(loss, epoch, loss_val=None):
    """
    Convenience function to print a set of losses; can be used by the SVD autoencoder.

    Args:
        loss (list): Training [loss, accuracy] pair
        epoch (int): Current epoch
        loss_val (list, optional): Validation [loss, accuracy] pair
    """
    print("%d: [loss: %f, acc: %.2f%%]" % (epoch, loss[0], 100*loss[1]))
    if loss_val is not None:
        print("%d val: [loss: %f, acc: %.2f%%]" %
              (epoch, loss_val[0], 100*loss_val[1]))
8,886
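Two quick calls to print_losses with made-up [loss, accuracy] pairs, showing the printed format.

print_losses([0.4207, 0.8125], epoch=3)
# 3: [loss: 0.420700, acc: 81.25%]
print_losses([0.4207, 0.8125], epoch=3, loss_val=[0.5113, 0.7750])
# 3: [loss: 0.420700, acc: 81.25%]
# 3 val: [loss: 0.511300, acc: 77.50%]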
def test_alm_mean(get_covmats): """Test the ALM mean""" n_matrices, n_channels = 3, 3 covmats = get_covmats(n_matrices, n_channels) C_alm = mean_alm(covmats) C_riem = mean_riemann(covmats) assert C_alm == approx(C_riem)
8,887
def has_multiline_items(strings: Optional[Strings]) -> bool: """Check whether one of the items in the list has multiple lines.""" return any(is_multiline(item) for item in strings) if strings else False
8,888
def eval_ctx(
    layer: int = 0,
    globals_: Optional[DictStrAny] = None,
    locals_: Optional[DictStrAny] = None
) -> Tuple[DictStrAny, DictStrAny]:
    """Get the global and local variables of a calling context.

    Args:
        layer (int, optional): Stack depth to inspect. Defaults to 0.
        globals_ (Optional[DictStrAny], optional): Extra global variables. Defaults to None.
        locals_ (Optional[DictStrAny], optional): Extra local variables. Defaults to None.

    Returns:
        Tuple[DictStrAny, DictStrAny]: The global and local variable dictionaries.
    """
    frame = inspect.stack()[layer + 1].frame  # +1 accounts for this function's own frame
    global_dict, local_dict = frame.f_globals, frame.f_locals
    global_dict.update(globals_ or {})
    local_dict.update(locals_ or {})
    return global_dict, local_dict
8,889
def find_ports(device): """ Find the port chain a device is plugged on. This is done by searching sysfs for a device that matches the device bus/address combination. Useful when the underlying usb lib does not return device.port_number for whatever reason. """ bus_id = device.bus dev_id = device.address for dirent in os.listdir(USB_SYS_PREFIX): matches = re.match(USB_PORTS_STR + '$', dirent) if matches: bus_str = readattr(dirent, 'busnum') if bus_str: busnum = float(bus_str) else: busnum = None dev_str = readattr(dirent, 'devnum') if dev_str: devnum = float(dev_str) else: devnum = None if busnum == bus_id and devnum == dev_id: return str(matches.groups()[1])
8,890
def extract_vcalendar(allriscontainer): """Return a list of committee meetings extracted from html content.""" vcalendar = { 'vevents': findall_events(allriscontainer), } if vcalendar.get('vevents'): base_url = allriscontainer.base_url vcalendar['url'] = find_calendar_url(base_url) vcalendar['uid'] = find_calendar_uid(base_url) vcalendar['borough'] = find_calendar_borough(base_url) vcalendar['committee'] = find_calendar_committee(allriscontainer) vcalendar['name'] = '{}: {}'.format( vcalendar['borough'], vcalendar['committee'] ) return vcalendar
8,891
def rnn_helper(inp, length, cell_type=None, direction="forward", name=None, reuse=None, *args, **kwargs): """Adds ops for a recurrent neural network layer. This function calls an actual implementation of a recurrent neural network based on `cell_type`. There are three modes depending on the value of `direction`: forward: Adds a forward RNN. backward: Adds a backward RNN. bidirectional: Adds both forward and backward RNNs and creates a bidirectional RNN. Args: inp: A 3-D tensor of shape [`batch_size`, `max_length`, `feature_dim`]. length: A 1-D tensor of shape [`batch_size`] and type int64. Each element represents the length of the corresponding sequence in `inp`. cell_type: Cell type of RNN. Currently can only be "lstm". direction: One of "forward", "backward", "bidirectional". name: Name of the op. *args: Other arguments to the layer. **kwargs: Keyword arugments to the layer. Returns: A 3-D tensor of shape [`batch_size`, `max_length`, `num_nodes`]. """ assert cell_type is not None rnn_func = None if cell_type == "lstm": rnn_func = lstm_layer assert rnn_func is not None assert direction in ["forward", "backward", "bidirectional"] with tf.variable_scope(name, reuse=reuse): if direction in ["forward", "bidirectional"]: forward = rnn_func( inp=inp, length=length, backward=False, name="forward", reuse=reuse, *args, **kwargs) if isinstance(forward, tuple): # lstm_layer returns a tuple (output, memory). We only need the first # element. forward = forward[0] if direction in ["backward", "bidirectional"]: backward = rnn_func( inp=inp, length=length, backward=True, name="backward", reuse=reuse, *args, **kwargs) if isinstance(backward, tuple): # lstm_layer returns a tuple (output, memory). We only need the first # element. backward = backward[0] if direction == "forward": out = forward elif direction == "backward": out = backward else: out = tf.concat(axis=2, values=[forward, backward]) return out
8,892
def test_get_arrival_jobs(demand_rate_fixture, add_max_rate_fixture, class_fixture): """Check that the sample average approximates the actual demand mean rate.""" np.random.seed(42) buffer_processing_matrix = np.max(demand_rate_fixture) * 1.1 * np.eye(4) job_gen = class_fixture(demand_rate=demand_rate_fixture, buffer_processing_matrix=buffer_processing_matrix, add_max_rate=add_max_rate_fixture) num_samples = int(1e5) samples = np.zeros((4, 1)) for _ in range(num_samples): new_sample = job_gen.get_arrival_jobs() assert np.all(new_sample >= 0) samples += new_sample max_rate = np.max(np.abs(buffer_processing_matrix)) mean_samples = (samples / num_samples) * (max_rate * (1 + add_max_rate_fixture)) np.testing.assert_almost_equal(mean_samples, demand_rate_fixture, decimal=1)
8,893
def get_data(dataset): """ :return: encodings array of (2048, n) labels list of (n) """ query = "SELECT * FROM embeddings WHERE label IS NOT NULL" cursor, connection = db_actions.connect(dataset) cursor.execute(query) result_list = cursor.fetchall() encodings = np.zeros((2048, len(result_list))) labels = [] for i in range(len(result_list)): encodings[:, i] = result_list[i][0] labels.append(result_list[i][1].encode()) encodings = np.nan_to_num(encodings) labels = [x.decode('utf-8') for x in labels] return encodings.astype('float32'), labels
8,894
def read_hdr(name, order='C'): """Read hdr file.""" # get dims from .hdr h = open(name + ".hdr", "r") h.readline() # skip line l = h.readline() h.close() dims = [int(i) for i in l.split()] if order == 'C': dims.reverse() return dims
8,895
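An end-to-end example for read_hdr: it writes a throwaway .hdr file whose layout (one header line to skip, then the dimensions) matches what the function expects.

with open("vol.hdr", "w") as fh:
    fh.write("# generated header\n")
    fh.write("64 128 32\n")

read_hdr("vol")             # C order (default): [32, 128, 64]
read_hdr("vol", order="F")  # any non-'C' value keeps the file order: [64, 128, 32]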
def IsTouchDevice(dev): """Check if a device is a touch device. Args: dev: evdev.InputDevice Returns: True if dev is a touch device. """ keycaps = dev.capabilities().get(evdev.ecodes.EV_KEY, []) return evdev.ecodes.BTN_TOUCH in keycaps
8,896
def load_users():
    """
    Load the users csv.
    :return: dictionary mapping each user tuple's hash to the user tuple
    """
    with open(USERS, "r") as file:
        # Build a dictionary keyed by the hash of each user tuple; the tuples
        # are immutable, so their hashes are stable and make lookups easy.
        users = {}
        for user in file:
            user = user.strip().split(",")
            user_tuple = create_user(*user[:5], int(user[5]))
            users[hash(user_tuple)] = user_tuple
    return users
8,897
def main(argv): """ Push specified revision as a specified bookmark to repo. """ args = parse_arguments(argv) pulled = check_output([args.mercurial_binary, 'pull', '-B', args.bookmark, args.repo]).decode('ascii') print(pulled) if re.match("adding changesets", pulled): print("Unseen changes found on bookmark", args.bookmark, "you should probably rebase first", file=sys.stderr) check_call([args.mercurial_binary, 'bookmark', '-f', '-r', args.rev, args.bookmark]) check_call([args.mercurial_binary, 'push', '-B', args.bookmark, args.repo]) return 0
8,898
def clustering_consistency_check(G): """ Check consistency of a community detection algorithm by running it a number of times. """ Hun = G.to_undirected() Hun = nx.convert_node_labels_to_integers(Hun,label_attribute='skeletonname') WHa = np.zeros((len(Hun.nodes()),len(Hun.nodes()))) for i in range(100): partition = community.best_partition(Hun, randomize=None, resolution=1.0) for com in set(partition.values()) : list_nodes = [nodes for nodes in partition.keys() if partition[nodes] == com] list_nodes = np.array(list_nodes) WHa[np.ix_(list_nodes,list_nodes)] += 1 print('Iteration:', i) return WHa
8,899
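A hedged usage sketch for clustering_consistency_check on a toy graph; note that the function runs 100 Louvain passes (python-louvain) and prints its progress.

import networkx as nx

# Toy network: two 5-node cliques joined by a single edge.
G = nx.barbell_graph(5, 0)
WHa = clustering_consistency_check(G)
# WHa[i, j] counts how often nodes i and j landed in the same community
# across the 100 runs: values near 100 mean a stable co-assignment,
# values near 0 mean a stable separation.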