| code | docstring | _id |
|---|---|---|
def _poll_async_result(self, result, templar, task_vars=None):
    if task_vars is None:
        task_vars = self._job_vars
    async_jid = result.get('ansible_job_id')
    if async_jid is None:
        return dict(failed=True, msg="No job id was returned by the async task")
    async_task = Task().load(dict(action='async_status jid=%s' % async_jid, environment=self._task.environment))
    async_handler = self._shared_loader_obj.action_loader.get(
        'async_status',
        task=async_task,
        connection=self._connection,
        play_context=self._play_context,
        loader=self._loader,
        templar=templar,
        shared_loader_obj=self._shared_loader_obj,
    )
    time_left = self._task.async_val
    while time_left > 0:
        time.sleep(self._task.poll)
        try:
            async_result = async_handler.run(task_vars=task_vars)
            if (int(async_result.get('finished', 0)) == 1 or
                    ('failed' in async_result and async_result.get('_ansible_parsed', False)) or
                    'skipped' in async_result):
                break
        except Exception as e:
            display.vvvv("Exception during async poll, retrying... (%s)" % to_text(e))
            display.debug("Async poll exception was:\n%s" % to_text(traceback.format_exc()))
            try:
                async_handler._connection.reset()
            except AttributeError:
                pass
            time_left -= self._task.poll
            if time_left <= 0:
                raise
        else:
            time_left -= self._task.poll
    if int(async_result.get('finished', 0)) != 1:
        if async_result.get('_ansible_parsed'):
            return dict(failed=True, msg="async task did not complete within the requested time - %ss" % self._task.async_val)
        else:
            return dict(failed=True, msg="async task produced unparseable results", async_result=async_result)
    else:
        return async_result
|
Polls for the specified JID to be complete
|
625941bb4428ac0f6e5ba6a8
|
def controlled_by(self, qubit: 'cirq.Qid') -> 'Cell':
    return self
|
The same cell, but with an explicit control on its main operations.
Cells with effects that do not need to be controlled are permitted to
return themselves unmodified.
Args:
qubit: The control qubit.
Returns:
A modified cell with an additional control.
|
625941bbab23a570cc250036
|
def search_item(self):
    self.search_item_root = Tk()
    self.window = Search_item(self.search_item_root)
    self.search_item_root.title("Wyszukaj pozycję")
    self.search_item_root.geometry("1300x450")
    self.master.withdraw()
    self.search_item_root.mainloop()
|
Create the item search (browsing) window.
|
625941bb1f037a2d8b9460b5
|
def get_module_name(self):
    modname = 'coverage_test_' + self.noise + str(self.n)
    self.n += 1
    return modname
|
Return the module name to use for this test run.
|
625941bb283ffb24f3c557c2
|
def __init__(
    self,
    num_classes=1000,
    width_mult=1.0,
    inverted_residual_setting=None,
    round_nearest=8,
):
    super(MobileNetV2, self).__init__()
    block = InvertedResidual
    input_channel = 32
    last_channel = 1280
    if inverted_residual_setting is None:
        inverted_residual_setting = [
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
    if (
        len(inverted_residual_setting) == 0
        or len(inverted_residual_setting[0]) != 4
    ):
        raise ValueError(
            "inverted_residual_setting should be non-empty "
            "and each element must be a 4-element list, got {}".format(inverted_residual_setting)
        )
    input_channel = _make_divisible(input_channel * width_mult, round_nearest)
    self.last_channel = _make_divisible(
        last_channel * max(1.0, width_mult), round_nearest
    )
    features = [
        M.ConvBnRelu2d(
            3, input_channel, kernel_size=3, padding=1, stride=2, bias=False
        )
    ]
    for t, c, n, s in inverted_residual_setting:
        output_channel = _make_divisible(c * width_mult, round_nearest)
        for i in range(n):
            stride = s if i == 0 else 1
            features.append(
                block(input_channel, output_channel, stride, expand_ratio=t)
            )
            input_channel = output_channel
    features.append(
        M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False)
    )
    self.features = M.Sequential(*features)
    self.classifier = M.Sequential(
        M.Dropout(0.2),
        M.Linear(self.last_channel, num_classes),
    )
    self.classifier.disable_quantize()
    self.quant = M.QuantStub()
    self.dequant = M.DequantStub()
    for m in self.modules():
        if isinstance(m, M.Conv2d):
            M.init.msra_normal_(m.weight, mode="fan_out")
            if m.bias is not None:
                M.init.zeros_(m.bias)
        elif isinstance(m, M.BatchNorm2d):
            M.init.ones_(m.weight)
            M.init.zeros_(m.bias)
        elif isinstance(m, M.Linear):
            M.init.normal_(m.weight, 0, 0.01)
            M.init.zeros_(m.bias)
|
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels
in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer
to be a multiple of this number
Set to 1 to turn off rounding
|
625941bb30bbd722463cbc7a
|
def train(self):
    for e in range(self.config.hp.n_epochs):
        self.epoch(e)
|
Call this function to start the training
|
625941bb99fddb7c1c9de24a
|
def single(self):
    if self._config.get("pidfile"):
        pid_write(self._config["pidfile"], os.getpid(), excl=True)
    self.pre_run()
    self.run()
    if self._config.get("pidfile"):
        pid_remove(self._config.get("pidfile"))
|
Do single action.
|
625941bbadb09d7d5db6c649
|
def topKFrequent(self, nums, k):
    import heapq
    import collections
    heap = []
    count = collections.Counter(nums)
    for key, cnt in count.items():
        if len(heap) < k:
            heapq.heappush(heap, (cnt, key))
        else:
            if cnt > heap[0][0]:
                heapq.heappop(heap)
                heapq.heappush(heap, (cnt, key))
    return [x[1] for x in heap]
|
:type nums: List[int]
:type k: int
:rtype: List[int]
|
625941bb50812a4eaa59c1db
|
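A quick usage sketch for the heap-based top-k counter above (the Solution wrapper class is hypothetical, and the order of the returned top-k elements is not guaranteed):

# [1, 1, 1, 2, 2, 3] has counts {1: 3, 2: 2, 3: 1}, so the top 2 are 1 and 2.
print(sorted(Solution().topKFrequent([1, 1, 1, 2, 2, 3], k=2)))  # -> [1, 2]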
def verlet_next_vel(vel_t, accel_t, accel_t_plus_dt):
    # NOTE: this is a stub - it returns the current velocity unchanged.
    # The velocity Verlet update v(t+dt) = v(t) + (a(t) + a(t+dt)) / 2 * dt
    # still needs to be applied (the timestep dt is not passed in yet).
    vel_t_plus_dt = vel_t.copy()
    return vel_t_plus_dt
|
We want to return velocity of the particle at the next moment t_plus_dt,
based on its velocity at time t, and its acceleration at time t and t_plus_dt
|
625941bbb7558d58953c4dd1
|
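Since the stub above never applies the accelerations, here is a minimal sketch of what the completed update could look like, assuming the timestep dt is made available as a parameter (it is not in the signature above):

import numpy as np

def verlet_next_vel_sketch(vel_t, accel_t, accel_t_plus_dt, dt):
    # Velocity Verlet: v(t+dt) = v(t) + (a(t) + a(t+dt)) / 2 * dt
    return vel_t + 0.5 * (accel_t + accel_t_plus_dt) * dt

# Example: constant acceleration -9.8 for one step of dt = 0.1.
print(verlet_next_vel_sketch(np.array([0.0]), np.array([-9.8]), np.array([-9.8]), 0.1))  # -> [-0.98]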
@bp.route('/logout')
def logout():
    logout_user()
    return redirect(url_for('index'))
|
Log out an active user.
|
625941bb63d6d428bbe443a6
|
def __init__(self):
    self.Status = None
    self.ErrCode = None
    self.ErrMsg = None
    self.ItemSet = None
|
:param Status: Editing task status.
1: running; 2: succeeded; 3: failed.
:type Status: int
:param ErrCode: Editing task error code.
0: success; other values: failure.
:type ErrCode: int
:param ErrMsg: Editing task error description.
:type ErrMsg: str
:param ItemSet: Smart cover result set.
Note: this field may return null, indicating that no valid value could be obtained.
:type ItemSet: list of CoverTaskResultItem
|
625941bbd18da76e23532389
|
@task
def check_repo_status(expected_tag=None, **kwargs):
    prepare_env(**kwargs)
    with cd(os.path.join(env.remote_source_root, env.project_repo_name)):
        if exists('env_dependencies.txt'):
            run('rm env_dependencies.txt')
        run('git checkout master')
        run('source ~/.venvs/bcpp/bin/activate && pip freeze > env_dependencies.txt')
        result = run('git describe --tags')
        if result != expected_tag:
            warn(red(f'master is not at {expected_tag}'))
        data = run('cat env_dependencies.txt')
        data = [d[:-1] for d in data.split('\n')]
        requirements_list = get_pip_freeze_list_from_requirements(
            requirements_file=env.requirements_file)
        for requirement in requirements_list:
            if requirement not in data:
                warn(red(f'{requirement} is not in {env.host}'))
|
Check repo tag.
fab -P -R mmathethe utils.check_repo_status:bootstrap_path=/Users/imosweu/source/bcpp/fabfile/conf/,expected_tag=0.1.47 --user=django
|
625941bb091ae35668666e1b
|
def get_random_note(self):
    # Note: np.random.randint's upper bound is exclusive, so this samples
    # note_idx from [0, num_actions - 2], never the last action index.
    note_idx = np.random.randint(0, self.num_actions - 1)
    return np.array([[note_idx]])
|
Sample a note uniformly at random.
Returns:
random note
|
625941bb925a0f43d2549d2b
|
def prep_level(self):
    self.level_image = self.font.render(str(self.stats.level), True, self.text_color, self.ai_settings.bg_color)
    self.level_rect = self.level_image.get_rect()
    self.level_rect.right = self.score_rect.right
    self.level_rect.top = self.score_rect.bottom + 10
|
Turn the level into a rendered image.
|
625941bbd58c6744b4257b17
|
def build_pytrees_docker(self):
    package_name = 'py_trees'
    container_name = self.pytrees_pkg_name + str(self.id)
    self.pytreesd = {
        'build': {
            'context': './docker',
            'dockerfile': 'Dockerfile.pytrees',
        },
        'container_name': container_name,
        'runtime': 'runc',
        'depends_on': ['master', f'motion_ctrl{self.id}'],
        'env_file': [env_path],
        'volumes': ['/tmp/.docker.xauth:/tmp/.docker.xauth:rw',
                    '/tmp/.X11-unix:/tmp/.X11-unix:rw',
                    '/var/run/dbus:/var/run/dbus:ro',
                    '/etc/machine-id:/etc/machine-id:ro',
                    '${XDG_RUNTIME_DIR}/pulse/native:${XDG_RUNTIME_DIR}/pulse/native',
                    '~/.config/pulse/cookie:/root/.config/pulse/cookie',
                    './docker/py_trees_ros_behaviors:/ros_ws/src/py_trees_ros_behaviors/'],
        'environment': [f"ROS_HOSTNAME={container_name}",
                        "ROS_MASTER_URI=http://master:11311",
                        f"ROBOT_NAME=turtlebot{self.id}",
                        f"SKILLS={self.skills}",
                        f"ROBOT_CONFIG={json.dumps(self.config)}"],
        'command': '/bin/bash -c "colcon build && source /ros_ws/install/setup.bash && ros2 launch py_trees_ros_behaviors tutorial_seven_docking_cancelling_failing_launch.py"',
        'tty': True,
        'networks': ['morsegatonet']
    }
|
py_trees1:
build:
context: ./docker
dockerfile: Dockerfile.pytrees
container_name: py_trees1
runtime: runc
depends_on:
- motion_ctrl
env_file:
- .env
devices:
- "/dev/dri"
- "/dev/snd"
environment:
- "ROS_HOSTNAME=py_trees1"
- "ROS_MASTER_URI=http://motion_ctrl:11311"
- "QT_X11_NO_MITSHM=1"
- "DISPLAY=$DISPLAY"
- "XAUTHORITY=$XAUTH"
- "QT_GRAPHICSSYSTEM=native"
- "PULSE_SERVER=unix:${XDG_RUNTIME_DIR}/pulse/native"
- "ROBOT_NAME=turtlebot1"
volumes:
- /tmp/.docker.xauth:/tmp/.docker.xauth:rw
- /tmp/.X11-unix:/tmp/.X11-unix:rw
- /var/run/dbus:/var/run/dbus:ro
- /etc/machine-id:/etc/machine-id:ro
- ${XDG_RUNTIME_DIR}/pulse/native:${XDG_RUNTIME_DIR}/pulse/native
- ~/.config/pulse/cookie:/root/.config/pulse/cookie
- ./docker/py_trees_ros_behaviors:/ros_ws/src/py_trees_ros_behaviors/
# command: python3 /ros_ws/src/bridge.py  (alternative; the key below takes effect)
command: /bin/bash -c "source /opt/ros/noetic/setup.bash && ros2 run ros1_bridge dynamic_bridge --bridge-all-topics "
tty: true
networks:
morsegatonet:
ipv4_address: 10.2.0.8
|
625941bbbe7bc26dc91cd4bc
|
@app.route('/release')
def release():
    servo(GRIPPER, GRIPPER_OPEN)
    return "Gripper opened"
|
Opens the gripper.
|
625941bbd58c6744b4257b18
|
@auth.route("/", methods=["GET"])
def home_page():
    return render_template("home_page.html")
|
Returns simple home page.
|
625941bb5166f23b2e1a5010
|
def get_by_name(self, name):
    try:
        return self._choice_name_dict[name]
    except KeyError:
        raise exceptions.ChoiceDoesNotExist('There is no choice with name=%s' % name)
|
Pass the name of a choice and get the dict representation of that choice.
Raises a ChoiceDoesNotExist exception if there is no choice with that name.
:param name: the name of the choice
:return: dict
|
625941bbaad79263cf3908f3
|
def get_missing(input: Stream, desired: List[str]) -> List[str]:
    present = TimeseriesUtility.get_channels(stream=input)
    return list(set(desired).difference(set(present)))
|
Return the desired channels that are missing from the input stream.
|
625941bb56b00c62f0f1450e
|
def test_compute_prescription_order_line_count(self):
    exp = self.sale_7.order_line.mapped('prescription_order_line_id').ids
    exp = len(exp)
    res = len(self.sale_7.prescription_order_line_ids.ids)
    self.assertEqual(
        res,
        exp,
    )
|
Test that the prescription (rx) order line count is properly computed.
|
625941bb7b25080760e39312
|
@schedule_blueprint.route("/drop")
def drop():
    if not utils.is_logged_in(session):
        return redirect(url_for("index"))
    feeder_id = request.args.get("id", None)
    if not feeder_id:
        return redirect(url_for("home_blueprint.home"))
    user_info = db.getUserByUsername(utils.get_username(session))
    feeder_info = check_user_owns_feeder(user_info, feeder_id)
    if not feeder_info:
        return redirect(url_for("home_blueprint.home"))
    drop_offset = dt.timezone(dt.timedelta(hours=8))
    drop_time = dt.datetime.now(drop_offset)
    db.addScheduleItem(feeder_id, scheduleType="S", time=drop_time)
    return redirect(url_for("home_blueprint.home", id=feeder_id))
|
Schedules an immediate drop of food for the pet feeder.
|
625941bb7047854f462a12c4
|
def read_namespaced_deployment(self, name, namespace, **kwargs):
    kwargs['_return_http_data_only'] = True
    return self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs)
|
read_namespaced_deployment # noqa: E501
read the specified Deployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment(name, namespace, async_req=True)
>>> result = thread.get()
:param name: name of the Deployment (required)
:type name: str
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1Deployment
|
625941bb7b180e01f3dc46bc
|
def test_update_committed_object(self):
    self._db_map.add_object_classes({"id": 1, "name": "some_class"})
    self._db_map.add_objects({"id": 1, "name": "nemo", "class_id": 1})
    self._db_map.commit_session("update")
    ids, intgr_error_log = self._db_map.update_objects({"id": 1, "name": "klaus"})
    sq = self._db_map.object_sq
    objects = {x.id: x.name for x in self._db_map.query(sq).filter(sq.c.id.in_(ids))}
    self.assertEqual(intgr_error_log, [])
    self.assertEqual(objects[1], "klaus")
    self.assertEqual(self._db_map.query(self._db_map.object_sq).filter_by(id=1).first().name, "klaus")
    self._db_map.commit_session("update")
    self.assertEqual(self._db_map.query(self._db_map.object_sq).filter_by(id=1).first().name, "klaus")
|
Test that updating objects works.
|
625941bb1b99ca400220a968
|
def compute_similarity_matrix_slow(self, chroma):
    num_samples = chroma.shape[1]
    time_time_similarity = np.zeros((num_samples, num_samples))
    for i in range(num_samples):
        for j in range(num_samples):
            time_time_similarity[i, j] = 1 - (
                np.linalg.norm(chroma[:, i] - chroma[:, j]) / sqrt(12))
    return time_time_similarity
|
Slow but straightforward way to compute the time-time similarity matrix.
|
625941bb8e71fb1e9831d664
|
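For reference, a vectorized sketch of the same computation using scipy (an alternative to the class method above, not part of the original code); it builds the identical 1 - ||c_i - c_j|| / sqrt(12) matrix without the double loop:

import numpy as np
from scipy.spatial.distance import cdist

def compute_similarity_matrix_fast(chroma):
    # Pairwise Euclidean distances between chroma columns (time frames),
    # rescaled so identical frames map to similarity 1.
    dists = cdist(chroma.T, chroma.T, metric='euclidean')
    return 1 - dists / np.sqrt(12)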
def best_defender_1(rank, max_age):
    filter_1 = df['Preferred Positions Type'] == 'Back'
    filter_2 = df['Age'] < max_age
    defenders = df[(filter_1) & (filter_2)].reset_index()
    return defenders.sort_values(by=['Overall', 'Name'], ascending=False).iloc[rank - 1]['Name']
|
Returns a string of the nth ranked defender under a certain age
|
625941bbd10714528d5ffb97
|
def New(*args, **kargs):
    obj = itkIntensityWindowingImageFilterIUC2IUL2.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
|
New() -> itkIntensityWindowingImageFilterIUC2IUL2
Create a new object of the class itkIntensityWindowingImageFilterIUC2IUL2 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkIntensityWindowingImageFilterIUC2IUL2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkIntensityWindowingImageFilterIUC2IUL2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
|
625941bb7d43ff24873a2b54
|
def __init__(self):
    self.InstanceId = None
    self.NodeFlag = None
    self.Offset = None
    self.Limit = None
    self.HardwareResourceType = None
    self.SearchFields = None
|
:param InstanceId: Cluster instance ID in the format of emr-xxxxxxxx
:type InstanceId: str
:param NodeFlag: Node flag. Valid values:
<li>all: gets the information of nodes in all types except TencentDB information.</li>
<li>master: gets master node information.</li>
<li>core: gets core node information.</li>
<li>task: gets task node information.</li>
<li>common: gets common node information.</li>
<li>router: gets router node information.</li>
<li>db: gets TencentDB information in normal status.</li>
Note: only the above values are supported for the time being. Entering other values will cause errors.
:type NodeFlag: str
:param Offset: Page number. Default value: 0, indicating the first page.
:type Offset: int
:param Limit: Number of returned results per page. Default value: 100. Maximum value: 100
:type Limit: int
:param HardwareResourceType: Resource type. Valid values: all, host, pod. Default value: all
:type HardwareResourceType: str
:param SearchFields: Searchable field
:type SearchFields: list of SearchItem
|
625941bb9b70327d1c4e0c8b
|
def handle_promise(self, request, context):
    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
|
Proposers will receive these promise messages.
|
625941bb099cdd3c635f0b14
|
def createwidgets(self):
    self.frame1 = Frame(self)
    frame = self.frame1
    self.listbox = Listbox(frame, height=10, width=30, selectmode=SINGLE, takefocus=1)
    # Fixed: the scrollbar was assigned to a misspelled attribute (yscrool),
    # which made the later self.yscroll references fail.
    self.yscroll = Scrollbar(frame, orient=VERTICAL)
    self.listbox.grid(row=0, column=0, sticky=NS)
    self.yscroll.grid(row=0, column=1, sticky=NS)
    self.yscroll.config(command=self.listbox.yview)
    self.listbox.config(yscrollcommand=self.yscroll.set)
    self.listbox.bind("<ButtonRelease-1>", self.selsectitem)
    self.frame1.grid(row=0, column=0)
|
Create the window widgets, such as buttons.
|
625941bbcdde0d52a9e52ee7
|
def departure_S(temp, press, temp_crit, press_crit, acentric_factor):
    R = 8.314459848
    dadt = dadT(temp, temp_crit, press_crit, acentric_factor)
    b = b_factor(temp_crit, press_crit)
    vol = volume(temp, press, temp_crit, press_crit, acentric_factor)
    Z = press * vol / (R * temp)
    B = (b * press) / (R * temp)
    dS = R * np.log(Z - B) - dadt / (2 * np.sqrt(2) * b) * np.log((Z + (1 - np.sqrt(2)) * B) / (Z + (1 + np.sqrt(2)) * B))
    return dS
|
Entropy Departure Function using the Peng-Robinson Equation of State
:param temp: Current Temperature, T (K)
:type temp: float
:param press: Current Pressure, P (Pa)
:type press: float
:param temp_crit: Substance Critical Temperature, Tc (K)
:type temp_crit: float
:param press_crit: Substance Critical Pressure, Pc (Pa)
:type press_crit: float
:param acentric_factor: Acentric Factor, omega (unitless)
:type acentric_factor: float
:return: Entropy Departure Function, Delta S
:rtype: float
|
625941bb627d3e7fe0d68d06
|
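A hedged usage sketch: the call below uses illustrative critical properties for methane (Tc = 190.6 K, Pc = 4.599 MPa, acentric factor 0.011); the numeric result depends on the module's dadT, b_factor, and volume helpers, which are not shown here:

dS = departure_S(temp=300.0, press=1.0e6,
                 temp_crit=190.6, press_crit=4.599e6,
                 acentric_factor=0.011)
print(dS)  # entropy departure in J/(mol K)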
def test_one_line_tag_append():
    e = OneLineTag("the initial content")
    with pytest.raises(NotImplementedError):
        e.append("some more content")
|
You should not be able to append content to a OneLineTag
|
625941bb1d351010ab8559d5
|
def background(attempts, dead, hurt, player_number, attempts_bg):
    attempt = []
    attempt_change = []
    for i in range(attempts_bg, 0, -1):
        attempt_change.append(i)
    for i in range(len(attempt_change)):
        if attempts == attempt_change[i]:
            attempts_bg = attempt_change[-1 - i]
    attempt.append(attempts_bg)
    attempt.append(player_number)
    attempt.append(dead)
    attempt.append(hurt)
    background_list.append(attempt)
    print()
    for i in range(len(background_list)):
        print(background_list[i][0], "-", background_list[i][1], "-", background_list[i][2], "DEAD", background_list[i][3], "HURT")
    print()
    return background_list
|
Returns all the data for each attempt, recording the attempt number, player number, and the dead and hurt counts.
|
625941bb097d151d1a222d13
|
def launch_training_job(parent_dir, data_dir, early_stop, job_name, params):
    model_dir = os.path.join(parent_dir, job_name)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    json_path = os.path.join(model_dir, 'params.json')
    params.save(json_path)
    os.chdir(curr_dir)
    cmd = "{python} train.py --model_dir={model_dir} --data_dir {data_dir} --early_stop {early_stop}".format(
        python=PYTHON, model_dir=model_dir, data_dir=data_dir, early_stop=early_stop)
    print(cmd)
    check_call(cmd, shell=True)
|
Launch training of the model with a set of hyperparameters in parent_dir/job_name
Args:
model_dir: (string) directory containing config, weights and log
data_dir: (string) directory containing the dataset
params: (dict) containing hyperparameters
|
625941bb5fdd1c0f98dc00e9
|
def getAllPorts(self):
    return self._sendRequest('GET', '/api/ports')
|
getAllPorts :
Fetch a list containing summaries for all the ports in the system.
Sample usage:
>>> nto.getAllPorts()
[{u'id': 58, u'name': u'P1-01'}, {u'id': 59, u'name': u'P1-02'}, {u'id': 60, u'name': u'P1-03'}, {u'id': 61, u'name': u'P1-04'}, {u'id': 62, u'name': u'P1-05'}, {u'id': 63, u'name': u'P1-06'}, {u'id': 64, u'name': u'P1-07'}, {u'id': 65, u'name': u'P1-08'}, {u'id': 66, u'name': u'P1-09'}, {u'id': 67, u'name': u'P1-10'}, {u'id': 68, u'name': u'P1-11'}, {u'id': 69, u'name': u'P1-12'}, {u'id': 70, u'name': u'P1-13'}, {u'id': 71, u'name': u'P1-14'}, {u'id': 72, u'name': u'P1-15'}, {u'id': 73, u'name': u'P1-16'}]
|
625941bb8e71fb1e9831d665
|
@app.route("/") <NEW_LINE> def index(): <NEW_LINE> <INDENT> return "To send a message use: /USERNAME/MESSAGE"
|
Main page with instructions
|
625941bbcc40096d6159580a
|
def u_ping(info, msg):
    say(info, 'pong')
|
!ping - test answer
|
625941bb4d74a7450ccd407b
|
def _prop_helper(self, a, b, a_weights, b_weights, t_weights):
    inter = tf.multiply(tf.matmul(tf.cast(a, tf.float32), a_weights), tf.matmul(b, b_weights))
    return tf.matmul(inter, tf.transpose(t_weights))
|
a and b should be matrices of row vectors
|
625941bbd99f1b3c44c6744d
|
def to_axes(self, axes):
    if not self.is_configured('X') or not self.is_configured('Y') or not self.is_configured('C'):
        return
    X = self['X']
    Y = self['Y']
    C = self['C']
    mappable = axes.pcolorfast(X, Y, C, **self)
    if self.is_configured('layer_colorbar'):
        self['layer_colorbar'].set_mappable(mappable)
|
Plot the data to matplotlib.axes.Axes instance AXES.
|
625941bb796e427e537b047a
|
def _get_lambdas(fep_files):
    lambda_fwd_map, lambda_bwd_map = {}, {}
    is_ascending = set()
    endpoint_windows = []
    for fep_file in sorted(fep_files, key=_filename_sort_key):
        with anyopen(fep_file, 'r') as f:
            for line in f:
                l = line.strip().split()
                if l[0] == '#NEW':
                    lambda1, lambda2 = float(l[6]), float(l[8])
                    lambda_idws = float(l[10]) if 'LAMBDA_IDWS' in l else None
                elif l[0] == '#Free':
                    lambda1, lambda2, lambda_idws = float(l[7]), float(l[8]), None
                else:
                    continue
                if 0.0 in (lambda1, lambda2) or 1.0 in (lambda1, lambda2):
                    endpoint_windows.append((lambda1, lambda2))
                else:
                    if lambda2 != lambda1:
                        is_ascending.add(lambda2 > lambda1)
                    if lambda_idws is not None and lambda1 != lambda_idws:
                        is_ascending.add(lambda1 > lambda_idws)
                if len(is_ascending) > 1:
                    raise ValueError(f'Lambda values change direction in {fep_file}, relative to the other files: {lambda1} -> {lambda2} (IDWS: {lambda_idws})')
                if lambda1 in lambda_fwd_map and lambda_fwd_map[lambda1] != lambda2:
                    logger.error(f'fwd: lambda1 {lambda1} has lambda2 {lambda_fwd_map[lambda1]} in {fep_file} but it has already been {lambda2}')
                    raise ValueError('More than one lambda2 value for a particular lambda1')
                lambda_fwd_map[lambda1] = lambda2
                if lambda_idws is not None:
                    if lambda1 in lambda_bwd_map and lambda_bwd_map[lambda1] != lambda_idws:
                        logger.error(f'bwd: lambda1 {lambda1} has lambda_idws {lambda_bwd_map[lambda1]} but it has already been {lambda_idws}')
                        raise ValueError('More than one lambda_idws value for a particular lambda1')
                    lambda_bwd_map[lambda1] = lambda_idws
    is_ascending = next(iter(is_ascending))
    all_lambdas = set()
    all_lambdas.update(lambda_fwd_map.keys())
    all_lambdas.update(lambda_fwd_map.values())
    all_lambdas.update(lambda_bwd_map.keys())
    all_lambdas.update(lambda_bwd_map.values())
    return list(sorted(all_lambdas, reverse=not is_ascending))
|
Retrieves all lambda values included in the FEP files provided.
We have to do this in order to tolerate truncated and restarted fepout files.
The IDWS lambda is not present at the termination of the window, presumably
for backwards compatibility with ParseFEP and probably other things.
For a given lambda1, there can be only one lambda2 and at most one lambda_idws.
Parameters
----------
fep_files: str or list of str
Path(s) to fepout files to extract data from.
Returns
-------
List of floats, or None if there is more than one lambda_idws for each lambda1.
|
625941bb60cbc95b062c6401
|
def createClient(config):
    ds0 = config.datasources[0]
    client = txovirt.Client(
        ds0.zOVirtUrl,
        ds0.zOVirtUser,
        ds0.zOVirtDomain,
        ds0.zOVirtPassword)
    return client
|
Return a client object based on the passed-in configuration.
|
625941bb45492302aab5e178
|
def checkio(anything):
    class allTrue:
        def __lt__(self, other):
            return True
        def __le__(self, other):
            return True
        def __eq__(self, other):
            return True
        def __ne__(self, other):
            return True
        def __gt__(self, other):
            return True
        def __ge__(self, other):
            return True
    return allTrue()
|
try to return anything else :)
|
625941bbbaa26c4b54cb0fdb
|
def stop(self):
    with self._lock:
        self._cond.notify_all()
    self._zeroconf.close()
|
Stop searching.
When stopped, this object cannot be restarted.
|
625941bb4a966d76dd550ec5
|
def __init__(self, coordinator: DataUpdateCoordinator, channel_number: int) -> None:
    self.channel_number = channel_number
    super().__init__(coordinator)
    self._attr_unique_id = f"{self.mac_address}_{channel_number}"
|
Initialize the channel sensor.
|
625941bb6fece00bbac2d5f4
|
def _combine_multi_gpu_gauges_data(header_list, case_folder, file_tag):
    gauges_array = []
    value_array = []
    for i in range(len(header_list)):
        domain_header = header_list[i]
        gauge_pos_file = os.path.join(case_folder, str(i), 'input', 'field', 'gauges_pos.dat')
        gauge_xy = np.loadtxt(gauge_pos_file, dtype='float64', ndmin=2)
        if gauge_xy.size >= 2:
            gauge_ind = _find_gauges_inside_domain(domain_header, gauge_xy)
            gauges_array.append(gauge_xy[gauge_ind, :])
            file_name = os.path.join(case_folder, str(i), 'output', file_tag + '_gauges.dat')
            times, values = _read_one_gauge_file(file_name, gauge_ind)
            value_array.append(values)
    gauges_array = np.concatenate(gauges_array, axis=0)
    gauges_array, ind = np.unique(gauges_array, axis=0, return_index=True)
    if values.ndim == 2:
        value_array = np.concatenate(value_array, axis=1)
        value_array = value_array[:, ind]
    else:
        value_array = np.concatenate(value_array, axis=2)
        value_array = value_array[:, :, ind]
    return gauges_array, times, value_array
|
Combine gauge outputs from multi-GPU models according to the gauge
position data.
gauges_pos.dat must be available for each domain.
|
625941bbf7d966606f6a9eba
|
def normalize_variant(chrom, offset, ref_sequence, alt_sequences, genome, flank_length=30):
    start = offset - 1
    end = start + len(ref_sequence)
    position = Position(
        chrom=chrom,
        chrom_start=start,
        chrom_stop=end,
        is_forward_strand=True)
    return NormalizedVariant(position, ref_sequence, alt_sequences, genome=genome)
|
Normalize variant according to the GATK/VCF standard.
chrom: chromosome containing variant.
offset: 1-based coordinate of reference allele in the genome.
ref_sequence: reference allele.
alt_sequences: list of all alternate sequences.
genome: pygr-compatible genome object.
|
625941bb3317a56b86939b20
|
def ground_motion_fields(rupture, sites, imts, gsim, truncation_level, realizations,
                         correlation_model=None,
                         rupture_site_filter=filters.rupture_site_noop_filter,
                         seed=None):
    ruptures_sites = list(rupture_site_filter([(rupture, sites)]))
    if not ruptures_sites:
        return dict((imt, numpy.zeros((len(sites), realizations))) for imt in imts)
    [(rupture, sites)] = ruptures_sites
    gc = GmfComputer(rupture, sites, [str(imt) for imt in imts], [gsim],
                     truncation_level, correlation_model)
    res = gc.compute(seed, gsim, realizations)
    result = {}
    for imti, imt in enumerate(gc.imts):
        if rupture_site_filter is not filters.rupture_site_noop_filter:
            result[imt] = sites.expand(res[imti], placeholder=0)
        else:
            result[imt] = res[imti]
    return result
|
Given an earthquake rupture, the ground motion field calculator computes
ground shaking over a set of sites, by randomly sampling a ground shaking
intensity model. A ground motion field represents a possible 'realization'
of the ground shaking due to an earthquake rupture. If a non-trivial
filtering function is passed, the final result is expanded and filled
with zeros in the places corresponding to the filtered out sites.
.. note::
This calculator is using random numbers. In order to reproduce the
same results numpy random numbers generator needs to be seeded, see
http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.seed.html
:param openquake.hazardlib.source.rupture.Rupture rupture:
Rupture to calculate ground motion fields radiated from.
:param openquake.hazardlib.site.SiteCollection sites:
Sites of interest to calculate GMFs.
:param imts:
List of intensity measure type objects (see
:mod:`openquake.hazardlib.imt`).
:param gsim:
Ground-shaking intensity model, instance of subclass of either
:class:`~openquake.hazardlib.gsim.base.GMPE` or
:class:`~openquake.hazardlib.gsim.base.IPE`.
:param truncation_level:
Float, number of standard deviations for truncation of the intensity
distribution, or ``None``.
:param realizations:
Integer number of GMF realizations to compute.
:param correlation_model:
Instance of correlation model object. See
:mod:`openquake.hazardlib.correlation`. Can be ``None``, in which case
non-correlated ground motion fields are calculated. Correlation model
is not used if ``truncation_level`` is zero.
:param rupture_site_filter:
Optional rupture-site filter function. See
:mod:`openquake.hazardlib.calc.filters`.
:param int seed:
The seed used in the numpy random number generator
:returns:
Dictionary mapping intensity measure type objects (same
as in parameter ``imts``) to 2d numpy arrays of floats,
representing different realizations of ground shaking intensity
for all sites in the collection. First dimension represents
sites and second one is for realizations.
|
625941bbd4950a0f3b08c209
|
def set_settings(self, settings):
    self._settings = settings
|
Specifies settings for cursor.
:param settings: dictionary of query settings
:return: None
|
625941bbd10714528d5ffb98
|
def exitGame():
    print()
    print()
    print("------------------------- Exit Game -------------------------")
    print()
|
Display a goodbye message and exit the game.
|
625941bbfb3f5b602dac3547
|
def crawl_blog_info(self, uin):
    rep = self.get_blog_info(uin, 0, 1)
    data = json.loads(jscallback2dict(rep.text))
    if data.get('code') == 0:
        coll = self.db.plcount
        res = coll.find_one({"uin": uin})
        RZ = res.get("count").get("RZ")
        if not RZ:
            return
        num = 15
        pos = 0
        count = 0
        coll = self.db.blogs
        while count < RZ:
            rep = self.get_blog_info(uin, pos, num)
            data = json.loads(jscallback2dict(rep.text))
            for i in range(len(data['data']['list'])):
                data['data']['list'][i]['hostUin'] = uin
            if data['data']['list']:
                coll.insert_many(data['data']['list'])
                print("Successfully insert %d blog infos " % len(data['data']['list']))
                pos += num
                count += num
            else:
                break
    else:
        print(data.get('code'), data.get('message'))
|
Crawl the basic information of all blog posts of the specified friend.
:param int | uin: the friend's account number
|
625941bb26068e7796caeb91
|
@task
def pg_load(c, database, only_data=True):
    if not only_data:
        c.run(f'psql -U postgres -d "{database}" -f schema.sql')
    c.run(f'psql -U postgres -d "{database}" -f data.sql')
|
--database=<name> [--only-data]
|
625941bb30c21e258bdfa353
|
def get_sequence(self, seq_type="genomic"):
    seq_types = ("genomic", "cds", "cdna", "protein")
    if seq_type not in seq_types:
        raise Exception("Invalid sequence type ({}). Known types are: "
                        "{}".format(seq_type, ", ".join(seq_types)))
    url = get_url_prefix(self.build)
    url += "sequence/id/{}?content-type=application/json&type={}"
    url = url.format(self.enst, seq_type)
    res = query_ensembl(url)
    seq = sequences.Sequence(
        res["id"],
        res["seq"],
        "AA" if seq_type == "protein" else "DNA",
    )
    return seq
|
Build a Sequence object representing the transcript.
:param seq_type: This can be either genomic, cds, cdna or protein.
:type seq_type: str
:returns: A Sequence object representing the feature.
:rtype: :py:class:`gepyto.structures.sequences.Sequence`
|
625941bbaad79263cf3908f4
|
def log_progress(*, extra="", force=False):
    toc = time.perf_counter()
    if (
        not force
        and counts["done"] < log_progress.last_log_count + LOG_COUNT_INTERVAL
        and toc < log_progress.last_log_time + LOG_TIME_INTERVAL
    ):
        return
    counts_str = ", ".join(
        f"{key}={value}" for key, value in sorted(counts.items())
    )
    delta_n = counts["done"] - log_progress.last_log_count
    delta_t = toc - log_progress.last_log_time
    rate = delta_n / delta_t
    app_log.info(
        f"{label} counts{' ' + extra if extra else ''} (elapsed={toc-tic:.0f}s {rate:.1f} it/s): {counts_str}"
    )
    log_progress.last_log_count = counts["done"]
    log_progress.last_log_time = toc
|
Log the current deletion counts
|
625941bbfb3f5b602dac3548
|
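The function above keeps its throttling state in attributes on the function object itself. A minimal self-contained sketch of that pattern (the names here are illustrative, not from the original module):

import time

LOG_TIME_INTERVAL = 5.0  # seconds between log lines (illustrative value)

def log_throttled(msg):
    # State lives on the function object, so no class or global dict is needed.
    now = time.perf_counter()
    if now < log_throttled.last_time + LOG_TIME_INTERVAL:
        return
    print(msg)
    log_throttled.last_time = now

log_throttled.last_time = float('-inf')  # the first call always logs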
@app.route("/", methods=['GET', 'POST']) <NEW_LINE> @app.route("/index.html", methods=['GET', 'POST']) <NEW_LINE> def home(): <NEW_LINE> <INDENT> if request.method == 'POST': <NEW_LINE> <INDENT> text = chat_bot.predict(text=request.json['text']) <NEW_LINE> return jsonify({"data": {"text": text}}) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return render_template("index.html")
|
Handles the home route: serves the page on GET and returns the chat bot's reply on POST.
|
625941bba79ad161976cbffd
|
def __draw_aim(self):
    self.gui.bindTexture(1)
    self.gui.modelMatrix = scale(array([0.2, 0.2, 0.2], 'f'))
    self.gui.sendMatrices()
    if self.target:
        self.gui.setColor(array([1, 0, 0, 1], 'f'))
    else:
        self.gui.setColor(array([1, 1, 1, 1], 'f'))
    self.quad.draw()
    if self.target:
        self.gui.modelMatrix = mul(translate(array([self.target_display_pos[0] * self.gui.aspect, self.target_display_pos[1], 0], 'f')), scale(array([0.2, 0.2, 0.2], 'f')))
        self.gui.sendMatrices()
        self.quad.draw()
|
Draw the aim marker.
|
625941bbdd821e528d63b063
|
def getFuelPriceData(self, fuelFile, cpiFile):
    df = pd.read_excel(fuelFile, sheetname='Data 4', skiprows=2)
    df = df.rename(columns={
        'Date': 'MONTH',
        'San Francisco All Grades All Formulations Retail Gasoline Prices (Dollars per Gallon)': 'FUEL_PRICE'
    })
    df = df[['MONTH', 'FUEL_PRICE']]
    df['MONTH'] = df['MONTH'].apply(pd.DateOffset(days=-14))
    dfcpi = self.getCPIFactors(cpiFile)
    df = pd.merge(df, dfcpi, how='left', on=['MONTH'], sort=True)
    df['FUEL_PRICE_2010USD'] = df['FUEL_PRICE'] * df['CPI_FACTOR']
    df = df[['MONTH', 'FUEL_PRICE', 'FUEL_PRICE_2010USD', 'CPI']]
    return df
|
Gets the fuel price data and returns it as a dataframe
fuelFile - file containing data from EIA
cpiFile - inflation factors
|
625941bbd7e4931a7ee9ddd4
|
def fbeta_score(y_true, y_pred, beta=1):
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score
|
Calculates the F score, the weighted harmonic mean of precision and recall.
This is useful for multi-label classification, where input samples can be
classified as sets of labels. By only using accuracy (precision) a model
would achieve a perfect score by simply assigning every class to every
input. In order to avoid this, a metric should penalize incorrect class
assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)
computes this, as a weighted mean of the proportion of correct class
assignments vs. the proportion of incorrect class assignments.
With beta = 1, this is equivalent to an F-measure. With beta < 1, assigning
correct classes becomes more important, and with beta > 1 the metric is
instead weighted towards penalizing incorrect class assignments.
|
625941bbd53ae8145f87a12e
|
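A quick worked check of the F-beta formula with plain floats (no Keras needed), assuming precision p = 0.5 and recall r = 1.0:

p, r = 0.5, 1.0
for beta in (0.5, 1.0, 2.0):
    bb = beta ** 2
    f = (1 + bb) * (p * r) / (bb * p + r)
    print(beta, round(f, 3))  # 0.5 -> 0.556, 1.0 -> 0.667, 2.0 -> 0.833
# beta = 1 recovers F1 = 2pr/(p+r); beta < 1 leans toward precision and
# beta > 1 toward recall, matching the description above.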
def collect_glib_etc_files(*path):
    glib_config_dirs = get_glib_sysconf_dirs()
    if glib_config_dirs is None:
        return []
    destdir = os.path.join('etc', *path[:-1])
    collected = []
    for config_dir in glib_config_dirs:
        p = os.path.join(config_dir, *path)
        collected += collect_system_data_files(p, destdir=destdir, include_py_files=False)
    return collected
|
path is relative to the system config directory (e.g., /etc)
|
625941bb7cff6e4e8111783e
|
def check_security_policy(self, requires=None):
    _log.debug("Security: check_security_policy")
    if self.sec_conf and "authorization" in self.sec_conf:
        return self.get_authorization_decision(requires)
    return True
|
Check if access is permitted for the actor by the security policy
|
625941bb44b2445a33931f58
|
def test_fmtsize(self):
    self.assertEqual("1023 B", tools.fmtsize(1023))
    self.assertEqual("5120 B", tools.fmtsize(5 * 1024))
    self.assertEqual("9216 B", tools.fmtsize(9 * 1024))
    self.assertEqual("123.0 MB", tools.fmtsize(123 * 1024 ** 2))
    self.assertEqual("10.0 GB", tools.fmtsize(10200 * 1024 ** 2))
    self.assertEqual("321.0 GB", tools.fmtsize(321 * 1024 ** 3))
|
Basic fmtsize usage.
|
625941bbbe383301e01b5344
|
def as_operation(self, timer=datetime.utcnow):
    now = timer()
    op = messages.Operation(
        endTime=timestamp.to_rfc3339(now),
        startTime=timestamp.to_rfc3339(now),
        importance=messages.Operation.ImportanceValueValuesEnum.LOW)
    if self.operation_id:
        op.operationId = self.operation_id
    if self.operation_name:
        op.operationName = self.operation_name
    if self.api_key and self.api_key_valid:
        op.consumerId = 'api_key:' + self.api_key
    elif self.consumer_project_id:
        op.consumerId = 'project:' + self.consumer_project_id
    return op
|
Makes an ``Operation`` from this instance.
Returns:
an ``Operation``
|
625941bb0383005118ecf49c
|
def test_deny_empty_stories():
    with pytest.raises(StoryDefinitionError) as exc_info:
        class Action:
            @story
            def do(I):
                pass
    assert str(exc_info.value) == "Story should have at least one step defined"
|
We cannot define a story which does not have any steps.
This will make it impossible to determine the right executor in the stories
composition.
|
625941bb45492302aab5e179
|
def edges_iter_dir(self, u, v=None, dir_code=0):
    def filter_edges():
        if dir_code < 0:
            for edge in self.edges_iter(u, v):
                edge_dir = self.edge(edge)["direction"]
                if any(map(lambda dir_perm: dir_perm.index(u) > 0, edge_dir)):
                    yield edge
        elif dir_code > 0:
            for edge in self.edges_iter(u, v):
                edge_dir = self.edge(edge)["direction"]
                if any(map(lambda dir_perm: dir_perm.index(u) < len(dir_perm) - 1, edge_dir)):
                    yield edge
    if dir_code == 0:
        return self.edges_iter(u, v)
    else:
        return filter_edges()
|
Get edges incident to node u filtered by direction.
:param u: node id.
:param dir_code: Direction code - If 0 all edges are
returned, if 1 only outgoing edges are returned,
if -1 only incoming edges are returned.
|
625941bb67a9b606de4a7d75
|
def OnLeft(self, *args):
    self.rob.SetMotors(-self.SpeedValue, self.SpeedValue)
|
Process the Left event.
|
625941bbb830903b967e97ce
|
def __init__(self, rng, input, n_in, n_out, activation, W=None, b=None):
    self.input = input
    if not W:
        W_values = np.asarray(rng.uniform(
            low=-np.sqrt(6. / (n_in + n_out)),
            high=np.sqrt(6. / (n_in + n_out)),
            size=(n_in, n_out)), dtype=theano.config.floatX)
        if activation == theano.tensor.nnet.sigmoid:
            W_values *= 4
        W = theano.shared(value=W_values, name='W', borrow=True)
    if not b:
        b_values = np.zeros((n_out,), dtype=theano.config.floatX)
        b = theano.shared(value=b_values, name='b', borrow=True)
    self.W = W
    self.b = b
    linOutput = T.dot(input, self.W) + self.b
    self.output = (linOutput if activation is None else activation(linOutput))
    self.params = [self.W, self.b]
|
A layer of a neural network
:type rng: numpy.random.RandomState
:param rng: random number generator used for weight initialization
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden layer
|
625941bb30c21e258bdfa354
|
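The uniform range in the layer above is the Glorot/Xavier heuristic. A small numpy-only sketch of the same initialization outside Theano (the function name is illustrative):

import numpy as np

def glorot_uniform(rng, n_in, n_out, sigmoid=False):
    # W ~ U[-sqrt(6/(n_in+n_out)), +sqrt(6/(n_in+n_out))]; the bound is
    # scaled by 4 for sigmoid activations, as in the layer above.
    limit = np.sqrt(6.0 / (n_in + n_out))
    W = rng.uniform(-limit, limit, size=(n_in, n_out))
    return 4 * W if sigmoid else W

W = glorot_uniform(np.random.RandomState(0), n_in=784, n_out=256)
print(W.shape)  # -> (784, 256)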
def __init__(self, hostAndPort):
    self.executable = ['gpg']
    self.keyserverOptions = ['--keyserver', hostAndPort]
|
@param hostAndPort: e.g. 'hkp://localhost:11371'
@type hostAndPort: str
|
625941bb30dc7b7665901822
|
def process_corpus(self):
    if self.corpusSettings is None or len(self.corpusSettings) <= 0:
        return
    self.load_meta()
    nTokens, nWords, nAnalyzed = 0, 0, 0
    if self.srcExt != 'json':
        srcDir = os.path.join('..', self.srcExt)
    else:
        srcDir = os.path.join(self.corpusSettings['corpus_dir'], 'json_input')
    targetDir = os.path.join(self.corpusSettings['corpus_dir'], self.corpusSettings['corpus_name'])
    for path, dirs, files in os.walk(srcDir):
        for filename in files:
            if not filename.lower().endswith('.' + self.srcExt):
                continue
            targetPath = path.replace(srcDir, targetDir)
            if targetPath == path:
                continue
            if not os.path.exists(targetPath):
                os.makedirs(targetPath)
            fnameSrc = os.path.join(path, filename)
            fnameTarget = os.path.join(targetPath, filename)
            fextTarget = '.json'
            if self.corpusSettings['gzip']:
                fextTarget = '.json.gz'
            fnameTarget = self.rxStripExt.sub(fextTarget, fnameTarget)
            self.log_message('Processing ' + fnameSrc + '...')
            curTokens, curWords, curAnalyzed = self.convert_file(fnameSrc, fnameTarget)
            nTokens += curTokens
            nWords += curWords
            nAnalyzed += curAnalyzed
    print('Conversion to JSON finished.', nTokens, 'tokens total,', nWords, 'words total.')
    if nWords > 0:
        print(nAnalyzed, 'words parsed (' + str(nAnalyzed / nWords * 100) + '%).')
    if 'cg_disambiguate' in self.corpusSettings and self.corpusSettings['cg_disambiguate']:
        translator = JSON2CG()
        translator.process_corpus()
|
Take every text file from the source directory subtree, turn it
into a parsed json and store it in the target directory.
This is the main function of the class.
|
625941bb4e696a04525c9305
|
def build_model(self):
    states = layers.Input(shape=(self.state_size,), name='states')
    actions = layers.Input(shape=(self.action_size,), name='actions')
    # State pathway: Dense -> BatchNorm -> ReLU -> Dense. The original
    # assigned the BatchNorm/Activation outputs to an unused variable,
    # silently dropping them; they are chained here instead.
    net_states = layers.Dense(units=128, kernel_regularizer=regularizers.l2(0.01))(states)
    net_states = layers.BatchNormalization()(net_states)
    net_states = layers.Activation("relu")(net_states)
    net_states = layers.Dense(units=64, kernel_regularizer=regularizers.l2(0.01))(net_states)
    # Action pathway, mirrored.
    net_actions = layers.Dense(units=128, kernel_regularizer=regularizers.l2(0.01))(actions)
    net_actions = layers.BatchNormalization()(net_actions)
    net_actions = layers.Activation("relu")(net_actions)
    net_actions = layers.Dense(units=64, kernel_regularizer=regularizers.l2(0.01))(net_actions)
    net = layers.Add()([net_states, net_actions])
    net = layers.Activation('relu')(net)
    Q_values = layers.Dense(units=1, name='q_values')(net)
    self.model = models.Model(inputs=[states, actions], outputs=Q_values)
    optimizer = optimizers.Adam()
    self.model.compile(optimizer=optimizer, loss='mse')
    action_gradients = K.gradients(Q_values, actions)
    self.get_action_gradients = K.function(
        inputs=[*self.model.input, K.learning_phase()],
        outputs=action_gradients)
|
Build a critic (value) network that maps (state, action) pairs -> Q-values.
|
625941bbdc8b845886cb53ed
|
def to_bytes(bytes_or_str):
    if isinstance(bytes_or_str, str):
        value = bytes_or_str.encode('utf-8')
    else:
        value = bytes_or_str
    return value
|
Take a bytes or str instance and always return bytes.
|
625941bbeab8aa0e5d26da17
|
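Usage is symmetric for both input types:

print(to_bytes('héllo'))   # -> b'h\xc3\xa9llo' (str is UTF-8 encoded)
print(to_bytes(b'hello'))  # -> b'hello' (bytes pass through unchanged)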
def setUp(self):
    self.options = Options()
    software = ['bumps', 'dfo', 'gsl', 'mantid', 'minuit', 'ralfit', 'scipy', 'scipy_ls']
    self.options.software = software
|
Initializes options class with defaults
|
625941bb4f88993c3716bf27
|
def submit_ts(job):
    job.workflow.addParameter(Parameter("JOB_ID", "000000", "string", "", "", True, False, "Temporary fix"))
    job.workflow.addParameter(Parameter("PRODUCTION_ID", "000000", "string", "", "", True, False, "Temporary fix"))
    job.setType('MCSimulation')
    trans = Transformation()
    trans.setType("MCSimulation")
    trans.setDescription("MC Prod3 BaseLine NSB test")
    trans.setLongDescription("corsika-simtel production")
    trans.setBody(job.workflow.toXML())
    res = trans.addTransformation()
    if not res['OK']:
        print(res['Message'])
        DIRAC.exit(-1)
    trans.setStatus("Active")
    trans.setAgentType("Automatic")
    return res
|
Create a transformation executing the job workflow
|
625941bbab23a570cc250038
|
def extract_features(filename):
    data, sample_rate = librosa.load(filename, sr=16000)
    mfcc = librosa.feature.mfcc(y=data, sr=sample_rate, n_fft=1600, hop_length=800)
    return mfcc.T
|
Extract audio features of the file
Args:
filename: audio filename
Returns:
feature matrix [time_step, features]
|
625941bb63f4b57ef0000fda
|
def test_conversion(self):
    primitive = UserIdentityNegotiation()
    primitive.user_identity_type = 1
    primitive.primary_field = b'test'
    item = primitive.from_primitive()
    primitive.user_identity_type = 2
    primitive.secondary_field = None
    with self.assertRaises(ValueError):
        item = primitive.from_primitive()
    primitive = UserIdentityNegotiation()
    primitive.server_response = b'Test'
    item = primitive.from_primitive()
    self.assertTrue(item.encode() == b'\x59\x00\x00\x06\x00\x04\x54\x65\x73\x74')
|
Check that converting to a PDU item works correctly.
|
625941bb55399d3f0558856c
|
def __init__(self, sensor_service, config=None):
    super(KafkaMessageSensor, self).__init__(sensor_service=sensor_service, config=config)
    self._logger = self._sensor_service.get_logger(__name__)
    message_sensor = self._config.get('message_sensor')
    if not message_sensor:
        raise ValueError('[KafkaMessageSensor]: "message_sensor" config value is required!')
    self._hosts = message_sensor.get('hosts')
    if not self._hosts:
        raise ValueError(
            '[KafkaMessageSensor]: "message_sensor.hosts" config value is required!')
    self._topics = set(message_sensor.get('topics', []))
    if not self._topics:
        raise ValueError(
            '[KafkaMessageSensor]: "message_sensor.topics" should list at least one topic!')
    self._group_id = message_sensor.get('group_id') or self.DEFAULT_GROUP_ID
    self._client_id = message_sensor.get('client_id') or self.DEFAULT_CLIENT_ID
    self._consumer = None
|
Parse config variables, set defaults.
|
625941bb66656f66f7cbc063
|
def get_parameter_vector(self, include_frozen=False):
    if include_frozen:
        return self.parameter_vector
    return self.parameter_vector[self.unfrozen_mask]
|
Get an array of the parameter values in the correct order
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``)
|
625941bb01c39578d7e74cfb
|
def add_experiences(self, info, rewards, epsi, actions, a_dist, value):
    for (agent, history) in self.history_dict.items():
        if agent in info.agents:
            idx = info.agents.index(agent)
            if not info.local_done[idx]:
                if self.use_observations:
                    for i, _ in enumerate(info.observations):
                        history['observations%d' % i].append([info.observations[i][idx]])
                if self.use_states:
                    actID = info.states[idx][0] + 1
                    history['action_ref'][actID] = len(history['states'])
                    history['states'].append(info.states[idx])
                    if self.print_debug:
                        print("add actID", actID, actions[idx], action_to_str(actions[idx]))
                if self.is_continuous:
                    history['epsilons'].append(epsi[idx])
                history['actions'].append(actions[idx])
                history['rewards'].append(rewards[idx])
                history['action_probs'].append(a_dist[idx])
                history['value_estimates'].append(value[idx][0])
                history['cumulative_reward'] += rewards[idx]
                history['episode_steps'] += 1
|
Adds experiences to each agent's experience history.
:param info: Current BrainInfo.
:param rewards: Next Rewards.
:param epsi: Epsilon value (for continuous control)
:param actions: Chosen actions.
:param a_dist: Action probabilities.
:param value: Value estimates.
|
625941bbff9c53063f47c0ae
|
def delete_sub_container(self, sub_container_id): <NEW_LINE> <INDENT> pass
|
Deletes the subcontainer. Fails if the subcontainer is not empty.
Automatically creates a new record version.
|
625941bb377c676e91272063
|
def canMeasureWater(self, x, y, z): <NEW_LINE> <INDENT> def gcd(a, b): <NEW_LINE> <INDENT> a, b = max(a, b), min(a, b) <NEW_LINE> if b == 0: <NEW_LINE> <INDENT> return a <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return gcd(b, a%b) <NEW_LINE> <DEDENT> <DEDENT> if z > x+y: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> factor = gcd(x, y) <NEW_LINE> if factor == 1: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> elif factor == 0: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> elif z % factor == 0: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
|
:type x: int
:type y: int
:type z: int
:rtype: bool
|
625941bb30bbd722463cbc7c
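The check rests on Bézout's identity: z is measurable iff z <= x + y and z is a multiple of gcd(x, y). A quick usage sketch (Solution is an assumed wrapper class for the method):

s = Solution()
print(s.canMeasureWater(3, 5, 4))   # True:  gcd(3, 5) = 1 divides 4
print(s.canMeasureWater(2, 6, 5))   # False: 5 is not a multiple of gcd(2, 6) = 2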
|
def _repr_(self): <NEW_LINE> <INDENT> return "Arrangements of the set %s of length %s"%(list(self._set),self.k)
|
TESTS::
sage: Arrangements([1,2,3],2)
Arrangements of the set [1, 2, 3] of length 2
|
625941bb3eb6a72ae02ec38d
|
def poet_freq(self, poet): <NEW_LINE> <INDENT> f_all_words = [] <NEW_LINE> for poem in self.liblist: <NEW_LINE> <INDENT> if poet == poem.poet: <NEW_LINE> <INDENT> f_all_words += poem.f_words <NEW_LINE> <DEDENT> <DEDENT> fdist = FreqDist(f_all_words) <NEW_LINE> return fdist
|
Builds the frequency distribution of all words used by one poet across the library.
:return: FreqDist
|
625941bbd164cc6175782c07
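A usage sketch, assuming a library object whose liblist holds poems with poet and f_words attributes (names taken from the method body):

fdist = library.poet_freq('Emily Dickinson')
print(fdist.most_common(5))   # the poet's five most frequent words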
|
def gradient(self, r): <NEW_LINE> <INDENT> sigma = self.params['sigma'] <NEW_LINE> epsilon = self.params['epsilon'] <NEW_LINE> s = sigma / r <NEW_LINE> s6 = s**6; s12 = s6 * s6 <NEW_LINE> grad = 4.0 * epsilon * ((-12.0/r) * s12 - (-6.0/r) * s6) <NEW_LINE> return grad
|
V'(r) = 4\epsilon\left[-\frac{12}{r} s^{12} + \frac{6}{r} s^{6}\right], \qquad s = \sigma/r
|
625941bb76e4537e8c351531
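A minimal finite-difference check of the analytic gradient, a sketch assuming params = {'sigma': 1.0, 'epsilon': 1.0} and the matching Lennard-Jones potential:

def lj(r, sigma=1.0, epsilon=1.0):
    s6 = (sigma / r) ** 6
    return 4.0 * epsilon * (s6 * s6 - s6)

r, h = 1.5, 1e-6
numeric = (lj(r + h) - lj(r - h)) / (2 * h)
s6 = (1.0 / r) ** 6
analytic = 4.0 * ((-12.0 / r) * s6 * s6 - (-6.0 / r) * s6)
print(abs(numeric - analytic) < 1e-6)   # True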
|
def fetch_anndata(path, from_gcs): <NEW_LINE> <INDENT> _, ext = os.path.splitext(path) <NEW_LINE> if from_gcs: <NEW_LINE> <INDENT> with tempfile.NamedTemporaryFile(delete=False) as tmp_file: <NEW_LINE> <INDENT> tmp_path = tmp_file.name <NEW_LINE> <DEDENT> tf.io.gfile.copy(path, tmp_path, overwrite=True) <NEW_LINE> path = tmp_path <NEW_LINE> <DEDENT> if ext == '.h5ad': <NEW_LINE> <INDENT> adata = anndata.read_h5ad(path) <NEW_LINE> <DEDENT> elif ext == '.loom': <NEW_LINE> <INDENT> adata = anndata.read_loom(path) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise app.UsageError('Only supports loom and h5ad files.') <NEW_LINE> <DEDENT> return adata
|
Reads the input data and turns it into an anndata.AnnData object.
|
625941bb24f1403a92600a22
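A usage sketch (paths are illustrative):

adata = fetch_anndata('data/pbmc.h5ad', from_gcs=False)        # read a local file directly
adata = fetch_anndata('gs://bucket/pbmc.loom', from_gcs=True)  # staged to a temp file via tf.io.gfile first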
|
def get_stack(self): <NEW_LINE> <INDENT> return get_stack(self.dstack)
|
Returns the contents of the primary stack.
|
625941bb6fb2d068a760ef53
|
def page_list_return(total, current=1): <NEW_LINE> <INDENT> min_page = current - 2 if current - 4 > 0 else 1 <NEW_LINE> max_page = min_page + 4 if min_page + 4 < total else total <NEW_LINE> return range(min_page, max_page + 1)
|
Pagination helper: returns the window of page numbers, from the minimum to the maximum page, to display around the current page.
|
625941bb3c8af77a43ae3656
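A quick look at the sliding window it produces, for total=10:

print(list(page_list_return(10, 1)))   # [1, 2, 3, 4, 5]
print(list(page_list_return(10, 6)))   # [4, 5, 6, 7, 8]
print(list(page_list_return(10, 10)))  # [8, 9, 10]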
|
def Dismiss(self): <NEW_LINE> <INDENT> while len(self.player_list) > 0: <NEW_LINE> <INDENT> playerObj = self.player_list.pop() <NEW_LINE> playerObj.room = None <NEW_LINE> playerObj.role_id = 0 <NEW_LINE> <DEDENT> self.map = None <NEW_LINE> player.PlayerMgr().DelRoom(self)
|
Dismiss the room (map): detach every player, clear their role, and remove the room from the player manager.
|
625941bbff9c53063f47c0af
|
def test_name_sorted_regions_eq_slug_sorted_regions(self): <NEW_LINE> <INDENT> self.assertEqual(len(regions.REGIONS_CHOICES_NAME), len(regions.REGIONS_CHOICES_SORTED_BY_NAME())) <NEW_LINE> self.assertSetEqual(regions.REGIONS_CHOICES_NAME, regions.REGIONS_CHOICES_SORTED_BY_NAME())
|
Check data is the same, irrespective of ordering.
|
625941bb5fc7496912cc383f
|
def update(self, delta=1): <NEW_LINE> <INDENT> self._value += delta
|
Increment the counter by a delta.
Args:
delta (int): Delta to increment counter with.
|
625941bb76d4e153a657e9e9
|
def _process_img(self, div: Tag): <NEW_LINE> <INDENT> full_original_image_path = os.path.join(self._working_directory, div.a['href']) <NEW_LINE> title = div.a['title'] <NEW_LINE> alt = div.img['alt'] <NEW_LINE> full_thumbnail_path = os.path.join(self._working_directory, div.img['src']) <NEW_LINE> self._plain_text += '\n' <NEW_LINE> if not os.path.exists(full_original_image_path) or not os.access(full_original_image_path, os.R_OK) or not os.access(full_original_image_path, os.W_OK): <NEW_LINE> <INDENT> full_original_image_path = None <NEW_LINE> <DEDENT> if not os.path.exists(full_thumbnail_path) or not os.access(full_thumbnail_path, os.R_OK) or not os.access(full_thumbnail_path, os.W_OK): <NEW_LINE> <INDENT> full_thumbnail_path = None <NEW_LINE> <DEDENT> return ImageInText(title, alt, full_original_image_path, full_thumbnail_path, div.a['href'], div.img['src'])
|
Process an in text image.
:param div: The beautiful soup div element containing an in text image.
:return: ImageInText instance.
|
625941bbbe7bc26dc91cd4be
|
def get_allowed_variants(self): <NEW_LINE> <INDENT> data = [] <NEW_LINE> for variant in self.allowed_variants.all(): <NEW_LINE> <INDENT> data.append({ 'id': variant.id, 'title': variant.title, 'price': variant.price, }) <NEW_LINE> <DEDENT> return json.dumps(data)
|
One type of item (coffee or tea) can have variants (hot or cold); returns the allowed variants serialized as a JSON string.
|
625941bb3eb6a72ae02ec38e
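The serialized shape, sketched for a hypothetical item with two variants (prices assumed JSON-serializable, e.g. floats rather than Decimal):

item.get_allowed_variants()
# '[{"id": 1, "title": "hot", "price": 3.5}, {"id": 2, "title": "cold", "price": 4.0}]'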
|
def filter_annotations(annotations, tagsFilter=()): <NEW_LINE> <INDENT> annosOut = [] <NEW_LINE> for anno in annotations: <NEW_LINE> <INDENT> tags = anno.get_child("tags").get_value() <NEW_LINE> for tag in tags: <NEW_LINE> <INDENT> if tag in tagsFilter: <NEW_LINE> <INDENT> annosOut.append(anno) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return annosOut
|
Args:
annotations: [list of Node()] the annotations to use
tagsFilter [list of strings]: if an annotation contains one of the tagFilters, then we accept it
Returns:
list of accepted annotations
|
625941bbc432627299f04afd
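A usage sketch with hypothetical Node annotations (the get_child/get_value interface is taken from the body):

accepted = filter_annotations(annotations, tagsFilter=['todo', 'review'])
# every accepted Node carries at least one of the two tags, and appears once thanks to the break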
|
def _read_from_input(self, io: IO) -> str: <NEW_LINE> <INDENT> ret = io.read_line(4096) <NEW_LINE> if not ret: <NEW_LINE> <INDENT> raise RuntimeError("Aborted") <NEW_LINE> <DEDENT> return ret.strip()
|
Read user input.
|
625941bbd6c5a10208143f01
|
def displaycover(index): <NEW_LINE> <INDENT> if 0 <= index < 12: <NEW_LINE> <INDENT> x = (index % 4) * 200 <NEW_LINE> y = (index // 4) * 200 <NEW_LINE> SCREEN.blit(tilelist[index], (x, y))
|
Drawing one cover at a time
|
625941bb3617ad0b5ed67db3
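The grid arithmetic behind the rewrite above, spelled out (4 columns of 200-pixel cells):

for index in range(12):
    print(index, ((index % 4) * 200, (index // 4) * 200))
# 0 (0, 0)    1 (200, 0)  ...  4 (0, 200)  ...  11 (600, 400)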
|
def test_address(self): <NEW_LINE> <INDENT> self.dc.set_address("::1") <NEW_LINE> self.dc.set_port(self.next_free_port) <NEW_LINE> self.dc.start() <NEW_LINE> self.assertTrue(self.dc.is_alive()) <NEW_LINE> self.assertTrue(self.dc.is_alive()) <NEW_LINE> self.assertTrue(self.dc.is_alive()) <NEW_LINE> durl = self.dc.get_depot_url() <NEW_LINE> verdata = urlopen("{0}/versions/0/".format(durl))
|
Verify that depot address can be set.
|
625941bb7b180e01f3dc46bd
|
def add(self, entry, data): <NEW_LINE> <INDENT> pass
|
Adds a snapshot.
Parameters:
- entry: Snapshot entry.
- data: Snapshot data.
625941bb16aa5153ce362331
|
@bp_api.route('/push/<link_id>/info', methods=['GET']) <NEW_LINE> def push_info(link_id): <NEW_LINE> <INDENT> wallet = PushWallet.get_or_none(link_id=link_id) <NEW_LINE> if not wallet: <NEW_LINE> <INDENT> return jsonify({'error': 'Link does not exist'}), HTTPStatus.NOT_FOUND <NEW_LINE> <DEDENT> return jsonify({ 'seen': wallet.seen, 'sender': wallet.sender, 'recipient': wallet.recipient, 'is_protected': wallet.password_hash is not None, 'customization_id': wallet.customization_setting_id, })
|
swagger: swagger/core/push-info.yml
|
625941bb56ac1b37e626408f
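A test-client sketch for the route (assumes a Flask app with bp_api registered; the link id is hypothetical):

resp = app.test_client().get('/push/abc123/info')
print(resp.status_code)   # 200 if the link exists, 404 otherwise
print(resp.get_json())    # seen / sender / recipient / is_protected / customization_id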
|
def _merge_a_into_b_simple(self, a, b): <NEW_LINE> <INDENT> for k, v in a.items(): <NEW_LINE> <INDENT> b[k] = v <NEW_LINE> <DEDENT> return b
|
Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a. Do not do any checking.
|
625941bb460517430c394046
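Behaviourally this is dict.update on b with a's values winning; a quick check:

a = {'LR': 0.01}
b = {'LR': 0.001, 'BATCH': 32}
b.update(a)   # same clobbering semantics as _merge_a_into_b_simple
print(b)      # {'LR': 0.01, 'BATCH': 32}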
|
def transform(input_data): <NEW_LINE> <INDENT> if isinstance(input_data, str): <NEW_LINE> <INDENT> if ' ' in input_data: <NEW_LINE> <INDENT> return input_data.split(' ')[0] <NEW_LINE> <DEDENT> <DEDENT> return input_data
|
Get the first word of a string with more than one word.
:param input_data: Text with more than one word.
:return: The first word of the input string.
:rtype: str or type of the input data if not a string
|
625941bb21a7993f00bc7ba4
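A few illustrative calls:

print(transform('hello world'))   # 'hello'
print(transform('single'))        # 'single' (no space, returned unchanged)
print(transform(42))              # 42 (non-strings pass through)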
|
def _on_buffer_text_modified(self, change): <NEW_LINE> <INDENT> if self._ignore_changes: return <NEW_LINE> if self._scratchpad_active and (self._transaction_changes is None or not self._transaction_changes): <NEW_LINE> <INDENT> self._scratchpad.append(change) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if self._transaction_changes is None: <NEW_LINE> <INDENT> warnings.warn('Buffer modified outside of transaction.') <NEW_LINE> with self.transaction(): <NEW_LINE> <INDENT> self._transaction_changes.append(change) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self._transaction_changes.append(change)
|
:type change: keypad.buffer.TextModification
|
625941bb8e05c05ec3eea22b
|
def main(): <NEW_LINE> <INDENT> os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'profpoll.settings') <NEW_LINE> try: <NEW_LINE> <INDENT> from django.core.management import execute_from_command_line <NEW_LINE> <DEDENT> except ImportError as exc: <NEW_LINE> <INDENT> raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc <NEW_LINE> <DEDENT> execute_from_command_line(sys.argv)
|
Run administrative tasks.
|
625941bb8a43f66fc4b53f22
|
def begin_get_vpnclient_connection_health( self, resource_group_name, virtual_network_gateway_name, **kwargs ): <NEW_LINE> <INDENT> polling = kwargs.pop('polling', True) <NEW_LINE> cls = kwargs.pop('cls', None) <NEW_LINE> lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) <NEW_LINE> cont_token = kwargs.pop('continuation_token', None) <NEW_LINE> if cont_token is None: <NEW_LINE> <INDENT> raw_result = self._get_vpnclient_connection_health_initial( resource_group_name=resource_group_name, virtual_network_gateway_name=virtual_network_gateway_name, cls=lambda x,y,z: x, **kwargs ) <NEW_LINE> <DEDENT> kwargs.pop('error_map', None) <NEW_LINE> kwargs.pop('content_type', None) <NEW_LINE> def get_long_running_output(pipeline_response): <NEW_LINE> <INDENT> deserialized = self._deserialize('VpnClientConnectionHealthDetailListResult', pipeline_response) <NEW_LINE> if cls: <NEW_LINE> <INDENT> return cls(pipeline_response, deserialized, {}) <NEW_LINE> <DEDENT> return deserialized <NEW_LINE> <DEDENT> path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } <NEW_LINE> if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) <NEW_LINE> elif polling is False: polling_method = NoPolling() <NEW_LINE> else: polling_method = polling <NEW_LINE> if cont_token: <NEW_LINE> <INDENT> return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
Get VPN client connection health detail per P2S client connection of the virtual network
gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnClientConnectionHealthDetailListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.VpnClientConnectionHealthDetailListResult]
:raises ~azure.core.exceptions.HttpResponseError:
|
625941bb7d43ff24873a2b56
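A caller-side sketch (client construction elided; operation and argument names follow the generated azure-mgmt-network API shown above):

poller = network_client.virtual_network_gateways.begin_get_vpnclient_connection_health(
    resource_group_name='my-rg',
    virtual_network_gateway_name='my-gw',
)
result = poller.result()   # blocks until the LRO completes, then returns the deserialized result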
|
def __resolve_links(self): <NEW_LINE> <INDENT> for promise in SectionPromise.promises: <NEW_LINE> <INDENT> promise.resolve() <NEW_LINE> <DEDENT> SectionPromise.promises = []
|
resolves all linked references (SectionPromise instances).
|
625941bb9b70327d1c4e0c8d
|