Dataset schema (one row per source file):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | string | lengths 3 to 972 |
| max_stars_repo_name | string | lengths 6 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 3 to 972 |
| max_issues_repo_name | string | lengths 6 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 3 to 972 |
| max_forks_repo_name | string | lengths 6 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
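The size, avg_line_length, max_line_length and alphanum_fraction columns are statistics of the content field. A minimal sketch of how they can be reproduced; the exact definitions used by the dataset are not spelled out here, so treat these formulas as assumptions:

```python
# Sketch: presumed derivation of the per-record statistics columns.
# The exact definitions used by the dataset are assumptions here.

def content_stats(content: str) -> dict:
    lines = content.splitlines()
    n_chars = len(content)
    return {
        "size": len(content.encode("utf-8")),                      # file size in bytes
        "avg_line_length": n_chars / max(len(lines), 1),           # mean characters per line
        "max_line_length": max((len(l) for l in lines), default=0),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(n_chars, 1),
    }

print(content_stats("from django.urls import path\n"))
```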
hexsha: c71bd63983d00aa5161a24349a0af33c353d0e7b | size: 1,152 | ext: py | lang: Python
max_stars: repo_path scoring/urls.py | repo_name alextenczar/3-julian-alex | head_hexsha 9f2aa71769dd6eb6e7dd9e63236c3e7874f02de7 | licenses ["MIT"] | count null | stars_event_min/max null / null
max_issues: repo_path scoring/urls.py | repo_name alextenczar/3-julian-alex | head_hexsha 9f2aa71769dd6eb6e7dd9e63236c3e7874f02de7 | licenses ["MIT"] | count 3 | issues_event_min/max 2021-06-09T19:34:38.000Z / 2022-02-10T12:25:27.000Z
max_forks: repo_path scoring/urls.py | repo_name alextenczar/3-julian-alex | head_hexsha 9f2aa71769dd6eb6e7dd9e63236c3e7874f02de7 | licenses ["MIT"] | count null | forks_event_min/max null / null
content:
from django.urls import path
from django.conf.urls import url
import scoring.views
from scoring.views import *
from scoring.views.remove_all_data import remove_all_data
from scoring.views.display.display_top_projects import display_top_projects
#from .views import HomeListView
urlpatterns = [
# url(r'add/score/$', add_score, name='add_score'),
# url(r'^edit/$', edit_score, name='edit_score'),
path('', add_score, name='add_score'),
path('display_judges/', display_judges, name='display_judges'),
path('display_projects/', display_projects, name='display_projects'),
path('display_judge_assignments/', display_judge_assignments, name='display_judge_assignments'),
path('display_students/', display_students, name='display_students'),
path('import_file/', import_file, name='import_file'),
path('export_judge_assignment/', export_judge_assignment, name='export_judge_assignment'),
path('remove_all_data/', remove_all_data, name='remove_all_data'),
path('calculate_scores/', calculate_scores, name='calculate_scores'),
path('display_top_projects/', display_top_projects, name='display_top_projects')
]
avg_line_length: 48 | max_line_length: 100 | alphanum_fraction: 0.765625

hexsha: 4f55df480d362f8e9409d541dcc65ab6a74ca401 | size: 1,929 | ext: py | lang: Python
max_stars: repo_path setup.py | repo_name 0xh4di/impacket | head_hexsha a400ca15946c6c1f4d7a9dfc852032510b75816d | licenses ["Apache-1.1"] | count 2 | stars_event_min/max 2020-01-07T18:02:15.000Z / 2020-08-07T17:22:23.000Z
max_issues: repo_path setup.py | repo_name 0xh4di/impacket | head_hexsha a400ca15946c6c1f4d7a9dfc852032510b75816d | licenses ["Apache-1.1"] | count null | issues_event_min/max null / null
max_forks: repo_path setup.py | repo_name 0xh4di/impacket | head_hexsha a400ca15946c6c1f4d7a9dfc852032510b75816d | licenses ["Apache-1.1"] | count 1 | forks_event_min/max 2019-11-01T02:32:37.000Z / 2019-11-01T02:32:37.000Z
content:
#!/usr/bin/env python
# $Id$
import glob
import os
import platform
import sys
from setuptools import setup
PACKAGE_NAME = "impacket"
if platform.system() != 'Darwin':
data_files = [(os.path.join('share', 'doc', PACKAGE_NAME), ['README.md', 'LICENSE']+glob.glob('doc/*'))]
else:
data_files = []
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name = PACKAGE_NAME,
version = "0.9.19-dev",
description = "Network protocols Constructors and Dissectors",
url = "https://www.secureauth.com/labs/open-source-tools/impacket",
author = "SecureAuth Corporation",
author_email = "oss@secureauth.com",
maintainer = "Alberto Solino",
maintainer_email = "bethus@gmail.com",
license = "Apache modified",
long_description = read('README.md'),
long_description_content_type="text/markdown",
platforms = ["Unix","Windows"],
packages=['impacket', 'impacket.dcerpc', 'impacket.examples', 'impacket.dcerpc.v5', 'impacket.dcerpc.v5.dcom',
'impacket.krb5', 'impacket.ldap', 'impacket.examples.ntlmrelayx',
'impacket.examples.ntlmrelayx.clients', 'impacket.examples.ntlmrelayx.servers',
'impacket.examples.ntlmrelayx.servers.socksplugins', 'impacket.examples.ntlmrelayx.utils',
'impacket.examples.ntlmrelayx.attacks'],
scripts = glob.glob(os.path.join('examples', '*.py')),
data_files = data_files,
install_requires=['pyasn1>=0.2.3', 'pycryptodomex', 'pyOpenSSL>=0.13.1', 'six', 'ldap3==2.5.1', 'ldapdomaindump', 'flask>=1.0'],
extras_require={
'pyreadline:sys_platform=="win32"': [],
'python_version<"2.7"': [ 'argparse' ],
},
classifiers = [
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2.6",
]
)
avg_line_length: 37.823529 | max_line_length: 134 | alphanum_fraction: 0.621047

hexsha: 29d2213a634f3e60b8f2fd50545371d1e8af192e | size: 10,286 | ext: py | lang: Python
max_stars: repo_path trax/rl/policy_tasks.py | repo_name stephenjfox/trax | head_hexsha 918b1ce2ad63a24cb957ebc8e8ea0af1ee272666 | licenses ["Apache-2.0"] | count null | stars_event_min/max null / null
max_issues: repo_path trax/rl/policy_tasks.py | repo_name stephenjfox/trax | head_hexsha 918b1ce2ad63a24cb957ebc8e8ea0af1ee272666 | licenses ["Apache-2.0"] | count null | issues_event_min/max null / null
max_forks: repo_path trax/rl/policy_tasks.py | repo_name stephenjfox/trax | head_hexsha 918b1ce2ad63a24cb957ebc8e8ea0af1ee272666 | licenses ["Apache-2.0"] | count null | forks_event_min/max null / null
content:
# coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Policy network training tasks.
Policy tasks encapsulate the training process of a policy network into a simple,
replaceable component. To implement a policy-based Agent using policy tasks:
1. Subclass the base Agent class.
2. In __init__(), initialize the policy training and evaluation tasks, and
a trax.supervised.training.Loop instance using them.
3. In train_epoch(), call the Loop to train the network.
4. In policy(), call network_policy() defined in this module.
"""
import numpy as np
from trax import layers as tl
from trax.fastmath import numpy as jnp
from trax.rl import distributions
from trax.supervised import training
class PolicyTrainTask(training.TrainTask):
"""Task for policy training.
Trains the policy based on action advantages.
"""
def __init__(
self,
trajectory_batch_stream,
optimizer,
lr_schedule,
policy_distribution,
advantage_estimator,
value_fn,
weight_fn=(lambda x: x),
advantage_normalization=True,
advantage_normalization_epsilon=1e-5,
head_selector=(),
):
"""Initializes PolicyTrainTask.
Args:
trajectory_batch_stream: Generator of trax.rl.task.TimeStepBatch.
optimizer: Optimizer for network training.
lr_schedule: Learning rate schedule for network training.
policy_distribution: Distribution over actions.
advantage_estimator: Function
(rewards, returns, values, dones) -> advantages, created by one of the
functions from trax.rl.advantages.
value_fn: Function TimeStepBatch -> array (batch_size, seq_len)
calculating the baseline for advantage calculation. Can be used to
implement actor-critic algorithms, by substituting a call to the value
network as value_fn.
weight_fn: Function float -> float to apply to advantages. Examples:
- A2C: weight_fn = id
- AWR: weight_fn = exp
- behavioral cloning: weight_fn(_) = 1
advantage_normalization: Whether to normalize advantages.
      advantage_normalization_epsilon: Epsilon to use when normalizing
advantages.
head_selector: Layer to apply to the network output to select the value
head. Only needed in multitask training. By default, use a no-op layer,
signified by an empty sequence of layers, ().
"""
self.trajectory_batch_stream = trajectory_batch_stream
self._value_fn = value_fn
self._advantage_estimator = advantage_estimator
self._weight_fn = weight_fn
self._advantage_normalization = advantage_normalization
self._advantage_normalization_epsilon = advantage_normalization_epsilon
self.policy_distribution = policy_distribution
labeled_data = map(self.policy_batch, trajectory_batch_stream)
sample_batch = self.policy_batch(
next(trajectory_batch_stream), shape_only=True
)
loss_layer = distributions.LogLoss(distribution=policy_distribution)
loss_layer = tl.Serial(head_selector, loss_layer)
super().__init__(
labeled_data, loss_layer, optimizer,
sample_batch=sample_batch,
lr_schedule=lr_schedule,
loss_name='policy_loss',
)
def calculate_advantages(self, trajectory_batch, shape_only=False):
(batch_size, seq_len) = trajectory_batch.observation.shape[:2]
assert trajectory_batch.action.shape[:2] == (batch_size, seq_len)
assert trajectory_batch.mask.shape == (batch_size, seq_len)
if shape_only:
values = np.zeros((batch_size, seq_len))
else:
# Compute the value, i.e. baseline in advantage computation.
values = np.array(self._value_fn(trajectory_batch))
assert values.shape == (batch_size, seq_len)
# Compute the advantages using the chosen advantage estimator.
return self._advantage_estimator(
rewards=trajectory_batch.reward,
returns=trajectory_batch.return_,
dones=trajectory_batch.done,
values=values,
discount_mask=trajectory_batch.env_info.discount_mask,
)
def calculate_weights(self, advantages):
"""Calculates advantage-based weights for log loss in policy training."""
if self._advantage_normalization:
# Normalize advantages.
advantages -= jnp.mean(advantages)
advantage_std = jnp.std(advantages)
advantages /= advantage_std + self._advantage_normalization_epsilon
weights = self._weight_fn(advantages)
assert weights.shape == advantages.shape
return weights
def trim_and_mask_batch(self, trajectory_batch, advantages):
(batch_size, seq_len) = trajectory_batch.observation.shape[:2]
adv_seq_len = advantages.shape[1]
# The advantage sequence should be shorter by the margin. Margin is the
# number of timesteps added to the trajectory slice, to make the advantage
# estimation more accurate. adv_seq_len determines the length of the target
# sequence, and is later used to trim the inputs and targets in the training
# batch. Example for margin 2:
# observations.shape == (4, 5, 6)
# rewards.shape == values.shape == (4, 5)
# advantages.shape == (4, 3)
assert adv_seq_len <= seq_len
assert advantages.shape == (batch_size, adv_seq_len)
# Trim observations, actions and mask to match the target length.
observations = trajectory_batch.observation[:, :adv_seq_len]
actions = trajectory_batch.action[:, :adv_seq_len]
mask = trajectory_batch.mask[:, :adv_seq_len]
# Apply the control mask, so we only compute policy loss for controllable
# timesteps.
mask *= trajectory_batch.env_info.control_mask[:, :adv_seq_len]
return (observations, actions, mask)
def policy_batch(self, trajectory_batch, shape_only=False):
"""Computes a policy training batch based on a trajectory batch.
Args:
trajectory_batch: trax.rl.task.TimeStepBatch with a batch of trajectory
slices. Elements should have shape (batch_size, seq_len, ...).
shape_only: Whether to return dummy zero arrays of correct shape. Useful
for initializing models.
Returns:
Triple (observations, actions, weights), where weights are the
advantage-based weights for the policy loss. Shapes:
- observations: (batch_size, seq_len) + observation_shape
- actions: (batch_size, seq_len) + action_shape
- weights: (batch_size, seq_len)
"""
advantages = self.calculate_advantages(
trajectory_batch, shape_only=shape_only
)
(observations, actions, mask) = self.trim_and_mask_batch(
trajectory_batch, advantages
)
weights = self.calculate_weights(advantages) * mask / jnp.sum(mask)
return (observations, actions, weights)
class PolicyEvalTask(training.EvalTask):
"""Task for policy evaluation."""
def __init__(self, train_task, n_eval_batches=1, head_selector=()):
"""Initializes PolicyEvalTask.
Args:
train_task: PolicyTrainTask used to train the policy network.
n_eval_batches: Number of batches per evaluation.
head_selector: Layer to apply to the network output to select the value
head. Only needed in multitask training.
"""
self._train_task = train_task
self._policy_dist = train_task.policy_distribution
labeled_data = map(self._eval_batch, train_task.trajectory_batch_stream)
sample_batch = self._eval_batch(
next(train_task.trajectory_batch_stream), shape_only=True
)
# TODO(pkozakowski): Implement more metrics.
metrics = {
'policy_entropy': self.entropy_metric,
}
metrics.update(self.advantage_metrics)
metrics.update(self.weight_metrics)
metrics = {
name: tl.Serial(head_selector, metric)
for (name, metric) in metrics.items()
}
(metric_names, metric_layers) = zip(*metrics.items())
# Select the appropriate head for evaluation.
super().__init__(
labeled_data, metric_layers,
sample_batch=sample_batch,
metric_names=metric_names,
n_eval_batches=n_eval_batches,
)
def _eval_batch(self, trajectory_batch, shape_only=False):
advantages = self._train_task.calculate_advantages(
trajectory_batch, shape_only=shape_only
)
(observations, actions, mask) = self._train_task.trim_and_mask_batch(
trajectory_batch, advantages
)
return (observations, actions, advantages, mask)
@property
def entropy_metric(self):
def Entropy(policy_inputs, actions, advantages, mask):
del actions, advantages, mask
return jnp.mean(self._policy_dist.entropy(policy_inputs))
return tl.Fn('Entropy', Entropy)
@property
def advantage_metrics(self):
def make_metric(aggregate_fn): # pylint: disable=invalid-name
def AdvantageMetric(policy_inputs, actions, advantages, mask):
del policy_inputs, actions, mask
return aggregate_fn(advantages)
return tl.Fn('AdvantageMetric', AdvantageMetric)
return {
'advantage_' + name: make_metric(fn) for (name, fn) in [
('mean', jnp.mean),
('std', jnp.std),
]
}
@property
def weight_metrics(self):
def make_metric(aggregate_fn): # pylint: disable=invalid-name
def WeightMetric(policy_inputs, actions, advantages, mask):
del policy_inputs, actions, mask
weights = self._train_task.calculate_weights(advantages)
return aggregate_fn(weights)
return tl.Fn('WeightMetric', WeightMetric)
return { # pylint: disable=g-complex-comprehension
'weight_' + name: make_metric(fn) for (name, fn) in [
('mean', jnp.mean),
('std', jnp.std),
('min', jnp.min),
('max', jnp.max),
]
}
avg_line_length: 39.110266 | max_line_length: 80 | alphanum_fraction: 0.710286

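The module docstring of trax/rl/policy_tasks.py above gives a four-step recipe for building a policy-based Agent from these tasks. A minimal sketch of that recipe follows; the Agent base class, the RL-task interface (`trajectory_batch_stream`) and the exact `network_policy()` signature are assumptions for illustration, not taken from the file.

```python
# Sketch of the four-step recipe from the policy_tasks.py module docstring.
# The Agent base class, the RL-task interface and network_policy() are
# assumptions here; PolicyTrainTask, PolicyEvalTask and training.Loop come
# from the file above and from trax.supervised.training.
from trax.rl import policy_tasks
from trax.supervised import training


class SketchPolicyAgent:  # Step 1: in trax this would subclass the Agent base class.

  def __init__(self, rl_task, model, optimizer, lr_schedule,
               policy_distribution, advantage_estimator, value_fn,
               batch_size=32, steps_per_epoch=100):
    # Step 2: build the policy train/eval tasks and a training Loop around them.
    self._train_task = policy_tasks.PolicyTrainTask(
        rl_task.trajectory_batch_stream(batch_size),  # assumed RL-task method
        optimizer, lr_schedule, policy_distribution,
        advantage_estimator, value_fn,
    )
    self._eval_task = policy_tasks.PolicyEvalTask(self._train_task)
    self._steps_per_epoch = steps_per_epoch
    self._loop = training.Loop(
        model, tasks=[self._train_task], eval_tasks=[self._eval_task],
    )

  def train_epoch(self):
    # Step 3: the Loop drives network training.
    self._loop.run(self._steps_per_epoch)

  def policy(self, trajectory):
    # Step 4: delegate to network_policy() defined in policy_tasks
    # (its signature is not shown above, so this is left as a placeholder).
    raise NotImplementedError
```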
hexsha: 6ba208341be1b5e7243d0d08c411f43ee9b1e220 | size: 295 | ext: py | lang: Python
max_stars: repo_path icinga_notificator/functions/signals.py | repo_name pershinghar/icinga-notificator | head_hexsha 44ca598157d93f9a5410844acaefdec0bb1f11cf | licenses ["MIT"] | count 1 | stars_event_min/max 2019-10-05T15:30:14.000Z / 2019-10-05T15:30:14.000Z
max_issues: repo_path icinga_notificator/functions/signals.py | repo_name pershinghar/icinga-notificator | head_hexsha 44ca598157d93f9a5410844acaefdec0bb1f11cf | licenses ["MIT"] | count null | issues_event_min/max null / null
max_forks: repo_path icinga_notificator/functions/signals.py | repo_name pershinghar/icinga-notificator | head_hexsha 44ca598157d93f9a5410844acaefdec0bb1f11cf | licenses ["MIT"] | count 1 | forks_event_min/max 2020-08-19T13:32:40.000Z / 2020-08-19T13:32:40.000Z
content:
#!/usr/bin/env python3
#
# Signal functions used to control script
#
import logging
def signalHandler(sig, frame):
""" Simple function to handle singnals """
# can be improved
logging.debug("%s detected", sig)
logging.info("icinga-notificator exited (%s)", sig)
exit(0)
avg_line_length: 19.666667 | max_line_length: 55 | alphanum_fraction: 0.667797

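The handler above only takes effect once it is registered for specific signals. A typical wiring looks like the sketch below; the import path follows the file's location, while the surrounding entry point is an assumption.

```python
# Sketch: registering signalHandler for the signals the notificator should
# exit on. The surrounding entry point is assumed, not shown above.
import signal

from icinga_notificator.functions.signals import signalHandler

signal.signal(signal.SIGINT, signalHandler)   # Ctrl+C
signal.signal(signal.SIGTERM, signalHandler)  # kill / systemctl stop
```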
hexsha: 01630179b7428c4a319c851392b99f29c9884c7c | size: 3,019 | ext: py | lang: Python
max_stars: repo_path src/pretix/plugins/sendmail/urls.py | repo_name Janfred/pretix | head_hexsha bdd9751f0e5c440d1c8b56c933db2288e4014f4c | licenses ["Apache-2.0"] | count 1,248 | stars_event_min/max 2015-04-24T13:32:06.000Z / 2022-03-29T07:01:36.000Z
max_issues: repo_path src/pretix/plugins/sendmail/urls.py | repo_name Janfred/pretix | head_hexsha bdd9751f0e5c440d1c8b56c933db2288e4014f4c | licenses ["Apache-2.0"] | count 2,113 | issues_event_min/max 2015-02-18T18:58:16.000Z / 2022-03-31T11:12:32.000Z
max_forks: repo_path src/pretix/plugins/sendmail/urls.py | repo_name Janfred/pretix | head_hexsha bdd9751f0e5c440d1c8b56c933db2288e4014f4c | licenses ["Apache-2.0"] | count 453 | forks_event_min/max 2015-05-13T09:29:06.000Z / 2022-03-24T13:39:16.000Z
content:
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: FlaviaBastos
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
from django.conf.urls import re_path
from . import views
urlpatterns = [
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/sendmail/$', views.SenderView.as_view(),
name='send'),
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/sendmail/history/', views.EmailHistoryView.as_view(),
name='history'),
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/sendmail/rules/create', views.CreateRule.as_view(),
name='rule.create'),
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/sendmail/rules/(?P<rule>[^/]+)/delete',
views.DeleteRule.as_view(),
name='rule.delete'),
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/sendmail/rules/(?P<rule>[^/]+)',
views.UpdateRule.as_view(),
name='rule.update'),
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/sendmail/rules', views.ListRules.as_view(),
name='rule.list'),
]
avg_line_length: 54.890909 | max_line_length: 120 | alphanum_fraction: 0.712488

hexsha: a70f3a74a4dec848ed9c05f846a869080ec1a7df | size: 18,308 | ext: py | lang: Python
max_stars: repo_path meta-refkit-core/lib/licensecheck.py | repo_name kraj/intel-iot-refkit | head_hexsha 04cd5afec0c41deeb5e1a48b43a0a31e708295c1 | licenses ["MIT"] | count 36 | stars_event_min/max 2017-02-20T04:04:28.000Z / 2022-02-17T05:36:33.000Z
max_issues: repo_path meta-refkit-core/lib/licensecheck.py | repo_name kraj/intel-iot-refkit | head_hexsha 04cd5afec0c41deeb5e1a48b43a0a31e708295c1 | licenses ["MIT"] | count 284 | issues_event_min/max 2017-02-06T08:51:52.000Z / 2021-11-03T16:52:16.000Z
max_forks: repo_path meta-refkit-core/lib/licensecheck.py | repo_name kraj/intel-iot-refkit | head_hexsha 04cd5afec0c41deeb5e1a48b43a0a31e708295c1 | licenses ["MIT"] | count 65 | forks_event_min/max 2017-02-03T12:36:16.000Z / 2021-02-18T11:00:46.000Z
content:
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# This module offers an API to find out if an image contains any licensing
# conflicts if certain license classes (typically (LGPLv3)) are not allowed.
#
# There are several problems which we are trying to solve:
#
# 1. Bitbake doesn't allow us to find out runtime dependencies without
# building the whole image. We can't use the tinfoil API for this reason, but
# instead we are using oe-pkgdata-util for finding out package (not recipe)
# licenses and runtime dependencies. See bug
# https://bugzilla.yoctoproject.org/show_bug.cgi?id=10932 for discussion.
#
# 2. Many components are dual-licensed, for example using LGPLv3 and GPLv2.
# This means that if we have a non-GPLv3 configuration, we must use the
# component under the other license (in this case GPLv2). Many licenses are
# not GPLv2-compatible however. This test tries to propagate the licenses
# through the runtime dependency tree to find out if any usable licenses
# remain on top level.
#
# 3. Runtime dependencies do not always propagate licenses. GPLv2 and GPLv3
# are propagated by linking, but many runtime dependencies are for D-Bus APIs
# and process execution. This test uses a whitelist file to find out which
# components can be depended against without having to consider their licenses
# for the top-level component.
#
# Important: This test does by no means guarantee that there is license
# compliance. Having the license compatibility rules in a map is not precise
# enough, and many licenses are just omitted. This test is just meant to help
# detect obvious image problems, and it might not do even that in all cases.
# Especially the dual-licensing rules are not very accurate due to the way
# Bitbake recipes express dual-licensing and multi-licensing.
#
# AUTHORS
# Ismo Puustinen <ismo.puustinen@intel.com>
import os
import unittest
import re
import glob
from shutil import rmtree, copy
import subprocess
class LicenseNode():
    # Node licenses mean the licenses which can alternatively be used for
# using the software. In essence, "dual-licensing". The more complex cases,
# where the different source parts can be licensed with different licenses,
# are not handled.
def __init__(self, licenses, children, name=""):
self.licenses = set(licenses)
self.propagated = []
self.children = children
self.name = name
def __str__(self):
return self.name + ": " + str(list(self.licenses)) + " -> " + str(list(self.propagated))
def printTree(self, indent=0):
print(indent * "\t" + str(self))
for child in self.children:
child.printTree(indent + 1)
class LicenseCheck():
# This table contains the "allowed" licenses. It means that a package
# licensed with the table "key" can link against packages licensed with
# table "values", and still retain the original outbound license. For
# instance, if GPLv2 code links against a BSD3-licensed component, the
    # outbound license is still allowed to be GPLv2.
# Assumption: GCC exception criteria are always fulfilled.
allowed = {
"MIT" : [ "MIT", "LGPLv2", "LGPLv2.1", "LGPLv3", "BSD3", "Zlib", "PD", "Unicode", "MPL-2.0", "PSFv2", "GPL-3.0-with-GCC-exception", "Apache-2.0", "bzip2" ],
"Apache-2.0" : [ "Apache-2.0", "MIT", "LGPLv2", "LGPLv2.1", "LGPLv3", "BSD3", "Zlib", "PD", "Unicode", "MPL-2.0", "PSFv2", "GPL-3.0-with-GCC-exception", "bzip2" ],
"BSD3" : [ "BSD3", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "Zlib", "PD", "Unicode", "MPL-2.0", "PSFv2", "GPL-3.0-with-GCC-exception", "Apache-2.0", "bzip2" ],
"LGPLv2" : [ "LGPLv2", "LGPLv2.1", "LGPLv3", "BSD3", "MIT", "Zlib", "PD", "Unicode", "PSFv2", "GPL-3.0-with-GCC-exception", "Apache-2.0", "bzip2" ],
"LGPLv2.1" : [ "LGPLv2.1", "LGPLv2", "LGPLv3", "BSD3", "MIT", "Zlib", "PD", "Unicode", "PSFv2", "GPL-3.0-with-GCC-exception", "Apache-2.0", "bzip2" ],
"LGPLv3" : [ "LGPLv2", "LGPLv2.1", "LGPLv3", "BSD3", "MIT", "Zlib", "PD", "Unicode", "PSFv2", "GPL-3.0-with-GCC-exception", "Apache-2.0", "bzip2" ],
"GPLv2" : [ "GPLv2", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "BSD3", "Zlib", "PD", "Unicode", "PSFv2", "GPL-3.0-with-GCC-exception", "bzip2" ],
"GPLv3" : [ "GPLv3", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "BSD3", "Zlib", "PD", "Unicode", "PSFv2", "GPL-3.0-with-GCC-exception", "Apache-2.0", "bzip2" ],
"openssl" : [ "openssl", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "BSD3", "Zlib", "PD", "Unicode", "MPL-2.0", "PSFv2", "GPL-3.0-with-GCC-exception", "Apache-2.0", "bzip2" ],
"Zlib" : [ "Zlib", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "BSD3", "Zlib", "PD", "Unicode", "MPL-2.0", "PSFv2", "GPL-3.0-with-GCC-exception", "Apache-2.0", "bzip2" ],
"AFL-2" : [ "AFL-2", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "BSD3", "Zlib", "PD", "Unicode", "MPL-2.0", "PSFv2", "GPL-3.0-with-GCC-exception", "Apache-2.0", "bzip2" ],
"PD" : [ "PD", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "BSD3", "Zlib", "Unicode", "MPL-2.0", "PSFv2", "GPL-3.0-with-GCC-exception", "Apache-2.0", "bzip2" ],
"MPL-2.0" : [ "MPL-2.0", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "BSD3", "Zlib", "PD", "Unicode", "PSFv2", "GPL-3.0-with-GCC-exception", "Apache-2.0", "bzip2" ],
"PSFv2" : [ "PSFv2", "MPL-2.0", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "BSD3", "Zlib", "PD", "Unicode", "GPL-3.0-with-GCC-exception", "Apache-2.0", "openssl", "bzip2" ],
"GPL-3.0-with-GCC-exception" : [ "GPL-3.0-with-GCC-exception", "PSFv2", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "BSD3", "Zlib", "PD", "Unicode", "Apache-2.0", "bzip2" ],
"bzip2" : [ "bzip2", "GPL-3.0-with-GCC-exception", "PSFv2", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "BSD3", "Zlib", "PD", "Unicode", "Apache-2.0", "MPL-2.0" ],
"Unicode" : [ "Unicode", "bzip2", "GPL-3.0-with-GCC-exception", "PSFv2", "LGPLv2", "LGPLv2.1", "LGPLv3", "MIT", "BSD3", "Zlib", "PD", "Apache-2.0", "MPL-2.0" ],
}
# Some combinations are explicitly disallowed for linking. For example,
# GPLv2 code can't be linked against GPLv3 code.
disallowed = {
"GPLv2" : [ "GPLv3", "openssl", "AFL-2" ],
"GPLv3" : [ "GPLv2", "openssl", "AFL-2" ],
"openssl" : [ "GPLv2", "GPLv3" ],
"AFL-2" : [ "GPLv2", "GPLv3" ],
"Apache-2.0" : [ "GPLv2" ],
}
# However, if (for example) MIT-licensed code links against a GPLv2-licensed
# library, the outbound license "degrades" to GPLv2. This is the default
# case if the result is not found in allowed or disallowed tables. Later,
# consider changing this to have an explicit degrade table.
# A conversion table for "or later" clauses in recipes and other
# substitutions which might be safely done.
orlater = {
"GPLv2+" : [ "GPLv2", "GPLv3" ],
"GPLv2.0+" : [ "GPLv2", "GPLv3" ],
"GPL-2.0+" : [ "GPLv2", "GPLv3" ],
"GPLv3+" : [ "GPLv3" ],
"AGPL-3.0" : [ "GPLv3" ],
"GPL-3.0-with-autoconf-exception" : [ "GPLv3" ],
"LGPLv2+" : [ "LGPLv2", "LGPLv2.1", "LGPLv3" ],
"LGPLv2.1+" : [ "LGPLv2.1", "LGPLv3" ],
"LGPL-2.1+" : [ "LGPLv2.1", "LGPLv3" ],
"LGPLv3+" : [ "LGPLv3" ],
"MIT-style" : [ "MIT" ],
"ICU" : [ "Unicode" ],
"BSD" : [ "BSD3" ],
"BSD-3-Clause" : [ "BSD3" ],
"BSD-2-Clause" : [ "BSD3" ], # license compatibility is close enough
"Libpng" : [ "Zlib" ],
}
    # The oe-pkgdata-util lookups are very slow. Cache the returned values.
packageCache = {}
recipeCache = {}
rdepsCache = {}
licenseCache = {}
def __init__(self, whitelistFile=None, prohibited=[]):
"""Initialize the licensecheck object.
A LicenseCheck object is used to analyse runtime licensing of
packages. In order to use this class, you have to first build
the package you want to inspect. This is due to the limitations
in how BitBake handles runtime dependencies.
The 'whitelistFile' parameter contains a filename, which points
to a file containing a '\\n'-separated list of packages which
        can be excluded from the runtime dependency tree, typically for
the reason that they are known not to propagate licenses for the
components which have a runtime dependency on them. This
happens, for example, when a component uses a D-Bus API or execs
another process. The 'prohibited' parameter contains a list of
licenses which are prohibited for any reason. For example, to
prevent (L)GPLv3 licenses, set prohibited = ["GPLv3", "LGPLv3"].
"""
self.whiteList = []
self.prohibited = prohibited
if whitelistFile:
with open(whitelistFile) as f:
lines = f.readlines()
for line in lines:
self.whiteList.append(line.strip())
def _parseLicenseString(self, s):
# Replace & with |. The reasoning is that typically for projects with
# multiple licenses the most liberal licenses are used for libraries.
# This is of course not certain, but a good approximation.
s = s.replace("&", "|")
# Remove "(" and ")", because we don't deal with complex licensing
# schemes.
s = s.replace("(", "").replace(")", "")
# Split the string into a list. Remove duplicates.
licenses = set([l.strip() for l in s.split("|")])
# Convert the "+"-licenses to real ones.
finalLicenses = []
for l in licenses:
if l in LicenseCheck.orlater:
finalLicenses += LicenseCheck.orlater[l]
else:
finalLicenses.append(l)
return finalLicenses
def _calculateConstraints(self, constraints, licenses, degradedLicenses):
# Every sublist in constraints list is how a single package dependency
# is licensed. Find the least restrictive outbound licenses for this
# package.
# Go through all the licenses that are compatible with the top package
# and see if all dependencies could be used from code licensed with that
# license.
if len(licenses) == 0 or len(licenses - degradedLicenses) == 0:
return set()
candidateLicenses = licenses.copy()
for license in licenses:
if license in LicenseCheck.disallowed:
bad = LicenseCheck.disallowed[license]
else:
bad = []
if license in LicenseCheck.allowed:
good = LicenseCheck.allowed[license]
else:
good = []
for dependencyLicenses in constraints:
possible = False
compatibleLicenses = set()
for dependencyLicense in dependencyLicenses:
if dependencyLicense in bad:
# Can't use this license, see other top-level licenses
# for this dependency.
continue
elif dependencyLicense in good:
possible = True
# This license can be used as-is. Cancel previous
# degradations.
compatibleLicenses.clear()
break
else:
possible = True
# We can possibly degrade to this license. Save it for
# next round if the top-level license candidate hasn't
# been already degraded.
if not license in degradedLicenses:
compatibleLicenses.add(dependencyLicense)
# Can we handle this dependency with this top-level license?
if not possible:
if license in candidateLicenses:
candidateLicenses.remove(license)
elif len(compatibleLicenses) > 0:
# We need to degrade our top-level license into something
# that is supported by the dependency license. Then we need
# to go through the dependencies again to see if this
# license fits. The algorithm doesn't yet support finding
# "common ancestor" licenses, but instead we just degrade to
# the licenses that the dependency has and are compatible.
candidateLicenses = candidateLicenses.union(compatibleLicenses)
degradedLicenses.add(license)
# Else we can directly use this license.
if candidateLicenses == licenses:
# The license set didn't change and is stable. We can go with it.
return licenses - degradedLicenses - set(self.prohibited)
# Else check the dependencies again with the new candidateLicenses. This
# is guaranteed to finish if the license degradation graph is acyclical.
return self._calculateConstraints(constraints, candidateLicenses, degradedLicenses)
def _getRecipe(self, package):
if package in LicenseCheck.packageCache:
return LicenseCheck.packageCache[package]
rRecipeProp = subprocess.check_output(["oe-pkgdata-util", "lookup-recipe", package]).decode("utf-8").strip()
LicenseCheck.packageCache[package] = rRecipeProp
return rRecipeProp
def _getPackage(self, recipe):
rPackageProp = None
if recipe in LicenseCheck.recipeCache:
return LicenseCheck.recipeCache[recipe]
try:
rPackageProp = subprocess.check_output(["oe-pkgdata-util", "lookup-pkg", recipe]).decode("utf-8").strip()
except subprocess.CalledProcessError:
print("'oe-pkgdata-util lookup-pkg %s' failed!" % recipe)
LicenseCheck.recipeCache[recipe] = rPackageProp
return rPackageProp
def _getRdeps(self, package):
if package in LicenseCheck.rdepsCache:
return LicenseCheck.rdepsCache[package]
rundepsProp = subprocess.check_output(["oe-pkgdata-util", "read-value", "RDEPENDS", package]).decode("utf-8")
rundeps = [token for token in rundepsProp.strip().split() if not token[0] == "(" and not token[-1] == ")"]
rRundeps = filter(None, [self._getPackage(package) for package in rundeps])
LicenseCheck.rdepsCache[package] = rRundeps
return rRundeps
def _getLicenses(self, package):
if package in LicenseCheck.licenseCache:
return LicenseCheck.licenseCache[package]
licenseProp = subprocess.check_output(["oe-pkgdata-util", "read-value", "LICENSE", package]).decode("utf-8")
LicenseCheck.licenseCache[package] = licenseProp
return licenseProp
def _findChildren(self, name, chain=[]):
results = []
rundeps = self._getRdeps(name)
for d in rundeps:
recipe = self._getRecipe(d)
if recipe in self.whiteList:
# Do not process whitelisted dependencies.
continue
if d in chain:
# Take away possible loops.
continue
# print(str(chain) + " -> " + d + ": " + str(rundeps))
licenses = self._parseLicenseString(self._getLicenses(d))
children = self._findChildren(d, chain + [d])
childNode = LicenseNode(licenses, children, d)
results.append(childNode)
return results
# Public API methods: propagate, createTree and testPackage.
def propagate(self, node):
"""Propagate licenses for a runtime dependency tree.
Set value to 'propagated' for every node in the runtime
dependency tree (with parameter 'node' as root). The value will
be the calculated set of possible runtime licenses. If a value
is empty after this call, the runtime licensing script was not
able to find a suitable license for this package (or one of its
dependencies). Missing the license from the licence maps is a
common reason for this.
"""
childNodes = node.children
if len(childNodes) == 0:
# Push local constraints up!
# If some licenses are prohibited, just don't propagate them.
node.propagated = node.licenses.copy()
for p in self.prohibited:
if p in node.propagated:
node.propagated.remove(p)
return node.propagated.copy()
constraints = []
for childNode in childNodes:
constraints.append(self.propagate(childNode))
cs = self._calculateConstraints(constraints, node.licenses, set())
node.propagated = cs.copy()
return cs
def createTree(self, package):
"""Create a runtime dependency tree for a package.
Return a runtime dependency tree for package in 'package'
parameter.
"""
rundeps = self._getRdeps(package)
licenses = self._parseLicenseString(self._getLicenses(package))
children = self._findChildren(package, [package])
root = LicenseNode(licenses, children, package)
return root
def testPackage(self, package):
"""Test whether a package passes the license check.
Return True if the package in 'package' parameter passes the
license check. Return False if the license check fails. This is
a helper method using 'createTree' and 'propagate' methods.
"""
tree = self.createTree(package)
if tree:
licenses = self.propagate(tree)
if licenses:
return True
else:
# did not find a suitable license, print the tree for debugging
print("No suitable license found for %s:" % package)
tree.printTree()
else:
print("No such package (%s)" % package)
return False
avg_line_length: 45.77 | max_line_length: 183 | alphanum_fraction: 0.600448

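licensecheck.py above exposes three public methods (createTree, propagate and testPackage) and shells out to oe-pkgdata-util, so it only works inside a built Yocto/OpenEmbedded environment. A hedged usage sketch, with the whitelist filename and package name invented for illustration:

```python
# Sketch: exercising the public LicenseCheck API from licensecheck.py above.
# The whitelist filename and the package name are made up; oe-pkgdata-util
# must be on PATH (built Yocto/OE environment) for these calls to succeed.
from licensecheck import LicenseCheck

checker = LicenseCheck(
    whitelistFile="runtime-deps-whitelist.txt",  # one whitelisted package per line
    prohibited=["GPLv3", "LGPLv3"],              # disallow (L)GPLv3 outbound licenses
)

# High-level check: True if at least one usable outbound license survives
# propagation through the runtime dependency tree.
print(checker.testPackage("busybox"))

# Lower-level: build the tree, propagate licenses, and inspect the result.
tree = checker.createTree("busybox")
possible = checker.propagate(tree)
tree.printTree()
print("possible outbound licenses:", possible)
```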
hexsha: 318fc98a000ad1cd8ce2fb00a6b6b136a0ba16ad | size: 4,112 | ext: py | lang: Python
max_stars: repo_path tests/build/test_build_case.py | repo_name szilvajuhos/scout | head_hexsha 2f4a03fb3192a57c99fd62be626e8c22051e81af | licenses ["BSD-3-Clause"] | count null | stars_event_min/max null / null
max_issues: repo_path tests/build/test_build_case.py | repo_name szilvajuhos/scout | head_hexsha 2f4a03fb3192a57c99fd62be626e8c22051e81af | licenses ["BSD-3-Clause"] | count null | issues_event_min/max null / null
max_forks: repo_path tests/build/test_build_case.py | repo_name szilvajuhos/scout | head_hexsha 2f4a03fb3192a57c99fd62be626e8c22051e81af | licenses ["BSD-3-Clause"] | count null | forks_event_min/max null / null
content:
# -*- coding: utf-8 -*-
import pytest
from pprint import pprint as pp
from scout.exceptions import (PedigreeError, ConfigError, IntegrityError)
from scout.build import build_case
def test_build_case(parsed_case, adapter, institute_obj, dummypanel_obj):
adapter.institute_collection.insert_one(institute_obj)
adapter.panel_collection.insert_one(dummypanel_obj)
# GIVEN a parsed case
    # WHEN building a case model
case_obj = build_case(parsed_case, adapter)
# THEN make sure it is built in the proper way
assert case_obj['_id'] == parsed_case['case_id']
assert case_obj['display_name'] == parsed_case['display_name']
assert case_obj['owner'] == parsed_case['owner']
assert case_obj['collaborators'] == parsed_case['collaborators']
assert len(case_obj['individuals']) == len(parsed_case['individuals'])
assert case_obj['synopsis'] == ''
assert case_obj['status'] == 'inactive'
assert case_obj['is_research'] == False
assert case_obj['research_requested'] == False
assert case_obj['rerun_requested'] == False
assert case_obj['analysis_date'] == parsed_case['analysis_date']
assert len(case_obj['dynamic_gene_list']) == 0
assert case_obj['genome_build'] == parsed_case['genome_build']
assert case_obj['rank_model_version'] == parsed_case['rank_model_version']
assert case_obj['rank_score_threshold'] == parsed_case['rank_score_threshold']
assert case_obj['sv_rank_model_version'] == parsed_case['sv_rank_model_version']
assert case_obj['madeline_info'] == parsed_case['madeline_info']
assert case_obj['delivery_report'] == parsed_case['delivery_report']
for vcf in case_obj['vcf_files']:
assert vcf in parsed_case['vcf_files']
# assert case_obj['diagnosis_phenotypes'] == []
# assert case_obj['diagnosis_genes'] == []
if (parsed_case['vcf_files'].get('vcf_sv') or parsed_case['vcf_files'].get('vcf_sv_research')):
assert case_obj['has_svvariants'] == True
else:
assert case_obj['has_svvariants'] == False
def test_build_minimal_case(adapter, institute_obj):
adapter.institute_collection.insert_one(institute_obj)
# GIVEN a case without case id
case_info = {
'case_id': 'test-case',
'owner': 'cust000'
}
# WHEN case is built
case_obj = build_case(case_info, adapter)
# THEN assert that it worked
assert case_obj['_id'] == case_info['case_id']
def test_build_case_no_case_id(adapter):
# GIVEN a case without case id
case_info = {}
# WHEN case is built
# THEN a PedigreeError should be raised
with pytest.raises(Exception):
build_case(case_info, adapter)
def test_build_case_no_display_name(adapter, institute_obj):
adapter.institute_collection.insert_one(institute_obj)
# GIVEN a case without case id
case_info = {
'case_id': 'test-case',
'owner': 'cust000'
}
# WHEN case is built
case_obj = build_case(case_info, adapter)
# THEN assert that display_name was set to case_id
assert case_obj['display_name'] == case_info['case_id']
def test_build_case_no_owner(adapter, institute_obj):
adapter.institute_collection.insert_one(institute_obj)
# GIVEN a case where owner does not exist in the database
case_info = {
'case_id': 'test-case',
'owner': 'cust001'
}
# WHEN case is built
    # THEN an IntegrityError should be raised since the owner has to exist in the database
with pytest.raises(IntegrityError):
case_obj = build_case(case_info, adapter)
def test_build_case_non_existing_owner(adapter, institute_obj):
adapter.institute_collection.insert_one(institute_obj)
# GIVEN a case without owner
case_info = {
'case_id': 'test-case',
}
# WHEN case is built
with pytest.raises(ConfigError):
        # THEN a ConfigError should be raised since a case has to have an owner
case_obj = build_case(case_info, adapter)
# def test_build_case_config(parsed_case):
# case_obj = build_case(parsed_case)
# print(case_obj.to_json())
# assert False
avg_line_length: 34.554622 | max_line_length: 99 | alphanum_fraction: 0.705739

hexsha: 17fa3f0d77b2bfe2c6e37bfa862f80d4dee94911 | size: 33,077 | ext: py | lang: Python
max_stars: repo_path packages/python/plotly/plotly/validators/densitymapbox/__init__.py | repo_name sgn/plotly.py | head_hexsha 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | licenses ["MIT"] | count null | stars_event_min/max null / null
max_issues: repo_path packages/python/plotly/plotly/validators/densitymapbox/__init__.py | repo_name sgn/plotly.py | head_hexsha 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | licenses ["MIT"] | count 5 | issues_event_min/max 2020-03-19T04:33:53.000Z / 2020-05-03T01:53:02.000Z
max_forks: repo_path packages/python/plotly/plotly/validators/densitymapbox/__init__.py | repo_name sgn/plotly.py | head_hexsha 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | licenses ["MIT"] | count 4 | forks_event_min/max 2020-02-15T22:10:00.000Z / 2020-02-16T15:54:15.000Z
content:
import _plotly_utils.basevalidators
class ZsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="zsrc", parent_name="densitymapbox", **kwargs):
super(ZsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ZminValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="zmin", parent_name="densitymapbox", **kwargs):
super(ZminValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"zauto": False}),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ZmidValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="zmid", parent_name="densitymapbox", **kwargs):
super(ZmidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ZmaxValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="zmax", parent_name="densitymapbox", **kwargs):
super(ZmaxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"zauto": False}),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ZautoValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="zauto", parent_name="densitymapbox", **kwargs):
super(ZautoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="z", parent_name="densitymapbox", **kwargs):
super(ZValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="visible", parent_name="densitymapbox", **kwargs):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", [True, False, "legendonly"]),
**kwargs
)
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="uirevision", parent_name="densitymapbox", **kwargs):
super(UirevisionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="uid", parent_name="densitymapbox", **kwargs):
super(UidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="textsrc", parent_name="densitymapbox", **kwargs):
super(TextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="text", parent_name="densitymapbox", **kwargs):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class SubplotValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(self, plotly_name="subplot", parent_name="densitymapbox", **kwargs):
super(SubplotValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", "mapbox"),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="stream", parent_name="densitymapbox", **kwargs):
super(StreamValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Stream"),
data_docs=kwargs.pop(
"data_docs",
"""
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class ShowscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showscale", parent_name="densitymapbox", **kwargs):
super(ShowscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showlegend", parent_name="densitymapbox", **kwargs):
super(ShowlegendValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="reversescale", parent_name="densitymapbox", **kwargs
):
super(ReversescaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class RadiussrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="radiussrc", parent_name="densitymapbox", **kwargs):
super(RadiussrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class RadiusValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="radius", parent_name="densitymapbox", **kwargs):
super(RadiusValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="opacity", parent_name="densitymapbox", **kwargs):
super(OpacityValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="name", parent_name="densitymapbox", **kwargs):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class MetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="metasrc", parent_name="densitymapbox", **kwargs):
super(MetasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="meta", parent_name="densitymapbox", **kwargs):
super(MetaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class LonsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="lonsrc", parent_name="densitymapbox", **kwargs):
super(LonsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class LonValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="lon", parent_name="densitymapbox", **kwargs):
super(LonValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class LegendgroupValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="legendgroup", parent_name="densitymapbox", **kwargs
):
super(LegendgroupValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class LatsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="latsrc", parent_name="densitymapbox", **kwargs):
super(LatsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class LatValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="lat", parent_name="densitymapbox", **kwargs):
super(LatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class IdssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="idssrc", parent_name="densitymapbox", **kwargs):
super(IdssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="ids", parent_name="densitymapbox", **kwargs):
super(IdsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class HovertextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="hovertextsrc", parent_name="densitymapbox", **kwargs
):
super(HovertextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class HovertextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="hovertext", parent_name="densitymapbox", **kwargs):
super(HovertextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="hovertemplatesrc", parent_name="densitymapbox", **kwargs
):
super(HovertemplatesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="hovertemplate", parent_name="densitymapbox", **kwargs
):
super(HovertemplateValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="hoverlabel", parent_name="densitymapbox", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on plot.ly for align
.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on plot.ly for
bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on plot.ly for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on plot.ly for
namelength .
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="hoverinfosrc", parent_name="densitymapbox", **kwargs
):
super(HoverinfosrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class HoverinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="hoverinfo", parent_name="densitymapbox", **kwargs):
super(HoverinfoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
extras=kwargs.pop("extras", ["all", "none", "skip"]),
flags=kwargs.pop("flags", ["lon", "lat", "z", "text", "name"]),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="customdatasrc", parent_name="densitymapbox", **kwargs
):
super(CustomdatasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="customdata", parent_name="densitymapbox", **kwargs):
super(CustomdataValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
def __init__(self, plotly_name="colorscale", parent_name="densitymapbox", **kwargs):
super(ColorscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"autocolorscale": False}),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ColorBarValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="colorbar", parent_name="densitymapbox", **kwargs):
super(ColorBarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "ColorBar"),
data_docs=kwargs.pop(
"data_docs",
"""
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
                Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
                last tick is displayed with a prefix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
                Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of plotly.graph_objects.densitymapbox.c
olorbar.Tickformatstop instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.densitymapbox.colorbar.tickformatstopdefaults
), sets the default property values to use for
elements of
densitymapbox.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
plotly.graph_objects.densitymapbox.colorbar.Tit
le instance or dict with compatible properties
titlefont
Deprecated: Please use
densitymapbox.colorbar.title.font instead. Sets
this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
densitymapbox.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
                Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(self, plotly_name="coloraxis", parent_name="densitymapbox", **kwargs):
super(ColoraxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", None),
edit_type=kwargs.pop("edit_type", "calc"),
regex=kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class BelowValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="below", parent_name="densitymapbox", **kwargs):
super(BelowValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="autocolorscale", parent_name="densitymapbox", **kwargs
):
super(AutocolorscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "style"),
**kwargs
)
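# --- Hedged usage sketch (not part of this generated validator module) ------
# The validator classes above back the user-facing densitymapbox trace, so the
# same attribute names (colorscale, colorbar, hoverinfo, coloraxis, below, ...)
# show up when a figure is built with plotly.graph_objects. The coordinates and
# values below are invented, and this assumes a plotly version that provides
# go.Densitymapbox.
def _example_densitymapbox_usage():
    import plotly.graph_objects as go

    fig = go.Figure(
        go.Densitymapbox(
            lat=[45.50, 45.52], lon=[-73.60, -73.58], z=[1.0, 3.0],
            colorscale="Viridis",                    # checked by ColorscaleValidator
            colorbar=dict(title=dict(text="density"), ticks="outside"),
            hoverinfo="lon+lat+z",                   # flags checked by HoverinfoValidator
        )
    )
    fig.update_layout(mapbox_style="open-street-map")
    return fig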
| 36.549171
| 88
| 0.597999
|
97c9efe980bdfb38395c18902792715c663fc7ae
| 2,601
|
py
|
Python
|
Day25/day25.py
|
rnegrin90/AdventOfCode2017
|
2746d6c402cf00d1d5af353ffa9f9d44f6b1c7d1
|
[
"MIT"
] | null | null | null |
Day25/day25.py
|
rnegrin90/AdventOfCode2017
|
2746d6c402cf00d1d5af353ffa9f9d44f6b1c7d1
|
[
"MIT"
] | null | null | null |
Day25/day25.py
|
rnegrin90/AdventOfCode2017
|
2746d6c402cf00d1d5af353ffa9f9d44f6b1c7d1
|
[
"MIT"
] | null | null | null |
import re
from common import get_input
class Action:
def __init__(self, action):
write, direction, next_state = action
self.write = write
self.direction = direction
self.next_state = next_state
class State:
def __init__(self, name, zero, one):
self.name = name
self.action = {0: zero, 1: one}
def read_actions(pos, input):
value = re.search(r'- Write the value (\d).', input[pos]).groups()[0]
direction = re.search(r'- Move one slot to the ([lr])(?:eft|ight).', input[pos+1]).groups()[0]
next_state = re.search(r'- Continue with state (\w).', input[pos+2]).groups()[0]
return int(value), direction.upper(), next_state
def create_turing_machine(input):
states = {}
i = 0
while i < len(input):
if i < 2:
start_state = re.search(r'Begin in state (\w).', input[i]).groups()[0]
checksum = re.search(r'Perform a diagnostic checksum after (\d+) steps.', input[i+1]).groups()[0]
i = 3
else:
state_name = re.search(r'In state (\w):', input[i]).groups()[0]
action_zero = Action(read_actions(i+2, input))
action_one = Action(read_actions(i+6, input))
states[state_name] = State(state_name, action_zero, action_one)
i += 10
return start_state, int(checksum), states
movements = {
'L': -1,
'R': 1
}
def process(input):
state, checksum, states = create_turing_machine(input)
tape = [0]
cursor = 0
for _ in range(0, checksum):
current_value = tape[cursor]
tape[cursor] = states[state].action[current_value].write
cursor += movements[states[state].action[current_value].direction]
state = states[state].action[current_value].next_state
while cursor >= len(tape):
tape.append(0)
while cursor < 0:
tape.insert(0, 0)
cursor += 1
return sum(tape)
test_input = """Begin in state A.
Perform a diagnostic checksum after 6 steps.
In state A:
If the current value is 0:
- Write the value 1.
- Move one slot to the right.
- Continue with state B.
If the current value is 1:
- Write the value 0.
- Move one slot to the left.
- Continue with state B.
In state B:
If the current value is 0:
- Write the value 1.
- Move one slot to the left.
- Continue with state A.
If the current value is 1:
- Write the value 1.
- Move one slot to the right.
- Continue with state A.""".split('\n')
assert process(test_input) == 3
print(process(get_input()))
| 28.271739
| 109
| 0.605536
|
855b6d6a83e715294d29bd96722abf643354e650
| 919
|
py
|
Python
|
starform/star.py
|
TommyDong1998/StarForm
|
d1c1ca704f3a72a043f1982a60000a99e386ad2e
|
[
"MIT"
] | 1
|
2021-04-04T22:11:19.000Z
|
2021-04-04T22:11:19.000Z
|
starform/star.py
|
TommyDong1998/StarFormation
|
d1c1ca704f3a72a043f1982a60000a99e386ad2e
|
[
"MIT"
] | null | null | null |
starform/star.py
|
TommyDong1998/StarFormation
|
d1c1ca704f3a72a043f1982a60000a99e386ad2e
|
[
"MIT"
] | null | null | null |
class Star:
    # Base class for a single action in a formation. Contains the nodes this star points to.
def __init__(self):
self._nodes = []
self.params={"args":tuple(), "kwargs":{}}
def link(self, star2):
# Chain stars. Execution star.action() -> star2.action()
self._nodes.append(star2)
return self
def unlink(self, star2):
        # Unchain stars. Removes star2 so execution no longer flows star.action() -> star2.action()
self._nodes.remove(star2)
return self
def action(self, s):
return s
def _action(self, s=None):
        # Run action() with the bound positional and keyword parameters
if s is not None:
return self.action(s, *self.params['args'], **self.params['kwargs'])
return self.action(*self.params['args'], **self.params['kwargs'])
def bind(self, *args, **kwargs):
self.params['args'] = args
self.params['kwargs'] = kwargs
return self
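# --- Hedged usage sketch (illustrative only, not from the original repo) ----
# Star itself does not walk the chain; callers traverse `_nodes` themselves.
# The example below links two stars and threads a value through them.
# bind() could attach extra arguments for subclasses whose action() accepts them.
def _example_star_chain():
    first, second = Star(), Star()
    first.link(second)                 # execution order: first -> second
    value = first._action("data")      # action() returns its input unchanged
    for nxt in first._nodes:
        value = nxt._action(value)     # pass the result along the chain
    return value                       # -> "data"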
| 27.848485
| 87
| 0.583243
|
8126f34ccb9664d7f1cd9be12250ad22f1d5f2b5
| 9,938
|
py
|
Python
|
others_code/evaluation_object.py
|
hosshonarvar/Image-Segmentation-others-code
|
de654a2a2a6a8e2f5d47861909760a07eb26c14e
|
[
"MIT"
] | 1
|
2021-06-29T05:05:43.000Z
|
2021-06-29T05:05:43.000Z
|
others_code/evaluation_object.py
|
hosshonarvar/Image-Segmentation-others-code
|
de654a2a2a6a8e2f5d47861909760a07eb26c14e
|
[
"MIT"
] | null | null | null |
others_code/evaluation_object.py
|
hosshonarvar/Image-Segmentation-others-code
|
de654a2a2a6a8e2f5d47861909760a07eb26c14e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 16:12:45 2017
This file is used to calculate the loss, accuracy, per-class accuracy, and mean IU (mean intersection over union).
"""
import tensorflow as tf
import numpy as np
def cal_loss(logits, labels, num_classes):
    if (num_classes == 12):
loss_weight = np.array([
0.2595,
0.1826,
4.5640,
0.1417,
0.9051,
0.3826,
9.6446,
1.8418,
0.6823,
6.2478,
7.3614,
1.0974
])
    if (num_classes == 3):
        loss_weight = np.array([1, 1, 1])
        # HH: by setting the loss weight to 1, we are not doing any frequency balancing (natural frequency)
    if (num_classes == 2):
        loss_weight = np.array([1, 1])
    # classes 0 to 11, but class 11 is ignored, so class 11 is probably the background
labels = tf.to_int64(labels)
loss, accuracy, prediction = weighted_loss(logits, labels, num_classes, frequency=loss_weight)
return loss, accuracy, prediction
def weighted_loss(logits, labels, number_class, frequency):
"""
    The reference paper is: https://arxiv.org/pdf/1411.4734.pdf
    Median Frequency Balancing: alpha_c = median_freq/freq(c).
    median_freq is the median of these frequencies.
    freq(c) is the number of pixels of class c divided by the total number of pixels in images where c is present.
    We weight each pixel by alpha_c.
    Inputs:
    logits: the output from the inference, i.e. the output of the decoder layers without softmax.
    labels: true label information
    number_class: for the CamVid data set this is 11 or 12 classes (class 11 appears to be the background).
    frequency: the frequency-based weight of each class
Outputs:
Loss
Accuracy
"""
label_flatten = tf.reshape(labels, [-1])
label_onehot = tf.one_hot(label_flatten, depth=number_class)
logits_reshape = tf.reshape(logits, [-1, number_class])
cross_entropy = tf.nn.weighted_cross_entropy_with_logits(targets=label_onehot, logits=logits_reshape,
pos_weight=frequency)
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.summary.scalar('loss', cross_entropy_mean)
correct_prediction = tf.equal(tf.argmax(logits_reshape, -1), label_flatten)
accuracy = tf.reduce_mean(tf.to_float(correct_prediction))
tf.summary.scalar('accuracy', accuracy)
return cross_entropy_mean, accuracy, tf.argmax(logits_reshape, -1)
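# --- Hedged illustration (not part of the original training code) -----------
# Median Frequency Balancing weights, alpha_c = median_freq / freq(c), computed
# from raw per-class pixel counts. The counts below are invented numbers for
# three classes; only the formula from the docstring above is being shown.
def _example_median_frequency_balancing():
    pixel_counts = np.array([5000000.0, 800000.0, 200000.0])    # pixels of each class
    images_with_class = np.array([100.0, 90.0, 40.0])           # images containing each class
    pixels_per_image = 360 * 480                                 # CamVid-sized frames
    freq = pixel_counts / (images_with_class * pixels_per_image)
    alpha = np.median(freq) / freq
    return alpha    # rare classes get weights > 1, frequent classes < 1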
def normal_loss(logits, labels, number_class):
"""
Calculate the normal loss instead of median frequency balancing
Inputs:
    logits: the output from the decoder layers, without softmax, shape [Num_batch,height,width,Number_class]
    labels: the actual label information, shape [Num_batch,height,width,1]
    number_class: 12
    Output: loss and accuracy
    Using tf.nn.sparse_softmax_cross_entropy_with_logits assumes that each pixel has one and only one specific
    label, instead of a probability distribution over labels. It also assumes that logits have not been passed
    through softmax, because the op applies softmax internally for efficiency; this is why we don't apply
    softmax in the inference function!
"""
label_flatten = tf.to_int64(tf.reshape(labels, [-1]))
logits_reshape = tf.reshape(logits, [-1, number_class])
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_flatten, logits=logits_reshape,
name='normal_cross_entropy')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.summary.scalar('loss', cross_entropy_mean)
correct_prediction = tf.equal(tf.argmax(logits_reshape, -1), label_flatten)
accuracy = tf.reduce_mean(tf.to_float(correct_prediction))
tf.summary.scalar('accuracy', accuracy)
return cross_entropy_mean, accuracy, tf.argmax(logits_reshape, -1)
def per_class_acc(predictions, label_tensor, num_class):
"""
This function is copied from "Implement slightly different segnet on tensorflow"
"""
labels = label_tensor
size = predictions.shape[0]
hist = np.zeros((num_class, num_class))
for i in range(size):
hist += fast_hist(labels[i].flatten(), predictions[i].argmax(2).flatten(), num_class)
acc_total = np.diag(hist).sum() / hist.sum()
print('accuracy = %f' % np.nanmean(acc_total))
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
print('mean IU = %f' % np.nanmean(iu))
for ii in range(num_class):
if float(hist.sum(1)[ii]) == 0:
acc = 0.0
else:
acc = np.diag(hist)[ii] / float(hist.sum(1)[ii])
print(" class # %d accuracy = %f " % (ii, acc))
def fast_hist(a, b, n):
"""
This function is copied from "Implement slightly different segnet on tensorflow"
"""
k = (a >= 0) & (a < n)
return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)
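# --- Hedged illustration (toy numbers, not from the repo) --------------------
# fast_hist builds an n x n confusion matrix in which entry [i, j] counts the
# pixels whose true label is i and predicted label is j.
def _example_fast_hist():
    a = np.array([0, 0, 1, 1, 2])   # ground-truth labels
    b = np.array([0, 1, 1, 1, 0])   # predicted labels
    hist = fast_hist(a, b, 3)
    # hist == [[1, 1, 0],
    #          [0, 2, 0],
    #          [1, 0, 0]]
    return hist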
def get_hist(predictions, labels):
"""
This function is copied from "Implement slightly different segnet on tensorflow"
"""
num_class = predictions.shape[3] # becomes 2 for aerial - correct
batch_size = predictions.shape[0]
hist = np.zeros((num_class, num_class))
for i in range(batch_size):
hist += fast_hist(labels[i].flatten(), predictions[i].argmax(2).flatten(), num_class)
return hist
def print_hist_summary(hist):
"""
This function is copied from "Implement slightly different segnet on tensorflow"
"""
acc_total = np.diag(hist).sum() / hist.sum()
print('accuracy = %f' % np.nanmean(acc_total))
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
print('mean IU = %f' % np.nanmean(iu))
for ii in range(hist.shape[0]):
if float(hist.sum(1)[ii]) == 0:
acc = 0.0
else:
acc = np.diag(hist)[ii] / float(hist.sum(1)[ii])
print(" class # %d accuracy = %f " % (ii, acc))
def train_op(total_loss, opt):
"""
Input:
total_loss: The loss
    Learning_Rate: learning rate for the chosen optimization algorithm, 0.001 for AdamOptimizer, 0.1 for SGD
    global_step: global_step is used to track how many batches have been passed. In the training process, the initial
    value for global_step is 0 (tf.Variable(0, trainable=False)); after one batch of images has been passed, the loss is
    passed into the optimizer to update the weights, and the global step is increased by one. It is the number of
    batches seen by the graph. Reference: https://stackoverflow.com/questions/41166681/what-does-tensorflow-global-step-mean
    FLAG (the `opt` argument): denotes which optimization method we are using; for SegNet we can simply use Adam,
    but for Bayesian SegNet the paper suggests SGD is more helpful for learning.
    Output:
    The train_op and the global_step variable
"""
global_step = tf.Variable(0, trainable=False)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
if (opt == "ADAM"):
optimizer = tf.train.AdamOptimizer(0.001, epsilon=0.0001)
print("Running with Adam Optimizer with learning rate:", 0.001)
elif (opt == "SGD"):
base_learning_rate = 0.1
learning_rate = tf.train.exponential_decay(base_learning_rate, global_step, decay_steps=1000, decay_rate=0.0005)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
print("Running with Gradient Descent Optimizer with learning rate", 0.1)
else:
raise ValueError("Optimizer is not recognized")
grads = optimizer.compute_gradients(total_loss, var_list=tf.trainable_variables())
training_op = optimizer.apply_gradients(grads, global_step=global_step)
return training_op, global_step
def MAX_VOTE(pred,prob,NUM_CLASS):
"""
    pred: the predicted label from each Monte Carlo sample, shape [NUM_SAMPLES, NUM_PIXELS]
    prob: the class probabilities from each sample, shape [NUM_SAMPLES, image_h, image_w, NUM_CLASS]
    NUM_CLASS: the number of classes
    Output:
    variance_final: per-pixel variance of the probabilities of the majority-vote label
    prediction: the majority-vote label for each pixel, which can be fed into the normal loss function to
    calculate loss and also accuracy
"""
image_h = 360
image_w = 480
NUM_SAMPLES = np.shape(pred)[0]
#transpose the prediction to be [NUM_PIXELS,NUM_SAMPLES]
pred_tot = np.transpose(pred)
prob_re = np.reshape(prob,[NUM_SAMPLES,image_h*image_w,NUM_CLASS])
prediction = []
variance_final = []
step = 0
for i in pred_tot:
value = np.bincount(i,minlength = NUM_CLASS)
value_max = np.argmax(value)
#indices = [k for k,j in enumerate(i) if j == value_max]
indices = np.where(i == value_max)[0]
prediction.append(value_max)
variance_final.append(np.var(prob_re[indices,step,:],axis = 0))
step = step+1
return variance_final,prediction
def var_calculate(pred,prob_variance, image_w, image_h):
"""
Inputs:
pred: predicted label, shape is [NUM_PIXEL,1]
prob_variance: the total variance for 12 classes wrt each pixel, prob_variance shape [image_h,image_w,12]
Output:
var_one: corresponding variance in terms of the "optimal" label
"""
NUM_CLASS = np.shape(prob_variance)[-1]
var_sep = [] #var_sep is the corresponding variance if this pixel choose label k
length_cur = 0 #length_cur represent how many pixels has been read for one images
for row in np.reshape(prob_variance,[image_h*image_w,NUM_CLASS]):
temp = row[pred[length_cur]]
length_cur += 1
var_sep.append(temp)
var_one = np.reshape(var_sep,[image_h,image_w]) #var_one is the corresponding variance in terms of the "optimal" label
return var_one
| 41.58159
| 124
| 0.669451
|
798ade3500bc567c3fb9d96d87639c1868ce7230
| 3,069
|
py
|
Python
|
src/SparseSC/utils/misc.py
|
microsoft/SparseSC
|
4cf0a98858919d50c6127be782a145e49d96897e
|
[
"MIT"
] | 34
|
2019-05-14T11:05:58.000Z
|
2022-02-04T19:16:21.000Z
|
src/SparseSC/utils/misc.py
|
Microsoft/SparseSC
|
f56880cbc42cf7d867390ab7d8a33f0618829f41
|
[
"MIT"
] | 14
|
2019-01-16T18:57:01.000Z
|
2019-04-17T16:50:23.000Z
|
src/SparseSC/utils/misc.py
|
Microsoft/SparseSC
|
f56880cbc42cf7d867390ab7d8a33f0618829f41
|
[
"MIT"
] | 18
|
2019-05-14T11:06:01.000Z
|
2021-09-13T12:30:58.000Z
|
# Allow capturing output
# Modified (to not capture stderr too) from https://stackoverflow.com/questions/5136611/
import contextlib
import sys
from .print_progress import it_progressbar, it_progressmsg
@contextlib.contextmanager
def capture():
STDOUT = sys.stdout
try:
sys.stdout = DummyFile()
yield
finally:
sys.stdout = STDOUT
class DummyFile(object):
def write(self, x):
pass
@contextlib.contextmanager
def capture_all():
STDOUT, STDERR = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = DummyFile(), DummyFile()
yield
finally:
sys.stdout, sys.stderr = STDOUT, STDERR
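# --- Hedged usage sketch (illustrative only) ---------------------------------
# capture() temporarily swallows anything written to stdout inside the block.
def _example_capture():
    with capture():
        print("this line is swallowed")
    print("this line is shown")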
def par_map(part_fn, it, F, loop_verbose, n_multi=0, header="LOOP"):
if n_multi>0:
from multiprocessing import Pool
with Pool(n_multi) as p:
            # p.map evaluates the iterable eagerly, so we can't wrap `it` with it_progressbar(it) here
if loop_verbose==1:
rets = []
print(header + ":")
for ret in it_progressbar(p.imap(part_fn, it), count=F):
rets.append(ret)
elif loop_verbose==2:
rets = []
for ret in it_progressmsg(p.imap(part_fn, it), prefix=header, count=F):
rets.append(ret)
else:
rets = p.map(part_fn, it)
else:
if loop_verbose==1:
print(header + ":")
it = it_progressbar(it, count=F)
elif loop_verbose==2:
it = it_progressmsg(it, prefix=header, count=F)
rets = list(map(part_fn, it))
return rets
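# --- Hedged usage sketch (illustrative only) ---------------------------------
# par_map maps `part_fn` over `it`, optionally across `n_multi` worker processes
# and with progress reporting controlled by `loop_verbose`. The toy function
# below is hypothetical; n_multi=0 keeps everything in-process.
def _example_par_map():
    from functools import partial

    def _power(x, exponent=2):
        return x ** exponent

    return par_map(partial(_power, exponent=2), iter(range(5)), F=5,
                   loop_verbose=0, n_multi=0, header="demo")   # -> [0, 1, 4, 9, 16]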
class PreDemeanScaler:
"""
    Units are defined by rows; columns are split into "pre" and "post" periods.
    Demeans each row by its "pre"-period mean.
"""
# maybe fit should just take Y and T0 (in init())?
# Try in sklearn.pipeline with fit() for that and predict (on default Y_post)
# might want wrappers around fit to make that work fine with pipeline (given its standard arguments).
# maybe call the vars X rather than Y?
def __init__(self):
self.means = None
# self.T0 = T0
def fit(self, Y):
"""
Ex. fit(Y.iloc[:,0:T0])
"""
import numpy as np
self.means = np.mean(Y, axis=1)
def transform(self, Y):
return (Y.T - self.means).T
def inverse_transform(self, Y):
return (Y.T + self.means).T
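# --- Hedged usage sketch (toy numbers, illustrative only) ---------------------
# Demean each unit (row) by its own pre-period mean, then undo the shift.
def _example_pre_demean_scaler():
    import numpy as np

    Y = np.array([[1.0, 3.0, 5.0, 7.0],
                  [2.0, 2.0, 4.0, 4.0]])
    T0 = 2                                   # first two columns are the "pre" period
    scaler = PreDemeanScaler()
    scaler.fit(Y[:, :T0])                    # row means over the pre period: [2.0, 2.0]
    Y_demeaned = scaler.transform(Y)         # subtract each row's pre-period mean
    Y_back = scaler.inverse_transform(Y_demeaned)
    return np.allclose(Y_back, Y)            # -> True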
def _ensure_good_donor_pool(custom_donor_pool, control_units):
N0 = custom_donor_pool.shape[1]
custom_donor_pool_c = custom_donor_pool[control_units, :]
for i in range(N0):
custom_donor_pool_c[i, i] = False
custom_donor_pool[control_units, :] = custom_donor_pool_c
return custom_donor_pool
def _get_fit_units(model_type, control_units, treated_units, N):
if model_type == "retrospective":
return control_units
elif model_type == "prospective":
return range(N)
elif model_type == "prospective-restricted:":
return treated_units
# model_type=="full":
return range(N) # same as control_units
| 28.416667
| 105
| 0.610622
|
7a9f400054ce3a62c9e98c5c85dbfaa81f293670
| 2,816
|
py
|
Python
|
tests/test_financiar.py
|
lucasmag/crowdfunding-blockchain
|
3b033d853fc16c5e4b0a556ae0879d580ddf08ae
|
[
"MIT"
] | 1
|
2022-03-31T21:11:46.000Z
|
2022-03-31T21:11:46.000Z
|
tests/test_financiar.py
|
lucasmag/crowdfunding-blockchain
|
3b033d853fc16c5e4b0a556ae0879d580ddf08ae
|
[
"MIT"
] | null | null | null |
tests/test_financiar.py
|
lucasmag/crowdfunding-blockchain
|
3b033d853fc16c5e4b0a556ae0879d580ddf08ae
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import pytest
from brownie import Campanha, FabricaDeCampanhas, MockV3Aggregator, exceptions
from scripts.deploy import deploy_fabrica_de_campanhas
from scripts.helper import buscar_conta
from tests.decorators import pular_caso_rede_nao_local
@pular_caso_rede_nao_local
def test_financiar():
conta = buscar_conta()
campanha = test_criar_campanha()
valor_a_financiar = campanha.buscarValorFinanciamentoMinimo()
tx = campanha.financiar({"from": conta, "value": valor_a_financiar})
tx.wait(1)
financiadores = campanha.buscarFinanciadores()
print(f"Financiadores: {financiadores}")
assert campanha.valorInvestidoPorCadaInvestidor(conta.address) == valor_a_financiar
@pular_caso_rede_nao_local
def test_colher_investimentos():
conta = buscar_conta()
campanha = test_criar_campanha()
valor_a_financiar = campanha.buscarValorFinanciamentoMinimo()
tx = campanha.financiar({"from": conta, "value": valor_a_financiar})
tx.wait(1)
tx2 = campanha.colherInvestimentos({"from": conta})
tx2.wait(1)
assert campanha.valorInvestidoPorCadaInvestidor(conta.address) == 0
@pular_caso_rede_nao_local
def test_converter_eth_para_real():
conta = buscar_conta()
valor_minimo = 50 # reais
campanha = test_criar_campanha()
valor_financiamento_minimo = campanha.buscarValorFinanciamentoMinimo()
valor_em_real = campanha.converterEthParaReal(valor_financiamento_minimo, {"from": conta})
assert int(valor_em_real/(10**18)) == valor_minimo
@pular_caso_rede_nao_local
def test_valor_financiamento_minimo():
def e18(num):
return num * 10 ** 18
campanha = test_criar_campanha()
preco_eth, decimais = campanha.buscarPrecoEth()
valor_financiamento_minimo = campanha.buscarValorFinanciamentoMinimo()
print(valor_financiamento_minimo)
valor_esperado = int((e18(50) * e18(1) / 5.6) / preco_eth) + 100
assert valor_financiamento_minimo == valor_esperado
@pular_caso_rede_nao_local
def test_criar_campanha():
conta = buscar_conta()
fabrica = deploy_fabrica_de_campanhas()
data_limite = datetime.now().timestamp()
tx = fabrica.criarCampanha("Campanha 1", 50, data_limite, {"from": conta})
tx.wait(1)
campanha = Campanha.at(tx.return_value)
assert campanha
return campanha
@pular_caso_rede_nao_local
def test_colher_investimentos_usuario_inautorizado():
conta = buscar_conta()
campanha = test_criar_campanha()
valor_a_financiar = campanha.buscarValorFinanciamentoMinimo()
tx = campanha.financiar({"from": conta, "value": valor_a_financiar})
tx.wait(1)
conta_inautorazada = buscar_conta(1)
with pytest.raises(exceptions.VirtualMachineError):
campanha.colherInvestimentos({"from": conta_inautorazada})
| 28.16
| 94
| 0.756037
|
c7484303f45e415467d302071b2d3fe32cf798b7
| 2,346
|
py
|
Python
|
orders/migrations/0001_initial.py
|
Kaique425/ecommerce
|
684988771ae7758a1e550092bf8fbd20f1b250e9
|
[
"MIT"
] | null | null | null |
orders/migrations/0001_initial.py
|
Kaique425/ecommerce
|
684988771ae7758a1e550092bf8fbd20f1b250e9
|
[
"MIT"
] | null | null | null |
orders/migrations/0001_initial.py
|
Kaique425/ecommerce
|
684988771ae7758a1e550092bf8fbd20f1b250e9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-09-16 23:10
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import localflavor.br.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('products', '0005_alter_product_slug'),
]
operations = [
migrations.CreateModel(
name='Orders',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cpf', localflavor.br.models.BRCPFField(max_length=14, verbose_name='CPF')),
('name', models.CharField(max_length=256, verbose_name='Nome Completo')),
('email', models.EmailField(max_length=254)),
('postal_code', localflavor.br.models.BRPostalCodeField(max_length=9, verbose_name='CEP')),
('address', models.CharField(max_length=250, verbose_name='Endereço')),
('number', models.CharField(max_length=250, verbose_name='Numero')),
('compliment', models.CharField(blank=True, max_length=250, verbose_name='Complemento')),
('district', models.CharField(max_length=250, verbose_name='Bairro')),
('state', localflavor.br.models.BRStateField(max_length=2, verbose_name='Estado')),
('city', models.CharField(max_length=250, verbose_name='Cidade')),
('created', models.DateTimeField(auto_now_add=True)),
('paid', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('quantity', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(20)])),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Item', to='orders.orders')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_item', to='products.product')),
],
),
]
| 49.914894
| 162
| 0.630009
|
eab461bffe828f23024f01fcee16cd0003e9c64e
| 1,396
|
py
|
Python
|
scripts/wasm-rebuild/docker-scripts/isolate_tests.py
|
MrChico/solidity
|
5b4ea1eb895d5edc9a24ee5c6f96d8580eceec08
|
[
"MIT"
] | null | null | null |
scripts/wasm-rebuild/docker-scripts/isolate_tests.py
|
MrChico/solidity
|
5b4ea1eb895d5edc9a24ee5c6f96d8580eceec08
|
[
"MIT"
] | null | null | null |
scripts/wasm-rebuild/docker-scripts/isolate_tests.py
|
MrChico/solidity
|
5b4ea1eb895d5edc9a24ee5c6f96d8580eceec08
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
import sys
import re
import os
import hashlib
from os.path import join, isfile
def extract_test_cases(path):
lines = open(path, 'rb').read().splitlines()
inside = False
delimiter = ''
tests = []
for l in lines:
if inside:
if l.strip().endswith(')' + delimiter + '";'):
tests[-1] += l.strip()[:-(3 + len(delimiter))]
inside = False
else:
tests[-1] += l + '\n'
else:
m = re.search(r'R"([^(]*)\((.*)$', l.strip())
if m:
inside = True
delimiter = m.group(1)
tests += [m.group(2)]
return tests
def extract_and_write(f, path):
if f.endswith('.sol'):
cases = [open(path, 'r').read()]
else:
cases = extract_test_cases(path)
write_cases(f, cases)
def write_cases(f, tests):
cleaned_filename = f.replace(".","_").replace("-","_").replace(" ","_").lower()
for test in tests:
remainder = re.sub(r'^ {4}', '', test, 0, re.MULTILINE)
open('test_%s_%s.sol' % (hashlib.sha256(test).hexdigest(), cleaned_filename), 'w').write(remainder)
if __name__ == '__main__':
path = sys.argv[1]
for root, subdirs, files in os.walk(path):
if '_build' in subdirs:
subdirs.remove('_build')
for f in files:
path = join(root, f)
extract_and_write(f, path)
| 24.928571
| 107
| 0.541547
|
6dd9440a8ef477b181b340ee08eb86103826ad3e
| 2,589
|
py
|
Python
|
setup.py
|
elifesciences/sciencebeam-gym
|
3ad654e08775e0c0cdd256753e14093bb5a42d44
|
[
"MIT"
] | 25
|
2017-07-25T12:44:55.000Z
|
2020-09-30T22:16:50.000Z
|
setup.py
|
elifesciences/sciencebeam-gym
|
3ad654e08775e0c0cdd256753e14093bb5a42d44
|
[
"MIT"
] | 192
|
2017-11-29T08:57:03.000Z
|
2022-03-29T18:44:41.000Z
|
setup.py
|
elifesciences/sciencebeam-gym
|
3ad654e08775e0c0cdd256753e14093bb5a42d44
|
[
"MIT"
] | 6
|
2019-02-01T18:49:33.000Z
|
2020-07-26T08:18:46.000Z
|
from __future__ import print_function
import os
import subprocess
import shlex
from getpass import getuser
from distutils.command.build import build # type: ignore
from setuptools import (
find_packages,
setup,
Command
)
import numpy as np
CUSTOM_COMMANDS = [
shlex.split(command_line) for command_line in [
'apt-get update',
'apt-get --assume-yes install libxml2',
'apt-get --assume-yes install poppler-utils',
'apt-get --assume-yes install libgl1'
]
]
with open(os.path.join('requirements.txt'), 'r', encoding='utf-8') as f:
REQUIRED_PACKAGES = f.readlines()
packages = find_packages()
# This class handles the pip install mechanism.
class CustomBuild(build):
"""A build command class that will be invoked during package install.
The package built using the current setup.py will be staged and later
installed in the worker using `pip install package'. This class will be
instantiated during install for this specific scenario and will trigger
running the custom commands specified.
"""
sub_commands = build.sub_commands + [('CustomCommands', None)]
class CustomCommands(Command):
"""A setuptools Command class able to run arbitrary commands."""
def initialize_options(self):
pass
def finalize_options(self):
pass
def _run_custom_command(self, command_list):
if getuser() != 'root' or os.environ.get('SCIENCEBEAM_GYM_NO_APT'):
print('Skipping setup command (not root): %s' % command_list)
return
print('Running command: %s' % command_list)
p = subprocess.Popen(
command_list,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
# Can use communicate(input='y\n'.encode()) if the command run requires
# some confirmation.
stdout_data, _ = p.communicate()
print('Command output: %s' % stdout_data)
if p.returncode != 0:
raise RuntimeError(
'Command %s failed: exit code: %s (output: %s)' %
(command_list, p.returncode, stdout_data)
)
def run(self):
for command in CUSTOM_COMMANDS:
self._run_custom_command(command)
setup(
name='sciencebeam_gym',
version='0.0.1',
install_requires=REQUIRED_PACKAGES,
packages=packages,
include_package_data=True,
description='ScienceBeam Gym',
include_dirs=[np.get_include()],
cmdclass={
'build': CustomBuild,
'CustomCommands': CustomCommands
}
)
| 28.141304
| 83
| 0.659328
|
bfd6ff0405e3fa3e3cab6c227b9cd9363eecbad3
| 5,363
|
py
|
Python
|
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/misc/chomsky.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/misc/chomsky.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/misc/chomsky.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# Chomsky random text generator, version 1.1, Raymond Hettinger, 2005/09/13
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440546
"""
CHOMSKY is an aid to writing linguistic papers in the style
of the great master. It is based on selected phrases taken
from actual books and articles written by Noam Chomsky.
Upon request, it assembles the phrases in the elegant
stylistic patterns that Chomsky is noted for.
To generate n sentences of linguistic wisdom, type
(CHOMSKY n) -- for example
(CHOMSKY 5) generates half a screen of linguistic truth.
"""
from __future__ import print_function
leadins = """To characterize a linguistic level L,
On the other hand,
This suggests that
It appears that
Furthermore,
We will bring evidence in favor of the following thesis:
To provide a constituent structure for T(Z,K),
From C1, it follows that
For any transformation which is sufficiently diversified in \
application to be of any interest,
Analogously,
Clearly,
Note that
Of course,
Suppose, for instance, that
Thus
With this clarification,
Conversely,
We have already seen that
By combining adjunctions and certain deformations,
I suggested that these results would follow from the assumption that
If the position of the trace in (99c) were only relatively \
inaccessible to movement,
However, this assumption is not correct, since
Comparing these examples with their parasitic gap counterparts in \
(96) and (97), we see that
In the discussion of resumptive pronouns following (81),
So far,
Nevertheless,
For one thing,
Summarizing, then, we assume that
A consequence of the approach just outlined is that
Presumably,
On our assumptions,
It may be, then, that
It must be emphasized, once again, that
Let us continue to suppose that
Notice, incidentally, that """
# List of LEADINs to buy time.
subjects = """ the notion of level of grammaticalness
a case of semigrammaticalness of a different sort
most of the methodological work in modern linguistics
a subset of English sentences interesting on quite independent grounds
the natural general principle that will subsume this case
an important property of these three types of EC
any associated supporting element
the appearance of parasitic gaps in domains relatively inaccessible \
to ordinary extraction
the speaker-hearer's linguistic intuition
the descriptive power of the base component
the earlier discussion of deviance
this analysis of a formative as a pair of sets of features
this selectionally introduced contextual feature
a descriptively adequate grammar
the fundamental error of regarding functional notions as categorial
relational information
the systematic use of complex symbols
the theory of syntactic features developed earlier"""
# List of SUBJECTs chosen for maximum professorial macho.
verbs = """can be defined in such a way as to impose
delimits
suffices to account for
cannot be arbitrary in
is not subject to
does not readily tolerate
raises serious doubts about
is not quite equivalent to
does not affect the structure of
may remedy and, at the same time, eliminate
is not to be considered in determining
is to be regarded as
is unspecified with respect to
is, apparently, determined by
is necessary to impose an interpretation on
appears to correlate rather closely with
is rather different from"""
# List of VERBs chosen for autorecursive obfuscation.
objects = """ problems of phonemic and morphological analysis.
a corpus of utterance tokens upon which conformity has been defined \
by the paired utterance test.
the traditional practice of grammarians.
the levels of acceptability from fairly high (e.g. (99a)) to virtual \
gibberish (e.g. (98d)).
a stipulation to place the constructions into these various categories.
a descriptive fact.
a parasitic gap construction.
the extended c-command discussed in connection with (34).
the ultimate standard that determines the accuracy of any proposed grammar.
the system of base rules exclusive of the lexicon.
irrelevant intervening contexts in selectional rules.
nondistinctness in the sense of distinctive feature theory.
a general convention regarding the forms of the grammar.
an abstract underlying order.
an important distinction in language use.
the requirement that branching is not tolerated within the dominance \
scope of a complex symbol.
the strong generative capacity of the theory."""
# List of OBJECTs selected for profound sententiousness.
import textwrap, random
from itertools import chain, islice
from nltk.compat import izip
def generate_chomsky(times=5, line_length=72):
parts = []
for part in (leadins, subjects, verbs, objects):
phraselist = list(map(str.strip, part.splitlines()))
random.shuffle(phraselist)
parts.append(phraselist)
output = chain(*islice(izip(*parts), 0, times))
print(textwrap.fill(" ".join(output), line_length))
if __name__ == '__main__':
generate_chomsky()
| 40.022388
| 80
| 0.726459
|
b078a4c7a13250985e350c91f13a52fa406e90de
| 9,278
|
py
|
Python
|
backend/tests/review/test_mergeinstructors.py
|
pennlabs/penn-courses
|
6fd16c151e34a9660e883a41458a72cef6c1f8cd
|
[
"MIT"
] | 32
|
2019-04-02T19:02:48.000Z
|
2022-03-05T17:32:52.000Z
|
backend/tests/review/test_mergeinstructors.py
|
pennlabs/penn-courses
|
6fd16c151e34a9660e883a41458a72cef6c1f8cd
|
[
"MIT"
] | 281
|
2019-05-20T01:19:38.000Z
|
2022-03-31T08:17:13.000Z
|
backend/tests/review/test_mergeinstructors.py
|
pennlabs/penn-courses
|
6fd16c151e34a9660e883a41458a72cef6c1f8cd
|
[
"MIT"
] | 2
|
2020-04-27T20:53:02.000Z
|
2021-09-26T16:40:28.000Z
|
from io import StringIO
from django.contrib.auth.models import User
from django.core import management
from django.db.models.functions import Lower
from django.test import TestCase
from courses.models import Instructor, Section
from courses.util import get_or_create_course_and_section
from review.management.commands.mergeinstructors import (
INSTRUCTORS_UNMERGED,
batch_duplicates,
resolve_duplicates,
strategies,
)
from review.models import Review
class BatchDuplicateTestCase(TestCase):
def setUp(self):
Instructor.objects.create(name="A")
Instructor.objects.create(name="a")
Instructor.objects.create(name="b")
def test_batch_duplicates(self):
dupes = batch_duplicates(
Instructor.objects.all().annotate(name_lower=Lower("name")), lambda x: x.name_lower
)
self.assertEqual(1, len(dupes))
self.assertEqual("a", dupes[0][0].name.lower())
def test_batch_duplicates_none_ignored(self):
Instructor.objects.create(name="B")
dupes = batch_duplicates(
Instructor.objects.all().annotate(name_lower=Lower("name")),
lambda x: x.name_lower if x.name_lower == "b" else None,
)
self.assertEqual(1, len(dupes))
self.assertEqual("b", dupes[0][0].name.lower())
class ResolveDuplicatesTestCase(TestCase):
def setUp(self):
self.user1 = User.objects.create_user(username="user1")
self.user2 = User.objects.create_user(username="user2")
self.inst_A = Instructor.objects.create(name="A")
self.inst_a = Instructor.objects.create(name="a")
self.inst_b = Instructor.objects.create(name="b")
self.course1, self.section1, _, _ = get_or_create_course_and_section("CIS-120-001", "2020C")
self.course2, self.section2, _, _ = get_or_create_course_and_section("CIS-120-001", "2019C")
self.review1 = Review.objects.create(section=self.section1, instructor=self.inst_A)
self.section1.instructors.add(self.inst_A)
self.review2 = Review.objects.create(section=self.section2, instructor=self.inst_a)
self.section2.instructors.add(self.inst_a)
self.stats = dict()
def stat(key, amt=1, element=None):
"""
Helper function to keep track of how many rows we are changing
"""
value = self.stats.get(key, 0)
if element is None:
self.stats[key] = value + amt
else:
self.stats.setdefault(key, []).append(element)
self.stat = stat
def test_basic_merge(self):
resolve_duplicates([[self.inst_A, self.inst_a]], False, self.stat)
self.assertEqual(2, Instructor.objects.count())
self.assertFalse(Instructor.objects.filter(name="a").exists())
self.assertEqual(2, Review.objects.filter(instructor=self.inst_A).count())
self.assertEqual(2, Section.objects.filter(instructors=self.inst_A).count())
def test_basic_merge_dryrun_doesnt_modify(self):
resolve_duplicates([[self.inst_A, self.inst_a]], True, self.stat)
self.assertEqual(3, Instructor.objects.count())
self.assertEqual(1, Review.objects.filter(instructor=self.inst_A).count())
self.assertEqual(1, Section.objects.filter(instructors=self.inst_A).count())
self.assertEqual(1, Review.objects.filter(instructor=self.inst_a).count())
self.assertEqual(1, Section.objects.filter(instructors=self.inst_a).count())
def test_merge_with_user(self):
self.inst_a.user = self.user1
self.inst_a.save()
resolve_duplicates([[self.inst_A, self.inst_a]], False, self.stat)
self.assertEqual(2, Instructor.objects.count())
self.assertFalse(Instructor.objects.filter(name="A").exists())
self.assertEqual(2, Review.objects.filter(instructor=self.inst_a).count())
self.assertEqual(2, Section.objects.filter(instructors=self.inst_a).count())
def test_merge_with_both_having_same_user(self):
self.inst_a.user = self.user1
self.inst_a.save()
self.inst_A.user = self.user1
self.inst_A.save()
resolve_duplicates([[self.inst_A, self.inst_a]], False, self.stat)
self.assertEqual(2, Instructor.objects.count())
self.assertFalse(Instructor.objects.filter(name="a").exists())
self.assertEqual(2, Review.objects.filter(instructor=self.inst_A).count())
self.assertEqual(2, Section.objects.filter(instructors=self.inst_A).count())
def test_merge_aborts_with_different_users(self):
self.inst_a.user = self.user1
self.inst_a.save()
self.inst_A.user = self.user2
self.inst_A.save()
resolve_duplicates([[self.inst_A, self.inst_a]], False, self.stat)
self.assertEqual(3, Instructor.objects.count())
self.assertEqual(1, Review.objects.filter(instructor=self.inst_A).count())
self.assertEqual(1, Section.objects.filter(instructors=self.inst_A).count())
self.assertEqual(1, Review.objects.filter(instructor=self.inst_a).count())
self.assertEqual(1, Section.objects.filter(instructors=self.inst_a).count())
self.assertListEqual([self.inst_A.pk, self.inst_a.pk], self.stats[INSTRUCTORS_UNMERGED][0])
class MergeStrategyTestCase(TestCase):
def setUp(self):
self.user1 = User.objects.create_user(username="user1")
self.user2 = User.objects.create_user(username="user2")
self.inst_A = Instructor.objects.create(name="A")
self.inst_a = Instructor.objects.create(name="a")
self.inst_b = Instructor.objects.create(name="b")
def test_case_insensitive(self):
self.assertListEqual([[self.inst_a, self.inst_A]], strategies["case-insensitive"]())
def test_case_insensitive_recent_first(self):
self.inst_A.save()
self.assertListEqual([[self.inst_A, self.inst_a]], strategies["case-insensitive"]())
def test_pennid(self):
self.inst_A.user = self.user1
self.inst_A.save()
self.inst_a.user = self.user1
self.inst_a.save()
self.assertListEqual([[self.inst_a, self.inst_A]], strategies["pennid"]())
class MergeInstructorsCommandTestCase(TestCase):
COMMAND_NAME = "mergeinstructors"
def setUp(self):
self.out = StringIO()
self.err = StringIO()
self.user1 = User.objects.create_user(username="user1")
self.user2 = User.objects.create_user(username="user2")
self.inst_A = Instructor.objects.create(name="A")
self.inst_a = Instructor.objects.create(name="a")
self.inst_b = Instructor.objects.create(name="b")
self.course1, self.section1, _, _ = get_or_create_course_and_section("CIS-120-001", "2020C")
self.course2, self.section2, _, _ = get_or_create_course_and_section("CIS-120-001", "2019C")
self.review1 = Review.objects.create(section=self.section1, instructor=self.inst_A)
self.section1.instructors.add(self.inst_A)
self.review2 = Review.objects.create(section=self.section2, instructor=self.inst_a)
self.section2.instructors.add(self.inst_a)
def test_with_all_strats(self):
self.inst_a.user = self.user1
self.inst_b.user = self.user1
self.inst_a.save()
self.inst_b.save()
management.call_command(
self.COMMAND_NAME, "--all", stdout=self.out, stderr=self.err,
)
self.assertEqual(1, Instructor.objects.all().count())
self.assertEqual(2, Review.objects.filter(instructor=self.inst_b).count())
self.assertEqual(2, Section.objects.filter(instructors=self.inst_b).count())
def test_with_one_strat(self):
management.call_command(
self.COMMAND_NAME, "--strategy=case-insensitive", stdout=self.out, stderr=self.err,
)
self.assertEqual(2, Instructor.objects.all().count())
self.assertEqual(2, Review.objects.filter(instructor=self.inst_a).count())
self.assertEqual(2, Section.objects.filter(instructors=self.inst_a).count())
def test_with_manual_override(self):
self.inst_A.user = self.user1
self.inst_b.user = self.user2
self.inst_A.save()
self.inst_b.save()
management.call_command(
self.COMMAND_NAME,
f"-i {self.inst_b.pk}",
f"-i {self.inst_A.pk}",
stdout=self.out,
stderr=self.err,
)
self.assertEqual(2, Instructor.objects.all().count())
self.assertFalse(Instructor.objects.filter(name="b").exists())
self.assertEqual(1, Review.objects.filter(instructor=self.inst_a).count())
self.assertEqual(1, Section.objects.filter(instructors=self.inst_a).count())
def test_with_dry_run(self):
self.inst_a.user = self.user1
self.inst_b.user = self.user1
self.inst_a.save()
self.inst_b.save()
management.call_command(
self.COMMAND_NAME, "--all", "--dryrun", stdout=self.out, stderr=self.err,
)
self.assertEqual(3, Instructor.objects.all().count())
self.assertEqual(0, Review.objects.filter(instructor=self.inst_b).count())
self.assertEqual(0, Section.objects.filter(instructors=self.inst_b).count())
| 42.559633
| 100
| 0.671804
|
9be821df5cd1baeb94c3be5ef7a82eb9d42b863f
| 3,230
|
py
|
Python
|
setup.py
|
yoyoasa/bplsqlparse
|
e068865284426ea85b1dc675c71049f18e5ba24d
|
[
"BSD-3-Clause"
] | 2
|
2019-10-25T06:28:06.000Z
|
2019-10-25T06:28:07.000Z
|
setup.py
|
yoyoasa/bplsqlparse
|
e068865284426ea85b1dc675c71049f18e5ba24d
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
yoyoasa/bplsqlparse
|
e068865284426ea85b1dc675c71049f18e5ba24d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
#
# This setup script is part of python-bplsqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
import re
from setuptools import setup, find_packages
def get_version():
"""Parse __init__.py for version number instead of importing the file."""
VERSIONFILE = 'bplsqlparse/__init__.py'
VSRE = r'^__version__ = [\'"]([^\'"]*)[\'"]'
with open(VERSIONFILE) as f:
verstrline = f.read()
mo = re.search(VSRE, verstrline, re.M)
if mo:
return mo.group(1)
raise RuntimeError('Unable to find version in {fn}'.format(fn=VERSIONFILE))
LONG_DESCRIPTION = """
``bplsqlparse`` is a non-validating SQL parser module.
It provides support for parsing, splitting and formatting SQL statements.
Visit the `project page <https://github.com/andialbrecht/bplsqlparse>`_ for
additional information and documentation.
**Example Usage**
Splitting SQL statements::
>>> import bplsqlparse
>>> bplsqlparse.split('select * from foo; select * from bar;')
[u'select * from foo; ', u'select * from bar;']
Formatting statements::
>>> sql = 'select * from foo where id in (select id from bar);'
>>> print bplsqlparse.format(sql, reindent=True, keyword_case='upper')
SELECT *
FROM foo
WHERE id IN
(SELECT id
FROM bar);
Parsing::
>>> sql = 'select * from someschema.mytable where id = 1'
>>> res = bplsqlparse.parse(sql)
>>> res
(<Statement 'select...' at 0x9ad08ec>,)
>>> stmt = res[0]
>>> str(stmt) # converting it back to unicode
'select * from someschema.mytable where id = 1'
>>> # This is how the internal representation looks like:
>>> stmt.tokens
(<DML 'select' at 0x9b63c34>,
<Whitespace ' ' at 0x9b63e8c>,
<Operator '*' at 0x9b63e64>,
<Whitespace ' ' at 0x9b63c5c>,
<Keyword 'from' at 0x9b63c84>,
<Whitespace ' ' at 0x9b63cd4>,
<Identifier 'somes...' at 0x9b5c62c>,
<Whitespace ' ' at 0x9b63f04>,
<Where 'where ...' at 0x9b5caac>)
"""
setup(
name='bplsqlparse',
version=get_version(),
author='Andi Albrecht',
author_email='albrecht.andi@gmail.com',
url='https://github.com/andialbrecht/bplsqlparse',
description='Non-validating SQL parser',
long_description=LONG_DESCRIPTION,
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Database',
'Topic :: Software Development',
],
packages=find_packages(exclude=('tests',)),
entry_points={
'console_scripts': [
'sqlformat = bplsqlparse.__main__:main',
]
},
)
| 29.633028
| 79
| 0.634056
|
56e9cbfa88e0caf900a6cfa2e8a9a8c59f8db40d
| 106,484
|
py
|
Python
|
tests/dummy_data.py
|
chakki-works/typot
|
8ed20fc2102acccc8703aae4d206d3906f5dca47
|
[
"Apache-2.0"
] | 345
|
2017-05-30T10:04:54.000Z
|
2022-02-18T07:49:39.000Z
|
tests/dummy_data.py
|
chakki-works/typot
|
8ed20fc2102acccc8703aae4d206d3906f5dca47
|
[
"Apache-2.0"
] | 4
|
2017-05-31T00:30:30.000Z
|
2017-12-11T17:08:40.000Z
|
tests/dummy_data.py
|
chakki-works/typot
|
8ed20fc2102acccc8703aae4d206d3906f5dca47
|
[
"Apache-2.0"
] | 17
|
2017-06-05T10:40:32.000Z
|
2020-04-20T09:03:39.000Z
|
pull_request_created = """
{
"action": "opened",
"number": 2,
"pull_request": {
"url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2",
"id": 122156051,
"html_url": "https://github.com/chakki-works/typot-demo/pull/2",
"diff_url": "https://github.com/chakki-works/typot-demo/pull/2.diff",
"patch_url": "https://github.com/chakki-works/typot-demo/pull/2.patch",
"issue_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/2",
"number": 2,
"state": "open",
"locked": false,
"title": "Update README.md",
"user": {
"login": "icoxfog417",
"id": 544269,
"avatar_url": "https://avatars3.githubusercontent.com/u/544269?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/icoxfog417",
"html_url": "https://github.com/icoxfog417",
"followers_url": "https://api.github.com/users/icoxfog417/followers",
"following_url": "https://api.github.com/users/icoxfog417/following{/other_user}",
"gists_url": "https://api.github.com/users/icoxfog417/gists{/gist_id}",
"starred_url": "https://api.github.com/users/icoxfog417/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/icoxfog417/subscriptions",
"organizations_url": "https://api.github.com/users/icoxfog417/orgs",
"repos_url": "https://api.github.com/users/icoxfog417/repos",
"events_url": "https://api.github.com/users/icoxfog417/events{/privacy}",
"received_events_url": "https://api.github.com/users/icoxfog417/received_events",
"type": "User",
"site_admin": false
},
"body": "Pull Request Create Test",
"created_at": "2017-05-24T06:50:51Z",
"updated_at": "2017-05-24T06:50:51Z",
"closed_at": null,
"merged_at": null,
"merge_commit_sha": null,
"assignee": null,
"assignees": [
],
"requested_reviewers": [
],
"milestone": null,
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2/commits",
"review_comments_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2/comments",
"review_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/comments{/number}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/2/comments",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/2511e9cc5b0fa1f44feebc9c29d08304e7744269",
"head": {
"label": "chakki-works:dev",
"ref": "dev",
"sha": "2511e9cc5b0fa1f44feebc9c29d08304e7744269",
"user": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"repo": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-24T02:39:34Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 1,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 1,
"forks": 0,
"open_issues": 1,
"watchers": 0,
"default_branch": "master"
}
},
"base": {
"label": "chakki-works:master",
"ref": "master",
"sha": "a2573fb6cc5612219823765d0113938c666c1855",
"user": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"repo": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-24T02:39:34Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 1,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 1,
"forks": 0,
"open_issues": 1,
"watchers": 0,
"default_branch": "master"
}
},
"_links": {
"self": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2"
},
"html": {
"href": "https://github.com/chakki-works/typot-demo/pull/2"
},
"issue": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/issues/2"
},
"comments": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/issues/2/comments"
},
"review_comments": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2/comments"
},
"review_comment": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/comments{/number}"
},
"commits": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2/commits"
},
"statuses": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/statuses/2511e9cc5b0fa1f44feebc9c29d08304e7744269"
}
},
"merged": false,
"mergeable": null,
"rebaseable": null,
"mergeable_state": "unknown",
"merged_by": null,
"comments": 0,
"review_comments": 0,
"maintainer_can_modify": false,
"commits": 1,
"additions": 1,
"deletions": 1,
"changed_files": 1
},
"repository": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-24T02:39:34Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 1,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 1,
"forks": 0,
"open_issues": 1,
"watchers": 0,
"default_branch": "master"
},
"organization": {
"login": "chakki-works",
"id": 25578516,
"url": "https://api.github.com/orgs/chakki-works",
"repos_url": "https://api.github.com/orgs/chakki-works/repos",
"events_url": "https://api.github.com/orgs/chakki-works/events",
"hooks_url": "https://api.github.com/orgs/chakki-works/hooks",
"issues_url": "https://api.github.com/orgs/chakki-works/issues",
"members_url": "https://api.github.com/orgs/chakki-works/members{/member}",
"public_members_url": "https://api.github.com/orgs/chakki-works/public_members{/member}",
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"description": "Our mission is to enable everyone leave the office by the tea time"
},
"sender": {
"login": "icoxfog417",
"id": 544269,
"avatar_url": "https://avatars3.githubusercontent.com/u/544269?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/icoxfog417",
"html_url": "https://github.com/icoxfog417",
"followers_url": "https://api.github.com/users/icoxfog417/followers",
"following_url": "https://api.github.com/users/icoxfog417/following{/other_user}",
"gists_url": "https://api.github.com/users/icoxfog417/gists{/gist_id}",
"starred_url": "https://api.github.com/users/icoxfog417/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/icoxfog417/subscriptions",
"organizations_url": "https://api.github.com/users/icoxfog417/orgs",
"repos_url": "https://api.github.com/users/icoxfog417/repos",
"events_url": "https://api.github.com/users/icoxfog417/events{/privacy}",
"received_events_url": "https://api.github.com/users/icoxfog417/received_events",
"type": "User",
"site_admin": false
},
"installation": {
"id": 1111
}
}
"""
pull_request_closed = """
{
"action": "closed",
"number": 2,
"pull_request": {
"url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2",
"id": 122156051,
"html_url": "https://github.com/chakki-works/typot-demo/pull/2",
"diff_url": "https://github.com/chakki-works/typot-demo/pull/2.diff",
"patch_url": "https://github.com/chakki-works/typot-demo/pull/2.patch",
"issue_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/2",
"number": 2,
"state": "closed",
"locked": false,
"title": "Update README.md",
"user": {
"login": "icoxfog417",
"id": 544269,
"avatar_url": "https://avatars3.githubusercontent.com/u/544269?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/icoxfog417",
"html_url": "https://github.com/icoxfog417",
"followers_url": "https://api.github.com/users/icoxfog417/followers",
"following_url": "https://api.github.com/users/icoxfog417/following{/other_user}",
"gists_url": "https://api.github.com/users/icoxfog417/gists{/gist_id}",
"starred_url": "https://api.github.com/users/icoxfog417/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/icoxfog417/subscriptions",
"organizations_url": "https://api.github.com/users/icoxfog417/orgs",
"repos_url": "https://api.github.com/users/icoxfog417/repos",
"events_url": "https://api.github.com/users/icoxfog417/events{/privacy}",
"received_events_url": "https://api.github.com/users/icoxfog417/received_events",
"type": "User",
"site_admin": false
},
"body": "Pull Request Create Test",
"created_at": "2017-05-24T06:50:51Z",
"updated_at": "2017-05-24T07:05:40Z",
"closed_at": "2017-05-24T07:05:39Z",
"merged_at": "2017-05-24T07:05:39Z",
"merge_commit_sha": "7d895e85cddbb7b0adf976d14ef717f2617a6876",
"assignee": null,
"assignees": [
],
"requested_reviewers": [
],
"milestone": null,
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2/commits",
"review_comments_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2/comments",
"review_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/comments{/number}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/2/comments",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/2511e9cc5b0fa1f44feebc9c29d08304e7744269",
"head": {
"label": "chakki-works:dev",
"ref": "dev",
"sha": "2511e9cc5b0fa1f44feebc9c29d08304e7744269",
"user": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"repo": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-24T07:05:39Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 1,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 0,
"forks": 0,
"open_issues": 0,
"watchers": 0,
"default_branch": "master"
}
},
"base": {
"label": "chakki-works:master",
"ref": "master",
"sha": "a2573fb6cc5612219823765d0113938c666c1855",
"user": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"repo": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-24T07:05:39Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 1,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 0,
"forks": 0,
"open_issues": 0,
"watchers": 0,
"default_branch": "master"
}
},
"_links": {
"self": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2"
},
"html": {
"href": "https://github.com/chakki-works/typot-demo/pull/2"
},
"issue": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/issues/2"
},
"comments": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/issues/2/comments"
},
"review_comments": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2/comments"
},
"review_comment": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/comments{/number}"
},
"commits": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/2/commits"
},
"statuses": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/statuses/2511e9cc5b0fa1f44feebc9c29d08304e7744269"
}
},
"merged": true,
"mergeable": null,
"rebaseable": null,
"mergeable_state": "unknown",
"merged_by": {
"login": "icoxfog417",
"id": 544269,
"avatar_url": "https://avatars3.githubusercontent.com/u/544269?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/icoxfog417",
"html_url": "https://github.com/icoxfog417",
"followers_url": "https://api.github.com/users/icoxfog417/followers",
"following_url": "https://api.github.com/users/icoxfog417/following{/other_user}",
"gists_url": "https://api.github.com/users/icoxfog417/gists{/gist_id}",
"starred_url": "https://api.github.com/users/icoxfog417/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/icoxfog417/subscriptions",
"organizations_url": "https://api.github.com/users/icoxfog417/orgs",
"repos_url": "https://api.github.com/users/icoxfog417/repos",
"events_url": "https://api.github.com/users/icoxfog417/events{/privacy}",
"received_events_url": "https://api.github.com/users/icoxfog417/received_events",
"type": "User",
"site_admin": false
},
"comments": 1,
"review_comments": 0,
"maintainer_can_modify": false,
"commits": 1,
"additions": 1,
"deletions": 1,
"changed_files": 1
},
"repository": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-24T07:05:39Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 1,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 0,
"forks": 0,
"open_issues": 0,
"watchers": 0,
"default_branch": "master"
},
"organization": {
"login": "chakki-works",
"id": 25578516,
"url": "https://api.github.com/orgs/chakki-works",
"repos_url": "https://api.github.com/orgs/chakki-works/repos",
"events_url": "https://api.github.com/orgs/chakki-works/events",
"hooks_url": "https://api.github.com/orgs/chakki-works/hooks",
"issues_url": "https://api.github.com/orgs/chakki-works/issues",
"members_url": "https://api.github.com/orgs/chakki-works/members{/member}",
"public_members_url": "https://api.github.com/orgs/chakki-works/public_members{/member}",
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"description": "Our mission is to enable everyone leave the office by the tea time"
},
"sender": {
"login": "icoxfog417",
"id": 544269,
"avatar_url": "https://avatars3.githubusercontent.com/u/544269?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/icoxfog417",
"html_url": "https://github.com/icoxfog417",
"followers_url": "https://api.github.com/users/icoxfog417/followers",
"following_url": "https://api.github.com/users/icoxfog417/following{/other_user}",
"gists_url": "https://api.github.com/users/icoxfog417/gists{/gist_id}",
"starred_url": "https://api.github.com/users/icoxfog417/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/icoxfog417/subscriptions",
"organizations_url": "https://api.github.com/users/icoxfog417/orgs",
"repos_url": "https://api.github.com/users/icoxfog417/repos",
"events_url": "https://api.github.com/users/icoxfog417/events{/privacy}",
"received_events_url": "https://api.github.com/users/icoxfog417/received_events",
"type": "User",
"site_admin": false
},
"installation": {
"id": 11111
}
}
"""
review_changed = r"""
{
"action": "edited",
"changes": {
"body": {
"from": "\"hoge\" is typo? \n- [ ] hoge-1\n- [ ] hoge-2\n- [ ] hoge-3"
}
},
"comment": {
"url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/comments/118856819",
"pull_request_review_id": 40687574,
"id": 118856819,
"diff_hunk": "@@ -1,2 +1,7 @@\n # typot-demo",
"path": "README.md",
"position": 1,
"original_position": 1,
"commit_id": "adbdfa392e0cab7766dbcae6cee82bf5fd11a471",
"original_commit_id": "adbdfa392e0cab7766dbcae6cee82bf5fd11a471",
"user": {
"login": "typot[bot]",
"id": 28912751,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/typot%5Bbot%5D",
"html_url": "https://github.com/apps/typot",
"followers_url": "https://api.github.com/users/typot%5Bbot%5D/followers",
"following_url": "https://api.github.com/users/typot%5Bbot%5D/following{/other_user}",
"gists_url": "https://api.github.com/users/typot%5Bbot%5D/gists{/gist_id}",
"starred_url": "https://api.github.com/users/typot%5Bbot%5D/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/typot%5Bbot%5D/subscriptions",
"organizations_url": "https://api.github.com/users/typot%5Bbot%5D/orgs",
"repos_url": "https://api.github.com/users/typot%5Bbot%5D/repos",
"events_url": "https://api.github.com/users/typot%5Bbot%5D/events{/privacy}",
"received_events_url": "https://api.github.com/users/typot%5Bbot%5D/received_events",
"type": "Bot",
"site_admin": false
},
"body": "\"hoge\" is typo? \n- [ ] hoge-1\n- [x] hoge-2\n- [ ] hoge-3",
"created_at": "2017-05-29T01:59:50Z",
"updated_at": "2017-05-29T01:59:50Z",
"html_url": "https://github.com/chakki-works/typot-demo/pull/3#discussion_r118856819",
"pull_request_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/3",
"_links": {
"self": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/comments/118856819"
},
"html": {
"href": "https://github.com/chakki-works/typot-demo/pull/3#discussion_r118856819"
},
"pull_request": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/3"
}
}
},
"pull_request": {
"url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/3",
"id": 122168599,
"html_url": "https://github.com/chakki-works/typot-demo/pull/3",
"diff_url": "https://github.com/chakki-works/typot-demo/pull/3.diff",
"patch_url": "https://github.com/chakki-works/typot-demo/pull/3.patch",
"issue_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/3",
"number": 3,
"state": "open",
"locked": false,
"title": "Update README.md",
"user": {
"login": "icoxfog417",
"id": 544269,
"avatar_url": "https://avatars3.githubusercontent.com/u/544269?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/icoxfog417",
"html_url": "https://github.com/icoxfog417",
"followers_url": "https://api.github.com/users/icoxfog417/followers",
"following_url": "https://api.github.com/users/icoxfog417/following{/other_user}",
"gists_url": "https://api.github.com/users/icoxfog417/gists{/gist_id}",
"starred_url": "https://api.github.com/users/icoxfog417/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/icoxfog417/subscriptions",
"organizations_url": "https://api.github.com/users/icoxfog417/orgs",
"repos_url": "https://api.github.com/users/icoxfog417/repos",
"events_url": "https://api.github.com/users/icoxfog417/events{/privacy}",
"received_events_url": "https://api.github.com/users/icoxfog417/received_events",
"type": "User",
"site_admin": false
},
"body": "Update README.md description",
"created_at": "2017-05-24T08:12:08Z",
"updated_at": "2017-05-29T02:00:04Z",
"closed_at": null,
"merged_at": null,
"merge_commit_sha": "1ccc304340b7d8d062b256dc48294acbb7991bdd",
"assignee": {
"login": "icoxfog417",
"id": 544269,
"avatar_url": "https://avatars3.githubusercontent.com/u/544269?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/icoxfog417",
"html_url": "https://github.com/icoxfog417",
"followers_url": "https://api.github.com/users/icoxfog417/followers",
"following_url": "https://api.github.com/users/icoxfog417/following{/other_user}",
"gists_url": "https://api.github.com/users/icoxfog417/gists{/gist_id}",
"starred_url": "https://api.github.com/users/icoxfog417/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/icoxfog417/subscriptions",
"organizations_url": "https://api.github.com/users/icoxfog417/orgs",
"repos_url": "https://api.github.com/users/icoxfog417/repos",
"events_url": "https://api.github.com/users/icoxfog417/events{/privacy}",
"received_events_url": "https://api.github.com/users/icoxfog417/received_events",
"type": "User",
"site_admin": false
},
"assignees": [
{
"login": "icoxfog417",
"id": 544269,
"avatar_url": "https://avatars3.githubusercontent.com/u/544269?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/icoxfog417",
"html_url": "https://github.com/icoxfog417",
"followers_url": "https://api.github.com/users/icoxfog417/followers",
"following_url": "https://api.github.com/users/icoxfog417/following{/other_user}",
"gists_url": "https://api.github.com/users/icoxfog417/gists{/gist_id}",
"starred_url": "https://api.github.com/users/icoxfog417/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/icoxfog417/subscriptions",
"organizations_url": "https://api.github.com/users/icoxfog417/orgs",
"repos_url": "https://api.github.com/users/icoxfog417/repos",
"events_url": "https://api.github.com/users/icoxfog417/events{/privacy}",
"received_events_url": "https://api.github.com/users/icoxfog417/received_events",
"type": "User",
"site_admin": false
}
],
"requested_reviewers": [
],
"milestone": null,
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/3/commits",
"review_comments_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/3/comments",
"review_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/comments{/number}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/3/comments",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/adbdfa392e0cab7766dbcae6cee82bf5fd11a471",
"head": {
"label": "chakki-works:dev",
"ref": "dev",
"sha": "adbdfa392e0cab7766dbcae6cee82bf5fd11a471",
"user": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"repo": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-24T08:12:08Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 2,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 1,
"forks": 0,
"open_issues": 1,
"watchers": 0,
"default_branch": "master"
}
},
"base": {
"label": "chakki-works:master",
"ref": "master",
"sha": "7d895e85cddbb7b0adf976d14ef717f2617a6876",
"user": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"repo": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-24T08:12:08Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 2,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 1,
"forks": 0,
"open_issues": 1,
"watchers": 0,
"default_branch": "master"
}
},
"_links": {
"self": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/3"
},
"html": {
"href": "https://github.com/chakki-works/typot-demo/pull/3"
},
"issue": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/issues/3"
},
"comments": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/issues/3/comments"
},
"review_comments": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/3/comments"
},
"review_comment": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/comments{/number}"
},
"commits": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/3/commits"
},
"statuses": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/statuses/adbdfa392e0cab7766dbcae6cee82bf5fd11a471"
}
}
},
"repository": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-24T08:12:08Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 2,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 1,
"forks": 0,
"open_issues": 1,
"watchers": 0,
"default_branch": "master"
},
"organization": {
"login": "chakki-works",
"id": 25578516,
"url": "https://api.github.com/orgs/chakki-works",
"repos_url": "https://api.github.com/orgs/chakki-works/repos",
"events_url": "https://api.github.com/orgs/chakki-works/events",
"hooks_url": "https://api.github.com/orgs/chakki-works/hooks",
"issues_url": "https://api.github.com/orgs/chakki-works/issues",
"members_url": "https://api.github.com/orgs/chakki-works/members{/member}",
"public_members_url": "https://api.github.com/orgs/chakki-works/public_members{/member}",
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"description": "Our mission is to enable everyone leave the office by the tea time"
},
"sender": {
"login": "icoxfog417",
"id": 544269,
"avatar_url": "https://avatars3.githubusercontent.com/u/544269?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/icoxfog417",
"html_url": "https://github.com/icoxfog417",
"followers_url": "https://api.github.com/users/icoxfog417/followers",
"following_url": "https://api.github.com/users/icoxfog417/following{/other_user}",
"gists_url": "https://api.github.com/users/icoxfog417/gists{/gist_id}",
"starred_url": "https://api.github.com/users/icoxfog417/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/icoxfog417/subscriptions",
"organizations_url": "https://api.github.com/users/icoxfog417/orgs",
"repos_url": "https://api.github.com/users/icoxfog417/repos",
"events_url": "https://api.github.com/users/icoxfog417/events{/privacy}",
"received_events_url": "https://api.github.com/users/icoxfog417/received_events",
"type": "User",
"site_admin": false
},
"installation": {
"id": 11111
}
}
"""
fix_target_pr = r"""
{
"action": "opened",
"number": 4,
"pull_request": {
"url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/4",
"id": 122827018,
"html_url": "https://github.com/chakki-works/typot-demo/pull/4",
"diff_url": "https://github.com/chakki-works/typot-demo/pull/4.diff",
"patch_url": "https://github.com/chakki-works/typot-demo/pull/4.patch",
"issue_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/4",
"number": 4,
"state": "open",
"locked": false,
"title": "Typo exists in content.md",
"user": {
"login": "icoxfog417",
"id": 544269,
"avatar_url": "https://avatars3.githubusercontent.com/u/544269?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/icoxfog417",
"html_url": "https://github.com/icoxfog417",
"followers_url": "https://api.github.com/users/icoxfog417/followers",
"following_url": "https://api.github.com/users/icoxfog417/following{/other_user}",
"gists_url": "https://api.github.com/users/icoxfog417/gists{/gist_id}",
"starred_url": "https://api.github.com/users/icoxfog417/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/icoxfog417/subscriptions",
"organizations_url": "https://api.github.com/users/icoxfog417/orgs",
"repos_url": "https://api.github.com/users/icoxfog417/repos",
"events_url": "https://api.github.com/users/icoxfog417/events{/privacy}",
"received_events_url": "https://api.github.com/users/icoxfog417/received_events",
"type": "User",
"site_admin": false
},
"body": "",
"created_at": "2017-05-29T04:37:11Z",
"updated_at": "2017-05-29T04:37:11Z",
"closed_at": null,
"merged_at": null,
"merge_commit_sha": null,
"assignee": null,
"assignees": [
],
"requested_reviewers": [
],
"milestone": null,
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/4/commits",
"review_comments_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/4/comments",
"review_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls/comments{/number}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/4/comments",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/f0d3ab26d259ef474996d22dcff21dfce6ca4492",
"head": {
"label": "chakki-works:content",
"ref": "content",
"sha": "f0d3ab26d259ef474996d22dcff21dfce6ca4492",
"user": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"repo": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-29T04:36:46Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 2,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 2,
"forks": 0,
"open_issues": 2,
"watchers": 0,
"default_branch": "master"
}
},
"base": {
"label": "chakki-works:master",
"ref": "master",
"sha": "5d85936b799357162921c646d596b32ba29e2717",
"user": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"repo": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-29T04:36:46Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 2,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 2,
"forks": 0,
"open_issues": 2,
"watchers": 0,
"default_branch": "master"
}
},
"_links": {
"self": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/4"
},
"html": {
"href": "https://github.com/chakki-works/typot-demo/pull/4"
},
"issue": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/issues/4"
},
"comments": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/issues/4/comments"
},
"review_comments": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/4/comments"
},
"review_comment": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/comments{/number}"
},
"commits": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/pulls/4/commits"
},
"statuses": {
"href": "https://api.github.com/repos/chakki-works/typot-demo/statuses/f0d3ab26d259ef474996d22dcff21dfce6ca4492"
}
},
"merged": false,
"mergeable": null,
"rebaseable": null,
"mergeable_state": "unknown",
"merged_by": null,
"comments": 0,
"review_comments": 0,
"maintainer_can_modify": false,
"commits": 1,
"additions": 2,
"deletions": 0,
"changed_files": 1
},
"repository": {
"id": 92240778,
"name": "typot-demo",
"full_name": "chakki-works/typot-demo",
"owner": {
"login": "chakki-works",
"id": 25578516,
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/chakki-works",
"html_url": "https://github.com/chakki-works",
"followers_url": "https://api.github.com/users/chakki-works/followers",
"following_url": "https://api.github.com/users/chakki-works/following{/other_user}",
"gists_url": "https://api.github.com/users/chakki-works/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chakki-works/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chakki-works/subscriptions",
"organizations_url": "https://api.github.com/users/chakki-works/orgs",
"repos_url": "https://api.github.com/users/chakki-works/repos",
"events_url": "https://api.github.com/users/chakki-works/events{/privacy}",
"received_events_url": "https://api.github.com/users/chakki-works/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/chakki-works/typot-demo",
"description": "to test typot",
"fork": false,
"url": "https://api.github.com/repos/chakki-works/typot-demo",
"forks_url": "https://api.github.com/repos/chakki-works/typot-demo/forks",
"keys_url": "https://api.github.com/repos/chakki-works/typot-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/chakki-works/typot-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/chakki-works/typot-demo/teams",
"hooks_url": "https://api.github.com/repos/chakki-works/typot-demo/hooks",
"issue_events_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/chakki-works/typot-demo/events",
"assignees_url": "https://api.github.com/repos/chakki-works/typot-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/chakki-works/typot-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/chakki-works/typot-demo/tags",
"blobs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/chakki-works/typot-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/chakki-works/typot-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/chakki-works/typot-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/chakki-works/typot-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/chakki-works/typot-demo/languages",
"stargazers_url": "https://api.github.com/repos/chakki-works/typot-demo/stargazers",
"contributors_url": "https://api.github.com/repos/chakki-works/typot-demo/contributors",
"subscribers_url": "https://api.github.com/repos/chakki-works/typot-demo/subscribers",
"subscription_url": "https://api.github.com/repos/chakki-works/typot-demo/subscription",
"commits_url": "https://api.github.com/repos/chakki-works/typot-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/chakki-works/typot-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/chakki-works/typot-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/chakki-works/typot-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/chakki-works/typot-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/chakki-works/typot-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/chakki-works/typot-demo/merges",
"archive_url": "https://api.github.com/repos/chakki-works/typot-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/chakki-works/typot-demo/downloads",
"issues_url": "https://api.github.com/repos/chakki-works/typot-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/chakki-works/typot-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/chakki-works/typot-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/chakki-works/typot-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/chakki-works/typot-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/chakki-works/typot-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/chakki-works/typot-demo/deployments",
"created_at": "2017-05-24T02:24:37Z",
"updated_at": "2017-05-24T02:24:37Z",
"pushed_at": "2017-05-29T04:36:46Z",
"git_url": "git://github.com/chakki-works/typot-demo.git",
"ssh_url": "git@github.com:chakki-works/typot-demo.git",
"clone_url": "https://github.com/chakki-works/typot-demo.git",
"svn_url": "https://github.com/chakki-works/typot-demo",
"homepage": null,
"size": 2,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 2,
"forks": 0,
"open_issues": 2,
"watchers": 0,
"default_branch": "master"
},
"organization": {
"login": "chakki-works",
"id": 25578516,
"url": "https://api.github.com/orgs/chakki-works",
"repos_url": "https://api.github.com/orgs/chakki-works/repos",
"events_url": "https://api.github.com/orgs/chakki-works/events",
"hooks_url": "https://api.github.com/orgs/chakki-works/hooks",
"issues_url": "https://api.github.com/orgs/chakki-works/issues",
"members_url": "https://api.github.com/orgs/chakki-works/members{/member}",
"public_members_url": "https://api.github.com/orgs/chakki-works/public_members{/member}",
"avatar_url": "https://avatars0.githubusercontent.com/u/25578516?v=3",
"description": "Our mission is to enable everyone leave the office by the tea time"
},
"sender": {
"login": "icoxfog417",
"id": 544269,
"avatar_url": "https://avatars3.githubusercontent.com/u/544269?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/icoxfog417",
"html_url": "https://github.com/icoxfog417",
"followers_url": "https://api.github.com/users/icoxfog417/followers",
"following_url": "https://api.github.com/users/icoxfog417/following{/other_user}",
"gists_url": "https://api.github.com/users/icoxfog417/gists{/gist_id}",
"starred_url": "https://api.github.com/users/icoxfog417/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/icoxfog417/subscriptions",
"organizations_url": "https://api.github.com/users/icoxfog417/orgs",
"repos_url": "https://api.github.com/users/icoxfog417/repos",
"events_url": "https://api.github.com/users/icoxfog417/events{/privacy}",
"received_events_url": "https://api.github.com/users/icoxfog417/received_events",
"type": "User",
"site_admin": false
},
"installation": {
"id": 11111
}
}
"""
diff_sample = """
diff --git a/content.md b/content.md
index b7ab025..67ea8f3 100644
--- a/content.md
+++ b/content.md
@@ -1,5 +1,7 @@
# Typot Demo Content
* Now release humans from checking the typos
-* Now relase humans from checking the typos
+* Now release humans from checking the typos
* Now release humans fram checkingg the typos
+
+ohh, typos!
"""
diff_from_middle = """
diff --git a/README.md b/README.md
index dc3326a..ab9eb25 100644
--- a/README.md
+++ b/README.md
@@ -3,3 +3,5 @@
Typot automatically detect & fix typo!
You can try its feature on this repository.
+
+I made mistoke here.
"""
| 55.985279
| 125
| 0.653469
|
282a375e0c8373b47cb58bb41783ef1cfea3cba8
| 19,625
|
py
|
Python
|
features/knowledgegraph.py
|
karafecho/icees-api
|
fc2b6865974075e42b88ccff96b3e9cc90da7596
|
[
"MIT"
] | 1
|
2018-08-06T18:53:34.000Z
|
2018-08-06T18:53:34.000Z
|
features/knowledgegraph.py
|
karafecho/icees-api
|
fc2b6865974075e42b88ccff96b3e9cc90da7596
|
[
"MIT"
] | 1
|
2021-03-31T21:45:33.000Z
|
2021-03-31T21:45:33.000Z
|
features/knowledgegraph.py
|
karafecho/icees-api
|
fc2b6865974075e42b88ccff96b3e9cc90da7596
|
[
"MIT"
] | 1
|
2021-03-22T16:02:06.000Z
|
2021-03-22T16:02:06.000Z
|
import json
import os
import datetime
import traceback
import itertools
from functools import reduce, partial
import re
import logging
from sqlalchemy import Table, Column, Integer, String, MetaData, create_engine, func, Sequence, between
from sqlalchemy.sql import select
import numpy as np
import inflection
from tx.functional.either import Left, Right
from tx.functional.maybe import Nothing, Just
import tx.functional.maybe as maybe
from tx.functional.utils import compose
from utils import to_qualifiers
from .features import features, lookUpFeatureClass, features_dict
from .model import get_ids_by_feature, select_associations_to_all_features, select_feature_matrix
from .identifiers import get_identifiers, get_features_by_identifier
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
schema = {
"population_of_individual_organisms": {
"chemical_substance": ["correlated_with"],
"disease": ["correlated_with"],
"phenotypic_feature": ["correlated_with"],
"disease_or_phenotypic_feature": ["correlated_with"],
"chemical_substance": ["correlated_with"],
"environment": ["correlated_with"],
"activity_and_behavior": ["correlated_with"],
"drug": ["correlated_with"],
"named_thing": ["correlated_with"]
}
}
subtypes = {
"chemical_substance": ["drug"],
"disease_or_phenotypic_feature": ["disease", "phenotypic_feature"],
"named_thing": ["chemical_substance", "disease_or_phenotypeic_feature", "environment"]
}
TOOL_VERSION = "4.0.0"
def closure_subtype(node_type):
return reduce(lambda x, y : x + y, map(closure_subtype, subtypes.get(node_type, [])), [node_type])
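# Worked example (illustrative), using the `subtypes` table above:
#   closure_subtype("chemical_substance")
#     -> ["chemical_substance", "drug"]
#   closure_subtype("named_thing")
#     -> ["named_thing", "chemical_substance", "drug",
#         "disease_or_phenotypic_feature", "disease", "phenotypic_feature", "environment"]
# i.e. a node type expands to itself plus the transitive closure of its declared subtypes.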
def name_to_ids(table, filter_regex, node_name):
return list(dict.fromkeys(filter(lambda x: re.match(filter_regex, x), get_identifiers(table, node_name, True))))
def gen_edge_id(cohort_id, node_name, node_id):
return cohort_id + "_" + node_name + "_" + node_id
def gen_node_id_and_equivalent_ids(node_ids):
id_prefixes = ["CHEBI", "CHEMBL", "DRUGBANK", "PUBCHEM", "MESH", "HMDB", "INCHI", "INCHIKEY", "UNII", "KEGG", "gtpo"]
inode_id = next(((i, node_id) for i, node_id in enumerate(node_ids) if any(node_id.upper().startswith(x.upper() + ":") for x in id_prefixes)), None)
if inode_id is None:
return node_ids[0], node_ids[1:]
else:
i, node_id = inode_id
return node_id, node_ids[:i] + node_ids[i+1:]
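# Worked example (illustrative; identifiers are hypothetical): given
#   ["some label", "CHEBI:15365", "MESH:D001241"]
# the first identifier with a preferred prefix is promoted to the primary id:
#   -> ("CHEBI:15365", ["some label", "MESH:D001241"])
# If no identifier has a preferred prefix, the first entry is used unchanged.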
def result(source_id, source_curie, edge_id, node_name, target_id, table, filter_regex, score, score_name):
node_ids = name_to_ids(table, filter_regex, node_name)
if len(node_ids) == 0:
return Nothing
else:
node_id, *equivalent_ids = gen_node_id_and_equivalent_ids(node_ids)
return Just({
"node_bindings" : [
{"qg_id": source_id, "kg_id": source_curie},
{"qg_id": target_id, "kg_id": node_id}
],
"edge_bindings" : [
{"qg_id": edge_id, "kg_id": gen_edge_id(source_curie, node_name, node_id)}
],
"score": score,
"score_name": score_name
})
def knowledge_graph_node(node_name, table, filter_regex, biolink_class):
node_ids = name_to_ids(table, filter_regex, node_name)
if len(node_ids) == 0:
return Nothing
else:
node_id, equivalent_ids = gen_node_id_and_equivalent_ids(node_ids)
return Just({
"name": node_name,
"id": node_id,
"equivalent_identifiers": equivalent_ids,
"type": [biolink_class]
})
def knowledge_graph_edge(source_id, node_name, table, filter_regex, feature_property):
node_ids = name_to_ids(table, filter_regex, node_name)
if len(node_ids) == 0:
return Nothing
else:
node_id, *equivalent_ids = gen_node_id_and_equivalent_ids(node_ids)
edge_name = "correlated_with"
return Just({
"type": edge_name,
"id": gen_edge_id(source_id, node_name, node_id),
"source_id": source_id,
"target_id": node_id,
"edge_attributes": feature_property
})
def get(conn, query):
try:
message = query.get("message", query)
query_options = query.get("query_options", {})
cohort_id, table, year, cohort_features, size = message_cohort(conn, query_options)
maximum_p_value = query["query_options"].get("maximum_p_value", MAX_P_VAL_DEFAULT)
filter_regex = query["query_options"].get("regex", ".*")
feature = to_qualifiers(query["query_options"]["feature"])
query_graph = message.get("query_graph", message.get("machine_question"))
nodes = query_graph["nodes"]
edges = query_graph["edges"]
if len(nodes) != 2:
raise NotImplementedError("Number of nodes in query graph must be 2")
if len(edges) != 1:
raise NotImplementedError("Number of edges in query graph must be 1")
nodes_dict = {node_get_id(node): node for node in nodes}
[edge] = edges
source_id = edge["source_id"]
source_node = nodes_dict[source_id]
source_node_type = source_node["type"]
source_curie = cohort_id
if source_node_type not in schema:
raise NotImplementedError("Sounce node must be one of " + str(schema.keys()))
target_id = edge["target_id"]
target_node_type = nodes_dict[target_id]["type"]
supported_node_types = schema[source_node_type]
if target_node_type not in supported_node_types:
raise NotImplementedError("Target node must be one of " + str(supported_node_types.keys()))
supported_edge_types = supported_node_types[target_node_type]
edge_id = edge_get_id(edge)
edge_type = edge["type"]
if edge_type not in supported_edge_types:
raise NotImplementedError("Edge must be one of " + str(supported_edge_types))
cohort_id, size = get_ids_by_feature(conn, table, year, cohort_features)
supported_types = closure_subtype(target_node_type)
feature_list = select_associations_to_all_features(conn, table, year, cohort_id, feature, maximum_p_value, lambda x : inflection.underscore(x.biolink_class) in supported_types)
logger.info(f"feature_list = {feature_list}")
nodes = {}
knowledge_graph_edges = []
results = []
for feature in feature_list:
feature_b = feature["feature_b"]
feature_name = feature_b["feature_name"]
biolink_class = feature_b["biolink_class"]
p_value = feature["p_value"]
knowledge_graph_node(feature_name, table, filter_regex, biolink_class).bind(lambda node: add_node(nodes, node))
knowledge_graph_edge(source_curie, feature_name, table, filter_regex, feature).bind(lambda edge: knowledge_graph_edges.append(edge))
result(source_id, cohort_id, edge_id, feature_name, target_id, table, filter_regex, p_value, "p value").bind(lambda item: results.append(item))
knowledge_graph_nodes = [{
"name": "cohort",
"id": cohort_id,
"type": ["population_of_individual_organisms"]
}] + list(nodes.values())
knowledge_graph = {
"nodes": knowledge_graph_nodes,
"edges": knowledge_graph_edges
}
n_results = len(results)
message = {
"reasoner_id": "ICEES",
"tool_version": TOOL_VERSION,
"datetime": datetime.datetime.now().strftime("%Y-%m-%D %H:%M:%S"),
"n_results": n_results,
"message_code": "OK",
"code_description": "",
"query_graph": query_graph,
"knowledge_graph": knowledge_graph,
"results": results
}
except Exception as e:
traceback.print_exc()
message = {
"reasoner_id": "ICEES",
"tool_version": TOOL_VERSION,
"datetime": datetime.datetime.now().strftime("%Y-%m-%D %H:%M:%S"),
"message_code": "Error",
"code_description": str(e),
}
return message
def query_feature(table, feature):
feature_def = features_dict[table][feature]
ty = feature_def["type"]
if ty == "string":
if "enum" not in feature_def:
return Left("node has type string but has no enum")
else:
return Right({
"feature_name": feature,
"feature_qualifiers": [{
"operator":"=",
"value":v
} for v in feature_def["enum"]]
})
elif ty == "integer":
if "maximum" not in feature_def or "minimum" not in feature_def:
return Left("node has type integer but has no maximum or has no minimum")
else:
return Right({
"feature_name": feature,
"feature_qualifiers": [{
"operator":"=",
"value":v
} for v in range(feature_def["minimum"], feature_def["maximum"]+1)]
})
else:
return Left(f"unsupported node type {ty}")
def co_occurrence_feature_edge(conn, table, year, cohort_features, src_feature, tgt_feature):
return (
query_feature(table, src_feature)
.bind(lambda src_query_feature: (
query_feature(table, tgt_feature)
.map(lambda tgt_query_feature: (
select_feature_matrix(conn, table, year, cohort_features, year, src_query_feature, tgt_query_feature)["p_value"]
))
))
)
def feature_names(table, node_curie):
return (
maybe.from_python(node_curie)
.rec(Right, Left("no curie specified at node"))
.bind(partial(get_features_by_identifier, table))
)
def co_occurrence_edge(conn, table, year, cohort_features, src_node, tgt_node):
def handle_src_and_tgt_features(src_features, tgt_features):
edge_property_value = []
for src_feature in src_features:
for tgt_feature in tgt_features:
edge = co_occurrence_feature_edge(conn, table, year, cohort_features, src_feature, tgt_feature)
if isinstance(edge, Right):
edge_property_value.append({
"src_feature": src_feature,
"tgt_feature": tgt_feature,
"p_value": edge.value
})
else:
return edge
if len(edge_property_value) == 0:
return Left("no edge found")
else:
return Right(edge_property_value)
return (
feature_names(table, src_node["id"])
.bind(lambda src_features: (
feature_names(table, tgt_node["id"])
.bind(lambda tgt_features: (
handle_src_and_tgt_features(src_features, tgt_features)
))
))
)
def generate_edge_id(src_node, tgt_node):
return node_get_id(src_node) + "_" + node_get_id(tgt_node)
def node_get_id(node):
node_id = node.get("id")
return node_id if node_id is not None else node.get("node_id")
def edge_get_id(node):
edge_id = node.get("id")
return edge_id if edge_id is not None else node.get("edge_id")
def attr(s):
return lambda d: maybe.from_python(d.get(s))
def generate_edge(src_node, tgt_node, edge_attributes=None):
return {
"id": generate_edge_id(src_node, tgt_node),
"type": "correlated_with",
"source_id": node_get_id(src_node),
"target_id": node_get_id(tgt_node),
**({
"edge_attributes": edge_attributes
} if edge_attributes is not None else {})
}
def convert(attribute_map, qnode):
return {
k : res.value for k, k_qnode in attribute_map.items() if isinstance((res := k_qnode(qnode)), Just)
}
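# Note (illustrative, assuming maybe.from_python maps None to Nothing and any other
# value to Just): the walrus expression keeps only attributes whose extractor returns
# Just, so
#   convert({"id": attr("curie")}, {"curie": "CHEBI:15365"}) -> {"id": "CHEBI:15365"}
#   convert({"id": attr("curie")}, {})                       -> {}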
def convert_qnode_to_node(qnode):
attribute_map = {
"id": attr("curie"),
"type": attr("type")
}
return convert(attribute_map, qnode)
def convert_qedge_to_edge(qedge):
attribute_map = {
"id": compose(edge_get_id, Just),
"type": attr("type"),
"relation": attr("relation"),
"source_id": attr("source_id"),
"target_id": attr("target_id"),
"negated": attr("negated")
}
return convert(attribute_map, qedge)
def message_cohort(conn, cohort_definition):
cohort_id = cohort_definition.get("cohort_id")
if cohort_id is None:
table = cohort_definition.get("table", "patient")
year = cohort_definition.get("year")
features = cohort_definition.get("cohort_features", {})
cohort_id, size = get_ids_by_feature(conn, table, year, features)
else:
cohort_definition = get_cohort_definition_by_id(cohort_id)
if cohort_definition is Nothing:
raise RuntimeError("cohort with cohort_id not found")
else:
table = cohort_definition["table"]
year = cohort_definition["year"]
features = cohort_defintion["features"]
size = cohort_definition["size"]
return cohort_id, table, year, features, size
MAX_P_VAL_DEFAULT = 1
def co_occurrence_overlay(conn, query):
try:
message = query["message"]
query_options = query.get("query_options", {})
cohort_id, table, year, features, size = message_cohort(conn, query_options)
kgraph = message.get("knowledge_graph")
knodes = kgraph["nodes"]
kedges = kgraph["edges"]
nodes = knodes
edges = kedges
overlay_edges = []
for src_node in knodes:
for tgt_node in knodes:
edge_attributes = co_occurrence_edge(conn, table, year, features, src_node, tgt_node)
if isinstance(edge_attributes, Left):
return {
"reasoner_id": "ICEES",
"tool_version": TOOL_VERSION,
"datetime": datetime.datetime.now().strftime("%Y-%m-%D %H:%M:%S"),
"message_code": "Error",
"code_description": edge_attributes.value,
}
else:
overlay_edges.append(generate_edge(src_node, tgt_node, edge_attributes=edge_attributes.value))
knowledge_graph = {
"nodes": nodes,
"edges": edges + overlay_edges
}
message = {
"reasoner_id": "ICEES",
"tool_version": TOOL_VERSION,
"datetime": datetime.datetime.now().strftime("%Y-%m-%D %H:%M:%S"),
"message_code": "OK",
"code_description": "",
"knowledge_graph": knowledge_graph,
}
except Exception as e:
traceback.print_exc()
message = {
"reasoner_id": "ICEES",
"tool_version": TOOL_VERSION,
"datetime": datetime.datetime.now().strftime("%Y-%m-%D %H:%M:%S"),
"message_code": "Error",
"code_description": str(e),
}
return message
def add_node(nodes, node):
node_id = node_get_id(node)
node_curr = nodes.get(node_id)
if node_curr is None:
nodes[node_id] = node
else:
node_curr["name"] += f",{node['name']}"
def one_hop(conn, query):
try:
message = query["message"]
query_options = query.get("query_options", {})
cohort_id, table, year, cohort_features, size = message_cohort(conn, query_options)
maximum_p_value = query.get("query_options", {}).get("maximum_p_value", MAX_P_VAL_DEFAULT)
filter_regex = query.get("query_options", {}).get("regex", ".*")
query_graph = message["query_graph"]
nodes = query_graph["nodes"]
edges = query_graph["edges"]
if len(nodes) != 2:
raise NotImplementedError("Number of nodes in query graph must be 2")
if len(edges) != 1:
raise NotImplementedError("Number of edges in query graph must be 1")
nodes_dict = {node_get_id(node): node for node in nodes}
[edge] = edges
source_id = edge["source_id"]
source_node = nodes_dict[source_id]
source_node_type = source_node.get("type")
source_curie = source_node["curie"]
msource_node_feature_names = feature_names(table, source_curie)
if isinstance(msource_node_feature_names, Left):
raise NotImplementedError(msource_node_feature_names)
else:
source_node_feature_names = msource_node_feature_names.value
target_id = edge["target_id"]
target_node_type = nodes_dict[target_id]["type"]
edge_id = edge_get_id(edge)
feature_set = {}
supported_types = closure_subtype(target_node_type)
for source_node_feature_name in source_node_feature_names:
feature = query_feature(table, source_node_feature_name).value
ataf = select_associations_to_all_features(conn, table, year, cohort_id, feature, maximum_p_value, feature_set=lambda x : inflection.underscore(x.biolink_class) in supported_types)
for feature in ataf:
feature_name = feature["feature_b"]["feature_name"]
biolink_class = feature["feature_b"]["biolink_class"]
if feature_name in feature_set:
_, feature_properties = feature_set[feature_name]
feature_properties.append(feature)
else:
feature_set[feature_name] = biolink_class, [feature]
nodes = {}
knowledge_graph_edges = []
results = []
def p_values(feature_list):
return [feature["p_value"] for feature in feature_list]
for feature_name, (biolink_class, feature_list) in feature_set.items():
knowledge_graph_node(feature_name, table, filter_regex, biolink_class).bind(lambda node: add_node(nodes, node))
knowledge_graph_edge(source_curie, feature_name, table, filter_regex, feature_list).bind(lambda edge: knowledge_graph_edges.append(edge))
result(source_id, source_curie, edge_id, feature_name, target_id, table, filter_regex, p_values(feature_list), "p value").bind(lambda item: results.append(item))
knowledge_graph_nodes = [convert_qnode_to_node(source_node), *nodes.values()]
knowledge_graph = {
"nodes": knowledge_graph_nodes,
"edges": knowledge_graph_edges
}
n_results = len(results)
message = {
"reasoner_id": "ICEES",
"tool_version": TOOL_VERSION,
"datetime": datetime.datetime.now().strftime("%Y-%m-%D %H:%M:%S"),
"n_results": n_results,
"message_code": "OK",
"code_description": "",
"query_graph": query_graph,
"knowledge_graph": knowledge_graph,
"results": results
}
except Exception as e:
traceback.print_exc()
message = {
"reasoner_id": "ICEES",
"tool_version": TOOL_VERSION,
"datetime": datetime.datetime.now().strftime("%Y-%m-%D %H:%M:%S"),
"message_code": "Error",
"code_description": traceback.format_exc(),
}
return message
def get_schema():
return schema
| 36.141805
| 192
| 0.612688
|
e86595a635f8cef138dfe7c23220292b48c5e932
| 2,338
|
py
|
Python
|
src/gtk/toga_gtk/widgets/internal/rows/texticon.py
|
freespace/toga
|
2ae96ddede34b5164b1be3d80a18aa87336f28f0
|
[
"BSD-3-Clause"
] | 1,261
|
2019-03-31T16:28:47.000Z
|
2022-03-31T09:01:23.000Z
|
src/gtk/toga_gtk/widgets/internal/rows/texticon.py
|
freespace/toga
|
2ae96ddede34b5164b1be3d80a18aa87336f28f0
|
[
"BSD-3-Clause"
] | 597
|
2019-04-02T20:02:42.000Z
|
2022-03-30T10:28:47.000Z
|
src/gtk/toga_gtk/widgets/internal/rows/texticon.py
|
freakboy3742/toga
|
3ae8b90fa397384d3df0378ca32449333494f282
|
[
"BSD-3-Clause"
] | 318
|
2019-03-31T18:32:00.000Z
|
2022-03-30T18:07:13.000Z
|
import html
from toga_gtk.libs import Gtk, Pango
from .base import HiddenButtonsRow
class TextIconRow(HiddenButtonsRow):
"""
Create a TextIconRow from a toga.sources.Row.
    A reference to the original row is kept in self.toga_row; this is useful for comparisons.
"""
def __init__(self, factory: callable, *args, **kwargs):
super().__init__(*args, **kwargs)
# This is the factory of the DetailedList implementation.
self.factory = factory
self.icon = self.get_icon(self.interface, self.factory)
text = Gtk.Label(xalign=0)
        # The three lines below are necessary for right-to-left text.
text.set_hexpand(True)
text.set_ellipsize(Pango.EllipsizeMode.END)
text.set_margin_end(12)
text_markup = self.markup(self.interface)
text.set_markup(text_markup)
content = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
vbox.pack_start(text, True, True, 0)
if self.icon is not None:
content.pack_start(self.icon, False, False, 6)
content.pack_start(vbox, True, True, 5)
self.add_content(content)
self._delete_button = Gtk.Button.new_from_icon_name("user-trash-symbolic", Gtk.IconSize.BUTTON)
self._delete_button.connect("clicked", self.gtk_on_delete_clicked)
self.add_button(self._delete_button)
@property
def title(self):
return self.interface.title
@property
def subtitle(self):
return self.interface.subtitle
def get_icon(self, row, factory):
if getattr(row, "icon") is None:
return None
else:
row.icon.bind(factory)
dpr = self.get_scale_factor()
return getattr(row.icon._impl, "native_" + str(32*dpr))
@staticmethod
def markup(row):
markup = [
html.escape(row.title or ''),
'\n',
'<small>', html.escape(row.subtitle or ''), '</small>',
]
return ''.join(markup)
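    # Worked example (illustrative): for a row titled "Tom & Jerry" with subtitle
    # "cartoon", markup(row) yields the Pango string
    #   "Tom &amp; Jerry\n<small>cartoon</small>"
    # html.escape keeps user-provided text from being interpreted as markup.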
def on_right_click(self, rect):
handler = self._dl.interface.on_delete
if handler is not None:
self.toggle_content()
def gtk_on_delete_clicked(self, w: Gtk.ListBoxRow):
self._dl.interface.data.remove(self.interface)
| 29.974359
| 103
| 0.636441
|
a99ae5cd4f2e91170f7655f3668b7ba3520fc087
| 1,515
|
py
|
Python
|
app.py
|
gnmerritt/slack-translation-strings
|
517b97d412a0e16e36dccfddce2071b5d756c23f
|
[
"MIT"
] | null | null | null |
app.py
|
gnmerritt/slack-translation-strings
|
517b97d412a0e16e36dccfddce2071b5d756c23f
|
[
"MIT"
] | null | null | null |
app.py
|
gnmerritt/slack-translation-strings
|
517b97d412a0e16e36dccfddce2071b5d756c23f
|
[
"MIT"
] | null | null | null |
import random
import os
from threading import Thread
import requests
from flask import Flask, jsonify, request
app = Flask(__name__)
VERIFICATION_TOKEN = os.environ.get('V_TOKEN')
APP_TOKEN = os.environ.get('A_TOKEN')
CHANNEL = os.environ.get('A_IN_CHANNEL')
@app.route('/slack', methods=['POST'])
def pick_char():
data = request.get_json()
token = data.get('token', None)
if token != VERIFICATION_TOKEN:
Exception(f"IllegalRequest, got t='{token}'")
challenge = data.get('challenge', None)
if challenge is not None:
return challenge
event = data.get('event', {})
type = event.get('type', None)
channel = event.get('channel')
if type == 'message' and channel == CHANNEL:
text = event.get('text', '')
user = event.get('user', None)
if text and user is not None:
t = Thread(target=mangle_post, args=(user, text))
t.start()
return "Ok"
def mangle_post(user, text):
data = {'text': make_translation(user, text), 'channel': "#translations"}
print(f"sending {data}")
headers = {'Authorization': f"Bearer {APP_TOKEN}"}
res = requests.post('https://slack.com/api/chat.postMessage', json=data, headers=headers)
print(f"got res={res}, json={res.json()}")
def make_translation(user, text):
words = text.split(" ")
some = [w for w in words if random.uniform(0, 1) > 0.25]
return f"<@{user}>: {'_'.join(some)}"
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5002)
| 27.053571
| 93
| 0.631683
|
30ea244bac967a5be460320471025ad65e8ac5dd
| 81,840
|
py
|
Python
|
homeassistant/components/google_assistant/trait.py
|
cociweb/home-assistant_core
|
e2c7d1e31d6deb68724fb842893cc5c49d608d81
|
[
"Apache-2.0"
] | 2
|
2021-07-30T19:15:52.000Z
|
2021-07-30T19:16:00.000Z
|
homeassistant/components/google_assistant/trait.py
|
cociweb/home-assistant_core
|
e2c7d1e31d6deb68724fb842893cc5c49d608d81
|
[
"Apache-2.0"
] | 74
|
2020-08-05T07:20:27.000Z
|
2022-03-23T12:47:28.000Z
|
homeassistant/components/google_assistant/trait.py
|
marecabo/home-assistant
|
e33774a61e7fcc88aff752dfa4618dd26a746872
|
[
"Apache-2.0"
] | 1
|
2021-07-30T19:16:02.000Z
|
2021-07-30T19:16:02.000Z
|
"""Implement the Google Smart Home traits."""
from __future__ import annotations
import logging
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
button,
camera,
cover,
fan,
group,
input_boolean,
input_select,
light,
lock,
media_player,
scene,
script,
select,
sensor,
switch,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.components.humidifier import const as humidifier
from homeassistant.components.lock import STATE_JAMMED, STATE_UNLOCKING
from homeassistant.components.media_player.const import MEDIA_TYPE_CHANNEL
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_BATTERY_LEVEL,
ATTR_CODE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_MODE,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
CAST_APP_ID_HOMEASSISTANT_MEDIA,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_IDLE,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.helpers.network import get_url
from homeassistant.util import color as color_util, dt, temperature as temp_util
from .const import (
CHALLENGE_ACK_NEEDED,
CHALLENGE_FAILED_PIN_NEEDED,
CHALLENGE_PIN_NEEDED,
ERR_ALREADY_ARMED,
ERR_ALREADY_DISARMED,
ERR_ALREADY_STOPPED,
ERR_CHALLENGE_NOT_SETUP,
ERR_FUNCTION_NOT_SUPPORTED,
ERR_NO_AVAILABLE_CHANNEL,
ERR_NOT_SUPPORTED,
ERR_UNSUPPORTED_INPUT,
ERR_VALUE_OUT_OF_RANGE,
)
from .error import ChallengeNeeded, SmartHomeError
_LOGGER = logging.getLogger(__name__)
PREFIX_TRAITS = "action.devices.traits."
TRAIT_CAMERA_STREAM = f"{PREFIX_TRAITS}CameraStream"
TRAIT_ONOFF = f"{PREFIX_TRAITS}OnOff"
TRAIT_DOCK = f"{PREFIX_TRAITS}Dock"
TRAIT_STARTSTOP = f"{PREFIX_TRAITS}StartStop"
TRAIT_BRIGHTNESS = f"{PREFIX_TRAITS}Brightness"
TRAIT_COLOR_SETTING = f"{PREFIX_TRAITS}ColorSetting"
TRAIT_SCENE = f"{PREFIX_TRAITS}Scene"
TRAIT_TEMPERATURE_SETTING = f"{PREFIX_TRAITS}TemperatureSetting"
TRAIT_TEMPERATURE_CONTROL = f"{PREFIX_TRAITS}TemperatureControl"
TRAIT_LOCKUNLOCK = f"{PREFIX_TRAITS}LockUnlock"
TRAIT_FANSPEED = f"{PREFIX_TRAITS}FanSpeed"
TRAIT_MODES = f"{PREFIX_TRAITS}Modes"
TRAIT_INPUTSELECTOR = f"{PREFIX_TRAITS}InputSelector"
TRAIT_OPENCLOSE = f"{PREFIX_TRAITS}OpenClose"
TRAIT_VOLUME = f"{PREFIX_TRAITS}Volume"
TRAIT_ARMDISARM = f"{PREFIX_TRAITS}ArmDisarm"
TRAIT_HUMIDITY_SETTING = f"{PREFIX_TRAITS}HumiditySetting"
TRAIT_TRANSPORT_CONTROL = f"{PREFIX_TRAITS}TransportControl"
TRAIT_MEDIA_STATE = f"{PREFIX_TRAITS}MediaState"
TRAIT_CHANNEL = f"{PREFIX_TRAITS}Channel"
TRAIT_LOCATOR = f"{PREFIX_TRAITS}Locator"
TRAIT_ENERGYSTORAGE = f"{PREFIX_TRAITS}EnergyStorage"
TRAIT_SENSOR_STATE = f"{PREFIX_TRAITS}SensorState"
PREFIX_COMMANDS = "action.devices.commands."
COMMAND_ONOFF = f"{PREFIX_COMMANDS}OnOff"
COMMAND_GET_CAMERA_STREAM = f"{PREFIX_COMMANDS}GetCameraStream"
COMMAND_DOCK = f"{PREFIX_COMMANDS}Dock"
COMMAND_STARTSTOP = f"{PREFIX_COMMANDS}StartStop"
COMMAND_PAUSEUNPAUSE = f"{PREFIX_COMMANDS}PauseUnpause"
COMMAND_BRIGHTNESS_ABSOLUTE = f"{PREFIX_COMMANDS}BrightnessAbsolute"
COMMAND_COLOR_ABSOLUTE = f"{PREFIX_COMMANDS}ColorAbsolute"
COMMAND_ACTIVATE_SCENE = f"{PREFIX_COMMANDS}ActivateScene"
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT = (
f"{PREFIX_COMMANDS}ThermostatTemperatureSetpoint"
)
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE = (
f"{PREFIX_COMMANDS}ThermostatTemperatureSetRange"
)
COMMAND_THERMOSTAT_SET_MODE = f"{PREFIX_COMMANDS}ThermostatSetMode"
COMMAND_LOCKUNLOCK = f"{PREFIX_COMMANDS}LockUnlock"
COMMAND_FANSPEED = f"{PREFIX_COMMANDS}SetFanSpeed"
COMMAND_FANSPEEDRELATIVE = f"{PREFIX_COMMANDS}SetFanSpeedRelative"
COMMAND_MODES = f"{PREFIX_COMMANDS}SetModes"
COMMAND_INPUT = f"{PREFIX_COMMANDS}SetInput"
COMMAND_NEXT_INPUT = f"{PREFIX_COMMANDS}NextInput"
COMMAND_PREVIOUS_INPUT = f"{PREFIX_COMMANDS}PreviousInput"
COMMAND_OPENCLOSE = f"{PREFIX_COMMANDS}OpenClose"
COMMAND_OPENCLOSE_RELATIVE = f"{PREFIX_COMMANDS}OpenCloseRelative"
COMMAND_SET_VOLUME = f"{PREFIX_COMMANDS}setVolume"
COMMAND_VOLUME_RELATIVE = f"{PREFIX_COMMANDS}volumeRelative"
COMMAND_MUTE = f"{PREFIX_COMMANDS}mute"
COMMAND_ARMDISARM = f"{PREFIX_COMMANDS}ArmDisarm"
COMMAND_MEDIA_NEXT = f"{PREFIX_COMMANDS}mediaNext"
COMMAND_MEDIA_PAUSE = f"{PREFIX_COMMANDS}mediaPause"
COMMAND_MEDIA_PREVIOUS = f"{PREFIX_COMMANDS}mediaPrevious"
COMMAND_MEDIA_RESUME = f"{PREFIX_COMMANDS}mediaResume"
COMMAND_MEDIA_SEEK_RELATIVE = f"{PREFIX_COMMANDS}mediaSeekRelative"
COMMAND_MEDIA_SEEK_TO_POSITION = f"{PREFIX_COMMANDS}mediaSeekToPosition"
COMMAND_MEDIA_SHUFFLE = f"{PREFIX_COMMANDS}mediaShuffle"
COMMAND_MEDIA_STOP = f"{PREFIX_COMMANDS}mediaStop"
COMMAND_REVERSE = f"{PREFIX_COMMANDS}Reverse"
COMMAND_SET_HUMIDITY = f"{PREFIX_COMMANDS}SetHumidity"
COMMAND_SELECT_CHANNEL = f"{PREFIX_COMMANDS}selectChannel"
COMMAND_LOCATE = f"{PREFIX_COMMANDS}Locate"
COMMAND_CHARGE = f"{PREFIX_COMMANDS}Charge"
TRAITS = []
def register_trait(trait):
"""Decorate a function to register a trait."""
TRAITS.append(trait)
return trait
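# Hedged usage sketch (the consuming code lives outside this file; names here are
# illustrative): callers are expected to iterate TRAITS and keep the classes whose
# supported() check passes, roughly
#   matching = [T for T in TRAITS if T.supported(domain, features, device_class, attrs)]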
def _google_temp_unit(units):
"""Return Google temperature unit."""
if units == TEMP_FAHRENHEIT:
return "F"
return "C"
def _next_selected(items: list[str], selected: str | None) -> str | None:
"""Return the next item in a item list starting at given value.
If selected is missing in items, None is returned
"""
try:
index = items.index(selected)
except ValueError:
return None
next_item = 0 if index == len(items) - 1 else index + 1
return items[next_item]
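# Illustrative behaviour, hypothetical inputs: _next_selected(["hdmi1", "hdmi2", "tv"],
# "tv") wraps around and returns "hdmi1", while a selected value that is not in the list
# returns None.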
class _Trait:
"""Represents a Trait inside Google Assistant skill."""
commands = []
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return False
def __init__(self, hass, state, config):
"""Initialize a trait for a state."""
self.hass = hass
self.state = state
self.config = config
def sync_attributes(self):
"""Return attributes for a sync request."""
raise NotImplementedError
def query_attributes(self):
"""Return the attributes of this trait for this entity."""
raise NotImplementedError
def can_execute(self, command, params):
"""Test if command can be executed."""
return command in self.commands
async def execute(self, command, data, params, challenge):
"""Execute a trait command."""
raise NotImplementedError
@register_trait
class BrightnessTrait(_Trait):
"""Trait to control brightness of a device.
https://developers.google.com/actions/smarthome/traits/brightness
"""
name = TRAIT_BRIGHTNESS
commands = [COMMAND_BRIGHTNESS_ABSOLUTE]
@staticmethod
def supported(domain, features, device_class, attributes):
"""Test if state is supported."""
if domain == light.DOMAIN:
color_modes = attributes.get(light.ATTR_SUPPORTED_COLOR_MODES)
return light.brightness_supported(color_modes)
return False
def sync_attributes(self):
"""Return brightness attributes for a sync request."""
return {}
def query_attributes(self):
"""Return brightness query attributes."""
domain = self.state.domain
response = {}
if domain == light.DOMAIN:
brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS)
if brightness is not None:
response["brightness"] = int(100 * (brightness / 255))
else:
response["brightness"] = 0
return response
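# Illustrative mapping (hypothetical value): a brightness attribute of 128 on the 0-255
# scale is reported to Google as int(100 * 128 / 255) == 50 percent.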
async def execute(self, command, data, params, challenge):
"""Execute a brightness command."""
if self.state.domain == light.DOMAIN:
await self.hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: self.state.entity_id,
light.ATTR_BRIGHTNESS_PCT: params["brightness"],
},
blocking=not self.config.should_report_state,
context=data.context,
)
@register_trait
class CameraStreamTrait(_Trait):
"""Trait to stream from cameras.
https://developers.google.com/actions/smarthome/traits/camerastream
"""
name = TRAIT_CAMERA_STREAM
commands = [COMMAND_GET_CAMERA_STREAM]
stream_info = None
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
if domain == camera.DOMAIN:
return features & camera.SUPPORT_STREAM
return False
def sync_attributes(self):
"""Return stream attributes for a sync request."""
return {
"cameraStreamSupportedProtocols": ["hls"],
"cameraStreamNeedAuthToken": False,
"cameraStreamNeedDrmEncryption": False,
}
def query_attributes(self):
"""Return camera stream attributes."""
return self.stream_info or {}
async def execute(self, command, data, params, challenge):
"""Execute a get camera stream command."""
url = await self.hass.components.camera.async_request_stream(
self.state.entity_id, "hls"
)
self.stream_info = {
"cameraStreamAccessUrl": f"{get_url(self.hass)}{url}",
"cameraStreamReceiverAppId": CAST_APP_ID_HOMEASSISTANT_MEDIA,
}
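# Illustrative result (the URL is a made-up placeholder): after a GetCameraStream command,
# query_attributes would return something like
# {"cameraStreamAccessUrl": "https://hass.example:8123/api/hls/...",
#  "cameraStreamReceiverAppId": CAST_APP_ID_HOMEASSISTANT_MEDIA}.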
@register_trait
class OnOffTrait(_Trait):
"""Trait to offer basic on and off functionality.
https://developers.google.com/actions/smarthome/traits/onoff
"""
name = TRAIT_ONOFF
commands = [COMMAND_ONOFF]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
return domain in (
group.DOMAIN,
input_boolean.DOMAIN,
switch.DOMAIN,
fan.DOMAIN,
light.DOMAIN,
media_player.DOMAIN,
humidifier.DOMAIN,
)
def sync_attributes(self):
"""Return OnOff attributes for a sync request."""
if self.state.attributes.get(ATTR_ASSUMED_STATE, False):
return {"commandOnlyOnOff": True}
return {}
def query_attributes(self):
"""Return OnOff query attributes."""
return {"on": self.state.state not in (STATE_OFF, STATE_UNKNOWN)}
async def execute(self, command, data, params, challenge):
"""Execute an OnOff command."""
if (domain := self.state.domain) == group.DOMAIN:
service_domain = HA_DOMAIN
service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF
else:
service_domain = domain
service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF
await self.hass.services.async_call(
service_domain,
service,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
@register_trait
class ColorSettingTrait(_Trait):
"""Trait to offer color temperature functionality.
https://developers.google.com/actions/smarthome/traits/colortemperature
"""
name = TRAIT_COLOR_SETTING
commands = [COMMAND_COLOR_ABSOLUTE]
@staticmethod
def supported(domain, features, device_class, attributes):
"""Test if state is supported."""
if domain != light.DOMAIN:
return False
color_modes = attributes.get(light.ATTR_SUPPORTED_COLOR_MODES)
return light.color_temp_supported(color_modes) or light.color_supported(
color_modes
)
def sync_attributes(self):
"""Return color temperature attributes for a sync request."""
attrs = self.state.attributes
color_modes = attrs.get(light.ATTR_SUPPORTED_COLOR_MODES)
response = {}
if light.color_supported(color_modes):
response["colorModel"] = "hsv"
if light.color_temp_supported(color_modes):
# Max Kelvin corresponds to min mireds (K = 1000000 / mireds)
# Min Kelvin corresponds to max mireds (K = 1000000 / mireds)
response["colorTemperatureRange"] = {
"temperatureMaxK": color_util.color_temperature_mired_to_kelvin(
attrs.get(light.ATTR_MIN_MIREDS)
),
"temperatureMinK": color_util.color_temperature_mired_to_kelvin(
attrs.get(light.ATTR_MAX_MIREDS)
),
}
return response
def query_attributes(self):
"""Return color temperature query attributes."""
color_mode = self.state.attributes.get(light.ATTR_COLOR_MODE)
color = {}
if light.color_supported([color_mode]):
color_hs = self.state.attributes.get(light.ATTR_HS_COLOR)
brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS, 1)
if color_hs is not None:
color["spectrumHsv"] = {
"hue": color_hs[0],
"saturation": color_hs[1] / 100,
"value": brightness / 255,
}
if light.color_temp_supported([color_mode]):
temp = self.state.attributes.get(light.ATTR_COLOR_TEMP)
# Some faulty integrations might put 0 in here, raising exception.
if temp == 0:
_LOGGER.warning(
"Entity %s has incorrect color temperature %s",
self.state.entity_id,
temp,
)
elif temp is not None:
color["temperatureK"] = color_util.color_temperature_mired_to_kelvin(
temp
)
response = {}
if color:
response["color"] = color
return response
async def execute(self, command, data, params, challenge):
"""Execute a color temperature command."""
if "temperature" in params["color"]:
temp = color_util.color_temperature_kelvin_to_mired(
params["color"]["temperature"]
)
min_temp = self.state.attributes[light.ATTR_MIN_MIREDS]
max_temp = self.state.attributes[light.ATTR_MAX_MIREDS]
if temp < min_temp or temp > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
f"Temperature should be between {min_temp} and {max_temp}",
)
await self.hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_COLOR_TEMP: temp},
blocking=not self.config.should_report_state,
context=data.context,
)
elif "spectrumRGB" in params["color"]:
# Convert integer to hex format and left pad with 0's till length 6
hex_value = f"{params['color']['spectrumRGB']:06x}"
color = color_util.color_RGB_to_hs(
*color_util.rgb_hex_to_rgb_list(hex_value)
)
await self.hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_HS_COLOR: color},
blocking=not self.config.should_report_state,
context=data.context,
)
elif "spectrumHSV" in params["color"]:
color = params["color"]["spectrumHSV"]
saturation = color["saturation"] * 100
brightness = color["value"] * 255
await self.hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: self.state.entity_id,
light.ATTR_HS_COLOR: [color["hue"], saturation],
light.ATTR_BRIGHTNESS: brightness,
},
blocking=not self.config.should_report_state,
context=data.context,
)
@register_trait
class SceneTrait(_Trait):
"""Trait to offer scene functionality.
https://developers.google.com/actions/smarthome/traits/scene
"""
name = TRAIT_SCENE
commands = [COMMAND_ACTIVATE_SCENE]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
return domain in (button.DOMAIN, scene.DOMAIN, script.DOMAIN)
def sync_attributes(self):
"""Return scene attributes for a sync request."""
# None of the supported domains can support sceneReversible
return {}
def query_attributes(self):
"""Return scene query attributes."""
return {}
async def execute(self, command, data, params, challenge):
"""Execute a scene command."""
service = SERVICE_TURN_ON
if self.state.domain == button.DOMAIN:
service = button.SERVICE_PRESS
# Don't block for scripts or buttons, as they can be slow.
await self.hass.services.async_call(
self.state.domain,
service,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=(not self.config.should_report_state)
and self.state.domain not in (button.DOMAIN, script.DOMAIN),
context=data.context,
)
@register_trait
class DockTrait(_Trait):
"""Trait to offer dock functionality.
https://developers.google.com/actions/smarthome/traits/dock
"""
name = TRAIT_DOCK
commands = [COMMAND_DOCK]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
return domain == vacuum.DOMAIN
def sync_attributes(self):
"""Return dock attributes for a sync request."""
return {}
def query_attributes(self):
"""Return dock query attributes."""
return {"isDocked": self.state.state == vacuum.STATE_DOCKED}
async def execute(self, command, data, params, challenge):
"""Execute a dock command."""
await self.hass.services.async_call(
self.state.domain,
vacuum.SERVICE_RETURN_TO_BASE,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
@register_trait
class LocatorTrait(_Trait):
"""Trait to offer locate functionality.
https://developers.google.com/actions/smarthome/traits/locator
"""
name = TRAIT_LOCATOR
commands = [COMMAND_LOCATE]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
return domain == vacuum.DOMAIN and features & vacuum.SUPPORT_LOCATE
def sync_attributes(self):
"""Return locator attributes for a sync request."""
return {}
def query_attributes(self):
"""Return locator query attributes."""
return {}
async def execute(self, command, data, params, challenge):
"""Execute a locate command."""
if params.get("silence", False):
raise SmartHomeError(
ERR_FUNCTION_NOT_SUPPORTED,
"Silencing a Locate request is not yet supported",
)
await self.hass.services.async_call(
self.state.domain,
vacuum.SERVICE_LOCATE,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
@register_trait
class EnergyStorageTrait(_Trait):
"""Trait to offer EnergyStorage functionality.
https://developers.google.com/actions/smarthome/traits/energystorage
"""
name = TRAIT_ENERGYSTORAGE
commands = [COMMAND_CHARGE]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
return domain == vacuum.DOMAIN and features & vacuum.SUPPORT_BATTERY
def sync_attributes(self):
"""Return EnergyStorage attributes for a sync request."""
return {
"isRechargeable": True,
"queryOnlyEnergyStorage": True,
}
def query_attributes(self):
"""Return EnergyStorage query attributes."""
battery_level = self.state.attributes.get(ATTR_BATTERY_LEVEL)
if battery_level == 100:
descriptive_capacity_remaining = "FULL"
elif 75 <= battery_level < 100:
descriptive_capacity_remaining = "HIGH"
elif 50 <= battery_level < 75:
descriptive_capacity_remaining = "MEDIUM"
elif 25 <= battery_level < 50:
descriptive_capacity_remaining = "LOW"
elif 0 <= battery_level < 25:
descriptive_capacity_remaining = "CRITICALLY_LOW"
return {
"descriptiveCapacityRemaining": descriptive_capacity_remaining,
"capacityRemaining": [{"rawValue": battery_level, "unit": "PERCENTAGE"}],
"capacityUntilFull": [
{"rawValue": 100 - battery_level, "unit": "PERCENTAGE"}
],
"isCharging": self.state.state == vacuum.STATE_DOCKED,
"isPluggedIn": self.state.state == vacuum.STATE_DOCKED,
}
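# Illustrative bucket mapping (hypothetical level, derived from the thresholds above): a
# battery_level of 60 reports descriptiveCapacityRemaining "MEDIUM", capacityRemaining 60
# PERCENTAGE and capacityUntilFull 40 PERCENTAGE.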
async def execute(self, command, data, params, challenge):
"""Execute a dock command."""
raise SmartHomeError(
ERR_FUNCTION_NOT_SUPPORTED,
"Controlling charging of a vacuum is not yet supported",
)
@register_trait
class StartStopTrait(_Trait):
"""Trait to offer StartStop functionality.
https://developers.google.com/actions/smarthome/traits/startstop
"""
name = TRAIT_STARTSTOP
commands = [COMMAND_STARTSTOP, COMMAND_PAUSEUNPAUSE]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
if domain == vacuum.DOMAIN:
return True
if domain == cover.DOMAIN and features & cover.SUPPORT_STOP:
return True
return False
def sync_attributes(self):
"""Return StartStop attributes for a sync request."""
domain = self.state.domain
if domain == vacuum.DOMAIN:
return {
"pausable": self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
& vacuum.SUPPORT_PAUSE
!= 0
}
if domain == cover.DOMAIN:
return {}
def query_attributes(self):
"""Return StartStop query attributes."""
domain = self.state.domain
state = self.state.state
if domain == vacuum.DOMAIN:
return {
"isRunning": state == vacuum.STATE_CLEANING,
"isPaused": state == vacuum.STATE_PAUSED,
}
if domain == cover.DOMAIN:
return {"isRunning": state in (cover.STATE_CLOSING, cover.STATE_OPENING)}
async def execute(self, command, data, params, challenge):
"""Execute a StartStop command."""
domain = self.state.domain
if domain == vacuum.DOMAIN:
return await self._execute_vacuum(command, data, params, challenge)
if domain == cover.DOMAIN:
return await self._execute_cover(command, data, params, challenge)
async def _execute_vacuum(self, command, data, params, challenge):
"""Execute a StartStop command."""
if command == COMMAND_STARTSTOP:
if params["start"]:
await self.hass.services.async_call(
self.state.domain,
vacuum.SERVICE_START,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
else:
await self.hass.services.async_call(
self.state.domain,
vacuum.SERVICE_STOP,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
elif command == COMMAND_PAUSEUNPAUSE:
if params["pause"]:
await self.hass.services.async_call(
self.state.domain,
vacuum.SERVICE_PAUSE,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
else:
await self.hass.services.async_call(
self.state.domain,
vacuum.SERVICE_START,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
async def _execute_cover(self, command, data, params, challenge):
"""Execute a StartStop command."""
if command == COMMAND_STARTSTOP:
if params["start"] is False:
if (
self.state.state
in (
cover.STATE_CLOSING,
cover.STATE_OPENING,
)
or self.state.attributes.get(ATTR_ASSUMED_STATE)
):
await self.hass.services.async_call(
self.state.domain,
cover.SERVICE_STOP_COVER,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
else:
raise SmartHomeError(
ERR_ALREADY_STOPPED, "Cover is already stopped"
)
else:
raise SmartHomeError(
ERR_NOT_SUPPORTED, "Starting a cover is not supported"
)
else:
raise SmartHomeError(
ERR_NOT_SUPPORTED, f"Command {command} is not supported"
)
@register_trait
class TemperatureControlTrait(_Trait):
"""Trait for devices (other than thermostats) that support controlling temperature. Workaround for Temperature sensors.
https://developers.google.com/assistant/smarthome/traits/temperaturecontrol
"""
name = TRAIT_TEMPERATURE_CONTROL
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
return (
domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_TEMPERATURE
)
def sync_attributes(self):
"""Return temperature attributes for a sync request."""
return {
"temperatureUnitForUX": _google_temp_unit(
self.hass.config.units.temperature_unit
),
"queryOnlyTemperatureSetting": True,
"temperatureRange": {
"minThresholdCelsius": -100,
"maxThresholdCelsius": 100,
},
}
def query_attributes(self):
"""Return temperature states."""
response = {}
unit = self.hass.config.units.temperature_unit
current_temp = self.state.state
if current_temp not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
temp = round(temp_util.convert(float(current_temp), unit, TEMP_CELSIUS), 1)
response["temperatureSetpointCelsius"] = temp
response["temperatureAmbientCelsius"] = temp
return response
async def execute(self, command, data, params, challenge):
"""Unsupported."""
raise SmartHomeError(ERR_NOT_SUPPORTED, "Execute is not supported by sensor")
@register_trait
class TemperatureSettingTrait(_Trait):
"""Trait to offer handling both temperature point and modes functionality.
https://developers.google.com/actions/smarthome/traits/temperaturesetting
"""
name = TRAIT_TEMPERATURE_SETTING
commands = [
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE,
COMMAND_THERMOSTAT_SET_MODE,
]
# We do not support "on" as we are unable to know how to restore
# the last mode.
hvac_to_google = {
climate.HVAC_MODE_HEAT: "heat",
climate.HVAC_MODE_COOL: "cool",
climate.HVAC_MODE_OFF: "off",
climate.HVAC_MODE_AUTO: "auto",
climate.HVAC_MODE_HEAT_COOL: "heatcool",
climate.HVAC_MODE_FAN_ONLY: "fan-only",
climate.HVAC_MODE_DRY: "dry",
}
google_to_hvac = {value: key for key, value in hvac_to_google.items()}
preset_to_google = {climate.PRESET_ECO: "eco"}
google_to_preset = {value: key for key, value in preset_to_google.items()}
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
return domain == climate.DOMAIN
@property
def climate_google_modes(self):
"""Return supported Google modes."""
modes = []
attrs = self.state.attributes
for mode in attrs.get(climate.ATTR_HVAC_MODES, []):
google_mode = self.hvac_to_google.get(mode)
if google_mode and google_mode not in modes:
modes.append(google_mode)
for preset in attrs.get(climate.ATTR_PRESET_MODES, []):
google_mode = self.preset_to_google.get(preset)
if google_mode and google_mode not in modes:
modes.append(google_mode)
return modes
def sync_attributes(self):
"""Return temperature point and modes attributes for a sync request."""
response = {}
response["thermostatTemperatureUnit"] = _google_temp_unit(
self.hass.config.units.temperature_unit
)
modes = self.climate_google_modes
# Some integrations don't support modes (e.g. opentherm), but Google doesn't
# support changing the temperature if we don't have any modes. If there's
# only one mode, Google doesn't support changing it either, so the default
# mode added here is only cosmetic.
if len(modes) == 0:
modes.append("heat")
if "off" in modes and any(
mode in modes for mode in ("heatcool", "heat", "cool")
):
modes.append("on")
response["availableThermostatModes"] = modes
return response
def query_attributes(self):
"""Return temperature point and modes query attributes."""
response = {}
attrs = self.state.attributes
unit = self.hass.config.units.temperature_unit
operation = self.state.state
preset = attrs.get(climate.ATTR_PRESET_MODE)
supported = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
if preset in self.preset_to_google:
response["thermostatMode"] = self.preset_to_google[preset]
else:
response["thermostatMode"] = self.hvac_to_google.get(operation, "none")
current_temp = attrs.get(climate.ATTR_CURRENT_TEMPERATURE)
if current_temp is not None:
response["thermostatTemperatureAmbient"] = round(
temp_util.convert(current_temp, unit, TEMP_CELSIUS), 1
)
current_humidity = attrs.get(climate.ATTR_CURRENT_HUMIDITY)
if current_humidity is not None:
response["thermostatHumidityAmbient"] = current_humidity
if operation in (climate.HVAC_MODE_AUTO, climate.HVAC_MODE_HEAT_COOL):
if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
response["thermostatTemperatureSetpointHigh"] = round(
temp_util.convert(
attrs[climate.ATTR_TARGET_TEMP_HIGH], unit, TEMP_CELSIUS
),
1,
)
response["thermostatTemperatureSetpointLow"] = round(
temp_util.convert(
attrs[climate.ATTR_TARGET_TEMP_LOW], unit, TEMP_CELSIUS
),
1,
)
else:
if (target_temp := attrs.get(ATTR_TEMPERATURE)) is not None:
target_temp = round(
temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
)
response["thermostatTemperatureSetpointHigh"] = target_temp
response["thermostatTemperatureSetpointLow"] = target_temp
else:
if (target_temp := attrs.get(ATTR_TEMPERATURE)) is not None:
response["thermostatTemperatureSetpoint"] = round(
temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
)
return response
async def execute(self, command, data, params, challenge):
"""Execute a temperature point or mode command."""
# All sent in temperatures are always in Celsius
unit = self.hass.config.units.temperature_unit
min_temp = self.state.attributes[climate.ATTR_MIN_TEMP]
max_temp = self.state.attributes[climate.ATTR_MAX_TEMP]
if command == COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT:
temp = temp_util.convert(
params["thermostatTemperatureSetpoint"], TEMP_CELSIUS, unit
)
if unit == TEMP_FAHRENHEIT:
temp = round(temp)
if temp < min_temp or temp > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
f"Temperature should be between {min_temp} and {max_temp}",
)
await self.hass.services.async_call(
climate.DOMAIN,
climate.SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: self.state.entity_id, ATTR_TEMPERATURE: temp},
blocking=not self.config.should_report_state,
context=data.context,
)
elif command == COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE:
temp_high = temp_util.convert(
params["thermostatTemperatureSetpointHigh"], TEMP_CELSIUS, unit
)
if unit == TEMP_FAHRENHEIT:
temp_high = round(temp_high)
if temp_high < min_temp or temp_high > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
(
f"Upper bound for temperature range should be between "
f"{min_temp} and {max_temp}"
),
)
temp_low = temp_util.convert(
params["thermostatTemperatureSetpointLow"], TEMP_CELSIUS, unit
)
if unit == TEMP_FAHRENHEIT:
temp_low = round(temp_low)
if temp_low < min_temp or temp_low > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
(
f"Lower bound for temperature range should be between "
f"{min_temp} and {max_temp}"
),
)
supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
svc_data = {ATTR_ENTITY_ID: self.state.entity_id}
if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
svc_data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
svc_data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
else:
svc_data[ATTR_TEMPERATURE] = (temp_high + temp_low) / 2
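# Illustrative fallback (hypothetical range, assuming a Celsius-configured instance): a
# requested range of 19-23 degrees on an entity without target-range support is collapsed
# to a single setpoint of 21 degrees here.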
await self.hass.services.async_call(
climate.DOMAIN,
climate.SERVICE_SET_TEMPERATURE,
svc_data,
blocking=not self.config.should_report_state,
context=data.context,
)
elif command == COMMAND_THERMOSTAT_SET_MODE:
target_mode = params["thermostatMode"]
supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
if target_mode == "on":
await self.hass.services.async_call(
climate.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
return
if target_mode == "off":
await self.hass.services.async_call(
climate.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
return
if target_mode in self.google_to_preset:
await self.hass.services.async_call(
climate.DOMAIN,
climate.SERVICE_SET_PRESET_MODE,
{
climate.ATTR_PRESET_MODE: self.google_to_preset[target_mode],
ATTR_ENTITY_ID: self.state.entity_id,
},
blocking=not self.config.should_report_state,
context=data.context,
)
return
await self.hass.services.async_call(
climate.DOMAIN,
climate.SERVICE_SET_HVAC_MODE,
{
ATTR_ENTITY_ID: self.state.entity_id,
climate.ATTR_HVAC_MODE: self.google_to_hvac[target_mode],
},
blocking=not self.config.should_report_state,
context=data.context,
)
@register_trait
class HumiditySettingTrait(_Trait):
"""Trait to offer humidity setting functionality.
https://developers.google.com/actions/smarthome/traits/humiditysetting
"""
name = TRAIT_HUMIDITY_SETTING
commands = [COMMAND_SET_HUMIDITY]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
if domain == humidifier.DOMAIN:
return True
return domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_HUMIDITY
def sync_attributes(self):
"""Return humidity attributes for a sync request."""
response = {}
attrs = self.state.attributes
domain = self.state.domain
if domain == sensor.DOMAIN:
device_class = attrs.get(ATTR_DEVICE_CLASS)
if device_class == sensor.DEVICE_CLASS_HUMIDITY:
response["queryOnlyHumiditySetting"] = True
elif domain == humidifier.DOMAIN:
response["humiditySetpointRange"] = {
"minPercent": round(
float(self.state.attributes[humidifier.ATTR_MIN_HUMIDITY])
),
"maxPercent": round(
float(self.state.attributes[humidifier.ATTR_MAX_HUMIDITY])
),
}
return response
def query_attributes(self):
"""Return humidity query attributes."""
response = {}
attrs = self.state.attributes
domain = self.state.domain
if domain == sensor.DOMAIN:
device_class = attrs.get(ATTR_DEVICE_CLASS)
if device_class == sensor.DEVICE_CLASS_HUMIDITY:
current_humidity = self.state.state
if current_humidity not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
response["humidityAmbientPercent"] = round(float(current_humidity))
elif domain == humidifier.DOMAIN:
target_humidity = attrs.get(humidifier.ATTR_HUMIDITY)
if target_humidity is not None:
response["humiditySetpointPercent"] = round(float(target_humidity))
return response
async def execute(self, command, data, params, challenge):
"""Execute a humidity command."""
if self.state.domain == sensor.DOMAIN:
raise SmartHomeError(
ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
)
if command == COMMAND_SET_HUMIDITY:
await self.hass.services.async_call(
humidifier.DOMAIN,
humidifier.SERVICE_SET_HUMIDITY,
{
ATTR_ENTITY_ID: self.state.entity_id,
humidifier.ATTR_HUMIDITY: params["humidity"],
},
blocking=not self.config.should_report_state,
context=data.context,
)
@register_trait
class LockUnlockTrait(_Trait):
"""Trait to lock or unlock a lock.
https://developers.google.com/actions/smarthome/traits/lockunlock
"""
name = TRAIT_LOCKUNLOCK
commands = [COMMAND_LOCKUNLOCK]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
return domain == lock.DOMAIN
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return True
def sync_attributes(self):
"""Return LockUnlock attributes for a sync request."""
return {}
def query_attributes(self):
"""Return LockUnlock query attributes."""
if self.state.state == STATE_JAMMED:
return {"isJammed": True}
# If it's unlocking, it's not yet unlocked, so we consider it locked
return {"isLocked": self.state.state in (STATE_UNLOCKING, STATE_LOCKED)}
async def execute(self, command, data, params, challenge):
"""Execute an LockUnlock command."""
if params["lock"]:
service = lock.SERVICE_LOCK
else:
_verify_pin_challenge(data, self.state, challenge)
service = lock.SERVICE_UNLOCK
await self.hass.services.async_call(
lock.DOMAIN,
service,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
@register_trait
class ArmDisArmTrait(_Trait):
"""Trait to Arm or Disarm a Security System.
https://developers.google.com/actions/smarthome/traits/armdisarm
"""
name = TRAIT_ARMDISARM
commands = [COMMAND_ARMDISARM]
state_to_service = {
STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME,
STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY,
STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT,
STATE_ALARM_ARMED_CUSTOM_BYPASS: SERVICE_ALARM_ARM_CUSTOM_BYPASS,
STATE_ALARM_TRIGGERED: SERVICE_ALARM_TRIGGER,
}
state_to_support = {
STATE_ALARM_ARMED_HOME: alarm_control_panel.const.SUPPORT_ALARM_ARM_HOME,
STATE_ALARM_ARMED_AWAY: alarm_control_panel.const.SUPPORT_ALARM_ARM_AWAY,
STATE_ALARM_ARMED_NIGHT: alarm_control_panel.const.SUPPORT_ALARM_ARM_NIGHT,
STATE_ALARM_ARMED_CUSTOM_BYPASS: alarm_control_panel.const.SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
STATE_ALARM_TRIGGERED: alarm_control_panel.const.SUPPORT_ALARM_TRIGGER,
}
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
return domain == alarm_control_panel.DOMAIN
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return True
def _supported_states(self):
"""Return supported states."""
features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
return [
state
for state, required_feature in self.state_to_support.items()
if features & required_feature != 0
]
def sync_attributes(self):
"""Return ArmDisarm attributes for a sync request."""
response = {}
levels = []
for state in self._supported_states():
# level synonyms are generated from state names
# 'armed_away' becomes 'armed away' or 'away'
level_synonym = [state.replace("_", " ")]
if state != STATE_ALARM_TRIGGERED:
level_synonym.append(state.split("_")[1])
level = {
"level_name": state,
"level_values": [{"level_synonym": level_synonym, "lang": "en"}],
}
levels.append(level)
response["availableArmLevels"] = {"levels": levels, "ordered": False}
return response
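# Illustrative synonym generation (derived from the loop above): STATE_ALARM_ARMED_AWAY
# yields level_synonym ["armed away", "away"], while STATE_ALARM_TRIGGERED only gets
# ["triggered"] because the split() suffix is skipped for that state.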
def query_attributes(self):
"""Return ArmDisarm query attributes."""
if "next_state" in self.state.attributes:
armed_state = self.state.attributes["next_state"]
else:
armed_state = self.state.state
response = {"isArmed": armed_state in self.state_to_service}
if response["isArmed"]:
response.update({"currentArmLevel": armed_state})
return response
async def execute(self, command, data, params, challenge):
"""Execute an ArmDisarm command."""
if params["arm"] and not params.get("cancel"):
# If no arm level given, we can only arm it if there is
# only one supported arm type. We never default to triggered.
if not (arm_level := params.get("armLevel")):
states = self._supported_states()
if STATE_ALARM_TRIGGERED in states:
states.remove(STATE_ALARM_TRIGGERED)
if len(states) != 1:
raise SmartHomeError(ERR_NOT_SUPPORTED, "ArmLevel missing")
arm_level = states[0]
if self.state.state == arm_level:
raise SmartHomeError(ERR_ALREADY_ARMED, "System is already armed")
if self.state.attributes["code_arm_required"]:
_verify_pin_challenge(data, self.state, challenge)
service = self.state_to_service[arm_level]
# disarm the system without asking for code when
# 'cancel' arming action is received while current status is pending
elif (
params["arm"]
and params.get("cancel")
and self.state.state == STATE_ALARM_PENDING
):
service = SERVICE_ALARM_DISARM
else:
if self.state.state == STATE_ALARM_DISARMED:
raise SmartHomeError(ERR_ALREADY_DISARMED, "System is already disarmed")
_verify_pin_challenge(data, self.state, challenge)
service = SERVICE_ALARM_DISARM
await self.hass.services.async_call(
alarm_control_panel.DOMAIN,
service,
{
ATTR_ENTITY_ID: self.state.entity_id,
ATTR_CODE: data.config.secure_devices_pin,
},
blocking=not self.config.should_report_state,
context=data.context,
)
@register_trait
class FanSpeedTrait(_Trait):
"""Trait to control speed of Fan.
https://developers.google.com/actions/smarthome/traits/fanspeed
"""
name = TRAIT_FANSPEED
commands = [COMMAND_FANSPEED, COMMAND_REVERSE]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
if domain == fan.DOMAIN:
return features & fan.SUPPORT_SET_SPEED
if domain == climate.DOMAIN:
return features & climate.SUPPORT_FAN_MODE
return False
def sync_attributes(self):
"""Return speed point and modes attributes for a sync request."""
domain = self.state.domain
speeds = []
result = {}
if domain == fan.DOMAIN:
reversible = bool(
self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
& fan.SUPPORT_DIRECTION
)
result.update(
{
"reversible": reversible,
"supportsFanSpeedPercent": True,
}
)
elif domain == climate.DOMAIN:
modes = self.state.attributes.get(climate.ATTR_FAN_MODES) or []
for mode in modes:
speed = {
"speed_name": mode,
"speed_values": [{"speed_synonym": [mode], "lang": "en"}],
}
speeds.append(speed)
result.update(
{
"reversible": False,
"availableFanSpeeds": {"speeds": speeds, "ordered": True},
}
)
return result
def query_attributes(self):
"""Return speed point and modes query attributes."""
attrs = self.state.attributes
domain = self.state.domain
response = {}
if domain == climate.DOMAIN:
speed = attrs.get(climate.ATTR_FAN_MODE) or "off"
response["currentFanSpeedSetting"] = speed
if domain == fan.DOMAIN:
percent = attrs.get(fan.ATTR_PERCENTAGE) or 0
response["currentFanSpeedPercent"] = percent
return response
async def execute_fanspeed(self, data, params):
"""Execute an SetFanSpeed command."""
domain = self.state.domain
if domain == climate.DOMAIN:
await self.hass.services.async_call(
climate.DOMAIN,
climate.SERVICE_SET_FAN_MODE,
{
ATTR_ENTITY_ID: self.state.entity_id,
climate.ATTR_FAN_MODE: params["fanSpeed"],
},
blocking=not self.config.should_report_state,
context=data.context,
)
if domain == fan.DOMAIN:
await self.hass.services.async_call(
fan.DOMAIN,
fan.SERVICE_SET_PERCENTAGE,
{
ATTR_ENTITY_ID: self.state.entity_id,
fan.ATTR_PERCENTAGE: params["fanSpeedPercent"],
},
blocking=not self.config.should_report_state,
context=data.context,
)
async def execute_reverse(self, data, params):
"""Execute a Reverse command."""
if self.state.domain == fan.DOMAIN:
if self.state.attributes.get(fan.ATTR_DIRECTION) == fan.DIRECTION_FORWARD:
direction = fan.DIRECTION_REVERSE
else:
direction = fan.DIRECTION_FORWARD
await self.hass.services.async_call(
fan.DOMAIN,
fan.SERVICE_SET_DIRECTION,
{ATTR_ENTITY_ID: self.state.entity_id, fan.ATTR_DIRECTION: direction},
blocking=not self.config.should_report_state,
context=data.context,
)
async def execute(self, command, data, params, challenge):
"""Execute a smart home command."""
if command == COMMAND_FANSPEED:
await self.execute_fanspeed(data, params)
elif command == COMMAND_REVERSE:
await self.execute_reverse(data, params)
@register_trait
class ModesTrait(_Trait):
"""Trait to set modes.
https://developers.google.com/actions/smarthome/traits/modes
"""
name = TRAIT_MODES
commands = [COMMAND_MODES]
SYNONYMS = {
"preset mode": ["preset mode", "mode", "preset"],
"sound mode": ["sound mode", "effects"],
"option": ["option", "setting", "mode", "value"],
}
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
if domain == fan.DOMAIN and features & fan.SUPPORT_PRESET_MODE:
return True
if domain == input_select.DOMAIN:
return True
if domain == select.DOMAIN:
return True
if domain == humidifier.DOMAIN and features & humidifier.SUPPORT_MODES:
return True
if domain == light.DOMAIN and features & light.SUPPORT_EFFECT:
return True
if domain != media_player.DOMAIN:
return False
return features & media_player.SUPPORT_SELECT_SOUND_MODE
def _generate(self, name, settings):
"""Generate a list of modes."""
mode = {
"name": name,
"name_values": [
{"name_synonym": self.SYNONYMS.get(name, [name]), "lang": "en"}
],
"settings": [],
"ordered": False,
}
for setting in settings:
mode["settings"].append(
{
"setting_name": setting,
"setting_values": [
{
"setting_synonym": self.SYNONYMS.get(setting, [setting]),
"lang": "en",
}
],
}
)
return mode
def sync_attributes(self):
"""Return mode attributes for a sync request."""
modes = []
for domain, attr, name in (
(fan.DOMAIN, fan.ATTR_PRESET_MODES, "preset mode"),
(media_player.DOMAIN, media_player.ATTR_SOUND_MODE_LIST, "sound mode"),
(input_select.DOMAIN, input_select.ATTR_OPTIONS, "option"),
(select.DOMAIN, select.ATTR_OPTIONS, "option"),
(humidifier.DOMAIN, humidifier.ATTR_AVAILABLE_MODES, "mode"),
(light.DOMAIN, light.ATTR_EFFECT_LIST, "effect"),
):
if self.state.domain != domain:
continue
if (items := self.state.attributes.get(attr)) is not None:
modes.append(self._generate(name, items))
# Shortcut since all domains are currently unique
break
payload = {"availableModes": modes}
return payload
def query_attributes(self):
"""Return current modes."""
attrs = self.state.attributes
response = {}
mode_settings = {}
if self.state.domain == fan.DOMAIN:
if fan.ATTR_PRESET_MODES in attrs:
mode_settings["preset mode"] = attrs.get(fan.ATTR_PRESET_MODE)
elif self.state.domain == media_player.DOMAIN:
if media_player.ATTR_SOUND_MODE_LIST in attrs:
mode_settings["sound mode"] = attrs.get(media_player.ATTR_SOUND_MODE)
elif self.state.domain == input_select.DOMAIN:
mode_settings["option"] = self.state.state
elif self.state.domain == select.DOMAIN:
mode_settings["option"] = self.state.state
elif self.state.domain == humidifier.DOMAIN:
if ATTR_MODE in attrs:
mode_settings["mode"] = attrs.get(ATTR_MODE)
elif self.state.domain == light.DOMAIN and light.ATTR_EFFECT in attrs:
mode_settings["effect"] = attrs.get(light.ATTR_EFFECT)
if mode_settings:
response["on"] = self.state.state not in (STATE_OFF, STATE_UNKNOWN)
response["currentModeSettings"] = mode_settings
return response
async def execute(self, command, data, params, challenge):
"""Execute a SetModes command."""
settings = params.get("updateModeSettings")
if self.state.domain == fan.DOMAIN:
preset_mode = settings["preset mode"]
await self.hass.services.async_call(
fan.DOMAIN,
fan.SERVICE_SET_PRESET_MODE,
{
ATTR_ENTITY_ID: self.state.entity_id,
fan.ATTR_PRESET_MODE: preset_mode,
},
blocking=not self.config.should_report_state,
context=data.context,
)
return
if self.state.domain == input_select.DOMAIN:
option = settings["option"]
await self.hass.services.async_call(
input_select.DOMAIN,
input_select.SERVICE_SELECT_OPTION,
{
ATTR_ENTITY_ID: self.state.entity_id,
input_select.ATTR_OPTION: option,
},
blocking=not self.config.should_report_state,
context=data.context,
)
return
if self.state.domain == select.DOMAIN:
option = settings["option"]
await self.hass.services.async_call(
select.DOMAIN,
select.SERVICE_SELECT_OPTION,
{
ATTR_ENTITY_ID: self.state.entity_id,
select.ATTR_OPTION: option,
},
blocking=not self.config.should_report_state,
context=data.context,
)
return
if self.state.domain == humidifier.DOMAIN:
requested_mode = settings["mode"]
await self.hass.services.async_call(
humidifier.DOMAIN,
humidifier.SERVICE_SET_MODE,
{
ATTR_MODE: requested_mode,
ATTR_ENTITY_ID: self.state.entity_id,
},
blocking=not self.config.should_report_state,
context=data.context,
)
return
if self.state.domain == light.DOMAIN:
requested_effect = settings["effect"]
await self.hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: self.state.entity_id,
light.ATTR_EFFECT: requested_effect,
},
blocking=not self.config.should_report_state,
context=data.context,
)
return
if self.state.domain == media_player.DOMAIN and (
sound_mode := settings.get("sound mode")
):
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_SELECT_SOUND_MODE,
{
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_SOUND_MODE: sound_mode,
},
blocking=not self.config.should_report_state,
context=data.context,
)
_LOGGER.info(
"Received an Options command for unrecognised domain %s",
self.state.domain,
)
return
@register_trait
class InputSelectorTrait(_Trait):
"""Trait to set modes.
https://developers.google.com/assistant/smarthome/traits/inputselector
"""
name = TRAIT_INPUTSELECTOR
commands = [COMMAND_INPUT, COMMAND_NEXT_INPUT, COMMAND_PREVIOUS_INPUT]
SYNONYMS = {}
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
if domain == media_player.DOMAIN and (
features & media_player.SUPPORT_SELECT_SOURCE
):
return True
return False
def sync_attributes(self):
"""Return mode attributes for a sync request."""
attrs = self.state.attributes
inputs = [
{"key": source, "names": [{"name_synonym": [source], "lang": "en"}]}
for source in attrs.get(media_player.ATTR_INPUT_SOURCE_LIST, [])
]
payload = {"availableInputs": inputs, "orderedInputs": True}
return payload
def query_attributes(self):
"""Return current modes."""
attrs = self.state.attributes
return {"currentInput": attrs.get(media_player.ATTR_INPUT_SOURCE, "")}
async def execute(self, command, data, params, challenge):
"""Execute an SetInputSource command."""
sources = self.state.attributes.get(media_player.ATTR_INPUT_SOURCE_LIST) or []
source = self.state.attributes.get(media_player.ATTR_INPUT_SOURCE)
if command == COMMAND_INPUT:
requested_source = params.get("newInput")
elif command == COMMAND_NEXT_INPUT:
requested_source = _next_selected(sources, source)
elif command == COMMAND_PREVIOUS_INPUT:
requested_source = _next_selected(list(reversed(sources)), source)
else:
raise SmartHomeError(ERR_NOT_SUPPORTED, "Unsupported command")
if requested_source not in sources:
raise SmartHomeError(ERR_UNSUPPORTED_INPUT, "Unsupported input")
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_SELECT_SOURCE,
{
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_INPUT_SOURCE: requested_source,
},
blocking=not self.config.should_report_state,
context=data.context,
)
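# Illustrative input cycling (hypothetical source list): with sources
# ["hdmi1", "hdmi2", "tv"] and "tv" currently selected, NextInput wraps to "hdmi1" and
# PreviousInput resolves to "hdmi2" via _next_selected over the reversed source list.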
@register_trait
class OpenCloseTrait(_Trait):
"""Trait to open and close a cover.
https://developers.google.com/actions/smarthome/traits/openclose
"""
# Cover device classes that require 2FA
COVER_2FA = (
cover.DEVICE_CLASS_DOOR,
cover.DEVICE_CLASS_GARAGE,
cover.DEVICE_CLASS_GATE,
)
name = TRAIT_OPENCLOSE
commands = [COMMAND_OPENCLOSE, COMMAND_OPENCLOSE_RELATIVE]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
if domain == cover.DOMAIN:
return True
return domain == binary_sensor.DOMAIN and device_class in (
binary_sensor.DEVICE_CLASS_DOOR,
binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
binary_sensor.DEVICE_CLASS_LOCK,
binary_sensor.DEVICE_CLASS_OPENING,
binary_sensor.DEVICE_CLASS_WINDOW,
)
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return domain == cover.DOMAIN and device_class in OpenCloseTrait.COVER_2FA
def sync_attributes(self):
"""Return opening direction."""
response = {}
features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if self.state.domain == binary_sensor.DOMAIN:
response["queryOnlyOpenClose"] = True
response["discreteOnlyOpenClose"] = True
elif (
self.state.domain == cover.DOMAIN
and features & cover.SUPPORT_SET_POSITION == 0
):
response["discreteOnlyOpenClose"] = True
if (
features & cover.SUPPORT_OPEN == 0
and features & cover.SUPPORT_CLOSE == 0
):
response["queryOnlyOpenClose"] = True
if self.state.attributes.get(ATTR_ASSUMED_STATE):
response["commandOnlyOpenClose"] = True
return response
def query_attributes(self):
"""Return state query attributes."""
domain = self.state.domain
response = {}
# When it's an assumed state, we will return empty state
# This shouldn't happen because we set `commandOnlyOpenClose`
# but Google still queries. Erroring here will cause device
# to show up offline.
if self.state.attributes.get(ATTR_ASSUMED_STATE):
return response
if domain == cover.DOMAIN:
if self.state.state == STATE_UNKNOWN:
raise SmartHomeError(
ERR_NOT_SUPPORTED, "Querying state is not supported"
)
position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION)
if position is not None:
response["openPercent"] = position
elif self.state.state != cover.STATE_CLOSED:
response["openPercent"] = 100
else:
response["openPercent"] = 0
elif domain == binary_sensor.DOMAIN:
if self.state.state == STATE_ON:
response["openPercent"] = 100
else:
response["openPercent"] = 0
return response
async def execute(self, command, data, params, challenge):
"""Execute an Open, close, Set position command."""
domain = self.state.domain
features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if domain == cover.DOMAIN:
svc_params = {ATTR_ENTITY_ID: self.state.entity_id}
should_verify = False
if command == COMMAND_OPENCLOSE_RELATIVE:
position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION)
if position is None:
raise SmartHomeError(
ERR_NOT_SUPPORTED,
"Current position not know for relative command",
)
position = max(0, min(100, position + params["openRelativePercent"]))
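# Illustrative clamp (hypothetical values): a cover at 80% receiving openRelativePercent
# of 40 is clamped to 100 and therefore handled by the plain open_cover branch below.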
else:
position = params["openPercent"]
if position == 0:
service = cover.SERVICE_CLOSE_COVER
should_verify = False
elif position == 100:
service = cover.SERVICE_OPEN_COVER
should_verify = True
elif features & cover.SUPPORT_SET_POSITION:
service = cover.SERVICE_SET_COVER_POSITION
if position > 0:
should_verify = True
svc_params[cover.ATTR_POSITION] = position
else:
raise SmartHomeError(
ERR_NOT_SUPPORTED, "No support for partial open close"
)
if (
should_verify
and self.state.attributes.get(ATTR_DEVICE_CLASS)
in OpenCloseTrait.COVER_2FA
):
_verify_pin_challenge(data, self.state, challenge)
await self.hass.services.async_call(
cover.DOMAIN,
service,
svc_params,
blocking=not self.config.should_report_state,
context=data.context,
)
@register_trait
class VolumeTrait(_Trait):
"""Trait to control volume of a device.
https://developers.google.com/actions/smarthome/traits/volume
"""
name = TRAIT_VOLUME
commands = [COMMAND_SET_VOLUME, COMMAND_VOLUME_RELATIVE, COMMAND_MUTE]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if trait is supported."""
if domain == media_player.DOMAIN:
return features & (
media_player.SUPPORT_VOLUME_SET | media_player.SUPPORT_VOLUME_STEP
)
return False
def sync_attributes(self):
"""Return volume attributes for a sync request."""
features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
return {
"volumeCanMuteAndUnmute": bool(features & media_player.SUPPORT_VOLUME_MUTE),
"commandOnlyVolume": self.state.attributes.get(ATTR_ASSUMED_STATE, False),
# Volume amounts in SET_VOLUME and VOLUME_RELATIVE are on a scale
# from 0 to this value.
"volumeMaxLevel": 100,
# Default change for queries like "Hey Google, volume up".
# 10% corresponds to the default behavior for the
# media_player.volume{up,down} services.
"levelStepSize": 10,
}
def query_attributes(self):
"""Return volume query attributes."""
response = {}
level = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
if level is not None:
# Convert 0.0-1.0 to 0-100
response["currentVolume"] = int(level * 100)
muted = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
if muted is not None:
response["isMuted"] = bool(muted)
return response
async def _set_volume_absolute(self, data, level):
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_VOLUME_SET,
{
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_MEDIA_VOLUME_LEVEL: level,
},
blocking=not self.config.should_report_state,
context=data.context,
)
async def _execute_set_volume(self, data, params):
level = max(0, min(100, params["volumeLevel"]))
if not (
self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
& media_player.SUPPORT_VOLUME_SET
):
raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
await self._set_volume_absolute(data, level / 100)
async def _execute_volume_relative(self, data, params):
relative = params["relativeSteps"]
features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if features & media_player.SUPPORT_VOLUME_SET:
current = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
target = max(0.0, min(1.0, current + relative / 100))
await self._set_volume_absolute(data, target)
elif features & media_player.SUPPORT_VOLUME_STEP:
svc = media_player.SERVICE_VOLUME_UP
if relative < 0:
svc = media_player.SERVICE_VOLUME_DOWN
relative = -relative
for _ in range(relative):
await self.hass.services.async_call(
media_player.DOMAIN,
svc,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=not self.config.should_report_state,
context=data.context,
)
else:
raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
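# Illustrative relative-volume math (hypothetical values): with SUPPORT_VOLUME_SET, a
# current level of 0.5 and relativeSteps of -10 yields a new absolute level of 0.4; with
# only SUPPORT_VOLUME_STEP, the same request calls the volume_down service ten times.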
async def _execute_mute(self, data, params):
mute = params["mute"]
if not (
self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
& media_player.SUPPORT_VOLUME_MUTE
):
raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_VOLUME_MUTE,
{
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_MEDIA_VOLUME_MUTED: mute,
},
blocking=not self.config.should_report_state,
context=data.context,
)
async def execute(self, command, data, params, challenge):
"""Execute a volume command."""
if command == COMMAND_SET_VOLUME:
await self._execute_set_volume(data, params)
elif command == COMMAND_VOLUME_RELATIVE:
await self._execute_volume_relative(data, params)
elif command == COMMAND_MUTE:
await self._execute_mute(data, params)
else:
raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
def _verify_pin_challenge(data, state, challenge):
"""Verify a pin challenge."""
if not data.config.should_2fa(state):
return
if not data.config.secure_devices_pin:
raise SmartHomeError(ERR_CHALLENGE_NOT_SETUP, "Challenge is not set up")
if not challenge:
raise ChallengeNeeded(CHALLENGE_PIN_NEEDED)
if challenge.get("pin") != data.config.secure_devices_pin:
raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED)
def _verify_ack_challenge(data, state, challenge):
"""Verify an ack challenge."""
if not data.config.should_2fa(state):
return
if not challenge or not challenge.get("ack"):
raise ChallengeNeeded(CHALLENGE_ACK_NEEDED)
MEDIA_COMMAND_SUPPORT_MAPPING = {
COMMAND_MEDIA_NEXT: media_player.SUPPORT_NEXT_TRACK,
COMMAND_MEDIA_PAUSE: media_player.SUPPORT_PAUSE,
COMMAND_MEDIA_PREVIOUS: media_player.SUPPORT_PREVIOUS_TRACK,
COMMAND_MEDIA_RESUME: media_player.SUPPORT_PLAY,
COMMAND_MEDIA_SEEK_RELATIVE: media_player.SUPPORT_SEEK,
COMMAND_MEDIA_SEEK_TO_POSITION: media_player.SUPPORT_SEEK,
COMMAND_MEDIA_SHUFFLE: media_player.SUPPORT_SHUFFLE_SET,
COMMAND_MEDIA_STOP: media_player.SUPPORT_STOP,
}
MEDIA_COMMAND_ATTRIBUTES = {
COMMAND_MEDIA_NEXT: "NEXT",
COMMAND_MEDIA_PAUSE: "PAUSE",
COMMAND_MEDIA_PREVIOUS: "PREVIOUS",
COMMAND_MEDIA_RESUME: "RESUME",
COMMAND_MEDIA_SEEK_RELATIVE: "SEEK_RELATIVE",
COMMAND_MEDIA_SEEK_TO_POSITION: "SEEK_TO_POSITION",
COMMAND_MEDIA_SHUFFLE: "SHUFFLE",
COMMAND_MEDIA_STOP: "STOP",
}
@register_trait
class TransportControlTrait(_Trait):
"""Trait to control media playback.
https://developers.google.com/actions/smarthome/traits/transportcontrol
"""
name = TRAIT_TRANSPORT_CONTROL
commands = [
COMMAND_MEDIA_NEXT,
COMMAND_MEDIA_PAUSE,
COMMAND_MEDIA_PREVIOUS,
COMMAND_MEDIA_RESUME,
COMMAND_MEDIA_SEEK_RELATIVE,
COMMAND_MEDIA_SEEK_TO_POSITION,
COMMAND_MEDIA_SHUFFLE,
COMMAND_MEDIA_STOP,
]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
if domain == media_player.DOMAIN:
for feature in MEDIA_COMMAND_SUPPORT_MAPPING.values():
if features & feature:
return True
return False
def sync_attributes(self):
"""Return opening direction."""
response = {}
if self.state.domain == media_player.DOMAIN:
features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
support = []
for command, feature in MEDIA_COMMAND_SUPPORT_MAPPING.items():
if features & feature:
support.append(MEDIA_COMMAND_ATTRIBUTES[command])
response["transportControlSupportedCommands"] = support
return response
def query_attributes(self):
"""Return the attributes of this trait for this entity."""
return {}
async def execute(self, command, data, params, challenge):
"""Execute a media command."""
service_attrs = {ATTR_ENTITY_ID: self.state.entity_id}
if command == COMMAND_MEDIA_SEEK_RELATIVE:
service = media_player.SERVICE_MEDIA_SEEK
rel_position = params["relativePositionMs"] / 1000
seconds_since = 0 # Default to 0 seconds
if self.state.state == STATE_PLAYING:
now = dt.utcnow()
upd_at = self.state.attributes.get(
media_player.ATTR_MEDIA_POSITION_UPDATED_AT, now
)
seconds_since = (now - upd_at).total_seconds()
position = self.state.attributes.get(media_player.ATTR_MEDIA_POSITION, 0)
max_position = self.state.attributes.get(
media_player.ATTR_MEDIA_DURATION, 0
)
service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min(
max(position + seconds_since + rel_position, 0), max_position
)
elif command == COMMAND_MEDIA_SEEK_TO_POSITION:
service = media_player.SERVICE_MEDIA_SEEK
max_position = self.state.attributes.get(
media_player.ATTR_MEDIA_DURATION, 0
)
service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min(
max(params["absPositionMs"] / 1000, 0), max_position
)
elif command == COMMAND_MEDIA_NEXT:
service = media_player.SERVICE_MEDIA_NEXT_TRACK
elif command == COMMAND_MEDIA_PAUSE:
service = media_player.SERVICE_MEDIA_PAUSE
elif command == COMMAND_MEDIA_PREVIOUS:
service = media_player.SERVICE_MEDIA_PREVIOUS_TRACK
elif command == COMMAND_MEDIA_RESUME:
service = media_player.SERVICE_MEDIA_PLAY
elif command == COMMAND_MEDIA_SHUFFLE:
service = media_player.SERVICE_SHUFFLE_SET
# Google Assistant only supports enabling shuffle
service_attrs[media_player.ATTR_MEDIA_SHUFFLE] = True
elif command == COMMAND_MEDIA_STOP:
service = media_player.SERVICE_MEDIA_STOP
else:
raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
await self.hass.services.async_call(
media_player.DOMAIN,
service,
service_attrs,
blocking=not self.config.should_report_state,
context=data.context,
)
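# Illustrative relative seek (hypothetical values): with the player PLAYING at a stored
# position of 100 s last updated 5 s ago, a relativePositionMs of 30000 seeks to
# 100 + 5 + 30 = 135 s, clamped to the track duration.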
@register_trait
class MediaStateTrait(_Trait):
"""Trait to get media playback state.
https://developers.google.com/actions/smarthome/traits/mediastate
"""
name = TRAIT_MEDIA_STATE
commands = []
activity_lookup = {
STATE_OFF: "INACTIVE",
STATE_IDLE: "STANDBY",
STATE_PLAYING: "ACTIVE",
STATE_ON: "STANDBY",
STATE_PAUSED: "STANDBY",
STATE_STANDBY: "STANDBY",
STATE_UNAVAILABLE: "INACTIVE",
STATE_UNKNOWN: "INACTIVE",
}
playback_lookup = {
STATE_OFF: "STOPPED",
STATE_IDLE: "STOPPED",
STATE_PLAYING: "PLAYING",
STATE_ON: "STOPPED",
STATE_PAUSED: "PAUSED",
STATE_STANDBY: "STOPPED",
STATE_UNAVAILABLE: "STOPPED",
STATE_UNKNOWN: "STOPPED",
}
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
return domain == media_player.DOMAIN
def sync_attributes(self):
"""Return attributes for a sync request."""
return {"supportActivityState": True, "supportPlaybackState": True}
def query_attributes(self):
"""Return the attributes of this trait for this entity."""
return {
"activityState": self.activity_lookup.get(self.state.state, "INACTIVE"),
"playbackState": self.playback_lookup.get(self.state.state, "STOPPED"),
}
@register_trait
class ChannelTrait(_Trait):
"""Trait to get media playback state.
https://developers.google.com/actions/smarthome/traits/channel
"""
name = TRAIT_CHANNEL
commands = [COMMAND_SELECT_CHANNEL]
@staticmethod
def supported(domain, features, device_class, _):
"""Test if state is supported."""
if (
domain == media_player.DOMAIN
and (features & media_player.SUPPORT_PLAY_MEDIA)
and device_class == media_player.DEVICE_CLASS_TV
):
return True
return False
def sync_attributes(self):
"""Return attributes for a sync request."""
return {"availableChannels": [], "commandOnlyChannels": True}
def query_attributes(self):
"""Return channel query attributes."""
return {}
async def execute(self, command, data, params, challenge):
"""Execute an setChannel command."""
if command == COMMAND_SELECT_CHANNEL:
channel_number = params.get("channelNumber")
else:
raise SmartHomeError(ERR_NOT_SUPPORTED, "Unsupported command")
if not channel_number:
raise SmartHomeError(
ERR_NO_AVAILABLE_CHANNEL,
"Channel is not available",
)
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_PLAY_MEDIA,
{
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_MEDIA_CONTENT_ID: channel_number,
media_player.ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
},
blocking=not self.config.should_report_state,
context=data.context,
)
@register_trait
class SensorStateTrait(_Trait):
"""Trait to get sensor state.
https://developers.google.com/actions/smarthome/traits/sensorstate
"""
sensor_types = {
sensor.DEVICE_CLASS_AQI: ("AirQuality", "AQI"),
sensor.DEVICE_CLASS_CO: ("CarbonDioxideLevel", "PARTS_PER_MILLION"),
sensor.DEVICE_CLASS_CO2: ("CarbonMonoxideLevel", "PARTS_PER_MILLION"),
sensor.DEVICE_CLASS_PM25: ("PM2.5", "MICROGRAMS_PER_CUBIC_METER"),
sensor.DEVICE_CLASS_PM10: ("PM10", "MICROGRAMS_PER_CUBIC_METER"),
sensor.DEVICE_CLASS_VOLATILE_ORGANIC_COMPOUNDS: (
"VolatileOrganicCompounds",
"PARTS_PER_MILLION",
),
}
name = TRAIT_SENSOR_STATE
commands = []
@classmethod
def supported(cls, domain, features, device_class, _):
"""Test if state is supported."""
return domain == sensor.DOMAIN and device_class in cls.sensor_types
def sync_attributes(self):
"""Return attributes for a sync request."""
device_class = self.state.attributes.get(ATTR_DEVICE_CLASS)
if (data := self.sensor_types.get(device_class)) is not None:
return {
"sensorStatesSupported": {
"name": data[0],
"numericCapabilities": {"rawValueUnit": data[1]},
}
}
def query_attributes(self):
"""Return the attributes of this trait for this entity."""
device_class = self.state.attributes.get(ATTR_DEVICE_CLASS)
if (data := self.sensor_types.get(device_class)) is not None:
return {
"currentSensorStateData": [
{"name": data[0], "rawValue": self.state.state}
]
}
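# Illustrative note (not part of the original module): for a sensor entity with
# device_class "pm25" and state "12", sync_attributes above reports the name "PM2.5"
# with rawValueUnit "MICROGRAMS_PER_CUBIC_METER", and query_attributes returns
# {"currentSensorStateData": [{"name": "PM2.5", "rawValue": "12"}]}.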
| 35.049251
| 123
| 0.609335
|
32bfc60eeb94ccb83bcd3064ed0d5956779e4b66
| 11,524
|
py
|
Python
|
venv/Lib/site-packages/gevent/libuv/_corecffi_build.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | 4
|
2022-01-07T13:37:33.000Z
|
2022-03-31T03:21:17.000Z
|
venv/Lib/site-packages/gevent/libuv/_corecffi_build.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | 1
|
2022-01-27T04:21:58.000Z
|
2022-01-27T04:21:58.000Z
|
venv/Lib/site-packages/gevent/libuv/_corecffi_build.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
# pylint: disable=no-member
# This module is only used to create and compile the gevent.libuv._corecffi module;
# nothing should be directly imported from it except `ffi`, which should only be
# used for `ffi.compile()`; programs should import gevent.libuv._corecffi.
# However, because we are using "out-of-line" mode, it is necessary to examine
# this file to know what functions are created and available on the generated
# module.
from __future__ import absolute_import, print_function
import os
import os.path # pylint:disable=no-name-in-module
import platform
import sys
from cffi import FFI
sys.path.append(".")
try:
import _setuputils
except ImportError:
print("This file must be imported with setup.py in the current working dir.")
raise
__all__ = []
WIN = sys.platform.startswith('win32')
LIBUV_EMBED = _setuputils.should_embed('libuv')
ffi = FFI()
thisdir = os.path.dirname(os.path.abspath(__file__))
parentdir = os.path.abspath(os.path.join(thisdir, '..'))
setup_py_dir = os.path.abspath(os.path.join(thisdir, '..', '..', '..'))
libuv_dir = os.path.abspath(os.path.join(setup_py_dir, 'deps', 'libuv'))
def read_source(name):
# pylint:disable=unspecified-encoding
with open(os.path.join(thisdir, name), 'r') as f:
return f.read()
_cdef = read_source('_corecffi_cdef.c')
_source = read_source('_corecffi_source.c')
# These defines and uses help keep the C file readable and lintable by
# C tools.
_cdef = _cdef.replace('#define GEVENT_STRUCT_DONE int', '')
_cdef = _cdef.replace("GEVENT_STRUCT_DONE _;", '...;')
# nlink_t is not used in libuv.
_cdef = _cdef.replace('#define GEVENT_ST_NLINK_T int',
'')
_cdef = _cdef.replace('GEVENT_ST_NLINK_T', 'nlink_t')
_cdef = _cdef.replace('#define GEVENT_UV_OS_SOCK_T int', '')
# uv_os_sock_t is int on POSIX and SOCKET on Win32, but socket is
# just another name for handle, which is just another name for 'void*'
# which we will treat as an 'unsigned long' or 'unsigned long long'
# since it comes through 'fileno()' where it has been cast as an int.
# See class watcher.io
_void_pointer_as_integer = 'intptr_t'
_cdef = _cdef.replace("GEVENT_UV_OS_SOCK_T", 'int' if not WIN else _void_pointer_as_integer)
LIBUV_INCLUDE_DIRS = [
os.path.join(libuv_dir, 'include'),
os.path.join(libuv_dir, 'src'),
]
# Initially based on https://github.com/saghul/pyuv/blob/v1.x/setup_libuv.py
def _libuv_source(rel_path):
# Certain versions of setuptools, notably on windows, are *very*
# picky about what we feed to sources= "setup() arguments must
# *always* be /-separated paths relative to the setup.py
# directory, *never* absolute paths." POSIX doesn't have that issue.
path = os.path.join('deps', 'libuv', 'src', rel_path)
return path
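# Illustrative note (not part of the original file): on a POSIX build host,
# _libuv_source('unix/core.c') evaluates to 'deps/libuv/src/unix/core.c',
# i.e. a /-separated path relative to the setup.py directory, as required above.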
LIBUV_SOURCES = [
_libuv_source('fs-poll.c'),
_libuv_source('inet.c'),
_libuv_source('threadpool.c'),
_libuv_source('uv-common.c'),
_libuv_source('version.c'),
_libuv_source('uv-data-getter-setters.c'),
_libuv_source('timer.c'),
_libuv_source('idna.c'),
_libuv_source('strscpy.c')
]
if WIN:
LIBUV_SOURCES += [
_libuv_source('win/async.c'),
_libuv_source('win/core.c'),
_libuv_source('win/detect-wakeup.c'),
_libuv_source('win/dl.c'),
_libuv_source('win/error.c'),
_libuv_source('win/fs-event.c'),
_libuv_source('win/fs.c'),
# getaddrinfo.c refers to ConvertInterfaceIndexToLuid
# and ConvertInterfaceLuidToNameA, which are supposedly in iphlpapi.h
# and iphlpapi.lib/dll. But on Windows 10 with Python 3.5 and VC 14 (Visual Studio 2015),
# I get an undefined warning from the compiler for those functions and
# a link error from the linker, so this file can't be included.
# This is possibly because the functions are defined for Windows Vista, and
# Python 3.5 builds with an earlier SDK?
# Fortunately we don't use those functions.
#_libuv_source('win/getaddrinfo.c'),
# getnameinfo.c refers to uv__getaddrinfo_translate_error from
# getaddrinfo.c, which we don't have.
#_libuv_source('win/getnameinfo.c'),
_libuv_source('win/handle.c'),
_libuv_source('win/loop-watcher.c'),
_libuv_source('win/pipe.c'),
_libuv_source('win/poll.c'),
_libuv_source('win/process-stdio.c'),
_libuv_source('win/process.c'),
_libuv_source('win/signal.c'),
_libuv_source('win/snprintf.c'),
_libuv_source('win/stream.c'),
_libuv_source('win/tcp.c'),
_libuv_source('win/thread.c'),
_libuv_source('win/tty.c'),
_libuv_source('win/udp.c'),
_libuv_source('win/util.c'),
_libuv_source('win/winapi.c'),
_libuv_source('win/winsock.c'),
]
else:
LIBUV_SOURCES += [
_libuv_source('unix/async.c'),
_libuv_source('unix/core.c'),
_libuv_source('unix/dl.c'),
_libuv_source('unix/fs.c'),
_libuv_source('unix/getaddrinfo.c'),
_libuv_source('unix/getnameinfo.c'),
_libuv_source('unix/loop-watcher.c'),
_libuv_source('unix/loop.c'),
_libuv_source('unix/pipe.c'),
_libuv_source('unix/poll.c'),
_libuv_source('unix/process.c'),
_libuv_source('unix/signal.c'),
_libuv_source('unix/stream.c'),
_libuv_source('unix/tcp.c'),
_libuv_source('unix/thread.c'),
_libuv_source('unix/tty.c'),
_libuv_source('unix/udp.c'),
]
if sys.platform.startswith('linux'):
LIBUV_SOURCES += [
_libuv_source('unix/linux-core.c'),
_libuv_source('unix/linux-inotify.c'),
_libuv_source('unix/linux-syscalls.c'),
_libuv_source('unix/procfs-exepath.c'),
_libuv_source('unix/proctitle.c'),
_libuv_source('unix/random-sysctl-linux.c'),
]
elif sys.platform == 'darwin':
LIBUV_SOURCES += [
_libuv_source('unix/bsd-ifaddrs.c'),
_libuv_source('unix/darwin.c'),
_libuv_source('unix/darwin-proctitle.c'),
_libuv_source('unix/fsevents.c'),
_libuv_source('unix/kqueue.c'),
_libuv_source('unix/proctitle.c'),
]
elif sys.platform.startswith(('freebsd', 'dragonfly')): # pragma: no cover
# Not tested
LIBUV_SOURCES += [
_libuv_source('unix/bsd-ifaddrs.c'),
_libuv_source('unix/freebsd.c'),
_libuv_source('unix/kqueue.c'),
_libuv_source('unix/posix-hrtime.c'),
_libuv_source('unix/bsd-proctitle.c'),
]
elif sys.platform.startswith('openbsd'): # pragma: no cover
# Not tested
LIBUV_SOURCES += [
_libuv_source('unix/bsd-ifaddrs.c'),
_libuv_source('unix/kqueue.c'),
_libuv_source('unix/openbsd.c'),
_libuv_source('unix/posix-hrtime.c'),
_libuv_source('unix/bsd-proctitle.c'),
]
elif sys.platform.startswith('netbsd'): # pragma: no cover
# Not tested
LIBUV_SOURCES += [
_libuv_source('unix/bsd-ifaddrs.c'),
_libuv_source('unix/kqueue.c'),
_libuv_source('unix/netbsd.c'),
_libuv_source('unix/posix-hrtime.c'),
_libuv_source('unix/bsd-proctitle.c'),
]
elif sys.platform.startswith('sunos'): # pragma: no cover
# Not tested.
LIBUV_SOURCES += [
_libuv_source('unix/no-proctitle.c'),
_libuv_source('unix/sunos.c'),
]
elif sys.platform.startswith('aix'): # pragma: no cover
# Not tested.
LIBUV_SOURCES += [
_libuv_source('unix/aix.c'),
_libuv_source('unix/aix-common.c'),
]
elif sys.platform.startswith('haiku'): # pragma: no cover
# Not tested
LIBUV_SOURCES += [
_libuv_source('unix/haiku.c')
]
elif sys.platform.startswith('cygwin'): # pragma: no cover
# Not tested.
# Based on Cygwin package sources /usr/src/libuv-1.32.0-1.src/libuv-1.32.0/Makefile.am
# Apparently the same upstream at https://github.com/libuv/libuv/blob/v1.x/Makefile.am
LIBUV_SOURCES += [
_libuv_source('unix/cygwin.c'),
_libuv_source('unix/bsd-ifaddrs.c'),
_libuv_source('unix/no-fsevents.c'),
_libuv_source('unix/no-proctitle.c'),
_libuv_source('unix/posix-hrtime.c'),
_libuv_source('unix/posix-poll.c'),
_libuv_source('unix/procfs-exepath.c'),
_libuv_source('unix/sysinfo-loadavg.c'),
_libuv_source('unix/sysinfo-memory.c'),
]
LIBUV_MACROS = [
('LIBUV_EMBED', int(LIBUV_EMBED)),
]
def _define_macro(name, value):
LIBUV_MACROS.append((name, value))
LIBUV_LIBRARIES = []
def _add_library(name):
LIBUV_LIBRARIES.append(name)
if sys.platform != 'win32':
_define_macro('_LARGEFILE_SOURCE', 1)
_define_macro('_FILE_OFFSET_BITS', 64)
if sys.platform.startswith('linux'):
_add_library('dl')
_add_library('rt')
_define_macro('_GNU_SOURCE', 1)
_define_macro('_POSIX_C_SOURCE', '200112')
elif sys.platform == 'darwin':
_define_macro('_DARWIN_USE_64_BIT_INODE', 1)
_define_macro('_DARWIN_UNLIMITED_SELECT', 1)
elif sys.platform.startswith('netbsd'): # pragma: no cover
_add_library('kvm')
elif sys.platform.startswith('sunos'): # pragma: no cover
_define_macro('__EXTENSIONS__', 1)
_define_macro('_XOPEN_SOURCE', 500)
_add_library('kstat')
_add_library('nsl')
_add_library('sendfile')
_add_library('socket')
if platform.release() == '5.10':
# https://github.com/libuv/libuv/issues/1458
# https://github.com/giampaolo/psutil/blob/4d6a086411c77b7909cce8f4f141bbdecfc0d354/setup.py#L298-L300
_define_macro('SUNOS_NO_IFADDRS', '')
elif sys.platform.startswith('aix'): # pragma: no cover
_define_macro('_LINUX_SOURCE_COMPAT', 1)
if os.uname().sysname != 'OS400':
_add_library('perfstat')
elif WIN:
# All other gevent .pyd files link to the specific minor-version Python
# DLL, so we should do the same here. In virtual environments that don't
# contain the major-version python?.dll stub, _corecffi.pyd would otherwise
# cause the Windows DLL loader to search the entire PATH for a DLL with
# that name. This might end up bringing a second, ABI-incompatible Python
# version into the process, which can easily lead to crashes.
# See https://github.com/gevent/gevent/pull/1814/files
_define_macro('_CFFI_NO_LIMITED_API', 1)
_define_macro('_GNU_SOURCE', 1)
_define_macro('WIN32', 1)
_define_macro('_CRT_SECURE_NO_DEPRECATE', 1)
_define_macro('_CRT_NONSTDC_NO_DEPRECATE', 1)
_define_macro('_CRT_SECURE_NO_WARNINGS', 1)
_define_macro('_WIN32_WINNT', '0x0600')
_define_macro('WIN32_LEAN_AND_MEAN', 1)
_add_library('advapi32')
_add_library('iphlpapi')
_add_library('psapi')
_add_library('shell32')
_add_library('user32')
_add_library('userenv')
_add_library('ws2_32')
if not LIBUV_EMBED:
del LIBUV_SOURCES[:]
del LIBUV_INCLUDE_DIRS[:]
_add_library('uv')
LIBUV_INCLUDE_DIRS.append(parentdir)
ffi.cdef(_cdef)
ffi.set_source(
'gevent.libuv._corecffi',
_source,
sources=LIBUV_SOURCES,
depends=LIBUV_SOURCES,
include_dirs=LIBUV_INCLUDE_DIRS,
libraries=list(LIBUV_LIBRARIES),
define_macros=list(LIBUV_MACROS),
extra_compile_args=list(_setuputils.IGNORE_THIRD_PARTY_WARNINGS),
)
if __name__ == '__main__':
# See notes in libev/_corecffi_build.py for how to test this.
#
# Other than the obvious directory changes, the changes are:
#
# CPPFLAGS=-Ideps/libuv/include/ -Isrc/gevent/
ffi.compile(verbose=True)
| 35.027356
| 110
| 0.672943
|
1352a4470681f97e6abff8a7cb6cf5d67e6dfa38
| 267
|
py
|
Python
|
app1/templatetags/app1-tags.py
|
mohsenbjp/mysite
|
cb54c116fc684bb53b6fcb5f481d4f9c21f46455
|
[
"MIT"
] | null | null | null |
app1/templatetags/app1-tags.py
|
mohsenbjp/mysite
|
cb54c116fc684bb53b6fcb5f481d4f9c21f46455
|
[
"MIT"
] | 3
|
2021-11-17T11:58:00.000Z
|
2021-11-27T17:18:21.000Z
|
app1/templatetags/app1-tags.py
|
mohsenbjp/mysite
|
cb54c116fc684bb53b6fcb5f481d4f9c21f46455
|
[
"MIT"
] | null | null | null |
from django import template
register = template.Library()
from blog.models import Post,Category
@register.inclusion_tag('app1/app1-latestpost.html')
def latestpost():
posts = Post.objects.filter(status=1).order_by('-published_date')[0:6]
return {'posts': posts}
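# Illustrative usage only (not part of the original file): assuming the tag library
# is loaded under this module's name, the inclusion tag could be used in a template
# roughly like this:
#
# {% load app1-tags %}
# {% latestpost %}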
| 29.666667
| 72
| 0.764045
|
d3f00d6ebd27cf88f742af98cf7bfa47a4fd2eb9
| 1,348
|
py
|
Python
|
lib/surface/compute/snapshots/__init__.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/surface/compute/snapshots/__init__.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/snapshots/__init__.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading and manipulating snapshots."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Snapshots(base.Group):
"""List, describe, and delete Compute Engine snapshots."""
Snapshots.category = base.INSTANCES_CATEGORY
Snapshots.detailed_help = {
'DESCRIPTION': """
List, describe, and delete Compute Engine snapshots.
For more information about snapshots, see the
[snapshots documentation](https://cloud.google.com/compute/docs/disks/create-snapshots).
See also: [Snapshots API](https://cloud.google.com/compute/docs/reference/rest/v1/snapshots).
""",
}
| 33.7
| 101
| 0.746291
|
dd5d856fdb181d44d62284ca1f73badcb8374866
| 563
|
py
|
Python
|
tests/distributions/common.py
|
illyakaynov/tf2rl
|
03e8212e3871537cdb43cb9576b0d4686ab558e9
|
[
"MIT"
] | 1
|
2020-10-12T23:44:04.000Z
|
2020-10-12T23:44:04.000Z
|
tests/distributions/common.py
|
illyakaynov/tf2rl
|
03e8212e3871537cdb43cb9576b0d4686ab558e9
|
[
"MIT"
] | null | null | null |
tests/distributions/common.py
|
illyakaynov/tf2rl
|
03e8212e3871537cdb43cb9576b0d4686ab558e9
|
[
"MIT"
] | null | null | null |
import unittest
class CommonDist(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dim = 3
cls.batch_size = 4
cls.dist = None
def _skip_test_in_parent(self):
if self.dist is None:
return
else:
raise NotImplementedError
def test_kl(self):
self._skip_test_in_parent()
def test_log_likelihood(self):
self._skip_test_in_parent()
def test_ent(self):
self._skip_test_in_parent()
def test_sample(self):
self._skip_test_in_parent()
| 20.107143
| 37
| 0.62167
|
64737f4473db9ffc92b7d4388977b39b7e4c270d
| 1,700
|
py
|
Python
|
src/.ipynb_checkpoints/classes-checkpoint.py
|
jeffreyhwatson/terry_stops_project
|
9aa82ee4c2148e7f675d6eea5ab24409d0f2b129
|
[
"CC-BY-2.0"
] | 2
|
2021-09-25T03:00:55.000Z
|
2021-09-25T03:45:37.000Z
|
src/classes.py
|
jeffreyhwatson/terry_stops_project
|
9aa82ee4c2148e7f675d6eea5ab24409d0f2b129
|
[
"CC-BY-2.0"
] | null | null | null |
src/classes.py
|
jeffreyhwatson/terry_stops_project
|
9aa82ee4c2148e7f675d6eea5ab24409d0f2b129
|
[
"CC-BY-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.compose import make_column_selector
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
from sklearn.metrics import make_scorer, plot_confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.base import clone
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import make_pipeline as make_sm_pipeline
class Harness:
def __init__(self, scorer, random_state=2021):
self.scorer = scorer
self.history = pd.DataFrame(columns=['Name', 'Accuracy (F1)', 'Notes'])
def report(self, model, X, y, name, notes='', cv=5):
scores = cross_val_score(model, X, y,
scoring=self.scorer, cv=cv)
frame = pd.DataFrame([[name, scores.mean(), notes]], columns=['Name', 'Accuracy (F1)', 'Notes'])
self.history = self.history.append(frame)
self.history = self.history.reset_index(drop=True)
self.history = self.history.sort_values('Accuracy (F1)')
self.print_error(name, scores.mean())
# print(scores)
return scores
def print_error(self, name, Accuracy):
print(f'{name} has an average F1 of {Accuracy}')
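# Illustrative usage sketch only (not part of the original file); X and y are assumed
# to be a preprocessed feature matrix and binary target loaded elsewhere.
#
# harness = Harness(scorer=make_scorer(f1_score))
# model = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
# harness.report(model, X, y, name='logreg_baseline', notes='scaled features')
# print(harness.history)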
| 38.636364
| 104
| 0.741176
|
2e9c62f2286cdcd56374bb460f92ef0085381407
| 1,020
|
py
|
Python
|
Batch/Batch/src/batchJob.py
|
csbuja/browsecloud
|
da32017b6346026b4044d9a5f8efb0b00877e3a0
|
[
"MIT"
] | 159
|
2019-06-30T17:51:29.000Z
|
2022-03-24T10:01:32.000Z
|
Batch/Batch/src/batchJob.py
|
rbs-pli/browsecloud
|
da32017b6346026b4044d9a5f8efb0b00877e3a0
|
[
"MIT"
] | 6
|
2019-08-12T04:04:28.000Z
|
2022-03-02T04:57:15.000Z
|
Batch/Batch/src/batchJob.py
|
rbs-pli/browsecloud
|
da32017b6346026b4044d9a5f8efb0b00877e3a0
|
[
"MIT"
] | 18
|
2019-07-01T05:22:58.000Z
|
2020-12-22T09:02:07.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from jobStatus import JobStatus
class BatchJob():
def __init__(self, id_in: str, jobStatus_in, progress_in: int):
self.id = id_in
self.jobStatus = jobStatus_in
self.progress = progress_in
def next(self):
if self.jobStatus.value in [JobStatus.NotStarted, JobStatus.PreProcessing, JobStatus.Training]:
self.jobStatus = JobStatus(self.jobStatus.value + 1)
else:
raise ValueError("Invalid job status value.")
def makeProgress(self, progress: int):
if progress < 0 or progress > 100:
raise ValueError("Invalid progress value.")
else:
self.progress = progress
if __name__ == "__main__":
b = BatchJob("", JobStatus.NotStarted, 0, "", 5, 24)
b.next()
assert(b.jobStatus == JobStatus.PreProcessing)
b.makeProgress(5)
assert(b.progress == 5)
import json
print(json.dumps(b.__dict__))
| 29.142857
| 103
| 0.65
|
4e197365a7098e6f08cdd253353c295d8b714e05
| 15,894
|
py
|
Python
|
homeassistant/components/cover/template.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | 37
|
2018-05-22T07:17:26.000Z
|
2022-03-03T13:14:46.000Z
|
homeassistant/components/cover/template.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | 125
|
2018-12-11T07:31:20.000Z
|
2021-07-27T08:20:03.000Z
|
homeassistant/components/cover/template.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | 8
|
2018-05-30T20:05:26.000Z
|
2021-02-19T14:17:05.000Z
|
"""
Support for covers which integrate with other components.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.template/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.cover import (
ENTITY_ID_FORMAT, CoverDevice, PLATFORM_SCHEMA,
SUPPORT_OPEN_TILT, SUPPORT_CLOSE_TILT, SUPPORT_STOP_TILT,
SUPPORT_SET_TILT_POSITION, SUPPORT_OPEN, SUPPORT_CLOSE, SUPPORT_STOP,
SUPPORT_SET_POSITION, ATTR_POSITION, ATTR_TILT_POSITION)
from homeassistant.const import (
CONF_FRIENDLY_NAME, CONF_ENTITY_ID,
EVENT_HOMEASSISTANT_START, MATCH_ALL,
CONF_VALUE_TEMPLATE, CONF_ICON_TEMPLATE,
CONF_ENTITY_PICTURE_TEMPLATE, CONF_OPTIMISTIC,
STATE_OPEN, STATE_CLOSED)
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.script import Script
_LOGGER = logging.getLogger(__name__)
_VALID_STATES = [STATE_OPEN, STATE_CLOSED, 'true', 'false']
CONF_COVERS = 'covers'
CONF_POSITION_TEMPLATE = 'position_template'
CONF_TILT_TEMPLATE = 'tilt_template'
OPEN_ACTION = 'open_cover'
CLOSE_ACTION = 'close_cover'
STOP_ACTION = 'stop_cover'
POSITION_ACTION = 'set_cover_position'
TILT_ACTION = 'set_cover_tilt_position'
CONF_TILT_OPTIMISTIC = 'tilt_optimistic'
CONF_VALUE_OR_POSITION_TEMPLATE = 'value_or_position'
CONF_OPEN_OR_CLOSE = 'open_or_close'
TILT_FEATURES = (SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_STOP_TILT |
SUPPORT_SET_TILT_POSITION)
COVER_SCHEMA = vol.Schema({
vol.Inclusive(OPEN_ACTION, CONF_OPEN_OR_CLOSE): cv.SCRIPT_SCHEMA,
vol.Inclusive(CLOSE_ACTION, CONF_OPEN_OR_CLOSE): cv.SCRIPT_SCHEMA,
vol.Optional(STOP_ACTION): cv.SCRIPT_SCHEMA,
vol.Exclusive(CONF_POSITION_TEMPLATE,
CONF_VALUE_OR_POSITION_TEMPLATE): cv.template,
vol.Exclusive(CONF_VALUE_TEMPLATE,
CONF_VALUE_OR_POSITION_TEMPLATE): cv.template,
vol.Optional(CONF_POSITION_TEMPLATE): cv.template,
vol.Optional(CONF_TILT_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_TILT_OPTIMISTIC): cv.boolean,
vol.Optional(POSITION_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(TILT_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_ENTITY_ID): cv.entity_ids
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COVERS): vol.Schema({cv.slug: COVER_SCHEMA}),
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the Template cover."""
covers = []
for device, device_config in config[CONF_COVERS].items():
friendly_name = device_config.get(CONF_FRIENDLY_NAME, device)
state_template = device_config.get(CONF_VALUE_TEMPLATE)
position_template = device_config.get(CONF_POSITION_TEMPLATE)
tilt_template = device_config.get(CONF_TILT_TEMPLATE)
icon_template = device_config.get(CONF_ICON_TEMPLATE)
entity_picture_template = device_config.get(
CONF_ENTITY_PICTURE_TEMPLATE)
open_action = device_config.get(OPEN_ACTION)
close_action = device_config.get(CLOSE_ACTION)
stop_action = device_config.get(STOP_ACTION)
position_action = device_config.get(POSITION_ACTION)
tilt_action = device_config.get(TILT_ACTION)
optimistic = device_config.get(CONF_OPTIMISTIC)
tilt_optimistic = device_config.get(CONF_TILT_OPTIMISTIC)
if position_action is None and open_action is None:
_LOGGER.error('Must specify at least one of %s or %s',
OPEN_ACTION, POSITION_ACTION)
continue
template_entity_ids = set()
if state_template is not None:
temp_ids = state_template.extract_entities()
if str(temp_ids) != MATCH_ALL:
template_entity_ids |= set(temp_ids)
if position_template is not None:
temp_ids = position_template.extract_entities()
if str(temp_ids) != MATCH_ALL:
template_entity_ids |= set(temp_ids)
if tilt_template is not None:
temp_ids = tilt_template.extract_entities()
if str(temp_ids) != MATCH_ALL:
template_entity_ids |= set(temp_ids)
if icon_template is not None:
temp_ids = icon_template.extract_entities()
if str(temp_ids) != MATCH_ALL:
template_entity_ids |= set(temp_ids)
if entity_picture_template is not None:
temp_ids = entity_picture_template.extract_entities()
if str(temp_ids) != MATCH_ALL:
template_entity_ids |= set(temp_ids)
if not template_entity_ids:
template_entity_ids = MATCH_ALL
entity_ids = device_config.get(CONF_ENTITY_ID, template_entity_ids)
covers.append(
CoverTemplate(
hass,
device, friendly_name, state_template,
position_template, tilt_template, icon_template,
entity_picture_template, open_action, close_action,
stop_action, position_action, tilt_action,
optimistic, tilt_optimistic, entity_ids
)
)
if not covers:
_LOGGER.error("No covers added")
return False
async_add_devices(covers)
return True
class CoverTemplate(CoverDevice):
"""Representation of a Template cover."""
def __init__(self, hass, device_id, friendly_name, state_template,
position_template, tilt_template, icon_template,
entity_picture_template, open_action, close_action,
stop_action, position_action, tilt_action,
optimistic, tilt_optimistic, entity_ids):
"""Initialize the Template cover."""
self.hass = hass
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass)
self._name = friendly_name
self._template = state_template
self._position_template = position_template
self._tilt_template = tilt_template
self._icon_template = icon_template
self._entity_picture_template = entity_picture_template
self._open_script = None
if open_action is not None:
self._open_script = Script(hass, open_action)
self._close_script = None
if close_action is not None:
self._close_script = Script(hass, close_action)
self._stop_script = None
if stop_action is not None:
self._stop_script = Script(hass, stop_action)
self._position_script = None
if position_action is not None:
self._position_script = Script(hass, position_action)
self._tilt_script = None
if tilt_action is not None:
self._tilt_script = Script(hass, tilt_action)
self._optimistic = (optimistic or
(not state_template and not position_template))
self._tilt_optimistic = tilt_optimistic or not tilt_template
self._icon = None
self._entity_picture = None
self._position = None
self._tilt_value = None
self._entities = entity_ids
if self._template is not None:
self._template.hass = self.hass
if self._position_template is not None:
self._position_template.hass = self.hass
if self._tilt_template is not None:
self._tilt_template.hass = self.hass
if self._icon_template is not None:
self._icon_template.hass = self.hass
if self._entity_picture_template is not None:
self._entity_picture_template.hass = self.hass
@asyncio.coroutine
def async_added_to_hass(self):
"""Register callbacks."""
@callback
def template_cover_state_listener(entity, old_state, new_state):
"""Handle target device state changes."""
self.async_schedule_update_ha_state(True)
@callback
def template_cover_startup(event):
"""Update template on startup."""
async_track_state_change(
self.hass, self._entities, template_cover_state_listener)
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, template_cover_startup)
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._position == 0
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
if self._position_template or self._position_script:
return self._position
return None
@property
def current_cover_tilt_position(self):
"""Return current position of cover tilt.
None is unknown, 0 is closed, 100 is fully open.
"""
return self._tilt_value
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def entity_picture(self):
"""Return the entity picture to use in the frontend, if any."""
return self._entity_picture
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE
if self._stop_script is not None:
supported_features |= SUPPORT_STOP
if self._position_script is not None:
supported_features |= SUPPORT_SET_POSITION
if self.current_cover_tilt_position is not None:
supported_features |= TILT_FEATURES
return supported_features
@property
def should_poll(self):
"""Return the polling state."""
return False
@asyncio.coroutine
def async_open_cover(self, **kwargs):
"""Move the cover up."""
if self._open_script:
yield from self._open_script.async_run()
elif self._position_script:
yield from self._position_script.async_run({"position": 100})
if self._optimistic:
self._position = 100
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_close_cover(self, **kwargs):
"""Move the cover down."""
if self._close_script:
yield from self._close_script.async_run()
elif self._position_script:
yield from self._position_script.async_run({"position": 0})
if self._optimistic:
self._position = 0
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_stop_cover(self, **kwargs):
"""Fire the stop action."""
if self._stop_script:
yield from self._stop_script.async_run()
@asyncio.coroutine
def async_set_cover_position(self, **kwargs):
"""Set cover position."""
self._position = kwargs[ATTR_POSITION]
yield from self._position_script.async_run(
{"position": self._position})
if self._optimistic:
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_open_cover_tilt(self, **kwargs):
"""Tilt the cover open."""
self._tilt_value = 100
yield from self._tilt_script.async_run({"tilt": self._tilt_value})
if self._tilt_optimistic:
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_close_cover_tilt(self, **kwargs):
"""Tilt the cover closed."""
self._tilt_value = 0
yield from self._tilt_script.async_run(
{"tilt": self._tilt_value})
if self._tilt_optimistic:
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
self._tilt_value = kwargs[ATTR_TILT_POSITION]
yield from self._tilt_script.async_run({"tilt": self._tilt_value})
if self._tilt_optimistic:
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_update(self):
"""Update the state from the template."""
if self._template is not None:
try:
state = self._template.async_render().lower()
if state in _VALID_STATES:
if state in ('true', STATE_OPEN):
self._position = 100
else:
self._position = 0
else:
_LOGGER.error(
'Received invalid cover is_on state: %s. Expected: %s',
state, ', '.join(_VALID_STATES))
self._position = None
except TemplateError as ex:
_LOGGER.error(ex)
self._position = None
if self._position_template is not None:
try:
state = float(self._position_template.async_render())
if state < 0 or state > 100:
self._position = None
_LOGGER.error("Cover position value must be"
" between 0 and 100."
" Value was: %.2f", state)
else:
self._position = state
except TemplateError as ex:
_LOGGER.error(ex)
self._position = None
except ValueError as ex:
_LOGGER.error(ex)
self._position = None
if self._tilt_template is not None:
try:
state = float(self._tilt_template.async_render())
if state < 0 or state > 100:
self._tilt_value = None
_LOGGER.error("Tilt value must be between 0 and 100."
" Value was: %.2f", state)
else:
self._tilt_value = state
except TemplateError as ex:
_LOGGER.error(ex)
self._tilt_value = None
except ValueError as ex:
_LOGGER.error(ex)
self._tilt_value = None
for property_name, template in (
('_icon', self._icon_template),
('_entity_picture', self._entity_picture_template)):
if template is None:
continue
try:
setattr(self, property_name, template.async_render())
except TemplateError as ex:
friendly_property_name = property_name[1:].replace('_', ' ')
if ex.args and ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"):
# Common during HA startup - so just a warning
_LOGGER.warning('Could not render %s template %s,'
' the state is unknown.',
friendly_property_name, self._name)
return
try:
setattr(self, property_name,
getattr(super(), property_name))
except AttributeError:
_LOGGER.error('Could not render %s template %s: %s',
friendly_property_name, self._name, ex)
| 38.115108
| 79
| 0.633069
|
c8fea39cf935add40923f9a379cf3844d01f3159
| 744
|
py
|
Python
|
build/__init__.py
|
amagee/commandfrog
|
3136c4447ca8a96fdbbfb837d61d42d63b5e19dc
|
[
"MIT"
] | null | null | null |
build/__init__.py
|
amagee/commandfrog
|
3136c4447ca8a96fdbbfb837d61d42d63b5e19dc
|
[
"MIT"
] | null | null | null |
build/__init__.py
|
amagee/commandfrog
|
3136c4447ca8a96fdbbfb837d61d42d63b5e19dc
|
[
"MIT"
] | null | null | null |
import subprocess
from pathlib import Path
from functools import reduce
import operator
def build():
subprocess.run("rm -rf dist", cwd="commandfrog", check=True, shell=True)
operations = [p.stem for p in (Path("commandfrog") / "operations").glob("*.py")]
# This creates a binary in `commandfrog/dist/run`.
subprocess.run(
[
"pyinstaller",
*reduce(
operator.iconcat,
[["--hidden-import", f"commandfrog.operations.{f}"] for f in operations]
),
"-F",
"run.py",
],
cwd="commandfrog",
check=True,
)
subprocess.run("cp commandfrog/dist/run commandfrog-builds/commandfrog", check=True, shell=True)
| 26.571429
| 100
| 0.579301
|
92488c520eb6d4dbf4de2fe4924d448da4e562d8
| 603
|
py
|
Python
|
mason/workflows/workflow_module.py
|
kyprifog/mason
|
bf45672124ef841bc16216c293034f4ccc506621
|
[
"Apache-2.0"
] | 4
|
2021-04-12T17:49:34.000Z
|
2022-01-23T19:54:29.000Z
|
mason/workflows/workflow_module.py
|
kyprifog/mason
|
bf45672124ef841bc16216c293034f4ccc506621
|
[
"Apache-2.0"
] | 24
|
2021-04-30T18:40:25.000Z
|
2021-05-12T20:52:06.000Z
|
mason/workflows/workflow_module.py
|
kyprifog/mason
|
bf45672124ef841bc16216c293034f4ccc506621
|
[
"Apache-2.0"
] | 3
|
2021-04-12T19:40:43.000Z
|
2021-09-07T21:56:36.000Z
|
from importlib import import_module
from typing import Union
from mason.engines.scheduler.models.dags.executed_dag_step import ExecutedDagStep
from mason.engines.scheduler.models.dags.failed_dag_step import FailedDagStep
from mason.engines.scheduler.models.dags.invalid_dag_step import InvalidDagStep
from mason.engines.scheduler.models.dags.valid_dag_step import ValidDagStep
from mason.util.environment import MasonEnvironment
from mason.util.string import to_class_case
from mason.workflows.invalid_workflow import InvalidWorkflow
from mason.workflows.workflow_definition import WorkflowDefinition
| 46.384615
| 81
| 0.883914
|
6be5539d7fdfef0521c3f8a8f1415694ba2b42d3
| 9,104
|
py
|
Python
|
py/probe/runtime_probe/probe_config_definition.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 3
|
2022-01-06T16:52:52.000Z
|
2022-03-07T11:30:47.000Z
|
py/probe/runtime_probe/probe_config_definition.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | null | null | null |
py/probe/runtime_probe/probe_config_definition.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 1
|
2021-10-24T01:47:22.000Z
|
2021-10-24T01:47:22.000Z
|
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(yhong): Integrate the module with go/cros-probe.
import re
from cros.factory.probe.runtime_probe import probe_config_types
from cros.factory.utils import type_utils
@type_utils.CachedGetter
def _GetAllProbeStatementDefinitions():
def _GetASCIIStringErrorMsg(length1, length2=None):
if length2 is None:
return f'format error, expect a {length1}-byte ASCII string'
return (f'format error, expect an ASCII string of length {length1} to '
f'{length2}')
probe_statement_definitions = {}
# Create battery builder
builder = probe_config_types.ProbeStatementDefinitionBuilder('battery')
builder.AddProbeFunction('generic_battery',
'Read battery information from sysfs.')
builder.AddStrOutputField('manufacturer',
('Manufacturing name exposed from the ACPI '
'interface.'))
builder.AddStrOutputField('model_name',
'Model name exposed from the ACPI interface.')
builder.AddStrOutputField('technology',
'Technology exposed from the ACPI interface.')
probe_statement_definitions['battery'] = builder.Build()
# Create storage builder
builder = probe_config_types.ProbeStatementDefinitionBuilder('storage')
builder.AddProbeFunction('generic_storage',
('A method that tries various of way to detect the '
'storage component.'))
builder.AddStrOutputField('type', 'HW interface type of the storage.')
builder.AddIntOutputField('sectors', 'Sector size.')
builder.AddProbeFunction('mmc_storage', 'Probe function for eMMC storage.')
probe_function_names = ['generic_storage', 'mmc_storage']
builder.AddHexOutputField('mmc_hwrev', 'Hardware revision in CID register.',
probe_function_names=probe_function_names,
num_value_digits=1)
builder.AddHexOutputField(
'mmc_manfid', 'Manufacturer ID (MID) in CID register.',
probe_function_names=probe_function_names, num_value_digits=2)
builder.AddHexOutputField(
'mmc_oemid', 'OEM/Application ID (OID) in CID register.',
probe_function_names=probe_function_names, num_value_digits=4)
builder.AddStrOutputField(
'mmc_name', 'Product name (PNM) in CID register.',
probe_function_names=probe_function_names,
value_pattern=re.compile('^[ -~]{4,6}$'),
value_format_error_msg=_GetASCIIStringErrorMsg(4, 6))
builder.AddHexOutputField(
'mmc_prv', 'Product revision (PRV) in CID register.',
probe_function_names=probe_function_names, num_value_digits=2)
builder.AddHexOutputField('mmc_serial', 'Product Serial Number (PSN)',
probe_function_names=probe_function_names,
num_value_digits=8)
builder.AddProbeFunction('nvme_storage', 'Probe function for NVMe storage.')
probe_function_names = ['generic_storage', 'nvme_storage']
builder.AddHexOutputField('pci_vendor', 'PCI Vendor ID.',
probe_function_names=probe_function_names,
num_value_digits=4)
builder.AddHexOutputField('pci_device', 'PCI Device ID.',
probe_function_names=probe_function_names,
num_value_digits=4)
builder.AddHexOutputField('pci_class', 'PCI Device Class Indicator.',
probe_function_names=probe_function_names,
num_value_digits=6)
builder.AddStrOutputField('nvme_model', 'NVMe model name.',
probe_function_names=probe_function_names)
builder.AddProbeFunction('ata_storage', 'Probe function for ATA storage.')
probe_function_names = ['generic_storage', 'ata_storage']
builder.AddStrOutputField('ata_vendor', 'Vendor name.',
probe_function_names=probe_function_names,
value_pattern=re.compile('^ATA$'),
value_format_error_msg=_GetASCIIStringErrorMsg(8))
builder.AddStrOutputField('ata_model', 'Model name.',
probe_function_names=probe_function_names,
value_format_error_msg=_GetASCIIStringErrorMsg(32))
probe_statement_definitions['storage'] = builder.Build()
# Create network builder
for network_type in ['cellular', 'ethernet', 'wireless']:
builder = probe_config_types.ProbeStatementDefinitionBuilder(network_type)
builder.AddProbeFunction(
f'{network_type}_network',
(f'A method that tries various ways to detect the {network_type} '
'component.'))
builder.AddStrOutputField(
'bus_type', 'HW interface type of the component.',
value_pattern=re.compile('(pci|usb|sdio)$'),
value_format_error_msg='Must be either "pci", "usb", or "sdio"')
builder.AddHexOutputField('pci_vendor_id', 'PCI Vendor ID.',
num_value_digits=4)
builder.AddHexOutputField('pci_device_id', 'PCI Device ID.',
num_value_digits=4)
builder.AddHexOutputField('pci_revision', 'PCI Revision Info.',
num_value_digits=2)
builder.AddHexOutputField('pci_subsystem', 'PCI subsystem ID.',
num_value_digits=4)
builder.AddHexOutputField('usb_vendor_id', 'USB Vendor ID.',
num_value_digits=4)
builder.AddHexOutputField('usb_product_id', 'USB Product ID.',
num_value_digits=4)
builder.AddHexOutputField('usb_bcd_device', 'USB BCD Device Info.',
num_value_digits=4)
builder.AddHexOutputField('sdio_vendor_id', 'SDIO Vendor ID.',
num_value_digits=4)
builder.AddHexOutputField('sdio_device_id', 'SDIO Device ID.',
num_value_digits=4)
probe_statement_definitions[network_type] = builder.Build()
# Create dram builder
builder = probe_config_types.ProbeStatementDefinitionBuilder('dram')
builder.AddProbeFunction('memory', 'Probe memory from DMI.')
builder.AddStrOutputField('part', 'Part number.')
builder.AddIntOutputField('size', 'Memory size in MiB.')
builder.AddIntOutputField('slot', 'Memory slot index.')
probe_statement_definitions['dram'] = builder.Build()
# Create input_device builder
for category in ['stylus', 'touchpad', 'touchscreen']:
builder = probe_config_types.ProbeStatementDefinitionBuilder(category)
builder.AddProbeFunction('input_device', 'Probe input devices from procfs.')
builder.AddStrOutputField('name', 'Model name.')
builder.AddHexOutputField('product', 'Product ID.')
builder.AddHexOutputField('vendor', 'Vendor ID.', num_value_digits=4)
builder.AddStrOutputField('fw_version', 'Firmware version.')
probe_statement_definitions[category] = builder.Build()
builder = probe_config_types.ProbeStatementDefinitionBuilder('camera')
builder.AddProbeFunction('usb_camera',
('A method that probes camera devices on USB bus.'))
builder.AddStrOutputField('bus_type', 'HW interface type of the component.',
value_pattern=re.compile('usb$'),
value_format_error_msg=('Currently must be "usb".'))
builder.AddHexOutputField('usb_vendor_id', 'USB Vendor ID.',
num_value_digits=4)
builder.AddHexOutputField('usb_product_id', 'USB Product ID.',
num_value_digits=4)
builder.AddHexOutputField('usb_bcd_device', 'USB BCD Device Info.',
num_value_digits=4)
builder.AddStrOutputField('usb_removable', 'Whether the device is removable.')
probe_statement_definitions['camera'] = builder.Build()
builder = probe_config_types.ProbeStatementDefinitionBuilder('display_panel')
builder.AddProbeFunction('edid', 'A method that probes devices via edid.')
builder.AddIntOutputField('height', 'The height of the device.')
builder.AddHexOutputField('product_id', 'The product ID, 16 bits',
num_value_digits=4)
builder.AddStrOutputField(
'vendor', 'The vendor code, 3 letters',
value_pattern=re.compile('[A-Z]{3}$'),
value_format_error_msg='Must be a 3-letter all caps string.')
builder.AddIntOutputField('width', 'The width of the device.')
probe_statement_definitions['display_panel'] = builder.Build()
return probe_statement_definitions
def GetProbeStatementDefinition(name):
"""Get the probe statement definition of the given name.
Please refer to `_GetAllProbeStatementDefinitions()` for the available
name list.
Args:
name: Name of the probe statement definition.
Returns:
An instance of `probe_config_types.ProbeStatementDefinition`.
"""
return _GetAllProbeStatementDefinitions()[name]
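# Illustrative usage only (not part of the original file): the cached definitions
# built above are looked up by category name, e.g.
#
# battery_def = GetProbeStatementDefinition('battery')
# storage_def = GetProbeStatementDefinition('storage')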
| 49.210811
| 80
| 0.674209
|
8a437813b00d1d4c92055e202fe8d95650809726
| 35,476
|
py
|
Python
|
fenics_concrete/material_problems/concrete_thermo_mechanical.py
|
BAMresearch/FenicsConcrete
|
7a086d7767e20bd111cc7b05e5aa742d7e5ff47c
|
[
"MIT"
] | null | null | null |
fenics_concrete/material_problems/concrete_thermo_mechanical.py
|
BAMresearch/FenicsConcrete
|
7a086d7767e20bd111cc7b05e5aa742d7e5ff47c
|
[
"MIT"
] | 1
|
2022-03-24T15:24:53.000Z
|
2022-03-24T15:24:53.000Z
|
fenics_concrete/material_problems/concrete_thermo_mechanical.py
|
BAMresearch/FenicsConcrete
|
7a086d7767e20bd111cc7b05e5aa742d7e5ff47c
|
[
"MIT"
] | null | null | null |
import dolfin as df
import numpy as np
import scipy.optimize
from fenics_concrete.material_problems.material_problem import MaterialProblem
from fenics_concrete.helpers import Parameters
from fenics_concrete.helpers import set_q
from fenics_concrete.helpers import LocalProjector
from fenics_concrete import experimental_setups
import warnings
from ffc.quadrature.deprecation import QuadratureRepresentationDeprecationWarning
df.parameters["form_compiler"]["representation"] = "quadrature"
warnings.simplefilter("ignore", QuadratureRepresentationDeprecationWarning)
# full concrete model, including hydration-temperate and mechanics, including calls to solve etc.
class ConcreteThermoMechanical(MaterialProblem):
def __init__(self, experiment=None, parameters=None, pv_name='pv_output_concrete-thermo-mechanical'):
# generate "dummy" experiement when none is passed
if experiment == None:
experiment = experimental_setups.get_experiment('MinimalCube', parameters)
super().__init__(experiment, parameters, pv_name)
# # TODO: define global fields here
# # - alpha, V
# # - etc...
def setup(self):
# set up initial temperature material parameters
default_p = Parameters()
# Material parameter for concrete model with temperature and hydration
default_p['density'] = 2350 # in kg/m^3 density of concrete
default_p['density_binder'] = 1440 # in kg/m^3 density of the binder
default_p['themal_cond'] = 2.0 # effective thermal conductivity, approx in Wm^-3K^-1, concrete!
# self.specific_heat_capacity = 9000 # effective specific heat capacity in J kg⁻1 K⁻1
default_p['vol_heat_cap'] = 2.4e6 # volumetric heat cap J/(m3 K)
default_p['b_ratio'] = 0.2 # volume percentage of binder
default_p['Q_pot'] = 500e3 # potential heat per weight of binder in J/kg
# p['Q_inf'] = self.Q_pot * self.density_binder * self.b_ratio # potential heat per concrete volume in J/m3
default_p['B1'] = 2.916E-4 # in 1/s
default_p['B2'] = 0.0024229 # -
default_p['eta'] = 5.554 # something about diffusion
default_p['alpha_max'] = 0.875 # also possible to approximate based on equation with w/c
default_p['E_act'] = 5653 * self.p.igc # activation energy in Jmol^-1
default_p['T_ref'] = 25 # reference temperature in degree celsius
# setting for temperature adjustment
# option: 'exponential' and 'off'
default_p['temp_adjust_law'] = 'exponential'
# polynomial degree
default_p['degree'] = 2 # default boundary setting
### paramters for mechanics problem
default_p['E_28'] = 15000000 # Young's modulus in N/m2 or something... TODO: check units!
default_p['nu'] = 0.2 # Poisson's ratio
# required parameters for alpha to E mapping
default_p['alpha_t'] = 0.2
default_p['alpha_0'] = 0.05
default_p['a_E'] = 0.6
# required parameters for alpha to tensile and compressive stiffness mapping
default_p['fc_inf'] = 6210000
default_p['a_fc'] = 1.2
default_p['ft_inf'] = 467000
default_p['a_ft'] = 1.0
self.p = default_p + self.p
# setting up the two nonlinear problems
self.temperature_problem = ConcreteTempHydrationModel(self.experiment.mesh, self.p, pv_name=self.pv_name)
# here I "pass on the parameters from temperature to mechanics problem.."
self.mechanics_problem = ConcreteMechanicsModel(self.experiment.mesh, self.p, pv_name=self.pv_name)
# coupling of the output files
self.mechanics_problem.pv_file = self.temperature_problem.pv_file
# initialize concrete temperature as given in experimental setup
self.set_inital_T(self.p.T_0)
# setting bcs
self.mechanics_problem.set_bcs(self.experiment.create_displ_bcs(self.mechanics_problem.V))
self.temperature_problem.set_bcs(self.experiment.create_temp_bcs(self.temperature_problem.V))
# setting up the solvers
self.temperature_solver = df.NewtonSolver()
self.temperature_solver.parameters['absolute_tolerance'] = 1e-9
self.temperature_solver.parameters['relative_tolerance'] = 1e-8
self.mechanics_solver = df.NewtonSolver()
self.mechanics_solver.parameters['absolute_tolerance'] = 1e-9
self.mechanics_solver.parameters['relative_tolerance'] = 1e-8
def solve(self, t=1.0):
# print('Solving: T') # TODO: output only at a certain log level (INFO)
self.temperature_solver.solve(self.temperature_problem, self.temperature_problem.T.vector())
# set current DOH for computation of Young's modulus
self.mechanics_problem.q_alpha = self.temperature_problem.q_alpha
# print('Solving: u') # TODO: output only at a certain log level (INFO)
# mechanics problem is not required for temperature, could crash in first time steps but then be useful
try:
self.mechanics_solver.solve(self.mechanics_problem, self.mechanics_problem.u.vector())
except Exception as e:
print('AAAAAAAAAAHHHHHHHHHH!!!!!')
warnings.warn(f'Mechanics crashed at time: {t}, Error message: {e}')
# history update
self.temperature_problem.update_history()
# save fields to global problem for sensor output
self.displacement = self.mechanics_problem.u
self.temperature = self.temperature_problem.T
self.degree_of_hydration = df.project(self.temperature_problem.q_alpha, self.temperature_problem.visu_space, form_compiler_parameters={'quadrature_degree': self.p.degree})
self.q_degree_of_hydration = self.temperature_problem.q_alpha
self.q_yield = self.mechanics_problem.q_yield
# get sensor data
for sensor_name in self.sensors:
# go through all sensors and measure
self.sensors[sensor_name].measure(self, t)
def pv_plot(self, t=0):
# calls paraview output for both problems
self.temperature_problem.pv_plot(t=t)
self.mechanics_problem.pv_plot(t=t)
def set_inital_T(self, T):
self.temperature_problem.set_initial_T(T)
def set_timestep(self, dt):
self.temperature_problem.set_timestep(dt)
def get_heat_of_hydration_ftk(self):
return self.temperature_problem.heat_of_hydration_ftk
def get_E_alpha_fkt(self):
return np.vectorize(self.mechanics_problem.E_fkt)
def get_X_alpha_fkt(self):
return self.mechanics_problem.general_hydration_fkt
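# Illustrative driver sketch only (not part of the original file); the experiment and
# Parameters objects are assumed to be created with the fenics_concrete helpers elsewhere.
#
# problem = ConcreteThermoMechanical(experiment=experiment, parameters=parameters)
# problem.set_timestep(3600)        # one-hour steps, in seconds
# t = 0
# while t <= 48 * 3600:             # simulate two days of hydration
#     t += 3600
#     problem.solve(t=t)
#     problem.pv_plot(t=t)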
class ConcreteTempHydrationModel(df.NonlinearProblem):
def __init__(self, mesh, p, pv_name='temp_output', **kwargs):
df.NonlinearProblem.__init__(self) # apparently required to initialize things
self.p = p
if mesh != None:
# initialize possible paraview output
self.pv_file = df.XDMFFile(pv_name + '.xdmf')
self.pv_file.parameters["flush_output"] = True
self.pv_file.parameters["functions_share_mesh"] = True
# function space for single value per element, required for plot of quadrature space values
# initialize timestep, must be reset using .set_timestep(dt)
self.dt = 0
self.dt_form = df.Constant(self.dt)
if self.p.degree == 1:
self.visu_space = df.FunctionSpace(mesh, "DG", 0)
else:
self.visu_space = df.FunctionSpace(mesh, "P", 1)
metadata = {"quadrature_degree": self.p.degree, "quadrature_scheme": "default"}
dxm = df.dx(metadata=metadata)
# solution field
self.V = df.FunctionSpace(mesh, 'P', self.p.degree)
# generic quadrature function space
cell = mesh.ufl_cell()
q = "Quadrature"
quadrature_element = df.FiniteElement(q, cell, degree=self.p.degree, quad_scheme="default")
q_V = df.FunctionSpace(mesh, quadrature_element)
# quadrature functions
self.q_T = df.Function(q_V, name="temperature")
self.q_alpha = df.Function(q_V, name="degree of hydration")
self.q_alpha_n = df.Function(q_V, name="degree of hydration last time step")
self.q_delta_alpha = df.Function(q_V, name="inrease in degree of hydration")
self.q_ddalpha_dT = df.Function(q_V, name="derivative of delta alpha wrt temperature")
# empty list for newton iteration to compute delta alpha using the last value as starting point
self.delta_alpha_n_list = np.full(np.shape(self.q_alpha_n.vector().get_local()), 0.2)
# fallback starting guess for the newton iteration in case the previous values fail
self.delta_alpha_guess = np.full(np.shape(self.q_alpha_n.vector().get_local()), 0.5)
# scalars for the analysis of the heat of hydration
self.alpha = 0
self.delta_alpha = 0
# Define variational problem
self.T = df.Function(self.V) # temperature
self.T_n = df.Function(self.V) # overwritten later...
T_ = df.TrialFunction(self.V) # temperature
vT = df.TestFunction(self.V)
# normal form
R_ufl = df.Constant(self.p.vol_heat_cap) * (self.T) * vT * dxm
R_ufl += self.dt_form * df.dot(df.Constant(self.p.themal_cond) * df.grad(self.T), df.grad(vT)) * dxm
R_ufl += - df.Constant(self.p.vol_heat_cap) * self.T_n * vT * dxm
# quadrature point part
self.R = R_ufl - df.Constant(
self.p.Q_pot * self.p.density_binder * self.p.b_ratio) * self.q_delta_alpha * vT * dxm
# derivative
# normal form
dR_ufl = df.derivative(R_ufl, self.T)
# quadrature part
self.dR = dR_ufl - df.Constant(
self.p.Q_pot * self.p.density_binder * self.p.b_ratio) * self.q_ddalpha_dT * T_ * vT * dxm
# set up projector to project the continuous function space onto the quadrature space
self.project_T = LocalProjector(self.T, q_V, dxm)
self.assembler = None # set as default, to check whether bcs have been added
def delta_alpha_fkt(self, delta_alpha, alpha_n, T):
return delta_alpha - self.dt * self.affinity(delta_alpha, alpha_n) * self.temp_adjust(T)
def delta_alpha_prime(self, delta_alpha, alpha_n, T):
return 1 - self.dt * self.daffinity_ddalpha(delta_alpha, alpha_n) * self.temp_adjust(T)
def heat_of_hydration_ftk(self, T, time_list, dt, parameter):
def interpolate(x, x_list, y_list):
# assuming ordered x list
i = 0
# check if x is in the dataset
if x > x_list[-1]:
print(' * Warning!!!: Extrapolation!!!')
point1 = (x_list[-2], y_list[-2])
point2 = (x_list[-1], y_list[-1])
elif x < x_list[0]:
print(' * Warning!!!: Extrapolation!!!')
point1 = (x_list[0], y_list[0])
point2 = (x_list[1], y_list[1])
else:
while x_list[i] < x:
i += 1
point1 = (x_list[i - 1], y_list[i - 1])
point2 = (x_list[i], y_list[i])
slope = (point2[1] - point1[1]) / (point2[0] - point1[0])
x_increment = x - point1[0]
y_increment = slope * x_increment
y = point1[1] + y_increment
return y
# get tmax, identify number of time steps, then interpolate data
# assuming time list is ordered!!!
tmax = time_list[-1]
# set parameters
self.p.B1 = parameter['B1']
self.p.B2 = parameter['B2']
self.p.eta = parameter['eta']
self.p.alpha_max = parameter['alpha_max']
self.p.E_act = parameter['E_act']
self.p.T_ref = parameter['T_ref']
self.p.Q_pot = parameter['Q_pot']
# set time step
self.dt = dt
t = 0
time = [0.0]
heat = [0.0]
alpha_list = [0.0]
alpha = 0
delta_alpha = 0.0
error_flag = False
while t < tmax:
# compute delta_alpha
try:
delta_alpha = scipy.optimize.newton(self.delta_alpha_fkt, args=(alpha, T + self.p.zero_C),
fprime=self.delta_alpha_prime, x0=delta_alpha)
if delta_alpha < 0:
raise Exception(
f'Problem with solving for delta alpha. Result is negative for starting delta alpha = {delta_alpha}')
except:
delta_alpha = 0.2
try:
delta_alpha = scipy.optimize.newton(self.delta_alpha_fkt, args=(alpha, T + self.p.zero_C),
fprime=self.delta_alpha_prime, x0=delta_alpha)
if delta_alpha < 0:
raise Exception(
'Problem with solving for delta alpha. Result is negative for starting delta alpha = 0.2')
except:
delta_alpha = 0.5
try:
delta_alpha = scipy.optimize.newton(self.delta_alpha_fkt, args=(alpha, T + self.p.zero_C),
fprime=self.delta_alpha_prime, x0=delta_alpha)
if delta_alpha < 0:
raise Exception(
'Problem with solving for delta alpha. Result is negative for starting delta alpha = 0.5')
except:
delta_alpha = 1.0
try:
delta_alpha = scipy.optimize.newton(self.delta_alpha_fkt, args=(alpha, T + self.p.zero_C),
fprime=self.delta_alpha_prime, x0=delta_alpha)
if delta_alpha < 0:
raise Exception('Problem with solving for delta alpha. Result is negative.')
except:
error_flag = True
break
# update alpha
alpha = delta_alpha + alpha
# save heat of hydration
alpha_list.append(alpha)
heat.append(alpha * self.p.Q_pot)
# timeupdate
t = t + self.dt
time.append(t)
        # if there was a problem with the computation (bad input values), return zero
if error_flag:
heat_interpolated = np.zeros_like(time_list)
alpha_interpolated = np.zeros_like(time_list)
else:
# interpolate heat to match time_list
heat_interpolated = []
alpha_interpolated = []
for value in time_list:
heat_interpolated.append(interpolate(value, time, heat))
alpha_interpolated.append(interpolate(value, time, alpha_list))
return np.asarray(heat_interpolated) / 1000, np.asarray(alpha_interpolated)
def get_affinity(self):
alpha_list = []
affinity_list = []
for val in range(1000):
alpha = val / 1000
alpha_list.append(alpha)
affinity_list.append(self.affinity(alpha, 0))
return np.asarray(alpha_list), np.asarray(affinity_list)
def evaluate_material(self):
        # project temperature onto quadrature spaces
self.project_T(self.q_T)
# convert quadrature spaces to numpy vector
temperature_list = self.q_T.vector().get_local()
alpha_n_list = self.q_alpha_n.vector().get_local()
# solve for alpha at each quadrature point
        # here the Newton-Raphson method of the scipy package is used
        # the zero of delta_alpha_fkt is found for each entry in alpha_n_list; the corresponding temperature
        # is given in temperature_list, and the value from the last step (delta_alpha_n) is used as the starting point
try:
delta_alpha_list = scipy.optimize.newton(self.delta_alpha_fkt, args=(alpha_n_list, temperature_list),
fprime=self.delta_alpha_prime, x0=self.delta_alpha_n_list)
            # do not fully trust the algorithm: check that only physically applicable results are obtained
except:
# AAAAAAHHHH, negative delta alpha!!!!
# NO PROBLEM!!!, different starting value!
delta_alpha_list = scipy.optimize.newton(self.delta_alpha_fkt, args=(alpha_n_list, temperature_list),
fprime=self.delta_alpha_prime, x0=self.delta_alpha_guess)
if np.any(delta_alpha_list < 0.0):
print('AAAAAAHHHH, negative delta alpha!!!!')
raise Exception(
'There is a problem with the alpha computation/initial guess, computed delta alpha is negative.')
# save the delta alpha for next iteration as starting guess
self.delta_alpha_n_list = delta_alpha_list
# compute current alpha
alpha_list = alpha_n_list + delta_alpha_list
# compute derivative of delta alpha with respect to temperature for rhs
ddalpha_dT_list = self.dt * self.affinity(alpha_list, alpha_n_list) * self.temp_adjust_tangent(temperature_list)
# project lists onto quadrature spaces
set_q(self.q_alpha, alpha_list)
set_q(self.q_delta_alpha, delta_alpha_list)
set_q(self.q_ddalpha_dT, ddalpha_dT_list)
def update_history(self):
        self.T_n.assign(self.T) # save temperature field
self.q_alpha_n.assign(self.q_alpha) # save alpha field
def set_timestep(self, dt):
self.dt = dt
self.dt_form.assign(df.Constant(self.dt))
def set_initial_T(self, T):
# set initial temperature, in kelvin
T0 = df.Expression('t_zero', t_zero=T + self.p.zero_C, degree=0)
self.T_n.interpolate(T0)
self.T.interpolate(T0)
def set_bcs(self, bcs):
# Only now (with the bcs) can we initialize the assembler
self.assembler = df.SystemAssembler(self.dR, self.R, bcs)
def F(self, b, x):
if self.dt <= 0:
raise RuntimeError("You need to `.set_timestep(dt)` larger than zero before the solve!")
if not self.assembler:
raise RuntimeError("You need to `.set_bcs(bcs)` before the solve!")
self.evaluate_material()
self.assembler.assemble(b, x)
def J(self, A, x):
self.assembler.assemble(A)
def pv_plot(self, t=0):
# paraview export
# temperature plot
T_plot = df.project(self.T, self.V)
T_plot.rename("Temperature", "test string, what does this do??") # TODO: what does the second string do?
self.pv_file.write(T_plot, t, encoding=df.XDMFFile.Encoding.ASCII)
# degree of hydration plot
alpha_plot = df.project(self.q_alpha, self.visu_space,
form_compiler_parameters={'quadrature_degree': self.p.degree})
alpha_plot.rename("DOH", "test string, what does this do??") # TODO: what does the second string do?
self.pv_file.write(alpha_plot, t, encoding=df.XDMFFile.Encoding.ASCII)
def temp_adjust(self, T):
val = 1
if self.p.temp_adjust_law == 'exponential':
val = np.exp(-self.p.E_act / self.p.igc * (1 / T - 1 / (self.p.T_ref + self.p.zero_C)))
elif self.p.temp_adjust_law == 'off':
pass
else:
# TODO throw correct error
raise Exception(
f'Warning: Incorrect temp_adjust_law {self.p.temp_adjust_law} given, only "exponential" and "off" implemented')
return val
# derivative of the temperature adjustment factor with respect to the temperature
def temp_adjust_tangent(self, T):
val = 0
if self.p.temp_adjust_law == 'exponential':
val = self.p.E_act / self.p.igc / T ** 2
return val
# affinity function
def affinity(self, delta_alpha, alpha_n):
affinity = self.p.B1 * (self.p.B2 / self.p.alpha_max + delta_alpha + alpha_n) * (
self.p.alpha_max - (delta_alpha + alpha_n)) * np.exp(
-self.p.eta * (delta_alpha + alpha_n) / self.p.alpha_max)
return affinity
# derivative of affinity with respect to delta alpha
def daffinity_ddalpha(self, delta_alpha, alpha_n):
affinity_prime = self.p.B1 * np.exp(-self.p.eta * (delta_alpha + alpha_n) / self.p.alpha_max) * (
(self.p.alpha_max - (delta_alpha + alpha_n)) * (
self.p.B2 / self.p.alpha_max + (delta_alpha + alpha_n)) * (
-self.p.eta / self.p.alpha_max) - self.p.B2 / self.p.alpha_max - 2 * (
delta_alpha + alpha_n) + self.p.alpha_max)
return affinity_prime
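# --- Illustrative sketch (not part of the original model) ---
# Minimal, self-contained example of the affinity-driven hydration rate
# d(alpha)/dt = A(alpha) integrated with a simple explicit Euler loop at the
# reference temperature (temperature factor = 1). All parameter values below
# (B1, B2, eta, alpha_max, dt) are assumptions chosen only for illustration
# and do not correspond to a calibrated material.
def _demo_hydration_curve():
    B1, B2, eta, alpha_max = 2.9e-4, 0.0024, 5.55, 0.875  # assumed values
    dt, t_end = 60.0, 60.0 * 60.0 * 24.0  # one-minute steps over one day
    alpha, t, history = 0.0, 0.0, []
    while t < t_end:
        affinity = B1 * (B2 / alpha_max + alpha) * (alpha_max - alpha) * np.exp(-eta * alpha / alpha_max)
        alpha = min(alpha + dt * affinity, alpha_max)  # explicit Euler step
        t += dt
        history.append((t, alpha))
    return history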
class ConcreteMechanicsModel(df.NonlinearProblem):
def __init__(self, mesh, p, pv_name='mechanics_output', **kwargs):
df.NonlinearProblem.__init__(self) # apparently required to initialize things
self.p = p
if self.p.dim == 1:
self.stress_vector_dim = 1
elif self.p.dim == 2:
self.stress_vector_dim = 3
elif self.p.dim == 3:
self.stress_vector_dim = 6
# todo: I do not like the "meshless" setup right now
if mesh != None:
# initialize possible paraview output
self.pv_file = df.XDMFFile(pv_name + '.xdmf')
self.pv_file.parameters["flush_output"] = True
self.pv_file.parameters["functions_share_mesh"] = True
# function space for single value per element, required for plot of quadrature space values
#
if self.p.degree == 1:
self.visu_space = df.FunctionSpace(mesh, "DG", 0)
self.visu_space_T = df.TensorFunctionSpace(mesh, "DG", 0)
else:
self.visu_space = df.FunctionSpace(mesh, "P", 1)
self.visu_space_T = df.TensorFunctionSpace(mesh, "P", 1)
metadata = {"quadrature_degree": self.p.degree, "quadrature_scheme": "default"}
dxm = df.dx(metadata=metadata)
# solution field
self.V = df.VectorFunctionSpace(mesh, 'P', self.p.degree)
# generic quadrature function space
cell = mesh.ufl_cell()
q = "Quadrature"
quadrature_element = df.FiniteElement(q, cell, degree=self.p.degree, quad_scheme="default")
quadrature_vector_element = df.VectorElement(q, cell, degree=self.p.degree, dim=self.stress_vector_dim,
quad_scheme="default")
q_V = df.FunctionSpace(mesh, quadrature_element)
q_VT = df.FunctionSpace(mesh, quadrature_vector_element)
# quadrature functions
self.q_E = df.Function(q_V, name="youngs modulus")
self.q_fc = df.Function(q_V, name="compressive strength")
self.q_ft = df.Function(q_V, name="tensile strength")
self.q_yield = df.Function(q_V, name="yield criterion")
self.q_alpha = df.Function(q_V, name="degree of hydration")
self.q_sigma = df.Function(q_VT, name="stress")
            # initialize degree of hydration to 1, in case the mechanics module is run without hydration coupling
self.q_alpha.vector()[:] = 1
# Define variational problem
self.u = df.Function(self.V) # displacement
v = df.TestFunction(self.V)
# Elasticity parameters without multiplication with E
x_mu = 1.0 / (2.0 * (1.0 + self.p.nu))
x_lambda = 1.0 * self.p.nu / ((1.0 + self.p.nu) * (1.0 - 2.0 * self.p.nu))
# Stress computation for linear elastic problem without multiplication with E
def x_sigma(v):
return 2.0 * x_mu * df.sym(df.grad(v)) + x_lambda * df.tr(df.sym(df.grad(v))) * df.Identity(len(v))
# Volume force
if self.p.dim == 1:
f = df.Constant(-self.p.g * self.p.density)
elif self.p.dim == 2:
f = df.Constant((0, -self.p.g * self.p.density))
elif self.p.dim == 3:
f = df.Constant((0, 0, -self.p.g * self.p.density))
self.sigma_ufl = self.q_E * x_sigma(self.u)
R_ufl = self.q_E * df.inner(x_sigma(self.u), df.sym(df.grad(v))) * dxm
R_ufl += - df.inner(f, v) * dxm # add volumetric force, aka gravity (in this case)
# quadrature point part
self.R = R_ufl # - Constant(p.Q_inf) * self.q_delta_alpha * vT * dxm
# derivative
# normal form
dR_ufl = df.derivative(R_ufl, self.u)
# quadrature part
self.dR = dR_ufl # - Constant(p.Q_inf) * self.q_ddalpha_dT * T_ * vT * dxm
self.project_sigma = LocalProjector(self.sigma_voigt(self.sigma_ufl), q_VT, dxm)
self.assembler = None # set as default, to check if bc have been added???
def sigma_voigt(self, s):
# 1D option
if s.ufl_shape == (1, 1):
stress_vector = df.as_vector((s[0, 0]))
# 2D option
elif s.ufl_shape == (2, 2):
stress_vector = df.as_vector((s[0, 0], s[1, 1], s[0, 1]))
# 3D option
elif s.ufl_shape == (3, 3):
stress_vector = df.as_vector((s[0, 0], s[1, 1], s[2, 2], s[0, 1], s[1, 2], s[0, 2]))
else:
            raise Exception('Problem with stress tensor shape for Voigt notation')
return stress_vector
def E_fkt(self, alpha, parameters):
if alpha < parameters['alpha_t']:
E = parameters['E_inf'] * alpha / parameters['alpha_t'] * (
(parameters['alpha_t'] - parameters['alpha_0']) / (1 - parameters['alpha_0'])) ** parameters[
'a_E']
else:
E = parameters['E_inf'] * ((alpha - parameters['alpha_0']) / (1 - parameters['alpha_0'])) ** parameters[
'a_E']
return E
def general_hydration_fkt(self, alpha, parameters):
return parameters['X_inf'] * alpha ** (parameters['a_X'])
def principal_stress(self, stresses):
# checking type of problem
n = stresses.shape[1] # number of stress components in stress vector
# finding eigenvalues of symmetric stress tensor
# 1D problem
if n == 1:
principal_stresses = stresses
# 2D problem
elif n == 3:
# the following uses
# lambda**2 - tr(sigma)lambda + det(sigma) = 0, solve for lambda using pq formula
p = - (stresses[:, 0] + stresses[:, 1])
q = stresses[:, 0] * stresses[:, 1] - stresses[:, 2] ** 2
            D = p ** 2 / 4 - q # helper variable
assert np.all(D >= -1.0e-15) # otherwise problem with imaginary numbers
sqrtD = np.sqrt(D)
eigenvalues_1 = -p / 2.0 + sqrtD
eigenvalues_2 = -p / 2.0 - sqrtD
            # stack lists as array
principal_stresses = np.column_stack((eigenvalues_1, eigenvalues_2))
# principal_stress = np.array([ev1p,ev2p])
elif n == 6:
            # for a symmetric stress vector a b c e f d we need to solve:
# x**3 - x**2(a+b+c) - x(e**2+f**2+d**2-ab-bc-ac) + (abc-ae**2-bf**2-cd**2+2def) = 0, solve for x
principal_stresses = np.empty([len(stresses), 3])
# currently slow solution with loop over all stresses and subsequent numpy function call:
for i, stress in enumerate(stresses):
# convert voigt to tensor, (00,11,22,12,02,01)
stress_tensor = np.zeros((3, 3))
stress_tensor[0][0] = stress[0]
stress_tensor[1][1] = stress[1]
stress_tensor[2][2] = stress[2]
stress_tensor[0][1] = stress[5]
stress_tensor[1][2] = stress[3]
stress_tensor[0][2] = stress[4]
stress_tensor[1][0] = stress[5]
stress_tensor[2][1] = stress[3]
stress_tensor[2][0] = stress[4]
# use numpy for eigenvalues
principal_stress = np.linalg.eigvalsh(stress_tensor)
                # sort principal stresses from largest to smallest
principal_stresses[i] = -np.sort(-principal_stress)
return principal_stresses
def yield_surface(self, stresses, ft, fc):
# function for approximated yield surface
# first approximation, could be changed if we have numbers/information
fc2 = fc
# pass voigt notation and compute the principal stress
p_stresses = self.principal_stress(stresses)
        # get the principal tensile stresses
t_stresses = np.where(p_stresses < 0, 0, p_stresses)
# get dimension of problem, ie. length of list with principal stresses
n = p_stresses.shape[1]
# check case
if n == 1:
# rankine for the tensile region
rk_yield_vals = t_stresses[:, 0] - ft[:]
# invariants for drucker prager yield surface
I1 = stresses[:, 0]
I2 = np.zeros_like(I1)
# 2D problem
elif n == 2:
# rankine for the tensile region
rk_yield_vals = (t_stresses[:, 0] ** 2 + t_stresses[:, 1] ** 2) ** 0.5 - ft[:]
# invariants for drucker prager yield surface
I1 = stresses[:, 0] + stresses[:, 1]
I2 = ((stresses[:, 0] + stresses[:, 1]) ** 2 - ((stresses[:, 0]) ** 2 + (stresses[:, 1]) ** 2)) / 2
# 3D problem
elif n == 3:
# rankine for the tensile region
rk_yield_vals = (t_stresses[:, 0] ** 2 + t_stresses[:, 1] ** 2 + t_stresses[:, 2] ** 2) ** 0.5 - ft[:]
# invariants for drucker prager yield surface
I1 = stresses[:, 0] + stresses[:, 1] + stresses[:, 2]
I2 = ((stresses[:, 0] + stresses[:, 1] + stresses[:, 2]) ** 2 - (
(stresses[:, 0]) ** 2 + (stresses[:, 1]) ** 2 + (stresses[:, 2]) ** 2)) / 2
else:
            raise Exception('Problem with input to yield surface, the array with stress values has the wrong size')
J2 = 1 / 3 * I1 ** 2 - I2
beta = (3.0 ** 0.5) * (fc2 - fc) / (2 * fc2 - fc)
Hp = fc2 * fc / ((3.0 ** 0.5) * (2 * fc2 - fc))
dp_yield_vals = beta / 3 * I1 + J2 ** 0.5 - Hp
# TODO: is this "correct", does this make sense? for a compression state, what if rk yield > dp yield???
yield_vals = np.maximum(rk_yield_vals, dp_yield_vals)
return np.asarray(yield_vals)
def evaluate_material(self):
# convert quadrature spaces to numpy vector
alpha_list = self.q_alpha.vector().get_local()
parameters = {}
parameters['alpha_t'] = self.p.alpha_t
parameters['E_inf'] = self.p.E_28
parameters['alpha_0'] = self.p.alpha_0
parameters['a_E'] = self.p.a_E
# vectorize the function for speed up
E_fkt_vectorized = np.vectorize(self.E_fkt)
E_list = E_fkt_vectorized(alpha_list, parameters)
parameters = {}
parameters['X_inf'] = self.p.fc_inf
parameters['a_X'] = self.p.a_fc
fc_list = self.general_hydration_fkt(alpha_list, parameters)
parameters = {}
parameters['X_inf'] = self.p.ft_inf
parameters['a_X'] = self.p.a_ft
ft_list = self.general_hydration_fkt(alpha_list, parameters)
# now do the yield function thing!!!
# I need stresses!!!
# get stress values
self.project_sigma(self.q_sigma)
sigma_list = self.q_sigma.vector().get_local().reshape((-1, self.stress_vector_dim))
# compute the yield values (values > 0 : failure)
yield_list = self.yield_surface(sigma_list, ft_list, fc_list)
# # project lists onto quadrature spaces
set_q(self.q_E, E_list)
set_q(self.q_fc, fc_list)
set_q(self.q_ft, ft_list)
set_q(self.q_yield, yield_list)
def update_history(self):
# no history field currently
pass
def set_timestep(self, dt):
self.dt = dt
self.dt_form.assign(df.Constant(self.dt))
def set_bcs(self, bcs):
# Only now (with the bcs) can we initialize the assembler
self.assembler = df.SystemAssembler(self.dR, self.R, bcs)
def F(self, b, x):
# if self.dt <= 0:
# raise RuntimeError("You need to `.set_timestep(dt)` larger than zero before the solve!")
if not self.assembler:
raise RuntimeError("You need to `.set_bcs(bcs)` before the solve!")
self.evaluate_material()
self.assembler.assemble(b, x)
def J(self, A, x):
self.assembler.assemble(A)
def pv_plot(self, t=0):
# paraview export
# displacement plot
u_plot = df.project(self.u, self.V)
u_plot.rename("Displacement", "test string, what does this do??") # TODO: what does the second string do?
self.pv_file.write(u_plot, t, encoding=df.XDMFFile.Encoding.ASCII)
# Elasticity parameters without multiplication with E
x_mu = 1.0 / (2.0 * (1.0 + self.p.nu))
x_lambda = 1.0 * self.p.nu / ((1.0 + self.p.nu) * (1.0 - 2.0 * self.p.nu))
def x_sigma(v):
return 2.0 * x_mu * df.sym(df.grad(v)) + x_lambda * df.tr(df.sym(df.grad(v))) * df.Identity(len(v))
sigma_plot = df.project(self.sigma_ufl, self.visu_space_T,
form_compiler_parameters={'quadrature_degree': self.p.degree})
E_plot = df.project(self.q_E, self.visu_space, form_compiler_parameters={'quadrature_degree': self.p.degree})
fc_plot = df.project(self.q_fc, self.visu_space, form_compiler_parameters={'quadrature_degree': self.p.degree})
ft_plot = df.project(self.q_ft, self.visu_space, form_compiler_parameters={'quadrature_degree': self.p.degree})
yield_plot = df.project(self.q_yield, self.visu_space,
form_compiler_parameters={'quadrature_degree': self.p.degree})
#
E_plot.rename("Young's Modulus", "test string, what does this do??") # TODO: what does the second string do?
fc_plot.rename("Compressive strength",
"test string, what does this do??") # TODO: what does the second string do?
ft_plot.rename("Tensile strength", "test string, what does this do??") # TODO: what does the second string do?
yield_plot.rename("Yield surface", "test string, what does this do??") # TODO: what does the second string do?
sigma_plot.rename("Stress", "test string, what does this do??") # TODO: what does the second string do?
self.pv_file.write(E_plot, t, encoding=df.XDMFFile.Encoding.ASCII)
self.pv_file.write(fc_plot, t, encoding=df.XDMFFile.Encoding.ASCII)
self.pv_file.write(ft_plot, t, encoding=df.XDMFFile.Encoding.ASCII)
self.pv_file.write(yield_plot, t, encoding=df.XDMFFile.Encoding.ASCII)
self.pv_file.write(sigma_plot, t, encoding=df.XDMFFile.Encoding.ASCII)
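# --- Illustrative sketch (not part of the original model) ---
# Standalone version of the piecewise E_fkt stiffness-evolution rule used in
# ConcreteMechanicsModel, so the law can be inspected without building the
# FEniCS problem. The parameter values are assumptions for illustration only.
def _demo_E_evolution():
    params = {'E_inf': 30e9, 'alpha_0': 0.2, 'alpha_t': 0.4, 'a_E': 0.6}  # assumed
    def E_fkt(alpha, p):
        if alpha < p['alpha_t']:
            return p['E_inf'] * alpha / p['alpha_t'] * ((p['alpha_t'] - p['alpha_0']) / (1 - p['alpha_0'])) ** p['a_E']
        return p['E_inf'] * ((alpha - p['alpha_0']) / (1 - p['alpha_0'])) ** p['a_E']
    return [(a, E_fkt(a, params)) for a in np.linspace(0.0, 1.0, 11)]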
| 44.234414
| 179
| 0.597474
|
fc1ad83a6e6dbd179e10c0a7f69239173589f295
| 9,729
|
py
|
Python
|
scripts/labtainer-student/bin/CheckTars.py
|
jakuta-tech/Labtainers
|
f674204022ad5d13ad6bccaf02a14a283470d23f
|
[
"Apache-2.0"
] | null | null | null |
scripts/labtainer-student/bin/CheckTars.py
|
jakuta-tech/Labtainers
|
f674204022ad5d13ad6bccaf02a14a283470d23f
|
[
"Apache-2.0"
] | null | null | null |
scripts/labtainer-student/bin/CheckTars.py
|
jakuta-tech/Labtainers
|
f674204022ad5d13ad6bccaf02a14a283470d23f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
'''
This software was created by United States Government employees at
The Center for Cybersecurity and Cyber Operations (C3O)
at the Naval Postgraduate School NPS. Please note that within the
United States, copyright protection is not available for any works
created by United States Government employees, pursuant to Title 17
United States Code Section 105. This software is in the public
domain and is not subject to copyright.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import sys
import os
import shutil
import tempfile
'''
Look at _tar directories for the given labs/[lab]/[image] and
create or update tar files to reflect recent changes. Uses
an 'external-manifest' file to identify tars from other labs
that should be part of this one.
'''
external = 'external-manifest'
tmp_loc = tempfile.TemporaryDirectory().name
def expandManifest(full, tar_name):
'''
extract files from a tar named in an external manifest file
into a staging directory at tmp_loc
'''
#print('expand for %s' % full)
mf = os.path.join(full, external)
labdir = os.path.dirname(os.path.dirname(os.path.dirname(full)))
#print('labdir is %s' % labdir)
with open(mf) as fh:
for line in fh:
lab, image = line.strip().split(':')
ref_tar = os.path.join(labdir, lab, image, os.path.basename(full), tar_name)
#print('external ref is %s' % ref_tar)
cmd = 'tar xf %s -C %s' % (ref_tar, tmp_loc)
os.system(cmd)
def newest_referenced_tar(full, tar_name):
'''
return a path to the most recent tar file named in an external
manifest.
'''
retval = None
recent = 0
labdir = os.path.dirname(os.path.dirname(os.path.dirname(full)))
mf = os.path.join(full, external)
with open(mf) as fh:
for line in fh:
lab, image = line.strip().split(':')
ref_tar = os.path.join(labdir, lab, image, os.path.basename(full), tar_name)
if not os.path.isfile(ref_tar):
print('Tar file named in manifest not found: %s component %s' % (ref_tar, full))
exit(1)
tar_time = os.stat(ref_tar).st_mtime
if tar_time > recent:
retval = ref_tar
recent = tar_time
return retval
def newest_file_in_tree(rootfolder):
return max(
(os.path.join(dirname, filename)
for dirname, dirnames, filenames in os.walk(rootfolder)
for filename in filenames),
key=lambda fn: os.stat(fn).st_mtime)
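# --- Illustrative sketch (not part of the original script) ---
# Minimal example of the staleness check used in CheckTars below: a *_tar
# directory needs a rebuild when the newest file inside it is not the tar
# archive itself. 'seed/home_tar' and 'home.tar' are hypothetical names.
def _demo_needs_rebuild(tar_dir='seed/home_tar', tar_name='home.tar'):
    if not os.path.isdir(tar_dir) or not os.listdir(tar_dir):
        return False  # nothing to check in this illustration
    newest = newest_file_in_tree(tar_dir)
    return not newest.endswith(tar_name)  # True -> the tar should be rebuilt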
def copydir(source, dest):
"""Copy a directory structure overwriting existing files"""
dest_par = os.path.dirname(dest)
for root, dirs, files in os.walk(source):
if not os.path.isdir(root):
os.makedirs(root)
for mdir in dirs:
try:
dest_path = os.path.join(dest_par, root, mdir)
if not os.path.isdir(dest_path):
os.makedirs(dest_path)
except:
pass
for file in files:
rel_path = root.replace(source, '').lstrip(os.sep)
dest_path = os.path.join(dest, rel_path)
if not os.path.isdir(dest_path):
os.makedirs(dest_path)
cpy_src = os.path.join(root, file)
cpy_dest = os.path.join(dest_path, file)
shutil.copyfile(cpy_src, cpy_dest)
shutil.copymode(cpy_src, cpy_dest)
def CheckTars(container_dir, image_name, logger):
here = os.getcwd()
if container_dir.endswith('/'):
container_dir = container_dir[:-1]
tar_list = os.listdir(container_dir)
manifest_name = '%s-home_tar.list' % image_name
lab_dir = os.path.dirname(container_dir)
logger.debug('container_dir is %s' % container_dir)
manifest = os.path.join(lab_dir, 'config', manifest_name)
for f in tar_list:
full = os.path.join(container_dir, f)
if os.path.isdir(full) and f.endswith('_tar'):
if os.path.isdir(tmp_loc):
logger.debug('remove tree at %s' % tmp_loc)
shutil.rmtree(tmp_loc)
os.mkdir(tmp_loc)
os.chdir(full)
tmp_name = f[:-4]
tar_name = tmp_name+'.tar'
logger.debug('check for %s' % tar_name)
if not os.path.isfile(tar_name):
''' no tar, make one '''
logger.debug('no tar %s, make one' % tar_name)
f_list = os.listdir('./')
if len(f_list) == 0:
#print('no files, make empty')
''' no files at all, create empty archive '''
cmd = 'tar cvf %s --files-from /dev/null' % tar_name
os.system(cmd)
logger.debug('did %s' % cmd)
else:
if external in f_list:
''' external manifest, expand that '''
logger.debug('expand manifest at %s' % full)
expandManifest(full, tar_name)
for cfile in f_list:
logger.debug('cfile is %s' % cfile)
if cfile != external:
if os.path.isdir(cfile):
logger.debug('copydir %s' % cfile)
copydir(cfile, os.path.join(tmp_loc, cfile))
else:
logger.debug('copyfile %s' % cfile)
shutil.copyfile(cfile, os.path.join(tmp_loc, cfile))
os.chdir(tmp_loc)
full_tar = os.path.join(full, tar_name)
if f == 'home_tar':
cmd = 'tar czf %s --owner=:1000 --group=:1000 `ls -A -1` > %s' % (full_tar, manifest)
else:
cmd = 'tar czf %s --owner=root --group=root `ls -A -1`' % (full_tar)
os.system(cmd)
logger.debug('did %s' % cmd)
else:
''' is a tar file, should it be updated? '''
os.chdir(full)
newest = newest_file_in_tree('./')
logger.debug('newest is %s' % newest)
referenced_tar_newer = False
if os.path.isfile(external):
latest_ref = newest_referenced_tar(full, tar_name)
logger.debug('has manifest, is referenced file (%s) newer than local tar?' % latest_ref)
if os.stat(latest_ref).st_mtime > os.stat(tar_name).st_mtime:
referenced_tar_newer = True
if referenced_tar_newer or not newest.endswith(tar_name):
os.remove(tar_name)
flist = os.listdir('./')
for f in flist:
if f == external:
continue
fpath = os.path.join(tmp_loc,f)
if not os.path.isfile(fpath):
shutil.copytree(f , fpath)
else:
shutil.copyfile(f , fpath)
''' something is newer than the tar, need to update tar '''
if os.path.isfile(os.path.join('./', external)):
expandManifest(full, tar_name)
os.chdir(tmp_loc)
full_tar = os.path.join(full, tar_name)
if f == 'home_tar':
cmd = 'tar czf %s `ls -A -1` > %s' % (full_tar, manifest)
else:
cmd = 'tar czf %s `ls -A -1`' % (full_tar)
os.system(cmd)
logger.debug(cmd)
#print('did %s' % cmd)
else:
''' tar file is the most recent. ensure we have a manifest '''
if f == 'home_tar' and not os.path.isfile(manifest):
os.chdir(full)
cmd = 'tar tf %s > %s' % (tar_name, manifest)
os.system(cmd)
logger.debug(cmd)
if os.path.isdir(tmp_loc):
logger.debug('remove tree at %s' % tmp_loc)
shutil.rmtree(tmp_loc)
os.chdir(here)
def __main__():
    # CheckTars requires a logger; use a minimal stdlib logger so the script
    # can run standalone (the Labtainer tooling normally supplies its own).
    import logging
    logging.basicConfig(level=logging.DEBUG)
    container_dir = sys.argv[1]
    image_name = sys.argv[2]
    CheckTars(container_dir, image_name, logging.getLogger('CheckTars'))
| 44.62844
| 109
| 0.554322
|
cc41678e665055e67bcc002308a24c493bbdd7d7
| 47
|
py
|
Python
|
fastweb/util/option.py
|
BSlience/fastweb
|
2c1b956e9846c4205d0201d39d09891d088754e4
|
[
"Apache-2.0"
] | 123
|
2017-06-06T04:59:07.000Z
|
2019-07-11T10:20:35.000Z
|
fastweb/util/option.py
|
BSlience/fastweb
|
2c1b956e9846c4205d0201d39d09891d088754e4
|
[
"Apache-2.0"
] | null | null | null |
fastweb/util/option.py
|
BSlience/fastweb
|
2c1b956e9846c4205d0201d39d09891d088754e4
|
[
"Apache-2.0"
] | 2
|
2017-06-28T05:58:39.000Z
|
2018-09-25T00:18:33.000Z
|
from fastweb.accesspoint import options, define
| 47
| 47
| 0.87234
|
2c2141a918bcd49a76a6bd52fa90e1837c2cd680
| 756
|
py
|
Python
|
staircase/core/accessor.py
|
amagee/staircase
|
e0a45c05648e778ef61b624836908726fcc98b48
|
[
"MIT"
] | 25
|
2020-09-05T01:26:43.000Z
|
2021-01-31T06:51:47.000Z
|
staircase/core/accessor.py
|
amagee/staircase
|
e0a45c05648e778ef61b624836908726fcc98b48
|
[
"MIT"
] | 76
|
2020-03-03T22:26:19.000Z
|
2021-07-09T09:29:38.000Z
|
staircase/core/accessor.py
|
amagee/staircase
|
e0a45c05648e778ef61b624836908726fcc98b48
|
[
"MIT"
] | 10
|
2021-08-25T02:01:09.000Z
|
2021-11-23T10:31:12.000Z
|
"""
Taken from pandas.core.accessor.py, with minor adjustments.
"""
class CachedAccessor:
"""
Custom property-like object.
A descriptor for caching accessors.
Parameters
----------
name : str
Namespace that will be accessed under, e.g. ``sc.plot``.
accessor : cls
Class with the extension methods.
"""
def __init__(self, name: str, accessor) -> None:
self._name = name
self._accessor = accessor
def __get__(self, obj, cls):
if obj is None:
# we're accessing the attribute of the class, i.e., Stairs.plot
return self._accessor
accessor_obj = self._accessor(obj)
obj.__setattr__(self._name, accessor_obj)
return accessor_obj
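# --- Illustrative usage sketch (not part of the original module) ---
# Shows how a CachedAccessor is typically attached to a host class; the names
# _DemoStairs and _DemoPlotAccessor are stand-ins, not the real staircase classes.
class _DemoPlotAccessor:
    def __init__(self, host):
        self._host = host
    def __call__(self):
        return f"plotting {self._host!r}"
class _DemoStairs:
    plot = CachedAccessor("plot", _DemoPlotAccessor)
# _DemoStairs().plot() creates the accessor once, caches it on the instance,
# and returns the cached object on later attribute access.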
| 25.2
| 75
| 0.613757
|
af76e4c30e059cbb020b01a66ee352682889c256
| 976
|
py
|
Python
|
sastvd/helpers/tokenise.py
|
davidhin/linevd
|
1fca96a1ae44efa3b2af645a01c50ca82add6ba3
|
[
"MIT"
] | 13
|
2022-03-11T15:13:29.000Z
|
2022-03-31T06:23:11.000Z
|
sastvd/helpers/tokenise.py
|
HickeyHsu/linevd
|
1fca96a1ae44efa3b2af645a01c50ca82add6ba3
|
[
"MIT"
] | 1
|
2022-03-24T22:55:38.000Z
|
2022-03-30T02:39:55.000Z
|
sastvd/helpers/tokenise.py
|
HickeyHsu/linevd
|
1fca96a1ae44efa3b2af645a01c50ca82add6ba3
|
[
"MIT"
] | 3
|
2022-03-28T10:10:25.000Z
|
2022-03-31T06:23:12.000Z
|
import re
def tokenise(s):
"""Tokenise according to IVDetect.
Tests:
s = "FooBar fooBar foo bar_blub23/x~y'z"
"""
spec_char = re.compile(r"[^a-zA-Z0-9\s]")
camelcase = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")
spec_split = re.split(spec_char, s)
space_split = " ".join(spec_split).split()
def camel_case_split(identifier):
return [i.group(0) for i in re.finditer(camelcase, identifier)]
camel_split = [i for j in [camel_case_split(i) for i in space_split] for i in j]
remove_single = [i for i in camel_split if len(i) > 1]
return " ".join(remove_single)
def tokenise_lines(s):
r"""Tokenise according to IVDetect by splitlines.
Example:
s = "line1a line1b\nline2a asdf\nf f f f f\na"
"""
slines = s.splitlines()
lines = []
for sline in slines:
tokline = tokenise(sline)
if len(tokline) > 0:
lines.append(tokline)
return lines
| 27.111111
| 84
| 0.601434
|
1089c63198595d7ed049e8b93883586d7d7a6e36
| 5,198
|
py
|
Python
|
frappe/build.py
|
Steggur/frappe
|
be95a19704dd3ac667f7ad64e1694dc5d59856fe
|
[
"MIT"
] | null | null | null |
frappe/build.py
|
Steggur/frappe
|
be95a19704dd3ac667f7ad64e1694dc5d59856fe
|
[
"MIT"
] | null | null | null |
frappe/build.py
|
Steggur/frappe
|
be95a19704dd3ac667f7ad64e1694dc5d59856fe
|
[
"MIT"
] | 1
|
2018-03-22T00:24:53.000Z
|
2018-03-22T00:24:53.000Z
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from frappe.utils.minify import JavascriptMinify
"""
Build the `public` folders and setup languages
"""
import os, frappe, json, shutil, re
# from cssmin import cssmin
app_paths = None
def setup():
global app_paths
pymodules = [frappe.get_module(app) for app in frappe.get_all_apps(True)]
app_paths = [os.path.dirname(pymodule.__file__) for pymodule in pymodules]
def bundle(no_compress, make_copy=False, verbose=False):
"""concat / minify js files"""
# build js files
setup()
make_asset_dirs(make_copy=make_copy)
build(no_compress, verbose)
def watch(no_compress):
"""watch and rebuild if necessary"""
setup()
import time
compile_less()
build(no_compress=True)
while True:
compile_less()
if files_dirty():
build(no_compress=True)
time.sleep(3)
def make_asset_dirs(make_copy=False):
assets_path = os.path.join(frappe.local.sites_path, "assets")
for dir_path in [
os.path.join(assets_path, 'js'),
os.path.join(assets_path, 'css')]:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# symlink app/public > assets/app
for app_name in frappe.get_all_apps(True):
pymodule = frappe.get_module(app_name)
source = os.path.join(os.path.abspath(os.path.dirname(pymodule.__file__)), 'public')
target = os.path.join(assets_path, app_name)
if not os.path.exists(target) and os.path.exists(source):
if make_copy:
shutil.copytree(os.path.abspath(source), target)
else:
os.symlink(os.path.abspath(source), target)
def build(no_compress=False, verbose=False):
assets_path = os.path.join(frappe.local.sites_path, "assets")
for target, sources in get_build_maps().iteritems():
pack(os.path.join(assets_path, target), sources, no_compress, verbose)
shutil.copy(os.path.join(os.path.dirname(os.path.abspath(frappe.__file__)), 'data', 'languages.txt'), frappe.local.sites_path)
# reset_app_html()
def get_build_maps():
"""get all build.jsons with absolute paths"""
# framework js and css files
build_maps = {}
for app_path in app_paths:
path = os.path.join(app_path, 'public', 'build.json')
if os.path.exists(path):
with open(path) as f:
try:
for target, sources in json.loads(f.read()).iteritems():
# update app path
source_paths = []
for source in sources:
if isinstance(source, list):
s = frappe.get_pymodule_path(source[0], *source[1].split("/"))
else:
s = os.path.join(app_path, source)
source_paths.append(s)
build_maps[target] = source_paths
except Exception:
print path
raise
return build_maps
timestamps = {}
def pack(target, sources, no_compress, verbose):
from cStringIO import StringIO
outtype, outtxt = target.split(".")[-1], ''
jsm = JavascriptMinify()
for f in sources:
suffix = None
if ':' in f: f, suffix = f.split(':')
if not os.path.exists(f) or os.path.isdir(f):
print "did not find " + f
continue
timestamps[f] = os.path.getmtime(f)
try:
with open(f, 'r') as sourcefile:
data = unicode(sourcefile.read(), 'utf-8', errors='ignore')
extn = f.rsplit(".", 1)[1]
if outtype=="js" and extn=="js" and (not no_compress) and suffix!="concat" and (".min." not in f):
tmpin, tmpout = StringIO(data.encode('utf-8')), StringIO()
jsm.minify(tmpin, tmpout)
minified = tmpout.getvalue()
outtxt += unicode(minified or '', 'utf-8').strip('\n') + ';'
if verbose:
print "{0}: {1}k".format(f, int(len(minified) / 1024))
elif outtype=="js" and extn=="html":
# add to frappe.templates
outtxt += html_to_js_template(f, data)
else:
outtxt += ('\n/*\n *\t%s\n */' % f)
outtxt += '\n' + data + '\n'
except Exception:
print "--Error in:" + f + "--"
print frappe.get_traceback()
if not no_compress and outtype == 'css':
pass
#outtxt = cssmin(outtxt)
with open(target, 'w') as f:
f.write(outtxt.encode("utf-8"))
print "Wrote %s - %sk" % (target, str(int(os.path.getsize(target)/1024)))
def html_to_js_template(path, content):
content = re.sub("\s+", " ", content).replace("'", "\'")
return """frappe.templates["{key}"] = '{content}';\n""".format(\
key=path.rsplit("/", 1)[-1][:-5], content=content)
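# --- Illustrative sketch (not part of the original build script) ---
# Minimal example of how an html file becomes a frappe.templates entry; the
# path and markup below are made up purely for illustration.
def _demo_html_to_js_template():
    return html_to_js_template("frappe/public/js/widget.html",
        "<div> Hello   <b>World</b> </div>")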
def files_dirty():
for target, sources in get_build_maps().iteritems():
for f in sources:
if ':' in f: f, suffix = f.split(':')
if not os.path.exists(f) or os.path.isdir(f): continue
if os.path.getmtime(f) != timestamps.get(f):
print f + ' dirty'
return True
else:
return False
def compile_less():
for path in app_paths:
less_path = os.path.join(path, "public", "less")
if os.path.exists(less_path):
for fname in os.listdir(less_path):
if fname.endswith(".less") and fname != "variables.less":
fpath = os.path.join(less_path, fname)
mtime = os.path.getmtime(fpath)
if fpath in timestamps and mtime == timestamps[fpath]:
continue
timestamps[fpath] = mtime
print "compiling {0}".format(fpath)
os.system("lessc {0} > {1}".format(fpath,
os.path.join(path, "public", "css", fname.rsplit(".", 1)[0] + ".css")))
| 28.718232
| 127
| 0.66641
|
3d3316871d7c4140320f9218f56228cf97b5cfac
| 11,467
|
py
|
Python
|
VL-T5/src/video/yc2c_data.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 41
|
2021-12-14T02:50:16.000Z
|
2022-03-30T07:41:19.000Z
|
VL-T5/src/video/yc2c_data.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 1
|
2022-01-07T03:31:47.000Z
|
2022-03-25T00:31:53.000Z
|
VL-T5/src/video/yc2c_data.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 2
|
2021-12-14T03:10:18.000Z
|
2022-03-29T04:59:23.000Z
|
from torch.utils.data import DataLoader, Dataset, Sampler
from pathlib import Path
from collections import defaultdict
import json
import pandas as pd
import random
from multiprocessing import Pool
import h5py
import pickle
import math
from tqdm import tqdm
import torch
import numpy as np
from copy import deepcopy
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from transformers import T5TokenizerFast, BartTokenizer
import sys
sys.path.append(str(Path(__file__).parent.parent.resolve()))
from tokenization import VLT5TokenizerFast
project_dir = Path(__file__).resolve().parent.parent.parent # VLT5
workspace_dir = project_dir.parent
dataset_dir = workspace_dir.joinpath('datasets/video/').resolve()
TASK = "yc2c"
def resize(input_tensor, length):
L, D = input_tensor.shape
if L < length:
# pad
input_tensor = torch.cat([input_tensor, torch.zeros(length - L, D)], dim=0)
elif L > length:
input_tensor = input_tensor.t()
input_tensor = F.adaptive_max_pool1d(input_tensor, length)
input_tensor = input_tensor.t()
return input_tensor
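# --- Illustrative sketch (not part of the original dataset code) ---
# Shows the two branches of resize(): zero-padding when the clip is shorter
# than the target length, adaptive max pooling when it is longer. The shapes
# below are assumptions chosen only for illustration.
def _demo_resize():
    short_clip = torch.randn(3, 512)   # 3 frames, 512-dim features
    long_clip = torch.randn(40, 512)   # 40 frames
    assert resize(short_clip, 8).shape == (8, 512)  # padded up to 8 frames
    assert resize(long_clip, 8).shape == (8, 512)   # pooled down to 8 frames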
class YC2CFineTuneDataset(Dataset):
def __init__(self, split='train', raw_dataset=None, rank=-1, topk=-1, verbose=True, args=None, mode='train'):
super().__init__()
self.raw_dataset = raw_dataset
self.topk = topk
self.verbose = verbose
self.args = args
self.args.BUTD100 = False
self.mode = mode
# Loading datasets to data
self.source = split
if self.verbose:
print('Data source: ', self.source)
if self.args.tokenizer is None:
self.args.tokenizer = self.args.backbone
if 't5' in self.args.tokenizer:
if self.args.use_vision:
self.tokenizer = VLT5TokenizerFast.from_pretrained(
args.backbone,
# max_length=self.args.max_text_length,
do_lower_case=self.args.do_lower_case)
else:
self.tokenizer = T5TokenizerFast.from_pretrained(
args.backbone,
# max_length=self.args.max_text_length,
do_lower_case=self.args.do_lower_case)
elif 'bart' in self.args.tokenizer:
self.tokenizer = BartTokenizer.from_pretrained(
args.backbone,
# max_length=self.args.max_text_length,
do_lower_case=self.args.do_lower_case)
additional_special_tokens = [f'<extra_id_{i}>' for i in range(100-1, -1, -1)] + \
[f'<vis_extra_id_{i}>' for i in range(100-1, -1, -1)]
special_tokens_dict = {'additional_special_tokens': additional_special_tokens}
num_added_toks = self.tokenizer.add_special_tokens(special_tokens_dict)
subtitles_path = dataset_dir.joinpath(f"ann/yc2_subtitles.jsonl")
self.subtitles = {}
with open(subtitles_path, "r") as f:
for line in f:
d = json.loads(line)
self.subtitles[d["vid_name"]] = d["sub"]
annotations = [
dataset_dir.joinpath(f'ann/yc2c/yc2c_{s}.jsonl')
for s in self.source.split(",")
]
self.source_dir = dataset_dir.joinpath(f'vis_features/yc2/clip-vit')
data = []
for ann in annotations:
with open(ann) as f:
for line in f:
d = json.loads(line)
# train data
if "descs" in d:
for desc in d["descs"]:
datum = {}
datum["vid_name"] = d["vid_name"]
datum["duration"] = d["duration"]
datum["answer"] = desc["desc"]
datum["question_id"] = desc["desc_id"]
datum["type"] = f"{TASK}"
data.append(datum)
# test data
else:
datum = {}
datum["vid_name"] = d["vid_name"]
datum["duration"] = d["duration"]
datum["question_id"] = d["clip_id"]
datum["type"] = f"{TASK}"
data.append(datum)
self.types = set()
for d in data:
self.types.add(d["type"])
print(self.types)
self.n_gpus = torch.cuda.device_count()
self.rank = rank
if isinstance(self.topk, float) and (0 < self.topk <= 1):
used_samples = int(self.topk * len(data))
data = random.sample(data, used_samples)
if self.verbose:
print(f"Use only {len(data)} data")
elif self.topk > 0:
data = data[:int(self.topk)]
if self.verbose:
print(f"Use only {len(data)} data")
self.data = data
if self.verbose:
print("# all sentences:", len(self.data))
self.n_boxes = args.n_boxes
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
out_dict = {}
out_dict['args'] = self.args
datum = self.data[idx]
###### Image ######
if self.args.use_vision:
video_id = datum['vid_name']
out_dict['video_id'] = video_id
path = self.source_dir.joinpath(f"{video_id}.npz")
feats = np.load(path)["features"]
feats = torch.from_numpy(feats)
feats = resize(feats, self.n_boxes)
out_dict['vis_feats'] = feats # (L, D)
boxes = torch.zeros(feats.shape[0], 4) # (L, 4)
out_dict['boxes'] = boxes
###### Text #####
# caption = datum['caption']
sent = ""
subs = []
for t in self.subtitles[video_id]:
# subs.append(f"({t['start']:.1f}-{t['end']:.1f}) {t['text'].strip()}")
subs.append(f"{t['text'].strip()}")
subs = " ".join(subs)
subs = f"[Subs] {subs}"
# duration
sent = " ".join([subs])
input_ids = self.tokenizer.encode(f"{sent} {self.args.prompt}", max_length=600, truncation=True)
out_dict['question_id'] = datum['question_id']
out_dict['type'] = datum['type']
out_dict['sent'] = sent
out_dict['input_ids'] = torch.LongTensor(input_ids)
out_dict['input_length'] = len(input_ids)
# out_dict['target_ids'] = torch.LongTensor(target_ids)
# out_dict['target_length'] = len(target_ids)
if 'answer' in datum:
answer = datum['answer']
out_dict['answer'] = answer
# print(feats.shape, feats.dtype)
# print(sent)
# print(answer)
target_ids = self.tokenizer.encode(answer, max_length=20, truncation=True)
out_dict['target_ids'] = torch.LongTensor(target_ids)
out_dict['target_length'] = len(target_ids)
return out_dict
def collate_fn(self, batch):
batch_entry = {}
args = batch[0]['args']
B = len(batch)
S_W_L = max(entry['input_length'] for entry in batch)
input_ids = torch.ones(B, S_W_L, dtype=torch.long) * self.tokenizer.pad_token_id
if args.use_vision:
V_L = len(batch[0]['boxes'])
feat_dim = batch[0]['vis_feats'].shape[-1]
boxes = torch.zeros(B, V_L, 4, dtype=torch.float)
vis_feats = torch.zeros(B, V_L, feat_dim, dtype=torch.float)
if 'target' in batch[0]:
# targets = []
targets = torch.zeros(B, len(batch[0]['target']), dtype=torch.float)
if 'target_ids' in batch[0]:
T_W_L = max(entry['target_length'] for entry in batch)
target_ids = torch.ones(B, T_W_L, dtype=torch.long) * self.tokenizer.pad_token_id
sentences = []
question_ids = []
answers = []
img_ids = []
img_paths = []
video_ids = []
for i, entry in enumerate(batch):
input_ids[i, :entry['input_length']] = entry['input_ids']
if args.use_vision:
boxes[i] += entry['boxes']
vis_feats[i] += entry['vis_feats']
# img_ids.append(entry['img_id'])
# img_paths.append(entry['img_path'])
if 'target_ids' in entry:
target_ids[i, :entry['target_length']] = entry['target_ids']
if 'target' in entry:
targets[i] += entry['target']
# targets.append(entry['target'])
sentences.append(entry['sent'])
question_ids.append(entry['question_id'])
video_ids.append(entry['video_id'])
if 'answer' in entry:
answers.append(entry['answer'])
batch_entry['input_ids'] = input_ids
if 'target_ids' in batch[0]:
word_mask = target_ids != self.tokenizer.pad_token_id
target_ids[~word_mask] = -100
batch_entry['target_ids'] = target_ids
if 'target' in batch[0]:
# targets = torch.stack(targets, dim=0)
batch_entry['targets'] = targets
if args.use_vision:
batch_entry['boxes'] = boxes
batch_entry['vis_feats'] = vis_feats
# batch_entry['img_id'] = img_ids
# batch_entry['img_paths'] = img_paths
batch_entry['sent'] = sentences
batch_entry['question_ids'] = question_ids
batch_entry['answers'] = answers
batch_entry['video_ids'] = video_ids
batch_entry['args'] = args
batch_entry['task'] = TASK
return batch_entry
def get_loader(args, split='train', mode='train',
batch_size=32, workers=4, distributed=False, gpu=0, topk=-1):
verbose = (gpu == 0)
dataset = YC2CFineTuneDataset(
split,
raw_dataset=None,
rank=gpu,
topk=topk,
verbose=verbose,
args=args,
mode=mode)
if distributed:
sampler = DistributedSampler(dataset)
else:
sampler = None
if mode == 'train':
loader = DataLoader(
dataset, batch_size=batch_size, shuffle=(sampler is None),
num_workers=workers, pin_memory=True, sampler=sampler,
collate_fn=dataset.collate_fn)
else:
loader = DataLoader(
dataset,
batch_size=batch_size,
num_workers=workers, pin_memory=True,
sampler=sampler,
shuffle=None if (sampler is not None) else False,
collate_fn=dataset.collate_fn,
drop_last=False)
if verbose:
loader.evaluator = YC2CEvaluator(dataset.data, dataset.types)
loader.task = TASK
return loader
class YC2CEvaluator:
def __init__(self, data, types):
import language_evaluation
self.evaluator = language_evaluation.CocoEvaluator(verbose=False)
def evaluate(self, predicts, answers):
results = self.evaluator.run_evaluation(predicts, answers)
return results
if __name__ == "__main__":
from param import parse_args
args = parse_args()
d = YC2CFineTuneDataset(split='test_release', raw_dataset=None, rank=-1, topk=-1, verbose=True, args=args, mode='train')
for i in range(3):
print(d[i])
| 30.825269
| 124
| 0.560042
|
25b8fd1cb69cc0bd4dfbe3ac7e4b9b2f5b7ab206
| 11,101
|
py
|
Python
|
model/metrics.py
|
dimer116/YOLOv3-for-Particle-Tracking
|
db7e79101b32ea005270d3bf7d25bb91fdc04208
|
[
"MIT"
] | null | null | null |
model/metrics.py
|
dimer116/YOLOv3-for-Particle-Tracking
|
db7e79101b32ea005270d3bf7d25bb91fdc04208
|
[
"MIT"
] | null | null | null |
model/metrics.py
|
dimer116/YOLOv3-for-Particle-Tracking
|
db7e79101b32ea005270d3bf7d25bb91fdc04208
|
[
"MIT"
] | 1
|
2021-05-21T08:13:07.000Z
|
2021-05-21T08:13:07.000Z
|
import torch
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
import config
from tqdm import tqdm
from nms import nms
from detect_on_patches import run_on_patches
from utils import cells_to_bboxes
def rmse_xy(coord1, coord2):
xy_1 = coord1[0:2]
xy_2 = coord2[0:2]
return np.sqrt(np.mean((xy_1 - xy_2) ** 2))
def rmse(coord1, coord2):
return np.sum((coord1 - coord2) ** 2, axis=0)
def rel_error(pred, true):
return np.sum(np.abs((pred - true) / true), axis=0)
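# --- Illustrative sketch (not part of the original metrics code) ---
# Worked numbers for the helpers above: rmse_xy only uses the first two entries
# (x, y), while rmse and rel_error reduce over all coordinates. Values are
# assumptions for illustration only.
def _demo_error_helpers():
    a = np.array([0.10, 0.20, 1.0, 0.05, 1.45])
    b = np.array([0.13, 0.16, 1.0, 0.05, 1.45])
    assert np.isclose(rmse_xy(a, b), np.sqrt((0.03 ** 2 + 0.04 ** 2) / 2))
    return rmse(a, b), rel_error(a, b)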
def get_errors(pred_boxes, true_boxes, pixel_threshold, image_size):
"""
    This function calculates the matching between two sets of coordinates and the number of true
    positives
    Parameters:
        pred_boxes (list): list of lists containing all predicted particles, each
        specified as [x, y, z, radius, refractive index]
        true_boxes (list): Similar to pred_boxes but containing the ground truths
pixel_threshold (float): the mean number of pixels for where a prediction is considered a true positive
Returns:
TP (int): number of true positive predictions
num_detections (int): number of detections in image
num_ground_truths (int): number of ground truths in image
coord_errors (np array): of mean absolute error for each coordinate in the image
"""
threshold = pixel_threshold / image_size
M = 1e8
pred_boxes = np.array(pred_boxes)
true_boxes = np.array(true_boxes)
num_detections = pred_boxes.shape[0]
num_ground_truths = true_boxes.shape[0]
cost_matrix = cdist(pred_boxes, true_boxes, rmse_xy)
cost_matrix[cost_matrix > threshold] = M
pred_indices, true_indices = linear_sum_assignment(cost_matrix)
true_positives = cost_matrix[pred_indices, true_indices] < M
TP = np.sum(true_positives)
if TP > 0:
rmse_errors = rmse(
pred_boxes[pred_indices[true_positives]],
true_boxes[true_indices[true_positives]],
)
rel_errors = rel_error(
pred_boxes[pred_indices[true_positives]],
true_boxes[true_indices[true_positives]],
)
else:
rmse_errors = np.zeros(true_boxes.shape[1])
rel_errors = np.zeros(true_boxes.shape[1])
return TP, num_detections, num_ground_truths, rmse_errors, rel_errors
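# --- Illustrative sketch (not part of the original metrics code) ---
# Tiny worked example of get_errors(): two predictions, two ground truths, one
# match inside the pixel threshold. Coordinates are fractions of a 448-pixel
# image and the particle values (z, radius, refractive index) are assumptions
# chosen only for illustration.
def _demo_get_errors():
    preds = [[0.10, 0.10, 0.5, 0.02, 1.40],    # close to the first ground truth
             [0.90, 0.90, 0.5, 0.02, 1.40]]    # far from everything
    truths = [[0.101, 0.099, 0.5, 0.02, 1.40],
              [0.50, 0.50, 0.5, 0.02, 1.40]]
    TP, n_det, n_gt, rmse_err, rel_err = get_errors(preds, truths, pixel_threshold=5, image_size=448)
    assert (TP, n_det, n_gt) == (1, 2, 2)
    return rmse_err, rel_err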
def evaluate_experimental_data(
loader,
model,
conf_threshold=0.5,
pixel_threshold=5,
image_size=2240,
device=config.DEVICE,
nms_threshold=7,
batch_size=128,
z_unit="micro",
toggle_eval=False,
):
"""
Evaluates the YOLOv3 model on the data in the loader inputted
:param loader: PyTorch dataloader with images
:param model: YOLOv3 model with loaded weights
:param conf_threshold: confidence threshold over which to consider model predictions
:param pixel_threshold: pixel_threshold under which to consider prediction true positive
:param image_size: size of images in loader
:param device: device to run model on
:param nms_threshold: pixel threshold under which two predictions are considered to be duplicates
:param batch_size: batch size for model inference with patches of size 448x448
:param z_unit: if 'micro' the z predictions will be converted to micrometres according to simulation settings
used in our experiments. Do not use if your images differ.
:param toggle_eval: boolean to indicate whether to set model to eval or train mode for inference i.e.
whether to use batch statistics from training or not in batch normalization
:returns precision: (float) model's precision on loader
:returns recall: (float) model's recall on loader
:returns F1: (float) F1 score from precision and recall
:returns rmse_error_rate: numpy array with rmse for x, y, z, radius, refractive index
:returns rel_error_rate: numpy array with relative error for x, y, z, radius, refractive index
"""
total_TP = 0
num_detections = 0
num_ground_truths = 0
total_rmse_errors = 0
total_rel_errors = 0
for batch_idx, (x, labels) in enumerate(tqdm(loader)):
pred_bboxes = run_on_patches(
x.squeeze(0).permute(1, 2, 0),
model,
conf_threshold,
nms_threshold,
batch_size=batch_size,
z_unit=z_unit,
toggle_eval=toggle_eval,
device=device
)
# we just want one bbox for each label, not one for each scale
# remove predictions below certain threshold
pred_bboxes = pred_bboxes[pred_bboxes[:, 0] > conf_threshold, :][:, 1:]
TP, detections, ground_truths, rmse_errors, rel_errors = get_errors(
pred_bboxes, labels.squeeze(0), pixel_threshold, image_size
)
total_TP += TP
num_detections += detections
num_ground_truths += ground_truths
total_rmse_errors += rmse_errors
total_rel_errors += rel_errors
precision = total_TP / (num_detections + 1e-6)
recall = total_TP / (num_ground_truths + 1e-6)
F1 = 2 * precision * recall / (precision + recall + 1e-6)
rmse_error_rate = np.sqrt(total_rmse_errors / (total_TP + 1e-6))
rel_error_rate = total_rel_errors / (total_TP + 1e-6)
return precision, recall, F1, rmse_error_rate, rel_error_rate
def evaluate_model(
loader,
model,
conf_threshold=0.7,
pixel_threshold=5,
image_size=448,
device=config.DEVICE,
nms_threshold=2,
):
"""
Evaluates the YOLOv3 model on the data in the loader inputted
:param loader: PyTorch dataloader with images
:param model: YOLOv3 model with loaded weights
:param conf_threshold: confidence threshold over which to consider model predictions
:param pixel_threshold: pixel_threshold under which to consider prediction true positive
:param image_size: size of images in loader
:param device: device to run model on
:param nms_threshold: pixel threshold under which two predictions are considered to be duplicates
:returns precision: (float) model's precision on loader
:returns recall: (float) model's recall on loader
:returns F1: (float) model's F1 score from precision and recall
:returns rmse_error_rate: numpy array with rmse for x, y, z, radius, refractive index
:returns rel_error_rate: numpy array with relative error for x, y, z, radius, refractive index
"""
model.eval()
total_TP = 0
num_detections = 0
num_ground_truths = 0
total_rmse_errors = 0
total_rel_errors = 0
for batch_idx, (x, labels) in enumerate(tqdm(loader)):
if batch_idx > 50:
break
x = x.to(device).squeeze(0)
with torch.no_grad():
predictions = model(x)
TP, detections, ground_truths, rmse_errors, rel_errors = get_batch_errors(
predictions,
labels,
conf_threshold,
pixel_threshold,
image_size,
nms_threshold,
)
total_TP += TP
num_detections += detections
num_ground_truths += ground_truths
total_rmse_errors += rmse_errors
total_rel_errors += rel_errors
model.train()
precision = total_TP / (num_detections + 1e-6)
recall = total_TP / (num_ground_truths + 1e-6)
F1 = 2 * precision * recall / (precision + recall + 1e-6)
rmse_error_rate = np.sqrt(total_rmse_errors / (total_TP + 1e-6))
rel_error_rate = total_rel_errors / (total_TP + 1e-6)
return precision, recall, F1, rmse_error_rate, rel_error_rate
def get_batch_errors(
predictions,
labels,
conf_threshold,
pixel_threshold,
image_size,
nms_threshold,
):
"""
Returns number of true postives, detections and ground truths as well as total squared errors and relative errors
for inputted predictions and labels
:param predictions: list of tensors for predictions from model where each tensor has shape: (batch size, number of anchors on scale (3), grid size, grid size, 6)
:param target: list of tensors for targets where each tensor has shape: (batch size, number of anchors on scale (3), grid size, grid size, 6)
the 6 values signify (object score, x, y, z, radius, refractive index)
:param conf_threshold: confidence threshold over which to consider model predictions
:param pixel_threshold: pixel_threshold under which to consider a prediction true positive
:param image_size: size of images in loader
:param nms_threshold: pixel threshold under which two predictions are considered to be duplicates
:returns total_TP: (int) number of true positive in the batch
:returns num_detections: (int) number of detections in the batch
:returns num_ground_truths: (int) number of targets in the batch
:returns total_rmse_errors: (numpy array) total squared error for all true positive detections for each
x, y, z, radius, refractive index
:returns total_rel_errors: (numpy array) sum of all relative errors for all true positive detections for each
x, y, z, radius, refractive index
"""
total_TP = 0
num_detections = 0
num_ground_truths = 0
total_rmse_errors = 0
total_rel_errors = 0
batch_size = predictions[0].shape[0]
bboxes = [[] for _ in range(batch_size)]
for i in range(3):
S = predictions[i].shape[2]
boxes_scale_i = cells_to_bboxes(predictions[i], S=S, is_preds=True)
for idx, (box) in enumerate(boxes_scale_i):
bboxes[idx] += box
# we just want one bbox for each label, not one for each scale
true_bboxes = cells_to_bboxes(labels[2].squeeze(0), S=S, is_preds=False)
for idx in range(batch_size):
nms_boxes = nms(
np.array(bboxes[idx]),
conf_threshold=conf_threshold,
threshold=nms_threshold / image_size,
)
cur_pred_bboxes = np.array(nms_boxes)
cur_true_bboxes = np.array(true_bboxes[idx])
# remove predictions below certain threshold
cur_pred_bboxes = cur_pred_bboxes[cur_pred_bboxes[:, 0] > conf_threshold, :][
:, 1:
]
cur_true_bboxes = cur_true_bboxes[cur_true_bboxes[:, 0] > conf_threshold][:, 1:]
TP, detections, ground_truths, rmse_errors, rel_errors = get_errors(
cur_pred_bboxes, cur_true_bboxes, pixel_threshold, image_size
)
total_TP += TP
num_detections += detections
num_ground_truths += ground_truths
total_rmse_errors += rmse_errors
total_rel_errors += rel_errors
return (
total_TP,
num_detections,
num_ground_truths,
total_rmse_errors,
total_rel_errors,
)
| 39.505338
| 166
| 0.665075
|
d8147e99615ce031f2b68bc0e5ae193b119ae7b5
| 791
|
py
|
Python
|
var/spack/repos/builtin/packages/r-fda/package.py
|
varioustoxins/spack
|
cab0e4cb240f34891a6d753f3393e512f9a99e9a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-fda/package.py
|
varioustoxins/spack
|
cab0e4cb240f34891a6d753f3393e512f9a99e9a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6
|
2022-01-08T08:41:11.000Z
|
2022-03-14T19:28:07.000Z
|
var/spack/repos/builtin/packages/r-fda/package.py
|
foeroyingur/spack
|
5300cbbb2e569190015c72d0970d25425ea38647
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RFda(RPackage):
"""These functions were developed to support functional data
analysis as described in Ramsay, J. O. and Silverman, B. W. (2005)
Functional Data Analysis. New York: Springer and in Ramsay, J. O.,
Hooker, Giles, and Graves, Spencer (2009). """
cran = 'fda'
version('5.5.1', sha256='dcaa2f6ae226d35855bc79c6967f60d45404b984c0afaec215b139c4b8dea23a')
depends_on('r@3.5:', type=('build', 'run'))
depends_on('r-matrix', type=('build', 'run'))
depends_on('r-fds', type=('build', 'run'))
depends_on('r-desolve', type=('build', 'run'))
| 37.666667
| 95
| 0.690265
|
2ccc1175c8dfb7afbfa4631306bfecaaf4e70451
| 4,472
|
py
|
Python
|
sleap/rangelist.py
|
jens-k/sleap
|
4e99ed037f1f7f41d9f15e2efaac638fc7e12b09
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
sleap/rangelist.py
|
jens-k/sleap
|
4e99ed037f1f7f41d9f15e2efaac638fc7e12b09
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
sleap/rangelist.py
|
jens-k/sleap
|
4e99ed037f1f7f41d9f15e2efaac638fc7e12b09
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
"""
Module with RangeList class for manipulating a list of range intervals.
This is used to cache the track occupancy so we can keep cache updating
when user manipulates tracks for a range of instances.
"""
from typing import List, Tuple
class RangeList:
"""
Class for manipulating a list of range intervals.
Each range interval in the list is a [start, end)-tuple.
"""
def __init__(self, range_list: List[Tuple[int]] = None):
self.list = range_list if range_list is not None else []
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._list)
@property
def list(self):
"""Returns the list of ranges."""
return self._list
@list.setter
def list(self, val):
"""Sets the list of ranges."""
self._list = val
@property
def is_empty(self):
"""Returns True if the list is empty."""
return len(self.list) == 0
@property
def start(self):
"""Return the start value of range (or None if empty)."""
if self.is_empty:
return None
return self.list[0][0]
def add(self, val, tolerance=0):
"""Add a single value, merges to last range if contiguous."""
if self.list and self.list[-1][1] + tolerance >= val:
self.list[-1] = (self.list[-1][0], val + 1)
else:
self.list.append((val, val + 1))
def insert(self, new_range: tuple):
"""Add a new range, merging to adjacent/overlapping ranges as appropriate."""
new_range = self._as_tuple(new_range)
pre, _, post = self.cut_range(new_range)
self.list = self.join_([pre, [new_range], post])
return self.list
def insert_list(self, range_list: List[Tuple[int]]):
"""Add each range from a list of ranges."""
for range_ in range_list:
self.insert(range_)
return self.list
def remove(self, remove: tuple):
"""Remove everything that overlaps with given range."""
pre, _, post = self.cut_range(remove)
self.list = pre + post
def cut(self, cut: int):
"""Return a pair of lists with everything before/after cut."""
return self.cut_(self.list, cut)
def cut_range(self, cut: tuple):
"""Return three lists, everthing before/within/after cut range."""
if not self.list:
return [], [], []
cut = self._as_tuple(cut)
a, r = self.cut_(self.list, cut[0])
b, c = self.cut_(r, cut[1])
return a, b, c
@staticmethod
def _as_tuple(x):
"""Return tuple (converting from range if necessary)."""
if isinstance(x, range):
return x.start, x.stop
return x
@staticmethod
def cut_(range_list: List[Tuple[int]], cut: int):
"""Return a pair of lists with everything before/after cut.
Args:
range_list: the list to cut
cut: the value at which to cut list
Returns:
(pre-cut list, post-cut list)-tuple
"""
pre = []
post = []
for range_ in range_list:
if range_[1] <= cut:
pre.append(range_)
elif range_[0] >= cut:
post.append(range_)
elif range_[0] < cut < range_[1]:
# two new ranges, split at cut
a = (range_[0], cut)
b = (cut, range_[1])
pre.append(a)
post.append(b)
return pre, post
@classmethod
def join_(cls, list_list: List[List[Tuple[int]]]):
"""Return a single list that includes all lists in input list.
Args:
list_list: a list of range lists
Returns:
range list that joins all of the lists in list_list
"""
if len(list_list) == 1:
return list_list[0]
if len(list_list) == 2:
return cls.join_pair_(list_list[0], list_list[1])
return cls.join_pair_(list_list[0], cls.join_(list_list[1:]))
@staticmethod
def join_pair_(list_a: List[Tuple[int]], list_b: List[Tuple[int]]):
"""Return a single pair of lists that joins two input lists."""
if not list_a or not list_b:
return list_a + list_b
last_a = list_a[-1]
first_b = list_b[0]
if last_a[1] >= first_b[0]:
return list_a[:-1] + [(last_a[0], first_b[1])] + list_b[1:]
return list_a + list_b
| 30.841379
| 85
| 0.567531
|
9e989fc8e825c7a8d1c163b1fe60576a4ce179a4
| 524
|
py
|
Python
|
largest among three number.py
|
Max143/Python_programs
|
5084900844d7f6c39a255a6cfb8fa5120a189026
|
[
"MIT"
] | null | null | null |
largest among three number.py
|
Max143/Python_programs
|
5084900844d7f6c39a255a6cfb8fa5120a189026
|
[
"MIT"
] | null | null | null |
largest among three number.py
|
Max143/Python_programs
|
5084900844d7f6c39a255a6cfb8fa5120a189026
|
[
"MIT"
] | null | null | null |
# program to find the largest among three numbers
x = int(input("Enter the first number : "))
y = int(input("Enter the second number : "))
z = int(input("Enter the third number : "))
def largest_num():
    if x == y == z:
        print("All three are the same number.")
    elif x >= y and x >= z:
        print("x is the largest among the three numbers.")
    elif y >= x and y >= z:
        print("y is the largest among the three numbers.")
    else:
        print("z is the largest among the three numbers.")
largest_num()
| 21.833333
| 50
| 0.570611
|
fbce9eaced8bbff8915d25746e6819f3721564bf
| 18,535
|
py
|
Python
|
template_plot.py
|
lzx0014/STOPS
|
da73bb00cee53d2bc7b866dfdedc7ce014898c8c
|
[
"MIT"
] | null | null | null |
template_plot.py
|
lzx0014/STOPS
|
da73bb00cee53d2bc7b866dfdedc7ce014898c8c
|
[
"MIT"
] | null | null | null |
template_plot.py
|
lzx0014/STOPS
|
da73bb00cee53d2bc7b866dfdedc7ce014898c8c
|
[
"MIT"
] | 1
|
2022-03-02T21:28:53.000Z
|
2022-03-02T21:28:53.000Z
|
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
# plt.rc('text', usetex=True)
# plt.rcParams['text.latex.preamble'] = r'\usepackage{amsmath}'
# import os
# os.environ["PATH"] += os.pathsep + '/Library/TeX/texbin'
from deep_rl import *
IMG_DIR = 'C:/Users/xull/Desktop/DeepRL-MVPI/images'
def plot_ppo():
plotter = Plotter()
games = [
'HalfCheetah-v2',
'Walker2d-v2',
'Swimmer-v2',
'Hopper-v2',
'Reacher-v2',
'Ant-v2',
'Humanoid-v2',
# 'HumanoidStandup-v2',
]
patterns = [
'.*remark_ppo.*',
# '.*meta_lr_0\.01-run.*',
# '.*meta_lr_0\.001-run.*',
# '.*meta_lr_0\.01-meta_prob_0\.1-run.*',
# '.*meta_lr_0\.001-meta_prob_0\.1-run.*',
'.*meta_lr_0\.01-meta_prob_1\.0-run.*',
'.*meta_lr_0\.001-meta_prob_1\.0-run.*',
# '.*lam_10-run.*',
# '.*lam_1-run.*',
# '.*lam_0\.1-run.*',
# '.*lam_0\.01-run.*',
# '.*lam_0\.001-run.*',
]
patterns = [
'.*lam_0\.1-run.*',
# '.*lam_1-run.*',
# '.*lam_10-run.*',
'.*lr_0\.007-remark_mvp-run.*',
'.*lr_7e-05-remark_mvp-run.*',
]
patterns = [
'.*b_10-lam_0.1-lr_0.0007-remark_tamar-run.*',
'.*b_10-lam_0.1-lr_7e-05-remark_tamar-run.*',
'.*b_10-lam_1-lr_0.0007-remark_tamar-run.*',
'.*b_10-lam_10-lr_0.0007-remark_tamar-run.*',
'.*b_50-lam_0.1-lr_0.0007-remark_tamar-run.*',
]
patterns = [
# '.*remark_tamar-run.*',
# '.*remark_ppo-run.*',
# '.*lam_0.1-remark_mvppo-run.*',
# '.*lam_1-remark_mvppo-run.*',
# '.*lam_10-remark_mvppo-run.*',
# '.*meta_lr_0.001-meta_prob_0.1-run.*',
# '.*meta_lr_0.001-meta_prob_0.5-run.*',
# '.*meta_lr_0.001-meta_prob_1.0-run.*',
# '.*remark_mvp-run.*',
# '.*remark_tamar-run.*',
# '.*remark_risk-run.*',
'.*lam_0.1-remark_mva2c-run.*',
'.*lam_1-remark_mva2c-run.*',
'.*lam_10-remark_mva2c-run.*',
]
# patterns = [
# '.*lam_10-lr_0\.0007-remark_risk-run.*',
# '.*lam_10-lr_7e-05-remark_risk-run.*',
# ]
# labels = [
# 'PPO',
# 'VarPPO'
# ]
labels = patterns
plotter.plot_games(games=games,
patterns=patterns,
agg='mean',
# agg='mean',
downsample=0,
labels=labels,
right_align=False,
tag=plotter.RETURN_TRAIN,
# root='./log/per-step-reward/meta-var-ppo',
# root='./log/per-step-reward/mvp-params',
# root='./log/per-step-reward/30runs',
root='./tf_log/tf_log',
interpolation=100,
window=10,
)
plt.show()
# plt.tight_layout()
# plt.savefig('images/tmp.png', bbox_inches='tight')
def plot_ddpg_td3():
plotter = Plotter()
games = [
'HalfCheetah-v2',
'Walker2d-v2',
'Hopper-v2',
'Swimmer-v2',
'Reacher-v2',
]
patterns = [
'remark_ddpg',
'remark_td3',
]
labels = [
'DDPG',
'TD3',
]
plotter.plot_games(games=games,
patterns=patterns,
agg='mean',
downsample=0,
labels=labels,
right_align=False,
tag=plotter.RETURN_TEST,
root='./data/benchmark',
interpolation=0,
window=0,
)
# plt.show()
plt.tight_layout()
plt.savefig('images/mujoco_eval.png', bbox_inches='tight')
def plot_atari():
plotter = Plotter()
games = [
'BreakoutNoFrameskip-v4',
]
patterns = [
'remark_a2c',
'remark_categorical',
'remark_dqn',
'remark_n_step_dqn',
'remark_option_critic',
'remark_ppo',
'remark_quantile',
]
labels = [
'A2C',
'C51',
'DQN',
'N-Step DQN',
'OC',
'PPO',
'QR-DQN',
]
plotter.plot_games(games=games,
patterns=patterns,
agg='mean',
downsample=100,
labels=labels,
right_align=False,
tag=plotter.RETURN_TRAIN,
root='./data/benchmark/atari',
interpolation=0,
window=100,
)
# plt.show()
plt.tight_layout()
plt.savefig('images/Breakout.png', bbox_inches='tight')
def plot_risk_chain():
plotter = Plotter()
games = [
'RiskChain-v0',
]
num_samples = 1000
patterns1 = [
'lam_0-num_samples_%s-remark_off-policy-run' % (num_samples),
'lam_1-num_samples_%s-remark_off-policy-run' % (num_samples),
'lam_2-num_samples_%s-remark_off-policy-run' % (num_samples),
'lam_4-num_samples_%s-remark_off-policy-run' % (num_samples),
'lam_8-num_samples_%s-remark_off-policy-run' % (num_samples),
]
patterns2 = [
'lam_0-remark_off-policy-use_oracle_ratio_True-run',
'lam_10-remark_off-policy-use_oracle_ratio_True-run',
]
patterns = [patterns1, patterns2]
labels = [
r'$\lambda=0$',
r'$\lambda=1$',
r'$\lambda=2$',
r'$\lambda=4$',
r'$\lambda=8$',
]
titles = ['Off-line MVPI']
fontsize = 18
def plot_games(self, games, **kwargs):
kwargs.setdefault('agg', 'mean')
import matplotlib.pyplot as plt
l = len(titles)
# plt.figure(figsize=(l * 5, 5))
for i, title in enumerate(titles):
# plt.subplot(1, l, i + 1)
for j, p in enumerate(patterns[i]):
label = kwargs['labels'][j]
color = self.COLORS[j]
log_dirs = self.filter_log_dirs(pattern='.*%s.*%s' % (games[0], p), **kwargs)
x, y = self.load_results(log_dirs, **kwargs)
if kwargs['downsample']:
indices = np.linspace(0, len(x) - 1, kwargs['downsample']).astype(np.int)
x = x[indices]
y = y[:, indices]
if kwargs['agg'] == 'mean':
self.plot_mean(y, x, label=label, color=color, error='se')
elif kwargs['agg'] == 'mean_std':
self.plot_mean(y, x, label=label, color=color, error='std')
elif kwargs['agg'] == 'median':
self.plot_median_std(y, x, label=label, color=color)
else:
for k in range(y.shape[0]):
                        plt.plot(x, y[k], label=label, color=color)
label = None
plt.xlabel('Iterations', fontsize=fontsize)
plt.xticks([0, 100], [0, 200], fontsize=fontsize)
if i == 0:
plt.ylabel(r'$\pi(a_0|s_0)$', horizontalalignment='right', fontsize=fontsize, rotation='horizontal')
plt.yticks([0, 1], [0, 1], fontsize=fontsize)
plt.ylim([0, 1])
plt.title(title, fontsize=fontsize)
plt.legend(fontsize=14)
plot_games(plotter,
games=games,
patterns=patterns,
# agg='mean_std',
agg='mean',
# agg='median',
downsample=0,
labels=labels,
right_align=True,
tag='pi_a0',
root='./log/per-step-reward/risk-chain',
# root='./tf_log/tf_log',
interpolation=0,
window=0,
)
# plt.show()
plt.tight_layout()
plt.savefig('%s/off-policy-mvpi.pdf' % (IMG_DIR), bbox_inches='tight')
def plot_mvpi_td3(lam=1):
plotter = Plotter()
games = [
'InvertedPendulum-v2',
# 'InvertedDoublePendulum-v2',
# 'HalfCheetah-v2',
# 'Walker2d-v2',
# 'Swimmer-v2',
# 'Hopper-v2',
# 'Reacher-v2',
# 'Ant-v2',
]
patterns = [
'.*lam_0-remark_mvpi_td3-run.*',
'.*lam_%s-remark_mvpi_td3-run.*' % (lam),
'.*lam_%s-remark_trvo-run.*' % (lam),
'.*lam_%s-remark_mvp-run.*' % (lam),
'.*lam_%s-remark_risk-run.*' % (lam),
'.*lam_%s-remark_tamar-run.*' % (lam),
]
labels = [
'TD3',
'MVPI-TD3',
'TRVO',
'MVP',
'Prashanth',
'Tamar',
]
# labels = patterns
fontsize = 20
def plot_games(self, games, **kwargs):
kwargs.setdefault('agg', 'mean')
import matplotlib.pyplot as plt
l = len(games)
plt.figure(figsize=(4 * 5, 2 * 4.5))
for i, game in enumerate(games):
plt.subplot(2, 4, i + 1)
for j, p in enumerate(kwargs['patterns']):
label = kwargs['labels'][j]
color = self.COLORS[j]
log_dirs = self.filter_log_dirs(pattern='.*%s.*%s' % (game, p), **kwargs)
x, y = self.load_results(log_dirs, **kwargs)
if kwargs['downsample']:
indices = np.linspace(0, len(x) - 1, kwargs['downsample']).astype(np.int)
x = x[indices]
y = y[:, indices]
if kwargs['agg'] == 'mean':
self.plot_mean(y, x, label=label, color=color, error='se')
elif kwargs['agg'] == 'mean_std':
self.plot_mean(y, x, label=label, color=color, error='std')
elif kwargs['agg'] == 'median':
self.plot_median_std(y, x, label=label, color=color)
else:
for k in range(y.shape[0]):
                        plt.plot(x, y[k], label=label, color=color)
label = None
if i >= 4:
plt.xlabel('Steps', fontsize=fontsize)
plt.xticks([0, 1e6], ['0', r'$10^6$'], fontsize=fontsize)
if i % 4 == 0:
plt.ylabel('Episode Return', fontsize=fontsize)
plt.tick_params(axis='y', labelsize=fontsize)
plt.title(game, fontsize=fontsize)
if i == 0:
plt.legend(fontsize=fontsize)
plot_games(plotter,
games=games,
patterns=patterns,
# agg='mean_std',
agg='mean',
downsample=0,
labels=labels,
right_align=False,
tag=plotter.RETURN_TEST,
root='./per-step-reward/10runs',
interpolation=0,
window=0,
)
print('a)')
plt.show()
plt.tight_layout()
plt.savefig('images/mvpi-td3-%s.svg' % (lam), bbox_inches='tight')
def plot_mvpi_td3_mean_per_step_reward(lam=1):
plotter = Plotter()
games = [
'InvertedPendulum-v2',
'InvertedDoublePendulum-v2',
'HalfCheetah-v2',
'Walker2d-v2',
'Swimmer-v2',
'Hopper-v2',
'Reacher-v2',
'Ant-v2',
]
patterns = [
'.*lam_%s-remark_mvpi_td3-run.*' % (lam),
'.*lam_%s-remark_trvo-run.*' % (lam),
]
labels = [
'MVPI-TD3',
'TRVO',
]
# labels = patterns
fontsize = 20
def plot_games(self, games, **kwargs):
kwargs.setdefault('agg', 'mean')
import matplotlib.pyplot as plt
l = len(games)
plt.figure(figsize=(4 * 5, 2 * 4.5))
for i, game in enumerate(games):
plt.subplot(2, 4, i + 1)
for j, p in enumerate(kwargs['patterns']):
label = kwargs['labels'][j]
color = self.COLORS[j]
tag_prefix = kwargs['tag']
ys = []
for tag in ['mean', 'std']:
kwargs['tag'] = '%s_%s' % (tag_prefix, tag)
log_dirs = self.filter_log_dirs(pattern='.*%s.*%s' % (game, p), **kwargs)
x, y = self.load_results(log_dirs, **kwargs)
ys.append(y)
kwargs['tag'] = tag_prefix
y_mean, y_std = ys
y = y_mean - lam * np.power(y_std, 2)
if kwargs['downsample']:
indices = np.linspace(0, len(x) - 1, kwargs['downsample']).astype(np.int)
x = x[indices]
y = y[:, indices]
if kwargs['agg'] == 'mean':
self.plot_mean(y, x, label=label, color=color, error='se')
elif kwargs['agg'] == 'mean_std':
self.plot_mean(y, x, label=label, color=color, error='std')
elif kwargs['agg'] == 'median':
self.plot_median_std(y, x, label=label, color=color)
else:
for k in range(y.shape[0]):
                        plt.plot(x, y[k], label=label, color=color)
label = None
if i >= 4:
plt.xlabel('Steps', fontsize=fontsize)
plt.xticks([0, 1e6], ['0', r'$10^6$'], fontsize=fontsize)
if i % 4 == 0:
plt.ylabel(r'$J_{\text{reward}}$', fontsize=fontsize)
plt.tick_params(axis='y', labelsize=fontsize)
plt.title(game, fontsize=fontsize)
if i == 0:
plt.legend(fontsize=fontsize)
plot_games(plotter,
games=games,
patterns=patterns,
# agg='mean_std',
agg='mean',
downsample=0,
labels=labels,
right_align=False,
tag='per_step_reward_test',
root='./log/per-step-reward/10runs',
interpolation=0,
window=0,
)
# plt.show()
plt.tight_layout()
plt.savefig('%s/mvpi-td3-per-step-reward-%s.pdf' % (IMG_DIR, lam), bbox_inches='tight')
def generate_table(lam=1, reload=False):
plotter = Plotter()
games = [
'InvertedPendulum-v2',
'InvertedDoublePendulum-v2',
'HalfCheetah-v2',
'Walker2d-v2',
'Swimmer-v2',
'Hopper-v2',
'Reacher-v2',
'Ant-v2',
# 'Humanoid-v2',
]
patterns = [
'.*lam_0-remark_mvpi_td3-run.*',
'.*lam_%s-remark_trvo-run.*' % (lam),
'.*lam_%s-remark_mvpi_td3-run.*' % (lam),
# '.*lam_%s-remark_mvp-run.*' % (lam),
# '.*lam_%s-remark_tamar-run.*' % (lam),
# '.*lam_%s-remark_risk-run.*' % (lam),
]
labels = [
'TD3',
'TRVO',
'MVPI-TD3',
]
kwargs = dict(games=games,
patterns=patterns,
agg='mean',
downsample=0,
right_align=True,
tag='EOT_eval',
root='./log/per-step-reward/10runs',
interpolation=0,
window=0)
stats = dict()
if reload:
for i, game in enumerate(games):
for j, p in enumerate(patterns):
log_dirs = plotter.filter_log_dirs(pattern='.*%s.*%s' % (game, p), **kwargs)
x, y = plotter.load_results(log_dirs, **kwargs)
print(y.shape)
mean = np.mean(y, axis=1)
var = np.std(y, axis=1) ** 2 + 1e-5
stats[(i, j)] = [mean, var]
with open('data/per-step-reward/eval_perf-%s.bin' % (lam), 'wb') as f:
pickle.dump(stats, f)
with open('data/per-step-reward/eval_perf-%s.bin' % (lam), 'rb') as f:
stats = pickle.load(f)
# strs = []
# for j in range(1, len(patterns)):
# str = '%s' % (labels[j])
# for i, game in enumerate(games):
# mean_baseline, var_baseline = stats[(i, 0)]
# mean_algo, var_algo = stats[(i, j)]
# mean_baseline = np.mean(mean_baseline)
# var_baseline = np.mean(var_baseline)
# mean_algo = np.mean(mean_algo)
# var_algo = np.mean(var_algo)
# J_baseline = mean_baseline - lam * var_baseline
# J_algo = mean_algo - lam * var_algo
# perf_improv = np.round((J_algo - J_baseline) / np.abs(J_baseline) * 100).astype(np.int)
# str = str + '& %s\\%%' % (perf_improv)
# str = str + '\\\\ \\hline'
# strs.append(str)
# for str in strs:
# print(str)
strs = []
for i, game in enumerate(games):
str = '%s' % (games[i][:-3])
for j in range(1, len(patterns)):
mean_baseline, var_baseline = stats[(i, 0)]
mean_algo, var_algo = stats[(i, j)]
mean_baseline = np.mean(mean_baseline)
var_baseline = np.mean(var_baseline) + 1e-2
mean_algo = np.mean(mean_algo)
var_algo = np.mean(var_algo) + 1e-2
print(games[i], labels[0], mean_baseline, var_baseline)
print(games[i], labels[j], mean_algo, var_algo)
J_baseline = mean_baseline - lam * var_baseline
J_algo = mean_algo - lam * var_algo
sr_baseline = mean_baseline / (var_baseline ** 0.5)
sr_algo = mean_algo / (var_algo ** 0.5)
perf_improv = np.round((J_algo - J_baseline) / np.abs(J_baseline) * 100).astype(np.int)
mean_improv = np.round((mean_algo - mean_baseline)/ np.abs(mean_baseline) * 100).astype(np.int)
var_improv = np.round((var_algo - var_baseline)/ np.abs(var_baseline) * 100).astype(np.int)
sr_improv = np.round((sr_algo - sr_baseline)/ np.abs(sr_baseline) * 100).astype(np.int)
str = str + '& %s\\%% & %s\\%% & %s\\%% & %s\\%%' % (perf_improv, mean_improv, var_improv, sr_improv)
str = str + '\\\\ \\hline'
strs.append(str)
for str in strs:
print(str)
if __name__ == '__main__':
# mkdir('images')
# plot_ppo()
# plot_ddpg_td3()
# plot_atari()
plot_mvpi_td3(0.5)
# plot_mvpi_td3(1)
# plot_mvpi_td3(2)
# generate_table(0.5)
# generate_table(1)
# generate_table(2)
# plot_mvpi_td3_mean_per_step_reward(0.5)
# plot_mvpi_td3_mean_per_step_reward(1)
# plot_mvpi_td3_mean_per_step_reward(2)
# plot_risk_chain()
| 32.12305
| 116
| 0.483086
|
306385f7cc35a93913744172cdcceed9c88b02de
| 6,960
|
py
|
Python
|
parser/manager/TweetManager.py
|
ApostolWario/SNTA_OSINT
|
50d8237a557a8e808ea8ba9fd3af071293c4662b
|
[
"MIT"
] | null | null | null |
parser/manager/TweetManager.py
|
ApostolWario/SNTA_OSINT
|
50d8237a557a8e808ea8ba9fd3af071293c4662b
|
[
"MIT"
] | null | null | null |
parser/manager/TweetManager.py
|
ApostolWario/SNTA_OSINT
|
50d8237a557a8e808ea8ba9fd3af071293c4662b
|
[
"MIT"
] | null | null | null |
import urllib.request, urllib.parse, urllib.error, urllib.request, urllib.error, urllib.parse, json, re, datetime, sys, \
http.cookiejar
from .. import models
from pyquery import PyQuery
from lxml import html
import requests
from bs4 import BeautifulSoup
from termcolor import colored
class TweetManager:
def __init__(self):
pass
@staticmethod
def getTweets(tweetCriteria, receiveBuffer=None, bufferLength=100):
refreshCursor = ''
results = []
resultsAux = []
cookieJar = http.cookiejar.CookieJar()
active = True
while active:
json = TweetManager.getJsonReponse(tweetCriteria, refreshCursor, cookieJar)
if len(json['items_html'].strip()) == 0:
break
refreshCursor = json['min_position']
tweets = PyQuery(json['items_html'])('div.js-stream-tweet')
if len(tweets) == 0:
break
for tweetHTML in tweets:
try:
tweetPQ = PyQuery(tweetHTML)
tweet = models.Tweet()
usernameTweet = tweetPQ("b").html()
txt = re.sub(r"\s+", " ", tweetPQ("p.js-tweet-text").text().replace('# ', '#').replace('@ ', '@'))
txt = txt.replace('# ','#')
print(colored("@","red") + colored(usernameTweet,"red")+colored(": ","red")+txt+"\n")
retweets = int(tweetPQ("span.ProfileTweet-action--retweet span.ProfileTweet-actionCount").attr(
"data-tweet-stat-count").replace(",", ""));
favorites = int(tweetPQ("span.ProfileTweet-action--favorite span.ProfileTweet-actionCount").attr(
"data-tweet-stat-count").replace(",", ""));
dateSec = int(tweetPQ("small.time span.js-short-timestamp").attr("data-time"))
id = tweetPQ.attr("data-tweet-id");
permalink = tweetPQ.attr("data-permalink-path");
user_id = int(tweetPQ("a.js-user-profile-link").attr("data-user-id"))
page = requests.get('https://twitter.com/tubiity/status/'+id)
script_geo =html.fromstring(page.content)
location = script_geo.xpath('//a[@class="u-textUserColor js-nav js-geo-pivot-link"]/text()')
sp_location = ','.join(location)
geo = ''
geoSpan = tweetPQ('span.Tweet-geo')
if len(geoSpan) > 0:
geo = geoSpan.attr('title')
urls = []
#userInformation
result = requests.get("https://twitter.com/"+usernameTweet)
c = result.content
soup = BeautifulSoup(c, "html.parser")
liste = []
samples = soup.find_all("a",
"ProfileNav-stat ProfileNav-stat--link u-borderUserColor u-textCenter js-tooltip js-openSignupDialog js-nonNavigable u-textUserColor")
#Follower, Follow and number of likes in list
for a in samples:
liste.append(a.attrs['title'])
for link in tweetPQ("a"):
try:
urls.append((link.attrib["data-expanded-url"]))
except KeyError:
pass
tweet.id = id
tweet.permalink = 'https://twitter.com' + permalink
tweet.username = usernameTweet
tweet.user_id = user_id
tweet.text = txt
tweet.date = datetime.datetime.fromtimestamp(dateSec)+datetime.timedelta(hours=2)
tweet.formatted_date = datetime.datetime.fromtimestamp(dateSec).strftime("%a %b %d %X +0000 %Y")
tweet.retweets = retweets
tweet.favorites = favorites
tweet.mentions = " ".join(re.compile('(@\\w*)').findall(tweet.text))
tweet.hashtags = " ".join(re.compile('(#\\w*)').findall(tweet.text))
tweet.geo = sp_location
tweet.urls = ",".join(urls)
tweet.author_id = user_id
results.append(tweet)
resultsAux.append(tweet)
if receiveBuffer and len(resultsAux) >= bufferLength:
receiveBuffer(resultsAux)
resultsAux = []
if tweetCriteria.maxTweets > 0 and len(results) >= tweetCriteria.maxTweets:
active = False
break
                except:
                    if receiveBuffer and len(resultsAux) > 0:
                        receiveBuffer(resultsAux)
                    return
if receiveBuffer and len(resultsAux) > 0:
receiveBuffer(resultsAux)
return results
@staticmethod
def getJsonReponse(tweetCriteria, refreshCursor, cookieJar):
url = "https://twitter.com/i/search/timeline?f=realtime&q=%s&src=typd&%smax_position=%s"
#url = "https://twitter.com/search?l?&q=%s "
urlGetData = ''
if hasattr(tweetCriteria, 'username'):
urlGetData += ' from:' + tweetCriteria.username
if hasattr(tweetCriteria, 'querySearch'):
urlGetData += ' '+ tweetCriteria.querySearch
if hasattr(tweetCriteria, 'since'):
urlGetData += ' since:' + tweetCriteria.since
if hasattr(tweetCriteria, 'until'):
urlGetData += ' until:' + tweetCriteria.until
if hasattr(tweetCriteria, 'lang'):
urlLang = 'lang=' + tweetCriteria.lang + '&'
else:
urlLang = ''
url = url % (urllib.parse.quote(urlGetData), urlLang, refreshCursor)
headers = [
('Host', "twitter.com"),
('User-Agent', "Mozilla/5.0 (Windows NT 6.1; Win64; x64)"),
('Accept', "application/json, text/javascript, */*; q=0.01"),
('Accept-Language', "de,en-US;q=0.7,en;q=0.3"),
('X-Requested-With', "XMLHttpRequest"),
('Referer', url),
('Connection', "keep-alive")
]
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookieJar))
opener.addheaders = headers
try:
response = opener.open(url)
jsonResponse = response.read()
except:
# print("Twitter weird response. Try to see on browser: ", url)
print(
"Twitter weird response. Try to see on browser: https://twitter.com/search?q=%s&src=typd" % urllib.parse.quote(
urlGetData))
print("Unexpected error:", sys.exc_info()[0])
sys.exit()
return
dataJson = json.loads(jsonResponse.decode())
return dataJson
| 41.183432
| 178
| 0.520546
|
39c1634dccd9c946f2ff13b17330be42d46d4497
| 1,209
|
py
|
Python
|
ci/templatetags/range.py
|
andrsd/civet
|
ac9ffffdea987437a5eb75779b9c7fe681e1ba85
|
[
"Apache-2.0"
] | 29
|
2016-11-29T15:15:56.000Z
|
2021-09-08T04:04:53.000Z
|
ci/templatetags/range.py
|
andrsd/civet
|
ac9ffffdea987437a5eb75779b9c7fe681e1ba85
|
[
"Apache-2.0"
] | 108
|
2016-11-29T17:29:00.000Z
|
2022-03-21T21:00:10.000Z
|
ci/templatetags/range.py
|
andrsd/civet
|
ac9ffffdea987437a5eb75779b9c7fe681e1ba85
|
[
"Apache-2.0"
] | 11
|
2016-11-29T15:15:53.000Z
|
2020-05-15T12:22:55.000Z
|
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from django.template import Library
register = Library()
@register.filter
def get_range( value ):
"""
Filter - returns a list containing range made from given value
Usage (in template):
<ul>{% for i in 3|get_range %}
<li>{{ i }}. Do something</li>
{% endfor %}</ul>
    Resulting HTML:
<ul>
<li>0. Do something</li>
<li>1. Do something</li>
<li>2. Do something</li>
</ul>
Instead of 3 one may use the variable set in the views
"""
return list(range(value))
| 29.487805
| 74
| 0.680728
|
6e25b186af61158beed9700659877260c8883beb
| 124
|
py
|
Python
|
blocks/land_cover_classification/src/run.py
|
bmoska/land-cover-classification-demo
|
2362913b586c8915086bb5ccc9bd17be15177954
|
[
"MIT"
] | 1
|
2021-04-16T13:32:18.000Z
|
2021-04-16T13:32:18.000Z
|
blocks/land_cover_classification/src/run.py
|
bmoska/land-cover-classification-demo
|
2362913b586c8915086bb5ccc9bd17be15177954
|
[
"MIT"
] | null | null | null |
blocks/land_cover_classification/src/run.py
|
bmoska/land-cover-classification-demo
|
2362913b586c8915086bb5ccc9bd17be15177954
|
[
"MIT"
] | null | null | null |
from land_cover_classification import LandCoverClassification
if __name__ == "__main__":
LandCoverClassification.run()
| 24.8
| 61
| 0.822581
|
ae3060268b874a2a1b7b57735b3972a1e1909155
| 5,083
|
py
|
Python
|
simple/stile/assignment2-q5ii.py
|
feiooo/games-puzzles-algorithms
|
66d97135d163fb04e820338068d9bd9e12d907e9
|
[
"MIT"
] | null | null | null |
simple/stile/assignment2-q5ii.py
|
feiooo/games-puzzles-algorithms
|
66d97135d163fb04e820338068d9bd9e12d907e9
|
[
"MIT"
] | null | null | null |
simple/stile/assignment2-q5ii.py
|
feiooo/games-puzzles-algorithms
|
66d97135d163fb04e820338068d9bd9e12d907e9
|
[
"MIT"
] | null | null | null |
# simple bfs program to solve sliding tile
from collections import deque
#from random import shuffle
from time import sleep, time
from sys import stdin
def int2chr(t): # nonneg int to single in '0123456789ABCDEFGHIJ...'
if t <= 9: return chr(t+ord('0'))
else: return chr(t-10 + ord('A'))
def chr2int(c): # chr in '0123456789ABCDEFGHIJ...' to int
if c in '0123456789': return ord(c) - ord('0')
else: return 10 + ord(c) - ord('A')
def list2str(L): # list nonneg ints to string monochars
s = ''
for x in L: s += int2chr(x)
return s
def pretty(s,cols,monochar): # string to printable matrix
# if monochar true: print elements as monochars
# else: print elements as ints
count, outstr, BLANK = 0, '', ' '
for x in s:
count += 1
if monochar:
if x == '0': outstr += ' ' + BLANK
else: outstr += ' ' + x
else:
if x == '0': outstr += ' ' + BLANK # blank
elif x in ' 123456789': outstr += ' ' + x # digit
else: outstr += ' ' + str(chr2int(x)) # 2 digits
if count%cols == 0: outstr += '\n'
#sleep(.005)
return outstr
def str_swap(s,lcn,shift): # swap chars at s[lcn], s[lcn+shift]
a , b = min(lcn,lcn+shift), max(lcn,lcn+shift)
return s[:a] + s[b] + s[a+1:b] + s[a] + s[b+1:]
class Tile:
"""a sliding tile class with simple search"""
def __init__(self,f_name):
# state will be the starting state of any computer search
# initialized from stdin, 0 is blank,
# format: r c then tile entries, row by row, e.g.:
# 2 3
# 2 0 5
# 4 1 3
self.state = []
f = open(f_name)
        for line in f:  # iterate file lines (f.read() would yield single characters and break multi-digit entries)
for elem in line.split():
self.state.append(int(elem))
f.close()
print(self.state)
# rows, cols are 1st 2 elements of list, so pop them
self.rows, self.cols = self.state.pop(0), self.state.pop(0)
# state now holds contents of tile in row-major order
# assert
# - at least 2 rows, at least 2 cols,
# - all entries in [0 .. r*c-1], and
# - some entry 0
assert(self.rows>=2 and self.cols>=2)
for s in self.state: assert(s>=0 and s < self.rows*self.cols)
ndx_min = self.state.index(min(self.state))
assert(self.state[ndx_min] == 0)
# these shifts of .state indices effect moves of the blank:
self.LF, self.RT, self.UP, self.DN = -1, 1, -self.cols, self.cols
self.shifts = [self.LF, self.RT, self.UP, self.DN] #left right up down
self.level_num = 0
def legal_shifts(self,psn): # list of legal shifts
S = []
c,r = psn % self.cols, psn // self.cols # column number, row number
if c > 0: S.append(self.LF)
if c < self.cols-1: S.append(self.RT)
if r > 0: S.append(self.UP)
if r < self.rows-1: S.append(self.DN)
return S
def bfs(self):
def report(i, d, L, s):
print(i,'iterations',s,'seconds',i/s,'itn/s')
print(len(d), 'states')
print('nodes by level')
for j in range(len(L)): print(j, L[j])
print('')
def targetlist(n): # return target state, as list
L = []
for j in range(1,n): L.append(j)
L.append(0)
return L
start = list2str(self.state)
target = list2str(targetlist(self.rows*self.cols))
# use a parent dictionary to
# - track seen states (all are in dictionary)
# - record parents, to recover solution transition sequence
Parent = { start : start}
Fringe = deque() # the sliding tile states, as strings, we encounter
Fringe.append(start)
iteration, nodes_this_level, Levels = 0, 1, [1]
start_time = time()
while len(Fringe) > 0:
iteration +=1
stst = Fringe.popleft() # popleft() and append() give FIFO
#print(pretty(stst, self.cols, True))
ndx0 = stst.index('0')
for shift in self.legal_shifts(ndx0):
nbr = str_swap(stst,ndx0,shift)
if nbr == target:
print('found target')
while True: # show the sequence, backwards
#sleep(.5)
print(pretty(stst, self.cols, True))
p = Parent[stst]
if p == stst:
end_time = time()
report(iteration, Parent, Levels, end_time-start_time)
self.level_num = len(Levels)
#print(",,,")
#print(self.level_num)
return self.level_num
stst = p
elif nbr not in Parent:
Parent[nbr] = stst
Fringe.append(nbr)
nodes_this_level -= 1
if nodes_this_level == 0:
nodes_this_level = len(Fringe)
Levels.append(nodes_this_level)
print(' ',iteration,'iterations, level',len(Levels),'has',nodes_this_level,'nodes')
#sleep(1)
print('\nno solution found')
end_time = time()
report(iteration, Parent, Levels, end_time-start_time)
self.level_num = len(Levels)
return self.level_num
#print("...")
#print(self.level_num)
n = 0
lev_num = 0
st = Tile("st.33.2")
lev_num += st.bfs()
n+=1
#print(avglev_num/n)
| 33.006494
| 91
| 0.577218
|
4b17d79693d45f2edb99d6c21adfb349fb3e262a
| 999
|
py
|
Python
|
setup.py
|
paulomach/mqtt2gpio
|
95f57ecfb6561eaa9a5a9d50157814f182a09320
|
[
"MIT"
] | null | null | null |
setup.py
|
paulomach/mqtt2gpio
|
95f57ecfb6561eaa9a5a9d50157814f182a09320
|
[
"MIT"
] | null | null | null |
setup.py
|
paulomach/mqtt2gpio
|
95f57ecfb6561eaa9a5a9d50157814f182a09320
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="mqtt2gpio",
version="0.0.1",
author="Paulo Machado",
author_email="paulo.mach@gmail.com",
description="A very crude and direct mqtt to gpio write",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/paulomach/mqtt2gpio",
project_urls={
"Bug Tracker": "https://github.com/paulomach/mqtt2gpio/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: GNU/Linux",
],
package_dir={"": "src"},
packages=find_packages(where="src"),
python_requires=">=3.7",
entry_points={
'console_scripts': [
'mqtt2gpio=mqtt2gpio.mqtt2gpio:main'
],
},
install_requires=[
'paho-mqtt',
'gpiozero'
]
)
| 27.75
| 71
| 0.61962
|
a5df36482e9f447883ecea9d677e09e0e66800cb
| 7,913
|
py
|
Python
|
pptx/oxml/chart/chart.py
|
mikebirdgeneau/python-pptx
|
ab86d39a643595ccc33f4644d584e35268cb4f22
|
[
"MIT"
] | 1
|
2021-04-12T08:00:24.000Z
|
2021-04-12T08:00:24.000Z
|
pptx/oxml/chart/chart.py
|
mikebirdgeneau/python-pptx
|
ab86d39a643595ccc33f4644d584e35268cb4f22
|
[
"MIT"
] | null | null | null |
pptx/oxml/chart/chart.py
|
mikebirdgeneau/python-pptx
|
ab86d39a643595ccc33f4644d584e35268cb4f22
|
[
"MIT"
] | 3
|
2019-05-28T16:28:53.000Z
|
2020-07-28T19:13:44.000Z
|
# encoding: utf-8
"""Custom element classes for top-level chart-related XML elements."""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from pptx.oxml import parse_xml
from pptx.oxml.chart.shared import CT_Title
from pptx.oxml.ns import nsdecls, qn
from pptx.oxml.simpletypes import ST_Style, XsdString
from pptx.oxml.text import CT_TextBody
from pptx.oxml.xmlchemy import (
BaseOxmlElement, OneAndOnlyOne, RequiredAttribute, ZeroOrMore, ZeroOrOne
)
class CT_Chart(BaseOxmlElement):
"""`c:chart` custom element class."""
_tag_seq = (
'c:title', 'c:autoTitleDeleted', 'c:pivotFmts', 'c:view3D',
'c:floor', 'c:sideWall', 'c:backWall', 'c:plotArea', 'c:legend',
'c:plotVisOnly', 'c:dispBlanksAs', 'c:showDLblsOverMax', 'c:extLst',
)
title = ZeroOrOne('c:title', successors=_tag_seq[1:])
autoTitleDeleted = ZeroOrOne(
'c:autoTitleDeleted', successors=_tag_seq[2:]
)
plotArea = OneAndOnlyOne('c:plotArea')
legend = ZeroOrOne('c:legend', successors=_tag_seq[9:])
rId = RequiredAttribute('r:id', XsdString)
_chart_tmpl = (
'<c:chart %s %s r:id="%%s"/>' % (nsdecls('c'), nsdecls('r'))
)
@property
def has_legend(self):
"""
True if this chart has a legend defined, False otherwise.
"""
legend = self.legend
if legend is None:
return False
return True
@has_legend.setter
def has_legend(self, bool_value):
"""
Add, remove, or leave alone the ``<c:legend>`` child element depending
on current state and *bool_value*. If *bool_value* is |True| and no
``<c:legend>`` element is present, a new default element is added.
When |False|, any existing legend element is removed.
"""
if bool(bool_value) is False:
self._remove_legend()
else:
if self.legend is None:
self._add_legend()
@staticmethod
def new_chart(rId):
"""
Return a new ``<c:chart>`` element
"""
xml = CT_Chart._chart_tmpl % (rId)
chart = parse_xml(xml)
return chart
def _new_title(self):
return CT_Title.new_title()
class CT_ChartSpace(BaseOxmlElement):
"""`c:chartSpace` root element of a chart part."""
_tag_seq = (
'c:date1904', 'c:lang', 'c:roundedCorners', 'c:style', 'c:clrMapOvr',
'c:pivotSource', 'c:protection', 'c:chart', 'c:spPr', 'c:txPr',
'c:externalData', 'c:printSettings', 'c:userShapes', 'c:extLst',
)
date1904 = ZeroOrOne('c:date1904', successors=_tag_seq[1:])
style = ZeroOrOne('c:style', successors=_tag_seq[4:])
chart = OneAndOnlyOne('c:chart')
txPr = ZeroOrOne('c:txPr', successors=_tag_seq[10:])
externalData = ZeroOrOne('c:externalData', successors=_tag_seq[11:])
del _tag_seq
@property
def catAx_lst(self):
return self.chart.plotArea.catAx_lst
@property
def date_1904(self):
"""
Return |True| if the `c:date1904` child element resolves truthy,
|False| otherwise. This value indicates whether date number values
are based on the 1900 or 1904 epoch.
"""
date1904 = self.date1904
if date1904 is None:
return False
return date1904.val
@property
def dateAx_lst(self):
return self.xpath('c:chart/c:plotArea/c:dateAx')
def get_or_add_title(self):
"""Return the `c:title` grandchild, newly created if not present."""
return self.chart.get_or_add_title()
@property
def plotArea(self):
"""
Return the required `c:chartSpace/c:chart/c:plotArea` grandchild
element.
"""
return self.chart.plotArea
@property
def valAx_lst(self):
return self.chart.plotArea.valAx_lst
@property
def xlsx_part_rId(self):
"""
The string in the required ``r:id`` attribute of the
`<c:externalData>` child, or |None| if no externalData element is
present.
"""
externalData = self.externalData
if externalData is None:
return None
return externalData.rId
def _add_externalData(self):
"""
Always add a ``<c:autoUpdate val="0"/>`` child so auto-updating
behavior is off by default.
"""
externalData = self._new_externalData()
externalData._add_autoUpdate(val=False)
self._insert_externalData(externalData)
return externalData
def _new_txPr(self):
return CT_TextBody.new_txPr()
class CT_ExternalData(BaseOxmlElement):
"""
`<c:externalData>` element, defining link to embedded Excel package part
containing the chart data.
"""
autoUpdate = ZeroOrOne('c:autoUpdate')
rId = RequiredAttribute('r:id', XsdString)
class CT_PlotArea(BaseOxmlElement):
"""
``<c:plotArea>`` element.
"""
catAx = ZeroOrMore('c:catAx')
valAx = ZeroOrMore('c:valAx')
def iter_sers(self):
"""
Generate each of the `c:ser` elements in this chart, ordered first by
the document order of the containing xChart element, then by their
ordering within the xChart element (not necessarily document order).
"""
for xChart in self.iter_xCharts():
for ser in xChart.iter_sers():
yield ser
def iter_xCharts(self):
"""
Generate each xChart child element in document.
"""
plot_tags = (
qn('c:area3DChart'), qn('c:areaChart'), qn('c:bar3DChart'),
qn('c:barChart'), qn('c:bubbleChart'), qn('c:doughnutChart'),
qn('c:line3DChart'), qn('c:lineChart'), qn('c:ofPieChart'),
qn('c:pie3DChart'), qn('c:pieChart'), qn('c:radarChart'),
qn('c:scatterChart'), qn('c:stockChart'), qn('c:surface3DChart'),
qn('c:surfaceChart')
)
for child in self.iterchildren():
if child.tag not in plot_tags:
continue
yield child
@property
def last_ser(self):
"""
Return the last `<c:ser>` element in the last xChart element, based
on series order (not necessarily the same element as document order).
"""
last_xChart = self.xCharts[-1]
sers = last_xChart.sers
if not sers:
return None
return sers[-1]
@property
def next_idx(self):
"""
Return the next available `c:ser/c:idx` value within the scope of
this chart, the maximum idx value found on existing series,
incremented by one.
"""
idx_vals = [s.idx.val for s in self.sers]
if not idx_vals:
return 0
return max(idx_vals)+1
@property
def next_order(self):
"""
Return the next available `c:ser/c:order` value within the scope of
this chart, the maximum order value found on existing series,
incremented by one.
"""
order_vals = [s.order.val for s in self.sers]
if not order_vals:
return 0
return max(order_vals)+1
@property
def sers(self):
"""
Return a sequence containing all the `c:ser` elements in this chart,
ordered first by the document order of the containing xChart element,
then by their ordering within the xChart element (not necessarily
document order).
"""
return tuple(self.iter_sers())
@property
def xCharts(self):
"""
Return a sequence containing all the `c:{x}Chart` elements in this
chart, in document order.
"""
return tuple(self.iter_xCharts())
class CT_Style(BaseOxmlElement):
"""
``<c:style>`` element; defines the chart style.
"""
val = RequiredAttribute('val', ST_Style)
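# Illustrative sketch (not part of the original module): the hypothetical helper
# below shows how a bare `<c:chart>` element is created and inspected, assuming
# the usual pptx.oxml element-class registrations are in effect. 'rId1' is a
# placeholder relationship id.
def _demo_new_chart():
    chart = CT_Chart.new_chart('rId1')
    return chart.rId, chart.has_legend  # expected: ('rId1', False)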
| 31.031373
| 78
| 0.607102
|
5d142e02b661b8842cdb31ffb5d6033d92bc119f
| 938
|
py
|
Python
|
tests/addons.py
|
loriab/gau2grid
|
100e18175a23b766e78b1e349dfafded7aa5e330
|
[
"BSD-3-Clause"
] | null | null | null |
tests/addons.py
|
loriab/gau2grid
|
100e18175a23b766e78b1e349dfafded7aa5e330
|
[
"BSD-3-Clause"
] | null | null | null |
tests/addons.py
|
loriab/gau2grid
|
100e18175a23b766e78b1e349dfafded7aa5e330
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
def _plugin_import(plug):
import sys
if sys.version_info >= (3, 4):
from importlib import util
plug_spec = util.find_spec(plug)
else:
import pkgutil
plug_spec = pkgutil.find_loader(plug)
if plug_spec is None:
return False
else:
return True
def is_psi4_new_enough(version_feature_introduced):
if not _plugin_import('psi4'):
return False
import psi4
from pkg_resources import parse_version
#print(psi4.__file__)
#print(psi4.__version__)
#print(parse_version(psi4.__version__))
#print(parse_version(version_feature_introduced))
return parse_version(psi4.__version__) >= parse_version(version_feature_introduced)
using_psi4_libxc = pytest.mark.skipif(is_psi4_new_enough("1.2a1.dev100") is False,
reason="Psi4 does not include DFT rewrite to use Libxc. Update to development head")
| 28.424242
| 116
| 0.690832
|
baa92a85e3cd8c7f9a592bf3bace8dcd4b1cd484
| 4,682
|
py
|
Python
|
CLUE_Rock_Paper_Scissors/advanced/rps_crypto_chacha.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 665
|
2017-09-27T21:20:14.000Z
|
2022-03-31T09:09:25.000Z
|
CLUE_Rock_Paper_Scissors/advanced/rps_crypto_chacha.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 641
|
2017-10-03T19:46:37.000Z
|
2022-03-30T18:28:46.000Z
|
CLUE_Rock_Paper_Scissors/advanced/rps_crypto_chacha.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 734
|
2017-10-02T22:47:38.000Z
|
2022-03-30T14:03:51.000Z
|
# Copyright (c) 2015 Hubert Kario (code from tlslite-ng library)
# Copyright (c) 2020 Kevin J. Walters (very minor CP tweaks)
# GNU Lesser General Public License, version 2.1
# https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Pure Python implementation of ChaCha cipher
Implementation that follows RFC 7539 closely.
"""
import struct
MASK32 = 0xffffffff
class ChaCha():
"""Pure python implementation of ChaCha cipher"""
constants = [0x61707865, 0x3320646e, 0x79622d32, 0x6b206574]
# pylint: disable=invalid-name
@staticmethod
def rotl32(v, c):
"""Rotate left a 32 bit integer v by c bits"""
return ((v << c) & MASK32) | (v >> (32 - c))
@staticmethod
def quarter_round(x, a, b, c, d):
"""Perform a ChaCha quarter round"""
xa = x[a]
xb = x[b]
xc = x[c]
xd = x[d]
xa = (xa + xb) & MASK32
xd = xd ^ xa
xd = ((xd << 16) & MASK32 | (xd >> 16))
xc = (xc + xd) & MASK32
xb = xb ^ xc
xb = ((xb << 12) & MASK32 | (xb >> 20))
xa = (xa + xb) & MASK32
xd = xd ^ xa
xd = ((xd << 8) & MASK32 | (xd >> 24))
xc = (xc + xd) & MASK32
xb = xb ^ xc
xb = ((xb << 7) & MASK32 | (xb >> 25))
x[a] = xa
x[b] = xb
x[c] = xc
x[d] = xd
_round_mixup_box = [(0, 4, 8, 12),
(1, 5, 9, 13),
(2, 6, 10, 14),
(3, 7, 11, 15),
(0, 5, 10, 15),
(1, 6, 11, 12),
(2, 7, 8, 13),
(3, 4, 9, 14)]
@classmethod
def double_round(cls, x):
"""Perform two rounds of ChaCha cipher"""
for a, b, c, d in cls._round_mixup_box:
xa = x[a]
xb = x[b]
xc = x[c]
xd = x[d]
xa = (xa + xb) & MASK32
xd = xd ^ xa
xd = ((xd << 16) & MASK32 | (xd >> 16))
xc = (xc + xd) & MASK32
xb = xb ^ xc
xb = ((xb << 12) & MASK32 | (xb >> 20))
xa = (xa + xb) & MASK32
xd = xd ^ xa
xd = ((xd << 8) & MASK32 | (xd >> 24))
xc = (xc + xd) & MASK32
xb = xb ^ xc
xb = ((xb << 7) & MASK32 | (xb >> 25))
x[a] = xa
x[b] = xb
x[c] = xc
x[d] = xd
@staticmethod
def chacha_block(key, counter, nonce, rounds):
"""Generate a state of a single block"""
state = ChaCha.constants + key + [counter] + nonce
working_state = state[:]
dbl_round = ChaCha.double_round
for _ in range(0, rounds // 2):
dbl_round(working_state)
return [(st + wrkSt) & MASK32 for st, wrkSt
in zip(state, working_state)]
@staticmethod
def word_to_bytearray(state):
"""Convert state to little endian bytestream"""
return bytearray(struct.pack('<LLLLLLLLLLLLLLLL', *state))
@staticmethod
def _bytearray_to_words(data):
"""Convert a bytearray to array of word sized ints"""
ret = []
for i in range(0, len(data) // 4):
ret.extend(struct.unpack('<L', data[i*4:(i+1)*4]))
return ret
def __init__(self, key, nonce, counter=0, rounds=20):
"""Set the initial state for the ChaCha cipher"""
if len(key) != 32:
raise ValueError("Key must be 256 bit long")
if len(nonce) != 12:
raise ValueError("Nonce must be 96 bit long")
self.key = []
self.nonce = []
self.counter = counter
self.rounds = rounds
# convert bytearray key and nonce to little endian 32 bit unsigned ints
self.key = ChaCha._bytearray_to_words(key)
self.nonce = ChaCha._bytearray_to_words(nonce)
def encrypt(self, plaintext):
"""Encrypt the data"""
encrypted_message = bytearray()
for i, block in enumerate(plaintext[i:i + 64] for i
in range(0, len(plaintext), 64)):
key_stream = ChaCha.chacha_block(self.key,
self.counter + i,
self.nonce,
self.rounds)
key_stream = ChaCha.word_to_bytearray(key_stream)
encrypted_message += bytearray(x ^ y for x, y
in zip(key_stream, block))
return encrypted_message
def decrypt(self, ciphertext):
"""Decrypt the data"""
return self.encrypt(ciphertext)
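# Illustrative usage sketch (not part of the original file): encrypt and decrypt
# a short message. The 256-bit key and 96-bit nonce below are throwaway
# demonstration values only; never reuse a key/nonce pair for real traffic.
if __name__ == "__main__":
    key = bytearray(range(32))           # 32-byte (256-bit) key
    nonce = bytearray(12)                # 12-byte (96-bit) nonce
    ciphertext = ChaCha(key, nonce).encrypt(bytearray(b"rock paper scissors"))
    plaintext = ChaCha(key, nonce).decrypt(ciphertext)  # same key/nonce/counter
    print(plaintext)                     # bytearray(b'rock paper scissors')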
| 30.206452
| 79
| 0.478855
|
ba0298a7bb0157d34c81b739bbf16f25b9160774
| 1,469
|
py
|
Python
|
napari/layers/image/_image_slice_data.py
|
Zac-HD/napari
|
102a7e8f845893c874d2b86f9371d41130100b89
|
[
"BSD-3-Clause"
] | 1
|
2021-04-24T10:10:54.000Z
|
2021-04-24T10:10:54.000Z
|
napari/layers/image/_image_slice_data.py
|
Zac-HD/napari
|
102a7e8f845893c874d2b86f9371d41130100b89
|
[
"BSD-3-Clause"
] | 17
|
2020-06-11T21:02:03.000Z
|
2021-02-02T19:10:19.000Z
|
napari/layers/image/_image_slice_data.py
|
Zac-HD/napari
|
102a7e8f845893c874d2b86f9371d41130100b89
|
[
"BSD-3-Clause"
] | null | null | null |
"""ImageSliceData class.
"""
from typing import Optional, Tuple
import numpy as np
from ...types import ArrayLike
from ..base import Layer
class ImageSliceData:
"""The contents of an ImageSlice.
Parameters
----------
layer : Layer
The layer that contains the data.
indices : Tuple[Optional[slice], ...]
The indices of this slice.
    image : ArrayLike
        The image to display in the slice.
    thumbnail_source : ArrayLike
The source used to create the thumbnail for the slice.
"""
def __init__(
self,
layer: Layer,
indices: Tuple[Optional[slice], ...],
image: ArrayLike,
thumbnail_source: ArrayLike,
):
self.layer = layer
self.indices = indices
self.image = image
self.thumbnail_source = thumbnail_source
def load_sync(self) -> None:
"""Call asarray on our images to load them."""
self.image = np.asarray(self.image)
if self.thumbnail_source is not None:
self.thumbnail_source = np.asarray(self.thumbnail_source)
def transpose(self, order: tuple) -> None:
"""Transpose our images.
Parameters
----------
order : tuple
Transpose the image into this order.
"""
self.image = self.image.transpose(order)
if self.thumbnail_source is not None:
self.thumbnail_source = self.thumbnail_source.transpose(order)
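# Illustrative usage sketch (not part of the original module): build a slice
# from plain numpy arrays. A real caller passes a napari Layer; None is used
# here purely as a stand-in so the example stays self-contained.
if __name__ == "__main__":
    data = np.random.random((4, 6))
    slice_data = ImageSliceData(
        layer=None,
        indices=(slice(None), slice(None)),
        image=data,
        thumbnail_source=data,
    )
    slice_data.load_sync()         # force ndarray materialization
    slice_data.transpose((1, 0))   # swap rows and columns of the 2D slice
    print(slice_data.image.shape)  # (6, 4)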
| 25.77193
| 74
| 0.6113
|
dd4515719a9fdf5f81888b16696e58683ea26054
| 487
|
py
|
Python
|
molo/profiles/migrations/0021_remove_uuid_null.py
|
Ishma59/molo
|
4fd31df9266bc251e09e9339a132d3ccd4143c69
|
[
"BSD-2-Clause"
] | 25
|
2015-09-26T13:45:30.000Z
|
2018-09-13T14:12:20.000Z
|
molo/profiles/migrations/0021_remove_uuid_null.py
|
Ishma59/molo
|
4fd31df9266bc251e09e9339a132d3ccd4143c69
|
[
"BSD-2-Clause"
] | 510
|
2015-05-29T09:30:44.000Z
|
2018-12-11T09:08:11.000Z
|
molo/profiles/migrations/0021_remove_uuid_null.py
|
Ishma59/molo
|
4fd31df9266bc251e09e9339a132d3ccd4143c69
|
[
"BSD-2-Clause"
] | 5
|
2020-03-26T19:30:13.000Z
|
2020-09-04T16:35:59.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-02 11:04
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('profiles', '0020_populate_uuid_values'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, unique=True),
),
]
| 22.136364
| 68
| 0.632444
|
8efb7e34ec6256962a89a3b298a1c102d0e5a6c9
| 1,541
|
py
|
Python
|
tests/test_metadata_filters_from_config.py
|
robocorp/jupytext
|
57c011db9055242bb59ca9dd1ee5ca4f7fab752b
|
[
"MIT"
] | 1
|
2021-12-27T16:26:14.000Z
|
2021-12-27T16:26:14.000Z
|
tests/test_metadata_filters_from_config.py
|
huangyingw/mwouts_jupytext
|
b72d03f39920333eb312d675a1ecd0fa7c2b549f
|
[
"MIT"
] | null | null | null |
tests/test_metadata_filters_from_config.py
|
huangyingw/mwouts_jupytext
|
b72d03f39920333eb312d675a1ecd0fa7c2b549f
|
[
"MIT"
] | null | null | null |
import nbformat
from nbformat.v4.nbbase import new_notebook, new_markdown_cell
from jupytext.cli import jupytext as jupytext_cli
from jupytext.compare import compare, compare_notebooks
def test_metadata_filters_from_config(tmpdir):
cfg_file = tmpdir.join("jupytext.toml")
nb_file = tmpdir.join("notebook.ipynb")
md_file = tmpdir.join("notebook.md")
cfg_file.write(
"""default_notebook_metadata_filter = "-all"
default_cell_metadata_filter = "-all"
"""
)
nb = new_notebook(
cells=[new_markdown_cell("A markdown cell")],
metadata={
"kernelspec": {
"display_name": "Python [conda env:.conda-week1]",
"language": "python",
"name": "conda-env-.conda-week1-py",
},
"language_info": {
"codemirror_mode": {"name": "ipython", "version": 3},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3",
},
"nbsphinx": {"execute": "never"},
},
)
nbformat.write(nb, str(nb_file))
jupytext_cli([str(nb_file), "--to", "md"])
md = md_file.read()
compare(md, "A markdown cell\n")
jupytext_cli([str(md_file), "--to", "notebook", "--update"])
nb2 = nbformat.read(str(nb_file), as_version=4)
del nb2.metadata["jupytext"]
compare_notebooks(nb2, nb)
| 31.44898
| 69
| 0.573005
|
8187efd769a87d281bc1ab77ea4b4b8bb0528ca5
| 19,069
|
py
|
Python
|
scipy/optimize/zeros.py
|
FRidh/scipy
|
dabfb4586e0b656b5f6da8b301643b918259e61f
|
[
"BSD-3-Clause"
] | 39
|
2016-11-08T11:24:30.000Z
|
2021-11-18T06:50:21.000Z
|
scipy/optimize/zeros.py
|
FRidh/scipy
|
dabfb4586e0b656b5f6da8b301643b918259e61f
|
[
"BSD-3-Clause"
] | 1
|
2015-09-30T05:26:54.000Z
|
2016-03-22T15:09:56.000Z
|
scipy/optimize/zeros.py
|
FRidh/scipy
|
dabfb4586e0b656b5f6da8b301643b918259e61f
|
[
"BSD-3-Clause"
] | 13
|
2017-04-08T08:03:12.000Z
|
2021-08-25T08:38:52.000Z
|
from __future__ import division, print_function, absolute_import
import warnings
from . import _zeros
from numpy import finfo, sign, sqrt
_iter = 100
_xtol = 1e-12
_rtol = finfo(float).eps * 2
__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth']
CONVERGED = 'converged'
SIGNERR = 'sign error'
CONVERR = 'convergence error'
flag_map = {0: CONVERGED, -1: SIGNERR, -2: CONVERR}
class RootResults(object):
""" Represents the root finding result.
Attributes
----------
root : float
Estimated root location.
iterations : int
Number of iterations needed to find the root.
function_calls : int
Number of times the function was called.
converged : bool
True if the routine converged.
flag : str
Description of the cause of termination.
"""
def __init__(self, root, iterations, function_calls, flag):
self.root = root
self.iterations = iterations
self.function_calls = function_calls
self.converged = flag == 0
try:
self.flag = flag_map[flag]
except KeyError:
self.flag = 'unknown error %d' % (flag,)
def results_c(full_output, r):
if full_output:
x, funcalls, iterations, flag = r
results = RootResults(root=x,
iterations=iterations,
function_calls=funcalls,
flag=flag)
return x, results
else:
return r
# Newton-Raphson method
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
fprime2=None):
"""
Find a zero using the Newton-Raphson or secant method.
Find a zero of the function `func` given a nearby starting point `x0`.
The Newton-Raphson method is used if the derivative `fprime` of `func`
is provided, otherwise the secant method is used. If the second order
    derivative `fprime2` of `func` is provided, parabolic Halley's method
is used.
Parameters
----------
func : function
The function whose zero is wanted. It must be a function of a
single variable of the form f(x,a,b,c...), where a,b,c... are extra
arguments that can be passed in the `args` parameter.
x0 : float
An initial estimate of the zero that should be somewhere near the
actual zero.
fprime : function, optional
The derivative of the function when available and convenient. If it
is None (default), then the secant method is used.
args : tuple, optional
Extra arguments to be used in the function call.
tol : float, optional
The allowable error of the zero value.
maxiter : int, optional
Maximum number of iterations.
fprime2 : function, optional
The second order derivative of the function when available and
convenient. If it is None (default), then the normal Newton-Raphson
or the secant method is used. If it is given, parabolic Halley's
method is used.
Returns
-------
zero : float
Estimated location where function is zero.
See Also
--------
brentq, brenth, ridder, bisect
fsolve : find zeroes in n dimensions.
Notes
-----
The convergence rate of the Newton-Raphson method is quadratic,
the Halley method is cubic, and the secant method is
sub-quadratic. This means that if the function is well behaved
the actual error in the estimated zero is approximately the square
(cube for Halley) of the requested tolerance up to roundoff
error. However, the stopping criterion used here is the step size
and there is no guarantee that a zero has been found. Consequently
the result should be verified. Safer algorithms are brentq,
brenth, ridder, and bisect, but they all require that the root
first be bracketed in an interval where the function changes
sign. The brentq algorithm is recommended for general use in one
dimensional problems when such an interval has been found.
"""
if tol <= 0:
raise ValueError("tol too small (%g <= 0)" % tol)
if fprime is not None:
        # Newton-Raphson method
# Multiply by 1.0 to convert to floating point. We don't use float(x0)
# so it still works if x0 is complex.
p0 = 1.0 * x0
fder2 = 0
for iter in range(maxiter):
myargs = (p0,) + args
fder = fprime(*myargs)
if fder == 0:
msg = "derivative was zero."
warnings.warn(msg, RuntimeWarning)
return p0
fval = func(*myargs)
if fprime2 is not None:
fder2 = fprime2(*myargs)
if fder2 == 0:
# Newton step
p = p0 - fval / fder
else:
# Parabolic Halley's method
discr = fder ** 2 - 2 * fval * fder2
if discr < 0:
p = p0 - fder / fder2
else:
p = p0 - 2*fval / (fder + sign(fder) * sqrt(discr))
if abs(p - p0) < tol:
return p
p0 = p
else:
# Secant method
p0 = x0
if x0 >= 0:
p1 = x0*(1 + 1e-4) + 1e-4
else:
p1 = x0*(1 + 1e-4) - 1e-4
q0 = func(*((p0,) + args))
q1 = func(*((p1,) + args))
for iter in range(maxiter):
if q1 == q0:
if p1 != p0:
msg = "Tolerance of %s reached" % (p1 - p0)
warnings.warn(msg, RuntimeWarning)
return (p1 + p0)/2.0
else:
p = p1 - q1*(p1 - p0)/(q1 - q0)
if abs(p - p1) < tol:
return p
p0 = p1
q0 = q1
p1 = p
q1 = func(*((p1,) + args))
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg)
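# Illustrative usage sketch (not part of the SciPy source): the hypothetical
# helper below finds the positive root of x**2 - 2 with the secant method and
# with an analytic derivative (Newton-Raphson).
def _demo_newton():
    f = lambda x: x ** 2 - 2
    secant_root = newton(f, 1.0)                           # secant (no fprime)
    newton_root = newton(f, 1.0, fprime=lambda x: 2 * x)   # Newton-Raphson
    return secant_root, newton_root                        # both ~= 1.41421356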
def bisect(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find root of a function within an interval.
Basic bisection routine to find a zero of the function `f` between the
arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
Slow but sure.
Parameters
----------
f : function
Python function returning a number. `f` must be continuous, and
f(a) and f(b) must have opposite signs.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
xtol : number, optional
The routine converges when a root is known to lie within `xtol` of the
        value returned. Should be >= 0. The routine modifies this to take into
account the relative precision of doubles.
rtol : number, optional
The routine converges when a root is known to lie within `rtol` times
        the value returned. Should be >= 0. Defaults to
``np.finfo(float).eps * 2``.
maxiter : number, optional
if convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
`f` is called by ``apply(f, (x)+args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where x is the root, and r is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
See Also
--------
brentq, brenth, bisect, newton
fixed_point : scalar fixed-point finder
fsolve : n-dimensional root-finding
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._bisect(f,a,b,xtol,rtol,maxiter,args,full_output,disp)
return results_c(full_output, r)
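# Illustrative usage sketch (not part of the SciPy source): the hypothetical
# helper below brackets the root of x**3 - 1 between 0 and 2, where
# f(0) < 0 < f(2).
def _demo_bisect():
    return bisect(lambda x: x ** 3 - 1, 0, 2)  # ~= 1.0, to within xtol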
def ridder(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find a root of a function in an interval.
Parameters
----------
f : function
Python function returning a number. f must be continuous, and f(a) and
f(b) must have opposite signs.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
xtol : number, optional
The routine converges when a root is known to lie within xtol of the
        value returned. Should be >= 0. The routine modifies this to take into
account the relative precision of doubles.
rtol : number, optional
The routine converges when a root is known to lie within `rtol` times
        the value returned. Should be >= 0. Defaults to
``np.finfo(float).eps * 2``.
maxiter : number, optional
if convergence is not achieved in maxiter iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
`f` is called by ``apply(f, (x)+args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a RootResults object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence.
In particular, ``r.converged`` is True if the routine converged.
See Also
--------
brentq, brenth, bisect, newton : one-dimensional root-finding
fixed_point : scalar fixed-point finder
Notes
-----
Uses [Ridders1979]_ method to find a zero of the function `f` between the
arguments `a` and `b`. Ridders' method is faster than bisection, but not
    generally as fast as the Brent routines. [Ridders1979]_ provides the
classic description and source of the algorithm. A description can also be
found in any recent edition of Numerical Recipes.
The routine used here diverges slightly from standard presentations in
order to be a bit more careful of tolerance.
References
----------
.. [Ridders1979]
Ridders, C. F. J. "A New Algorithm for Computing a
Single Root of a Real Continuous Function."
IEEE Trans. Circuits Systems 26, 979-980, 1979.
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._ridder(f,a,b,xtol,rtol,maxiter,args,full_output,disp)
return results_c(full_output, r)
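# Illustrative usage sketch (not part of the SciPy source): the hypothetical
# helper below applies Ridders' method to cos(x) - x on [0, 1], where the sign
# change brackets the root.
def _demo_ridder():
    import math
    return ridder(lambda x: math.cos(x) - x, 0, 1)  # ~= 0.739085 (Dottie number)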
def brentq(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
    Find a root of a function in a given interval.
Return float, a zero of `f` between `a` and `b`. `f` must be a continuous
function, and [a,b] must be a sign changing interval.
Description:
Uses the classic Brent (1973) method to find a zero of the function `f` on
the sign changing interval [a , b]. Generally considered the best of the
rootfinding routines here. It is a safe version of the secant method that
uses inverse quadratic extrapolation. Brent's method combines root
bracketing, interval bisection, and inverse quadratic interpolation. It is
sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973)
claims convergence is guaranteed for functions computable within [a,b].
[Brent1973]_ provides the classic description of the algorithm. Another
description can be found in a recent edition of Numerical Recipes, including
[PressEtal1992]_. Another description is at
http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to
understand the algorithm just by reading our code. Our code diverges a bit
from standard presentations: we choose a different formula for the
extrapolation step.
Parameters
----------
f : function
Python function returning a number. f must be continuous, and f(a) and
f(b) must have opposite signs.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
xtol : number, optional
        The routine converges when a root is known to lie within `xtol` of the
        value returned. Should be >= 0. The routine modifies this to take into
account the relative precision of doubles.
rtol : number, optional
The routine converges when a root is known to lie within `rtol` times
        the value returned. Should be >= 0. Defaults to
``np.finfo(float).eps * 2``.
maxiter : number, optional
if convergence is not achieved in maxiter iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a RootResults object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
See Also
--------
multivariate local optimizers
`fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
nonlinear least squares minimizer
`leastsq`
constrained multivariate optimizers
`fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
global optimizers
`basinhopping`, `brute`, `differential_evolution`
local scalar minimizers
`fminbound`, `brent`, `golden`, `bracket`
n-dimensional root-finding
`fsolve`
one-dimensional root-finding
`brentq`, `brenth`, `ridder`, `bisect`, `newton`
scalar fixed-point finder
`fixed_point`
Notes
-----
`f` must be continuous. f(a) and f(b) must have opposite signs.
References
----------
.. [Brent1973]
Brent, R. P.,
*Algorithms for Minimization Without Derivatives*.
Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.
.. [PressEtal1992]
Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
*Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
Section 9.3: "Van Wijngaarden-Dekker-Brent Method."
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._brentq(f,a,b,xtol,rtol,maxiter,args,full_output,disp)
return results_c(full_output, r)
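# --- Hedged usage sketch (editor's addition): passing extra arguments through
# `args`, as described in the docstring above. The quadratic is an assumption.
if __name__ == "__main__":
    def _shifted_square(x, c):
        return x * x - c
    # Solve x**2 - 2 == 0 on [0, 2]; f(0) < 0 < f(2), so the bracket is valid.
    print("brentq: sqrt(2) ~= %.12f"
          % brentq(_shifted_square, 0.0, 2.0, args=(2.0,)))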
def brenth(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""Find root of f in [a,b].
A variation on the classic Brent routine to find a zero of the function f
between the arguments a and b that uses hyperbolic extrapolation instead of
inverse quadratic extrapolation. There was a paper back in the 1980's ...
f(a) and f(b) cannot have the same signs. Generally on a par with the
brent routine, but not as heavily tested. It is a safe version of the
secant method that uses hyperbolic extrapolation. The version here is by
Chuck Harris.
Parameters
----------
f : function
Python function returning a number. f must be continuous, and f(a) and
f(b) must have opposite signs.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
xtol : number, optional
        The routine converges when a root is known to lie within `xtol` of the
        value returned. Should be >= 0. The routine modifies this to take into
account the relative precision of doubles.
rtol : number, optional
The routine converges when a root is known to lie within `rtol` times
        the value returned. Should be >= 0. Defaults to
``np.finfo(float).eps * 2``.
maxiter : number, optional
if convergence is not achieved in maxiter iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a RootResults object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
See Also
--------
fmin, fmin_powell, fmin_cg,
fmin_bfgs, fmin_ncg : multivariate local optimizers
leastsq : nonlinear least squares minimizer
fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers
basinhopping, differential_evolution, brute : global optimizers
fminbound, brent, golden, bracket : local scalar minimizers
fsolve : n-dimensional root-finding
brentq, brenth, ridder, bisect, newton : one-dimensional root-finding
fixed_point : scalar fixed-point finder
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._brenth(f,a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
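# --- Hedged usage sketch (editor's addition): brenth and brentq should agree
# closely on a well-behaved bracket; the cubic below is an assumption.
if __name__ == "__main__":
    def _cubic(x):
        return x * (x - 1.0) * (x - 2.0) - 0.25
    root_h = brenth(_cubic, 2.0, 3.0)
    root_q = brentq(_cubic, 2.0, 3.0)
    print("brenth %.12f vs brentq %.12f (|diff| = %.3e)"
          % (root_h, root_q, abs(root_h - root_q)))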
| 37.244141
| 80
| 0.620746
|
43ae3fe092175be20a3ccb12dbd73ed4aba778ba
| 1,454
|
py
|
Python
|
linux_binary_exploit/canary_bypass02/canary_bypass02_exploit_v01.py
|
greyshell/Penetration-Test
|
d73b0c6dc01833f69522ab2fc97d683c8b808fca
|
[
"MIT"
] | 13
|
2018-12-31T06:33:38.000Z
|
2020-01-25T07:51:50.000Z
|
linux_binary_exploit/canary_bypass02/canary_bypass02_exploit_v01.py
|
greyshell/Penetration-Test
|
d73b0c6dc01833f69522ab2fc97d683c8b808fca
|
[
"MIT"
] | 1
|
2018-12-13T08:55:17.000Z
|
2019-04-12T05:38:44.000Z
|
linux_binary_exploit/canary_bypass02/canary_bypass02_exploit_v01.py
|
greyshell/Penetration-Test
|
d73b0c6dc01833f69522ab2fc97d683c8b808fca
|
[
"MIT"
] | 4
|
2019-04-03T12:12:28.000Z
|
2019-11-03T19:31:21.000Z
|
#!/usr/bin/env python3
# author: greyshell
from pwn import *
from pwn_utils import PwnUtils
def exploit(conn):
"""
interact with the binary with some valid input
"""
conn.recvuntil("name:\n") # receive bytes till name:
input_name = "A" * 23 # sendline() will add \n at the end
conn.sendline(input_name) # \n will be added in the last and it will be treated as null byte
conn.recvuntil("description:\n") # receive bytes till description:
input_des = "B" * 7 # sendline() will add \n at the end
conn.sendline(input_des)
# make the connection interactive
conn.interactive()
def main():
my_input = PwnUtils()
arguments = my_input.parser.parse_args()
connection = ""
# run the script without any argument
if len(sys.argv) == 1:
my_input.parser.print_help(sys.stderr)
sys.exit(1)
# exploiting local binary
if arguments.command == 'local':
binary_name = "./"
binary_name += arguments.binary
connection = process([binary_name])
# attach the binary with gdb in tmux session
if arguments.gdb == 'true':
gdb.attach(connection)
elif arguments.command == 'network':
connection = remote(arguments.ip_address, arguments.port)
if arguments.debug_mode == 'true':
context.log_level = 'debug'
# invoke the exploit function
exploit(connection)
if __name__ == '__main__':
main()
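# --- Hedged usage note (editor's addition): main() above dispatches on the
# 'local' and 'network' subcommands provided by PwnUtils; the exact flag
# spellings live in pwn_utils.py and are not shown here.
#   local   : spawn the target binary locally (optionally attaching gdb in tmux)
#   network : connect to a remote ip_address/port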
| 25.964286
| 97
| 0.643741
|
8b855f66c905ebdf0c626cb20317269dca321624
| 5,306
|
py
|
Python
|
tools/valgrind/common.py
|
Gitman1989/chromium
|
2b1cceae1075ef012fb225deec8b4c8bbe4bc897
|
[
"BSD-3-Clause"
] | 2
|
2017-09-02T19:08:28.000Z
|
2021-11-15T15:15:14.000Z
|
tools/valgrind/common.py
|
Gitman1989/chromium
|
2b1cceae1075ef012fb225deec8b4c8bbe4bc897
|
[
"BSD-3-Clause"
] | null | null | null |
tools/valgrind/common.py
|
Gitman1989/chromium
|
2b1cceae1075ef012fb225deec8b4c8bbe4bc897
|
[
"BSD-3-Clause"
] | 1
|
2020-04-13T05:45:10.000Z
|
2020-04-13T05:45:10.000Z
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import signal
import subprocess
import sys
import time
class NotImplementedError(Exception):
pass
class TimeoutError(Exception):
pass
def _print_line(line, flush=True):
# Printing to a text file (including stdout) on Windows always winds up
# using \r\n automatically. On buildbot, this winds up being read by a master
# running on Linux, so we manually convert crlf to '\n'
print line.rstrip() + '\n',
if flush:
sys.stdout.flush()
def RunSubprocessInBackground(proc):
"""Runs a subprocess in the background. Returns a handle to the process."""
logging.info("running %s in the background" % " ".join(proc))
return subprocess.Popen(proc)
def RunSubprocess(proc, timeout=0, detach=False, background=False):
""" Runs a subprocess, until it finishes or |timeout| is exceeded and the
process is killed with taskkill. A |timeout| <= 0 means no timeout.
Args:
proc: list of process components (exe + args)
timeout: how long to wait before killing, <= 0 means wait forever
detach: Whether to pass the DETACHED_PROCESS argument to CreateProcess
on Windows. This is used by Purify subprocesses on buildbot which
seem to get confused by the parent console that buildbot sets up.
"""
logging.info("running %s, timeout %d sec" % (" ".join(proc), timeout))
if detach:
# see MSDN docs for "Process Creation Flags"
DETACHED_PROCESS = 0x8
p = subprocess.Popen(proc, creationflags=DETACHED_PROCESS)
else:
# For non-detached processes, manually read and print out stdout and stderr.
# By default, the subprocess is supposed to inherit these from its parent,
# however when run under buildbot, it seems unable to read data from a
# grandchild process, so we have to read the child and print the data as if
# it came from us for buildbot to read it. We're not sure why this is
# necessary.
# TODO(erikkay): should we buffer stderr and stdout separately?
p = subprocess.Popen(proc, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
logging.info("started subprocess")
# How long to wait (in seconds) before printing progress log messages.
progress_delay = 300
progress_delay_time = time.time() + progress_delay
did_timeout = False
if timeout > 0:
wait_until = time.time() + timeout
while p.poll() is None and not did_timeout:
if not detach:
line = p.stdout.readline()
while line and not did_timeout:
_print_line(line)
line = p.stdout.readline()
if timeout > 0:
did_timeout = time.time() > wait_until
else:
# When we detach, blocking on reading stdout doesn't work, so we sleep
# a short time and poll.
time.sleep(0.5)
if time.time() >= progress_delay_time:
# Force output on a periodic basis to avoid getting killed off by the
# buildbot.
# TODO(erikkay): I'd prefer a less obtrusive 'print ".",' with a flush
# but because of how we're doing subprocesses, this doesn't appear to
# work reliably.
logging.info("%s still running..." % os.path.basename(proc[0]))
progress_delay_time = time.time() + progress_delay
if timeout > 0:
did_timeout = time.time() > wait_until
if did_timeout:
logging.info("process timed out")
else:
logging.info("process ended, did not time out")
if did_timeout:
if IsWindows():
subprocess.call(["taskkill", "/T", "/F", "/PID", str(p.pid)])
else:
# Does this kill all children, too?
os.kill(p.pid, signal.SIGINT)
logging.error("KILLED %d" % p.pid)
# Give the process a chance to actually die before continuing
# so that cleanup can happen safely.
time.sleep(1.0)
logging.error("TIMEOUT waiting for %s" % proc[0])
raise TimeoutError(proc[0])
elif not detach:
for line in p.stdout.readlines():
_print_line(line, False)
if not IsMac(): # stdout flush fails on Mac
logging.info("flushing stdout")
p.stdout.flush()
logging.info("collecting result code")
result = p.poll()
if result:
logging.error("%s exited with non-zero result code %d" % (proc[0], result))
return result
def IsLinux():
return sys.platform.startswith('linux')
def IsMac():
return sys.platform.startswith('darwin')
def IsWindows():
return sys.platform == 'cygwin' or sys.platform.startswith('win')
def IsWine():
"""This needs to be set by the script that starts the buildbot, i.e.
/etc/init.d/buildbot, or manually on the command line."""
return (os.environ.get('WINE') and
os.environ.get('WINEPREFIX') and
os.environ.get('WINESERVER'))
def PlatformNames():
"""Return an array of string to be used in paths for the platform
(e.g. suppressions, gtest filters, ignore files etc.)
The first element of the array describes the 'main' platform
"""
# This has to be before IsLinux()
if IsWine():
return ['wine', 'win32']
if IsLinux():
return ['linux']
if IsMac():
return ['mac']
if IsWindows():
return ['win32']
raise NotImplementedError('Unknown platform "%s".' % sys.platform)
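# --- Hedged usage sketch (editor's addition, not part of the original file):
# a minimal RunSubprocess call as documented above. The command line is an
# illustrative assumption; the file targets Python 2, so a print statement is
# used to match.
if __name__ == '__main__':
  logging.basicConfig(level=logging.INFO)
  # Run a trivial child process with a 30 second timeout; TimeoutError is
  # raised if it does not finish in time.
  result_code = RunSubprocess([sys.executable, '-c', 'pass'], timeout=30)
  print 'RunSubprocess returned', result_code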
| 33.1625
| 80
| 0.680173
|
95032c3d66297ed95afc364ca2f22262ae563e22
| 1,837
|
py
|
Python
|
var/spack/repos/builtin/packages/rocprim/package.py
|
healther/spack
|
389e9c11f6927ea27d629ed0e77ca0a52e402bc9
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/rocprim/package.py
|
abouteiller/spack
|
95f54195021d3d32dec75bed6d8dbbaeac3d921f
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/rocprim/package.py
|
abouteiller/spack
|
95f54195021d3d32dec75bed6d8dbbaeac3d921f
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Rocprim(CMakePackage):
""" Radeon Open Compute Parallel Primitives Library"""
homepage = "https://github.com/ROCmSoftwarePlatform/rocPRIM"
url = "https://github.com/ROCmSoftwarePlatform/rocPRIM/archive/rocm-3.8.0.tar.gz"
maintainers = ['srekolam', 'arjun-raj-kuppala']
version('3.9.0', sha256='ace6b4ee4b641280807028375cb0e6fa7b296edba9e9fc09177a5d8d075a716e')
version('3.8.0', sha256='4d37320d174eaada99dd796d81fa97d5dcc65a6dff8e8ff1c21e8e68acb4ea74')
version('3.7.0', sha256='225209a0cbd003c241821c8a9192cec5c07c7f1a6ab7da296305fc69f5f6d365')
version('3.5.0', sha256='29302dbeb27ae88632aa1be43a721f03e7e597c329602f9ca9c9c530c1def40d')
variant('build_type', default='Release', values=("Release", "Debug"), description='CMake build type')
depends_on('cmake@3:', type='build')
depends_on('numactl', type='link', when='@3.7.0:')
for ver in ['3.5.0', '3.7.0', '3.8.0', '3.9.0']:
depends_on('hip@' + ver, type='build', when='@' + ver)
depends_on('rocm-device-libs@' + ver, type='build', when='@' + ver)
depends_on('comgr@' + ver, type='build', when='@' + ver)
depends_on('hsa-rocr-dev@' + ver, type='build', when='@' + ver)
def setup_build_environment(self, env):
env.set('CXX', self.spec['hip'].hipcc)
def cmake_args(self):
spec = self.spec
args = [
'-DCMAKE_MODULE_PATH={0}/cmake'.format(spec['hip'].prefix),
'-DONLY_INSTALL=ON',
'-DBUILD_TEST=OFF',
'-DBUILD_BENCHMARK=OFF',
'-DBUILD_EXAMPLE=OFF'
]
return args
| 39.085106
| 105
| 0.658138
|
47688fc6d84482d1e985c9692b4ba306bad4ca1e
| 843
|
py
|
Python
|
salt/matchers/pillar_exact_match.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 5
|
2017-02-07T05:39:29.000Z
|
2020-06-13T02:07:33.000Z
|
salt/matchers/pillar_exact_match.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 86
|
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
salt/matchers/pillar_exact_match.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 11
|
2017-01-26T19:36:29.000Z
|
2021-12-11T07:54:16.000Z
|
# -*- coding: utf-8 -*-
'''
This is the default pillar exact matcher.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.utils.data # pylint: disable=3rd-party-module-not-gated
log = logging.getLogger(__name__)
def match(tgt, delimiter=':', opts=None):
'''
Reads in the pillar match, no globbing, no PCRE
'''
if not opts:
opts = __opts__
log.debug('pillar target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.data.subdict_match(opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
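# --- Hedged usage sketch (editor's addition): exercising match() with an
# explicit opts dict instead of the loader-injected __opts__. The pillar data
# and expected outcomes are assumptions sketched from the docstring above
# ("no globbing, no PCRE").
if __name__ == '__main__':
    demo_opts = {'pillar': {'role': 'web', 'region': {'dc': 'us-east'}}}
    print(match('role:web', opts=demo_opts))           # exact key/value match
    print(match('region:dc:us-east', opts=demo_opts))  # nested lookup via delimiter
    print(match('role:web*', opts=demo_opts))          # glob is not expanded in exact match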
| 29.068966
| 72
| 0.576512
|
49a77ac89b6804d8a8508f71b16094f6fd393f0b
| 600
|
py
|
Python
|
swaps/service/trade/post_batch_cancel_open_order.py
|
DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur
|
1120ebfb487ce4987fe70e6645b36e0d7ce041ec
|
[
"Apache-2.0"
] | 1
|
2021-09-06T00:09:11.000Z
|
2021-09-06T00:09:11.000Z
|
swaps/service/trade/post_batch_cancel_open_order.py
|
DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur
|
1120ebfb487ce4987fe70e6645b36e0d7ce041ec
|
[
"Apache-2.0"
] | null | null | null |
swaps/service/trade/post_batch_cancel_open_order.py
|
DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur
|
1120ebfb487ce4987fe70e6645b36e0d7ce041ec
|
[
"Apache-2.0"
] | null | null | null |
from swaps.connection.restapi_sync_client import RestApiSyncClient
from swaps.constant import *
from swaps.model.trade import *
from swaps.utils import *
class PostBatchCancelOpenOrderService:
def __init__(self, params):
self.params = params
def request(self, **kwargs):
channel = "/v1/order/orders/batchCancelOpenOrders"
def parse(dict_data):
data = dict_data.get("data", {})
return default_parse(data, BatchCancelCount)
return RestApiSyncClient(**kwargs).request_process(HttpMethod.POST_SIGN, channel, self.params, parse)
| 23.076923
| 109
| 0.706667
|
27bd9cc9992aa121cac4f57c4c11da2c23bb7106
| 28,233
|
py
|
Python
|
Yank/tests/test_restraints.py
|
lilyminium/yank
|
aef24d9f413e646a3c4e65581198e0b67ae7a21b
|
[
"MIT"
] | 136
|
2015-02-16T12:24:02.000Z
|
2022-03-17T11:15:11.000Z
|
Yank/tests/test_restraints.py
|
lilyminium/yank
|
aef24d9f413e646a3c4e65581198e0b67ae7a21b
|
[
"MIT"
] | 1,024
|
2015-01-08T22:13:51.000Z
|
2022-03-25T19:33:33.000Z
|
Yank/tests/test_restraints.py
|
lilyminium/yank
|
aef24d9f413e646a3c4e65581198e0b67ae7a21b
|
[
"MIT"
] | 76
|
2015-01-12T23:58:00.000Z
|
2022-03-19T23:33:17.000Z
|
#!/usr/bin/python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Test restraints module.
"""
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import os
import math
import copy
import numpy as np
from simtk import openmm, unit
import openmmtools as mmtools
from openmmtools import testsystems, states, multistate
import nose
from nose.plugins.attrib import attr
import yank.restraints
from yank import experiment, Topography
from yank.analyze import YankMultiStateSamplerAnalyzer
from yank.utils import get_data_filename
OpenMM73 = yank.restraints.OpenMM73 # TODO: Document this
# =============================================================================================
# UNIT TESTS
# =============================================================================================
class HostGuestNoninteracting(testsystems.HostGuestVacuum):
"""CB7:B2 host-guest system in vacuum with no nonbonded interactions.
Parameters
----------
Same as HostGuestVacuum
Examples
--------
Create host:guest system with no nonbonded interactions.
    >>> testsystem = HostGuestNoninteracting()
>>> system, positions = testsystem.system, testsystem.positions
Properties
----------
receptor_atoms : list of int
Indices of receptor atoms
ligand_atoms : list of int
Indices of ligand atoms
"""
def __init__(self, **kwargs):
super(HostGuestNoninteracting, self).__init__(**kwargs)
# Store receptor and ligand atom indices
self.receptor_atoms = range(0, 126)
self.ligand_atoms = range(126, 156)
# Remove nonbonded interactions
force_indices = {self.system.getForce(index).__class__.__name__: index
for index in range(self.system.getNumForces())}
self.system.removeForce(force_indices['NonbondedForce'])
@staticmethod
def build_test_case():
"""Create a new ThermodynamicState, SamplerState and Topography."""
# Create a test system
t = HostGuestNoninteracting()
# Create states and topography encoding the info to determine the parameters.
topography = Topography(t.topology, ligand_atoms='resname B2')
sampler_state = states.SamplerState(positions=t.positions)
thermodynamic_state = states.ThermodynamicState(system=t.system, temperature=300.0*unit.kelvin)
return thermodynamic_state, sampler_state, topography
expected_restraints = {
'Harmonic': yank.restraints.Harmonic,
'FlatBottom': yank.restraints.FlatBottom,
'Boresch': yank.restraints.Boresch,
'PeriodicTorsionBoresch': yank.restraints.PeriodicTorsionBoresch,
'RMSD': yank.restraints.RMSD,
}
restraint_test_yaml = """
---
options:
minimize: no
verbose: no
output_dir: %(output_directory)s
temperature: 300*kelvin
pressure: null
anisotropic_dispersion_cutoff: null
platform: CPU
hydrogen_mass: 3*amu
mcmc_moves:
langevin:
type: LangevinSplittingDynamicsMove
timestep: 4.0*femtoseconds
collision_rate: 1.0 / picosecond
n_steps: 50
reassign_velocities: yes
n_restart_attempts: 4
splitting: 'V R O R V'
samplers:
sams:
type: SAMSSampler
mcmc_moves: langevin
number_of_iterations: %(number_of_iter)s
state_update_scheme: global-jump
gamma0: 2.0
flatness_threshold: 5.0
online_analysis_interval: 200
online_analysis_minimum_iterations: 50
online_analysis_target_error: 0.1
repex:
type: ReplicaExchangeSampler
mcmc_moves: langevin
number_of_iterations: %(number_of_iter)s
online_analysis_interval: 50
online_analysis_minimum_iterations: 25
online_analysis_target_error: 0.1
solvents:
vacuum:
nonbonded_method: PME
nonbonded_cutoff: 0.59 * nanometer
systems:
ship:
phase1_path: [%(input_directory)s/benzene-toluene-standard-state/standard_state_complex.inpcrd, %(input_directory)s/benzene-toluene-standard-state/standard_state_complex.prmtop]
phase2_path: [%(input_directory)s/benzene-toluene-standard-state/standard_state_complex.inpcrd, %(input_directory)s/benzene-toluene-standard-state/standard_state_complex.prmtop]
ligand_dsl: resname ene
solvent: vacuum
protocols:
absolute-binding:
complex:
alchemical_path:
lambda_restraints: [0.0, 0.05, 0.10, 0.15, 0.25, 0.5, 0.75, 1.0]
lambda_electrostatics: [0.0, 0.00, 0.00, 0.00, 0.00, 0.0, 0.00, 0.0]
lambda_sterics: [0.0, 0.00, 0.00, 0.00, 0.00, 0.0, 0.00, 0.0]
solvent:
alchemical_path:
lambda_electrostatics: [0.0, 0.0]
lambda_sterics: [0.0, 0.0]
experiments:
system: ship
sampler: repex
protocol: absolute-binding
restraint:
type: %(restraint_type)s
"""
def general_restraint_run(options):
"""
Generalized restraint simulation run to test free energy = standard state correction.
options : Dict. A dictionary of substitutions for restraint_test_yaml
"""
with mmtools.utils.temporary_directory() as output_directory:
# TODO refactor this to use AlchemicalPhase API rather than a YAML script.
options['input_directory'] = get_data_filename(os.path.join('tests', 'data'))
options['output_directory'] = output_directory
# run both setup and experiment
yaml_builder = experiment.ExperimentBuilder(restraint_test_yaml % options)
yaml_builder.run_experiments()
# Estimate Free Energies
ncfile_path = os.path.join(output_directory, 'experiments', 'complex.nc')
reporter = multistate.MultiStateReporter(ncfile_path, open_mode='r')
#analyzer = multistate.MultiStateSamplerAnalyzer(reporter)
analyzer = YankMultiStateSamplerAnalyzer(reporter)
Deltaf_ij, dDeltaf_ij = analyzer.get_free_energy()
# Correct the sign for the fact that we are adding vs removing the restraints
DeltaF_simulated = Deltaf_ij[-1, 0]
dDeltaF_simulated = dDeltaf_ij[-1, 0]
print('Standard state correction:')
#ncfile = netcdf.Dataset(ncfile_path, 'r')
#print(ncfile.groups['metadata'].variables['standard_state_correction'][:])
#print(float(ncfile.groups['metadata'].variables['standard_state_correction'][:]))
#ncfile.close()
DeltaF_restraints = analyzer.get_standard_state_correction()
# Check if they are close
msg = ''
msg += 'Computed: %8.3f kT\n' % (DeltaF_restraints)
msg += 'Actual: %8.3f +- %8.3f kT\n' % (DeltaF_simulated, dDeltaF_simulated)
msg += 'ERROR: %8.3f +- %8.3f kT\n' % (DeltaF_restraints - DeltaF_simulated, dDeltaF_simulated)
# DEBUG
print(msg)
assert np.allclose(DeltaF_restraints, DeltaF_simulated, rtol=2*dDeltaF_simulated), 'Standard state correction is inaccurate.\n' + msg
@attr('slow') # Skip on Travis-CI
def test_harmonic_free_energy():
"""
Test that the harmonic restraint simulated free energy equals the standard state correction
"""
options = {'number_of_iter': '1000',
'restraint_type': 'Harmonic'}
general_restraint_run(options)
@attr('slow') # Skip on Travis-CI
def test_flat_bottom_free_energy():
"""
    Test that the flat-bottom restraint simulated free energy equals the standard state correction
"""
options = {'number_of_iter': '1000',
'restraint_type': 'FlatBottom'}
general_restraint_run(options)
@attr('slow') # Skip on Travis-CI
def test_Boresch_free_energy():
"""
    Test that the Boresch restraint simulated free energy equals the standard state correction
"""
# These need more samples to converge
options = {'number_of_iter': '1000',
'restraint_type': 'Boresch'}
general_restraint_run(options)
@attr('slow') # Skip on Travis-CI
def test_PeriodicTorsionBoresch_free_energy():
"""
    Test that the periodic torsion Boresch restraint simulated free energy equals the standard state correction
"""
# These need more samples to converge
options = {'number_of_iter': '1000',
'restraint_type': 'PeriodicTorsionBoresch'}
general_restraint_run(options)
def test_harmonic_standard_state_analytical():
"""
Perform some analytical tests of the Harmonic standard state correction.
Also ensures that PBC is being handled correctly
"""
LJ_fluid = testsystems.LennardJonesFluid()
# Create Harmonic restraint.
restraint = yank.restraints.create_restraint('Harmonic', restrained_receptor_atoms=1)
# Determine other parameters.
ligand_atoms = [3, 4, 5]
topography = Topography(LJ_fluid.topology, ligand_atoms=ligand_atoms)
sampler_state = states.SamplerState(positions=LJ_fluid.positions)
thermodynamic_state = states.ThermodynamicState(system=LJ_fluid.system,
temperature=300.0 * unit.kelvin)
restraint.determine_missing_parameters(thermodynamic_state, sampler_state, topography)
spring_constant = restraint.spring_constant
# Compute standard-state volume for a single molecule in a box of size (1 L) / (avogadros number)
liter = 1000.0 * unit.centimeters ** 3 # one liter
box_volume = liter / (unit.AVOGADRO_CONSTANT_NA * unit.mole) # standard state volume
analytical_shell_volume = (2 * math.pi / (spring_constant * thermodynamic_state.beta))**(3.0/2)
analytical_standard_state_G = - math.log(box_volume / analytical_shell_volume)
restraint_standard_state_G = restraint.get_standard_state_correction(thermodynamic_state)
np.testing.assert_allclose(analytical_standard_state_G, restraint_standard_state_G)
def test_BoreschLike_standard_state_analytical():
"""
Perform some analytical tests of the Boresch standard state correction.
Also ensures that PBC is being handled correctly
"""
LJ_fluid = testsystems.LennardJonesFluid()
# Define receptor and ligand atoms
receptor_atoms = [0, 1, 2]
ligand_atoms = [3, 4, 5]
# Create restraint
K_r = 1.0*unit.kilocalories_per_mole/unit.angstrom**2
r_0 = 0.0*unit.angstrom
K_theta = 0.0*unit.kilocalories_per_mole/unit.degrees**2
theta_0 = 30.0*unit.degrees
topography = Topography(LJ_fluid.topology, ligand_atoms=ligand_atoms)
sampler_state = states.SamplerState(positions=LJ_fluid.positions)
thermodynamic_state = states.ThermodynamicState(system=LJ_fluid.system,
temperature=300.0 * unit.kelvin)
for restraint_name in ['Boresch', 'PeriodicTorsionBoresch']:
        restraint = yank.restraints.create_restraint(restraint_name,
restrained_receptor_atoms=receptor_atoms,
restrained_ligand_atoms=ligand_atoms,
K_r=K_r, r_aA0=r_0,
K_thetaA=K_theta, theta_A0=theta_0,
K_thetaB=K_theta, theta_B0=theta_0,
K_phiA=K_theta, phi_A0=theta_0,
K_phiB=K_theta, phi_B0=theta_0,
K_phiC=K_theta, phi_C0=theta_0)
# Determine other parameters
restraint.determine_missing_parameters(thermodynamic_state, sampler_state, topography)
# Compute standard-state volume for a single molecule in a box of size (1 L) / (avogadros number)
liter = 1000.0 * unit.centimeters ** 3 # one liter
box_volume = liter / (unit.AVOGADRO_CONSTANT_NA * unit.mole) # standard state volume
analytical_shell_volume = (2 * math.pi / (K_r * thermodynamic_state.beta))**(3.0/2)
analytical_standard_state_G = - math.log(box_volume / analytical_shell_volume)
restraint_standard_state_G = restraint.get_standard_state_correction(thermodynamic_state)
msg = 'Failed test for restraint {}'.format(restraint_name)
np.testing.assert_allclose(analytical_standard_state_G, restraint_standard_state_G, err_msg=msg)
# ==============================================================================
# RESTRAINT PARAMETER DETERMINATION
# ==============================================================================
def test_partial_parametrization():
"""The automatic restraint parametrization doesn't overwrite user values."""
# Create states and identify ligand/receptor.
test_system = testsystems.HostGuestVacuum()
topography = Topography(test_system.topology, ligand_atoms='resname B2')
sampler_state = states.SamplerState(positions=test_system.positions)
thermodynamic_state = states.ThermodynamicState(test_system.system,
temperature=300.0*unit.kelvin)
# Test case: (restraint_type, constructor_kwargs)
boresch = dict(restrained_ligand_atoms=[130, 131, 136], K_r=1.0*unit.kilojoule_per_mole/unit.angstroms**2)
test_cases = [
('Harmonic', dict(spring_constant=2.0*unit.kilojoule_per_mole/unit.nanometer**2,
restrained_receptor_atoms=[5])),
('FlatBottom', dict(well_radius=1.0*unit.angstrom, restrained_ligand_atoms=[130])),
('Boresch', boresch),
('PeriodicTorsionBoresch', boresch),
]
if OpenMM73.dev_validate:
test_cases.append(('RMSD', dict(restrained_ligand_atoms=[130, 131, 136],
K_RMSD=1.0 * unit.kilojoule_per_mole / unit.angstroms ** 2)))
for restraint_type, kwargs in test_cases:
state = copy.deepcopy(thermodynamic_state)
restraint = yank.restraints.create_restraint(restraint_type, **kwargs)
# Test-precondition: The restraint has undefined parameters.
with nose.tools.assert_raises(yank.restraints.RestraintParameterError):
restraint.restrain_state(state)
# The automatic parametrization maintains user values.
restraint.determine_missing_parameters(state, sampler_state, topography)
for parameter_name, parameter_value in kwargs.items():
assert getattr(restraint, parameter_name) == parameter_value
# The rest of the parameters has been determined.
restraint.get_standard_state_correction(state)
# The force has been configured correctly.
restraint.restrain_state(state)
system = state.system
for force in system.getForces():
# RadiallySymmetricRestraint between two single atoms.
if isinstance(force, openmm.CustomBondForce):
particle1, particle2, _ = force.getBondParameters(0)
assert particle1 == restraint.restrained_receptor_atoms[0]
assert particle2 == restraint.restrained_ligand_atoms[0]
# Boresch restraint.
elif isinstance(force, openmm.CustomCompoundBondForce):
particles, _ = force.getBondParameters(0)
assert particles == tuple(restraint.restrained_receptor_atoms + restraint.restrained_ligand_atoms)
# RMSD restraint.
elif OpenMM73.dev_validate and isinstance(force, openmm.CustomCVForce):
rmsd_cv = force.getCollectiveVariable(0)
particles = rmsd_cv.getParticles()
assert particles == tuple(restraint.restrained_receptor_atoms + restraint.restrained_ligand_atoms)
def restraint_selection_template(topography_ligand_atoms=None,
restrained_receptor_atoms=None,
restrained_ligand_atoms=None,
topography_regions=None):
"""The DSL atom selection works as expected."""
test_system = testsystems.HostGuestVacuum()
topography = Topography(test_system.topology, ligand_atoms=topography_ligand_atoms)
if topography_regions is not None:
for region, selection in topography_regions.items():
topography.add_region(region, selection)
sampler_state = states.SamplerState(positions=test_system.positions)
thermodynamic_state = states.ThermodynamicState(test_system.system,
temperature=300.0 * unit.kelvin)
# Initialize with DSL and without processing the string raises an error.
restraint = yank.restraints.Harmonic(spring_constant=2.0 * unit.kilojoule_per_mole / unit.nanometer ** 2,
restrained_receptor_atoms=restrained_receptor_atoms,
restrained_ligand_atoms=restrained_ligand_atoms)
with nose.tools.assert_raises(yank.restraints.RestraintParameterError):
restraint.restrain_state(thermodynamic_state)
# After parameter determination, the indices of the restrained atoms are correct.
restraint.determine_missing_parameters(thermodynamic_state, sampler_state, topography)
assert len(restraint.restrained_receptor_atoms) == 14
assert len(restraint.restrained_ligand_atoms) == 30
# The bond force is configured correctly.
restraint.restrain_state(thermodynamic_state)
system = thermodynamic_state.system
for force in system.getForces():
if isinstance(force, openmm.CustomCentroidBondForce):
assert force.getBondParameters(0)[0] == (0, 1)
assert len(force.getGroupParameters(0)[0]) == 14
assert len(force.getGroupParameters(1)[0]) == 30
assert isinstance(force, openmm.CustomCentroidBondForce) # We have found a force.
def test_restraint_dsl_selection():
"""The DSL atom selection works as expected."""
restraint_selection_template(topography_ligand_atoms='resname B2',
restrained_receptor_atoms="(resname CUC) and (name =~ 'O[0-9]+')",
restrained_ligand_atoms='resname B2')
restraint_selection_template(topography_ligand_atoms='resname B2',
restrained_receptor_atoms="(resname CUC) and (name =~ 'O[0-9]+')",
restrained_ligand_atoms='(mass > 0.5) and (resname B2)')
def test_restraint_region_selection():
"""Test that the region atom selection works as expected"""
restraint_selection_template(topography_ligand_atoms='resname B2',
restrained_receptor_atoms='choice_res_residue and the_oxygen',
restrained_ligand_atoms='choice_lig_residue',
topography_regions={'choice_lig_residue': 'resname B2',
'choice_res_residue': 'resname CUC',
'the_oxygen': "name =~ 'O[0-9]+'"})
def test_restraint_region_dsl_mix():
"""Test that the region atom selection works as expected"""
restraint_selection_template(topography_ligand_atoms='resname B2',
restrained_receptor_atoms='choice_res_residue and the_oxygen',
restrained_ligand_atoms='resname B2',
topography_regions={'choice_lig_residue': 'resname B2',
'choice_res_residue': 'resname CUC',
'the_oxygen': "name =~ 'O[0-9]+'"})
# ==============================================================================
# RESTRAINT FACTORY FUNCTIONS
# ==============================================================================
def test_available_restraint_classes():
"""Test to make sure expected restraint classes are available."""
available_restraint_classes = yank.restraints.available_restraint_classes()
available_restraint_types = yank.restraints.available_restraint_types()
# We shouldn't have `None` (from the base class) as an available type
assert None not in available_restraint_classes
assert None not in available_restraint_types
for restraint_type, restraint_class in expected_restraints.items():
msg = "Failed comparing restraint type '%s' with %s" % (restraint_type, str(available_restraint_classes))
assert restraint_type in available_restraint_classes, msg
assert available_restraint_classes[restraint_type] is restraint_class, msg
assert restraint_type in available_restraint_types, msg
def test_restraint_dispatch():
"""Test dispatch of various restraint types."""
thermodynamic_state, sampler_state, topography = HostGuestNoninteracting.build_test_case()
for restraint_type, restraint_class in expected_restraints.items():
# Trap the dev and ignore it
try:
valid = restraint_class.dev_validate
if not valid:
continue
except AttributeError:
pass
# Add restraints and determine parameters.
thermo_state = copy.deepcopy(thermodynamic_state)
restraint = yank.restraints.create_restraint(restraint_type)
restraint.determine_missing_parameters(thermo_state, sampler_state, topography)
# Check that we got the right restraint class.
assert restraint.__class__.__name__ == restraint_type
assert restraint.__class__ is restraint_class
def test_restraint_force_group():
"""Test that the restraint force should be placed in its own force group for optimization."""
thermodynamic_state, sampler_state, topography = HostGuestNoninteracting.build_test_case()
for restraint_type, restraint_class in expected_restraints.items():
# Trap the dev and ignore it
try:
valid = restraint_class.dev_validate
if not valid:
continue
except AttributeError:
pass
# Add restraints and determine parameters.
thermo_state = copy.deepcopy(thermodynamic_state)
restraint = yank.restraints.create_restraint(restraint_type)
restraint.determine_missing_parameters(thermo_state, sampler_state, topography)
restraint.restrain_state(thermo_state)
# Find the force group of the restraint force.
system = thermo_state.system
for force_idx, force in enumerate(system.getForces()):
try:
num_parameters = force.getNumGlobalParameters()
except AttributeError:
continue
for parameter_idx in range(num_parameters):
parameter_name = force.getGlobalParameterName(parameter_idx)
if parameter_name == 'lambda_restraints':
restraint_force_idx = force_idx
restraint_force_group = force.getForceGroup()
break
# No other force should have the same force group.
for force_idx, force in enumerate(system.getForces()):
if force_idx != restraint_force_idx:
assert force.getForceGroup() != restraint_force_group
# ==============================================================================
# RESTRAINT STATE
# ==============================================================================
class TestRestraintState(object):
"""Test class RestraintState."""
@classmethod
def setup_class(cls):
lysozyme = testsystems.LysozymeImplicit()
system, positions = lysozyme.system, lysozyme.positions
thermodynamic_state = states.ThermodynamicState(system, 300*unit.kelvin)
sampler_state = states.SamplerState(positions)
topography = Topography(lysozyme.topology, ligand_atoms='resname TMP')
cls.lysozyme_test_case = (thermodynamic_state, sampler_state, topography)
def get_restraint_cases(self):
for cls_name, cls in yank.restraints.available_restraint_classes().items():
# Create restraint and automatically determine parameters.
# Trap the dev and ignore it
try:
valid = cls.dev_validate
if not valid:
continue
except AttributeError:
pass
restraint = cls()
thermodynamic_state, sampler_state, topography = copy.deepcopy(self.lysozyme_test_case)
restraint.determine_missing_parameters(thermodynamic_state, sampler_state, topography)
# Apply restraint.
restraint.restrain_state(thermodynamic_state)
# Create compound state to control the strength of the restraint.
restraint_state = yank.restraints.RestraintState(lambda_restraints=1.0)
compound_state = states.CompoundThermodynamicState(thermodynamic_state=thermodynamic_state,
composable_states=[restraint_state])
yield compound_state
def test_apply_to_system(self):
"""The System parameters are updated when lambda_restraints is set on the compound state."""
for compound_state in self.get_restraint_cases():
# Test pre-condition.
assert compound_state.lambda_restraints == 1.0
# Changing the attribute changes the internal representation of a system.
compound_state.lambda_restraints = 0.5
for force, parameter_name, parameter_id in compound_state._get_system_controlled_parameters(
compound_state.system, parameters_name_suffix=None):
assert force.getGlobalParameterDefaultValue(parameter_id) == 0.5
def test_apply_to_context(self):
"""The Context parameters are updated when the compound state is applied."""
for compound_state in self.get_restraint_cases():
compound_state.lambda_restraints = 0.5
integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
context = compound_state.create_context(integrator)
assert context.getParameter('lambda_restraints') == 0.5
compound_state.lambda_restraints = 0.0
compound_state.apply_to_context(context)
assert context.getParameter('lambda_restraints') == 0.0
del context, integrator
def test_compatibility(self):
"""States differing only by the strength of the restraint are compatible."""
unrestrained_system = self.lysozyme_test_case[0].system
for compound_state in self.get_restraint_cases():
compound_state.lambda_restraints = 1.0
compatible_state = copy.deepcopy(compound_state)
compatible_state.lambda_restraints = 0.0
assert compound_state.is_state_compatible(compatible_state)
# Trying to assign a System without a Restraint raises an error.
with nose.tools.assert_raises(mmtools.states.GlobalParameterError):
compound_state.system = unrestrained_system
def test_find_force_groups_to_update(self):
integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
for compound_state in self.get_restraint_cases():
context = compound_state.create_context(copy.deepcopy(integrator))
# Find the restraint force group.
system = context.getSystem()
force, _, _ = next(yank.restraints.RestraintState._get_system_controlled_parameters(
system, parameters_name_suffix=None))
force_group = force.getForceGroup()
# No force group should be updated if we don't move.
assert compound_state._find_force_groups_to_update(context, compound_state, memo={}) == set()
# We need to update the force if the current state changes.
compound_state2 = copy.deepcopy(compound_state)
compound_state2.lambda_restraints = 0.5
assert compound_state._find_force_groups_to_update(context, compound_state2, memo={}) == {force_group}
# ==============================================================================
# MAIN
# ==============================================================================
if __name__ == '__main__':
test_restraint_dispatch()
| 44.321821
| 181
| 0.651968
|
42194453f3b31b3bba101d8a9ffd8da653d8f200
| 3,607
|
py
|
Python
|
parliament_lk/middlewares.py
|
prabod/CS4642-IR-Parliament.lk-Scraper
|
a0ce0f71f5747f62d25a0abc580fb7dd0fc39007
|
[
"MIT"
] | null | null | null |
parliament_lk/middlewares.py
|
prabod/CS4642-IR-Parliament.lk-Scraper
|
a0ce0f71f5747f62d25a0abc580fb7dd0fc39007
|
[
"MIT"
] | null | null | null |
parliament_lk/middlewares.py
|
prabod/CS4642-IR-Parliament.lk-Scraper
|
a0ce0f71f5747f62d25a0abc580fb7dd0fc39007
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class ParliamentLkSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class ParliamentLkDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
        # Must either:
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
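# --- Hedged usage sketch (editor's addition): middlewares are enabled from the
# project's settings.py; the module path and priority values below are
# illustrative assumptions for this project layout.
# SPIDER_MIDDLEWARES = {
#     'parliament_lk.middlewares.ParliamentLkSpiderMiddleware': 543,
# }
# DOWNLOADER_MIDDLEWARES = {
#     'parliament_lk.middlewares.ParliamentLkDownloaderMiddleware': 543,
# }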
| 34.682692
| 78
| 0.667036
|
fecda72a6a98ea431385dcff02f09ad0d73d53f6
| 1,508
|
py
|
Python
|
metaci/views.py
|
abhishekalgo/metaci
|
cd62473b3fb85fb0f39623f9fb2850993ff708a5
|
[
"BSD-3-Clause"
] | null | null | null |
metaci/views.py
|
abhishekalgo/metaci
|
cd62473b3fb85fb0f39623f9fb2850993ff708a5
|
[
"BSD-3-Clause"
] | null | null | null |
metaci/views.py
|
abhishekalgo/metaci
|
cd62473b3fb85fb0f39623f9fb2850993ff708a5
|
[
"BSD-3-Clause"
] | 1
|
2018-12-07T09:51:07.000Z
|
2018-12-07T09:51:07.000Z
|
import os
import re
import subprocess
import sys
import django
from django.conf import settings
from django.views.generic.base import TemplateView
import cumulusci
class AboutView(TemplateView):
def get_context_data(self, **kwargs):
context = super(AboutView, self).get_context_data(**kwargs)
# django
context["DJANGO_VERSION"] = "{}.{}.{}".format(
django.VERSION[0], # major
django.VERSION[1], # minor
django.VERSION[2], # micro
)
# python
context["PYTHON_VERSION"] = "{}.{}.{}".format(
sys.version_info.major, sys.version_info.minor, sys.version_info.micro
)
# Salesforce DX
        # check_output returns bytes on Python 3, so decode before regex matching
        out = subprocess.check_output(["sfdx", "--version"]).decode("utf-8")
        match = re.match(r"sfdx-cli/(\d+\.\d+\.\d+)-.+", out)
if match:
context["SFDX_CLI_VERSION"] = match.group(1)
# cumulusci
context["CUMULUSCI_VERSION"] = cumulusci.__version__
# heroku
heroku_env_vars = [
"HEROKU_APP_ID",
"HEROKU_APP_NAME",
"HEROKU_DYNO_ID",
"HEROKU_RELEASE_CREATED_AT",
"HEROKU_RELEASE_VERSION",
"HEROKU_SLUG_COMMIT",
"HEROKU_SLUG_DESCRIPTION",
]
for var in heroku_env_vars:
context[var] = os.environ.get(var, "Heroku dyno metadata not found")
context["METACI_FLOW_SUBCLASS_ENABLED"] = settings.METACI_FLOW_SUBCLASS_ENABLED
return context
| 28.45283
| 87
| 0.598143
|
8afa98b3b02c5afb493a1b79094df44d884f5a0c
| 238
|
py
|
Python
|
hw/hw12/tests/q1_6.py
|
ds-modules/Colab-demo
|
cccaff13633f8a5ec697cd4aeca9087f2feec2e4
|
[
"BSD-3-Clause"
] | null | null | null |
hw/hw12/tests/q1_6.py
|
ds-modules/Colab-demo
|
cccaff13633f8a5ec697cd4aeca9087f2feec2e4
|
[
"BSD-3-Clause"
] | null | null | null |
hw/hw12/tests/q1_6.py
|
ds-modules/Colab-demo
|
cccaff13633f8a5ec697cd4aeca9087f2feec2e4
|
[
"BSD-3-Clause"
] | null | null | null |
test = { 'name': 'q1_6',
'points': 1,
'suites': [{'cases': [{'code': '>>> # `k` should be an int;\n>>> type(k) == int\nTrue', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
| 59.5
| 193
| 0.495798
|
43b9c934db1e929d9ad8f13629fb0a8e0f26fba1
| 3,103
|
py
|
Python
|
UnGastos/UnGastos/settings.py
|
Rdlenke/UnGastos
|
6bb1ec456ad97d93a3805a7c39817dbbf5c413e3
|
[
"MIT"
] | null | null | null |
UnGastos/UnGastos/settings.py
|
Rdlenke/UnGastos
|
6bb1ec456ad97d93a3805a7c39817dbbf5c413e3
|
[
"MIT"
] | null | null | null |
UnGastos/UnGastos/settings.py
|
Rdlenke/UnGastos
|
6bb1ec456ad97d93a3805a7c39817dbbf5c413e3
|
[
"MIT"
] | null | null | null |
"""
Django settings for UnGastos project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*jpf45b7&a%^pwy4_@cs2ol=y*wsd$6ikv^2noqk2gmi(^ubu)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['0.0.0.0']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'UnGastos.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'UnGastos.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.644628
| 91
| 0.695778
|
a25edf4c49231a6352369fc63939411b70a6a432
| 569
|
py
|
Python
|
pylonsapp/pylonsapp/controllers/owners.py
|
pombreda/formalchemy
|
613f721c6da432a12ed94012fad8e337fb5af958
|
[
"MIT"
] | 20
|
2015-01-25T13:54:08.000Z
|
2021-03-09T16:54:53.000Z
|
pylonsapp/pylonsapp/controllers/owners.py
|
pombreda/formalchemy
|
613f721c6da432a12ed94012fad8e337fb5af958
|
[
"MIT"
] | 11
|
2015-02-05T16:36:19.000Z
|
2020-11-13T11:41:19.000Z
|
pylonsapp/pylonsapp/controllers/owners.py
|
pombreda/formalchemy
|
613f721c6da432a12ed94012fad8e337fb5af958
|
[
"MIT"
] | 12
|
2015-02-19T14:06:21.000Z
|
2021-07-30T08:44:45.000Z
|
import logging
from pylons import request, response, session, url, tmpl_context as c
from pylons.controllers.util import abort, redirect
from pylonsapp.lib.base import BaseController, render
from pylonsapp import model
from pylonsapp.model import meta
from formalchemy.ext.pylons.controller import RESTController
log = logging.getLogger(__name__)
class OwnersController(BaseController):
def Session(self):
return meta.Session
def get_model(self):
return model.Owner
OwnersController = RESTController(OwnersController, 'owner', 'owners')
| 24.73913
| 70
| 0.785589
|
be8636abf7d8687d5a97d8e6b5e4f322ff491274
| 2,134
|
py
|
Python
|
src/eventgrid/azext_eventgrid/vendored_sdks/eventgrid/models/azure_function_event_subscription_destination.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2022-03-22T15:02:32.000Z
|
2022-03-22T15:02:32.000Z
|
src/eventgrid/azext_eventgrid/vendored_sdks/eventgrid/models/azure_function_event_subscription_destination.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2018-11-19T05:12:58.000Z
|
2018-11-19T05:12:58.000Z
|
src/eventgrid/azext_eventgrid/vendored_sdks/eventgrid/models/azure_function_event_subscription_destination.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2021-06-03T19:31:10.000Z
|
2021-06-03T19:31:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .event_subscription_destination import EventSubscriptionDestination
class AzureFunctionEventSubscriptionDestination(EventSubscriptionDestination):
"""Information about the azure function destination for an event subscription.
All required parameters must be populated in order to send to Azure.
:param endpoint_type: Required. Constant filled by server.
:type endpoint_type: str
:param resource_id: The Azure Resource Id that represents the endpoint of
the Azure Function destination of an event subscription.
:type resource_id: str
:param max_events_per_batch: Maximum number of events per batch.
:type max_events_per_batch: int
:param preferred_batch_size_in_kilobytes: Preferred batch size in
Kilobytes.
:type preferred_batch_size_in_kilobytes: int
"""
_validation = {
'endpoint_type': {'required': True},
}
_attribute_map = {
'endpoint_type': {'key': 'endpointType', 'type': 'str'},
'resource_id': {'key': 'properties.resourceId', 'type': 'str'},
'max_events_per_batch': {'key': 'properties.maxEventsPerBatch', 'type': 'int'},
'preferred_batch_size_in_kilobytes': {'key': 'properties.preferredBatchSizeInKilobytes', 'type': 'int'},
}
def __init__(self, **kwargs):
super(AzureFunctionEventSubscriptionDestination, self).__init__(**kwargs)
self.resource_id = kwargs.get('resource_id', None)
self.max_events_per_batch = kwargs.get('max_events_per_batch', None)
self.preferred_batch_size_in_kilobytes = kwargs.get('preferred_batch_size_in_kilobytes', None)
self.endpoint_type = 'AzureFunction'
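# --- Hedged usage sketch (editor's addition): constructing the destination via
# the **kwargs pattern used by __init__ above. The resource id format uses
# placeholders and is an illustrative assumption.
if __name__ == '__main__':
    destination = AzureFunctionEventSubscriptionDestination(
        resource_id='/subscriptions/<sub>/resourceGroups/<rg>/providers/'
                    'Microsoft.Web/sites/<app>/functions/<func>',
        max_events_per_batch=10,
        preferred_batch_size_in_kilobytes=64)
    print(destination.endpoint_type, destination.max_events_per_batch)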
| 43.55102
| 112
| 0.679944
|
0b2f50ff89bcb63b04bd94297e68ccc42699cc9e
| 129
|
py
|
Python
|
scripts/download_data.py
|
jgehunter/stock_price_prediction
|
229a9b540409519fcbbfa0fcdd821df15fab0185
|
[
"MIT"
] | null | null | null |
scripts/download_data.py
|
jgehunter/stock_price_prediction
|
229a9b540409519fcbbfa0fcdd821df15fab0185
|
[
"MIT"
] | null | null | null |
scripts/download_data.py
|
jgehunter/stock_price_prediction
|
229a9b540409519fcbbfa0fcdd821df15fab0185
|
[
"MIT"
] | null | null | null |
import fire
from stock_price_prediction.download.ticker_download import GetData
if __name__ == "__main__":
fire.Fire(GetData)
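A small, self-contained sketch of the python-fire dispatch pattern used above; `Greeter` is a hypothetical stand-in, since `GetData`'s methods are not shown here.

import fire

class Greeter(object):
    def hello(self, name='world'):
        # Exposed as a subcommand: `python demo.py hello --name=Alice`
        return 'hello %s' % name

if __name__ == '__main__':
    fire.Fire(Greeter)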
| 32.25
| 67
| 0.813953
|
87843280d8093799cf09713b826210ff3b4d25c7
| 615
|
py
|
Python
|
pythonzoo/zoo/views.py
|
ppolanco01/Spring2018PythonZoo
|
1ec9b6dd141870f8cd0f7aeaed7baee9d00c2285
|
[
"MIT"
] | null | null | null |
pythonzoo/zoo/views.py
|
ppolanco01/Spring2018PythonZoo
|
1ec9b6dd141870f8cd0f7aeaed7baee9d00c2285
|
[
"MIT"
] | null | null | null |
pythonzoo/zoo/views.py
|
ppolanco01/Spring2018PythonZoo
|
1ec9b6dd141870f8cd0f7aeaed7baee9d00c2285
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.views import generic
from .models import Zoo, Exhibit, Animal
# Create your views here.
def aboutus(request):
return render(
request,
"zoo/aboutus.html",
context = { },
)
def contactus(request):
return render(
request,
"zoo/contactus.html",
context = { },
)
class ZooListView(generic.ListView):
model = Zoo
class ZooDetailView(generic.DetailView):
model = Zoo
class ExhibitDetailView(generic.DetailView):
model = Exhibit
class AnimalDetailView(generic.DetailView):
model = Animal
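A hypothetical urls.py wiring for the generic views above, assuming Django 2+ style routing and the default pk-based detail lookups; the URL patterns and names are illustrative, not taken from this repository.

from django.urls import path
from . import views

urlpatterns = [
    path('zoos/', views.ZooListView.as_view(), name='zoo-list'),
    path('zoos/<int:pk>/', views.ZooDetailView.as_view(), name='zoo-detail'),
    path('exhibits/<int:pk>/', views.ExhibitDetailView.as_view(), name='exhibit-detail'),
    path('animals/<int:pk>/', views.AnimalDetailView.as_view(), name='animal-detail'),
]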
| 18.636364
| 44
| 0.666667
|
3599b77da9835b3451a89a7f610111bef5fa379b
| 68,070
|
py
|
Python
|
Tools/MassSpectrometry/PeakRationalizer.py
|
deaconjs/SPADE
|
da28cb927ae14f60aaf847591f81a86c9796d95e
|
[
"BSD-3-Clause"
] | 3
|
2017-09-26T03:09:14.000Z
|
2022-03-20T11:12:34.000Z
|
Tools/MassSpectrometry/PeakRationalizer.py
|
deaconjs/SPADE
|
da28cb927ae14f60aaf847591f81a86c9796d95e
|
[
"BSD-3-Clause"
] | null | null | null |
Tools/MassSpectrometry/PeakRationalizer.py
|
deaconjs/SPADE
|
da28cb927ae14f60aaf847591f81a86c9796d95e
|
[
"BSD-3-Clause"
] | 1
|
2020-01-15T03:05:36.000Z
|
2020-01-15T03:05:36.000Z
|
# python imports
import string
import math
import os.path
import pickle
import sys
import copy
# dependency imports
sys.path.append(os.path.abspath('./Dependencies'))
sys.path.append(os.path.abspath('./Dependencies/pybwidget-0.1.2_1.7.0'))
sys.path.append(os.path.abspath('./Dependencies/pybwidget/bwidget'))
import bwidget
from Tkinter import *
from tkFileDialog import *
import Pmw
class Viewer:
def __init__(self, parent, experiment, peak_index):
# build the widget
self.parent = parent
self.rtop = Toplevel(self.parent)
self.experiment = experiment
self.my_filter_resolution = experiment.get_filter_resolution()
self.rtop.title('Rationalize Peak')
# top frame carries mutations options
self.rtop.optionsframe = Frame(self.rtop, bd=8)
self.rtop.mw_string = StringVar()
self.rtop.mw_string.set('%s amu'%(self.parent.parent.parent.max_plot.x[peak_index]))
self.rtop.mw_label = Label(self.rtop.optionsframe, bd=4, textvariable=self.rtop.mw_string)
self.rtop.mutantsframe = Frame(self.rtop.optionsframe, relief=GROOVE, bd=2)
self.rtop.mutantcheck_int = IntVar()
self.rtop.mutantcheck = Checkbutton(self.rtop.mutantsframe,
text="Check for Mutants",
variable=self.rtop.mutantcheck_int)
self.rtop.nonnatural_int = IntVar()
self.rtop.include_nonnatural = Checkbutton(self.rtop.mutantsframe,
text="include non-natural",
variable = self.rtop.nonnatural_int,
padx=8)
self.rtop.indel_int = IntVar()
self.rtop.include_indels = Checkbutton(self.rtop.mutantsframe,
text="include insertions/deletions",
padx=12,
variable = self.rtop.indel_int)
self.rtop.mutant_range_counter = Pmw.Counter(self.rtop.mutantsframe,
labelpos = 'w',
label_text = 'maximum count:',
orient = 'horizontal',
entry_width = 2,
entryfield_value = 1,
datatype = {'counter':'numeric'},
entryfield_validate = {'validator':'numeric'},
padx=20,
increment=1)
self.rtop.mutantcheck.pack(anchor='w', side=TOP, expand=NO, fill=NONE)
self.rtop.include_nonnatural.pack(anchor='w', side=TOP, expand=NO, fill=NONE)
self.rtop.include_indels.pack(anchor='w', side=TOP, expand=NO, fill=NONE)
self.rtop.mutant_range_counter.pack(side=TOP, expand=NO, fill=NONE)
self.rtop.mw_label.pack(anchor='w', side=TOP, expand=YES, fill=BOTH)
self.rtop.mutantsframe.pack(side=TOP, expand=YES, fill=BOTH, pady=8)
# Create and pack a RadioSelect widget, with radiobuttons.
self.rtop.modifymutants_radio = Pmw.RadioSelect(self.rtop.optionsframe, buttontype = 'radiobutton', labelpos = 'w', command = None, label_text = 'Modify mutants?')
for text in ('Yes', 'No'):
self.rtop.modifymutants_radio.add(text)
self.rtop.modifymutants_radio.invoke('No')
self.rtop.modifymutants_radio.pack(side = TOP, expand = YES, fill=BOTH, pady=10)
# bottom frame
self.rtop.modsframe = Frame(self.rtop.optionsframe, relief=GROOVE, bd=2)
self.rtop.modscheck_int = IntVar()
self.rtop.modscheck = Checkbutton(self.rtop.modsframe,
text='Check for Modifications',
variable = self.rtop.modscheck_int)
self.rtop.modscheck.select()
self.rtop.uncommon_int = IntVar()
self.rtop.include_uncommon = Checkbutton(self.rtop.modsframe,
text='include uncommon',
padx=12,
variable = self.rtop.uncommon_int)
self.rtop.nonspecific_int = IntVar()
self.rtop.include_nonspecific = Checkbutton(self.rtop.modsframe,
text='include nonspecific',
padx=12,
variable = self.rtop.nonspecific_int)
self.rtop.crosslinks_int = IntVar()
self.rtop.include_crosslinks = Checkbutton(self.rtop.modsframe,
text='include crosslinks',
padx=12,
variable = self.rtop.crosslinks_int)
self.rtop.mods_range_counter = Pmw.Counter(self.rtop.modsframe,
labelpos = 'w',
label_text='maximum count: ',
orient = 'horizontal',
entry_width = 2,
entryfield_value = 1,
datatype = {'counter':'numeric'},
entryfield_validate = {'validator':'numeric'},
padx=20,
increment=1)
self.rtop.modscheck.pack(anchor='w', side=TOP, expand=NO, fill=NONE)
self.rtop.include_uncommon.pack(anchor='w', side=TOP, expand=NO, fill=NONE)
self.rtop.include_nonspecific.pack(anchor='w', side=TOP, expand=NO, fill=NONE)
self.rtop.include_crosslinks.pack(anchor='w', side=TOP, expand=NO, fill=None)
self.rtop.mods_range_counter.pack(anchor='w', side=TOP, expand=NO, fill=NONE)
self.rtop.modsframe.pack(side=TOP, expand=YES, fill=BOTH, pady=8)
self.rtop.limitsframe = Frame(self.rtop.optionsframe, relief=GROOVE, bd=2)
self.rtop.fragweightlimit_counter = Pmw.Counter(self.rtop.limitsframe,
labelpos='w',
label_text='examine fragments within',
orient='horizontal',
entry_width=5,
entryfield_value=1000,
datatype={'counter':'numeric'},
entryfield_validate={'validator':'numeric'},
increment=50)
self.rtop.ignorechangesbelow_counter = Pmw.Counter(self.rtop.limitsframe,
labelpos='w',
label_text='ignore weight changes below:',
orient='horizontal',
entry_width=3,
entryfield_value=3,
datatype={'counter':'numeric'},
entryfield_validate={'validator':'numeric'},
increment=1)
self.rtop.my_filter_resolution_counter = Pmw.Counter(self.rtop.limitsframe,
labelpos='w',
label_text='apply filter resolution:',
orient='horizontal',
entry_width=6,
entryfield_value=self.my_filter_resolution,
datatype={'counter':'real'},
entryfield_validate={'validator':'real'},
increment=0.001)
self.rtop.limitsframe.pack(side=TOP, expand=NO, fill=X, pady=10)
self.rtop.fragweightlimit_counter.pack(side=TOP, expand=NO, fill=X, pady=10)
self.rtop.ignorechangesbelow_counter.pack(side=TOP, expand=NO, fill=X,pady=10)
self.rtop.my_filter_resolution_counter.pack(side=TOP, expand=NO, fill=X, pady=10)
# button box
self.rtop.rationalize_buttonbox = Pmw.ButtonBox(self.rtop.optionsframe, orient='horizontal')
self.rtop.rationalize_buttonbox.add('Calculate', command=self._rerun_rationalize_peak)
self.rtop.rationalize_buttonbox.add('Close', command=self.rtop.destroy)
self.rtop.rationalize_buttonbox.pack(side=BOTTOM, expand=NO, fill=X)
# now add a scrolled frame for holding fragment options and final output trees
self.rtop.outframe = Frame(self.rtop, width=600, height=600)
self.rtop.fragment_options_frame = Frame(self.rtop.outframe, width=600, height=200)
self.rtop.output_frame = Frame(self.rtop.outframe, width=600, height=400)
self.rtop.fragment_options_frame.pack(side=TOP, expand=NO, fill=X)
self.rtop.output_frame.pack(side=TOP, expand=YES, fill=BOTH)
self.rtop.optionsframe.pack(side=LEFT, expand=NO, fill=BOTH)
self.rtop.outframe.pack(side=LEFT, expand=YES, fill=BOTH)
self.starting_fragments = {}
self.examination_fragments = {}
def _rerun_rationalize_peak(self):
self.starting_fragments = {}
self.examination_fragments = {}
self._run_rationalize_peak()
def _run_rationalize_peak(self, calculation_fragments={}):
aa_weights = {'A':71.09, 'C':103.15, 'D':115.09, 'E':129.12, 'F':147.18,
'G':57.05, 'H':137.14, 'I':113.16, 'K':128.17, 'L':113.16,
'M':131.19, 'N':114.11, 'P':97.12, 'Q':128.14, 'R':156.19,
'S':87.08, 'T':101.11, 'V':99.14, 'W':186.21, 'Y':163.18}
self.my_filter_resolution = float(self.rtop.my_filter_resolution_counter.get())
val = self.rtop.modifymutants_radio.getcurselection()
if val == 'Yes':
modify_mutants_val = 1
else:
modify_mutants_val = 0
args = [float(string.split(self.rtop.mw_string.get())[0]), # molecular weight
int(self.rtop.mutantcheck_int.get()), # check primary sequence
int(self.rtop.nonnatural_int.get()), # allow nonnatural amino acids
int(self.rtop.indel_int.get()), # allow insertions and deletions
int(self.rtop.mutant_range_counter.get()), # number of mutations tolerated
int(self.rtop.modscheck_int.get()), # check post-translational modifications
int(self.rtop.nonspecific_int.get()), # allow nonspecific reactions
int(self.rtop.mods_range_counter.get()), # number of post-translational modifications tolerated
int(self.rtop.fragweightlimit_counter.get()), # start with fragments within this limit of the selected peak
int(self.rtop.ignorechangesbelow_counter.get()), # any modifications or mutations that adjust the molecular weight by less than this are ignored
modify_mutants_val, # if selected ('YES'), apply the chemical modifications to mutants and indels
int(self.rtop.crosslinks_int.get()), # allow crosslinks
int(self.rtop.uncommon_int.get())] # include mods dict
frags = self.experiment.get_all_possible_fragment_objects()
# make a hash of all possible fragment sequences, indexed by molecular weight
if len(calculation_fragments.keys()) == 0:
self.starting_fragments = {}
sequences = self.experiment.get_protein_sequences()
for frag in frags:
# determine if it's the cterminal fragment for c-terminus-specific modifications
is_cterm = 0
if len(sequences[frag.get_chain()]) == frag.get_cterm_index():
is_cterm = 1
if frag.get_weight() > args[0] - args[8] and frag.get_weight() < args[0] + args[8]:
print 'consider mw %s fragment %s'%(frag.get_weight(), frag.get_sequence())
try:
self.starting_fragments[int(round(frag.get_weight(), 0))]
except KeyError:
self.starting_fragments[int(round(frag.get_weight(), 0))] = [[frag.get_chain(), frag.get_sequence(), 0, 0, '', frag.get_nterm_index(), frag.get_cterm_index(), is_cterm, frag.get_weight()]]
else:
self.starting_fragments[int(round(frag.get_weight(), 0))].append([frag.get_chain(), frag.get_sequence(), 0, 0, '', frag.get_nterm_index(), frag.get_cterm_index(), is_cterm, frag.get_weight()])
# here is the initial list. append new fragments to it where index 2 and 3 retain how many mutations/indels
# and chemical adducts (respectively) have been added. This block generates all possibilities.
if len(calculation_fragments.keys()) == 0:
rationalization_fragments = copy.deepcopy(self.starting_fragments)
else:
rationalization_fragments = copy.deepcopy(calculation_fragments)
if args[1]:
print 'collecting mutations'
if args[3]:
print 'do deletions first so that mutations and insertions are not deleted'
for i in range(args[4]):
count, rationalization_fragments = self._collect_rationalization_fragset(rationalization_fragments, 'deletion', args[4], args[9])
if args[2]:
print 'add nonnatural mutations second because the X is ignored by the mutation producing code'
for i in range(args[4]):
count, rationalization_fragments = self._collect_rationalization_fragset(rationalization_fragments, 'nonnatural_mutation', args[4], args[9])
if 1:
print 'add normal mutations'
for i in range(args[4]):
count, rationalization_fragments = self._collect_rationalization_fragset(rationalization_fragments, 'mutation', args[4], args[9])
if args[3]:
print 'do insertions last so they are not mutated'
for i in range(args[4]):
count, rationalization_fragments = self._collect_rationalization_fragset(rationalization_fragments, 'insertion', args[4],args[9])
mutant_holdout = {}
if not args[10]:
mutant_holdout = copy.deepcopy(rationalization_fragments)
if len(calculation_fragments.keys()) == 0:
rationalization_fragments = copy.deepcopy(self.starting_fragments)
else:
rationalization_fragments = copy.deepcopy(calculation_fragments)
if args[5]:
print 'collecting post-translational modifications'
for i in range(args[7]):
count, rationalization_fragments = self._collect_rationalization_fragset(rationalization_fragments, 'post-translational modification', args[7],args[9], args[12])
if args[6]:
print 'and nonspecific post-translational modifications'
for i in range(args[7]):
count, rationalization_fragments = self._collect_rationalization_fragset(rationalization_fragments, 'nonspecific reactions', args[7],args[9])
if args[11]:
print 'and crosslinks'
for i in range(args[7]):
count, rationalization_fragments = self._collect_rationalization_fragset(rationalization_fragments, 'crosslinks', args[7],args[9])
if len(mutant_holdout.keys()) > 0: # if mutants are not being modified
# merge mutant and modification dictionaries
for mkey in mutant_holdout.keys():
for rkey in rationalization_fragments.keys():
if mkey == rkey:
for mutant_fragment in mutant_holdout[mkey]:
rationalization_fragments[rkey].append(mutant_fragment)
break
else:
rationalization_fragments[mkey] = mutant_holdout[mkey]
# now that all possiblities have been enumerated, filter out those with an appropriate molecular weight
filtered_mutants = self._filter_rationalization_fragset(rationalization_fragments)
total_count = 0
for key in filtered_mutants.keys():
total_count += len(filtered_mutants[key])
print '%s after filtering with %s molecular weights'%(total_count, len(filtered_mutants))
self.update_rationalization_output(filtered_mutants)
def _collect_rationalization_fragset(self, input_fragset, type, how_many, ignore_changes_below, include_uncommon=0):
aa_weights = {'A':71.09, 'C':103.15, 'D':115.09, 'E':129.12, 'F':147.18,
'G':57.05, 'H':137.14, 'I':113.16, 'K':128.17, 'L':113.16,
'M':131.19, 'N':114.11, 'P':97.12, 'Q':128.14, 'R':156.19,
'S':87.08, 'T':101.11, 'V':99.14, 'W':186.21, 'Y':163.18}
# first load the common modifications and build the mods dictionary appropriately
        file = open('./Tools/MassSpectrometry/reactions_dict.pkl', 'rb')
common_mods = pickle.load(file)
file.close()
mods = {'A':{}, 'C':{}, 'D':{}, 'E':{}, 'F':{}, 'G':{}, 'H':{},
'I':{}, 'K':{}, 'L':{}, 'M':{}, 'N':{}, 'P':{}, 'Q':{},
'R':{}, 'S':{}, 'T':{}, 'V':{}, 'W':{}, 'Y':{}}
for key in common_mods.keys():
for target_AA in common_mods[key]['target_AA']:
if target_AA not in mods.keys():
mods[target_AA] = {key:float(common_mods[key]['added_weight'])}
else:
mods[target_AA][key] = float(common_mods[key]['added_weight'])
file = open('./Tools/MassSpectrometry/mods.pkl', 'rb')
uncommon_mods = pickle.load(file)
file.close()
for token in ['nn', 'crosslinks', 'X', 'cterm', 'nterm', 'nsp']:
mods[token] = copy.deepcopy(uncommon_mods[token])
if include_uncommon:
# open the modifications database
for key in uncommon_mods.keys():
if key not in mods.keys():
mods[key] = copy.deepcopy(uncommon_mods[key])
for reaction_key in uncommon_mods[key].keys():
if reaction_key not in mods[key].keys():
mods[key][reaction_key] = copy.deepcopy(uncommon_mods[key][reaction_key])
return_set = {}
# have the original fragment
if type == 'deletion':
for sf_key in input_fragset.keys():
for sf in input_fragset[sf_key]:
if sf[2] < how_many:
for i in range(len(sf[1])):
if aa_weights[sf[1][i]] > ignore_changes_below:
try:
return_set[int(round(sf_key-aa_weights[sf[1][i]]))]
except KeyError:
return_set[int(round(sf_key-aa_weights[sf[1][i]]))] = [[sf[0], sf[1][:i] + sf[1][i+1:], sf[2]+1, sf[3], sf[4]+'%s deletion at position %s (-%s)'%(sf[1][i], i, aa_weights[sf[1][i]]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key-aa_weights[sf[1][i]]))].append([sf[0], sf[1][:i] + sf[1][i+1:], sf[2]+1, sf[3], sf[4]+'%s deletion at position %s (-%s)'%(sf[1][i], i, aa_weights[sf[1][i]]),sf[5],sf[6],sf[7], sf[8]])
elif type == 'insertion':
# have the original fragment and all possible single or multiple deletions
#
# put the insertion in the second position, so as to not obstruct terminus analysis
for sf_key in input_fragset.keys():
for sf in input_fragset[sf_key]:
if sf[2] < how_many:
for aa in aa_weights.keys():
if aa_weights[aa] > ignore_changes_below:
try:
return_set[int(round(sf_key+aa_weights[aa]))]
except KeyError:
return_set[int(round(sf_key+aa_weights[aa]))] = [[sf[0], sf[1][0] + aa + sf[1][1:], sf[2]+1, sf[3], sf[4]+'%s insertion (+%s)'%(aa, aa_weights[aa]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key+aa_weights[aa]))].append([sf[0], sf[1][0] + aa + sf[1][1:], sf[2]+1, sf[3], sf[4] + '%s insertion (+%s)'%(aa, aa_weights[aa]),sf[5],sf[6],sf[7], sf[8]])
elif type == 'mutation':
# have the original fragment and all possible single or multiple insertions and deletions
# generate all possible mutants
for sf_key in input_fragset.keys():
for sf in input_fragset[sf_key]:
sequence = sf[1]
if sf[2] < how_many:
for i in range(len(sequence)): # parse this fragment's sequence
for aa in aa_weights.keys():
if abs(aa_weights[aa]-aa_weights[sequence[i]]) > ignore_changes_below:
try:
return_set[int(round(sf_key-aa_weights[sequence[i]]+aa_weights[aa]))]
except KeyError:
return_set[int(round(sf_key-aa_weights[sequence[i]]+aa_weights[aa]))] = [[sf[0], sequence[:i] + aa + sequence[i+1:], sf[2]+1, sf[3], sf[4] + '%s to %s mutation at position %s (%s)'%(sequence[i], aa, i, aa_weights[aa]-aa_weights[sequence[i]]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key-aa_weights[sequence[i]]+aa_weights[aa]))].append([sf[0], sequence[:i] + aa + sequence[i+1:], sf[2]+1, sf[3], sf[4] + '%s to %s mutation at position %s (%s)'%(sequence[i], aa, i, aa_weights[aa]-aa_weights[sequence[i]]),sf[5],sf[6],sf[7], sf[8]])
elif type == 'nonnatural_mutation':
for sf_key in input_fragset.keys():
for sf in input_fragset[sf_key]:
if sf[2] < how_many:
for i in range(len(sf[1])):
for nn in mods['nn'].keys():
if abs(mods['nn'][nn]-aa_weights[sf[1][i]]) > ignore_changes_below:
try:
return_set[int(round(sf_key-aa_weights[sf[1][i]]+mods['nn'][nn]))]
except KeyError:
return_set[int(round(sf_key-aa_weights[sf[1][i]]+mods['nn'][nn]))] = [[sf[0], sf[1][:i] + 'X' + sf[1][i+1:], sf[2]+1, sf[3], sf[4] + '%s to %s mutation at position %s (%s)'%(sf[1][i], nn, i, mods['nn'][nn]-aa_weights[sf[1][i]]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key-aa_weights[sf[1][i]]+mods['nn'][nn]))].append([sf[0], sf[1][:i] + 'X' + sf[1][i+1:], sf[2]+1, sf[3], sf[4] + '%s to %s mutation at position %s (%s)'%(sf[1][i], nn, i, mods['nn'][nn]-aa_weights[sf[1][i]]),sf[5],sf[6],sf[7], sf[8]])
elif type == 'post-translational modification':
j = 0
k = 0
for sf_key in input_fragset.keys():
for sf in input_fragset[sf_key]:
if sf[3] < how_many:
for i in range(len(sf[1])): # for each aa in the sequence
modset = mods[sf[1][i]]
for modkey in modset.keys():
if abs(modset[modkey]) > ignore_changes_below:
try:
return_set[int(round(sf_key+modset[modkey], 0))]
except KeyError:
return_set[int(round(sf_key+modset[modkey], 0))] = [[sf[0], sf[1][:i] + 'X' + sf[1][i+1:], sf[2], sf[3]+1, sf[4] + '%s at position %s (%s)'%(modkey, i, modset[modkey]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key+modset[modkey], 0))].append([sf[0], sf[1][:i] + 'X' + sf[1][i+1:], sf[2], sf[3]+1, sf[4] + '%s at position %s (%s)'%(modkey, i, modset[modkey]),sf[5],sf[6],sf[7], sf[8]])
j += 1
if j%1000000 == 999999:
k += 1
print '%sM possibilities'%(k)
if sf[5] == 0: # if is the n-term
modset = mods['nterm']
for modkey in modset.keys():
if abs(modset[modkey]) > ignore_changes_below:
try:
return_set[int(round(sf_key+modset[modkey], 0))]
except KeyError:
return_set[int(round(sf_key+modset[modkey], 0))] = [[sf[0], 'X' + sf[1][1:], sf[2], sf[3]+1, sf[4] + '%s at n-terminus 0 (%s)'%(modkey, modset[modkey]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key+modset[modkey], 0))].append([sf[0], 'X' + sf[1][1:], sf[2], sf[3]+1, sf[4] + '%s at n-terminus 0 (%s)'%(modkey, modset[modkey]),sf[5],sf[6],sf[7], sf[8]])
if sf[7] == 1: # if includes the n-terms
modset = mods['cterm']
for modkey in modset.keys():
if abs(modset[modkey]) > ignore_changes_below:
try:
return_set[int(round(sf_key+modset[modkey], 0))]
except KeyError:
return_set[int(round(sf_key+modset[modkey], 0))] = [[sf[0], sf[1][:-1] + 'X', sf[2], sf[3]+1, sf[4] + '%s at n-terminus 0 (%s)'%(modkey, modset[modkey]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key+modset[modkey], 0))].append([sf[0], sf[1][:-1] + 'X', sf[2], sf[3]+1, sf[4] + '%s at n-terminus (%s)'%(modkey, modset[modkey]),sf[5],sf[6],sf[7], sf[8]])
elif type == 'nonspecific reactions':
for sf_key in input_fragset.keys():
for sf in input_fragset[sf_key]:
if sf[3] < how_many:
for i in range(len(sf[1])): # for each aa in the sequence
modset = mods['nsp']
for modkey in modset.keys():
if abs(mods['nsp'][modkey]-aa_weights[sf[1][i]]) > ignore_changes_below:
try:
return_set[int(round(sf_key+mods['nsp'][modkey], 0))]
except KeyError:
return_set[int(round(sf_key+mods['nsp'][modkey], 0))] = [[sf[0], sf[1][:i] + 'X' + sf[1][i+1:], sf[2], sf[3]+1, sf[4] + 'nonspecific %s at position %s (%s)'%(modkey, i, mods['nsp'][modkey]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key+mods['nsp'][modkey], 0))].append([sf[0], sf[1][:i] + 'X' + sf[1][i+1:], sf[2], sf[3]+1, sf[4] + 'nonspecific %s at position %s (%s)'%(modkey, i, mods['nsp'][modkey]),sf[5],sf[6],sf[7], sf[8]])
elif type == 'X':
            # this block, and the whole 'X' option here, is never invoked by _run_rationalize_peak above
for sf_key in input_fragset.keys():
for sf in input_fragset[sf_key]:
if sf[3] < how_many:
for i in range(len(sf[1])): # for each aa in the sequence
modset = mods['X']
for modkey in modset.keys():
if abs(mods['X'][modkey]-aa_weights[sf[1][i]]) > ignore_changes_below:
try:
return_set[int(round(sf_key+mods['X'][modkey], 0))]
except KeyError:
return_set[int(round(sf_key+mods['X'][modkey], 0))] = [[sf[0], sf[1][:i] + 'X' + sf[1][i+1:], sf[2], sf[3]+1, sf[4] + 'nonspecific %s at position %s (%s)'%(modkey, i, mods['X'][modkey]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key+mods['X'][modkey], 0))].append([sf[0], sf[1][:i] + 'X' + sf[1][i+1:], sf[2], sf[3]+1, sf[4] + 'nonspecific %s at position %s (%s)'%(modkey, i, mods['X'][modkey]),sf[5],sf[6],sf[7], sf[8]])
elif type == 'crosslinks':
# look for every combination of the crosslinking partners
# conceptually, the code works like post translational modifications
for sf_key in input_fragset.keys():
for sf in input_fragset[sf_key]:
if sf[3] < how_many:
k = len(sf[1])
for i in range(len(sf[1])-1): # for each aa in the sequence
for j in range(i+1,len(sf[1])):
coupling1 = '%s%s'%(sf[1][i], sf[1][j])
coupling2 = '%s%s'%(sf[1][j], sf[1][i])
coupling3 = 'XX'
coupling4 = 'XX'
if i==0:
                                    coupling3 = '0%s'%(sf[1][j])
                                    coupling4 = '%s0'%(sf[1][j])
elif j==0:
                                    coupling3 = 'O%s'%(sf[1][j])
                                    coupling4 = '%sO'%(sf[1][j])
if coupling1 in mods['crosslinks'].keys():
if mods['crosslinks'][coupling1] > ignore_changes_below:
modsdic = mods['crosslinks'][coupling1]
for rxn in modsdic.keys():
try:
return_set[int(round(sf_key+modsdic[rxn], 0))]
except KeyError:
return_set[int(round(sf_key+modsdic[rxn], 0))] = [[sf[0], sf[1][:i] + 'X' + sf[1][i+1:j] + 'X' + sf[1][j+1:], sf[2], sf[3]+1, sf[4] + 'crosslink %s between positions %s and %s (%s)'%(rxn, i, j, modsdic[rxn]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key+modsdic[rxn], 0))].append([sf[0], sf[1][:i] + 'X' + sf[1][i+1:j] + 'X' + sf[1][j+1:], sf[2], sf[3]+1, sf[4] + ' crosslink %s between positions %s and %s (%s)'%(rxn, i, j, modsdic[rxn]),sf[5],sf[6],sf[7], sf[8]])
elif coupling2 in mods['crosslinks'].keys():
if mods['crosslinks'][coupling2] > ignore_changes_below:
modsdic = mods['crosslinks'][coupling2]
for rxn in modsdic.keys():
try:
return_set[int(round(sf_key+modsdic[rxn], 0))]
except KeyError:
return_set[int(round(sf_key+modsdic[rxn], 0))] = [[sf[0], sf[1][:i] + 'X' + sf[1][i+1:j] + 'X' + sf[1][j+1:], sf[2], sf[3]+1, sf[4] + 'crosslink %s between positions %s and %s (%s)'%(rxn, i, j, modsdic[rxn]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key+modsdic[rxn], 0))].append([sf[0], sf[1][:i] + 'X' + sf[1][i+1:j] + 'X' + sf[1][j+1:], sf[2], sf[3]+1, sf[4] + 'crosslink %s between positions %s and %s (%s)'%(rxn, i, j, modsdic[rxn]),sf[5],sf[6],sf[7], sf[8]])
elif coupling3 in mods['crosslinks'].keys():
if mods['crosslinks'][coupling3] > ignore_changes_below:
modsdic = mods['crosslinks'][coupling3]
for rxn in modsdic.keys():
try:
return_set[int(round(sf_key+modsdic[rxn], 0))]
except KeyError:
return_set[int(round(sf_key+modsdic[rxn], 0))] = [[sf[0], sf[1][:i] + 'X' + sf[1][i+1:j] + 'X' + sf[1][j+1:], sf[2], sf[3]+1, sf[4] + 'crosslink %s between positions %s and %s (%s)'%(rxn, i, j, modsdic[rxn]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key+modsdic[rxn], 0))].append([sf[0], sf[1][:i] + 'X' + sf[1][i+1:j] + 'X' + sf[1][j+1:], sf[2], sf[3]+1, sf[4] + 'crosslink %s between positions %s and %s (%s)'%(rxn, i, j, modsdic[rxn]),sf[5],sf[6],sf[7], sf[8]])
elif coupling4 in mods['crosslinks'].keys():
if mods['crosslinks'][coupling4] > ignore_changes_below:
modsdic = mods['crosslinks'][coupling4]
for rxn in modsdic.keys():
try:
return_set[int(round(sf_key+modsdic[rxn], 0))]
except KeyError:
return_set[int(round(sf_key+modsdic[rxn], 0))] = [[sf[0], sf[1][:i] + 'X' + sf[1][i+1:j] + 'X' + sf[1][j+1:], sf[2], sf[3]+1, sf[4] + 'crosslink %s between positions %s and %s (%s)'%(rxn, i, j, modsdic[rxn]),sf[5],sf[6],sf[7], sf[8]]]
else:
return_set[int(round(sf_key+modsdic[rxn], 0))].append([sf[0], sf[1][:i] + 'X' + sf[1][i+1:j] + 'X' + sf[1][j+1:], sf[2], sf[3]+1, sf[4] + 'crosslink %s between positions %s and %s (%s)'%(rxn, i, j, modsdic[rxn]),sf[5],sf[6],sf[7], sf[8]])
# enter the return set into the input set.
redundancy_count = 0
redundancy_filter_on = 0
for key in return_set.keys():
try:
input_fragset[key]
except KeyError:
input_fragset[key] = return_set[key] # if its not there, append the whole list
else:
if redundancy_filter_on:
for return_fragment in return_set[key]: # if it is there, append the elements of the return list to the existing input list
for input_fragment in input_fragset[key]:
if input_fragment[5] == return_fragment[5] and input_fragment[6] == return_fragment[6] and input_fragment[0] == return_fragment[0] and input_fragment[4] == return_fragment[4]:
redundancy_count += 1
break
else:
input_fragset[key].append(return_fragment)
else:
input_fragset[key] += return_set[key]
total_count = 0
for key in input_fragset.keys():
total_count += len(input_fragset[key])
print '%s possibilities cumulative'%(total_count)
return total_count, input_fragset
def _filter_rationalization_fragset(self, input_set):
skeys = input_set.keys()
skeys.sort()
actual = float(string.split(self.rtop.mw_string.get())[0])
# filter and remove redundancies.
return_set = {}
redundancy_count = 0
sizefiltercount = 0
for key in skeys:
if abs(key-actual) < self.my_filter_resolution*actual:
return_set[key] = []
for frag1 in input_set[key]:
if len(return_set[key]) == 0:
return_set[key].append(frag1)
else:
for frag2 in return_set[key]:
if frag1[5]==frag2[5] and frag1[6]==frag2[6] and frag1[0]==frag2[0] and frag1[4]==frag2[4]:
redundancy_count += 1
break
else:
return_set[key].append(frag1)
else:
sizefiltercount += len(input_set[key])
total_count = 0
for key in return_set.keys():
total_count += len(return_set[key])
skeys = return_set.keys()
skeys.sort()
if len(skeys) == 0:
print 'no possible modified fragments match the peak molecular weight'
return {}
print '%s filtered by molecular weight spanning %s to %s'%(sizefiltercount, skeys[0], skeys[-1])
print '%s remaining in %s molecular weight bins'%(total_count, len(return_set.keys()))
print '%s redundancies'%(redundancy_count)
aa_weights = {'A':71.09, 'C':103.15, 'D':115.09, 'E':129.12, 'F':147.18,
'G':57.05, 'H':137.14, 'I':113.16, 'K':128.17, 'L':113.16,
'M':131.19, 'N':114.11, 'P':97.12, 'Q':128.14, 'R':156.19,
'S':87.08, 'T':101.11, 'V':99.14, 'W':186.21, 'Y':163.18, 'X':0}
fout = open('./rationalization_out.txt', 'w')
for key in skeys:
fout.write('\n\n %s daltons (+/- %s)\n'%(key, key * self.my_filter_resolution))
for frag in return_set[key]:
fragwt = 18.0
for aa in frag[1]:
fragwt += aa_weights[aa]
fout.write('%3s %s %s %s %s %s %s\n'%(frag[5], frag[6], frag[2], frag[3], frag[8], frag[1], frag[4]))
fout.close()
return return_set
def _recalculate_with_fragment_filter(self):
self.examination_fragments = {}
cursel = self.rtop.fragselection_radio.getcurselection()
# capture the selected fragments as input to run_rationalize_peak
calculation_fragments = {}
for sel in cursel:
tokens = string.split(sel)
chain1, nterm1, cterm1 = tokens[1], tokens[3], tokens[5]
print chain1, nterm1, cterm1
for key in self.starting_fragments.keys():
for st_frag in self.starting_fragments[key]:
chain2, nterm2, cterm2 = st_frag[0], st_frag[5], st_frag[6]
if int(nterm1) == int(nterm2) and int(cterm1) == int(cterm2) and int(chain1) == int(chain2):
wt = key
try:
calculation_fragments[wt]
except KeyError:
calculation_fragments[wt] = [st_frag]
else:
calculation_fragments[wt].append(st_frag)
break
self._run_rationalize_peak(calculation_fragments)
def toggle_fragments_for_filter(self):
list = []
for i in range(self.rtop.fragselection_radio.numbuttons()):
self.rtop.fragselection_radio.invoke(i)
def update_rationalization_output(self, input_modified_fragments):
# first copy over the currently selected indices for regeneration
if len(self.examination_fragments.keys()) == 0:
self.examination_fragments = input_modified_fragments
# first make a list of indices that are covered by examination fragments
# make a table of all fragments that are affected by the indices covered by these
sequences = self.experiment.get_protein_sequences()
indices = []
i = 0
for seq in sequences:
indices.append([])
for aa in seq:
indices[-1].append([])
for exkey in self.examination_fragments.keys():
for exfrag in self.examination_fragments[exkey]:
if exfrag[0] == i:
for j in range(exfrag[5], exfrag[6]):
indices[i][j].append(exfrag)
i += 1
stored_fragment_selection_indices = []
try:
self.rtop.fragselection_radio
except AttributeError:
pass
else:
cur_selection = self.rtop.fragselection_radio.getcurselection()
numbuttons = self.rtop.fragselection_radio.numbuttons()
for i in range(numbuttons):
stored_fragment_selection_indices.append(0)
for i in range(len(cur_selection)):
stored_fragment_selection_indices[self.rtop.fragselection_radio.index(cur_selection[i])] = 1
# first destroy any old copy of the output frame contents
self.rtop.fragment_options_frame.destroy()
self.rtop.output_frame.destroy()
# and recreate frames
self.rtop.fragment_options_frame = Frame(self.rtop.outframe, width=600, height=200)
self.rtop.output_frame = Frame(self.rtop.outframe, width=600, height=400)
# and subframes
self.rtop.fragment_buttons_frame = Frame(self.rtop.fragment_options_frame)
self.rtop.fragment_radio_frame = Pmw.ScrolledFrame(self.rtop.fragment_options_frame, usehullsize=1, hull_width=600, hull_height=200)
# and widgets
self.rtop.apply_fragment_filter_button = Button(self.rtop.fragment_buttons_frame, text='Recalculate', command=self._recalculate_with_fragment_filter)
# first make fragments available for selection/deselection and filtering
tokens = {}
tokens_counter = {}
for key in self.examination_fragments.keys():
for frag in self.examination_fragments[key]:
if frag[0] >= 0 and frag[0] < 10:
part1 = '0%s'%(frag[0])
elif frag[0] >= 10:
part1 = '%s'%(frag[0])
if frag[5] >= 0 and frag[5] < 10:
part2 = '000%s'%(frag[5])
elif frag[5] >= 10 and frag[5] < 100:
part2 = '00%s'%(frag[5])
elif frag[5] >= 100 and frag[5] < 1000:
part2 = '0%s'%(frag[5])
elif frag[5] >= 1000:
part2 = '%s'%(frag[5])
if frag[6] >= 0 and frag[6] < 10:
part3 = '000%s'%(frag[6])
elif frag[6] >= 10 and frag[6] < 100:
part3 = '00%s'%(frag[6])
elif frag[6] >= 100 and frag[6] < 1000:
part3 = '0%s'%(frag[6])
elif frag[6] >= 1000:
part3 = '%s'%(frag[6])
index = '%s %s %s'%(part1, part2, part3)
if index not in tokens.keys():
tokens[index] = [frag[0], frag[1], frag[5], frag[6], frag[7], frag[8]]
tokens_counter[index] = 1
else:
tokens_counter[index] += 1
stokens = tokens.keys()
stokens.sort()
tokenlist = []
for token in stokens:
stuff = string.split(token, " ")
sequence = tokens[token][1]
            tokenlist.append('chain %s positions %s to %s %6.2famu %s possibilities - %s'%(int(stuff[0]), int(stuff[1]), int(stuff[2]), tokens[token][5], tokens_counter[token], sequence))
# and create the radiobuttons
self.rtop.fragselection_radio = Pmw.RadioSelect(self.rtop.fragment_radio_frame.interior(),
buttontype = 'checkbutton',
orient = 'vertical',
labelpos = 'n',
pady = 1,
command = None,
label_text = 'Deselect uninteresting fragments:',
selectmode = 'multiple')
if len(stored_fragment_selection_indices) != len(tokenlist):
for text in tokenlist:
self.rtop.fragselection_radio.add(text)
self.rtop.fragselection_radio.invoke(text)
else:
j = 0
for text in tokenlist:
self.rtop.fragselection_radio.add(text)
if stored_fragment_selection_indices[j]:
self.rtop.fragselection_radio.invoke(text)
j += 1
self.rtop.toggle_fragments_button = Button(self.rtop.fragment_buttons_frame, text='Toggle selection', command=self.toggle_fragments_for_filter)
self.rtop.fragselection_radio.pack(side = TOP, expand = YES, fill=BOTH, pady=10)
self.rtop.apply_fragment_filter_button.pack(side=LEFT, anchor='nw', expand=NO, fill=NONE)
self.rtop.toggle_fragments_button.pack(side=LEFT, anchor='nw', expand=NO, fill=NONE)
self.rtop.fragment_buttons_frame.pack(side=TOP, expand=NO, fill=X)
self.rtop.fragment_radio_frame.pack(side=TOP, expand=YES, fill=X)
self.rtop.fragment_options_frame.pack(side=TOP, expand=NO, fill=X)
self.rtop.accuracy_notebook = Pmw.NoteBook(self.rtop.output_frame)
self.rtop.accuracy_notebook.pack(fill=BOTH, expand=1)
optimal_weight = float(string.split(self.rtop.mw_string.get())[0])
# find the range of accuracies
closest = 1000.0
furthest = 0.0
for xfkey in self.examination_fragments.keys():
if abs(optimal_weight-xfkey) > furthest:
furthest = int(math.ceil(abs(optimal_weight-xfkey)))
if abs(optimal_weight-xfkey) < closest:
closest = int(math.floor(abs(optimal_weight-xfkey)))
increment = int(math.ceil((furthest-closest)/4.0))
if increment > 3.0:
bottomrange = [0.0, closest+increment]
secondrange = [bottomrange[1], bottomrange[1]+increment]
thirdrange = [secondrange[1], secondrange[1]+increment]
fourthrange = [thirdrange[1], furthest+1]
range_keys = ['< %s amu'%(bottomrange[1]),
'%s-%s amu'%(secondrange[0],secondrange[1]),
'%s-%s amu'%(thirdrange[0], thirdrange[1]),
'%s-%s amu'%(fourthrange[0],fourthrange[1])]
ranges = {range_keys[0]:bottomrange,
range_keys[1]:secondrange,
range_keys[2]:thirdrange,
range_keys[3]:fourthrange}
elif increment > 2.0:
increment = int(math.ceil((furthest-closest)/3.0))
bottomrange = [0.0, closest+increment]
secondrange = [bottomrange[1], bottomrange[1]+increment]
thirdrange = [secondrange[1], secondrange[1]+increment]
range_keys = ['< %s amu'%(bottomrange[1]),
'%s-%s amu'%(secondrange[0],secondrange[1]),
'%s-%s amu'%(thirdrange[0], thirdrange[1])]
ranges = {range_keys[0]:bottomrange,
range_keys[1]:secondrange,
range_keys[2]:thirdrange}
elif increment > 1.0:
increment = int(math.ceil((furthest-closest)/2.0))
bottomrange = [0.0, closest+increment]
secondrange = [bottomrange[1], bottomrange[1]+increment]
range_keys = ['< %s amu'%(bottomrange[1]),
'%s-%s amu'%(secondrange[0],secondrange[1])]
ranges = {range_keys[0]:bottomrange,
range_keys[1]:secondrange}
else:
increment = int(math.ceil((furthest)))
bottomrange = [0.0, closest+increment]
range_keys = ['< %s amu'%(bottomrange[1])]
ranges = {range_keys[0]:bottomrange}
        # build a notebook page, with mutation and modification trees, for each accuracy range
trees = {}
apply_buttons = {}
i = -1
for range_key in range_keys:
i += 1
page = self.rtop.accuracy_notebook.add(range_key)
self.rtop.accuracy_notebook.tab(range_key).focus_set()
trees[range_key] = [None, None]
apply_buttons[range_key] = [None, None]
treepanes = Pmw.PanedWidget(page,
hull_borderwidth=1,
orient='horizontal',
hull_relief='sunken',
hull_width=250)
mutation_pane = treepanes.add('mutation pane', min=.2, max=.8, size=0.5)
modification_pane = treepanes.add('modification pane', min=.2, max=.8, size=0.5)
# mutation tree
mut_scframe = Pmw.ScrolledFrame(mutation_pane, horizflex='elastic', vertflex='elastic', vscrollmode='static', usehullsize=1, hull_height=800)
mod_scframe = Pmw.ScrolledFrame(modification_pane, horizflex='elastic', vertflex='elastic', vscrollmode='static', usehullsize=1, hull_height=800)
mut_button_box = Pmw.ButtonBox(mutation_pane)
mod_button_box = Pmw.ButtonBox(modification_pane)
apply_buttons[range_key][0] = mut_button_box.add('Apply')
apply_buttons[range_key][0].config(state='disabled')
apply_buttons[range_key][1] = mod_button_box.add('Apply')
apply_buttons[range_key][1].config(state='disabled')
def _update_mut_apply_button(widget, args, range_key):
text = args
tokens = string.split(args, '} {')
if len(tokens) == 1 and len(args) > 0:
apply_buttons[range_key][0].config(state='active')
else:
apply_buttons[range_key][0].config(state='disabled')
def _update_mod_apply_button(widget, args, range_key):
text = args
tokens = string.split(args, '} {')
if len(tokens) == 1 and len(args) > 0:
apply_buttons[range_key][1].config(state='active')
else:
apply_buttons[range_key][1].config(state='disabled')
c_lambda = lambda widget, args, x=range_key:_update_mut_apply_button(widget, args, x)
trees[range_key][0] = bwidget.Tree(mut_scframe.interior(), dragenabled=0, height=800, selectcommand=c_lambda)
c_lambda = lambda widget, args, x=range_key:_update_mod_apply_button(widget, args, x)
trees[range_key][1] = bwidget.Tree(mod_scframe.interior(), dragenabled=0, height=800, selectcommand=c_lambda)
def _apply_mutation(key):
print trees[key][0]
def _apply_update_weight_tags(key):
weight_tags = {}
# find the node
x = trees[key][1].selection_get()
tokens = string.split(x[0])
# find the sequence
sequences = self.experiment.get_protein_sequences()
sequence = sequences[int(tokens[0])]
# the modification
modification = ""
modification_tokens = tokens[3:-8]
for mod_token in modification_tokens:
modification += mod_token
# and the weight
weight = float(tokens[-5][1:-1])
# and send it back to the experiment
print 'weight tags %s'%(int(tokens[1])+int(tokens[-6]))
print weight, modification, sequence
print self.parent.parent.parent.parent.parent
self.parent.parent.parent.parent.parent.update_modification_weights({sequence:[[int(tokens[1])+int(tokens[-6]), weight]]})
# and rerun the analysis, closing this rationalization dialog
# recalculate fragments
self.experiment._calculate_single_reaction_fragments_dictionary()
self.experiment._calculate_unreactive_fragments_dictionary()
self.experiment._calculate_all_possible_fragments_dictionary()
# recognize peaks
self.parent.parent.parent.set_peak_rec_possible_peaks(self.experiment.get_single_reaction_fragment_weights(0))
self.parent.parent.parent.set_all_fragment_weights(self.experiment.get_all_possible_fragments(0))
self.parent.parent.parent.calculate_peaks()
self.parent.parent.parent.draw_PlotPanels()
self.parent.parent.parent.draw_peaks()
c_lambda = lambda a=range_key: _apply_mutation(a)
apply_buttons[range_key][0].config(command=c_lambda)
c_lambda = lambda a=range_key: _apply_update_weight_tags(a)
apply_buttons[range_key][1].config(command=c_lambda)
# collect altered fragments in this accuracy range
# make a dictionary of fragments, indexed by comment, for this accuracy range
muts_dict = {}
mods_dict = {}
for weight in self.examination_fragments.keys():
# chain, sequence, # mutations, # modifications, comment, nterm, cterm, iscterm, original weight
if abs(weight-optimal_weight) >= ranges[range_key][0] and abs(weight-optimal_weight) < ranges[range_key][1]:
for altered_frag in self.examination_fragments[weight]:
if altered_frag[2] == 1 and altered_frag[3] == 0:
tokens = string.split(altered_frag[4])
if tokens[1] == 'deletion' or tokens[1] == 'insertion':
key = '%s %s'%(tokens[0], tokens[1])
else:
key = '%s %s %s'%(tokens[0], tokens[1], tokens[2])
try:
muts_dict[key]
except KeyError:
muts_dict[key] = [altered_frag]
else:
muts_dict[key].append(altered_frag)
elif altered_frag[2] == 0 and altered_frag[3] == 1:
tokens = string.split(altered_frag[4])
key = ""
for j in range(0,len(tokens)-4):
key += '%s '%(tokens[j])
try:
mods_dict[key]
except KeyError:
mods_dict[key] = [altered_frag]
else:
mods_dict[key].append(altered_frag)
print 'range %s - %s in muts, %s in mods'%(range_key, len(muts_dict.keys()), len(mods_dict.keys()))
tree_store1 = {}
tree_store2 = {}
tree_store3 = {}
# query the host experimental PlotWindow for any peaks that are not rationalized
unrationalized_peak_indices = self.parent.parent.parent.get_unrationalized_peaks()
rationalized_peaks = self.parent.parent.parent.get_reactions()
for muts_key in muts_dict.keys():
tree_store1[muts_key] = [0, 'root', '(%s) %s'%(len(muts_dict[muts_key]), muts_key)]
i = 1
for mutated_fragment in muts_dict[muts_key]:
new_justification_count = 0
new_unjustification_count = 0
justified_peaks = []
unjustified_peaks = []
tokens = string.split(mutated_fragment[4])
if tokens[-2] == 'insertion':
continue
new_justification_count = 0
for unrationalized_peak_index in unrationalized_peak_indices:
position = int(tokens[-2]) + int(mutated_fragment[5])
wt_alteration = float(tokens[-1][1:-1])
for other_fragment_object in self.experiment.get_all_possible_fragment_objects():
nterm, cterm = other_fragment_object.get_nterm_index(), other_fragment_object.get_cterm_index()
if position >= nterm and position <= cterm:
# consider the alteration to have affected the fragment
wt = other_fragment_object.get_weight()
if abs((wt + wt_alteration) - self.parent.x[unrationalized_peak_index]) < (wt + wt_alteration) * self.my_filter_resolution:
new_justification_count += 1
justified_peaks.append([self.parent.x[unrationalized_peak_index], other_fragment_object])
# next see how many identified peaks would no longer be justified
# take the reaction profiles and see if it contains the position
for rationalized_peak in rationalized_peaks:
position = int(tokens[-2]) + int(mutated_fragment[5])
possible_fragments = rationalized_peak.get_possible_fragments()
if len(possible_fragments) != 1:
continue
nterm, cterm = possible_fragments[0].get_nterm_index(), possible_fragments[0].get_cterm_index()
if position >= nterm and position <= cterm:
new_unjustification_count += 1
unjustified_peaks.append([possible_fragments[0].get_weight(), possible_fragments[0]])
info = '%s %s %s %s (gain %s, lose %s)'%(mutated_fragment[0], mutated_fragment[5], mutated_fragment[6], mutated_fragment[4], new_justification_count, new_unjustification_count)
tree_store2[info] = [i, muts_key, info, new_justification_count, new_unjustification_count]
i += 1
for justified_peak in justified_peaks:
new_info = '%s justified by %s %s %s'%(justified_peak[0], justified_peak[1].get_nterm_index(), justified_peak[1].get_cterm_index(), justified_peak[1].get_chain())
tree_store3[new_info] = [i, info, new_info]
i += 1
for unjustified_peak in unjustified_peaks:
new_info = '%s is unjustified by %s %s %s'%(unjustified_peak[0], unjustified_peak[1].get_nterm_index(), unjustified_peak[1].get_cterm_index(), unjustified_peak[1].get_chain())
tree_store3[new_info] = [i, info, new_info]
i += 1
# now build the tree with the stored nodes, adding the number of justified peaks to the top level node info
for node1_key in tree_store1.keys():
node1 = tree_store1[node1_key]
# find the highest justification count
max_just = 0
for node2_key in tree_store2.keys():
node2 = tree_store2[node2_key]
if node2[1] == node1_key:
if node2[3] > max_just:
max_just = node2[3]
max_unjust = 0
for node2_key in tree_store2.keys():
node2 = tree_store2[node2_key]
if node2[1] == node1_key:
if node2[4] > max_unjust:
max_unjust = node2[4]
node1[2] += ' (gain %s, lose %s)'%(max_just, max_unjust)
trees[range_key][0].insert(0, 'root', node1_key, text=node1[2])
for node2_key in tree_store2.keys():
node2 = tree_store2[node2_key]
trees[range_key][0].insert(node2[0], node2[1], node2[2], text=node2[2])
for node3_key in tree_store3.keys():
node3 = tree_store3[node3_key]
trees[range_key][0].insert(node3[0], node3[1], node3[2], text=node3[2])
tree_store1 = {}
tree_store2 = {}
tree_store3 = {}
for mods_key in mods_dict.keys():
tree_store1[mods_key] = [0, 'root', '(%s) %s'%(len(mods_dict[mods_key]), mods_key)]
i = 1
for modified_fragment in mods_dict[mods_key]:
new_justification_count = 0
justified_peaks = []
new_unjustification_count = 0
unjustified_peaks = []
tokens = string.split(modified_fragment[4])
if tokens[-2] == 'insertion':
continue
for unrationalized_peak_index in unrationalized_peak_indices:
if tokens[-2] == 'n-terminus':
position = int(modified_fragment[5])
else:
position = int(tokens[-2]) + int(modified_fragment[5])
wt_alteration = float(tokens[-1][1:-1])
for other_fragment_object in self.experiment.get_all_possible_fragment_objects():
nterm, cterm = other_fragment_object.get_nterm_index(), other_fragment_object.get_cterm_index()
if position >= nterm and position <= cterm:
# consider the alteration to have affected the fragment
wt = other_fragment_object.get_weight()
if abs((wt + wt_alteration) - self.parent.x[unrationalized_peak_index]) < (wt + wt_alteration) * self.experiment.get_filter_resolution():
new_justification_count += 1
justified_peaks.append([self.parent.x[unrationalized_peak_index], other_fragment_object])
# next see how many identified peaks would no longer be justified
# take the reaction profiles and see if it contains the position
for rationalized_peak in rationalized_peaks:
position = int(tokens[-2]) + int(modified_fragment[5])
possible_fragments = rationalized_peak.get_possible_fragments()
if len(possible_fragments) != 1:
continue
nterm, cterm = possible_fragments[0].get_nterm_index(), possible_fragments[0].get_cterm_index()
if position >= nterm and position <= cterm:
new_unjustification_count += 1
unjustified_peaks.append([possible_fragments[0].get_weight(), possible_fragments[0]])
info = '%s %s %s %s (gain %s, lose %s)'%(modified_fragment[0], modified_fragment[5], modified_fragment[6], modified_fragment[4], new_justification_count, new_unjustification_count)
tree_store2[info] = [i, mods_key, info, new_justification_count, new_unjustification_count]
i += 1
for justified_peak in justified_peaks:
new_info = '%s is justified by %s %s %s %s'%(justified_peak[0], justified_peak[1].get_nterm_index(), justified_peak[1].get_cterm_index(), justified_peak[1].get_chain(), modified_fragment[4])
tree_store3[new_info] = [i, info, new_info]
i += 1
for unjustified_peak in unjustified_peaks:
new_info = '%s is unjustified by %s %s %s %s'%(unjustified_peak[0], unjustified_peak[1].get_nterm_index(), unjustified_peak[1].get_cterm_index(), unjustified_peak[1].get_chain(), modified_fragment[4])
tree_store3[new_info] = [i, info, new_info]
i += 1
# now build the tree with the stored nodes, adding the number of justified peaks to the top level node info
for node1_key in tree_store1.keys():
node1 = tree_store1[node1_key]
# find the highest justification count
max_just = 0
for node2_key in tree_store2.keys():
node2 = tree_store2[node2_key]
if node2[1] == node1_key:
if node2[3] > max_just:
max_just = node2[3]
# find the highest unjustification count
max_unjust = 0
for node2_key in tree_store2.keys():
node2 = tree_store2[node2_key]
if node2[1] == node1_key:
if node2[4] > max_unjust:
max_unjust = node2[4]
node1[2] += ' (gain %s, lose %s)'%(max_just, max_unjust)
trees[range_key][1].insert(0, 'root', node1_key, text=node1[2], selectable=0)
for node2_key in tree_store2.keys():
node2 = tree_store2[node2_key]
trees[range_key][1].insert(node2[0], node2[1], node2[2], text=node2[2], selectable=1)
for node3_key in tree_store3.keys():
node3 = tree_store3[node3_key]
trees[range_key][1].insert(node3[0], node3[1], node3[2], text=node3[2], selectable=0)
trees[range_key][0].pack(side=TOP, expand=YES, fill=BOTH)
trees[range_key][1].pack(side=TOP, expand=YES, fill=BOTH)
mut_button_box.pack(side=TOP, expand=NO, fill=NONE)
mod_button_box.pack(side=TOP, expand=NO, fill=NONE)
mut_scframe.pack(side=TOP, expand=YES, fill=BOTH)
mod_scframe.pack(side=TOP, expand=YES, fill=BOTH)
treepanes.pack(side=TOP, expand=YES, fill=BOTH)
self.rtop.output_frame.pack(side=TOP, expand=YES, fill=BOTH)
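A short worked example of the fragment-weight convention this tool relies on throughout (average residue masses plus roughly 18 amu for the terminal water), mirroring the `fragwt = 18.0 + ...` loop in `_filter_rationalization_fragset`; the three-residue table is a trimmed copy for illustration.

aa_weights = {'G': 57.05, 'A': 71.09, 'S': 87.08}

def fragment_weight(sequence, weights):
    # peptide mass = sum of residue masses + one water (~18.0 amu)
    return 18.0 + sum(weights[aa] for aa in sequence)

print(fragment_weight('GAS', aa_weights))  # 57.05 + 71.09 + 87.08 + 18.0, roughly 233.22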
| 62.564338
| 312
| 0.504554
|
bc43932ed29cbfca431620f76db985662cb4ce2e
| 635
|
py
|
Python
|
advanced/lychrel.py
|
rocket3989/ACCA2020
|
24ae77df2ca428cf761a987fb2bb2f1a35285804
|
[
"MIT"
] | null | null | null |
advanced/lychrel.py
|
rocket3989/ACCA2020
|
24ae77df2ca428cf761a987fb2bb2f1a35285804
|
[
"MIT"
] | null | null | null |
advanced/lychrel.py
|
rocket3989/ACCA2020
|
24ae77df2ca428cf761a987fb2bb2f1a35285804
|
[
"MIT"
] | null | null | null |
def isPali(n):
n = str(n)
for a, b in zip(n, reversed(n)):
if a != b:
return False
return True
while True:
N = int(input())
if N == 0: break
test = 0
while test < 100:
if isPali(N):
break
temp = N
rev = 0
while temp:
rev = rev * 10 + (temp % 10)
temp //= 10
N += rev
test += 1
if test == 0:
print("Already a palindrome")
elif test == 100:
print("Not palindrome after 100 iterations")
else:
print("Palindrome", N, test, "iterations")
| 21.896552
| 53
| 0.434646
|
0744dc60c6fef803be3559a08227c306d3c240eb
| 371
|
py
|
Python
|
finmeter/sentiment/utils/cupy_utils.py
|
mikahama/FinMeter
|
fd1d3d8feb216e6247a1eeac3bac16a9dd235e66
|
[
"Apache-2.0"
] | 5
|
2019-10-06T20:13:32.000Z
|
2021-11-07T14:27:02.000Z
|
finmeter/sentiment/utils/cupy_utils.py
|
mikahama/FinMeter
|
fd1d3d8feb216e6247a1eeac3bac16a9dd235e66
|
[
"Apache-2.0"
] | null | null | null |
finmeter/sentiment/utils/cupy_utils.py
|
mikahama/FinMeter
|
fd1d3d8feb216e6247a1eeac3bac16a9dd235e66
|
[
"Apache-2.0"
] | null | null | null |
import numpy
try:
import cupy
except ImportError:
cupy = None
def get_cupy():
return cupy
def get_array_module(*args, **kwargs):
if cupy is not None:
return cupy.get_array_module(*args, **kwargs)
else:
return numpy
def asnumpy(x):
if cupy is not None:
return cupy.asnumpy(x)
else:
return numpy.asarray(x)
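A minimal sketch of the CPU/GPU-agnostic pattern these helpers enable: the same function works on NumPy arrays, and on CuPy arrays when CuPy is installed.

import numpy

def normalize(x):
    xp = get_array_module(x)      # numpy, or cupy when x lives on the GPU
    return x / xp.linalg.norm(x)

v = numpy.array([3.0, 4.0])
print(asnumpy(normalize(v)))      # [0.6 0.8]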
| 15.458333
| 53
| 0.625337
|
8e13a7c707dddeaced4a145f995b62bdcc9b063f
| 416
|
py
|
Python
|
exceptions.py
|
nultero/jetx
|
91b6642600f11d1cd0a4964661462811086443f5
|
[
"MIT"
] | null | null | null |
exceptions.py
|
nultero/jetx
|
91b6642600f11d1cd0a4964661462811086443f5
|
[
"MIT"
] | null | null | null |
exceptions.py
|
nultero/jetx
|
91b6642600f11d1cd0a4964661462811086443f5
|
[
"MIT"
] | null | null | null |
from sys import exit
def exc_handler(*args, case: str):
cases = {
"invalid fn": "is an unrecognized command"
}
j = " 🚀 jetx error:"
# todo
# dynamics for variadic args
if case in cases.keys():
if len(args) > 0:
s = f"{j} '{args[0]}' {cases[case]}"
else:
s = f"{j} {cases[case]}"
else:
s = j + case
print(s)
exit(0)
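An example invocation matching the single "invalid fn" case defined above (the program exits after printing):

# prints:  🚀 jetx error: 'foo' is an unrecognized command
exc_handler("foo", case="invalid fn")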
| 16
| 50
| 0.473558
|
f7381d635f3c0ce2d25584e3dfa645a0f5a58cc1
| 45
|
py
|
Python
|
easyfilemanager/__init__.py
|
RaphaelNanje/easyfilemanager
|
29cb6ad90dc28de41478ce7ed768917051f0988a
|
[
"MIT"
] | null | null | null |
easyfilemanager/__init__.py
|
RaphaelNanje/easyfilemanager
|
29cb6ad90dc28de41478ce7ed768917051f0988a
|
[
"MIT"
] | null | null | null |
easyfilemanager/__init__.py
|
RaphaelNanje/easyfilemanager
|
29cb6ad90dc28de41478ce7ed768917051f0988a
|
[
"MIT"
] | null | null | null |
from easyfilemanager.core import FileManager
| 22.5
| 44
| 0.888889
|
020ba6bc01ff63283b2005d89e0d8af4e13fbeab
| 2,075
|
py
|
Python
|
setup.py
|
MD-Studio/MDStudio_haddock
|
7e6c04206eef9ad444ecf08ab52a92d69b6230f3
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
MD-Studio/MDStudio_haddock
|
7e6c04206eef9ad444ecf08ab52a92d69b6230f3
|
[
"Apache-2.0"
] | 1
|
2018-05-22T13:12:01.000Z
|
2018-05-22T13:12:01.000Z
|
setup.py
|
MD-Studio/MDStudio_haddock
|
7e6c04206eef9ad444ecf08ab52a92d69b6230f3
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# package: mdstudio_haddock
# file: setup.py
#
# Part of MDStudio HADDOCK, providing access to the HADDOCK web server for
# biomolecular docking.
#
# Copyright © 2019 Marc van Dijk, VU University Amsterdam, the Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
distribution_name = 'mdstudio_haddock'
setup(
name=distribution_name,
version=1.0,
description='Access to the HADDOCK webserver for biomolecular docking',
author="""
Marc van Dijk - VU University - Amsterdam
Paul Visscher - Zefiros Software (www.zefiros.eu)
Felipe Zapata - eScience Center (https://www.esciencecenter.nl/)""",
author_email=['m4.van.dijk@vu.nl', 'f.zapata@esciencecenter.nl'],
url='https://github.com/MD-Studio/mdstudio_haddock',
license='Apache Software License 2.0',
keywords='MDStudio HADDOCK docking web server',
platforms=['Any'],
packages=find_packages(),
package_data={distribution_name: ['schemas/*', 'schemas/endpoints/*']},
py_modules=[distribution_name],
install_requires=['py-graphit', 'pandas'],
extras_require={'test': ['jsonschema']},
include_package_data=True,
zip_safe=True,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
],
)
| 36.403509
| 75
| 0.704096
|
b5bc077b7d661edae5012f71d0f40abbe8089297
| 10,661
|
py
|
Python
|
server.py
|
ebidel/chromium-dashboard
|
76e9d935869d4d48ad7062033b480237d302207d
|
[
"Apache-2.0"
] | null | null | null |
server.py
|
ebidel/chromium-dashboard
|
76e9d935869d4d48ad7062033b480237d302207d
|
[
"Apache-2.0"
] | 3
|
2017-03-16T11:56:53.000Z
|
2019-02-27T22:50:27.000Z
|
server.py
|
ebidel/chromium-dashboard
|
76e9d935869d4d48ad7062033b480237d302207d
|
[
"Apache-2.0"
] | 1
|
2017-09-03T01:23:23.000Z
|
2017-09-03T01:23:23.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'ericbidelman@chromium.org (Eric Bidelman)'
import json
import logging
import os
import time
import webapp2
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import users
import common
import models
import settings
import http2push.http2push as http2push
def normalized_name(val):
return val.lower().replace(' ', '').replace('/', '')
def first_of_milestone(feature_list, milestone, start=0):
for i in xrange(start, len(feature_list)):
f = feature_list[i]
if (str(f['shipped_milestone']) == str(milestone) or
f['impl_status_chrome'] == str(milestone)):
return i
elif (f['shipped_milestone'] == None and
str(f['shipped_android_milestone']) == str(milestone)):
return i
return -1
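# Illustration (not part of the original module): with hand-made feature dicts,
# first_of_milestone() returns the index of the first entry shipped in the given
# milestone, or -1 when nothing matches.
#
#   sample = [
#       {'shipped_milestone': 61, 'shipped_android_milestone': None, 'impl_status_chrome': 'Enabled by default'},
#       {'shipped_milestone': 60, 'shipped_android_milestone': None, 'impl_status_chrome': 'Enabled by default'},
#   ]
#   first_of_milestone(sample, 60)  # -> 1
#   first_of_milestone(sample, 59)  # -> -1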
def first_of_milestone_v2(feature_list, milestone, start=0):
for i in xrange(start, len(feature_list)):
f = feature_list[i]
desktop_milestone = f['browsers']['chrome'].get('desktop', None)
android_milestone = f['browsers']['chrome'].get('android', None)
status = f['browsers']['chrome']['status'].get('text', None)
if (str(desktop_milestone) == str(milestone) or status == str(milestone)):
return i
elif (desktop_milestone == None and str(android_milestone) == str(milestone)):
return i
return -1
def get_omaha_data():
omaha_data = memcache.get('omaha_data')
if omaha_data is None:
result = urlfetch.fetch('https://omahaproxy.appspot.com/all.json')
if result.status_code == 200:
omaha_data = json.loads(result.content)
memcache.set('omaha_data', omaha_data, time=86400) # cache for 24hrs.
return omaha_data
def annotate_first_of_milestones(feature_list, version=None):
try:
omaha_data = get_omaha_data()
win_versions = omaha_data[0]['versions']
# Find the latest canary major version from the list of windows versions.
canary_versions = [x for x in win_versions if x.get('channel') and x.get('channel').startswith('canary')]
LATEST_VERSION = int(canary_versions[0].get('version').split('.')[0])
# TODO(ericbidelman) - memcache this calculation as part of models.py
milestones = range(1, LATEST_VERSION + 1)
milestones.reverse()
versions = [
models.IMPLEMENTATION_STATUS[models.NO_ACTIVE_DEV],
models.IMPLEMENTATION_STATUS[models.PROPOSED],
models.IMPLEMENTATION_STATUS[models.IN_DEVELOPMENT],
]
versions.extend(milestones)
versions.append(models.IMPLEMENTATION_STATUS[models.NO_LONGER_PURSUING])
first_of_milestone_func = first_of_milestone
if version == 2:
first_of_milestone_func = first_of_milestone_v2
last_good_idx = 0
for i, ver in enumerate(versions):
idx = first_of_milestone_func(feature_list, ver, start=last_good_idx)
if idx != -1:
feature_list[idx]['first_of_milestone'] = True
last_good_idx = idx
except Exception as e:
logging.error(e)
class MainHandler(http2push.PushHandler, common.ContentHandler, common.JSONHandler):
def get(self, path, feature_id=None):
# Default to features page.
# TODO: remove later when we want an index.html
if not path:
return self.redirect('/features')
# Default /metrics to CSS ranking.
# TODO: remove later when we want /metrics/index.html
if path == 'metrics' or path == 'metrics/css':
return self.redirect('/metrics/css/popularity')
# Remove trailing slash from URL and redirect. e.g. /metrics/ -> /metrics
if feature_id == '':
return self.redirect(self.request.path.rstrip('/'))
template_data = {}
push_urls = [] # URLs to push in this response.
template_data['embed'] = self.request.get('embed', None) is not None
if path.startswith('features'):
if path.endswith('.xml'): # Atom feed request.
status = self.request.get('status', None)
if status:
feature_list = models.Feature.get_all_with_statuses(status.split(','))
else:
filterby = None
category = self.request.get('category', None)
# Support setting larger-than-default Atom feed sizes so that web
# crawlers can use this as a full site feed.
try:
max_items = int(self.request.get('max-items',
settings.RSS_FEED_LIMIT))
except TypeError:
max_items = settings.RSS_FEED_LIMIT
if category is not None:
for k,v in models.FEATURE_CATEGORIES.iteritems():
normalized = normalized_name(v)
if category == normalized:
filterby = ('category =', k)
break
feature_list = models.Feature.get_all( # Memcached
limit=max_items,
filterby=filterby,
order='-updated')
return self.render_atom_feed('Features', feature_list)
else:
template_data['categories'] = [
(v, normalized_name(v)) for k,v in
models.FEATURE_CATEGORIES.iteritems()]
template_data['IMPLEMENTATION_STATUSES'] = json.dumps([
{'key': k, 'val': v} for k,v in
models.IMPLEMENTATION_STATUS.iteritems()])
template_data['VENDOR_VIEWS'] = json.dumps([
{'key': k, 'val': v} for k,v in
models.VENDOR_VIEWS.iteritems()])
template_data['WEB_DEV_VIEWS'] = json.dumps([
{'key': k, 'val': v} for k,v in
models.WEB_DEV_VIEWS.iteritems()])
template_data['STANDARDS_VALS'] = json.dumps([
{'key': k, 'val': v} for k,v in
models.STANDARDIZATION.iteritems()])
template_data['TEMPLATE_CACHE_TIME'] = settings.TEMPLATE_CACHE_TIME
push_urls = http2push.use_push_manifest('push_manifest_features.json')
elif path.startswith('feature'):
feature = None
try:
feature = models.Feature.get_feature(int(feature_id))
except TypeError:
pass
if feature is None:
self.abort(404)
was_updated = False
if self.request.referer:
was_updated = (self.request.referer.endswith('/admin/features/new') or
'/admin/features/edit' in self.request.referer)
template_data['feature'] = feature
template_data['was_updated'] = was_updated
elif path.startswith('metrics/css/timeline'):
properties = sorted(
models.CssPropertyHistogram.get_all().iteritems(), key=lambda x:x[1])
template_data['CSS_PROPERTY_BUCKETS'] = json.dumps(
properties, separators=(',',':'))
elif path.startswith('metrics/feature/timeline'):
properties = sorted(
models.FeatureObserverHistogram.get_all().iteritems(), key=lambda x:x[1])
template_data['FEATUREOBSERVER_BUCKETS'] = json.dumps(
properties, separators=(',',':'))
elif path.startswith('omaha_data'):
omaha_data = get_omaha_data()
return common.JSONHandler.get(self, omaha_data, formatted=True)
elif path.startswith('samples'):
feature_list = models.Feature.get_shipping_samples() # Memcached
if path.endswith('.json'): # JSON request.
return common.JSONHandler.get(self, feature_list, formatted=True)
elif path.endswith('.xml'): # Atom feed request.
# Support setting larger-than-default Atom feed sizes so that web
# crawlers can use this as a full site feed.
try:
max_items = int(self.request.get('max-items',
settings.RSS_FEED_LIMIT))
except TypeError:
max_items = settings.RSS_FEED_LIMIT
return self.render_atom_feed('Samples', feature_list)
else:
template_data['FEATURES'] = json.dumps(feature_list, separators=(',',':'))
template_data['CATEGORIES'] = [
(v, normalized_name(v)) for k,v in
models.FEATURE_CATEGORIES.iteritems()]
template_data['categories'] = dict([
(v, normalized_name(v)) for k,v in
models.FEATURE_CATEGORIES.iteritems()])
if path.startswith('metrics/'):
push_urls = http2push.use_push_manifest('push_manifest_metrics.json')
# Add Link rel=preload header for h2 push on .html file requests.
if push_urls:
self.response.headers.add_header(
'Link', self._generate_link_preload_headers(push_urls))
self.render(data=template_data, template_path=os.path.join(path + '.html'))
class FeaturesAPIHandler(common.JSONHandler):
def __get_feature_list(self, version=None):
feature_list = models.Feature.get_chronological(version=version) # Memcached
annotate_first_of_milestones(feature_list, version=version)
return feature_list
def get(self, version=None):
if version is None:
version = 1
else:
version = int(version)
KEY = '%s|v%s|all' % (models.Feature.DEFAULT_MEMCACHE_KEY, version)
feature_list = memcache.get(KEY)
if feature_list is None:
feature_list = self.__get_feature_list(version)
memcache.set(KEY, feature_list)
return common.JSONHandler.get(self, feature_list, formatted=True)
class DelayHandler(common.ContentHandler):
def get(self):
delay = self.request.get('delay') or 0
url = self.request.get('url')
if url is None:
return self.response.write('No URL')
time.sleep(int(delay))
result = urlfetch.fetch(url)
if result.status_code == 200:
if url.endswith('.js'):
self.response.headers.add_header('Content-Type', 'application/json')
elif url.endswith('.css'):
self.response.headers.add_header('Content-Type', 'text/css')
self.response.write(result.content)
else:
self.abort(500)
# Main URL routes.
routes = [
(r'/features(?:_v(\d+))?.json', FeaturesAPIHandler),
('/delay', DelayHandler),
('/(.*)/([0-9]*)', MainHandler),
('/(.*)', MainHandler),
]
app = webapp2.WSGIApplication(routes, debug=settings.DEBUG)
app.error_handlers[404] = common.handle_404
if settings.PROD and not settings.DEBUG:
app.error_handlers[500] = common.handle_500
| 35.184818
| 109
| 0.66354
|
dc0046384f4d475db624d52151dea4a558c000f0
| 6,834
|
py
|
Python
|
SOURCES/olCheckLibs/olCheckLib.py
|
OL-GIT/CheckShot
|
40688f4a52d28305d1f642be9317d1451ae8d9a7
|
[
"MIT"
] | null | null | null |
SOURCES/olCheckLibs/olCheckLib.py
|
OL-GIT/CheckShot
|
40688f4a52d28305d1f642be9317d1451ae8d9a7
|
[
"MIT"
] | 1
|
2021-07-16T16:06:19.000Z
|
2021-07-16T16:06:19.000Z
|
SOURCES/olCheckLibs/olCheckLib.py
|
OL-GIT/CheckShot
|
40688f4a52d28305d1f642be9317d1451ae8d9a7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf8 -*-
# ----------------------------------------
### LICENSE
# ----------------------------------------
# MIT License - Copyright (c) 2021 OL-GIT
# ----------------------------------------
# Various Checks
import os
import sys
myOS = (sys.platform)
if myOS == "cygwin":
pgmDir = "/cygdrive/e/OCR/P06/PROGRAM/"
else:
pgmDir = "/home/ol/OCR/P06/PROGRAM/"
libDir = pgmDir + 'olCheckLibs/'
print(libDir)
#sys.path.insert(0, libDir)
sys.path.append(libDir)
print("sys.path :", sys.path)
# from olCheckLibs import olHtmlLib
from olHtmlLib import olHtml, olCss
curDir = os.getcwd()
# --------------------------------------------------------------------------------
### olCol - Console colors
class olCol():
def Purple(text):
print('\033[95m'+text)
def Cyan(text):
print('\033[96m'+text)
def DarkCyan(text):
print('\033[36m'+text)
def Blue(text):
print('\033[94m'+text)
def Green(text):
print('\033[92m'+text)
def Yellow(text):
print('\033[93m'+text,end='')
def Red(text):
print('\033[91m'+text)
def Bold(text):
print('\033[1m'+text)
def UnderLine(text):
print('\033[4m'+text)
def End():
print('\033[0m',end='')
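# Usage sketch (illustrative only, not part of the original module); the text
# passed to the helpers is a placeholder:
#   olCol.Yellow("processing ")   # yellow text, printed without a newline
#   olCol.End()                   # restore the default console colour
#   olCol.Red("error")            # red text with a newline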
# --------------------------------------------------------------------------------
### Lines - Console lines
class olPr():
def eLine():
print("")
def sLine():
print("------------------------------------------------------------------------")
def bLine():
print("# ==================================================================== #")
def dLine1():
print("########################################################################")
def dLine2():
print("# #")
def newLine():
print("\n")
# --------------------------------------------------------------------------------
### HTML Reports
class olReports():
# ----------------------------------------
### WRITE REPORT START
def reportStart(leftField):
org_stdout = sys.stdout
with open('report.htm', 'a') as olReport:
sys.stdout = olReport # set output to olReport
olCss.TRTD()
print(" ",leftField)
olCss.TDTD700()
sys.stdout = org_stdout # Back to std output
### WRITE REPORT END
def reportEnd():
org_stdout = sys.stdout
with open('report.htm', 'a') as olReport:
sys.stdout = olReport # set output to olReport
olCss.TDTR()
sys.stdout = org_stdout # Back to std output
# --------------------------------------------------------------------------------
### Verifications
class olChecks():
# ----------------------------------------
### Is file empty or missing ?
def isEmpty(file):
print("\033[93m --- Function isEmpty \033[0m")
size = os.stat(file).st_size
if size > 0:
with open('.bounds','r') as boundfile:
line = boundfile.readline().rstrip()
bounds = line.split()
if not bounds:
print(" .bounds is not empty but contains nothing")
else:
print(" .bounds is not empty")
else:
print(" .bounds is empty")
print(" Please inquire the .bounds file in", curDir)
# ----------------------------------------
### Is file empty or missing ?
def isEmptyHtml(file):
org_stdout = sys.stdout
size = os.stat(file).st_size
leftField = "Bounds"
# print(leftField)
olReports.reportStart(leftField)
with open('report.htm', 'a') as olReport:
sys.stdout = olReport # set output to olReport
if size > 0:
with open('.bounds','r') as boundfile:
line = boundfile.readline().rstrip()
bounds = line.split()
if not bounds:
print("<li> WARNING - .bounds is not empty but contains nothing")
else:
print(" <li> OK - .bounds is not empty : ", line)
else:
print(" <li> WARNING - .bounds is empty")
print(" Please inquire the .bounds file in", shotdir)
sys.stdout = org_stdout # Back to std output
# print(" <li> WARNING - .bounds is missing")
# print(" Please inquire the .bounds file in", curDir)
olReports.reportEnd()
# ----------------------------------------
### Extract start bound
def myStart():
print(" \033[93m--- Function myStart \033[0m")
with open('.bounds','r') as boundfile:
for line in boundfile:
bounds = line.split()
if bounds:
mystart = int(bounds[0])
else:
print(" .bounds is not empty but contains nothing")
mystart = 0
# print("mystart:", mystart)
return mystart
# ----------------------------------------
### Extract end bound
def myEnd():
print(" \033[93m--- Function myEnd \033[0m")
with open('.bounds','r') as boundfile:
for line in boundfile:
bounds = line.split()
if bounds:
myend = int(bounds[1])
else:
print(" .bounds is not empty but contains nothing")
myend = 0
# print("myend:", myend)
return myend
# ----------------------------------------
### Extract duration
def myBounds():
print(" \033[93m--- Function myBounds \033[0m")
if os.path.isfile('.bounds'):
with open('.bounds','r') as boundfile:
line = boundfile.readline().rstrip()
bounds = line.split()
if bounds:
boundsNb = len(bounds)
if boundsNb == 2:
try:
(bounds[0]) = int(bounds[0])
except:
pass
try:
(bounds[1]) = int(bounds[1])
except:
pass
# print(isinstance(bounds[0], int))
if isinstance(bounds[0], int) == False:
# print(" 1st value is not an integer")
mystart = 0
myend = 0
duration = 1
else:
# print(" 1st value is an integer")
if isinstance(bounds[1], int) == False:
# print(" 2nd value is not an integer")
mystart = 0
myend = 0
duration = 1
else:
# print(" 2nd value is an integer")
mystart = int(bounds[0])
myend = int(bounds[1])
duration = myend - mystart + 1
# print(" Start:", mystart, "| End:", myend)
print(" Expected duration :", duration)
### If there is only 1 bound
elif boundsNb == 1:
mystart = 0
myend = 0
duration = 1
print(" End bound missing")
print(" Unknown duration")
### If bounds contains null
else:
mystart = 0
myend = 0
duration = 1
print(" .bounds is not empty but contains nothing")
print(" Unknown duration")
return mystart, myend, duration
else:
mystart = 0
myend = 0
duration = 0
print(" .bounds missing")
print(" Unknown duration")
return mystart, myend, duration
# ----------------------------------------
### Check number of images
def nbImages(listPics):
print(" \033[93m--- Function nbImages \033[0m")
nbPics = 0
for pic in listPics:
# print("PIC:", pic)
nbPics += 1
print(" Number of pics in shot :", nbPics)
return nbPics
| 25.217712
| 83
| 0.507316
|
d87cbb15bb1addcd03b82a1530958bb7ad7a3f87
| 12,891
|
py
|
Python
|
otp/login/TTAccount.py
|
ksmit799/POTCO-PS
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 8
|
2017-01-24T04:33:29.000Z
|
2020-11-01T08:36:24.000Z
|
otp/login/TTAccount.py
|
ksmit799/Pirates-Online-Remake
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 1
|
2017-03-02T18:05:17.000Z
|
2017-03-14T06:47:10.000Z
|
otp/login/TTAccount.py
|
ksmit799/Pirates-Online-Remake
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 11
|
2017-03-02T18:46:07.000Z
|
2020-11-01T08:36:26.000Z
|
from pandac.PandaModules import *
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import PythonUtil
from otp.otpbase import OTPLocalizer
import HTTPUtil
import RemoteValueSet
import copy
accountServer = base.config.GetString('account-server', '')
#accountServer = launcher.getAccountServer()
print 'TTAccount: accountServer from launcher: ', accountServer
configAccountServer = base.config.GetString('account-server', '')
if configAccountServer:
accountServer = configAccountServer
print 'TTAccount: overriding accountServer from config: ', accountServer
if not accountServer:
accountServer = 'https://toontown.go.com'
print 'TTAccount: default accountServer: ', accountServer
accountServer = URLSpec(accountServer, 1)
def getAccountServer():
return accountServer
TTAccountException = HTTPUtil.HTTPUtilException
class TTAccount:
notify = DirectNotifyGlobal.directNotify.newCategory('TTAccount')
def __init__(self, cr):
self.cr = cr
self.response = None
def createAccount(self, loginName, password, data):
return self.talk('create', data = self._TTAccount__makeLoginDict(loginName, password, data))
def authorize(self, loginName, password):
return self.talk('play', data = self._TTAccount__makeLoginDict(loginName, password))
def createBilling(self, loginName, password, data):
return self.talk('purchase', data = self._TTAccount__makeLoginDict(loginName, password, data))
def setParentPassword(self, loginName, password, parentPassword):
return self.talk('setParentPassword', data = self._TTAccount__makeLoginDict(loginName, password, {
'parentPassword': parentPassword }))
def supportsParentPassword(self):
return 1
def authenticateParentPassword(self, loginName, password, parentPassword):
try:
errorMsg = self.talk('authenticateParentPassword', data = self._TTAccount__makeLoginDict(loginName, parentPassword))
if not errorMsg:
return (1, None)
if self.response.getInt('errorCode') in (5, 72):
return (0, None)
return (0, errorMsg)
        except TTAccountException, e:
            return (0, str(e))
def supportsAuthenticateDelete(self):
return 1
def authenticateDelete(self, loginName, password):
try:
errorMsg = self.talk('authenticateDelete', data = self._TTAccount__makeLoginDict(loginName, password))
if not errorMsg:
return (1, None)
if self.response.getInt('errorCode') in (5, 72):
return (0, None)
return (0, errorMsg)
        except TTAccountException, e:
            return (0, str(e))
def enableSecretFriends(self, loginName, password, parentPassword, enable = 1):
try:
errorMsg = self.talk('setSecretChat', data = self._TTAccount__makeLoginDict(loginName, parentPassword, {
'chat': base.cr.secretChatAllowed,
'secretsNeedParentPassword': base.cr.secretChatNeedsParentPassword }))
if not errorMsg:
return (1, None)
if self.response.getInt('errorCode') in (5, 72):
return (0, None)
return (0, errorMsg)
        except TTAccountException, e:
            return (0, str(e))
def changePassword(self, loginName, password, newPassword):
return self.talk('purchase', data = self._TTAccount__makeLoginDict(loginName, password, {
'newPassword': newPassword }))
def requestPwdReminder(self, email = None, acctName = None):
data = { }
if email is not None:
data['email'] = email
else:
data['accountName'] = acctName
return self.talk('forgotPassword', data)
def cancelAccount(self, loginName, password):
return self.talk('cancel', data = self._TTAccount__makeLoginDict(loginName, password))
def getAccountData(self, loginName, password):
errorMsg = self.talk('get', data = self._TTAccount__makeLoginDict(loginName, password))
if errorMsg:
self.notify.warning('getAccountData error: %s' % errorMsg)
return errorMsg
if self.response.hasKey('errorMsg'):
self.notify.warning("error field is: '%s'" % self.response.getString('errorMsg'))
self.accountData = copy.deepcopy(self.response)
fieldNameMap = {
'em': 'email',
'l1': 'addr1',
'l2': 'addr2',
'l3': 'addr3' }
dict = self.accountData.dict
for fieldName in dict.keys():
if fieldNameMap.has_key(fieldName):
dict[fieldNameMap[fieldName]] = dict[fieldName]
del dict[fieldName]
continue
def getLastErrorMsg(self, forceCustServNum = 0):
errCode = self.response.getInt('errorCode')
if errCode < 100:
msg = self.response.getString('errorMsg')
if forceCustServNum:
msg += ' ' + OTPLocalizer.TTAccountCustomerServiceHelp % self.cr.accountServerConstants.getString('customerServicePhoneNumber')
elif errCode < 200:
msg = self.response.getString('errorMsg')
msg += ' ' + OTPLocalizer.TTAccountCustomerServiceHelp % self.cr.accountServerConstants.getString('customerServicePhoneNumber')
elif errCode >= 500:
msg = OTPLocalizer.TTAccountIntractibleError
msg += ' ' + OTPLocalizer.TTAccountCallCustomerService % self.cr.accountServerConstants.getString('customerServicePhoneNumber')
else:
self.notify.warning('unknown error code class: %s: %s' % (self.response.getInt('errorCode'), self.response.getString('errorMsg')))
msg = self.response.getString('errorMsg')
msg += ' ' + OTPLocalizer.TTAccountCallCustomerService % self.cr.accountServerConstants.getString('customerServicePhoneNumber')
return msg
def _TTAccount__makeLoginDict(self, loginName, password, data = None):
dict = {
'accountName': loginName,
'password': password }
if data:
dict.update(data)
return dict
def makeLoginDict(self, loginName, password, data = None):
return self._TTAccount__makeLoginDict(loginName, password, data)
def talk(self, operation, data = { }):
self.notify.debug('TTAccount.talk()')
for key in data.keys():
data[key] = str(data[key])
        # Required-data validation hooks per operation; the original assertions
        # were stripped from this build, so most branches are intentionally empty.
        if operation in ('play', 'get', 'cancel', 'authenticateParentPassword', 'authenticateDelete', 'authenticateParentPasswordNewStyle', 'authenticateDeleteNewStyle'):
            pass
        if operation == 'authenticateParentUsernameAndPassword':
            pass
        if operation == 'forgotPassword':
            pass
        if operation == 'setParentPassword':
            pass
        if operation == 'setSecretChat':
            pass
        if operation == 'create':
            pass
        if operation == 'purchase':
            if not data.has_key('newPassword'):
                self.notify.error("Internal TTAccount error: need to add 'required data' checking for %s operation" % operation)
op2Php = {
'play': 'play',
'get': 'get',
'cancel': 'cancel',
'create': 'create',
'purchase': 'purchase',
'setParentPassword': 'setSecrets',
'authenticateParentPassword': 'authenticateChat',
'authenticateDelete': 'authDelete',
'setSecretChat': 'setChat',
'forgotPassword': 'forgotPw',
'authenticateParentPasswordNewStyle': 'api/authChat',
'authenticateParentUsernameAndPassword': 'api/authParentChat',
'authenticateDeleteNewStyle': 'api/authDelete' }
newWebOperations = ('authenticateParentPasswordNewStyle', 'authenticateParentUsernameAndPassword', 'authenticateDeleteNewStyle')
url = URLSpec(getAccountServer())
if operation in newWebOperations:
url.setPath('/%s' % op2Php[operation])
else:
url.setPath('/%s.php' % op2Php[operation])
body = ''
if data.has_key('accountName'):
if operation not in newWebOperations:
url.setQuery('n=%s' % URLSpec.quote(data['accountName']))
serverFields = {
'accountName': 'n',
'password': 'p',
'parentPassword': 'sp',
'newPassword': 'np',
'chat': 'chat',
'email': 'em',
'dobYear': 'doby',
'dobMonth': 'dobm',
'dobDay': 'dobd',
'ccNumber': 'ccn',
'ccMonth': 'ccm',
'ccYear': 'ccy',
'nameOnCard': 'noc',
'addr1': 'l1',
'addr2': 'l2',
'addr3': 'l3',
'city': 'city',
'state': 'state',
'country': 'country',
'zip': 'zip',
'referrer': 'ref',
'secretsNeedParentPassword': 'secretsNeedsParentPassword',
'parentPasswordNewStyle': 'pp',
'parentUsername': 'pu',
'userid': 'userid' }
ignoredFields = ('ccType',)
outBoundFields = { }
for fieldName in data.keys():
            if not serverFields.has_key(fieldName):
                if fieldName not in ignoredFields:
                    self.notify.error('unknown data field: %s' % fieldName)
                # fields without a server-side name (e.g. 'ccType') are skipped
                continue
            outBoundFields[serverFields[fieldName]] = data[fieldName]
orderedFields = outBoundFields.keys()
orderedFields.sort()
for fieldName in orderedFields:
if len(body):
body += '&'
body += '%s=%s' % (fieldName, URLSpec.quotePlus(outBoundFields[fieldName]))
self.notify.debug('url=' + url.cStr())
self.notify.debug('body=' + body)
if operation in ('get',):
expectedHeader = 'ACCOUNT INFO'
elif operation in ('play', 'cancel', 'create', 'purchase', 'setParentPassword', 'setSecretChat', 'authenticateParentPassword', 'authenticateDelete', 'forgotPassword', 'authenticateParentPasswordNewStyle', 'authenticateParentUsernameAndPassword', 'authenticateDeleteNewStyle'):
expectedHeader = 'ACCOUNT SERVER RESPONSE'
else:
self.notify.error("Internal TTAccount error: need to set expected response header for '%s' operation" % operation)
self.response = RemoteValueSet.RemoteValueSet(url, self.cr.http, body = body, expectedHeader = expectedHeader)
self.notify.debug(' self.response=' + str(self.response))
if self.response.hasKey('errorCode'):
errorCode = self.response.getInt('errorCode')
self.notify.info('account server error code: %s' % errorCode)
if errorCode == 10:
self.cr.freeTimeExpiresAt = 0
if self.response.hasKey('errorMsg'):
return self.getLastErrorMsg()
        if operation in ('get', 'forgotPassword', 'authenticateDelete', 'play', 'cancel', 'create', 'purchase', 'setParentPassword', 'authenticateParentPassword', 'authenticateParentPasswordNewStyle', 'authenticateParentUsernameAndPassword', 'authenticateDeleteNewStyle'):
            # nothing further to extract from the response for these operations
            pass
        elif operation == 'setSecretChat':
            self.playToken = self.response.getString('playToken')
            self.playTokenIsEncrypted = 1
        else:
            self.notify.error('Internal TTAccount error: need to extract useful data for %s operation' % operation)
def authenticateParentUsernameAndPassword(self, loginName, password, parentUsername, parentPassword):
try:
errorMsg = self.talk('authenticateParentUsernameAndPassword', data = self._TTAccount__makeLoginDict(loginName, password, {
'parentUsername': parentUsername,
'parentPasswordNewStyle': parentPassword,
'userid': loginName }))
if not errorMsg:
return (1, None)
if self.response.getInt('errorCode') in (5, 72):
return (0, None)
return (0, errorMsg)
        except TTAccountException, e:
            return (0, str(e))
| 38.026549
| 284
| 0.594523
|
5275d50e2ecc5cd050960b00cd3843a501f3c11c
| 7,324
|
py
|
Python
|
kuryr_kubernetes/controller/handlers/kuryrnetwork.py
|
openshift-bot/kuryr-kubernetes
|
9577bf398b19c1305468dda749edac31d76d46ee
|
[
"Apache-2.0"
] | null | null | null |
kuryr_kubernetes/controller/handlers/kuryrnetwork.py
|
openshift-bot/kuryr-kubernetes
|
9577bf398b19c1305468dda749edac31d76d46ee
|
[
"Apache-2.0"
] | null | null | null |
kuryr_kubernetes/controller/handlers/kuryrnetwork.py
|
openshift-bot/kuryr-kubernetes
|
9577bf398b19c1305468dda749edac31d76d46ee
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from kuryr_kubernetes import clients
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.handlers import k8s_base
LOG = logging.getLogger(__name__)
class KuryrNetworkHandler(k8s_base.ResourceEventHandler):
"""Controller side of KuryrNetwork process for Kubernetes pods.
`KuryrNetworkHandler` runs on the Kuryr-Kubernetes controller and is
responsible for creating the OpenStack resources associated to the
newly created namespaces, and update the KuryrNetwork CRD status with
them.
"""
OBJECT_KIND = constants.K8S_OBJ_KURYRNETWORK
OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETWORKS
def __init__(self):
super(KuryrNetworkHandler, self).__init__()
self._drv_project = drivers.NamespaceProjectDriver.get_instance()
self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
specific_driver='multi_pool')
self._drv_vif_pool.set_vif_driver()
if self._is_network_policy_enabled():
self._drv_lbaas = drivers.LBaaSDriver.get_instance()
self._drv_svc_sg = (
drivers.ServiceSecurityGroupsDriver.get_instance())
def on_present(self, kuryrnet_crd):
ns_name = kuryrnet_crd['spec']['nsName']
project_id = kuryrnet_crd['spec']['projectId']
kns_status = kuryrnet_crd.get('status', {})
crd_creation = False
net_id = kns_status.get('netId')
if not net_id:
net_id = self._drv_subnets.create_network(ns_name, project_id)
status = {'netId': net_id}
self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
crd_creation = True
subnet_id = kns_status.get('subnetId')
if not subnet_id or crd_creation:
subnet_id, subnet_cidr = self._drv_subnets.create_subnet(
ns_name, project_id, net_id)
status = {'subnetId': subnet_id, 'subnetCIDR': subnet_cidr}
self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
crd_creation = True
if not kns_status.get('routerId') or crd_creation:
router_id = self._drv_subnets.add_subnet_to_router(subnet_id)
status = {'routerId': router_id, 'populated': False}
self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
crd_creation = True
# check labels to create sg rules
ns_labels = kns_status.get('nsLabels', {})
if (crd_creation or
ns_labels != kuryrnet_crd['spec']['nsLabels']):
# update SG and svc SGs
namespace = driver_utils.get_namespace(ns_name)
crd_selectors = self._drv_sg.update_namespace_sg_rules(namespace)
if (self._is_network_policy_enabled() and crd_selectors and
oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
services = driver_utils.get_services()
self._update_services(services, crd_selectors, project_id)
# update status
status = {'nsLabels': kuryrnet_crd['spec']['nsLabels']}
self._patch_kuryrnetwork_crd(kuryrnet_crd, status, labels=True)
def on_finalize(self, kuryrnet_crd):
LOG.debug("Deleting kuryrnetwork CRD resources: %s", kuryrnet_crd)
net_id = kuryrnet_crd.get('status', {}).get('netId')
if net_id:
self._drv_vif_pool.delete_network_pools(
kuryrnet_crd['status']['netId'])
try:
self._drv_subnets.delete_namespace_subnet(kuryrnet_crd)
except k_exc.ResourceNotReady:
LOG.debug("Subnet is not ready to be removed.")
# TODO(ltomasbo): Once KuryrPort CRDs is supported, we should
# execute a delete network ports method here to remove the
# ports associated to the namespace/subnet, ensuring next
# retry will be successful
raise
namespace = {
'metadata': {'name': kuryrnet_crd['spec']['nsName']}}
crd_selectors = self._drv_sg.delete_namespace_sg_rules(namespace)
if (self._is_network_policy_enabled() and crd_selectors and
oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
project_id = kuryrnet_crd['spec']['projectId']
services = driver_utils.get_services()
self._update_services(services, crd_selectors, project_id)
kubernetes = clients.get_kubernetes_client()
LOG.debug('Removing finalizer for KuryrNet CRD %s', kuryrnet_crd)
try:
kubernetes.remove_finalizer(kuryrnet_crd,
constants.KURYRNETWORK_FINALIZER)
except k_exc.K8sClientException:
LOG.exception('Error removing kuryrnetwork CRD finalizer for %s',
kuryrnet_crd)
raise
def _is_network_policy_enabled(self):
enabled_handlers = oslo_cfg.CONF.kubernetes.enabled_handlers
svc_sg_driver = oslo_cfg.CONF.kubernetes.service_security_groups_driver
return ('policy' in enabled_handlers and svc_sg_driver == 'policy')
def _update_services(self, services, crd_selectors, project_id):
for service in services.get('items'):
if not driver_utils.service_matches_affected_pods(
service, crd_selectors):
continue
sgs = self._drv_svc_sg.get_security_groups(service,
project_id)
self._drv_lbaas.update_lbaas_sg(service, sgs)
def _patch_kuryrnetwork_crd(self, kuryrnet_crd, status, labels=False):
kubernetes = clients.get_kubernetes_client()
LOG.debug('Patching KuryrNetwork CRD %s', kuryrnet_crd)
try:
if labels:
kubernetes.patch_crd('status',
kuryrnet_crd['metadata']['selfLink'],
status)
else:
kubernetes.patch('status',
kuryrnet_crd['metadata']['selfLink'],
status)
except k_exc.K8sResourceNotFound:
LOG.debug('KuryrNetwork CRD not found %s', kuryrnet_crd)
except k_exc.K8sClientException:
LOG.exception('Error updating kuryrNetwork CRD %s', kuryrnet_crd)
raise
| 45.775
| 79
| 0.654697
|
9e355d933814a6976f1527a96b6405e1e9cc0dd1
| 58
|
py
|
Python
|
med-cabinet-predictions/__init__.py
|
med-cabinet-5/data-science
|
e43495739da6266c0c76c2b4984c365f1a50789c
|
[
"MIT"
] | 1
|
2020-02-03T16:03:20.000Z
|
2020-02-03T16:03:20.000Z
|
med-cabinet-predictions/__init__.py
|
med-cabinet-5/data-science
|
e43495739da6266c0c76c2b4984c365f1a50789c
|
[
"MIT"
] | null | null | null |
med-cabinet-predictions/__init__.py
|
med-cabinet-5/data-science
|
e43495739da6266c0c76c2b4984c365f1a50789c
|
[
"MIT"
] | null | null | null |
# imports
from .app import create_app
APP = create_app()
| 11.6
| 27
| 0.741379
|
5a5eeb78e48958fde9c9b340d523cfeb16661a6a
| 2,661
|
py
|
Python
|
lib/custom_layers.py
|
1zong2/pggan
|
8ae47a77df817d91634dfeeec8ab26b2888e407a
|
[
"MIT"
] | null | null | null |
lib/custom_layers.py
|
1zong2/pggan
|
8ae47a77df817d91634dfeeec8ab26b2888e407a
|
[
"MIT"
] | 1
|
2022-03-16T09:30:18.000Z
|
2022-03-20T08:37:07.000Z
|
lib/custom_layers.py
|
1zong2/pggan
|
8ae47a77df817d91634dfeeec8ab26b2888e407a
|
[
"MIT"
] | 1
|
2022-03-19T08:08:22.000Z
|
2022-03-19T08:08:22.000Z
|
import math
import torch
import torch.nn as nn
from numpy import prod
def getLayerNormalizationFactor(x):
"""
Get per-layer normalization constant from He’s initializer
https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
"""
size = x.weight.size()
fan_in = prod(size[1:])
return math.sqrt(2.0 / fan_in)
class ConstrainedLayer(nn.Module):
"""
A handy refactor that allows the user to:
- initialize one layer's bias to zero
- apply He's initialization at runtime
"""
def __init__(self,
module,
equalized=True,
lrMul=1.0,
init_bias_to_zero=True):
"""
equalized (bool): if true, the layer's weight should evolve within
the range (-1, 1)
init_bias_to_zero (bool): if true, bias will be initialized to zero
"""
super(ConstrainedLayer, self).__init__()
self.module = module
self.equalized = equalized
if init_bias_to_zero:
self.module.bias.data.fill_(0)
if self.equalized:
self.module.weight.data.normal_(0, 1)
self.module.weight.data /= lrMul
self.weight = getLayerNormalizationFactor(self.module) * lrMul
def forward(self, x):
x = self.module(x)
if self.equalized:
x *= self.weight
return x
class EqualizedConv2d(ConstrainedLayer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
padding=0,
bias=True,
**kwargs):
"""
A nn.Conv2d module with specific constraints
- Shape of nn.Conv2d.weight: (out_channels, in_channels, kernel_size[0], kernel_size[1])
"""
ConstrainedLayer.__init__(self,
nn.Conv2d(in_channels, out_channels,
kernel_size, padding=padding,
bias=bias),
**kwargs)
class EqualizedLinear(ConstrainedLayer):
def __init__(self,
in_features,
out_features,
bias=True,
**kwargs):
"""
A nn.Linear module with specific constraints
- Shape of nn.Linear.weight: (out_features, in_features)
"""
ConstrainedLayer.__init__(self,
nn.Linear(in_features, out_features,
bias=bias), **kwargs)
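# Minimal usage sketch (added illustration; the layer sizes and batch shape are
# arbitrary assumptions, not values taken from the original pggan code).
if __name__ == "__main__":
    conv = EqualizedConv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1)
    fc = EqualizedLinear(in_features=8 * 4 * 4, out_features=1)
    x = torch.randn(2, 3, 4, 4)      # two 3-channel 4x4 inputs
    h = conv(x)                      # output is rescaled by the He factor at runtime
    y = fc(h.view(2, -1))
    print(y.shape)                   # -> torch.Size([2, 1])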
| 29.241758
| 110
| 0.538895
|
d6fb9f36db2530363fcfe8e84047830069cd3e31
| 6,578
|
py
|
Python
|
ucsmsdk/mometa/bios/BiosVProfile.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 78
|
2015-11-30T14:10:05.000Z
|
2022-02-13T00:29:08.000Z
|
ucsmsdk/mometa/bios/BiosVProfile.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 113
|
2015-11-20T09:42:46.000Z
|
2022-03-16T16:53:29.000Z
|
ucsmsdk/mometa/bios/BiosVProfile.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 86
|
2015-12-12T08:22:18.000Z
|
2022-01-23T03:56:34.000Z
|
"""This module contains the general information for BiosVProfile ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class BiosVProfileConsts:
INT_ID_NONE = "none"
POLICY_OWNER_LOCAL = "local"
POLICY_OWNER_PENDING_POLICY = "pending-policy"
POLICY_OWNER_POLICY = "policy"
REBOOT_ON_UPDATE_FALSE = "false"
REBOOT_ON_UPDATE_NO = "no"
REBOOT_ON_UPDATE_TRUE = "true"
REBOOT_ON_UPDATE_YES = "yes"
class BiosVProfile(ManagedObject):
"""This is BiosVProfile class."""
consts = BiosVProfileConsts()
naming_props = set(['name'])
mo_meta = MoMeta("BiosVProfile", "biosVProfile", "bios-prof-[name]", VersionMeta.Version111j, "InputOutput", 0x1ff, [], ["admin", "ls-compute", "ls-config", "ls-server", "ls-server-policy", "pn-policy"], ['orgOrg'], ['biosTokenFeatureGroup', 'biosVfACPI10Support', 'biosVfASPMSupport', 'biosVfAllUSBDevices', 'biosVfAltitude', 'biosVfAssertNMIOnPERR', 'biosVfAssertNMIOnSERR', 'biosVfBMEDMAMitigation', 'biosVfBootOptionRetry', 'biosVfCPUHardwarePowerManagement', 'biosVfCPUPerformance', 'biosVfConsistentDeviceNameControl', 'biosVfConsoleRedirection', 'biosVfCoreMultiProcessing', 'biosVfDDR3VoltageSelection', 'biosVfDRAMClockThrottling', 'biosVfDirectCacheAccess', 'biosVfDramRefreshRate', 'biosVfEnergyPerformanceTuning', 'biosVfEnhancedIntelSpeedStepTech', 'biosVfEnhancedPowerCappingSupport', 'biosVfExecuteDisableBit', 'biosVfFRB2Timer', 'biosVfFrequencyFloorOverride', 'biosVfFrontPanelLockout', 'biosVfIOEMezz1OptionROM', 'biosVfIOENVMe1OptionROM', 'biosVfIOENVMe2OptionROM', 'biosVfIOESlot1OptionROM', 'biosVfIOESlot2OptionROM', 'biosVfIntegratedGraphics', 'biosVfIntegratedGraphicsApertureSize', 'biosVfIntelEntrySASRAIDModule', 'biosVfIntelHyperThreadingTech', 'biosVfIntelTrustedExecutionTechnology', 'biosVfIntelTurboBoostTech', 'biosVfIntelVTForDirectedIO', 'biosVfIntelVirtualizationTechnology', 'biosVfInterleaveConfiguration', 'biosVfLocalX2Apic', 'biosVfLvDIMMSupport', 'biosVfMaxVariableMTRRSetting', 'biosVfMaximumMemoryBelow4GB', 'biosVfMemoryMappedIOAbove4GB', 'biosVfMirroringMode', 'biosVfNUMAOptimized', 'biosVfOSBootWatchdogTimer', 'biosVfOSBootWatchdogTimerPolicy', 'biosVfOSBootWatchdogTimerTimeout', 'biosVfOnboardGraphics', 'biosVfOnboardSATAController', 'biosVfOnboardStorage', 'biosVfOptionROMEnable', 'biosVfOptionROMLoad', 'biosVfOutOfBandManagement', 'biosVfPCHSATAMode', 'biosVfPCILOMPortsConfiguration', 'biosVfPCIROMCLP', 'biosVfPCISlotLinkSpeed', 'biosVfPCISlotOptionROMEnable', 'biosVfPOSTErrorPause', 'biosVfPSTATECoordination', 'biosVfPackageCStateLimit', 'biosVfPanicAndHighWatermark', 'biosVfProcessorC1E', 'biosVfProcessorC3Report', 'biosVfProcessorC6Report', 'biosVfProcessorC7Report', 'biosVfProcessorCMCI', 'biosVfProcessorCState', 'biosVfProcessorEnergyConfiguration', 'biosVfProcessorPrefetchConfig', 'biosVfQPILinkFrequencySelect', 'biosVfQPISnoopMode', 'biosVfQuietBoot', 'biosVfRedirectionAfterBIOSPOST', 'biosVfResumeOnACPowerLoss', 'biosVfSBMezz1OptionROM', 'biosVfSBNVMe1OptionROM', 'biosVfSIOC1OptionROM', 'biosVfSIOC2OptionROM', 'biosVfScrubPolicies', 'biosVfSelectMemoryRASConfiguration', 'biosVfSerialPortAEnable', 'biosVfSparingMode', 'biosVfSriovConfig', 'biosVfTPMPendingOperation', 'biosVfTPMSupport', 'biosVfTrustedPlatformModule', 'biosVfUCSMBootModeControl', 'biosVfUCSMBootOrderRuleControl', 'biosVfUEFIOSUseLegacyVideo', 'biosVfUSBBootConfig', 'biosVfUSBConfiguration', 'biosVfUSBFrontPanelAccessLock', 'biosVfUSBPortConfiguration', 'biosVfUSBSystemIdlePowerOptimizingSetting', 'biosVfVGAPriority', 'biosVfWorkloadConfiguration'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"int_id": MoPropertyMeta("int_id", "intId", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, None, None, None, None, ["none"], ["0-4294967295"]),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version111j, MoPropertyMeta.NAMING, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{1,16}""", [], []),
"policy_level": MoPropertyMeta("policy_level", "policyLevel", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["local", "pending-policy", "policy"], []),
"reboot_on_update": MoPropertyMeta("reboot_on_update", "rebootOnUpdate", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["false", "no", "true", "yes"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x100, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"intId": "int_id",
"name": "name",
"policyLevel": "policy_level",
"policyOwner": "policy_owner",
"rebootOnUpdate": "reboot_on_update",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.child_action = None
self.descr = None
self.int_id = None
self.policy_level = None
self.policy_owner = None
self.reboot_on_update = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "BiosVProfile", parent_mo_or_dn, **kwargs)
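# Usage sketch (illustrative only; the UCS Manager host, credentials and object
# names below are placeholders, not values taken from this generated module):
#   from ucsmsdk.ucshandle import UcsHandle
#   handle = UcsHandle("ucsm-host", "admin", "password")
#   handle.login()
#   handle.add_mo(BiosVProfile(parent_mo_or_dn="org-root", name="example-bios"))
#   handle.commit()
#   handle.logout()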
| 96.735294
| 3,037
| 0.732898
|
01806470228115708569066162a0601009e9786a
| 9,891
|
py
|
Python
|
download_data.py
|
melo-gonzo/StockDataDownload
|
3471591eec2089dcb7f604c1db5ca2e010d9e9a2
|
[
"Apache-2.0"
] | 72
|
2021-02-01T08:40:53.000Z
|
2022-02-10T07:44:00.000Z
|
download_data.py
|
melo-gonzo/StockDataDownload
|
3471591eec2089dcb7f604c1db5ca2e010d9e9a2
|
[
"Apache-2.0"
] | 1
|
2021-02-03T13:51:21.000Z
|
2021-02-04T07:49:37.000Z
|
download_data.py
|
melo-gonzo/StockDataDownload
|
3471591eec2089dcb7f604c1db5ca2e010d9e9a2
|
[
"Apache-2.0"
] | 13
|
2021-02-01T08:32:59.000Z
|
2021-12-21T20:04:32.000Z
|
import re
import os
import time
import requests
import datetime
import argparse
import numpy as np
import multiprocessing
from functools import partial
def split_crumb_store(v):
if v is None:
return
return v.split(':')[2].strip('"')
def find_crumb_store(lines):
for l in lines:
if re.findall(r'CrumbStore', l):
return l
def get_cookie_value(r):
if not r.cookies:
return
print(r.cookies['B'])
return {'B': r.cookies['B']}
def get_page_data(symbol):
url = "https://finance.yahoo.com/quote/%s/?p=%s" % (symbol, symbol)
headers = {'User-Agent': 'Chrome'}
r = requests.get(url, headers=headers, timeout=10)
cookie = get_cookie_value(r)
lines = r.content.decode('latin-1').replace('\\', '')
lines = lines.replace('}', '\n')
return cookie, lines.split('\n')
def get_cookie_crumb(symbol):
cookie, lines = get_page_data(symbol)
crumb = split_crumb_store(find_crumb_store(lines))
return cookie, crumb
def get_now_epoch():
return int(time.time())
def waitbar(total, current):
current += 1
percent_complete = 100 * (current / total)
here_sym = '>'
complete_sym = '-'
advance = str(int(np.round((percent_complete / 2) - 1)) * complete_sym + here_sym)
retreat = str(int(np.round(((100 - percent_complete) / 2) - 1)) * '.')
print(advance + retreat + ' ' + str(np.round(percent_complete, 3)) + '%', end='\r')
def get_data(symbol, start_date, end_date, cookie, crumb, append_to_file, csv_location):
filename = csv_location + '%s.csv' % (symbol)
url = "https://query1.finance.yahoo.com/v7/finance/download/%s?period1=%s&period2=%s&interval=1d&events=history&crumb=%s" % (
symbol, start_date, end_date, crumb)
headers = {'User-Agent': 'Chrome'}
print(url)
try:
response = requests.get(url, cookies=cookie, headers=headers, timeout=20)
print(response)
except Exception:
return False
block = response.content[:1].decode('UTF-8')
if block == '{' or block == '4':
return False
if append_to_file:
for block in response.iter_content(1024):
pass
with open(filename, 'r') as open_file:
new_handle = bytes('\n'.join(open_file.read().split('\n')[:-3]) + '\n', 'utf-8')
with open(filename, 'wb') as new_csv:
new_csv.write(new_handle)
new_csv.write(block[42:])
return True
if not append_to_file:
block = response.content[:1].decode('UTF-8')
if block == '{' or block == '4':
return False
with open(filename, 'wb') as handle:
for block in response.iter_content(1024):
handle.write(block)
return True
return False
def dq(symbol, list_location='', csv_location='', verbose=True):
if list_location != '':
waitbar(len(open(list_location, 'r').read().split('\n')),
len(open(''.join(list_location.split('.')[:-1]) + '_completed_list.txt', 'r').read().split('\n')))
csv_present = os.listdir(csv_location)
filename = csv_location + '%s.csv' % (symbol)
present = symbol + '.csv' in csv_present
if present:
if os.path.getsize(filename) < 1000:
present = False
os.remove(filename)
end_date = get_now_epoch()
cookie, crumb = get_cookie_crumb(symbol)
if verbose:
print("--------------------------------------------------")
print("Downloading %s to %s.csv" % (symbol, symbol))
if not present:
append_to_file = False
start_date = 0
else:
append_to_file = True
last_time = open(csv_location + symbol + '.csv', 'r').read().split('\n')[-3].split(',')[0]
if '}' in last_time:
os.remove(filename)
start_date = 0
append_to_file = False
else:
start_date = int(datetime.datetime.timestamp(datetime.datetime.strptime(last_time, "%Y-%m-%d")))
data_saved = False
attempts = 0
while attempts < 5 and not data_saved:
data_saved = get_data(symbol, start_date, end_date, cookie, crumb, append_to_file, csv_location)
if data_saved == False:
cookie, crumb = get_cookie_crumb(symbol)
attempts += 1
if verbose and data_saved: print(symbol + ' Download Successful')
if data_saved and list_location != '':
with open(''.join(list_location.split('.')[:-1]) + '_completed_list.txt', 'a') as complete:
complete.write('\n' + symbol)
if verbose and not data_saved: print(symbol + ' Download Unsuccessful')
if not data_saved and list_location != '':
with open(''.join(list_location.split('.')[:-1]) + '_failed_list.txt', 'a') as failed:
failed.write('\n' + symbol)
def gather_tickers(ticker_list):
tickers = open(ticker_list, 'r')
tickers = tickers.read()
tickers = tickers.split('\n')
tickers = [ticker for ticker in tickers if ticker != '']
return tickers
def download_parallel_quotes(symbols, args):
list_location = args.ticker_location
csv_location = args.csv_location
verbose = args.verbose
with open(''.join(list_location.split('.')[:-1]) + '_completed_list.txt', 'w') as complete:
pass
with open(''.join(list_location.split('.')[:-1]) + '_failed_list.txt', 'w') as failed:
pass
pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()))
dfunc = partial(dq, list_location=list_location, csv_location=csv_location, verbose=verbose)
output = pool.map(dfunc, symbols)
def download_quotes(args):
with open(args.ticker_location, 'r') as tickers:
tickers = tickers.read().split('\n')
tickers = [ticker for ticker in tickers if ticker != '']
new = list(args.add_tickers.split(','))
new = [n for n in new if n not in tickers]
total = len(new)
for current, symbol in enumerate(new):
waitbar(total, current)
dq(symbol, csv_location=args.csv_location, verbose=args.verbose)
tickers.extend(new)
tickers = list(set(tickers))
tickers.sort()
with open(args.ticker_location, 'w') as t:
t.write('\n'.join(tickers))
def remove_tickers(args):
with open(args.ticker_location, 'r') as tickers:
tickers = tickers.read().split('\n')
tickers = [ticker for ticker in tickers if ticker != '']
remove = list(args.remove_tickers.split(','))
tickers = [n for n in tickers if n not in remove]
tickers = list(set(tickers))
tickers.sort()
with open(args.ticker_location, 'w') as t:
t.write('\n'.join(tickers))
for ticker in remove:
try:
os.remove(args.csv_location + ticker + '.csv')
except FileNotFoundError:
pass
def parser():
parser = argparse.ArgumentParser(description='Stock Market Ticker Downloader')
parser.add_argument("--ticker_location",
default='/home/carmelo/Projects/StockMarket/TickerLists/tickers.txt',
help="path pointing to a list of tickers to download. must be from text file. tickers seperated by newline")
parser.add_argument("--csv_location", default='/home/carmelo/Projects/StockMarket/CSVFiles/',
help="path pointing to location to save csv files, ex. /home/user/Desktop/CSVFiles/")
parser.add_argument("--add_tickers", default='', type=str,
help="download data for a tickers and add to list. input as string, ex. 'GOOG', or 'GOOG,AAPL,TSLA'."
" separate by commas only. works when not pointing to a list of tickers already")
parser.add_argument("--remove_tickers", default='', type=str,
help="remove data for a tickers . input as string, ex. 'GOOG', or 'GOOG,AAPL,TSLA'."
" separate by commas only. works when not pointing to a list of tickers already")
parser.add_argument("--multitry", default=True, type=bool,
help="bool to indicate trying to download list of bad tickers once initial try is complete")
parser.add_argument("--verbose", default=True, type=bool,
help="print status of downloading or not")
return parser.parse_args()
def check_arguments_errors(args):
if not os.path.exists(args.csv_location):
print('Please create a file to store csv files and update the default location inside parser().')
raise (ValueError("Invalid csv_location path {}".format(os.path.abspath(args.config_file))))
if not os.path.exists(args.ticker_location):
print('Please create a file to store ticker names and update the default location inside the parser().')
raise (ValueError("Invalid ticker_location path {}".format(os.path.abspath(args.weights))))
def do_multitry(args):
bad_list = open(''.join(args.ticker_location.split('.')[:-1]) + '_failed_list.txt', 'r').read().split('\n')
bad_list = [bl for bl in bad_list if bl != '']
args.remove_tickers = ','.join(bad_list)
remove_tickers(args)
download_parallel_quotes(bad_list, args)
# bad_list = open(''.join(list_location.split('.')[:-1]) + '_failed_list.txt', 'r').read().split('\n')
# bad_list = [bl for bl in bad_list if bl != '']
# args.remove_tickers = ','.join(bad_list)
# remove_tickers(args)
def download_data():
args = parser()
check_arguments_errors(args)
if args.add_tickers == '' and args.remove_tickers == '':
tickers = gather_tickers(args.ticker_location)
download_parallel_quotes(tickers, args)
if args.multitry:
do_multitry(args)
elif args.add_tickers != '':
download_quotes(args)
elif args.remove_tickers != '':
remove_tickers(args)
else:
print('Use -h for more info.')
if __name__ == '__main__':
download_data()
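# Example invocations (added; the paths shown are placeholders, not the
# defaults baked into parser() above):
#   python download_data.py --ticker_location /path/to/tickers.txt --csv_location /path/to/CSVFiles/
#   python download_data.py --add_tickers "GOOG,AAPL,TSLA"
#   python download_data.py --remove_tickers "TSLA"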
| 38.940945
| 132
| 0.625013
|
1d8f4d12beb96bfe56276edb482c463671e8bc01
| 1,614
|
py
|
Python
|
add_unix_time.py
|
PlatonaM/lopco-merge-csv-worker
|
36f8a1cc83d6f7b1e1e3ac19f3f73e91f86adec4
|
[
"Apache-2.0"
] | null | null | null |
add_unix_time.py
|
PlatonaM/lopco-merge-csv-worker
|
36f8a1cc83d6f7b1e1e3ac19f3f73e91f86adec4
|
[
"Apache-2.0"
] | null | null | null |
add_unix_time.py
|
PlatonaM/lopco-merge-csv-worker
|
36f8a1cc83d6f7b1e1e3ac19f3f73e91f86adec4
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2021 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import time
import os
import sys
class Config:
def __init__(self):
self.delimiter = os.getenv("delimiter")
self.time_column = os.getenv("time_column")
self.time_format = os.getenv("time_format")
self.data_cache_path = sys.argv[1]
self.input_file = sys.argv[2]
self.output_file = sys.argv[3]
config = Config()
with open("{}/{}".format(config.data_cache_path, config.input_file), "r") as in_file:
with open("{}/{}".format(config.data_cache_path, config.output_file), "w") as out_file:
first_line = in_file.readline().strip()
first_line = first_line.split(config.delimiter)
time_col_num = first_line.index(config.time_column)
for line in in_file:
line = line.strip()
line = line.split(config.delimiter)
line.append("{}\n".format(time.mktime(datetime.datetime.strptime(line[time_col_num], config.time_format).timetuple())))
out_file.write(config.delimiter.join(line))
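# Worked example of the conversion applied above (illustrative values; the
# format string is an assumption, and mktime interprets the result in local time):
#   datetime.datetime.strptime("2021-01-01 12:00:00", "%Y-%m-%d %H:%M:%S")
#     -> datetime.datetime(2021, 1, 1, 12, 0)
#   time.mktime(...timetuple()) -> epoch seconds as a float,
#     e.g. 1609502400.0 when the local timezone is UTC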
| 36.681818
| 131
| 0.688971
|
1a987a034983900774f514c8cbe08fd4f0cefd07
| 2,170
|
py
|
Python
|
meraki/models/tuesday_model.py
|
bossypants22/python-sdk-test
|
37701d62dc18c2abb910eb790ab978913adcaf7b
|
[
"MIT"
] | 37
|
2019-04-24T14:01:33.000Z
|
2022-01-28T01:37:21.000Z
|
meraki/models/tuesday_model.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 10
|
2019-07-09T16:35:11.000Z
|
2021-12-07T03:47:53.000Z
|
meraki/models/tuesday_model.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 17
|
2019-04-30T23:53:21.000Z
|
2022-02-07T22:57:44.000Z
|
# -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class TuesdayModel(object):
"""Implementation of the 'Tuesday' model.
The schedule object for Tuesday.
Attributes:
active (bool): Whether the schedule is active (true) or inactive
(false) during the time specified between 'from' and 'to'.
Defaults to true.
mfrom (string): The time, from '00:00' to '24:00'. Must be less than
the time specified in 'to'. Defaults to '00:00'. Only 30 minute
increments are allowed.
to (string): The time, from '00:00' to '24:00'. Must be greater than
the time specified in 'from'. Defaults to '24:00'. Only 30 minute
increments are allowed.
"""
# Create a mapping from Model property names to API property names
_names = {
"active":'active',
"mfrom":'from',
"to":'to'
}
def __init__(self,
active=None,
mfrom=None,
to=None):
"""Constructor for the TuesdayModel class"""
# Initialize members of the class
self.active = active
self.mfrom = mfrom
self.to = to
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
active = dictionary.get('active')
mfrom = dictionary.get('from')
to = dictionary.get('to')
# Return an object of this model
return cls(active,
mfrom,
to)
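# Usage sketch (added illustration; the schedule values are made up, not taken
# from the Meraki API documentation):
if __name__ == "__main__":
    payload = {"active": True, "from": "08:00", "to": "17:30"}
    schedule = TuesdayModel.from_dictionary(payload)
    print(schedule.active, schedule.mfrom, schedule.to)   # True 08:00 17:30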
| 28.552632
| 95
| 0.55023
|
e4f7636b5ebc4a9f9ea1e394ee1c77261098f7bb
| 970
|
py
|
Python
|
list_range.py
|
CrazyJ36/python
|
4cff6e7240672a273d978521bb511065f45d4312
|
[
"MIT"
] | null | null | null |
list_range.py
|
CrazyJ36/python
|
4cff6e7240672a273d978521bb511065f45d4312
|
[
"MIT"
] | null | null | null |
list_range.py
|
CrazyJ36/python
|
4cff6e7240672a273d978521bb511065f45d4312
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# 'list(range(int))' creates a numbered list.
# Makes an int list with 6 items.
nums = list(range(6))
# Print the new list.
print("New list, 6 items:\n>>> nums = list(range(6))\n", nums)
# Print 2nd index position of nums list.
print("\nPrint list item at 2nd index position:")
print(">>> nums[2]\n", nums[2])
# Range can be defined with a specific start and end.
# This starts at 2, ends before 6.
nums2 = list(range(2,6))
print("\nNew list whos' indexes range from only 2 to 6:")
print(">>> nums2 = list(range(2,6))\n", nums2)
# A third argument specifies the interval sequence of range.
nums3 = list(range(2,10,2))
print("\nA third range argument defines skipped intervals:")
print("This means 'A range from 2 to 10, in sequences of 2")
print(">>> nums3 = list(range(2,10,2))\n", nums3)
# Test that new range of 10 numbers is same as new range 0-10.
print("\nIs 'range(10)' the same as 'range(0,10)'?:")
print(range(10) == range(0,10))
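# Added example in the same spirit as the notes above: a negative step counts down.
nums4 = list(range(10, 0, -2))
print("\nCounting down from 10, stopping before 0, in steps of -2:")
print(">>> nums4 = list(range(10, 0, -2))\n", nums4)  # [10, 8, 6, 4, 2]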
| 32.333333
| 62
| 0.679381
|
277d80cbe0b220c0e339b3d4055607b00e4d2528
| 3,549
|
py
|
Python
|
Xpense/settings.py
|
ronyyosef/Xpense
|
9b7bf5785072dee5c95863130a3ea0eb9c2832db
|
[
"MIT"
] | null | null | null |
Xpense/settings.py
|
ronyyosef/Xpense
|
9b7bf5785072dee5c95863130a3ea0eb9c2832db
|
[
"MIT"
] | null | null | null |
Xpense/settings.py
|
ronyyosef/Xpense
|
9b7bf5785072dee5c95863130a3ea0eb9c2832db
|
[
"MIT"
] | null | null | null |
"""
Django settings for Xpense project.
Generated by 'django-admin startproject' using Django 4.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-*w7^)pdy8piifku&yv8^-l1bmm1zr+l03=q28%df0j2%21to#9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'expenses.apps.ExpensesConfig',
'house.apps.HouseConfig',
'django_extensions',
'tips.apps.TipsConfig',
'accounts.apps.AccountsConfig',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Xpense.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Xpense.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
STATICFILES_DIRS = [BASE_DIR / 'static']
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGIN_REDIRECT_URL = "/../house/"
LOGOUT_REDIRECT_URL = "/"
CRISPY_TEMPLATE_PACK = 'bootstrap4'
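# Hedged sketch (not part of the original settings): in a real deployment the
# hard-coded SECRET_KEY and DEBUG values above would usually be read from the
# environment instead, e.g.:
#
#     import os
#     SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'dev-only-fallback')
#     DEBUG = os.environ.get('DJANGO_DEBUG', 'False') == 'True'
#
# The variable names DJANGO_SECRET_KEY and DJANGO_DEBUG are illustrative only
# and are not taken from this project.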
| 27.726563
| 91
| 0.704142
|
2b77f838b89847ceb50880ba7b5cb5c7c06ffe5d
| 12,495
|
py
|
Python
|
docs/conf.py
|
renovate-bot/python-identity-toolkit
|
ccf9b1bc538d1a3de90fcefd66465b947198a1a7
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
renovate-bot/python-identity-toolkit
|
ccf9b1bc538d1a3de90fcefd66465b947198a1a7
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
renovate-bot/python-identity-toolkit
|
ccf9b1bc538d1a3de90fcefd66465b947198a1a7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-identity-toolkit documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-identity-toolkit"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-identity-toolkit",
"github_user": "googleapis",
"github_repo": "python-identity-toolkit",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-identity-toolkit-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-identity-toolkit.tex",
"google-cloud-identity-toolkit Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-cloud-identity-toolkit",
"google-cloud-identity-toolkit Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-identity-toolkit",
"google-cloud-identity-toolkit Documentation",
author,
"google-cloud-identity-toolkit",
"google-cloud-identity-toolkit Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
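# Illustrative only (not part of the generated conf.py): with the napoleon
# settings above, Sphinx can render Google-style docstrings such as:
#
#     def send(request, timeout=None):
#         """Send a request.
#
#         Args:
#             request: The request object to send.
#             timeout (float, optional): Seconds to wait before giving up.
#
#         Returns:
#             Response: The server's response.
#         """
#
# The function and parameter names here are made up for the example.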
| 32.709424
| 88
| 0.706202
|
040aa899497e2aa02f213596ed3bc9c34fa76243
| 114
|
py
|
Python
|
classtoolz/classtoolz/__init__.py
|
WallaceLiu/python_learn
|
ea8c4c20856a25491c756f74b3b572463ea7d7ea
|
[
"Apache-2.0"
] | 4
|
2016-02-11T15:54:48.000Z
|
2021-02-06T00:32:20.000Z
|
classtoolz/classtoolz/__init__.py
|
WallaceLiu/python_learn
|
ea8c4c20856a25491c756f74b3b572463ea7d7ea
|
[
"Apache-2.0"
] | null | null | null |
classtoolz/classtoolz/__init__.py
|
WallaceLiu/python_learn
|
ea8c4c20856a25491c756f74b3b572463ea7d7ea
|
[
"Apache-2.0"
] | 2
|
2015-10-03T20:24:38.000Z
|
2017-03-13T21:58:58.000Z
|
from .slotted import Slotted
from .typed import Typed
from .immutable import Immutable
from .cached import Cached
| 22.8
| 32
| 0.824561
|
a8eba30a728a0a858abe16e36de463856e19be7d
| 3,867
|
py
|
Python
|
password_test.py
|
Jessevictor/PythonIP1
|
fb600107f67a4e18333e527b9ce6cd5714d10134
|
[
"Unlicense",
"MIT"
] | null | null | null |
password_test.py
|
Jessevictor/PythonIP1
|
fb600107f67a4e18333e527b9ce6cd5714d10134
|
[
"Unlicense",
"MIT"
] | null | null | null |
password_test.py
|
Jessevictor/PythonIP1
|
fb600107f67a4e18333e527b9ce6cd5714d10134
|
[
"Unlicense",
"MIT"
] | null | null | null |
import unittest
from password import User
from password import Credentials
class TestClass(unittest.TestCase):
"""
A Test class that defines test cases for the User class.
"""
def setUp(self):
"""
        Method that runs before each individual test method runs.
"""
self.new_user = User('Miki','ptaxy342')
    def test_init(self):
        """
        test case to check if the object has been initialized correctly
        """
        self.assertEqual(self.new_user.username,'Miki')
        self.assertEqual(self.new_user.password,'ptaxy342')
def test_save_user(self):
"""
test case to test if a new user instance has been saved into the User list
"""
self.new_user.save_user()
self.assertEqual(len(User.user_list),1)
class TestCredentials(unittest.TestCase):
"""
A test class that defines test cases for credentials class
"""
def setUp(self):
"""
        Method that runs before each individual credentials test method runs.
"""
self.new_credential = Credentials('Gmail','patrick','TTcq34t56op')
def test_init(self):
"""
Test case to check if a new Credentials instance has been initialized correctly
"""
self.assertEqual(self.new_credential.account,'Gmail')
self.assertEqual(self.new_credential.userName,'patrick')
        self.assertEqual(self.new_credential.password,'TTcq34t56op')
    def test_save_credential(self):
        """
        test case to test if the credential object is saved into the credentials list.
"""
self.new_credential.save_details()
self.assertEqual(len(Credentials.credentials_list),1)
def tearDown(self):
'''
method that does clean up after each test case has run.
'''
Credentials.credentials_list = []
def test_save_many_accounts(self):
'''
test to check if we can save multiple credentials objects to our credentials list
'''
self.new_credential.save_details()
test_credential = Credentials("Twitter","Meshack","nWtop67Nots")
test_credential.save_details()
self.assertEqual(len(Credentials.credentials_list),2)
def test_delete_credential(self):
"""
        test method to check if we can remove an account's credentials from our credentials_list
"""
self.new_credential.save_details()
test_credential = Credentials("SnapTube","Mwende","Mfh45hfk")
test_credential.save_details()
self.new_credential.delete_credentials()
self.assertEqual(len(Credentials.credentials_list),1)
    def test_find_credential(self):
"""
test to check if we can find a credential entry by account name and display the details of the credential
"""
self.new_credential.save_details()
test_credential = Credentials("Twitter","ANNA","Mfh45hfk")
test_credential.save_details()
the_credential = Credentials.find_credential("Twitter")
self.assertEqual(the_credential.account,test_credential.account)
def test_credential_exist(self):
"""
test to check if we can return a true or false based on whether we find or can't find the credential.
"""
self.new_credential.save_details()
the_credential = Credentials("Facebook", "Jesse", "PTka3214")
the_credential.save_details()
credential_is_found = Credentials.if_credential_exist("Facebook")
self.assertTrue(credential_is_found)
def test_display_all_saved_credentials(self):
'''
        method that displays all the credentials that have been saved by the user
'''
self.assertEqual(Credentials.display_credentials(),Credentials.credentials_list)
if __name__ == "__main__":
unittest.main()
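# Usage note (not in the original file): besides executing this module
# directly, the same suite can be run with unittest's command-line interface,
# e.g. `python -m unittest -v password_test`, which discovers and runs
# TestClass and TestCredentials with verbose output.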
| 35.477064
| 113
| 0.664081
|
7c16d13e59d3a27b08205b4b3cfa672fe5a129e6
| 80,856
|
py
|
Python
|
pandas/tests/test_algos.py
|
dorothykiz1/pandas
|
6033ed4b3383d874ee4a8a461724c0b8c2ca968d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T01:38:03.000Z
|
2022-03-29T01:38:03.000Z
|
pandas/tests/test_algos.py
|
dorothykiz1/pandas
|
6033ed4b3383d874ee4a8a461724c0b8c2ca968d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-18T01:26:58.000Z
|
2022-03-18T01:26:58.000Z
|
pandas/tests/test_algos.py
|
dorothykiz1/pandas
|
6033ed4b3383d874ee4a8a461724c0b8c2ca968d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-25T21:54:40.000Z
|
2022-03-25T21:54:40.000Z
|
from datetime import datetime
from itertools import permutations
import struct
import numpy as np
import pytest
from pandas._libs import (
algos as libalgos,
hashtable as ht,
)
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_bool_dtype,
is_complex_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timedelta,
Timestamp,
date_range,
timedelta_range,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.core.algorithms as algos
from pandas.core.arrays import DatetimeArray
import pandas.core.common as com
class TestFactorize:
@pytest.mark.parametrize("sort", [True, False])
def test_factorize(self, index_or_series_obj, sort):
obj = index_or_series_obj
result_codes, result_uniques = obj.factorize(sort=sort)
constructor = Index
if isinstance(obj, MultiIndex):
constructor = MultiIndex.from_tuples
expected_uniques = constructor(obj.unique())
if (
isinstance(obj, Index)
and expected_uniques.dtype == bool
and obj.dtype == object
):
expected_uniques = expected_uniques.astype(object)
if sort:
expected_uniques = expected_uniques.sort_values()
# construct an integer ndarray so that
# `expected_uniques.take(expected_codes)` is equal to `obj`
expected_uniques_list = list(expected_uniques)
expected_codes = [expected_uniques_list.index(val) for val in obj]
expected_codes = np.asarray(expected_codes, dtype=np.intp)
tm.assert_numpy_array_equal(result_codes, expected_codes)
tm.assert_index_equal(result_uniques, expected_uniques, exact=True)
def test_series_factorize_na_sentinel_none(self):
# GH#35667
values = np.array([1, 2, 1, np.nan])
ser = Series(values)
codes, uniques = ser.factorize(na_sentinel=None)
expected_codes = np.array([0, 1, 0, 2], dtype=np.intp)
expected_uniques = Index([1.0, 2.0, np.nan])
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_index_equal(uniques, expected_uniques)
def test_basic(self):
codes, uniques = algos.factorize(["a", "b", "b", "a", "a", "c", "c", "c"])
tm.assert_numpy_array_equal(uniques, np.array(["a", "b", "c"], dtype=object))
codes, uniques = algos.factorize(
["a", "b", "b", "a", "a", "c", "c", "c"], sort=True
)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = np.array(["a", "b", "c"], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
arr = np.arange(5, dtype=np.intp)[::-1]
codes, uniques = algos.factorize(arr)
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=arr.dtype)
tm.assert_numpy_array_equal(uniques, exp)
codes, uniques = algos.factorize(arr, sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=arr.dtype)
tm.assert_numpy_array_equal(uniques, exp)
arr = np.arange(5.0)[::-1]
codes, uniques = algos.factorize(arr)
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = np.array([4.0, 3.0, 2.0, 1.0, 0.0], dtype=arr.dtype)
tm.assert_numpy_array_equal(uniques, exp)
codes, uniques = algos.factorize(arr, sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=arr.dtype)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(["A", "A", np.nan, "B", 3.14, np.inf])
codes, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = Index(["A", "B", 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
codes, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = Index([3.14, np.inf, "A", "B"])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp("20130101 09:00:00.00004")
v2 = Timestamp("20130101")
x = Series([v1, v1, v1, v2, v2, v1])
codes, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
codes, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = Period("201302", freq="M")
v2 = Period("201303", freq="M")
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
codes, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
tm.assert_index_equal(uniques, PeriodIndex([v1, v2]))
codes, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
tm.assert_index_equal(uniques, PeriodIndex([v1, v2]))
# GH 5986
v1 = to_timedelta("1 day 1 min")
v2 = to_timedelta("1 day")
x = Series([v1, v2, v1, v1, v2, v2, v1])
codes, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
tm.assert_index_equal(uniques, to_timedelta([v1, v2]))
codes, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
tm.assert_index_equal(uniques, to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype="O")
rizer = ht.ObjectFactorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype="int32")
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype="O")
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype="int32")
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize(
"data, expected_codes, expected_uniques",
[
(
[(1, 1), (1, 2), (0, 0), (1, 2), "nonsense"],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), "nonsense"],
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)],
),
([(1, 1), (1, 2), (0, 0), (1, 2)], [0, 1, 2, 1], [(1, 1), (1, 2), (0, 0)]),
],
)
def test_factorize_tuple_list(self, data, expected_codes, expected_uniques):
# GH9454
codes, uniques = pd.factorize(data)
tm.assert_numpy_array_equal(codes, np.array(expected_codes, dtype=np.intp))
expected_uniques_array = com.asarray_tuplesafe(expected_uniques, dtype=object)
tm.assert_numpy_array_equal(uniques, expected_uniques_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
x17 = np.array([complex(i) for i in range(17)], dtype=object)
msg = "'[<>]' not supported between instances of .*"
with pytest.raises(TypeError, match=msg):
algos.factorize(x17[::-1], sort=True)
def test_numeric_dtype_factorize(self, any_real_numpy_dtype):
# GH41132
dtype = any_real_numpy_dtype
data = np.array([1, 2, 2, 1], dtype=dtype)
expected_codes = np.array([0, 1, 1, 0], dtype=np.intp)
expected_uniques = np.array([1, 2], dtype=dtype)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_float64_factorize(self, writable):
data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64)
data.setflags(write=writable)
expected_codes = np.array([0, 1, 0, 2, 1, 0], dtype=np.intp)
expected_uniques = np.array([1.0, 1e8, 1e-8], dtype=np.float64)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_uint64_factorize(self, writable):
data = np.array([2**64 - 1, 1, 2**64 - 1], dtype=np.uint64)
data.setflags(write=writable)
expected_codes = np.array([0, 1, 0], dtype=np.intp)
expected_uniques = np.array([2**64 - 1, 1], dtype=np.uint64)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_int64_factorize(self, writable):
data = np.array([2**63 - 1, -(2**63), 2**63 - 1], dtype=np.int64)
data.setflags(write=writable)
expected_codes = np.array([0, 1, 0], dtype=np.intp)
expected_uniques = np.array([2**63 - 1, -(2**63)], dtype=np.int64)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_string_factorize(self, writable):
data = np.array(["a", "c", "a", "b", "c"], dtype=object)
data.setflags(write=writable)
expected_codes = np.array([0, 1, 0, 2, 1], dtype=np.intp)
expected_uniques = np.array(["a", "c", "b"], dtype=object)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_object_factorize(self, writable):
data = np.array(["a", "c", None, np.nan, "a", "b", NaT, "c"], dtype=object)
data.setflags(write=writable)
expected_codes = np.array([0, 1, -1, -1, 0, 2, -1, 1], dtype=np.intp)
expected_uniques = np.array(["a", "c", "b"], dtype=object)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_datetime64_factorize(self, writable):
# GH35650 Verify whether read-only datetime64 array can be factorized
data = np.array([np.datetime64("2020-01-01T00:00:00.000")])
data.setflags(write=writable)
expected_codes = np.array([0], dtype=np.intp)
expected_uniques = np.array(
["2020-01-01T00:00:00.000000000"], dtype="datetime64[ns]"
)
codes, uniques = pd.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
@pytest.mark.parametrize("sort", [True, False])
def test_factorize_rangeindex(self, sort):
# increasing -> sort doesn't matter
ri = pd.RangeIndex.from_range(range(10))
expected = np.arange(10, dtype=np.intp), ri
result = algos.factorize(ri, sort=sort)
tm.assert_numpy_array_equal(result[0], expected[0])
tm.assert_index_equal(result[1], expected[1], exact=True)
result = ri.factorize(sort=sort)
tm.assert_numpy_array_equal(result[0], expected[0])
tm.assert_index_equal(result[1], expected[1], exact=True)
@pytest.mark.parametrize("sort", [True, False])
def test_factorize_rangeindex_decreasing(self, sort):
# decreasing -> sort matters
ri = pd.RangeIndex.from_range(range(10))
expected = np.arange(10, dtype=np.intp), ri
ri2 = ri[::-1]
expected = expected[0], ri2
if sort:
expected = expected[0][::-1], expected[1][::-1]
result = algos.factorize(ri2, sort=sort)
tm.assert_numpy_array_equal(result[0], expected[0])
tm.assert_index_equal(result[1], expected[1], exact=True)
result = ri2.factorize(sort=sort)
tm.assert_numpy_array_equal(result[0], expected[0])
tm.assert_index_equal(result[1], expected[1], exact=True)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with pytest.raises(TypeError, match="got an unexpected keyword"):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize(
"data",
[
np.array([0, 1, 0], dtype="u8"),
np.array([-(2**63), 1, -(2**63)], dtype="i8"),
np.array(["__nan__", "foo", "__nan__"], dtype="object"),
],
)
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
codes, uniques = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_codes = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
@pytest.mark.parametrize(
"data, na_value",
[
(np.array([0, 1, 0, 2], dtype="u8"), 0),
(np.array([1, 0, 1, 2], dtype="u8"), 1),
(np.array([-(2**63), 1, -(2**63), 0], dtype="i8"), -(2**63)),
(np.array([1, -(2**63), 1, 0], dtype="i8"), 1),
(np.array(["a", "", "a", "b"], dtype=object), "a"),
(np.array([(), ("a", 1), (), ("a", 2)], dtype=object), ()),
(np.array([("a", 1), (), ("a", 1), ("a", 2)], dtype=object), ("a", 1)),
],
)
def test_parametrized_factorize_na_value(self, data, na_value):
codes, uniques = algos.factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_codes = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
@pytest.mark.parametrize("sort", [True, False])
@pytest.mark.parametrize("na_sentinel", [-1, -10, 100])
@pytest.mark.parametrize(
"data, uniques",
[
(
np.array(["b", "a", None, "b"], dtype=object),
np.array(["b", "a"], dtype=object),
),
(
pd.array([2, 1, np.nan, 2], dtype="Int64"),
pd.array([2, 1], dtype="Int64"),
),
],
ids=["numpy_array", "extension_array"],
)
def test_factorize_na_sentinel(self, sort, na_sentinel, data, uniques):
codes, uniques = algos.factorize(data, sort=sort, na_sentinel=na_sentinel)
if sort:
expected_codes = np.array([1, 0, na_sentinel, 1], dtype=np.intp)
expected_uniques = algos.safe_sort(uniques)
else:
expected_codes = np.array([0, 1, na_sentinel, 0], dtype=np.intp)
expected_uniques = uniques
tm.assert_numpy_array_equal(codes, expected_codes)
if isinstance(data, np.ndarray):
tm.assert_numpy_array_equal(uniques, expected_uniques)
else:
tm.assert_extension_array_equal(uniques, expected_uniques)
@pytest.mark.parametrize(
"data, expected_codes, expected_uniques",
[
(
["a", None, "b", "a"],
np.array([0, 2, 1, 0], dtype=np.dtype("intp")),
np.array(["a", "b", np.nan], dtype=object),
),
(
["a", np.nan, "b", "a"],
np.array([0, 2, 1, 0], dtype=np.dtype("intp")),
np.array(["a", "b", np.nan], dtype=object),
),
],
)
def test_object_factorize_na_sentinel_none(
self, data, expected_codes, expected_uniques
):
codes, uniques = algos.factorize(data, na_sentinel=None)
tm.assert_numpy_array_equal(uniques, expected_uniques)
tm.assert_numpy_array_equal(codes, expected_codes)
@pytest.mark.parametrize(
"data, expected_codes, expected_uniques",
[
(
[1, None, 1, 2],
np.array([0, 2, 0, 1], dtype=np.dtype("intp")),
np.array([1, 2, np.nan], dtype="O"),
),
(
[1, np.nan, 1, 2],
np.array([0, 2, 0, 1], dtype=np.dtype("intp")),
np.array([1, 2, np.nan], dtype=np.float64),
),
],
)
def test_int_factorize_na_sentinel_none(
self, data, expected_codes, expected_uniques
):
codes, uniques = algos.factorize(data, na_sentinel=None)
tm.assert_numpy_array_equal(uniques, expected_uniques)
tm.assert_numpy_array_equal(codes, expected_codes)
class TestUnique:
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype("O")
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ["A", "B", "C", "D", "E"]
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = MultiIndex.from_arrays(
[np.arange(5).repeat(5), np.tile(np.arange(5), 5)]
)
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_dtype_preservation(self, any_numpy_dtype):
# GH 15442
if any_numpy_dtype in (tm.BYTES_DTYPES + tm.STRING_DTYPES):
data = [1, 2, 2]
uniques = [1, 2]
elif is_integer_dtype(any_numpy_dtype):
data = [1, 2, 2]
uniques = [1, 2]
elif is_float_dtype(any_numpy_dtype):
data = [1, 2, 2]
uniques = [1.0, 2.0]
elif is_complex_dtype(any_numpy_dtype):
data = [complex(1, 0), complex(2, 0), complex(2, 0)]
uniques = [complex(1, 0), complex(2, 0)]
elif is_bool_dtype(any_numpy_dtype):
data = [True, True, False]
uniques = [True, False]
elif is_object_dtype(any_numpy_dtype):
data = ["A", "B", "B"]
uniques = ["A", "B"]
else:
# datetime64[ns]/M8[ns]/timedelta64[ns]/m8[ns] tested elsewhere
data = [1, 2, 2]
uniques = [1, 2]
result = Series(data, dtype=any_numpy_dtype).unique()
expected = np.array(uniques, dtype=any_numpy_dtype)
if any_numpy_dtype in tm.STRING_DTYPES:
expected = expected.astype(object)
tm.assert_numpy_array_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np.array(
[
"2015-01-03T00:00:00.000000000",
"2015-01-01T00:00:00.000000000",
],
dtype="M8[ns]",
)
dt_index = to_datetime(
[
"2015-01-03T00:00:00.000000000",
"2015-01-01T00:00:00.000000000",
"2015-01-01T00:00:00.000000000",
]
)
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_datetime_non_ns(self):
a = np.array(["2000", "2000", "2001"], dtype="datetime64[s]")
result = pd.unique(a)
expected = np.array(["2000", "2001"], dtype="datetime64[ns]")
tm.assert_numpy_array_equal(result, expected)
def test_timedelta_non_ns(self):
a = np.array(["2000", "2000", "2001"], dtype="timedelta64[s]")
result = pd.unique(a)
expected = np.array([2000000000000, 2001000000000], dtype="timedelta64[ns]")
tm.assert_numpy_array_equal(result, expected)
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype="m8[ns]")
td_index = to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
duplicated_items = ["a", np.nan, "c", "c"]
result = pd.unique(duplicated_items)
expected = np.array(["a", np.nan, "c"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list("bac"))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(list("bac"), categories=list("abc"), ordered=True)
# GH 15939
c = Categorical(list("baabc"))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list("baabc"), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list("baabc")), name="foo")
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list("baabc"), categories=list("abc")))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index(
[
Timestamp("20160101", tz="US/Eastern"),
Timestamp("20160101", tz="US/Eastern"),
]
)
).unique()
expected = DatetimeArray._from_sequence(
np.array([Timestamp("2016-01-01 00:00:00-0500", tz="US/Eastern")])
)
tm.assert_extension_array_equal(result, expected)
result = Index(
[
Timestamp("20160101", tz="US/Eastern"),
Timestamp("20160101", tz="US/Eastern"),
]
).unique()
expected = DatetimeIndex(
["2016-01-01 00:00:00"], dtype="datetime64[ns, US/Eastern]", freq=None
)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(
Index(
[
Timestamp("20160101", tz="US/Eastern"),
Timestamp("20160101", tz="US/Eastern"),
]
)
)
)
expected = DatetimeArray._from_sequence(
np.array([Timestamp("2016-01-01", tz="US/Eastern")])
)
tm.assert_extension_array_equal(result, expected)
result = pd.unique(
Index(
[
Timestamp("20160101", tz="US/Eastern"),
Timestamp("20160101", tz="US/Eastern"),
]
)
)
expected = DatetimeIndex(
["2016-01-01 00:00:00"], dtype="datetime64[ns, US/Eastern]", freq=None
)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result, np.array([2, 1, 3], dtype="int64"))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result, np.array([2, 1], dtype="int64"))
result = pd.unique(Series([Timestamp("20160101"), Timestamp("20160101")]))
expected = np.array(["2016-01-01T00:00:00.000000000"], dtype="datetime64[ns]")
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(
Index(
[
Timestamp("20160101", tz="US/Eastern"),
Timestamp("20160101", tz="US/Eastern"),
]
)
)
expected = DatetimeIndex(
["2016-01-01 00:00:00"], dtype="datetime64[ns, US/Eastern]", freq=None
)
tm.assert_index_equal(result, expected)
result = pd.unique(list("aabc"))
expected = np.array(["a", "b", "c"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list("aabc"))))
expected = Categorical(list("abc"))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize(
"arg ,expected",
[
(("1", "1", "2"), np.array(["1", "2"], dtype=object)),
(("foo",), np.array(["foo"], dtype=object)),
],
)
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
def test_obj_none_preservation(self):
# GH 20866
arr = np.array(["foo", None], dtype=object)
result = pd.unique(arr)
expected = np.array(["foo", None], dtype=object)
tm.assert_numpy_array_equal(result, expected, strict_nan=True)
def test_signed_zero(self):
# GH 21866
a = np.array([-0.0, 0.0])
result = pd.unique(a)
expected = np.array([-0.0]) # 0.0 and -0.0 are equivalent
tm.assert_numpy_array_equal(result, expected)
def test_different_nans(self):
# GH 21866
# create different nans from bit-patterns:
NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
assert NAN1 != NAN1
assert NAN2 != NAN2
a = np.array([NAN1, NAN2]) # NAN1 and NAN2 are equivalent
result = pd.unique(a)
expected = np.array([np.nan])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("el_type", [np.float64, object])
def test_first_nan_kept(self, el_type):
# GH 22295
# create different nans from bit-patterns:
bits_for_nan1 = 0xFFF8000000000001
bits_for_nan2 = 0x7FF8000000000001
NAN1 = struct.unpack("d", struct.pack("=Q", bits_for_nan1))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", bits_for_nan2))[0]
assert NAN1 != NAN1
assert NAN2 != NAN2
a = np.array([NAN1, NAN2], dtype=el_type)
result = pd.unique(a)
assert result.size == 1
# use bit patterns to identify which nan was kept:
result_nan_bits = struct.unpack("=Q", struct.pack("d", result[0]))[0]
assert result_nan_bits == bits_for_nan1
def test_do_not_mangle_na_values(self, unique_nulls_fixture, unique_nulls_fixture2):
# GH 22295
if unique_nulls_fixture is unique_nulls_fixture2:
return # skip it, values not unique
a = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object)
result = pd.unique(a)
assert result.size == 2
assert a[0] is unique_nulls_fixture
assert a[1] is unique_nulls_fixture2
class TestIsin:
def test_invalid(self):
msg = (
r"only list-like objects are allowed to be passed to isin\(\), "
r"you passed a \[int\]"
)
with pytest.raises(TypeError, match=msg):
algos.isin(1, 1)
with pytest.raises(TypeError, match=msg):
algos.isin(1, [1])
with pytest.raises(TypeError, match=msg):
algos.isin([1], 1)
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), {1})
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(["a", "b"], ["a"])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(["a", "b"]), Series(["a"]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(["a", "b"]), {"a"})
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(["a", "b"], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = date_range("20130101", periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = timedelta_range("1 day", periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype1", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"])
@pytest.mark.parametrize("dtype", ["i8", "f8", "u8"])
def test_isin_datetimelike_values_numeric_comps(self, dtype, dtype1):
# Anything but object and we get all-False shortcut
dta = date_range("2013-01-01", periods=3)._values
if dtype1 == "period[D]":
# TODO: fix Series.view to get this on its own
arr = dta.to_period("D")
elif dtype1 == "M8[ns, UTC]":
# TODO: fix Series.view to get this on its own
arr = dta.tz_localize("UTC")
else:
arr = Series(dta.view("i8")).view(dtype1)._values
comps = arr.view("i8").astype(dtype)
result = algos.isin(comps, arr)
expected = np.zeros(comps.shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = date_range("20000101", periods=2000000, freq="s").values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ["a", "b", "c"]
Sd = Series(Categorical([1]).from_codes(vals, cats))
St = Series(Categorical([1]).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
def test_categorical_isin(self):
vals = np.array([0, 1, 2, 0])
cats = ["a", "b", "c"]
cat = Categorical([1]).from_codes(vals, cats)
other = Categorical([1]).from_codes(np.array([0, 1]), cats)
expected = np.array([True, True, False, True])
result = algos.isin(cat, other)
tm.assert_numpy_array_equal(expected, result)
def test_same_nan_is_in(self):
# GH 22160
        # nan is special, because "a is b" does not imply "a == b";
        # at least, isin() should follow Python's behavior that "np.nan in [np.nan]" is True.
        # Casting to np.float64, or to another float object somewhere along
        # the way, could jeopardize this behavior.
        comps = [np.nan]  # could be cast to float64
values = [np.nan]
expected = np.array([True])
result = algos.isin(comps, values)
tm.assert_numpy_array_equal(expected, result)
def test_same_nan_is_in_large(self):
# https://github.com/pandas-dev/pandas/issues/22205
s = np.tile(1.0, 1_000_001)
s[0] = np.nan
result = algos.isin(s, [np.nan, 1])
expected = np.ones(len(s), dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_same_nan_is_in_large_series(self):
# https://github.com/pandas-dev/pandas/issues/22205
s = np.tile(1.0, 1_000_001)
series = Series(s)
s[0] = np.nan
result = series.isin([np.nan, 1])
expected = Series(np.ones(len(s), dtype=bool))
tm.assert_series_equal(result, expected)
def test_same_object_is_in(self):
# GH 22160
# there could be special treatment for nans
# the user however could define a custom class
# with similar behavior, then we at least should
        # fall back to Python's usual behavior: "a in [a] == True"
class LikeNan:
def __eq__(self, other) -> bool:
return False
def __hash__(self):
return 0
a, b = LikeNan(), LikeNan()
# same object -> True
tm.assert_numpy_array_equal(algos.isin([a], [a]), np.array([True]))
# different objects -> False
tm.assert_numpy_array_equal(algos.isin([a], [b]), np.array([False]))
def test_different_nans(self):
# GH 22160
# all nans are handled as equivalent
comps = [float("nan")]
values = [float("nan")]
assert comps[0] is not values[0] # different nan-objects
# as list of python-objects:
result = algos.isin(comps, values)
tm.assert_numpy_array_equal(np.array([True]), result)
# as object-array:
result = algos.isin(
np.asarray(comps, dtype=object), np.asarray(values, dtype=object)
)
tm.assert_numpy_array_equal(np.array([True]), result)
# as float64-array:
result = algos.isin(
np.asarray(comps, dtype=np.float64), np.asarray(values, dtype=np.float64)
)
tm.assert_numpy_array_equal(np.array([True]), result)
def test_no_cast(self):
# GH 22160
# ensure 42 is not casted to a string
comps = ["ss", 42]
values = ["42"]
expected = np.array([False, False])
result = algos.isin(comps, values)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
def test_different_nan_objects(self):
# GH 22119
comps = np.array(["nan", np.nan * 1j, float("nan")], dtype=object)
vals = np.array([float("nan")], dtype=object)
expected = np.array([False, False, True])
result = algos.isin(comps, vals)
tm.assert_numpy_array_equal(expected, result)
def test_different_nans_as_float64(self):
# GH 21866
# create different nans from bit-patterns,
# these nans will land in different buckets in the hash-table
# if no special care is taken
NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
assert NAN1 != NAN1
assert NAN2 != NAN2
# check that NAN1 and NAN2 are equivalent:
arr = np.array([NAN1, NAN2], dtype=np.float64)
lookup1 = np.array([NAN1], dtype=np.float64)
result = algos.isin(arr, lookup1)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
lookup2 = np.array([NAN2], dtype=np.float64)
result = algos.isin(arr, lookup2)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
def test_isin_int_df_string_search(self):
"""Comparing df with int`s (1,2) with a string at isin() ("1")
-> should not match values because int 1 is not equal str 1"""
df = DataFrame({"values": [1, 2]})
result = df.isin(["1"])
expected_false = DataFrame({"values": [False, False]})
tm.assert_frame_equal(result, expected_false)
def test_isin_nan_df_string_search(self):
"""Comparing df with nan value (np.nan,2) with a string at isin() ("NaN")
        -> should not match values because np.nan is not equal to str NaN"""
df = DataFrame({"values": [np.nan, 2]})
result = df.isin(["NaN"])
expected_false = DataFrame({"values": [False, False]})
tm.assert_frame_equal(result, expected_false)
def test_isin_float_df_string_search(self):
"""Comparing df with floats (1.4245,2.32441) with a string at isin() ("1.4245")
        -> should not match values because float 1.4245 is not equal to str 1.4245"""
df = DataFrame({"values": [1.4245, 2.32441]})
result = df.isin(["1.4245"])
expected_false = DataFrame({"values": [False, False]})
tm.assert_frame_equal(result, expected_false)
class TestValueCounts:
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4], index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series(
[2, 2], index=IntervalIndex.from_tuples([(0.996, 2.5), (2.5, 4.0)])
)
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.0])
assert len(result) == 1
result = algos.value_counts([1, 1.0], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1.0, "1"])) # object
assert len(result) == 2
msg = "bins argument only works with numeric data"
with pytest.raises(TypeError, match=msg):
algos.value_counts(["1", 1], bins=1)
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), NaT], dtype="timedelta64[ns]")
dt = to_datetime(["NaT", "2014-01-01"])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp("2014-01-01 00:00:00"): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series(
[
datetime(3000, 1, 1),
datetime(5000, 1, 1),
datetime(5000, 1, 1),
datetime(6000, 1, 1),
datetime(3000, 1, 1),
datetime(3000, 1, 1),
]
)
res = s.value_counts()
exp_index = Index(
[datetime(3000, 1, 1), datetime(5000, 1, 1), datetime(6000, 1, 1)],
dtype=object,
)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = to_datetime(Series(["2362-01-01", np.nan]), errors="ignore")
exp = Series(["2362-01-01", np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list("aaabbc")))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(["a", "b", "c"]))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list("aaaaabbbcc"))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series(
[4, 3, 2],
index=CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c"]),
)
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(["a", "b", "c", np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(
Categorical(list("aaaaabbbcc"), ordered=True, categories=["b", "a", "c"])
)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series(
[4, 3, 2],
index=CategoricalIndex(
["a", "b", "c"], categories=["b", "a", "c"], ordered=True
),
)
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series(
[4, 3, 2, 1],
index=CategoricalIndex(
["a", "b", "c", np.nan], categories=["b", "a", "c"], ordered=True
),
)
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(list("bbbaac"), categories=list("abcd"), ordered=True))
result = s.value_counts()
expected = Series(
[3, 2, 1, 0],
index=Categorical(
["b", "a", "c", "d"], categories=list("abcd"), ordered=True
),
)
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]),
)
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]),
)
tm.assert_series_equal(
Series([True] * 3 + [False] * 2 + [None] * 5).value_counts(dropna=True),
Series([3, 2], index=Index([True, False], dtype=object)),
)
tm.assert_series_equal(
Series([True] * 5 + [False] * 3 + [None] * 2).value_counts(dropna=False),
Series([5, 3, 2], index=[True, False, np.nan]),
)
tm.assert_series_equal(
Series([10.3, 5.0, 5.0]).value_counts(dropna=True),
Series([2, 1], index=[5.0, 10.3]),
)
tm.assert_series_equal(
Series([10.3, 5.0, 5.0]).value_counts(dropna=False),
Series([2, 1], index=[5.0, 10.3]),
)
tm.assert_series_equal(
Series([10.3, 5.0, 5.0, None]).value_counts(dropna=True),
Series([2, 1], index=[5.0, 10.3]),
)
result = Series([10.3, 10.3, 5.0, 5.0, 5.0, None]).value_counts(dropna=False)
expected = Series([3, 2, 1], index=[5.0, 10.3, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", (np.float64, object, "M8[ns]"))
def test_value_counts_normalized(self, dtype):
# GH12558
s = Series([1] * 2 + [2] * 3 + [np.nan] * 5)
s_typed = s.astype(dtype)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series(
[0.5, 0.3, 0.2], index=Series([np.nan, 2.0, 1.0], dtype=dtype)
)
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.6, 0.4], index=Series([2.0, 1.0], dtype=dtype))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
class TestDuplicated:
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep="first")
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep="last")
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(
zip([0, 0, np.nan, np.nan] * 2, [0, np.nan, 0, np.nan] * 2)
):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep="last")
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"case",
[
np.array([1, 2, 1, 5, 3, 2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3, 2.2, 4.4, 1.1, np.nan, 6.6]),
np.array(
[
1 + 1j,
2 + 2j,
1 + 1j,
5 + 5j,
3 + 3j,
2 + 2j,
4 + 4j,
1 + 1j,
5 + 5j,
6 + 6j,
]
),
np.array(["a", "b", "a", "e", "c", "b", "d", "a", "e", "f"], dtype=object),
np.array(
[1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7], dtype=np.uint64
),
],
)
def test_numeric_object_likes(self, case):
exp_first = np.array(
[False, False, True, False, False, True, False, True, True, False]
)
exp_last = np.array(
[True, True, True, True, False, False, False, False, False, False]
)
exp_false = exp_first | exp_last
res_first = algos.duplicated(case, keep="first")
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep="last")
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype="category")]:
res_first = idx.duplicated(keep="first")
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep="last")
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype="category")]:
res_first = s.duplicated(keep="first")
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep="last")
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
dt = [
"2011-01-01",
"2011-01-02",
"2011-01-01",
"NaT",
"2011-01-03",
"2011-01-02",
"2011-01-04",
"2011-01-01",
"NaT",
"2011-01-06",
]
td = [
"1 days",
"2 days",
"1 days",
"NaT",
"3 days",
"2 days",
"4 days",
"1 days",
"NaT",
"6 days",
]
cases = [
np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz="US/Eastern") for d in dt]),
np.array([Period(d, freq="D") for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([Timedelta(d) for d in td]),
]
exp_first = np.array(
[False, False, True, False, False, True, False, True, True, False]
)
exp_last = np.array(
[True, True, True, True, False, False, False, False, False, False]
)
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep="first")
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep="last")
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [
Index(case),
Index(case, dtype="category"),
Index(case, dtype=object),
]:
res_first = idx.duplicated(keep="first")
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep="last")
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [
Series(case),
Series(case, dtype="category"),
Series(case, dtype=object),
]:
res_first = s.duplicated(keep="first")
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep="last")
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
@pytest.mark.parametrize("case", [Index([1, 2, 3]), pd.RangeIndex(0, 3)])
def test_unique_index(self, case):
assert case.is_unique is True
tm.assert_numpy_array_equal(case.duplicated(), np.array([False, False, False]))
@pytest.mark.parametrize(
"arr, uniques",
[
(
[(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
[(0, 0), (0, 1), (1, 0), (1, 1)],
),
(
[("b", "c"), ("a", "b"), ("a", "b"), ("b", "c")],
[("b", "c"), ("a", "b")],
),
([("a", 1), ("b", 2), ("a", 3), ("a", 1)], [("a", 1), ("b", 2), ("a", 3)]),
],
)
def test_unique_tuples(self, arr, uniques):
# https://github.com/pandas-dev/pandas/issues/16519
expected = np.empty(len(uniques), dtype=object)
expected[:] = uniques
result = pd.unique(arr)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"array,expected",
[
(
[1 + 1j, 0, 1, 1j, 1 + 2j, 1 + 2j],
# Should return a complex dtype in the future
np.array([(1 + 1j), 0j, (1 + 0j), 1j, (1 + 2j)], dtype=object),
)
],
)
def test_unique_complex_numbers(self, array, expected):
# GH 17927
result = pd.unique(array)
tm.assert_numpy_array_equal(result, expected)
class TestHashTable:
@pytest.mark.parametrize(
"htable, tm_dtype",
[
(ht.PyObjectHashTable, "String"),
(ht.StringHashTable, "String"),
(ht.Float64HashTable, "Float"),
(ht.Int64HashTable, "Int"),
(ht.UInt64HashTable, "UInt"),
],
)
def test_hashtable_unique(self, htable, tm_dtype, writable):
# output of maker has guaranteed unique elements
maker = getattr(tm, "make" + tm_dtype + "Index")
s = Series(maker(1000))
if htable == ht.Float64HashTable:
# add NaN for float column
s.loc[500] = np.nan
elif htable == ht.PyObjectHashTable:
# use different NaN types for object column
s.loc[500:502] = [np.nan, None, NaT]
# create duplicated selection
s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)
s_duplicated.values.setflags(write=writable)
# drop_duplicates has own cython code (hash_table_func_helper.pxi)
# and is tested separately; keeps first occurrence like ht.unique()
expected_unique = s_duplicated.drop_duplicates(keep="first").values
result_unique = htable().unique(s_duplicated.values)
tm.assert_numpy_array_equal(result_unique, expected_unique)
# test return_inverse=True
# reconstruction can only succeed if the inverse is correct
result_unique, result_inverse = htable().unique(
s_duplicated.values, return_inverse=True
)
tm.assert_numpy_array_equal(result_unique, expected_unique)
reconstr = result_unique[result_inverse]
tm.assert_numpy_array_equal(reconstr, s_duplicated.values)
@pytest.mark.parametrize(
"htable, tm_dtype",
[
(ht.PyObjectHashTable, "String"),
(ht.StringHashTable, "String"),
(ht.Float64HashTable, "Float"),
(ht.Int64HashTable, "Int"),
(ht.UInt64HashTable, "UInt"),
],
)
def test_hashtable_factorize(self, htable, tm_dtype, writable):
# output of maker has guaranteed unique elements
maker = getattr(tm, "make" + tm_dtype + "Index")
s = Series(maker(1000))
if htable == ht.Float64HashTable:
# add NaN for float column
s.loc[500] = np.nan
elif htable == ht.PyObjectHashTable:
# use different NaN types for object column
s.loc[500:502] = [np.nan, None, NaT]
# create duplicated selection
s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)
s_duplicated.values.setflags(write=writable)
na_mask = s_duplicated.isna().values
result_unique, result_inverse = htable().factorize(s_duplicated.values)
# drop_duplicates has own cython code (hash_table_func_helper.pxi)
# and is tested separately; keeps first occurrence like ht.factorize()
# since factorize removes all NaNs, we do the same here
expected_unique = s_duplicated.dropna().drop_duplicates().values
tm.assert_numpy_array_equal(result_unique, expected_unique)
# reconstruction can only succeed if the inverse is correct. Since
# factorize removes the NaNs, those have to be excluded here as well
result_reconstruct = result_unique[result_inverse[~na_mask]]
expected_reconstruct = s_duplicated.dropna().values
tm.assert_numpy_array_equal(result_reconstruct, expected_reconstruct)
class TestRank:
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"arr",
[
[np.nan, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 3, np.nan],
[4.0, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 4.0, np.nan],
],
)
def test_scipy_compat(self, arr):
from scipy.stats import rankdata
arr = np.array(arr)
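        # scipy's rankdata has no missing-value handling, so build the expected
        # ranks with the NaNs replaced by +inf and mask them back to NaN afterwards.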
mask = ~np.isfinite(arr)
arr = arr.copy()
result = libalgos.rank_1d(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = np.nan
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
def test_basic(self, writable, dtype):
exp = np.array([1, 2], dtype=np.float64)
data = np.array([1, 100], dtype=dtype)
data.setflags(write=writable)
ser = Series(data)
result = algos.rank(ser)
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("dtype", [np.float64, np.uint64])
def test_uint64_overflow(self, dtype):
exp = np.array([1, 2], dtype=np.float64)
s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
with pytest.raises(TypeError, match=msg):
algos.rank(arr)
@pytest.mark.single_cpu
def test_pct_max_many_rows(self):
# GH 18271
values = np.arange(2**24 + 1)
result = algos.rank(values, pct=True).max()
assert result == 1
values = np.arange(2**25 + 2).reshape(2**24 + 1, 2)
result = algos.rank(values, pct=True).max()
assert result == 1
def test_pad_backfill_object_segfault():
old = np.array([], dtype="O")
new = np.array([datetime(2010, 12, 31)], dtype="O")
result = libalgos.pad["object"](old, new)
expected = np.array([-1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.pad["object"](new, old)
expected = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill["object"](old, new)
expected = np.array([-1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill["object"](new, old)
expected = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
class TestTseriesUtil:
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(list(range(12)))
filler = libalgos.backfill["int64_t"](old.values, new.values)
expect_filler = np.array([0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1], dtype=np.intp)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([1, 4])
new = Index(list(range(5, 10)))
filler = libalgos.backfill["int64_t"](old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_pad(self):
old = Index([1, 5, 10])
new = Index(list(range(12)))
filler = libalgos.pad["int64_t"](old.values, new.values)
expect_filler = np.array([-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([5, 10])
new = Index(np.arange(5))
filler = libalgos.pad["int64_t"](old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_is_lexsorted():
failure = [
np.array(
[
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
dtype="int64",
),
np.array(
[
30,
29,
28,
27,
26,
25,
24,
23,
22,
21,
20,
19,
18,
17,
16,
15,
14,
13,
12,
11,
10,
9,
8,
7,
6,
5,
4,
3,
2,
1,
0,
30,
29,
28,
27,
26,
25,
24,
23,
22,
21,
20,
19,
18,
17,
16,
15,
14,
13,
12,
11,
10,
9,
8,
7,
6,
5,
4,
3,
2,
1,
0,
30,
29,
28,
27,
26,
25,
24,
23,
22,
21,
20,
19,
18,
17,
16,
15,
14,
13,
12,
11,
10,
9,
8,
7,
6,
5,
4,
3,
2,
1,
0,
30,
29,
28,
27,
26,
25,
24,
23,
22,
21,
20,
19,
18,
17,
16,
15,
14,
13,
12,
11,
10,
9,
8,
7,
6,
5,
4,
3,
2,
1,
0,
],
dtype="int64",
),
]
assert not libalgos.is_lexsorted(failure)
def test_groupsort_indexer():
a = np.random.randint(0, 1000, 100).astype(np.intp)
b = np.random.randint(0, 1000, 100).astype(np.intp)
result = libalgos.groupsort_indexer(a, 1000)[0]
# need to use a stable sort
# np.argsort returns int, groupsort_indexer
# always returns intp
expected = np.argsort(a, kind="mergesort")
expected = expected.astype(np.intp)
tm.assert_numpy_array_equal(result, expected)
# compare with lexsort
# np.lexsort returns int, groupsort_indexer
# always returns intp
key = a * 1000 + b
result = libalgos.groupsort_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
expected = expected.astype(np.intp)
tm.assert_numpy_array_equal(result, expected)
def test_infinity_sort():
# GH 13445
# numpy's argsort can be unhappy if something is less than
# itself. Instead, let's give our infinities a self-consistent
# ordering, but outside the float extended real line.
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf]
assert all(Inf >= x for x in ref_nums)
assert all(Inf > x or x is Inf for x in ref_nums)
assert Inf >= Inf and Inf == Inf
assert not Inf < Inf and not Inf > Inf
assert libalgos.Infinity() == libalgos.Infinity()
assert not libalgos.Infinity() != libalgos.Infinity()
assert all(NegInf <= x for x in ref_nums)
assert all(NegInf < x or x is NegInf for x in ref_nums)
assert NegInf <= NegInf and NegInf == NegInf
assert not NegInf < NegInf and not NegInf > NegInf
assert libalgos.NegInfinity() == libalgos.NegInfinity()
assert not libalgos.NegInfinity() != libalgos.NegInfinity()
for perm in permutations(ref_nums):
assert sorted(perm) == ref_nums
# smoke tests
np.array([libalgos.Infinity()] * 32).argsort()
np.array([libalgos.NegInfinity()] * 32).argsort()
def test_infinity_against_nan():
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
assert not Inf > np.nan
assert not Inf >= np.nan
assert not Inf < np.nan
assert not Inf <= np.nan
assert not Inf == np.nan
assert Inf != np.nan
assert not NegInf > np.nan
assert not NegInf >= np.nan
assert not NegInf < np.nan
assert not NegInf <= np.nan
assert not NegInf == np.nan
assert NegInf != np.nan
def test_ensure_platform_int():
arr = np.arange(100, dtype=np.intp)
result = libalgos.ensure_platform_int(arr)
assert result is arr
def test_int64_add_overflow():
# see gh-14068
msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
n = np.iinfo(np.int64).min
with pytest.raises(OverflowError, match=msg):
algos.checked_add_with_arr(np.array([m, m]), m)
with pytest.raises(OverflowError, match=msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]))
with pytest.raises(OverflowError, match=msg):
algos.checked_add_with_arr(np.array([n, n]), n)
with pytest.raises(OverflowError, match=msg):
algos.checked_add_with_arr(np.array([n, n]), np.array([n, n]))
with pytest.raises(OverflowError, match=msg):
algos.checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with pytest.raises(OverflowError, match=msg):
algos.checked_add_with_arr(
np.array([m, m]), np.array([m, m]), arr_mask=np.array([False, True])
)
with pytest.raises(OverflowError, match=msg):
algos.checked_add_with_arr(
np.array([m, m]), np.array([m, m]), b_mask=np.array([False, True])
)
with pytest.raises(OverflowError, match=msg):
algos.checked_add_with_arr(
np.array([m, m]),
np.array([m, m]),
arr_mask=np.array([False, True]),
b_mask=np.array([False, True]),
)
with pytest.raises(OverflowError, match=msg):
with tm.assert_produces_warning(RuntimeWarning):
algos.checked_add_with_arr(np.array([m, m]), np.array([np.nan, m]))
# Check that the nan boolean arrays override whether or not
# the addition overflows. We don't check the result but just
# the fact that an OverflowError is not raised.
algos.checked_add_with_arr(
np.array([m, m]), np.array([m, m]), arr_mask=np.array([True, True])
)
algos.checked_add_with_arr(
np.array([m, m]), np.array([m, m]), b_mask=np.array([True, True])
)
algos.checked_add_with_arr(
np.array([m, m]),
np.array([m, m]),
arr_mask=np.array([True, False]),
b_mask=np.array([False, True]),
)
class TestMode:
def test_no_mode(self):
exp = Series([], dtype=np.float64, index=Index([], dtype=int))
tm.assert_numpy_array_equal(algos.mode([]), exp.values)
@pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"])
def test_mode_single(self, dt):
# GH 15714
exp_single = [1]
data_single = [1]
exp_multi = [1]
data_multi = [1, 1]
ser = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
tm.assert_series_equal(ser.mode(), exp)
ser = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
tm.assert_series_equal(ser.mode(), exp)
def test_mode_obj_int(self):
exp = Series([1], dtype=int)
tm.assert_numpy_array_equal(algos.mode([1]), exp.values)
exp = Series(["a", "b", "c"], dtype=object)
tm.assert_numpy_array_equal(algos.mode(["a", "b", "c"]), exp.values)
@pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"])
def test_number_mode(self, dt):
exp_single = [1]
data_single = [1] * 5 + [2] * 3
exp_multi = [1, 3]
data_multi = [1] * 5 + [2] * 3 + [3] * 5
ser = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
tm.assert_series_equal(ser.mode(), exp)
ser = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
tm.assert_series_equal(ser.mode(), exp)
def test_strobj_mode(self):
exp = ["b"]
data = ["a"] * 2 + ["b"] * 3
ser = Series(data, dtype="c")
exp = Series(exp, dtype="c")
tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
tm.assert_series_equal(ser.mode(), exp)
@pytest.mark.parametrize("dt", [str, object])
def test_strobj_multi_char(self, dt):
exp = ["bar"]
data = ["foo"] * 2 + ["bar"] * 3
ser = Series(data, dtype=dt)
exp = Series(exp, dtype=dt)
tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
tm.assert_series_equal(ser.mode(), exp)
def test_datelike_mode(self):
exp = Series(["1900-05-03", "2011-01-03", "2013-01-02"], dtype="M8[ns]")
ser = Series(["2011-01-03", "2013-01-02", "1900-05-03"], dtype="M8[ns]")
tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)
tm.assert_series_equal(ser.mode(), exp)
exp = Series(["2011-01-03", "2013-01-02"], dtype="M8[ns]")
ser = Series(
["2011-01-03", "2013-01-02", "1900-05-03", "2011-01-03", "2013-01-02"],
dtype="M8[ns]",
)
tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)
tm.assert_series_equal(ser.mode(), exp)
def test_timedelta_mode(self):
exp = Series(["-1 days", "0 days", "1 days"], dtype="timedelta64[ns]")
ser = Series(["1 days", "-1 days", "0 days"], dtype="timedelta64[ns]")
tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)
tm.assert_series_equal(ser.mode(), exp)
exp = Series(["2 min", "1 day"], dtype="timedelta64[ns]")
ser = Series(
["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"],
dtype="timedelta64[ns]",
)
tm.assert_extension_array_equal(algos.mode(ser.values), exp._values)
tm.assert_series_equal(ser.mode(), exp)
def test_mixed_dtype(self):
exp = Series(["foo"])
ser = Series([1, "foo", "foo"])
tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
tm.assert_series_equal(ser.mode(), exp)
def test_uint64_overflow(self):
exp = Series([2**63], dtype=np.uint64)
ser = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
tm.assert_series_equal(ser.mode(), exp)
exp = Series([1, 2**63], dtype=np.uint64)
ser = Series([1, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
tm.assert_series_equal(ser.mode(), exp)
def test_categorical(self):
c = Categorical([1, 2])
exp = c
msg = "Categorical.mode is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
res = c.mode()
tm.assert_categorical_equal(res, exp)
c = Categorical([1, "a", "a"])
exp = Categorical(["a"], categories=[1, "a"])
with tm.assert_produces_warning(FutureWarning, match=msg):
res = c.mode()
tm.assert_categorical_equal(res, exp)
c = Categorical([1, 1, 2, 3, 3])
exp = Categorical([1, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning, match=msg):
res = c.mode()
tm.assert_categorical_equal(res, exp)
def test_index(self):
idx = Index([1, 2, 3])
exp = Series([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(algos.mode(idx), exp.values)
idx = Index([1, "a", "a"])
exp = Series(["a"], dtype=object)
tm.assert_numpy_array_equal(algos.mode(idx), exp.values)
idx = Index([1, 1, 2, 3, 3])
exp = Series([1, 3], dtype=np.int64)
tm.assert_numpy_array_equal(algos.mode(idx), exp.values)
idx = Index(
["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"],
dtype="timedelta64[ns]",
)
with pytest.raises(AttributeError, match="TimedeltaIndex"):
# algos.mode expects Arraylike, does *not* unwrap TimedeltaIndex
algos.mode(idx)
class TestDiff:
@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
def test_diff_datetimelike_nat(self, dtype):
# NaT - NaT is NaT, not 0
arr = np.arange(12).astype(np.int64).view(dtype).reshape(3, 4)
arr[:, 2] = arr.dtype.type("NaT", "ns")
result = algos.diff(arr, 1, axis=0)
expected = np.ones(arr.shape, dtype="timedelta64[ns]") * 4
expected[:, 2] = np.timedelta64("NaT", "ns")
expected[0, :] = np.timedelta64("NaT", "ns")
tm.assert_numpy_array_equal(result, expected)
result = algos.diff(arr.T, 1, axis=1)
tm.assert_numpy_array_equal(result, expected.T)
def test_diff_ea_axis(self):
dta = date_range("2016-01-01", periods=3, tz="US/Pacific")._data
msg = "cannot diff DatetimeArray on axis=1"
with pytest.raises(ValueError, match=msg):
algos.diff(dta, 1, axis=1)
@pytest.mark.parametrize("dtype", ["int8", "int16"])
def test_diff_low_precision_int(self, dtype):
arr = np.array([0, 1, 1, 0, 0], dtype=dtype)
result = algos.diff(arr, 1)
expected = np.array([np.nan, 1, 0, -1, 0], dtype="float32")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("op", [np.array, pd.array])
def test_union_with_duplicates(op):
# GH#36289
lvals = op([3, 1, 3, 4])
rvals = op([2, 3, 1, 1])
expected = op([3, 3, 1, 1, 4, 2])
if isinstance(expected, np.ndarray):
result = algos.union_with_duplicates(lvals, rvals)
tm.assert_numpy_array_equal(result, expected)
else:
result = algos.union_with_duplicates(lvals, rvals)
tm.assert_extension_array_equal(result, expected)
| 34.702146 | 88 | 0.551919 |
080eec5be8283dedc491afecb6b381e3dbae21cc | 4,227 | py | Python | tests/performance/cv_performance.py | drunkcoding/efficient-nlp | 9509e0ef08016506280a7cfc600ea8e3778dea2d | ["Apache-2.0"] | null | null | null | tests/performance/cv_performance.py | drunkcoding/efficient-nlp | 9509e0ef08016506280a7cfc600ea8e3778dea2d | ["Apache-2.0"] | null | null | null | tests/performance/cv_performance.py | drunkcoding/efficient-nlp | 9509e0ef08016506280a7cfc600ea8e3778dea2d | ["Apache-2.0"] | null | null | null |
from tqdm import tqdm
from torchvision import datasets, transforms
import torchvision.models as models
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.profiler import profile, record_function, ProfilerActivity, schedule
import torch
import torch.cuda as cutorch
import numpy as np
import pandas as pd
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
from ecosys.utils.logger import Logger
from ecosys.utils.data_processor import processors, output_modes
from ecosys.utils.data_structure import HuggingFaceDataset
logger = Logger(__file__, "info", "w")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
model_keys = [
'resnet18',
'resnet50',
'resnet152',
'vgg11',
'vgg16',
'vgg19_bn',
'inception',
'mobilenet',
]
model_instances = [
models.resnet18(pretrained=True),
models.resnet50(pretrained=True),
models.resnet152(pretrained=True),
models.vgg11(pretrained=True),
models.vgg16(pretrained=True),
models.vgg19_bn(pretrained=True),
models.inception_v3(pretrained=True),
models.mobilenet_v2(pretrained=True),
]
for model in model_instances:
model.to(device)
model.eval()
models = dict(zip(model_keys, model_instances))
# ------------- Dataset Prepare --------------
dataset = datasets.ImageNet("/home/oai/share/dataset/.", split="val", transform=preprocess)
sampler = SequentialSampler(dataset)
logger.info("n_samples %s", len(dataset))
# performance_schedule = schedule(
# skip_first=10,
# wait=5,
# warmup=1,
# active=3,
# repeat=2
# )
import subprocess as sp
record = {
'bs': list(),
'key': list(),
'mem': list(),
'tol_t': list(),
'avg_t': list(),
}
def get_gpu_memory():
command = "nvidia-smi --query-gpu=memory.used --format=csv"
memory_used_info = sp.check_output(command.split()).decode('ascii').split('\n')[:-1][1:]
    memory_used_values = [int(x.split()[0]) for x in memory_used_info]
return np.sum(memory_used_values)
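# Benchmark sweep: for every model and batch size, time each forward pass with
# CUDA events and record the reported GPU memory plus total/average latency.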
for key in model_keys:
with torch.no_grad():
        for batch_size in [1, 2, 4, 8, 16, 32, 64, 128]:
dataloader = DataLoader(
dataset, sampler=sampler, batch_size=batch_size
)
# with profile(
# activities=[ProfilerActivity.CPU],
# # record_shapes=True,
# profile_memory=True,
# schedule=performance_schedule,
# ) as prof:
# # with record_function("model_inference"):
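            # CUDA events give device-side timings; torch.cuda.synchronize() below
            # ensures the forward pass has finished before reading the elapsed time.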
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
timings = []
for data in tqdm(dataloader, desc="Measuring"):
image = data[0].to(device)
label = data[1].to(device)
starter.record()
_ = models[key](image)
ender.record()
torch.cuda.synchronize()
curr_time = starter.elapsed_time(ender)
timings.append(curr_time)
# print(dir(cutorch.get_device_properties(device)))
# print(prof.key_averages())
record['bs'].append(batch_size)
record['key'].append(key)
record['mem'].append(get_gpu_memory())
record['tol_t'].append(np.sum(timings))
record['avg_t'].append(np.mean(timings))
logger.info(
"bs %s; key %s; Mem (MiB) %s; total time (ms) %s; avg time (ms) %s",
batch_size,
key,
get_gpu_memory(),
np.sum(timings),
np.mean(timings)
)
# logger.info("bs %s; key %s;\n\n %s \n\n ", batch_size, key, prof.key_averages().table(sort_by="cuda_time_total"))
df = pd.DataFrame(record)
df.to_csv(os.path.join(os.path.dirname(__file__), "cv_performance.csv"))
| 32.267176 | 127 | 0.609652 |
073e129d79a65231f457d3fdf1349eaad2ef4aa4 | 173 | py | Python | tools/old/ext.py | yuj09161/word | 8260c50998cf3f129b7da886163a218774dea029 | ["MIT"] | null | null | null | tools/old/ext.py | yuj09161/word | 8260c50998cf3f129b7da886163a218774dea029 | ["MIT"] | null | null | null | tools/old/ext.py | yuj09161/word | 8260c50998cf3f129b7da886163a218774dea029 | ["MIT"] | null | null | null |
import os
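# Rename every .txt file in the current directory to .csv, leaving other files untouched.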
for file in os.listdir():
    name, ext = os.path.splitext(file)
    print(name, ext)
    if os.path.isfile(file) and ext == '.txt':
        os.rename(file, name + '.csv')
| 24.714286 | 44 | 0.630058 |
6d768611f47d025618bdecaebef9a6d419ef05a8 | 2,558 | py | Python | simublocks/simulation/plot.py | bentoavb/simublocks | 9d4a5600b8aecd2d188e9191d78789a1bd725ab8 | ["MIT"] | 2 | 2020-05-14T12:34:43.000Z | 2020-06-11T23:48:09.000Z | simublocks/simulation/plot.py | bentoavb/simublocks | 9d4a5600b8aecd2d188e9191d78789a1bd725ab8 | ["MIT"] | null | null | null | simublocks/simulation/plot.py | bentoavb/simublocks | 9d4a5600b8aecd2d188e9191d78789a1bd725ab8 | ["MIT"] | 1 | 2020-05-12T07:01:28.000Z | 2020-05-12T07:01:28.000Z |
# MIT License
#
# Copyright (c) 2020 Anderson Vitor Bento
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import matplotlib.pyplot as plt
from simublocks.dialog import Dialog
class Plot:
def run(s):
count = 0
for i in s['graphs']:
count += 1
Plot.figure(count, i, {**s['systems'], **s['inputs'], **s['functions'], **s['sums']}, s['t'])
#Plot.plotInput(s['inputs'], s['t'])
#Plot.plotSystem(s['systems'], s['t'])
plt.show()
def figure(count, graph, blocks, t):
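        # Draw one matplotlib figure for a 'graph' block: each checked line is looked
        # up by name among the connected blocks and plotted against t ('input' lines
        # use block.input, system/function/sum lines use the block output block.y).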
plt.figure(count)
legend = []
for line in graph.code:
if line['check']:
if line['type'] in ['input', 'system', 'function', 'sum']:
try:
block = next(filter(lambda i: blocks[i].name == line['name'], blocks))
block = blocks[block]
except Exception as e:
Dialog.alert("Alert", [
"Error in the 'graph' block",
"Please, after removing or creating a new 'system' or 'input' block, remove and recreate all 'graph' blocks"
])
legend.append(line['legend'])
if line['type'] == 'input':
plt.plot(t[:-1], block.input[:-1],line['color'])
elif line['type'] in ['function','system','sum']:
plt.plot(t[:-1], block.y[:-1],line['color'])
plt.legend(legend)
plt.grid()
| 43.355932 | 136 | 0.595778 |
5aadc220bfebf610ed755a0f8c00e1d02c5ad8c5 | 5,776 | py | Python | pymodbus/datastore/context.py | etactica/pymodbus | eb51b4df4fa72d3ef5c5d14699b02456ef56203f | ["W3C"] | null | null | null | pymodbus/datastore/context.py | etactica/pymodbus | eb51b4df4fa72d3ef5c5d14699b02456ef56203f | ["W3C"] | null | null | null | pymodbus/datastore/context.py | etactica/pymodbus | eb51b4df4fa72d3ef5c5d14699b02456ef56203f | ["W3C"] | null | null | null |
from pymodbus.exceptions import NoSuchSlaveException
from pymodbus.interfaces import IModbusSlaveContext
from pymodbus.datastore.store import ModbusSequentialDataBlock
from pymodbus.constants import Defaults
#---------------------------------------------------------------------------#
# Logging
#---------------------------------------------------------------------------#
import logging
_logger = logging.getLogger(__name__)
#---------------------------------------------------------------------------#
# Slave Contexts
#---------------------------------------------------------------------------#
class ModbusSlaveContext(IModbusSlaveContext):
'''
This creates a modbus data model with each data access
stored in its own personal block
'''
def __init__(self, *args, **kwargs):
''' Initializes the datastores, defaults to fully populated
sequential data blocks if none are passed in.
:param kwargs: Each element is a ModbusDataBlock
'di' - Discrete Inputs initializer
'co' - Coils initializer
'hr' - Holding Register initializer
            'ir' - Input Registers initializer
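        Illustrative example (``store`` is a placeholder name)::
            store = ModbusSlaveContext(
                di=ModbusSequentialDataBlock(0, [0] * 100),
                co=ModbusSequentialDataBlock(0, [0] * 100),
                hr=ModbusSequentialDataBlock(0, [0] * 100),
                ir=ModbusSequentialDataBlock(0, [0] * 100))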
'''
self.store = {}
self.store['d'] = kwargs.get('di', ModbusSequentialDataBlock.create())
self.store['c'] = kwargs.get('co', ModbusSequentialDataBlock.create())
self.store['i'] = kwargs.get('ir', ModbusSequentialDataBlock.create())
self.store['h'] = kwargs.get('hr', ModbusSequentialDataBlock.create())
self.zero_mode = kwargs.get('zero-mode', Defaults.ZeroMode)
def __str__(self):
''' Returns a string representation of the context
:returns: A string representation of the context
'''
return "Modbus Slave Context"
def reset(self):
''' Resets all the datastores to their default values '''
        for datastore in self.store.values():
datastore.reset()
def validate(self, fx, address, count=1):
''' Validates the request to make sure it is in range
:param fx: The function we are working with
:param address: The starting address
:param count: The number of values to test
:returns: True if the request in within range, False otherwise
'''
if not self.zero_mode: address = address + 1
_logger.debug("validate[%d] %d:%d" % (fx, address, count))
return self.store[self.decode(fx)].validate(address, count)
def getValues(self, fx, address, count=1):
''' Validates the request to make sure it is in range
:param fx: The function we are working with
:param address: The starting address
:param count: The number of values to retrieve
:returns: The requested values from a:a+c
'''
if not self.zero_mode: address = address + 1
_logger.debug("getValues[%d] %d:%d" % (fx, address, count))
return self.store[self.decode(fx)].getValues(address, count)
def setValues(self, fx, address, values):
''' Sets the datastore with the supplied values
:param fx: The function we are working with
:param address: The starting address
:param values: The new values to be set
'''
if not self.zero_mode: address = address + 1
_logger.debug("setValues[%d] %d:%d" % (fx, address, len(values)))
self.store[self.decode(fx)].setValues(address, values)
class ModbusServerContext(object):
''' This represents a master collection of slave contexts.
If single is set to true, it will be treated as a single
context so every unit-id returns the same context. If single
is set to false, it will be interpreted as a collection of
slave contexts.
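    Illustrative example (``store``, ``store1`` and ``store2`` are placeholders)::
        # a single store shared by every unit id
        context = ModbusServerContext(slaves=store, single=True)
        # or one store per unit id
        context = ModbusServerContext(slaves={1: store1, 2: store2}, single=False)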
'''
def __init__(self, slaves=None, single=True):
''' Initializes a new instance of a modbus server context.
:param slaves: A dictionary of client contexts
:param single: Set to true to treat this as a single context
'''
self.single = single
self.__slaves = slaves or {}
if self.single:
self.__slaves = {Defaults.UnitId: self.__slaves}
def __iter__(self):
        ''' Iterate over the current collection of slave
contexts.
:returns: An iterator over the slave contexts
'''
        return iter(self.__slaves.items())
def __contains__(self, slave):
''' Check if the given slave is in this list
        :param slave: The slave to check for existence
:returns: True if the slave exists, False otherwise
'''
return slave in self.__slaves
def __setitem__(self, slave, context):
''' Used to set a new slave context
:param slave: The slave context to set
:param context: The new context to set for this slave
'''
if self.single: slave = Defaults.UnitId
if 0xf7 >= slave >= 0x00:
self.__slaves[slave] = context
else: raise NoSuchSlaveException('slave index[%d] out of range' % slave)
def __delitem__(self, slave):
''' Wrapper used to access the slave context
:param slave: The slave context to remove
'''
if not self.single and (0xf7 >= slave >= 0x00):
del self.__slaves[slave]
else: raise NoSuchSlaveException('slave index[%d] out of range' % slave)
def __getitem__(self, slave):
''' Used to get access to a slave context
:param slave: The slave context to get
:returns: The requested slave context
'''
if self.single: slave = Defaults.UnitId
if slave in self.__slaves:
return self.__slaves.get(slave)
else: raise NoSuchSlaveException('slave index[%d] out of range' % slave)
| 37.751634 | 80 | 0.605609 |
efe5fa1c1f05589bd95ae813a1e2533c50e86981 | 5,310 | py | Python | CSA_SR.py | YiyongHuang/CSA-SR | 522e946df50b52d9d560e255c8ca4abf1a89501f | ["MIT"] | 6 | 2020-12-02T14:13:21.000Z | 2021-05-18T07:22:27.000Z | CSA_SR.py | YiyongHuang/CSA-SR | 522e946df50b52d9d560e255c8ca4abf1a89501f | ["MIT"] | 2 | 2020-12-02T14:20:27.000Z | 2021-05-17T07:18:42.000Z | CSA_SR.py | YiyongHuang/CSA-SR | 522e946df50b52d9d560e255c8ca4abf1a89501f | ["MIT"] | null | null | null |
import torch
from torch import nn
from data_process import *
# from ConvGRU import SeqVLADModule
from ConvGRU_att import SeqVLADModule
from SLSTM import SLSTM
import math
from torch import Tensor
from torch.nn import Parameter
word_counts, unk_required = build_vocab(word_count_threshold=0)
word2id, id2word = word_to_ids(word_counts, unk_requried=unk_required)
class CSA_SR(nn.Module):
def __init__(self, vocab_size, batch_size=64, hidden=512, dropout=0.5, n_step=40, feats_c=1536,
feats_h=8, feats_w=8, num_centers=32, redu_dim=512):
super(CSA_SR, self).__init__()
self.batch_size = batch_size
self.hidden = hidden
self.n_step = n_step
self.feats_c = feats_c
self.feats_h = feats_h
self.feats_w = feats_w
self.num_centers = num_centers
self.redu_dim = redu_dim
# semantic weights
self.w_s = Parameter(Tensor(300, self.hidden))
self.w_x = Parameter(Tensor(self.hidden, self.hidden))
self.u_x = Parameter(Tensor(self.hidden, self.hidden))
self.reset_weigths()
self.seqvlad = SeqVLADModule(self.n_step, self.num_centers, self.redu_dim)
self.drop = nn.Dropout(p=dropout)
self.linear1 = nn.Linear(self.num_centers*self.redu_dim, self.hidden)
# self.linear1 = nn.Linear(self.num_centers, 1)
self.linear2 = nn.Linear(2*self.hidden+self.redu_dim, vocab_size)
# self.lstm1 = nn.LSTM(hidden, hidden, batch_first=True, dropout=dropout)
self.lstm2 = SLSTM(2 * hidden, hidden)
self.sem_decoder = nn.LSTM(hidden, 300, batch_first=True)
# self.lstm2 = nn.LSTM(2*hidden, hidden, batch_first=True, dropout=dropout)
self.embedding = nn.Embedding(vocab_size, hidden)
def reset_weigths(self):
"""reset weights
"""
stdv = 1.0 / math.sqrt(self.redu_dim)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
    @staticmethod
    def mean_pool_hiddens(hiddens, caption_masks):
caption_lens = caption_masks.sum(dim=0).type(torch.cuda.FloatTensor)
caption_masks = caption_masks.unsqueeze(2).expand_as(hiddens).type_as(hiddens)
hiddens_masked = caption_masks * hiddens
hiddens_mean_pooled = hiddens_masked.sum(dim=0) / \
caption_lens.unsqueeze(1).expand(caption_lens.size(0), hiddens_masked.size(2))
return hiddens_mean_pooled
def forward(self, video, tag, caption=None):
video = video.contiguous().view(-1, self.feats_c, self.feats_h, self.feats_w)
if self.training:
video = self.drop(video)
vlad = self.seqvlad(video) # batch_size, num_centers*redu_dim
vlad = self.linear1(vlad)
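        # Fuse the two streams: project the visual VLAD descriptor (w_x) and the
        # semantic tag vector (w_s), gate them element-wise, then map back via u_x.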
vlad = (vlad @ self.w_x) * (tag @ self.w_s) @ self.u_x
vid_out = vlad.unsqueeze(1).repeat(1, self.n_step - 1, 1)
if self.training:
caption = self.embedding(caption[:, 0:self.n_step - 1])
caption = torch.cat((caption, vid_out), 2) # caption input
# cap_out, state_cap = self.lstm2(caption)
cap_out, state_cap = self.lstm2(caption, tag)
sem_out, sem_state = self.sem_decoder(cap_out)
sem_out = sem_out.sum(1)/(self.n_step - 1)
cap_out = torch.cat((cap_out, caption), 2)
cap_out = cap_out.contiguous().view(-1, 2*self.hidden+self.redu_dim)
cap_out = self.drop(cap_out)
cap_out = self.linear2(cap_out)
return cap_out, sem_out
# cap_out size [batch_size*79, vocab_size]
else:
bos_id = word2id['<BOS>'] * torch.ones(self.batch_size, dtype=torch.long)
bos_id = bos_id.cuda()
cap_input = self.embedding(bos_id)
cap_input = torch.cat((cap_input, vid_out[:, 0, :]), 1)
cap_input = cap_input.view(self.batch_size, 1, 2 * self.hidden)
# cap_out, state_cap = self.lstm2(cap_input)
cap_out, state_cap = self.lstm2(cap_input, tag)
cap_out = torch.cat((cap_out, cap_input), 2)
cap_out = cap_out.contiguous().view(-1, 2*self.hidden+self.redu_dim)
cap_out = self.linear2(cap_out)
cap_out = torch.argmax(cap_out, 1)
# input ["<BOS>"] to let the generate start
caption = []
caption.append(cap_out)
# put the generate word index in caption list, generate one word at one time step for each batch
for i in range(self.n_step - 2):
cap_input = self.embedding(cap_out)
cap_input = torch.cat((cap_input, vid_out[:, 1 + i, :]), 1)
cap_input = cap_input.view(self.batch_size, 1, 2 * self.hidden)
# cap_out, state_cap = self.lstm2(cap_input, state_cap)
cap_out, state_cap = self.lstm2(cap_input, tag, state_cap)
cap_out = torch.cat((cap_out, cap_input), 2)
cap_out = cap_out.contiguous().view(-1, 2*self.hidden+self.redu_dim)
cap_out = self.linear2(cap_out)
cap_out = torch.argmax(cap_out, 1)
# get the index of each word in vocabulary
caption.append(cap_out)
return caption
# size of caption is [79, batch_size]
| 42.48 | 108 | 0.622787 |
01231e72835bc3335360916fe43880f3d771e11c | 9,082 | py | Python | tiledb/fragment.py | TileDB-Inc/TileDB-Py | a5b0371d34020c17c351d560edb786d2224ad32f | ["MIT"] | 136 | 2018-02-26T05:17:24.000Z | 2022-03-29T22:59:31.000Z | tiledb/fragment.py | TileDB-Inc/TileDB-Py | a5b0371d34020c17c351d560edb786d2224ad32f | ["MIT"] | 578 | 2018-02-20T02:07:51.000Z | 2022-03-31T11:24:34.000Z | tiledb/fragment.py | TileDB-Inc/TileDB-Py | a5b0371d34020c17c351d560edb786d2224ad32f | ["MIT"] | 30 | 2018-03-22T04:13:43.000Z | 2022-03-26T13:24:43.000Z |
import pprint
import warnings
import tiledb
from tiledb.main import PyFragmentInfo
"""
Retrieves information from all fragments for a given array.
"""
class FragmentInfoList:
"""
Class representing an ordered list of FragmentInfo objects.
:param uri: URIs of fragments
:param version: Fragment version of each fragment
:param nonempty_domain: Non-empty domain of each fragment
:param cell_num: Number of cells in each fragment
:param timestamp_range: Timestamp range of when each fragment was written
:param dense: For each fragment, True if fragment is dense, else False
:param sparse: For each fragment, True if fragment is sparse, else False
:param has_consolidated_metadata: For each fragment, True if fragment has consolidated fragment metadata, else False
:param unconsolidated_metadata_num: Number of unconsolidated metadata fragments in each fragment
:param to_vacuum: URIs of already consolidated fragments to vacuum
**Example:**
>>> import tiledb, numpy as np, tempfile
>>> with tempfile.TemporaryDirectory() as tmp:
... # The array will be 4x4 with dimensions "rows" and "cols", with domain [1,4] and space tiles 2x2
... dom = tiledb.Domain(
... tiledb.Dim(name="rows", domain=(1, 4), tile=2, dtype=np.int32),
... tiledb.Dim(name="cols", domain=(1, 4), tile=2, dtype=np.int32),
... )
... # The array will be dense with a single attribute "a" so each (i,j) cell can store an integer.
... schema = tiledb.ArraySchema(
... domain=dom, sparse=False, attrs=[tiledb.Attr(name="a", dtype=np.int32)]
... )
... # Set URI of the array
... uri = tmp + "/array"
... # Create the (empty) array on disk.
... tiledb.Array.create(uri, schema)
...
... # Write three fragments to the array
... with tiledb.DenseArray(uri, mode="w") as A:
... A[1:3, 1:5] = np.array(([1, 2, 3, 4, 5, 6, 7, 8]))
... with tiledb.DenseArray(uri, mode="w") as A:
... A[2:4, 2:4] = np.array(([101, 102, 103, 104]))
... with tiledb.DenseArray(uri, mode="w") as A:
... A[3:4, 4:5] = np.array(([202]))
...
... # tiledb.array_fragments() requires TileDB-Py version > 0.8.5
... fragments_info = tiledb.array_fragments(uri)
...
... "====== FRAGMENTS INFO ======"
... f"number of fragments: {len(fragments_info)}"
... f"nonempty domains: {fragments_info.nonempty_domain}"
... f"sparse fragments: {fragments_info.sparse}"
...
... for fragment in fragments_info:
... f"===== FRAGMENT NUMBER {fragment.num} ====="
... f"is dense: {fragment.dense}"
... f"cell num: {fragment.cell_num}"
... f"has consolidated metadata: {fragment.has_consolidated_metadata}"
... f"nonempty domain: {fragment.nonempty_domain}"
'====== FRAGMENTS INFO ======'
'number of fragments: 3'
'nonempty domains: (((1, 2), (1, 4)), ((2, 3), (2, 3)), ((3, 3), (4, 4)))'
'sparse fragments: (False, False, False)'
'===== FRAGMENT NUMBER 0 ====='
'is dense: True'
'cell num: 8'
'has consolidated metadata: False'
'nonempty domain: ((1, 2), (1, 4))'
'===== FRAGMENT NUMBER 1 ====='
'is dense: True'
'cell num: 16'
'has consolidated metadata: False'
'nonempty domain: ((2, 3), (2, 3))'
'===== FRAGMENT NUMBER 2 ====='
'is dense: True'
'cell num: 4'
'has consolidated metadata: False'
'nonempty domain: ((3, 3), (4, 4))'
"""
def __init__(self, array_uri, ctx=None):
if ctx is None:
ctx = tiledb.default_ctx()
schema = tiledb.ArraySchema.load(array_uri, ctx=ctx)
self.array_uri = array_uri
fi = PyFragmentInfo(self.array_uri, ctx)
fi.load()
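        # All per-fragment metadata is read eagerly and cached on this object below,
        # so the native fragment-info handle can be closed at the end of __init__.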
self.uri = fi.fragment_uri()
self.__nums = fi.fragment_num()
self.version = fi.version()
self.nonempty_domain = fi.get_non_empty_domain(schema)
self.cell_num = fi.cell_num()
self.timestamp_range = fi.timestamp_range()
self.dense = fi.dense()
self.sparse = fi.sparse()
self.has_consolidated_metadata = fi.has_consolidated_metadata()
self.unconsolidated_metadata_num = fi.unconsolidated_metadata_num()
self.to_vacuum = fi.to_vacuum_uri()
fi.close()
@property
def non_empty_domain(self):
warnings.warn(
"FragmentInfoList.non_empty_domain is deprecated; "
"please use FragmentInfoList.nonempty_domain",
DeprecationWarning,
)
return self.nonempty_domain
@property
def to_vacuum_num(self):
warnings.warn(
"FragmentInfoList.to_vacuum_num is deprecated; "
"please use len(FragmentInfoList.to_vacuum)",
DeprecationWarning,
)
return len(self.to_vacuum)
@property
def to_vacuum_uri(self):
warnings.warn(
"FragmentInfoList.to_vacuum_uri is deprecated; "
"please use FragmentInfoList.to_vacuum",
DeprecationWarning,
)
return self.to_vacuum
def __iter__(self):
return FragmentsInfoIterator(self)
def __getitem__(self, key):
if isinstance(key, slice):
# Get the start, stop, and step from the slice
return [FragmentInfo(self, idx) for idx in range(*key.indices(len(self)))]
elif isinstance(key, int):
return FragmentInfo(self, key)
else:
raise TypeError("Invalid argument type.")
def __len__(self):
return self.__nums
def __repr__(self):
public_attrs = {
key: value
for (key, value) in self.__dict__.items()
if not key.startswith("_")
}
return pprint.PrettyPrinter().pformat(public_attrs)
class FragmentsInfoIterator:
"""
Iterator class for the FragmentsInfo container.
"""
def __init__(self, fragments):
self._fragments = fragments
self._index = 0
def __next__(self):
if self._index < len(self._fragments):
fi = FragmentInfo(self._fragments, self._index)
self._index += 1
return fi
raise StopIteration
class FragmentInfo:
"""
Class representing the metadata for a single fragment. See :py:class:`tiledb.FragmentInfoList` for example of usage.
:param str uri: URIs of fragments
:param int version: Fragment version of each fragment
:param nonempty_domain: Non-empty domain of each fragment
:type nonempty_domain: tuple(numpy scalar, numpy scalar)
:param cell_num int: Number of cells in each fragment
:param timestamp_range: Timestamp range of when each fragment was written
:type timestamp_range: tuple(int, int)
:param bool dense: True if fragment is dense, else False
:param bool sparse: True if fragment is sparse, else False
:param bool has_consolidated_metadata: True if fragment has consolidated metadata, else False
:param int unconsolidated_metadata_num: Number of unconsolidated metadata fragments
"""
def __init__(self, fragments: FragmentInfoList, num):
self._frags = fragments
self.num = num
self.uri = fragments.uri[num]
self.version = fragments.version[num]
self.nonempty_domain = fragments.nonempty_domain[num]
self.cell_num = fragments.cell_num[num]
self.timestamp_range = fragments.timestamp_range[num]
self.dense = fragments.dense[num]
self.sparse = fragments.sparse[num]
self.has_consolidated_metadata = fragments.has_consolidated_metadata[num]
self.unconsolidated_metadata_num = fragments.unconsolidated_metadata_num
def __repr__(self):
return pprint.PrettyPrinter().pformat(self.__dict__)
@property
def non_empty_domain(self):
warnings.warn(
"FragmentInfo.non_empty_domain is deprecated; "
"please use FragmentInfo.nonempty_domain",
DeprecationWarning,
)
return self.nonempty_domain
@property
def to_vacuum_num(self):
warnings.warn(
"FragmentInfo.to_vacuum_num is deprecated; "
"please use len(FragmentInfoList.to_vacuum)",
DeprecationWarning,
)
return len(self._frags.to_vacuum)
@property
def to_vacuum_uri(self):
warnings.warn(
"FragmentInfo.to_vacuum_uri is deprecated; "
"please use FragmentInfoList.to_vacuum",
DeprecationWarning,
)
return self._frags.to_vacuum
def FragmentsInfo(array_uri, ctx=None):
"""
Deprecated in 0.8.8.
Renamed to FragmentInfoList to make name more distinguishable from FragmentInfo.
"""
warnings.warn(
"FragmentsInfo is deprecated; please use FragmentInfoList", DeprecationWarning
)
if ctx is None:
ctx = tiledb.default_ctx()
return FragmentInfoList(array_uri, ctx)
| 35.20155 | 120 | 0.625963 |