Dataset schema (one row per source file):

| column | type |
|---|---|
| hexsha | string (length 40) |
| size | int64 (4 to 1.02M) |
| ext | string (8 classes) |
| lang | string (1 class) |
| max_stars_repo_path | string (length 4 to 209) |
| max_stars_repo_name | string (length 5 to 121) |
| max_stars_repo_head_hexsha | string (length 40) |
| max_stars_repo_licenses | list (length 1 to 10) |
| max_stars_count | int64 (1 to 191k), nullable |
| max_stars_repo_stars_event_min_datetime | string (length 24), nullable |
| max_stars_repo_stars_event_max_datetime | string (length 24), nullable |
| max_issues_repo_path | string (length 4 to 209) |
| max_issues_repo_name | string (length 5 to 121) |
| max_issues_repo_head_hexsha | string (length 40) |
| max_issues_repo_licenses | list (length 1 to 10) |
| max_issues_count | int64 (1 to 67k), nullable |
| max_issues_repo_issues_event_min_datetime | string (length 24), nullable |
| max_issues_repo_issues_event_max_datetime | string (length 24), nullable |
| max_forks_repo_path | string (length 4 to 209) |
| max_forks_repo_name | string (length 5 to 121) |
| max_forks_repo_head_hexsha | string (length 40) |
| max_forks_repo_licenses | list (length 1 to 10) |
| max_forks_count | int64 (1 to 105k), nullable |
| max_forks_repo_forks_event_min_datetime | string (length 24), nullable |
| max_forks_repo_forks_event_max_datetime | string (length 24), nullable |
| content | string (length 4 to 1.02M) |
| avg_line_length | float64 (1.07 to 66.1k) |
| max_line_length | int64 (4 to 266k) |
| alphanum_fraction | float64 (0.01 to 1) |
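The last three columns are simple statistics over `content`. As a minimal sketch of how they can be recomputed for a row (the exact definitions below are my assumption, not something stated on this page):

```python
def content_stats(content: str):
    """Recompute the derived columns for one row's `content` field.

    Assumed definitions: avg_line_length = characters per line,
    max_line_length = longest line, alphanum_fraction = share of
    alphanumeric characters in the whole file.
    """
    lines = content.splitlines()
    avg_line_length = len(content) / len(lines) if lines else 0.0
    max_line_length = max((len(line) for line in lines), default=0)
    alphanum_fraction = (
        sum(ch.isalnum() for ch in content) / len(content) if content else 0.0
    )
    return avg_line_length, max_line_length, alphanum_fraction
```

For the first row below (`setup.py`, size 861), the reported avg_line_length of 31.888889 corresponds to 861 characters over 27 lines, which is consistent with these definitions.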
fca689ff842187573cc5c9f59e5be60936d82444
| 861
|
py
|
Python
|
setup.py
|
IAmTheBlurr/TotalRecall
|
52e5018059f9d7f1b758984cbf0d57f2f1202272
|
[
"MIT"
] | null | null | null |
setup.py
|
IAmTheBlurr/TotalRecall
|
52e5018059f9d7f1b758984cbf0d57f2f1202272
|
[
"MIT"
] | 2
|
2019-09-26T18:40:12.000Z
|
2019-09-28T08:17:59.000Z
|
setup.py
|
IAmTheBlurr/TotalRecall
|
52e5018059f9d7f1b758984cbf0d57f2f1202272
|
[
"MIT"
] | 1
|
2019-10-03T17:24:28.000Z
|
2019-10-03T17:24:28.000Z
|
import setuptools
with open("README.md", "r") as file:
long_description = file.read()
setuptools.setup(
name='Chronomancy',
version='0.0.2',
author='Jonathan Craig',
author_email='blurr@iamtheblurr.com',
description="The power of Time itself, for Python",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=setuptools.find_packages(),
url='https://github.com/IAmTheBlurr/Chronomancy',
license='MIT',
)
| 31.888889
| 70
| 0.650407
|
758129989550e7f4799c6f188cf8582869c237ed
| 1,612
|
py
|
Python
|
scripts/plot_results.py
|
callaunchpad/emergence
|
b5da3697fa08202c23302c2ed628d24af3af4940
|
[
"MIT"
] | null | null | null |
scripts/plot_results.py
|
callaunchpad/emergence
|
b5da3697fa08202c23302c2ed628d24af3af4940
|
[
"MIT"
] | null | null | null |
scripts/plot_results.py
|
callaunchpad/emergence
|
b5da3697fa08202c23302c2ed628d24af3af4940
|
[
"MIT"
] | null | null | null |
from stable_baselines import results_plotter
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['svg.fonttype'] = 'none'
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue']
def main():
"""
Example usage in jupyter-notebook
.. code-block:: python
from stable_baselines import results_plotter
%matplotlib inline
results_plotter.plot_results(["./log"], 10e6, results_plotter.X_TIMESTEPS, "Breakout")
Here ./log is a directory containing the monitor.csv files
"""
import argparse
import os
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dirs', help='List of log directories', nargs='*', default=['./log'])
parser.add_argument('--num_timesteps', type=int, default=int(10e6))
parser.add_argument('--xaxis', help='Variable on X-axis', default=X_TIMESTEPS)
parser.add_argument('--task_name', help='Title of plot', default='Title')
args = parser.parse_args()
args.dirs = [os.path.abspath(folder) for folder in args.dirs]
results_plotter.plot_results(args.dirs, args.num_timesteps, args.xaxis, args.task_name)
plt.show()
if __name__ == '__main__':
main()
| 36.636364
| 95
| 0.692928
|
e5ac8f5d9a520c52a18b5bc2264b4fac7c01babc
| 2,817
|
py
|
Python
|
web3/utils/threads.py
|
voBits/web3
|
947e252124f04b33ac5f96179dccd1a3476b3936
|
[
"MIT"
] | 326
|
2016-04-29T21:51:06.000Z
|
2022-03-31T03:20:54.000Z
|
web3/utils/threads.py
|
voBits/web3
|
947e252124f04b33ac5f96179dccd1a3476b3936
|
[
"MIT"
] | 283
|
2016-04-15T16:41:31.000Z
|
2017-11-28T16:41:36.000Z
|
web3/utils/threads.py
|
voBits/web3
|
947e252124f04b33ac5f96179dccd1a3476b3936
|
[
"MIT"
] | 146
|
2016-04-14T16:27:54.000Z
|
2021-10-03T13:31:07.000Z
|
"""
A minimal implementation of the various gevent APIs used within this codebase.
"""
import time
import threading
class Timeout(Exception):
"""
A limited subset of the `gevent.Timeout` context manager.
"""
seconds = None
exception = None
begun_at = None
is_running = None
def __init__(self, seconds=None, exception=None, *args, **kwargs):
self.seconds = seconds
self.exception = exception
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
def __str__(self):
if self.seconds is None:
return ''
return "{0} seconds".format(self.seconds)
@property
def expire_at(self):
if self.seconds is None:
raise ValueError("Timeouts with `seconds == None` do not have an expiration time")
elif self.begun_at is None:
raise ValueError("Timeout has not been started")
return self.begun_at + self.seconds
def start(self):
if self.is_running is not None:
raise ValueError("Timeout has already been started")
self.begun_at = time.time()
self.is_running = True
def check(self):
if self.is_running is None:
raise ValueError("Timeout has not been started")
elif self.is_running is False:
raise ValueError("Timeout has already been cancelled")
elif self.seconds is None:
return
elif time.time() > self.expire_at:
self.is_running = False
if isinstance(self.exception, type):
raise self.exception(str(self))
elif isinstance(self.exception, Exception):
raise self.exception
else:
raise self
def cancel(self):
self.is_running = False
def sleep(self, seconds):
time.sleep(seconds)
self.check()
class ThreadWithReturn(threading.Thread):
def __init__(self, target=None, args=None, kwargs=None):
super(ThreadWithReturn, self).__init__(
target=target,
args=args or tuple(),
kwargs=kwargs or {},
)
self.target = target
self.args = args
self.kwargs = kwargs
def run(self):
self._return = self.target(*self.args, **self.kwargs)
def get(self, timeout=None):
self.join(timeout)
try:
return self._return
except AttributeError:
raise RuntimeError("Something went wrong. No `_return` property was set")
def spawn(target, *args, thread_class=ThreadWithReturn, **kwargs):
thread = thread_class(
target=target,
args=args,
kwargs=kwargs,
)
thread.daemon = True
thread.start()
return thread
| 27.617647
| 94
| 0.601704
|
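A brief usage sketch for the `Timeout` context manager above (illustrative only; `work_is_done` is a hypothetical predicate, and the import path simply follows the file layout shown in this row):

```python
from web3.utils.threads import Timeout  # path as laid out in this repo snapshot

def wait_for_result(work_is_done, poll_interval=0.1, max_seconds=5):
    """Poll a hypothetical predicate, giving up after `max_seconds`."""
    with Timeout(max_seconds, exception=TimeoutError("gave up waiting")) as timeout:
        while not work_is_done():
            timeout.sleep(poll_interval)  # sleeps, then raises once expire_at has passed
```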
c380b45bf852cdeab6da334d75c48533073d0360
| 1,586
|
py
|
Python
|
src/web/modules/finance/migrations/0008_auto_20160725_1510.py
|
fossabot/SIStema
|
1427dda2082688a9482c117d0e24ad380fdc26a6
|
[
"MIT"
] | 5
|
2018-03-08T17:22:27.000Z
|
2018-03-11T14:20:53.000Z
|
src/web/modules/finance/migrations/0008_auto_20160725_1510.py
|
fossabot/SIStema
|
1427dda2082688a9482c117d0e24ad380fdc26a6
|
[
"MIT"
] | 263
|
2018-03-08T18:05:12.000Z
|
2022-03-11T23:26:20.000Z
|
src/web/modules/finance/migrations/0008_auto_20160725_1510.py
|
fossabot/SIStema
|
1427dda2082688a9482c117d0e24ad380fdc26a6
|
[
"MIT"
] | 6
|
2018-03-12T19:48:19.000Z
|
2022-01-14T04:58:52.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-25 12:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('finance', '0007_auto_20160531_0315'),
]
operations = [
migrations.RenameField(
model_name='discount',
old_name='for_school',
new_name='school',
),
migrations.RenameField(
model_name='discount',
old_name='for_user',
new_name='user',
),
migrations.RenameField(
model_name='document',
old_name='for_school',
new_name='school',
),
migrations.RenameField(
model_name='document',
old_name='for_users',
new_name='users',
),
migrations.RenameField(
model_name='documenttype',
old_name='for_school',
new_name='school',
),
migrations.RenameField(
model_name='paymentamount',
old_name='for_school',
new_name='school',
),
migrations.RenameField(
model_name='paymentamount',
old_name='for_user',
new_name='user',
),
migrations.AlterUniqueTogether(
name='documenttype',
unique_together=set([('school', 'short_name')]),
),
migrations.AlterUniqueTogether(
name='paymentamount',
unique_together=set([('school', 'user')]),
),
]
| 26.881356
| 60
| 0.534048
|
68be34c4fbebd434c73ecf1cf4699108c00590fb
| 3,876
|
py
|
Python
|
S2SRL/SymbolicExecutor/transform_util.py
|
gokasiko/NS-CQA-1
|
96399928c9c1ee30521ea5defd3257fff27bfa74
|
[
"MIT"
] | 9
|
2020-04-29T00:49:59.000Z
|
2022-01-15T08:56:28.000Z
|
S2SRL/SymbolicExecutor/transform_util.py
|
gokasiko/NS-CQA-1
|
96399928c9c1ee30521ea5defd3257fff27bfa74
|
[
"MIT"
] | 1
|
2020-07-19T09:04:47.000Z
|
2020-10-08T12:23:03.000Z
|
S2SRL/SymbolicExecutor/transform_util.py
|
gokasiko/NS-CQA-1
|
96399928c9c1ee30521ea5defd3257fff27bfa74
|
[
"MIT"
] | 6
|
2020-07-19T09:01:05.000Z
|
2021-11-11T07:58:07.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2019/9/1 23:36
# @Author : Devin Hua
# Function: transformation utilities for symbolic execution results.
# Transform boolean results into string format.
def transformBooleanToString(list):
temp_set = set()
if len(list) == 0:
return ''
else:
for i, item in enumerate(list):
if item == True:
list[i] = "YES"
temp_set.add(list[i])
elif item == False:
list[i] = "NO"
temp_set.add(list[i])
else:
return ''
if len(temp_set) == 1:
return temp_set.pop()
if len(temp_set) > 1:
return ((' and '.join(list)).strip() + ' respectively')
# Transform an action sequence such as ['A2', '(', 'Q5058355', 'P361', 'Q5058355', ')', 'A2', '(', 'Q5058355', 'P361', 'Q5058355', ')', 'A15', '(', 'Q22329858', ')'] into a list of {action: arguments} dicts.
def list2dict(list):
final_list = []
temp_list = []
new_list = []
action_list = []
left_count, right_count, action_count = 0, 0, 0
for a in list:
if a.startswith("A"):
action_count+=1
action_list.append(a)
if (a == "("):
new_list = []
left_count+=1
continue
if (a == ")"):
right_count+=1
if ("-" in new_list and new_list[-1] != "-"):
new_list[new_list.index("-") + 1] = "-" + new_list[new_list.index("-") + 1]
new_list.remove("-")
if (new_list == []):
new_list = ["", "", ""]
if (len(new_list) == 1):
new_list = [new_list[0], "", ""]
if ("&" in new_list):
new_list = ["&", "", ""]
if ("-" in new_list):
new_list = ["-", "", ""]
if ("|" in new_list):
new_list = ["|", "", ""]
temp_list.append(new_list)
# To handle the error when action sequence is like 'A1 (Q1,P1,Q2) A2 Q3,P2,Q4)'.
new_list = []
continue
if not a.startswith("A"):
if a.startswith("E"): a = "Q17"
if a.startswith("T"): a = "Q17"
new_list.append(a)
# To handle the error when action sequence is like 'A1 Q1,P1,Q2) A2(Q3,P2,Q4', 'A1(Q1,P1,Q2 A2(Q3,P2,Q4)'.
number_list = [left_count, right_count, len(action_list), len(temp_list)]
set_temp = set(number_list)
# Only build the result when all of these counts match.
if len(set_temp) == 1:
for action, parameter_temp in zip(action_list, temp_list):
final_list.append({action: parameter_temp})
# print("final_list", final_list)
return final_list
def list2dict_webqsp(list):
#print("list", list)
final_list = []
temp_list = []
new_list = []
for a in list:
if (a == "("):
new_list = []
continue
if (a == ")"):
if ("-" in new_list):
new_list[new_list.index("-") + 1] = "-" + new_list[new_list.index("-") + 1]
new_list.remove("-")
if (new_list == []):
new_list = ["", "", ""]
if (len(new_list) == 1):
new_list = [new_list[0], "", ""]
if ("&" in new_list):
new_list = ["&", "", ""]
if ("-" in new_list):
new_list = ["-", "", ""]
if ("|" in new_list):
new_list = ["|", "", ""]
temp_list.append(new_list)
continue
if not a.startswith("A"):
# if a.startswith("E"): a = "Q17"
# if a.startswith("T"): a = "Q17"
new_list.append(a)
i = 0
for a in list:
if (a.startswith("A")):
if i < len(temp_list):
final_list.append({a: temp_list[i]})
# temp_dict[a] = temp_list[i]
i += 1
return final_list
| 34.300885
| 165
| 0.45743
|
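Tracing `list2dict` above on the action sequence from its comment gives the following (a worked example derived by hand, assuming `list2dict` is in scope; it is not output copied from the repository):

```python
actions = ['A2', '(', 'Q5058355', 'P361', 'Q5058355', ')',
           'A2', '(', 'Q5058355', 'P361', 'Q5058355', ')',
           'A15', '(', 'Q22329858', ')']

print(list2dict(actions))
# [{'A2': ['Q5058355', 'P361', 'Q5058355']},
#  {'A2': ['Q5058355', 'P361', 'Q5058355']},
#  {'A15': ['Q22329858', '', '']}]   # single-argument groups are padded to three slots
```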
0c7b0486613e2145e4acf0b596f92345458cfd42
| 3,431
|
py
|
Python
|
3_Deep_Double_Q_Learning_Atari_Games/plot.py
|
vamshikumarkurva/DeepReinforcementLearning
|
e3c123ba60620dfdfb1514ca99f5f86c813984d1
|
[
"MIT"
] | 8
|
2018-06-11T18:26:58.000Z
|
2021-05-12T07:57:54.000Z
|
3_Deep_Double_Q_Learning_Atari_Games/plot.py
|
vamshikumarkurva/DeepReinforcementLearning
|
e3c123ba60620dfdfb1514ca99f5f86c813984d1
|
[
"MIT"
] | 2
|
2018-06-21T15:17:57.000Z
|
2018-07-12T14:42:05.000Z
|
3_Deep_Double_Q_Learning_Atari_Games/plot.py
|
vamshikumarkurva/DeepReinforcementLearning
|
e3c123ba60620dfdfb1514ca99f5f86c813984d1
|
[
"MIT"
] | 3
|
2018-07-10T10:16:22.000Z
|
2020-09-02T10:48:32.000Z
|
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import json
import os
"""
Using the plotter:
Call it from the command line, and supply it with logdirs to experiments.
Suppose you ran an experiment with name 'test', and you ran 'test' for 10
random seeds. The runner code stored it in the directory structure
data
L test_EnvName_DateTime
L 0
L log.txt
L params.json
L 1
L log.txt
L params.json
.
.
.
L 9
L log.txt
L params.json
To plot learning curves from the experiment, averaged over all random
seeds, call
python plot.py data/test_EnvName_DateTime --value AverageReturn
and voila. To see different statistics, change what you put in for
the keyword --value. You can also enter /multiple/ values, and it will
make all of them in order.
Suppose you ran two experiments: 'test1' and 'test2'. In 'test2' you tried
a different set of hyperparameters from 'test1', and now you would like
to compare them -- see their learning curves side-by-side. Just call
python plot.py data/test1 data/test2
and it will plot them both! They will be given titles in the legend according
to their exp_name parameters. If you want to use custom legend titles, use
the --legend flag and then provide a title for each logdir.
"""
def plot_data(data, value="AverageReturn"):
print(data)
if isinstance(data, list):
data = pd.concat(data, ignore_index=True)
sns.set(style="darkgrid", font_scale=1.5)
sns.tsplot(data=data, time="Iteration", value=value, unit="Unit", condition="Condition")
plt.legend(loc='best').draggable()
plt.show()
def get_datasets(fpath, condition=None):
unit = 0
datasets = []
for root, dir, files in os.walk(fpath):
if 'log.txt' in files:
param_path = open(os.path.join(root,'params.json'))
params = json.load(param_path)
exp_name = params['exp_name']
log_path = os.path.join(root,'log.txt')
experiment_data = pd.read_table(log_path)
experiment_data.insert(
len(experiment_data.columns),
'Unit',
unit
)
experiment_data.insert(
len(experiment_data.columns),
'Condition',
condition or exp_name
)
datasets.append(experiment_data)
unit += 1
return datasets
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('logdir', nargs='*')
parser.add_argument('--legend', nargs='*')
parser.add_argument('--value', default='AverageReturn', nargs='*')
args = parser.parse_args()
use_legend = False
if args.legend is not None:
assert len(args.legend) == len(args.logdir), \
"Must give a legend title for each set of experiments."
use_legend = True
data = []
if use_legend:
for logdir, legend_title in zip(args.logdir, args.legend):
data += get_datasets(logdir, legend_title)
else:
for logdir in args.logdir:
data += get_datasets(logdir)
if isinstance(args.value, list):
values = args.value
else:
values = [args.value]
for value in values:
plot_data(data, value=value)
if __name__ == "__main__":
main()
| 28.355372
| 92
| 0.629554
|
bde2ac3f710d3e398bd53893c4acd0b9164d93be
| 3,637
|
py
|
Python
|
kubernetes_asyncio/client/models/v1_container_state_running.py
|
playground-julia/kubernetes_asyncio
|
91b2c41eedd282d9ebc059377fb7f207e220133d
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_container_state_running.py
|
playground-julia/kubernetes_asyncio
|
91b2c41eedd282d9ebc059377fb7f207e220133d
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_container_state_running.py
|
playground-julia/kubernetes_asyncio
|
91b2c41eedd282d9ebc059377fb7f207e220133d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.15.9
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1ContainerStateRunning(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'started_at': 'datetime'
}
attribute_map = {
'started_at': 'startedAt'
}
def __init__(self, started_at=None, local_vars_configuration=None): # noqa: E501
"""V1ContainerStateRunning - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._started_at = None
self.discriminator = None
if started_at is not None:
self.started_at = started_at
@property
def started_at(self):
"""Gets the started_at of this V1ContainerStateRunning. # noqa: E501
Time at which the container was last (re-)started # noqa: E501
:return: The started_at of this V1ContainerStateRunning. # noqa: E501
:rtype: datetime
"""
return self._started_at
@started_at.setter
def started_at(self, started_at):
"""Sets the started_at of this V1ContainerStateRunning.
Time at which the container was last (re-)started # noqa: E501
:param started_at: The started_at of this V1ContainerStateRunning. # noqa: E501
:type: datetime
"""
self._started_at = started_at
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ContainerStateRunning):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ContainerStateRunning):
return True
return self.to_dict() != other.to_dict()
| 29.569106
| 124
| 0.598295
|
b50b0242fd3422d8333e80a40a09fd8ce1e4c6bb
| 372
|
py
|
Python
|
data.py
|
abc1763613206/UniversalTGChatBot
|
13d8144f71f4c9326b344a8697baa5fa72005e21
|
[
"MIT"
] | null | null | null |
data.py
|
abc1763613206/UniversalTGChatBot
|
13d8144f71f4c9326b344a8697baa5fa72005e21
|
[
"MIT"
] | 41
|
2019-12-05T21:16:22.000Z
|
2021-07-27T21:14:47.000Z
|
data.py
|
abc1763613206/UniversalTGChatBot
|
13d8144f71f4c9326b344a8697baa5fa72005e21
|
[
"MIT"
] | null | null | null |
comment = {
'welcome_msg' : '用户发送 /start 或 /help 时的欢迎消息',
'text_not_supported' : '用户发送非文字时的回复',
'data_saved' : '保存数据后的回复',
'demo_mode_on' : '开启演示模式的回复',
'demo_mode_off' : '关闭演示模式的回复',
'not_admin' : '用户权限不足时的回复',
'quotes' : '(请勿使用该选项设置)语录库',
'texts' : '(请勿使用该选项设置)语录无法匹配时的随机库',
'admins' : '动态添加的管理,存储username,权限应与config.py中写死的admin作区分'
}
| 33.818182
| 61
| 0.637097
|
f50106826a79986acacf743a46b511c03fb23e3d
| 3,363
|
py
|
Python
|
torrent_name_analyzer/name_parser/patterns.py
|
opacam/torrent-name-analyzer
|
3d6a8c05577925c4d8017a8151f725d9752771d7
|
[
"MIT"
] | 1
|
2020-07-08T14:01:36.000Z
|
2020-07-08T14:01:36.000Z
|
torrent_name_analyzer/name_parser/patterns.py
|
opacam/torrent-name-analyzer
|
3d6a8c05577925c4d8017a8151f725d9752771d7
|
[
"MIT"
] | null | null | null |
torrent_name_analyzer/name_parser/patterns.py
|
opacam/torrent-name-analyzer
|
3d6a8c05577925c4d8017a8151f725d9752771d7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
delimiters = r'[\.\s\-\+_\/]'
langs = (
r'rus|(?:True)?fr(?:ench)?|e?n(?:g(?:lish)?)?|vost(?:fr)?|ita(?:liano)?|'
r'castellano|swedish|spanish|dk|german|multi|nordic|exyu|chs|hindi|polish'
r'|mandarin'
)
producers = 'ATVP|AMZN|NF|NICK|RED|DSNP'
season_range_pattern = (
r'(?:Complete' + delimiters + r'*)?(?:' + delimiters
+ r'*)?(?:s(?:easons?)?)?' + delimiters
+ r'?(?:s?[0-9]{1,2}[\s]*(?:\-|(?:\s*to\s*))[\s]*s?[0-9]{1,2})(?:'
+ delimiters + r'*Complete)?'
)
episode_pattern = (
'(?:(?:[ex]|ep)(?:[0-9]{1,2}(?:-(?:[ex]|ep)?(?:[0-9]{1,2})))|'
'(?:[ex]|ep)([0-9]{1,2}))'
)
year_pattern = '(?:19[0-9]|20[0-2])[0-9]'
month_pattern = '0[1-9]|1[0-2]'
day_pattern = '[0-2][0-9]|3[01]'
patterns = [
('season', delimiters + '(' # Season description can't be at the beginning, must be after this pattern # noqa: 501
'' + season_range_pattern + '|' # Describes season ranges
'(?:Complete' + delimiters + ')?s([0-9]{1,2})(?:' + episode_pattern + ')?|' # Describes season, optionally with complete or episode # noqa: 501
'([0-9]{1,2})x[0-9]{2}|' # Describes 5x02, 12x15 type descriptions
'(?:Complete' + delimiters + r')?Season[\. -]([0-9]{1,2})' # Describes Season.15 type descriptions # noqa: 501
')(?:' + delimiters + '|$)'),
('episode', '(' + episode_pattern + ')(?:[^0-9]|$)'),
('year', r'([\[\(]?(' + year_pattern + r')[\]\)]?)'),
('month', '(?:' + year_pattern + ')'
+ delimiters + '(' + month_pattern + ')'
+ delimiters + '(?:' + day_pattern + ')'
),
('day', '(?:' + year_pattern + ')' + delimiters
+ '(?:' + month_pattern + ')' + delimiters + '(' + day_pattern + ')'),
('resolution', '([0-9]{3,4}p|1280x720)'),
('quality', (r'((?:PPV\.)?[HP]DTV|CAM-RIP|(?:HD)?CAM|B[DR]Rip|(?:HD-?)?TS|'
r'HDRip|HDTVRip|DVDRip|DVDRIP|CamRip|(?:(?:' + producers + ')'
+ delimiters + r'?)?(?:PPV )?W[EB]B(?:-?DL(?:Mux)?)'
r'?(?:Rip| DVDRip)?|BluRay|DvDScr|hdtv|telesync)')),
('codec', r'(xvid|[hx]\.?26[45])'),
('audio', (r'(MP3|DD5\.?1|Dual[\- ]Audio|LiNE|DTS|DTS5\.1|'
r'AAC[ \.-]LC|AAC(?:(?:\.?2(?:\.0)?)?|(?:\.?5(?:\.1)?)?)|'
r'(?:E-?)?AC-?3(?:' + delimiters + r'*?(?:2\.0|5\.1))?)')),
('group', '(- ?([^-]+(?:-={[^-]+-?$)?))$'),
('region', 'R[0-9]'),
('extended', '(EXTENDED(:?.CUT)?)'),
('hardcoded', 'HC'),
('proper', 'PROPER'),
('repack', 'REPACK'),
('container', '(MKV|AVI|MP4)'),
('widescreen', 'WS'),
('website', r'^(\[ ?([^\]]+?) ?\])'),
('subtitles', r'((?:(?:' + langs + r'|e-?)[\-\s.]*)*subs?)'),
('language', r'((?:(?:' + langs + ')' + delimiters
+ r'*)+)(?!(?:[\-\s.]*(?:' + langs + r')*)+[\-\s.]?subs)'),
('sbs', '(?:Half-)?SBS'),
('unrated', 'UNRATED'),
('size', r'(\d+(?:\.\d+)?(?:GB|MB))'),
('bitDepth', '(?:8|10)bit'),
('3d', '3D'),
('internal', 'iNTERNAL'),
('readnfo', 'READNFO')
]
types = {
'season': 'integer',
'episode': 'integer',
'year': 'integer',
'month': 'integer',
'day': 'integer',
'extended': 'boolean',
'hardcoded': 'boolean',
'proper': 'boolean',
'repack': 'boolean',
'widescreen': 'boolean',
'unrated': 'boolean',
'3d': 'boolean',
'internal': 'boolean',
'readnfo': 'boolean'
}
| 38.655172
| 150
| 0.460898
|
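As an illustration of how these patterns might be applied (a sketch; the sample torrent name and the loop are mine, and the import path simply mirrors the file layout shown above):

```python
import re

from torrent_name_analyzer.name_parser.patterns import patterns  # layout shown above

name = "Some.Show.S02E05.720p.WEB-DL.x264-GROUP.mkv"
for field, pattern in patterns:
    match = re.search(pattern, name, re.IGNORECASE)
    if match:
        print(field, repr(match.group(0)))
# Typical hits for a name like this include the season/episode fields,
# resolution (720p), quality (WEB-DL), codec (x264), container (MKV) and group.
```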
456f0509d21782762e2f0fe95a0b373606af3094
| 99
|
py
|
Python
|
eventsrouter/apps.py
|
The-Politico/django-slack-events-router
|
a838d94a55f7be7afeafa19dad093c29e77ebe67
|
[
"MIT"
] | null | null | null |
eventsrouter/apps.py
|
The-Politico/django-slack-events-router
|
a838d94a55f7be7afeafa19dad093c29e77ebe67
|
[
"MIT"
] | 6
|
2019-12-05T00:43:05.000Z
|
2021-06-09T18:39:48.000Z
|
eventsrouter/apps.py
|
The-Politico/django-slack-events-router
|
a838d94a55f7be7afeafa19dad093c29e77ebe67
|
[
"MIT"
] | 1
|
2021-05-30T15:00:36.000Z
|
2021-05-30T15:00:36.000Z
|
from django.apps import AppConfig
class EventsrouterConfig(AppConfig):
name = "eventsrouter"
| 16.5
| 36
| 0.777778
|
b90f6d3d8627004bd32a830d8f8e54420beaa4d4
| 217
|
py
|
Python
|
src/saml2/profile/samlec.py
|
skanct/pysaml2
|
0c1e26a6dd8759962857a30ebd67f63fe9e881ee
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
src/saml2/profile/samlec.py
|
skanct/pysaml2
|
0c1e26a6dd8759962857a30ebd67f63fe9e881ee
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
src/saml2/profile/samlec.py
|
skanct/pysaml2
|
0c1e26a6dd8759962857a30ebd67f63fe9e881ee
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
from saml2 import SamlBase
NAMESPACE = 'urn:ietf:params:xml:ns:samlec'
class GeneratedKey(SamlBase):
c_tag = 'GeneratedKey'
c_namespace = NAMESPACE
ELEMENT_BY_TAG = {
'GeneratedKey': GeneratedKey,
}
| 14.466667
| 43
| 0.718894
|
2450114b0e61dcd27df850da14fe58d0e68c4a21
| 2,959
|
py
|
Python
|
src/chatbot/chatbot.py
|
PYTHONBOY/carhood-garage-chatbot
|
bfa1dfd8ddfaa6bdc847861fa72fcb60a5a3defd
|
[
"Unlicense",
"MIT"
] | null | null | null |
src/chatbot/chatbot.py
|
PYTHONBOY/carhood-garage-chatbot
|
bfa1dfd8ddfaa6bdc847861fa72fcb60a5a3defd
|
[
"Unlicense",
"MIT"
] | null | null | null |
src/chatbot/chatbot.py
|
PYTHONBOY/carhood-garage-chatbot
|
bfa1dfd8ddfaa6bdc847861fa72fcb60a5a3defd
|
[
"Unlicense",
"MIT"
] | null | null | null |
#Meet Robo: your friend
#import necessary libraries
import io
import random
import string # to process standard python strings
import warnings
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import warnings
warnings.filterwarnings('ignore')
import nltk
from chatterbot import ChatBot
from nltk.stem import WordNetLemmatizer
nltk.download('popular', quiet=True) # for downloading packages
# uncomment the following only the first time
#nltk.download('punkt') # first-time use only
#nltk.download('wordnet') # first-time use only
#Reading in the corpus
with open('chatbot.txt','r', encoding='utf8', errors ='ignore') as fin:
raw = fin.read().lower()
# Tokenisation
sent_tokens = nltk.sent_tokenize(raw)# converts to list of sentences
word_tokens = nltk.word_tokenize(raw)# converts to list of words
# Preprocessing
lemmer = WordNetLemmatizer()
def LemTokens(tokens):
return [lemmer.lemmatize(token) for token in tokens]
remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)
def LemNormalize(text):
return LemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))
# Keyword Matching
GREETING_INPUTS = ("hello", "hi", "greetings", "sup", "what's up","hey",)
GREETING_RESPONSES = ["hi", "hey", "*nods*", "hi there", "hello", "I am glad! You are talking to me"]
def greeting(sentence):
"""If user's input is a greeting, return a greeting response"""
for word in sentence.split():
if word.lower() in GREETING_INPUTS:
return random.choice(GREETING_RESPONSES)
# Generating response
def response(user_response):
robo_response=''
sent_tokens.append(user_response)
TfidfVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')
tfidf = TfidfVec.fit_transform(sent_tokens)
vals = cosine_similarity(tfidf[-1], tfidf)
idx=vals.argsort()[0][-2]
flat = vals.flatten()
flat.sort()
req_tfidf = flat[-2]
if(req_tfidf==0):
robo_response=robo_response+"I am sorry! I don't understand you"
return robo_response
else:
robo_response = robo_response+sent_tokens[idx]
return robo_response
flag=True
print("ROBO: My name is Robo. I will answer your queries about Chatbots. If you want to exit, type Bye!")
while(flag==True):
user_response = input()
user_response=user_response.lower()
if(user_response!='bye'):
if(user_response=='thanks' or user_response=='thank you' ):
flag=False
print("ROBO: You are welcome..")
else:
if(greeting(user_response)!=None):
print("ROBO: "+greeting(user_response))
else:
print("ROBO: ",end="")
print(response(user_response))
sent_tokens.remove(user_response)
else:
flag=False
print("ROBO: Bye! take care..")
| 31.478723
| 105
| 0.690098
|
378c63eddf7e07f5b65538e9e858f1d56951a586
| 1,189
|
py
|
Python
|
src/updatedb.py
|
510908220/django-backend-template
|
77c6a789e86654885616334b4a3590934026b07e
|
[
"MIT"
] | 1
|
2017-01-22T09:24:20.000Z
|
2017-01-22T09:24:20.000Z
|
src/updatedb.py
|
510908220/django-backend-template
|
77c6a789e86654885616334b4a3590934026b07e
|
[
"MIT"
] | null | null | null |
src/updatedb.py
|
510908220/django-backend-template
|
77c6a789e86654885616334b4a3590934026b07e
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
Business monitoring upgrade script (业务监控升级脚本)
"""
import subprocess
import os
import MySQLdb
import time
import traceback
import stat
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
def wait_db_ok():
def test_db():
db = MySQLdb.connect("db","root",os.environ['DB_PASSWORD'],os.environ['DB_NAME'] )
cursor = db.cursor()
cursor.execute("SELECT VERSION()")
data = cursor.fetchone()
db.close()
try:
test_db()
return True
except:
print ("test db error:",traceback.format_exc())
return False
def update_supervisor_cfg():
print (subprocess.check_output("python generate_supervisor_conf.py", shell=True))
def update_db():
cmds = [
"python manage.py makemigrations",
"python manage.py migrate",
"python manage.py collectstatic --noinput"
]
for cmd in cmds:
out = subprocess.check_output(cmd, shell=True)
print (out)
def main():
while not wait_db_ok():
time.sleep(5)
print ("db is not ok, wait ....")
old_dir = os.getcwd()
os.chdir(ROOT_DIR)
update_supervisor_cfg()
update_db()
if __name__ == "__main__":
main()
| 22.433962
| 90
| 0.618167
|
3a1c3a1cb678c56447f8b21bdd262bde1a3d8f0f
| 1,416
|
py
|
Python
|
summarizer/sentence_handler.py
|
FrontMage/bert-extractive-summarizer
|
317da7c92c33ed103d7e2e4a35ef10538ce034d6
|
[
"MIT"
] | null | null | null |
summarizer/sentence_handler.py
|
FrontMage/bert-extractive-summarizer
|
317da7c92c33ed103d7e2e4a35ef10538ce034d6
|
[
"MIT"
] | null | null | null |
summarizer/sentence_handler.py
|
FrontMage/bert-extractive-summarizer
|
317da7c92c33ed103d7e2e4a35ef10538ce034d6
|
[
"MIT"
] | 1
|
2021-06-05T19:10:44.000Z
|
2021-06-05T19:10:44.000Z
|
from typing import List
from spacy.lang.zh import Chinese
class SentenceHandler(object):
def __init__(self, language=Chinese):
self.nlp = language()
try:
self.nlp.add_pipe(self.nlp.create_pipe('sentencizer'))
self.is_spacy_3 = False
except:
self.nlp.add_pipe("sentencizer")
self.is_spacy_3 = True
def sentence_processor(self, doc, min_length: int = 40, max_length: int = 600):
to_return = []
for c in doc.sents:
if max_length > len(c.text.strip()) > min_length:
if self.is_spacy_3:
to_return.append(c.text.strip())
else:
to_return.append(c.string.strip())
return to_return
def process(self, body: str, min_length: int = 40, max_length: int = 600) -> List[str]:
"""
Processes the content sentences.
:param body: The raw string body to process
:param min_length: Minimum length that the sentences must be
:param max_length: Max length that the sentences must fall under
:return: Returns a list of sentences.
"""
doc = self.nlp(body)
return self.sentence_processor(doc, min_length, max_length)
def __call__(self, body: str, min_length: int = 40, max_length: int = 600) -> List[str]:
return self.process(body, min_length, max_length)
| 32.181818
| 92
| 0.605226
|
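A minimal usage sketch for the `SentenceHandler` above (the blank English pipeline, sample text and length bounds are my choices for illustration; the class itself defaults to the Chinese pipeline):

```python
from spacy.lang.en import English

from summarizer.sentence_handler import SentenceHandler  # path as laid out above

handler = SentenceHandler(language=English)  # any spaCy language with a sentencizer
sentences = handler(
    "This first sentence is comfortably long enough to pass the filter. Too short.",
    min_length=10,
    max_length=600,
)
print(sentences)  # only sentences whose stripped length falls strictly inside the bounds
```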
216b40f57de57ce47aabf8e82042a38dd81901f1
| 192
|
py
|
Python
|
Python2 Tutorials/pythonIntermediateTutorial/Tutorial6 - Modulating/package/subpackage/__init__.py
|
DrapsTV/DrapsTV_Materials
|
1aee31df2d622a312b0d3f4eb2d29080a8043828
|
[
"MIT"
] | 20
|
2016-06-16T12:05:45.000Z
|
2020-11-25T06:31:53.000Z
|
Python3 Tutorials/python3IntermediateTutorial/Tutorial6 - Modulating/package/subpackage/__init__.py
|
AyushLalShrestha/DrapsTV_Materials
|
1aee31df2d622a312b0d3f4eb2d29080a8043828
|
[
"MIT"
] | null | null | null |
Python3 Tutorials/python3IntermediateTutorial/Tutorial6 - Modulating/package/subpackage/__init__.py
|
AyushLalShrestha/DrapsTV_Materials
|
1aee31df2d622a312b0d3f4eb2d29080a8043828
|
[
"MIT"
] | 22
|
2016-05-02T10:16:17.000Z
|
2020-12-30T13:02:25.000Z
|
# This initializes the subpackage folder.
# The __all__ special variable indicates which modules to load when the * star
# is used for importing.
__all__ = ['mySubModule', 'myOtherSubModule']
| 27.428571
| 77
| 0.765625
|
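For illustration, the effect of the `__all__` declaration above (any module name other than the two listed is hypothetical):

```python
# With the __init__.py above in place, a star import binds only the listed modules:
from package.subpackage import *

print(mySubModule, myOtherSubModule)  # both are bound by the star import
# Any other module in the folder would need an explicit import, e.g.
# from package.subpackage import someOtherModule   # hypothetical name
```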
8d480c37c53a201107eb76160984d7c3ec3f1d35
| 1,741
|
py
|
Python
|
docs/examples/qc_set_next_step.py
|
AvroNelson/s4-clarity-lib
|
af6f2ec77cdb7f942f5ea3990a61950aa2a1b3a4
|
[
"MIT"
] | 11
|
2019-04-11T16:29:36.000Z
|
2022-01-31T18:32:27.000Z
|
docs/examples/qc_set_next_step.py
|
AvroNelson/s4-clarity-lib
|
af6f2ec77cdb7f942f5ea3990a61950aa2a1b3a4
|
[
"MIT"
] | 9
|
2019-04-13T17:08:39.000Z
|
2021-12-07T23:31:01.000Z
|
docs/examples/qc_set_next_step.py
|
AvroNelson/s4-clarity-lib
|
af6f2ec77cdb7f942f5ea3990a61950aa2a1b3a4
|
[
"MIT"
] | 10
|
2019-04-13T14:29:43.000Z
|
2021-04-01T01:35:51.000Z
|
# Copyright 2019 Semaphore Solutions, Inc.
# ---------------------------------------------------------------------------
import logging
from s4.clarity.artifact import Artifact
from s4.clarity.scripts import TriggeredStepEPP
log = logging.getLogger(__name__)
class QCSetNextStep(TriggeredStepEPP):
def should_repeat_step(self, input_analyte):
# type: (Artifact) -> bool
# QC flag is set on output result file artifacts
output_measurements = self.step.details.input_keyed_lookup[input_analyte]
# If QC failed for any replicate of the input it should repeat
return any(output.qc_failed() for output in output_measurements)
def on_record_details_exit(self):
"""
Set the next step actions for the user to inspect.
"""
for analyte, action in self.step.actions.artifact_actions.items():
if self.should_repeat_step(analyte):
log.info("Setting Analyte '%s' (%s) to repeat step." % (analyte.name, analyte.limsid))
action.repeat()
else:
action.next_step()
self.step.actions.commit()
def on_end_of_step(self):
"""
Ensure analytes repeat the step but do not overwrite other user selections.
"""
# As this is a QC step it is the inputs that are moving to the next step.
for input_analyte, action in self.step.actions.artifact_actions.items():
if self.should_repeat_step(input_analyte):
log.info("Setting Analyte '%s' (%s) to repeat step." % (input_analyte.name, input_analyte.limsid))
action.repeat()
self.step.actions.commit()
if __name__ == "__main__":
QCSetNextStep.main()
| 32.240741
| 114
| 0.622631
|
f8f1af4c3da51ca5fcaf5a216c48ebd573192679
| 3,586
|
py
|
Python
|
pandas/tests/arrays/boolean/test_arithmetic.py
|
k-fillmore/pandas
|
67d4cae17bec45e84b9cf51bcf4fb5bbe293b26f
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 28,899
|
2016-10-13T03:32:12.000Z
|
2022-03-31T21:39:05.000Z
|
pandas/tests/arrays/boolean/test_arithmetic.py
|
k-fillmore/pandas
|
67d4cae17bec45e84b9cf51bcf4fb5bbe293b26f
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 31,004
|
2016-10-12T23:22:27.000Z
|
2022-03-31T23:17:38.000Z
|
pandas/tests/arrays/boolean/test_arithmetic.py
|
k-fillmore/pandas
|
67d4cae17bec45e84b9cf51bcf4fb5bbe293b26f
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 15,149
|
2016-10-13T03:21:31.000Z
|
2022-03-31T18:46:47.000Z
|
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import FloatingArray
@pytest.fixture
def data():
return pd.array(
[True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],
dtype="boolean",
)
@pytest.fixture
def left_array():
return pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
@pytest.fixture
def right_array():
return pd.array([True, False, None] * 3, dtype="boolean")
# Basic test for the arithmetic array ops
# -----------------------------------------------------------------------------
@pytest.mark.parametrize(
"opname, exp",
[
("add", [True, True, None, True, False, None, None, None, None]),
("mul", [True, False, None, False, False, None, None, None, None]),
],
ids=["add", "mul"],
)
def test_add_mul(left_array, right_array, opname, exp):
op = getattr(operator, opname)
result = op(left_array, right_array)
expected = pd.array(exp, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_sub(left_array, right_array):
msg = (
r"numpy boolean subtract, the `-` operator, is (?:deprecated|not supported), "
r"use the bitwise_xor, the `\^` operator, or the logical_xor function instead\."
)
with pytest.raises(TypeError, match=msg):
left_array - right_array
def test_div(left_array, right_array):
result = left_array / right_array
expected = FloatingArray(
np.array(
[1.0, np.inf, np.nan, 0.0, np.nan, np.nan, np.nan, np.nan, np.nan],
dtype="float64",
),
np.array([False, False, True, False, False, True, True, True, True]),
)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"opname",
[
"floordiv",
"mod",
pytest.param(
"pow", marks=pytest.mark.xfail(reason="TODO follow int8 behaviour? GH34686")
),
],
)
def test_op_int8(left_array, right_array, opname):
op = getattr(operator, opname)
result = op(left_array, right_array)
expected = op(left_array.astype("Int8"), right_array.astype("Int8"))
tm.assert_extension_array_equal(result, expected)
# Test generic characteristics / errors
# -----------------------------------------------------------------------------
def test_error_invalid_values(data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
# invalid scalars
msg = (
"did not contain a loop with signature matching types|"
"BooleanArray cannot perform the operation|"
"not supported for the input types, and the inputs could not be safely coerced "
"to any supported types according to the casting rule ''safe''"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
msg = (
r"unsupported operand type\(s\) for|"
"Concatenation operation is not implemented for NumPy arrays"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
if op not in ("__mul__", "__rmul__"):
# TODO(extension) numpy's mul with object array sees booleans as numbers
msg = (
r"unsupported operand type\(s\) for|can only concatenate str|"
"not all arguments converted during string formatting"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
| 29.393443
| 88
| 0.607641
|
3ee3d28b59182aec404758656033a51d13bede2a
| 6,833
|
py
|
Python
|
worldview.py
|
Berserker66/omnitool
|
6bf88ba86a7c68a968f8c8db569b57e6ba836e8e
|
[
"MIT"
] | 40
|
2015-03-15T14:38:24.000Z
|
2021-12-18T04:30:39.000Z
|
worldview.py
|
Berserker66/omnitool
|
6bf88ba86a7c68a968f8c8db569b57e6ba836e8e
|
[
"MIT"
] | 31
|
2015-03-14T12:12:14.000Z
|
2022-02-27T17:50:56.000Z
|
worldview.py
|
Berserker66/omnitool
|
6bf88ba86a7c68a968f8c8db569b57e6ba836e8e
|
[
"MIT"
] | 13
|
2015-07-31T11:40:41.000Z
|
2021-04-09T14:36:07.000Z
|
from __future__ import with_statement
import time
import sys
from multiprocessing import *
from tlib import get_tile_buffered
from tinterface import get_header, get_pointers
is_exe = hasattr(sys, "frozen")
try:
import pygame._view
except ImportError:
pass
try:
import pygame
except:
raise RuntimeError("Failed to import pygame, please install it - pygame.org")
import colorlib
import database as db
def make_map(path, outputpath=None, mark=False, name=None):
images = {}
render = {}
if mark:
mark = [12, 26, 31]
else:
mark = []
for tile in mark:
try:
images[tile] = (pygame.image.load(str(tile) + ".png"))
except Exception as e:
print("Could not load image for tile ID %d") % (tile)
raise e
render[tile] = []
if len(mark):
chests = []
chestsizes = []
for x in range(6):
chests.append((pygame.image.load("21_" + str(x * 36) + ".png")))
chestsizes.append(chests[-1].get_size())
mark.append(21)
render[21] = []
start = time.clock()
with open(path, "rb") as f:
b = [0]
header = get_header(f)[0] # read header with tinterface.get_header, leaving f positioned at the tile data
x, y = header["width"], header["height"] #read world size from header cache
s = pygame.surface.Surface((x, y)) #create a software surface to save tile colors in
s.fill((200, 200, 255))
levels = header["groundlevel"], header["rocklevel"]
pygame.draw.rect(s, (150, 75, 0),
((0, levels[0]),
(x, y - levels[0])))
pygame.draw.rect(s, (50, 50, 50),
((0, levels[1]),
(x, y - levels[1])))
for xi in range(x): # for each slice
for yi in range(y): # get the tiles
#tiles start from the upper left corner, then go downwards
# when a slice is complete it starts with the next slice
tile, b = get_tile_buffered(f, b) #tlib.get_tile
tile, wall, liquid, multi, wire = tile
if not liquid: #liquid == 0 means no liquid
# there could be a liquid and a tile, like a chest and water,
#but I can only set one color to a pixel anyway, so I prioritise the tile
if tile == None:
if wall:
if wall in colorlib.walldata:
s.set_at((xi, yi), colorlib.walldata[wall])
else:
print(wall)
s.set_at((xi, yi), (wall, wall, wall))
#s.set_at((xi,yi), (255,255,255))#if no tile present, set it white
elif tile in colorlib.data:
s.set_at((xi, yi), colorlib.data[tile]) #if colorlib has a color use it
else:
s.set_at((xi, yi), (tile, tile, tile)) #make a grey
elif liquid > 0: #0>x>256 is water, the higher x is the more water is there
s.set_at((xi, yi), (19, 86, 134))
else: #lava is -256>x>0
s.set_at((xi, yi), (150, 35, 17))
if tile in mark:
if multi == None:
render[tile].append((xi, yi, multi))
elif multi[0] % 36 == 0 and multi[1] == 0:
render[tile].append((xi, yi, multi))
if xi % 100 == 0: # every 100 slices print the progress
if name == None:
print("done %5d of %5d\r" % (xi, x))
else:
print("done %5d of %5d, of %s" % (xi, x, name))
for tile in render:
for pos in render[tile]:
if tile == 21:
if pos[2][0] % 36 == 0 and pos[2][1] == 0:
kind = pos[2][0] // 36
try:
s.blit(chests[kind], (pos[0] - chestsizes[kind][0] // 2, pos[1] - chestsizes[kind][1] // 2))
except IndexError:
kind = 0
s.blit(chests[kind], (pos[0] - chestsizes[kind][0] // 2, pos[1] - chestsizes[kind][1] // 2))
else:
i = images[tile]
size = i.get_size()
s.blit(i, (pos[0] - size[0] // 2, pos[1] - size[1] // 2))
try:
if outputpath == None:
pa = path[:-3] + "png"
else:
pa = outputpath[:-3] + "png"
pygame.image.save(s, pa)
except: # if pygame was not built with additional image types, we go with bmp which is guaranteed to exist
if outputpath == None:
pa = path[:-3] + "bmp"
else:
pa = outputpath[:-3] + "bmp"
pygame.image.save(s, pa) # save the map and exit
if name:
print("completed mapping " + name)
else:
print("Completed mapping a World! ")
return pa
if __name__ == "__main__":
freeze_support()
# sys.argv.append("mark")# remove the # if you don't want to specify mark via command line
if "mark" in sys.argv:
mark = True
else:
mark = False
make = None
#make = "world1.wld"
if make:
make_map(make)
done = False
for world in sys.argv[1:]:
if world[-3:] == "wld":
make_map(sys.argv[1])
done = True
if done:
pass
else:
import os
try:
# get the My Documents folder on Windows; on other OSes it will fail somewhere
import ctypes
dll = ctypes.windll.shell32
buf = ctypes.create_unicode_buffer(300)
dll.SHGetSpecialFolderPathW(None, buf, 0x0005, False)
p = os.path.join(buf.value, "My Games", "Terraria", "Worlds")
except:
p = os.path.expanduser("~/My Games/Terraria/Worlds")
processes = []
for item in os.listdir(p):
if item[-3:] == "wld":
if "drop_low" in sys.argv:
pos = os.path.join("..", item)
else:
pos = item
pro = Process(target=make_map, name=item,
args=(os.path.join(p, item), pos, mark, item))
pro.start()
processes.append(pro)
while len(processes) > 0:
dead = []
for p in processes:
#print (p)
if not p.is_alive(): dead.append(p)
for d in dead:
d.join()
processes.remove(d)
time.sleep(1)
print("All tasks done")
time.sleep(5)
| 35.041026
| 116
| 0.480755
|
8f14569c2661671486a7626e3f7326bf77796283
| 541
|
py
|
Python
|
gui/build/bdist.win32/python2.7-standalone/app/temp/wx/_misc_.py
|
stvnrhodes/CNCAirbrush
|
9ad9721573130d601276ca9f0447132fd8cdde90
|
[
"MIT"
] | 1
|
2018-06-20T07:30:12.000Z
|
2018-06-20T07:30:12.000Z
|
gui/build/bdist.win32/python2.7-standalone/app/temp/wx/_misc_.py
|
stvnrhodes/CNCAirbrush
|
9ad9721573130d601276ca9f0447132fd8cdde90
|
[
"MIT"
] | null | null | null |
gui/build/bdist.win32/python2.7-standalone/app/temp/wx/_misc_.py
|
stvnrhodes/CNCAirbrush
|
9ad9721573130d601276ca9f0447132fd8cdde90
|
[
"MIT"
] | null | null | null |
def __load():
import imp, os, sys
ext = 'wx\\_misc_.pyd'
for path in sys.path:
if not path.endswith('lib-dynload'):
continue
ext = os.path.join(path, ext)
if os.path.exists(ext):
#print "py2app extension module", __name__, "->", ext
mod = imp.load_dynamic(__name__, ext)
#mod.frozen = 1
break
else:
raise ImportError(repr(ext) + " not found")
else:
raise ImportError("lib-dynload not found")
__load()
del __load
| 27.05
| 65
| 0.543438
|
67f68b4c400a701ecf98158ba429403cdac38134
| 14,343
|
py
|
Python
|
dialogflow_ros/dialogflow_client.py
|
hypothe/dialogflow_ros
|
9e804acf31c384286ebf15e666a952740e2341d9
|
[
"MIT"
] | null | null | null |
dialogflow_ros/dialogflow_client.py
|
hypothe/dialogflow_ros
|
9e804acf31c384286ebf15e666a952740e2341d9
|
[
"MIT"
] | null | null | null |
dialogflow_ros/dialogflow_client.py
|
hypothe/dialogflow_ros
|
9e804acf31c384286ebf15e666a952740e2341d9
|
[
"MIT"
] | 2
|
2021-09-09T07:33:27.000Z
|
2021-09-09T07:37:28.000Z
|
#!/usr/bin/env python
# Dialogflow
import dialogflow_v2beta1
from dialogflow_v2beta1.types import Context, EventInput, InputAudioConfig, \
OutputAudioConfig, QueryInput, QueryParameters, \
StreamingDetectIntentRequest, TextInput
from dialogflow_v2beta1.gapic.enums import AudioEncoding, OutputAudioEncoding
import google.api_core.exceptions
import utils
from AudioServerStream import AudioServerStream
from MicrophoneStream import MicrophoneStream
# Python
import pyaudio
import signal
import time
from uuid import uuid4
from yaml import load, YAMLError
# ROS
import rospy
import rospkg
from std_msgs.msg import String
from dialogflow_ros.msg import *
# Use to convert Struct messages to JSON
# from google.protobuf.json_format import MessageToJson
class DialogflowClient(object):
def __init__(self, language_code='en-US', last_contexts=None):
"""Initialize all params and load data"""
""" Constants and params """
self.CHUNK = 4096
self.FORMAT = pyaudio.paInt16
self.CHANNELS = 1
self.RATE = 44100
self.USE_AUDIO_SERVER = rospy.get_param('/dialogflow_client/use_audio_server', False)
self.PLAY_AUDIO = rospy.get_param('/dialogflow_client/play_audio', True)
self.DEBUG = rospy.get_param('/dialogflow_client/debug', False)
# Register Ctrl-C sigint
signal.signal(signal.SIGINT, self._signal_handler)
""" Dialogflow setup """
# Get hints/clues
rp = rospkg.RosPack()
file_dir = rp.get_path('dialogflow_ros') + '/config/context.yaml'
with open(file_dir, 'r') as f:
try:
self.phrase_hints = load(f)
except YAMLError:
rospy.logwarn("DF_CLIENT: Unable to open phrase hints yaml file!")
self.phrase_hints = []
# Dialogflow params
project_id = rospy.get_param('/dialogflow_client/project_id', 'gentle-proton-252714')
session_id = str(uuid4()) # Random
self._language_code = language_code
self.last_contexts = last_contexts if last_contexts else []
# DF Audio Setup
audio_encoding = AudioEncoding.AUDIO_ENCODING_LINEAR_16
# Possible models: video, phone_call, command_and_search, default
self._audio_config = InputAudioConfig(audio_encoding=audio_encoding,
language_code=self._language_code,
sample_rate_hertz=self.RATE,
phrase_hints=self.phrase_hints,
model='command_and_search')
self._output_audio_config = OutputAudioConfig(
audio_encoding=OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_LINEAR_16
)
# Create a session
self._session_cli = dialogflow_v2beta1.SessionsClient()
self._session = self._session_cli.session_path(project_id, session_id)
rospy.logdebug("DF_CLIENT: Session Path: {}".format(self._session))
""" ROS Setup """
results_topic = rospy.get_param('/dialogflow_client/results_topic',
'/dialogflow_client/results')
requests_topic = rospy.get_param('/dialogflow_client/requests_topic',
'/dialogflow_client/requests')
text_req_topic = requests_topic + '/string_msg'
text_event_topic = requests_topic + '/string_event'
msg_req_topic = requests_topic + '/df_msg'
event_req_topic = requests_topic + '/df_event'
self._results_pub = rospy.Publisher(results_topic, DialogflowResult,
queue_size=10)
rospy.Subscriber(text_req_topic, String, self._text_request_cb)
rospy.Subscriber(text_event_topic, String, self._text_event_cb)
rospy.Subscriber(msg_req_topic, DialogflowRequest, self._msg_request_cb)
rospy.Subscriber(event_req_topic, DialogflowEvent, self._event_request_cb)
""" Audio setup """
# Mic stream input setup
self.audio = pyaudio.PyAudio()
self._server_name = rospy.get_param('/dialogflow_client/server_name',
'127.0.0.1')
self._port = rospy.get_param('/dialogflow_client/port', 4444)
if self.PLAY_AUDIO:
self._create_audio_output()
rospy.logdebug("DF_CLIENT: Last Contexts: {}".format(self.last_contexts))
rospy.loginfo("DF_CLIENT: Ready!")
# ========================================= #
# ROS Utility Functions #
# ========================================= #
def _text_request_cb(self, msg):
"""ROS Callback that sends text received from a topic to Dialogflow,
:param msg: A String message.
:type msg: String
"""
rospy.logdebug("DF_CLIENT: Request received")
new_msg = DialogflowRequest(query_text=msg.data)
df_msg = self.detect_intent_text(new_msg)
def _msg_request_cb(self, msg):
"""ROS Callback that sends text received from a topic to Dialogflow,
:param msg: A DialogflowRequest message.
:type msg: DialogflowRequest
"""
df_msg = self.detect_intent_text(msg)
rospy.logdebug("DF_CLIENT: Request received:\n{}".format(df_msg))
def _event_request_cb(self, msg):
"""
:param msg: DialogflowEvent Message
:type msg: DialogflowEvent"""
new_event = utils.converters.events_msg_to_struct(msg)
self.event_intent(new_event)
def _text_event_cb(self, msg):
new_event = EventInput(name=msg.data, language_code=self._language_code)
self.event_intent(new_event)
# ================================== #
# Setters/Getters #
# ================================== #
def get_language_code(self):
return self._language_code
def set_language_code(self, language_code):
assert isinstance(language_code, str), "Language code must be a string!"
self._language_code = language_code
# ==================================== #
# Utility Functions #
# ==================================== #
def _signal_handler(self, signal, frame):
rospy.logwarn("\nDF_CLIENT: SIGINT caught!")
self.exit()
# ----------------- #
# Audio Utilities #
# ----------------- #
def _create_audio_output(self):
"""Creates a PyAudio output stream."""
rospy.logdebug("DF_CLIENT: Creating audio output...")
self.stream_out = self.audio.open(format=pyaudio.paInt16,
channels=1,
rate=44100,
output=True)
def _play_stream(self, data):
"""Simple function to play a the output Dialogflow response.
:param data: Audio in bytes.
"""
self.stream_out.start_stream()
self.stream_out.write(data)
time.sleep(0.2) # Wait for stream to finish
self.stream_out.stop_stream()
# -------------- #
# DF Utilities #
# -------------- #
def _generator(self):
"""Generator function that continuously yields audio chunks from the
buffer. Used to stream data to the Google Speech API Asynchronously.
:return A streaming request with the audio data.
First request carries config data per Dialogflow docs.
:rtype: Iterator[:class:`StreamingDetectIntentRequest`]
"""
# First message contains session, query_input, and params
query_input = QueryInput(audio_config=self._audio_config)
contexts = utils.converters.contexts_msg_to_struct(self.last_contexts)
params = QueryParameters(contexts=contexts)
req = StreamingDetectIntentRequest(
session=self._session,
query_input=query_input,
query_params=params,
single_utterance=True,
output_audio_config=self._output_audio_config
)
yield req
if self.USE_AUDIO_SERVER:
with AudioServerStream() as stream:
audio_generator = stream.generator()
for content in audio_generator:
yield StreamingDetectIntentRequest(input_audio=content)
else:
with MicrophoneStream() as stream:
audio_generator = stream.generator()
for content in audio_generator:
yield StreamingDetectIntentRequest(input_audio=content)
# ======================================== #
# Dialogflow Functions #
# ======================================== #
def detect_intent_text(self, msg):
"""Use the Dialogflow API to detect a user's intent. Goto the Dialogflow
console to define intents and params.
:param msg: DialogflowRequest msg
:return query_result: Dialogflow's query_result with action parameters
:rtype: DialogflowResult
"""
# Create the Query Input
text_input = TextInput(text=msg.query_text, language_code=self._language_code)
query_input = QueryInput(text=text_input)
# Create QueryParameters
user_contexts = utils.converters.contexts_msg_to_struct(msg.contexts)
self.last_contexts = utils.converters.contexts_msg_to_struct(self.last_contexts)
contexts = self.last_contexts + user_contexts
params = QueryParameters(contexts=contexts)
try:
response = self._session_cli.detect_intent(
session=self._session,
query_input=query_input,
query_params=params,
output_audio_config=self._output_audio_config
)
except google.api_core.exceptions.ServiceUnavailable:
rospy.logwarn("DF_CLIENT: Deadline exceeded exception caught. The response "
"took too long or you aren't connected to the internet!")
else:
# Store context for future use
self.last_contexts = utils.converters.contexts_struct_to_msg(
response.query_result.output_contexts
)
df_msg = utils.converters.result_struct_to_msg(
response.query_result)
rospy.loginfo(utils.output.print_result(response.query_result))
# Play audio
if self.PLAY_AUDIO:
self._play_stream(response.output_audio)
self._results_pub.publish(df_msg)
return df_msg
def detect_intent_stream(self, return_result=False):
"""Gets data from an audio generator (mic) and streams it to Dialogflow.
We use a stream for VAD and single utterance detection."""
# Generator yields audio chunks.
requests = self._generator()
responses = self._session_cli.streaming_detect_intent(requests)
resp_list = []
try:
for response in responses:
resp_list.append(response)
rospy.logdebug(
'DF_CLIENT: Intermediate transcript: "{}".'.format(
response.recognition_result.transcript))
except google.api_core.exceptions.Cancelled as c:
rospy.logwarn("DF_CLIENT: Caught a Google API Client cancelled "
"exception:\n{}".format(c))
except google.api_core.exceptions.Unknown as u:
rospy.logwarn("DF_CLIENT: Unknown Exception Caught:\n{}".format(u))
else:
if response is None:
rospy.logwarn("DF_CLIENT: No response received!")
return None
# The response list returns responses in the following order:
# 1. All intermediate recognition results
# 2. The Final query recognition result (no audio!)
# 3. The output audio with config
final_result = resp_list[-2].query_result
final_audio = resp_list[-1]
self.last_contexts = utils.converters.contexts_struct_to_msg(
final_result.output_contexts
)
df_msg = utils.converters.result_struct_to_msg(final_result)
rospy.loginfo(utils.output.print_result(final_result))
# Play audio
if self.PLAY_AUDIO:
self._play_stream(final_audio.output_audio)
# Pub
self._results_pub.publish(df_msg)
if return_result: return df_msg, final_result
return df_msg
def event_intent(self, event):
"""Send an event message to Dialogflow
:param event: The ROS event message
:type event: DialogflowEvent
:return: The result from dialogflow as a ROS msg
:rtype: DialogflowResult
"""
query_input = QueryInput(event=event)
params = utils.converters.create_query_parameters(
contexts=self.last_contexts
)
response = self._session_cli.detect_intent(
session=self._session,
query_input=query_input,
query_params=params,
output_audio_config=self._output_audio_config
)
df_msg = utils.converters.result_struct_to_msg(response)
if self.PLAY_AUDIO:
self._play_stream(response.output_audio)
return df_msg
def start(self):
"""Start the dialogflow client"""
rospy.loginfo("DF_CLIENT: Spinning...")
rospy.spin()
def exit(self):
"""Close as cleanly as possible"""
rospy.loginfo("DF_CLIENT: Shutting down")
self.audio.terminate()
exit()
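# Hedged usage sketch (illustrative, not part of the original node). It assumes a
# DialogflowRequest ROS message exposing the `query_text` and `contexts` fields that
# detect_intent_text() reads above; the import path below is an assumption.
def _example_text_query(client):
    from dialogflow_ros.msg import DialogflowRequest  # assumed message package/type
    request = DialogflowRequest(query_text="What's the weather like today?")
    # Returns the DialogflowResult message built by result_struct_to_msg()
    return client.detect_intent_text(request)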
if __name__ == '__main__':
rospy.init_node('dialogflow_client')
df = DialogflowClient()
df.start()
df.detect_intent_stream()
| 40.631728
| 93
| 0.604894
|
73bfb5a327909f46039ad5066a57b6c22f8ab5d3
| 1,314
|
py
|
Python
|
container/web/rest_api.py
|
pureelk/pureelk
|
cd4bbc369379da4b8dcf42d20e26f31c3314b170
|
[
"Apache-2.0"
] | 14
|
2015-10-15T21:57:17.000Z
|
2018-04-13T23:30:16.000Z
|
container/web/rest_api.py
|
gary-yang/PureElk
|
e87fdb58c29ad406b089226ca65a65ba1b4fb0eb
|
[
"Apache-2.0"
] | 9
|
2016-03-01T21:40:59.000Z
|
2017-01-30T20:00:47.000Z
|
container/web/rest_api.py
|
gary-yang/PureElk
|
e87fdb58c29ad406b089226ca65a65ba1b4fb0eb
|
[
"Apache-2.0"
] | 11
|
2015-09-24T14:48:16.000Z
|
2019-03-08T15:36:52.000Z
|
import flask
from flask import request, make_response, current_app
from errorcodes import ErrorCodes
import json
from functools import wraps
def rest_api(f):
"""
A decorator for rest API
    :param f: the view function to wrap
    :return: the wrapped view that parses a JSON body and serialises the result
"""
@wraps(f)
def decorator(*args, **kwargs):
json_object = None
if request.data:
try:
json_object = json.loads(request.data)
except ValueError as v:
current_app.logger.info("Invalid input = {}, error = {}".format(request.data, v))
return make_rest_response(make_error(ErrorCodes.InvalidInput.value, "Input is invalid"), 400)
if json_object:
result = f(*args, **dict(kwargs, json_body=json_object))
else:
result = f(*args, **kwargs)
if isinstance(result, flask.Response):
return result
else:
return flask.Response(json.dumps(result), content_type='application/json; charset=utf-8')
return decorator
def make_rest_response(error, status_code):
response = make_response(json.dumps(error), status_code)
response.headers["Content-Type"] = "'application/json; charset=utf-8'"
return response
def make_error(code, message):
return {
"code": code,
"message": message
}
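# Hedged usage sketch (illustrative, not part of the original PureELK module): the app
# and route below are assumptions showing how @rest_api wraps a Flask view and injects
# the parsed JSON body as the `json_body` keyword argument.
def _example_app():
    app = flask.Flask(__name__)
    @app.route("/items", methods=["POST"])
    @rest_api
    def create_item(json_body=None):
        # json_body is only supplied when the request carried a valid JSON payload
        return {"received": json_body}
    return app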
| 26.816327
| 109
| 0.622527
|
af9faf21b79e0c8973f7dc742f7fec3f230fb0eb
| 6,954
|
py
|
Python
|
homeassistant/components/nanoleaf/light.py
|
JeffersonBledsoe/core
|
3825f80a2dd087ae70654079cd9f3071289b8423
|
[
"Apache-2.0"
] | 3
|
2021-11-05T13:24:19.000Z
|
2022-01-08T12:17:09.000Z
|
homeassistant/components/nanoleaf/light.py
|
JeffersonBledsoe/core
|
3825f80a2dd087ae70654079cd9f3071289b8423
|
[
"Apache-2.0"
] | 87
|
2020-07-06T22:22:54.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/nanoleaf/light.py
|
JeffersonBledsoe/core
|
3825f80a2dd087ae70654079cd9f3071289b8423
|
[
"Apache-2.0"
] | 3
|
2021-05-31T15:32:08.000Z
|
2021-08-10T22:08:42.000Z
|
"""Support for Nanoleaf Lights."""
from __future__ import annotations
import logging
import math
from typing import Any
from aionanoleaf import Nanoleaf, Unavailable
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_TRANSITION,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
from .const import DOMAIN
RESERVED_EFFECTS = ("*Solid*", "*Static*", "*Dynamic*")
DEFAULT_NAME = "Nanoleaf"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
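# Illustrative legacy YAML configuration matching the schema above (the host and token
# values are placeholders/assumptions); async_setup_platform below imports it into a
# config entry:
#
#   light:
#     - platform: nanoleaf
#       host: 192.168.1.10
#       token: YOUR_AUTH_TOKEN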
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Import Nanoleaf light platform."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: config[CONF_HOST], CONF_TOKEN: config[CONF_TOKEN]},
)
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the Nanoleaf light."""
nanoleaf: Nanoleaf = hass.data[DOMAIN][entry.entry_id]
async_add_entities([NanoleafLight(nanoleaf)])
class NanoleafLight(LightEntity):
"""Representation of a Nanoleaf Light."""
def __init__(self, nanoleaf: Nanoleaf) -> None:
"""Initialize an Nanoleaf light."""
self._nanoleaf = nanoleaf
self._attr_unique_id = self._nanoleaf.serial_no
self._attr_name = self._nanoleaf.name
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, self._nanoleaf.serial_no)},
name=self._nanoleaf.name,
manufacturer=self._nanoleaf.manufacturer,
model=self._nanoleaf.model,
sw_version=self._nanoleaf.firmware_version,
)
self._attr_min_mireds = math.ceil(
1000000 / self._nanoleaf.color_temperature_max
)
self._attr_max_mireds = kelvin_to_mired(self._nanoleaf.color_temperature_min)
@property
def brightness(self) -> int:
"""Return the brightness of the light."""
return int(self._nanoleaf.brightness * 2.55)
@property
def color_temp(self) -> int:
"""Return the current color temperature."""
return kelvin_to_mired(self._nanoleaf.color_temperature)
@property
def effect(self) -> str | None:
"""Return the current effect."""
# The API returns the *Solid* effect if the Nanoleaf is in HS or CT mode.
# The effects *Static* and *Dynamic* are not supported by Home Assistant.
# These reserved effects are implicitly set and are not in the effect_list.
# https://forum.nanoleaf.me/docs/openapi#_byoot0bams8f
return (
None if self._nanoleaf.effect in RESERVED_EFFECTS else self._nanoleaf.effect
)
@property
def effect_list(self) -> list[str]:
"""Return the list of supported effects."""
return self._nanoleaf.effects_list
@property
def icon(self) -> str:
"""Return the icon to use in the frontend, if any."""
return "mdi:triangle-outline"
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._nanoleaf.is_on
@property
def hs_color(self) -> tuple[int, int]:
"""Return the color in HS."""
return self._nanoleaf.hue, self._nanoleaf.saturation
@property
def supported_features(self) -> int:
"""Flag supported features."""
return (
SUPPORT_BRIGHTNESS
| SUPPORT_COLOR_TEMP
| SUPPORT_EFFECT
| SUPPORT_COLOR
| SUPPORT_TRANSITION
)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Instruct the light to turn on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
hs_color = kwargs.get(ATTR_HS_COLOR)
color_temp_mired = kwargs.get(ATTR_COLOR_TEMP)
effect = kwargs.get(ATTR_EFFECT)
transition = kwargs.get(ATTR_TRANSITION)
if hs_color:
hue, saturation = hs_color
await self._nanoleaf.set_hue(int(hue))
await self._nanoleaf.set_saturation(int(saturation))
if color_temp_mired:
await self._nanoleaf.set_color_temperature(
mired_to_kelvin(color_temp_mired)
)
if transition:
if brightness: # tune to the required brightness in n seconds
await self._nanoleaf.set_brightness(
int(brightness / 2.55), transition=int(kwargs[ATTR_TRANSITION])
)
else: # If brightness is not specified, assume full brightness
await self._nanoleaf.set_brightness(100, transition=int(transition))
else: # If no transition is occurring, turn on the light
await self._nanoleaf.turn_on()
if brightness:
await self._nanoleaf.set_brightness(int(brightness / 2.55))
if effect:
if effect not in self.effect_list:
raise ValueError(
f"Attempting to apply effect not in the effect list: '{effect}'"
)
await self._nanoleaf.set_effect(effect)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Instruct the light to turn off."""
transition: float | None = kwargs.get(ATTR_TRANSITION)
await self._nanoleaf.turn_off(None if transition is None else int(transition))
async def async_update(self) -> None:
"""Fetch new state data for this light."""
try:
await self._nanoleaf.get_info()
except Unavailable:
if self.available:
_LOGGER.warning("Could not connect to %s", self.name)
self._attr_available = False
return
if not self.available:
_LOGGER.info("Fetching %s data recovered", self.name)
self._attr_available = True
| 34.77
| 88
| 0.658326
|
70629854deac753fafb41a0850d3fefc2764edf2
| 1,332
|
py
|
Python
|
homeassistant/components/devolo_home_control/devolo_multi_level_switch.py
|
csseal/core
|
5802d65ef71697e6627b82e1677894d13d0f16d7
|
[
"Apache-2.0"
] | 6
|
2016-11-25T06:36:27.000Z
|
2021-11-16T11:20:23.000Z
|
homeassistant/components/devolo_home_control/devolo_multi_level_switch.py
|
SicAriuSx83/core
|
162c39258e68ae42fe4e1560ae91ed54f5662409
|
[
"Apache-2.0"
] | 45
|
2020-07-23T07:13:34.000Z
|
2022-03-31T06:01:55.000Z
|
homeassistant/components/devolo_home_control/devolo_multi_level_switch.py
|
SicAriuSx83/core
|
162c39258e68ae42fe4e1560ae91ed54f5662409
|
[
"Apache-2.0"
] | 2
|
2020-05-11T00:38:26.000Z
|
2021-01-15T13:23:44.000Z
|
"""Base class for multi level switches in devolo Home Control."""
import logging
from .devolo_device import DevoloDeviceEntity
_LOGGER = logging.getLogger(__name__)
class DevoloMultiLevelSwitchDeviceEntity(DevoloDeviceEntity):
"""Representation of a multi level switch device within devolo Home Control. Something like a dimmer or a thermostat."""
def __init__(self, homecontrol, device_instance, element_uid):
"""Initialize a multi level switch within devolo Home Control."""
super().__init__(
homecontrol=homecontrol,
device_instance=device_instance,
element_uid=element_uid,
name=device_instance.item_name,
sync=self._sync,
)
self._multi_level_switch_property = device_instance.multi_level_switch_property[
element_uid
]
self._value = self._multi_level_switch_property.value
def _sync(self, message):
"""Update the multi level switch state."""
if message[0] == self._multi_level_switch_property.element_uid:
self._value = message[1]
elif message[0].startswith("hdm"):
self._available = self._device_instance.is_online()
else:
_LOGGER.debug("No valid message received: %s", message)
self.schedule_update_ha_state()
| 37
| 124
| 0.683183
|
7e02c097c79d1ccffbd9bb3129c8c9d831a01316
| 848
|
py
|
Python
|
dectauth/urls.py
|
stefanw/eventphoauth
|
bd5678297e998bc4c16892aeff0ce712925f073b
|
[
"MIT"
] | 2
|
2019-08-25T15:18:11.000Z
|
2019-09-06T19:51:10.000Z
|
dectauth/urls.py
|
stefanw/eventphoauth
|
bd5678297e998bc4c16892aeff0ce712925f073b
|
[
"MIT"
] | null | null | null |
dectauth/urls.py
|
stefanw/eventphoauth
|
bd5678297e998bc4c16892aeff0ce712925f073b
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from oauth2_provider.views import (
AuthorizationView, TokenView,
)
from .api_views import ProfileView
from .views import start, challenge
from .callout_views import challenge_voice_response, challenge_gather_input
app_name = 'dectauth'
urlpatterns = [
url(r'^$', start, name="start"),
url(r'^api/user/', ProfileView.as_view(), name='api-user-profile'),
url(r'^challenge/(?P<challenge_uuid>[^/]+)/$', challenge, name='challenge'),
url(r'^authorize/$', AuthorizationView.as_view(), name="authorize"),
url(r'^token/$', TokenView.as_view(), name="token"),
url(r'^callout/(?P<challenge_uuid>[^/]+)/$', challenge_voice_response,
name='challenge-callout'),
url(r'^callout-gather/(?P<challenge_uuid>[^/]+)/$', challenge_gather_input,
name='challenge-callout-gather'),
]
| 35.333333
| 80
| 0.692217
|
2ae2c4adae2c9b8eb32c1ef29941cc19d45e5e1d
| 498
|
py
|
Python
|
sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/version.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/version.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/version.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
VERSION = "2017-06-01"
| 35.571429
| 76
| 0.524096
|
1d29d126f1a167d3ba2539b7d84fd1229213fdfe
| 103
|
py
|
Python
|
DeepRTS/__init__.py
|
Yigit-Arisoy/deep-rts
|
a5ed2c29b76789830df9f7075480c7229ccf0f4d
|
[
"MIT"
] | null | null | null |
DeepRTS/__init__.py
|
Yigit-Arisoy/deep-rts
|
a5ed2c29b76789830df9f7075480c7229ccf0f4d
|
[
"MIT"
] | null | null | null |
DeepRTS/__init__.py
|
Yigit-Arisoy/deep-rts
|
a5ed2c29b76789830df9f7075480c7229ccf0f4d
|
[
"MIT"
] | null | null | null |
try:
from DeepRTS import Engine
except ImportError:
import Engine
from DeepRTS import python
| 12.875
| 30
| 0.757282
|
6b8de90a198da52445bd6fea40e2ccf6ca24edd6
| 2,143
|
py
|
Python
|
token/ERC20/ERC20.q.py
|
ghiliweld/Quetz
|
50d052779e67e7bf9457e565b011cc7287863576
|
[
"MIT"
] | 3
|
2018-01-22T01:52:53.000Z
|
2018-01-26T09:31:04.000Z
|
token/ERC20/ERC20.q.py
|
ghiliweld/Quetz
|
50d052779e67e7bf9457e565b011cc7287863576
|
[
"MIT"
] | 1
|
2018-01-25T02:58:16.000Z
|
2018-01-25T02:58:16.000Z
|
token/ERC20/ERC20.q.py
|
ghiliweld/Quetz
|
50d052779e67e7bf9457e565b011cc7287863576
|
[
"MIT"
] | null | null | null |
Contract: ERC20
QZ: 0.0.1 # Quetz compiler version, will be important in case the standard changes as time goes on
# Hashtags are used for commenting
# Events of the token.
Event: Transfer logs a from{address}, a to{address}, a value{num}
# Variables of the token.
# Watch how variables are declared
# _variableName_ is a _variableType_, set _variableValue_ or just _variableName_ is a _variableType_
name is a string, set "ERC20"
# v.s. variableName{variableType} = variableValue
# The second option has less writing so I'll stick to that, but I'll consider both options later
name{string} = "ERC20"
symbol{string} = "20"
totalSupply{num} = 9001 # IT'S OVER 9000!!!!!!!
decimals{num} = 18
balances{mapping}: num[address] # mappingName{mapping}: valueType[keyType]
# FUNCTIONS known as ACTIONS in Quetz
# Functions in Quetz are called Actions, that react to calls and assess and act on parameters fed to them
# Action _actionName_ takes(paramName{paramType}) *takes is optional, is _visibilityStatus, is _read/payable *(this is optional), gives _returnType *gives is optional
Action symbol, is public, is read, gives a string:
give symbol of this contract
# this contract will be standard keywords to literally mean this contract on which the function is being called
# What is the balance of a particular account?
Action balanceOf takes(_owner{address}), is public, is read, gives a num:
give balances of _owner
# Return total supply of token.
Action totalSupply, is public, is read, gives num:
give totalSupply of this contract
# Send `_value` tokens to `_to` from your account
Action transfer takes(_to{address}, _amount{num}), is public, gives a bool:
require _amount morethan 0 # No negative amounts, require is a keyword
# the sentence goes on to the part after the comma if the condition isn't fulfilled, returns what is given and throws the transaction
require balances of msg.sender moreequalthan _amount # You can't send more than you have
balances of msg.sender minusequal _amount
balances of _to plusequal _amount
Transfer: msg.sender, _to, _amount # Transfer event call
give True
| 46.586957
| 166
| 0.761549
|
4e8263674995b3d652c2907e0b107740d1a423fb
| 562
|
py
|
Python
|
music/migrations/0015_auto_20200127_1203.py
|
realsifocopypaste333/sifo-player-binary
|
fc4f9f9534a2ea64b09b3b4ba51832d3823c3b64
|
[
"MIT"
] | 3
|
2020-02-08T16:55:03.000Z
|
2021-03-28T06:37:10.000Z
|
music/migrations/0015_auto_20200127_1203.py
|
AshkanGanj/Music-django-app
|
64d9c6557bc9bba212c828f8ca06fbdede38780f
|
[
"MIT"
] | 1
|
2021-11-27T03:35:11.000Z
|
2021-11-27T03:35:11.000Z
|
music/migrations/0015_auto_20200127_1203.py
|
AshkanGanj/Music-django-app
|
64d9c6557bc9bba212c828f8ca06fbdede38780f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-01-27 08:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('music', '0014_auto_20200113_1100'),
]
operations = [
migrations.RemoveField(
model_name='song',
name='is_favorite',
),
migrations.AlterField(
model_name='userprofile',
name='image',
field=models.ImageField(blank=True, default="{% static 'avatar.png' %}", null=True, upload_to='profile/'),
),
]
| 24.434783
| 118
| 0.581851
|
639188b3e8e69e379d0d834b6bf5ffaa8fd7bc1e
| 5,122
|
py
|
Python
|
frappe/website/utils.py
|
gangadhar-kadam/helpdesk-frappe
|
d50cc423258401ddcc89fb6607d11e5bb6ad8c25
|
[
"MIT"
] | null | null | null |
frappe/website/utils.py
|
gangadhar-kadam/helpdesk-frappe
|
d50cc423258401ddcc89fb6607d11e5bb6ad8c25
|
[
"MIT"
] | null | null | null |
frappe/website/utils.py
|
gangadhar-kadam/helpdesk-frappe
|
d50cc423258401ddcc89fb6607d11e5bb6ad8c25
|
[
"MIT"
] | 1
|
2018-03-21T19:27:17.000Z
|
2018-03-21T19:27:17.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, re, os
def delete_page_cache(path):
cache = frappe.cache()
groups = ("page_context", "website_page", "sitemap_options")
if path:
for name in groups:
cache.hdel(name, path)
else:
for name in groups:
cache.delete_key(name)
def find_first_image(html):
m = re.finditer("""<img[^>]*src\s?=\s?['"]([^'"]*)['"]""", html)
try:
return m.next().groups()[0]
except StopIteration:
return None
def can_cache(no_cache=False):
return not (frappe.conf.disable_website_cache or getattr(frappe.local, "no_cache", False) or no_cache)
def get_comment_list(doctype, name):
return frappe.db.sql("""select
comment, comment_by_fullname, creation, comment_by
from `tabComment` where comment_doctype=%s
and ifnull(comment_type, "Comment")="Comment"
and comment_docname=%s order by creation""", (doctype, name), as_dict=1) or []
def get_home_page():
if frappe.local.flags.home_page:
return frappe.local.flags.home_page
def _get_home_page():
role_home_page = frappe.get_hooks("role_home_page")
home_page = None
if role_home_page:
for role in frappe.get_roles():
if role in role_home_page:
home_page = role_home_page[role][-1]
break
if not home_page:
home_page = frappe.get_hooks("home_page")
if home_page:
home_page = home_page[-1]
if not home_page:
home_page = frappe.db.get_value("Website Settings", None, "home_page") or "login"
return home_page
return frappe.cache().hget("home_page", frappe.session.user, _get_home_page)
def is_signup_enabled():
if getattr(frappe.local, "is_signup_enabled", None) is None:
frappe.local.is_signup_enabled = True
if frappe.utils.cint(frappe.db.get_value("Website Settings",
"Website Settings", "disable_signup")):
frappe.local.is_signup_enabled = False
return frappe.local.is_signup_enabled
def cleanup_page_name(title):
"""make page name from title"""
if not title:
return title
name = title.lower()
name = re.sub('[~!@#$%^&*+()<>,."\'\?]', '', name)
name = re.sub('[:/]', '-', name)
name = '-'.join(name.split())
# replace repeating hyphens
name = re.sub(r"(-)\1+", r"\1", name)
return name
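# Illustrative example: cleanup_page_name("Hello, World: A/B Test!") -> "hello-world-a-b-test"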
def get_shade(color, percent):
color, color_format = detect_color_format(color)
r, g, b, a = color
avg = (float(int(r) + int(g) + int(b)) / 3)
# switch dark and light shades
if avg > 128:
percent = -percent
# stronger diff for darker shades
if percent < 25 and avg < 64:
percent = percent * 2
new_color = []
for channel_value in (r, g, b):
new_color.append(get_shade_for_channel(channel_value, percent))
r, g, b = new_color
return format_color(r, g, b, a, color_format)
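# Illustrative example: get_shade("#fff", 10) -> "#e6e6e6" (light colours are shifted
# towards dark and vice versa, so white comes back slightly darkened).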
def detect_color_format(color):
if color.startswith("rgba"):
color_format = "rgba"
color = [c.strip() for c in color[5:-1].split(",")]
elif color.startswith("rgb"):
color_format = "rgb"
color = [c.strip() for c in color[4:-1].split(",")] + [1]
else:
# assume hex
color_format = "hex"
if color.startswith("#"):
color = color[1:]
if len(color) == 3:
# hex in short form like #fff
color = "{0}{0}{1}{1}{2}{2}".format(*tuple(color))
color = [int(color[0:2], 16), int(color[2:4], 16), int(color[4:6], 16), 1]
return color, color_format
def get_shade_for_channel(channel_value, percent):
v = int(channel_value) + int(int('ff', 16) * (float(percent)/100))
if v < 0:
v=0
if v > 255:
v=255
return v
def format_color(r, g, b, a, color_format):
if color_format == "rgba":
return "rgba({0}, {1}, {2}, {3})".format(r, g, b, a)
elif color_format == "rgb":
return "rgb({0}, {1}, {2})".format(r, g, b)
else:
# assume hex
return "#{0}{1}{2}".format(convert_to_hex(r), convert_to_hex(g), convert_to_hex(b))
def convert_to_hex(channel_value):
h = hex(channel_value)[2:]
if len(h) < 2:
h = "0" + h
return h
def abs_url(path):
"""Deconstructs and Reconstructs a URL into an absolute URL or a URL relative from root '/'"""
if not path:
return
if path.startswith('http://') or path.startswith('https://'):
return path
if not path.startswith("/"):
path = "/" + path
return path
def get_full_index(route=None, doctype="Web Page", extn = False):
"""Returns full index of the website (on Web Page) upto the n-th level"""
all_routes = []
def get_children(parent):
children = frappe.db.get_all(doctype, ["parent_website_route", "page_name", "title", "template_path"],
{"parent_website_route": parent}, order_by="idx asc")
for d in children:
d.url = abs_url(os.path.join(d.parent_website_route or "", d.page_name))
if d.url not in all_routes:
d.children = get_children(d.url.lstrip("/"))
all_routes.append(d.url)
if extn and os.path.basename(d.template_path).split(".")[0] != "index":
d.url = d.url + ".html"
# no index.html for home page
# home should not be in table of contents
if not parent:
children = [d for d in children if d.page_name not in ("index.html", "index",
"", "contents")]
return children
return get_children(route or "")
| 25.738693
| 104
| 0.671417
|
68beb4211e37917caf2c613ece89fd1790e86701
| 1,560
|
py
|
Python
|
app/fields.py
|
galyeshua/Index
|
96e1630efc51d2c03f2d80889dfa1d117155e2ee
|
[
"MIT"
] | null | null | null |
app/fields.py
|
galyeshua/Index
|
96e1630efc51d2c03f2d80889dfa1d117155e2ee
|
[
"MIT"
] | null | null | null |
app/fields.py
|
galyeshua/Index
|
96e1630efc51d2c03f2d80889dfa1d117155e2ee
|
[
"MIT"
] | null | null | null |
from wtforms import FieldList
from wtforms.utils import unset_value
# override process func of FieldList to make it unordered
class UnorderedFieldList(FieldList):
def process(self, formdata, data=unset_value, extra_filters=None):
if extra_filters:
raise TypeError(
"FieldList does not accept any filters. Instead, define"
" them on the enclosed field."
)
self.entries = []
if not data:
try:
data = self.default()
except TypeError:
data = self.default
self.object_data = data
if formdata:
indices = self._remove_duplicates(self._extract_indices(self.name, formdata))
if self.max_entries:
indices = indices[: self.max_entries]
idata = iter(data)
for index in indices:
try:
obj_data = next(idata)
except StopIteration:
obj_data = unset_value
self._add_entry(formdata, obj_data, index=index)
else:
for obj_data in data:
self._add_entry(formdata, obj_data)
while len(self.entries) < self.min_entries:
self._add_entry(formdata)
@classmethod
def _remove_duplicates(cls, seq):
"""Remove duplicates in a case insensitive, but case preserving manner"""
d = {}
for item in seq:
if item not in d:
d[item] = True
yield item
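# Hedged usage sketch (illustrative, not part of the original module): a WTForms form
# using UnorderedFieldList so submitted entry indices keep their original order; the
# form and field names below are assumptions.
def _example_form():
    from wtforms import Form, StringField
    class TagsForm(Form):
        tags = UnorderedFieldList(StringField("Tag"), min_entries=1)
    return TagsForm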
| 30.588235
| 89
| 0.557051
|
eff5d765fe0855b9e2d3d1f90bd49b4d4ceb206c
| 445
|
py
|
Python
|
pywttr_models/vi.py
|
monosans/pywttr-models
|
da0b257b720b9604d0ed3db68e4f93c9b69e573c
|
[
"MIT"
] | 4
|
2021-12-13T04:56:16.000Z
|
2021-12-25T11:40:29.000Z
|
pywttr_models/vi.py
|
monosans/pywttr-models
|
da0b257b720b9604d0ed3db68e4f93c9b69e573c
|
[
"MIT"
] | null | null | null |
pywttr_models/vi.py
|
monosans/pywttr-models
|
da0b257b720b9604d0ed3db68e4f93c9b69e573c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import List as _List
from pywttr_models import base as _base
class CurrentConditionItem(_base.CurrentConditionItem):
lang_vi: _List[_base.LangItem]
class HourlyItem(_base.HourlyItem):
lang_vi: _List[_base.LangItem]
class WeatherItem(_base.WeatherItem):
hourly: _List[HourlyItem]
class Model(_base.Model):
current_condition: _List[CurrentConditionItem]
weather: _List[WeatherItem]
| 20.227273
| 55
| 0.761798
|
b1f33cd1b32e0700f4e51cf182ed80e0455b8dd4
| 1,584
|
py
|
Python
|
Examples/GraphOperator/Ising/ising.py
|
tvieijra/netket
|
ef3ff32b242f25b6a6ae0f08db1aada85775a2ea
|
[
"Apache-2.0"
] | 10
|
2019-11-29T02:51:53.000Z
|
2021-08-14T18:52:33.000Z
|
Examples/GraphOperator/Ising/ising.py
|
tvieijra/netket
|
ef3ff32b242f25b6a6ae0f08db1aada85775a2ea
|
[
"Apache-2.0"
] | 2
|
2018-11-04T14:38:01.000Z
|
2018-11-08T16:56:10.000Z
|
Examples/GraphOperator/Ising/ising.py
|
tvieijra/netket
|
ef3ff32b242f25b6a6ae0f08db1aada85775a2ea
|
[
"Apache-2.0"
] | 6
|
2019-12-02T07:29:01.000Z
|
2021-04-04T21:55:21.000Z
|
# Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netket as nk
import networkx as nx
import numpy as np
sigmax = [[0, 1], [1, 0]]
sigmaz = [[1, 0], [0, -1]]
mszsz = (np.kron(sigmaz, sigmaz)).tolist()
# Notice that the Transverse-Field Ising model as defined here has a sign problem
L = 20
site_operator = [sigmax]
bond_operator = [mszsz]
# Hypercube
g = nk.graph.Hypercube(length=L, n_dim=1, pbc=True)
# Custom Hilbert Space
hi = nk.hilbert.Spin(s=0.5, graph=g)
# Graph Operator
op = nk.operator.GraphOperator(hi, siteops=site_operator, bondops=bond_operator)
# Restricted Boltzmann Machine
ma = nk.machine.RbmSpin(hilbert=hi, alpha=1)
ma.init_random_parameters(seed=1234, sigma=0.01)
# Local Metropolis Sampling
sa = nk.sampler.MetropolisLocal(machine=ma)
# Optimizer
opt = nk.optimizer.AdaMax()
# Variational Monte Carlo driver (plain gradient-descent updates, method="Gd")
gs = nk.variational.Vmc(
hamiltonian=op,
sampler=sa,
optimizer=opt,
n_samples=1000,
diag_shift=0.1,
method="Gd",
)
gs.run(output_prefix="test", n_iter=30000)
| 27.310345
| 80
| 0.737374
|
343629237d655fae627ea5d747d8381314f484ef
| 1,226
|
py
|
Python
|
code/phase_plot.py
|
jstac/cycles_moral_hazard
|
ff4881b8b27f6738edfc526ead98579bc801c834
|
[
"BSD-3-Clause"
] | 2
|
2018-01-29T19:39:14.000Z
|
2018-08-24T03:36:25.000Z
|
code/phase_plot.py
|
jstac/cycles_moral_hazard
|
ff4881b8b27f6738edfc526ead98579bc801c834
|
[
"BSD-3-Clause"
] | null | null | null |
code/phase_plot.py
|
jstac/cycles_moral_hazard
|
ff4881b8b27f6738edfc526ead98579bc801c834
|
[
"BSD-3-Clause"
] | 3
|
2018-04-14T19:33:56.000Z
|
2019-12-27T17:24:55.000Z
|
import numpy as np
def phase_plot(ax, g, h, xmin, xmax, ymin, ymax, gridsize=100):
"""
Plots the phase diagram for the system x' = g(x,y), y' = h(x,y)
over the square [xmin, xmax] times [ymin, ymax].
"""
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
delta_g = np.vectorize(lambda x, y: x - g(x, y))
delta_h = np.vectorize(lambda x, y: y - h(x, y))
xgrid = np.linspace(xmin, xmax, gridsize)
ygrid = np.linspace(ymin, ymax, gridsize)
X, Y = np.meshgrid(xgrid, ygrid)
Zg, Zh = delta_g(X, Y), delta_h(X, Y)
    ax.contour(X, Y, Zg, [0.0], linewidths=2, alpha=0.8)
    ax.contour(X, Y, Zh, [0.0], linewidths=2, alpha=0.8)
def draw_arrow(x, y):
eps = 0.0001
v1, v2 = g(x, y) - x, h(x, y) - y
nrm = np.sqrt(v1**2 + v2**2)
scale = eps / nrm
ax.arrow(x, y, scale * v1, scale * v2,
antialiased=True,
alpha=0.8,
head_length=0.025*(xmax - xmin),
head_width=0.012*(xmax - xmin),
fill=False)
xgrid = np.linspace(xmin * 1.1, xmax * 0.95, 12)
ygrid = np.linspace(ymin * 1.1, ymax * 0.95, 12)
for x in xgrid:
for y in ygrid:
draw_arrow(x, y)
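# Hedged usage sketch (illustrative, not part of the original module): the maps g and h
# below are arbitrary examples chosen only to exercise phase_plot.
def _example_phase_plot():
    import matplotlib.pyplot as plt
    g = lambda x, y: 0.5 * x + 0.1 * y
    h = lambda x, y: 0.2 * x + 0.6 * y
    fig, ax = plt.subplots()
    phase_plot(ax, g, h, xmin=0.1, xmax=2.0, ymin=0.1, ymax=2.0, gridsize=50)
    plt.show()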
| 28.511628
| 67
| 0.519576
|
081c4869991b6c5d68bb10453d44fae6204dfd56
| 2,864
|
py
|
Python
|
python/moneysocket/protocol/rendezvous/incoming_nexus.py
|
drschwabe/prototype
|
7948727443d5d3255b317fc94cebf03c128a58d3
|
[
"MIT"
] | null | null | null |
python/moneysocket/protocol/rendezvous/incoming_nexus.py
|
drschwabe/prototype
|
7948727443d5d3255b317fc94cebf03c128a58d3
|
[
"MIT"
] | null | null | null |
python/moneysocket/protocol/rendezvous/incoming_nexus.py
|
drschwabe/prototype
|
7948727443d5d3255b317fc94cebf03c128a58d3
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Jarret Dyrbye
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php
import uuid
import logging
from moneysocket.protocol.nexus import ProtocolNexus
from moneysocket.message.notification.rendezvous import NotifyRendezvous
from moneysocket.message.notification.rendezvous_not_ready import (
NotifyRendezvousNotReady)
from moneysocket.message.notification.rendezvous_end import NotifyRendezvousEnd
class IncomingRendezvousNexus(ProtocolNexus):
def __init__(self, below_nexus, layer):
super().__init__(below_nexus, layer)
self.rendezvous_finished_cb = None
self.request_reference_uuid = None
self.rendezvous_id = None
self.directory = layer.directory
###########################################################################
def is_layer_message(self, msg):
if msg['message_class'] != "REQUEST":
return False
return msg['request_name'] in {"REQUEST_RENDEZVOUS"}
def recv_from_below_cb(self, below_nexus, msg):
logging.info("rdv nexus got msg")
if not self.is_layer_message(msg):
# pass on to above
super().recv_from_below_cb(below_nexus, msg)
return
assert msg['request_name'] == "REQUEST_RENDEZVOUS"
self.rendezvous_id = msg['rendezvous_id']
self.request_reference_uuid = msg['request_uuid']
if self.directory.is_rid_peered(self.rendezvous_id):
self.initiate_close()
pass
self.directory.add_nexus(self, self.rendezvous_id)
peer = self.directory.get_peer_nexus(self)
if peer:
n = NotifyRendezvous(self.rendezvous_id,
self.request_reference_uuid)
self.send(n)
self.rendezvous_finished_cb(self)
peer.rendezvous_achieved()
else:
n = NotifyRendezvousNotReady(self.rendezvous_id,
self.request_reference_uuid)
self.send(n)
def recv_raw_from_below_cb(self, below_nexus, msg_bytes):
logging.info("rdv nexus got raw msg")
super().recv_raw_from_below_cb(below_nexus, msg_bytes)
###########################################################################
def wait_for_rendezvous(self, rendezvous_finished_cb):
self.rendezvous_finished_cb = rendezvous_finished_cb
def rendezvous_achieved(self):
assert self.directory.is_rid_peered(self.rendezvous_id)
n = NotifyRendezvous(self.rendezvous_id, self.request_reference_uuid)
self.send(n)
self.rendezvous_finished_cb(self)
def end_rendezvous(self):
self.directory.remove_nexus(self)
n = NotifyRendezvousEnd(self.rendezvous_id)
self.send(n)
| 36.717949
| 79
| 0.644204
|
369898f8e42e30bf1d9593aa89bb902de60445ae
| 3,526
|
py
|
Python
|
dtx/notify.py
|
qzed/linux-surfacegen5-dtx-daemon
|
1b48933a4e8666d8b63b9e4f7a314d4a9bd7c8f8
|
[
"MIT"
] | 1
|
2019-03-22T19:47:14.000Z
|
2019-03-22T19:47:14.000Z
|
dtx/notify.py
|
qzed/linux-surfacegen5-dtx-daemon
|
1b48933a4e8666d8b63b9e4f7a314d4a9bd7c8f8
|
[
"MIT"
] | 2
|
2019-04-08T01:19:44.000Z
|
2019-04-20T19:50:53.000Z
|
dtx/notify.py
|
qzed/linux-surfacegen5-dtx-daemon
|
1b48933a4e8666d8b63b9e4f7a314d4a9bd7c8f8
|
[
"MIT"
] | null | null | null |
"""
dBus System Notifications.
Allows processes running as root to send notifications to all users.
"""
import dbus
import os
def get_user_paths(bus, clss='user'):
"""Get all dBus `User` object paths of class `clss`."""
user_paths = set()
logind = bus.get_object('org.freedesktop.login1', '/org/freedesktop/login1')
loginm = dbus.Interface(logind, 'org.freedesktop.login1.Manager')
for sess_spec in loginm.ListSessions():
sess_path = sess_spec[4]
sess = bus.get_object('org.freedesktop.login1', sess_path)
sess_p = dbus.Interface(sess, 'org.freedesktop.DBus.Properties')
if sess_p.Get('org.freedesktop.login1.Session', 'Class') == clss:
user_path = sess_p.Get('org.freedesktop.login1.Session', 'User')[1]
user_paths |= {user_path}
return user_paths
def get_user_runtime_paths(bus=dbus.SystemBus()):
"""Get dBus user runtime paths of all active "human" users."""
for user_path in get_user_paths(bus):
user = bus.get_object('org.freedesktop.login1', user_path)
user_p = dbus.Interface(user, 'org.freedesktop.DBus.Properties')
uid = user_p.Get('org.freedesktop.login1.User', 'UID')
rt_path = user_p.Get('org.freedesktop.login1.User', 'RuntimePath')
yield uid, rt_path
def _notify_all_show(n):
"""Show notification to all active "human" users."""
ids = dict()
for uid, path in get_user_runtime_paths():
os.seteuid(uid) # bypass systemd access check
addr = "unix:path={}/bus".format(path)
sess = dbus.bus.BusConnection(addr)
notif_o = sess.get_object('org.freedesktop.Notifications', '/org/freedesktop/Notifications')
notif_i = dbus.Interface(notif_o, 'org.freedesktop.Notifications')
notif_id = notif_i.Notify(n.app_name, n.replaces_id, n.app_icon, n.summary, n.body,
n.actions, n.hints, n.timeout)
os.seteuid(0)
ids[uid] = notif_id
return ActiveSystemNotification(n, ids)
def _notify_all_close(n):
"""Close the notification on all sessions it is active."""
for uid, path in get_user_runtime_paths():
if uid not in n.ids:
continue
os.seteuid(uid) # bypass systemd access check
addr = "unix:path={}/bus".format(path)
sess = dbus.bus.BusConnection(addr)
notif_o = sess.get_object('org.freedesktop.Notifications', '/org/freedesktop/Notifications')
notif_i = dbus.Interface(notif_o, 'org.freedesktop.Notifications')
notif_i.CloseNotification(n.ids[uid])
os.seteuid(0)
class SystemNotification:
"""A notification that can be sent to all users."""
def __init__(self, app_name, summary="", body="", replaces_id=0, timeout=-1, app_icon=''):
"""Create a new notification."""
self.app_name = app_name
self.app_icon = app_icon
self.summary = summary
self.body = body
self.replaces_id = replaces_id
self.timeout = timeout
self.hints = dict()
self.actions = list()
def show(self):
"""Show this notification to all "human" users."""
return _notify_all_show(self)
class ActiveSystemNotification:
"""A SystemNotification that has already been sent and is active."""
def __init__(self, notif, ids):
self.notif = notif
self.ids = ids
def close(self):
"""Close this notification on all session it is active."""
_notify_all_close(self)
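# Hedged usage sketch (illustrative, not part of the original daemon): show a
# notification to every active "human" user and close it again later; the app name and
# texts below are assumptions.
def _example_notification():
    notification = SystemNotification("dtx-daemon", summary="Detaching",
                                      body="The display can now be detached")
    active = notification.show()
    # ... later, e.g. once the tablet is re-attached ...
    active.close()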
| 31.20354
| 100
| 0.64776
|
aa49033fcca30e24e21386a120abea03161c5942
| 1,129
|
py
|
Python
|
json_2_yolo.py
|
souvik0306/auairdataset
|
1f9a5fc4835feb49acf8df34b97f0348bdacf910
|
[
"MIT"
] | null | null | null |
json_2_yolo.py
|
souvik0306/auairdataset
|
1f9a5fc4835feb49acf8df34b97f0348bdacf910
|
[
"MIT"
] | null | null | null |
json_2_yolo.py
|
souvik0306/auairdataset
|
1f9a5fc4835feb49acf8df34b97f0348bdacf910
|
[
"MIT"
] | null | null | null |
# This script can convert your AU-AIR annotations to YOLO Format
import json
data = open(r'annotations.json') #Pass in the path for AU-AIR annotation file
ann_file = json.load(data)
ann_list = ann_file['annotations']
b = [i.get('bbox') for i in ann_list] #b is a list which contains the bbox parameters for YOLO Conversion
b = b[:10]
# print(b)
name = []
for i in ann_file['annotations']:
name.append(i.get('image_name'))
# print(name)
YOLO_LABELS_PATH = r'yolo_annotation_folder/' # Pass in a folder to save the YOLO Annotation Files
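# YOLO labels use one line per object: "<class> <x_center> <y_center> <width> <height>",
# with all box values normalised by the image size (assumed to be 1920x1080 below).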
for a in range(0,len(b)):
c = b[a]
dw = 1/1920
dh = 1/1080
file = name[a]
file = file.replace('jpg','txt')
out_file = open(YOLO_LABELS_PATH + '/' + file, 'w')
for i in range(0,len(c)):
x = c[i]['left'] + c[i]['width']/2
x = x*dw
y = c[i]['top'] + c[i]['height']/2
y = y*dh
w = c[i]['width'] * dw
h = c[i]['height'] * dh
label = (round(x,6),round(y,6),round(w,6),round(h,6))
        out_file.write(str(c[i]['class']) + " " + " ".join(f'{v:.6f}' for v in label) + '\n')
out_file.close()
| 33.205882
| 105
| 0.585474
|
cad446320eae72fe9dfb800dd34ee6d9e2c314c2
| 391
|
py
|
Python
|
python/python-algorithm-intervew/23-dynamic-programing/85-fibonacci-number-2.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | 1
|
2022-03-06T03:49:31.000Z
|
2022-03-06T03:49:31.000Z
|
python/python-algorithm-intervew/23-dynamic-programing/85-fibonacci-number-2.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | null | null | null |
python/python-algorithm-intervew/23-dynamic-programing/85-fibonacci-number-2.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | null | null | null |
"""
* 피보나치 수
피보나치 수를 구하라.
"""
import collections
class Solution:
dp = collections.defaultdict(int)
    # Memoization (top-down)
def fib(self, n: int) -> int:
if n <= 1:
return n
if self.dp[n]:
return self.dp[n]
self.dp[n] = self.fib(n - 1) + self.fib(n - 2)
return self.dp[n]
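# Hedged alternative sketch (not part of the original solution): the same recurrence
# computed bottom-up (tabulation) with O(1) extra space.
def fib_bottom_up(n: int) -> int:
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a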
if __name__ == '__main__':
print(Solution().fib(6))
| 17
| 54
| 0.519182
|
2774d1cd1ccd3c0b1e9083cb6784afa7c6243496
| 11,087
|
py
|
Python
|
source/astroNS/astroNS.py
|
pyastroNS/astroNS
|
35687267179467e4cb7ea59ac119c5f0f182107f
|
[
"MIT"
] | null | null | null |
source/astroNS/astroNS.py
|
pyastroNS/astroNS
|
35687267179467e4cb7ea59ac119c5f0f182107f
|
[
"MIT"
] | null | null | null |
source/astroNS/astroNS.py
|
pyastroNS/astroNS
|
35687267179467e4cb7ea59ac119c5f0f182107f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
"""
astroNS simulator
"""
import simpy
import argparse
import pandas as pd
import datetime
import pytz
import random
import uuid
import os
import sys
import json
from multiprocessing import Queue, Process
from collections import namedtuple
from contextlib import redirect_stdout
from typing import List, Dict, Tuple
#
# Main Simulation Controller
#
# takes a:
# Network Parser
#    Input/Data Generators
#
def runSim(
filename: str, simStop: float, env: simpy.Environment
) -> Tuple[List["BaseNode"], simpy.Environment]:
"""Runs the simulation
Args:
filename: The filename for the network model.
simStop: The length of the scenario to run.
env: The simpy environment class.
Returns:
network_nodes: The nodes to be run.
env: The simpy environment class.
"""
from nodes.core.base import BaseNode
from interfaces.networkfactory import load_network_file
from interfaces.outputdatawriter import output_loaded_config
# Load in the file
network_nodes = load_network_file(filename, env, None)
# Connect the nodes
BaseNode.make_link_map_data(network_nodes)
# Put the nodes back in the environment for simpy
env.network_nodes = network_nodes
#!TODO Convert to logger instead of print
print(
" %| 0.00|2020-10-22T20:58:17.862886+00:00| CelerNet |[ Simulator ]|00000000-0000-0000-000000000000|Loaded |{}| total nodes".format(
len(network_nodes)
)
)
# Save the configuration to file
with open(
"{}/loaded_node_config.txt".format(env.path_to_results), "w"
) as node_config_file:
output_loaded_config(network_nodes, node_config_file, env)
try:
# Run it
env.run(until=simStop)
except RuntimeError:
print("Simulation process is too slow for real time mode. Stopping.")
return network_nodes, env
# The default-args path converts a plain dict into a Python object that mimics the args class from __main__
def setup_env(env, args):
"""Sets up the simpy environment for a discrete event simulation
Args:
env: The simpy environment class.
args: Argument class object.
"""
# this copies all the fields of 'args' into the 'env' object
env.__dict__.update(args.__dict__)
# Set the network name
env.network_name = args.model_file
# Set the epoch for the start of the scenario
env.epoch = datetime.datetime.strptime(args.epoch, "%Y-%m-%dT%H:%M:%S.%fZ")
env.epoch = env.epoch.replace(tzinfo=pytz.UTC)
    # Convert a simulation time (seconds from scenario start) into an absolute datetime
env.now_datetime = lambda sim_time=None: env.epoch + datetime.timedelta(
seconds=sim_time if sim_time else env.now
)
    # Record the scenario start time and the simulation end time as datetimes
env.start_datetime = env.now_datetime(0).isoformat(timespec="microseconds")
env.end_simtime_dt = env.now_datetime(env.end_simtime)
# Forces node stats to True as well
if args.node_stats_history:
env.make_node_stats = True
# helps to make the output tables well formatted
pd.set_option("display.width", 150)
# make a token for this run
env.this_runs_uuid = uuid.uuid4()
# Output directory
path_to_results = "./Results/{}{}".format(
args.network_name,
env.now_datetime(0)
.replace(tzinfo=None)
.isoformat(timespec="microseconds")
.replace(":", "-")
.replace(".", "_"),
)
if not os.path.exists(path_to_results):
os.makedirs(path_to_results)
env.path_to_results = path_to_results
# Set the random seed
if args.seed:
seed = args.seed
else:
seed = random.randrange(sys.maxsize)
    # Seed Python's random module so that runs are reproducible
random.seed(a=seed, version=2)
env.seed = seed
env.loaded_network_json = []
if args.promise_threads > 0:
job_queue = Queue()
def postprocess_network(env):
"""Post Process
Args:
env: The simpy environment class.
args: Dictionary of all arguments to be passed.
"""
from nodes.core.base import BaseNode
from interfaces.outputdatawriter import (
output_node_stats,
output_msg_history,
output_msg_history_tab,
output_sim_end_state,
)
# Write the network to file
with open(
"{}/loaded_network.json".format(env.path_to_results), "w"
) as loaded_network_json_file:
loaded_network_json_file.write(json.dumps(env.loaded_network_json, indent=2))
if env.node_stats:
with open(
"{}/node_stats.txt".format(env.path_to_results), "w"
) as node_stats_file:
with open(
"{}/node_stats_total.txt".format(env.path_to_results), "w"
) as total_node_stats_file:
output_node_stats(
env.network_nodes, node_stats_file, env.node_stats_history
)
with open(
"{}/msg_history.txt".format(env.path_to_results), "w"
) as msg_history_file:
output_msg_history(BaseNode.msg_history, msg_history_file)
with open(
"{}/msg_history.csv".format(env.path_to_results), "w"
) as msg_history_file:
output_msg_history_tab(BaseNode.msg_history, msg_history_file)
if env.final_node_states:
with open(
"{}/sim_end_state.txt".format(env.path_to_results), "w"
) as sim_end_state_file:
output_sim_end_state(env, sim_end_state_file)
print(
" 100%|{:8.2f}|{}| CelerNet |[ Simulator ]|00000000-0000-0000-000000000000|Session token: {}".format(
env.now,
env.now_datetime().isoformat(timespec="microseconds"),
env.this_runs_uuid,
)
)
print(
" 100%|{:8.2f}|{}| CelerNet |[ Simulator ]|00000000-0000-0000-000000000000|Done.".format(
env.now, env.now_datetime().isoformat(timespec="microseconds")
)
)
class Arguments:
def __init__(
self,
model_file,
seed,
end_simtime,
epoch,
terminal,
node_stats,
node_stats_history,
initial_node_states,
final_node_states,
real_time_mode,
real_time_strict,
real_time_factor,
network_name,
promise_threads,
):
self.model_file = model_file
self.seed = seed
self.end_simtime = end_simtime
self.epoch = epoch
self.terminal = terminal
self.node_stats = node_stats
self.node_stats_history = node_stats_history
        self.initial_node_states = initial_node_states
self.final_node_states = final_node_states
self.real_time_mode = real_time_mode
self.real_time_strict = real_time_strict
self.real_time_factor = real_time_factor
self.network_name = network_name
self.promise_threads = promise_threads
def main(
model_file,
seed=9001,
end_simtime=200,
epoch=datetime.datetime.now().isoformat() + "Z",
terminal=False,
node_stats=False,
node_stats_history=False,
initial_node_states=False,
final_node_states=False,
real_time_mode=False,
real_time_strict=False,
real_time_factor=1.0,
network_name="Default_",
promise_threads=0,
):
"""Main thread
Args:
model_file: File that contains the network model. Can be an .yml, .json
        seed: integer used to seed the random stream, for reproducible runs.
end_simtime: runs sim until this SimTime is reached.
epoch: Sim Start Date/Time. Defaults to now.
terminal: writes the log to the terminal instead of the output file
node_stats: Writes out Node stats data.
node_stats_history: Writes out Node stats data and lists the first/last 30 messages to the node.
initial_node_states: Write initial node state to file before sim is run
        final_node_states: Write final node state to file after sim is run
real_time_mode: runs the sim via real_time clock mode
real_time_strict: if set, throws an error if a process takes more actual time than given in real time mode.
real_time_factor: determines time unit for real_time mode. Default 1 unit = one second
promise_threads: creates multiprocessing threads to parallelize node promises
"""
# env required by the simpy frameworks
env = (
simpy.rt.RealtimeEnvironment(strict=real_time_strict)
if real_time_mode
else simpy.Environment()
)
args = Arguments(
model_file,
seed,
end_simtime,
epoch,
terminal,
node_stats,
node_stats_history,
initial_node_states,
final_node_states,
real_time_mode,
real_time_strict,
real_time_factor,
network_name,
promise_threads,
)
# configure the environment
setup_env(env, args)
print(
" 0%| 0.00|{}| CelerNet |[ Simulator ]|00000000-0000-0000-000000000000|Session token: {}".format(
env.start_datetime, env.this_runs_uuid
)
)
# change the where to be an UUID representing the run
# Setup the output log file, don't use system logging until we can figure out
# how to play nice with celery logging
with open("{}/simulation.log".format(env.path_to_results), "w") as sim_log:
with open("{}/node_log.txt".format(env.path_to_results), "w") as node_log:
env.node_log = node_log
env.node_log.write(
"SimTime\tNode\tData_ID\tData_Size\tWait_time\tProcessing_time\tDelay_to_Next\n"
)
orig_stdout = sys.stdout
if not args.terminal:
sys.stdout = sim_log
print(
" 0%| 0.00|{}| CelerNet |[ Simulator ]|00000000-0000-0000-000000000000|Using Random seed: {}".format(
env.start_datetime, env.seed
)
)
print(
" 0%| 0.00|{}| CelerNet |[ Simulator ]|00000000-0000-0000-000000000000|Session token: {}".format(
env.start_datetime, env.this_runs_uuid
)
)
filename = args.model_file
SimStop = args.end_simtime
network_nodes, env = runSim(filename, SimStop, env)
print(
" 100%|{:8.2f}|{}| CelerNet |[ Simulator ]|00000000-0000-0000-000000000000|Session token: {}".format(
env.now,
env.now_datetime().isoformat(timespec="microseconds"),
env.this_runs_uuid,
)
)
sys.stdout = orig_stdout
# grab all of the stats at the end of the simulation
postprocess_network(env)
# Run this code if called directly
if __name__ == "__main__":
import fire
fire.Fire(main)
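# Hedged usage sketch (illustrative): python-fire maps main()'s parameters to CLI flags,
# so the simulator can be launched e.g. as
#   python astroNS.py --model_file network.yml --end_simtime 500 --terminal True
# (the model file name here is an assumption).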
| 29.884097
| 160
| 0.634617
|
46036e877174ef89ec2bedc6c9e54f20b059e5e2
| 11,982
|
py
|
Python
|
tests/test_graphql.py
|
ammogcoder/saleor
|
e4160652b68de002b51708a775050c95bfd1a3a5
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_graphql.py
|
ammogcoder/saleor
|
e4160652b68de002b51708a775050c95bfd1a3a5
|
[
"BSD-3-Clause"
] | 1
|
2022-02-10T12:53:30.000Z
|
2022-02-10T12:53:30.000Z
|
tests/test_graphql.py
|
ammogcoder/saleor
|
e4160652b68de002b51708a775050c95bfd1a3a5
|
[
"BSD-3-Clause"
] | 1
|
2020-04-15T08:36:47.000Z
|
2020-04-15T08:36:47.000Z
|
import json
import graphene
import pytest
from django.shortcuts import reverse
from saleor.product.models import Category, Product, ProductAttribute
from .utils import get_graphql_content
def test_category_query(client, product_in_stock):
category = Category.objects.first()
query = '''
query {
category(id: "%(category_pk)s") {
id
name
ancestors {
edges {
node {
name
}
}
}
children {
edges {
node {
name
}
}
}
siblings {
edges {
node {
name
}
}
}
}
}
''' % {'category_pk': graphene.Node.to_global_id('Category', category.pk)}
response = client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
category_data = content['data']['category']
assert category_data is not None
assert category_data['name'] == category.name
assert (
len(category_data['ancestors']['edges']) ==
category.get_ancestors().count())
assert (
len(category_data['children']['edges']) ==
category.get_children().count())
assert (
len(category_data['siblings']['edges']) ==
category.get_siblings().count())
def test_fetch_all_products(client, product_in_stock):
query = '''
query {
products {
totalCount
edges {
node {
id
}
}
}
}
'''
response = client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
num_products = Product.objects.count()
assert content['data']['products']['totalCount'] == num_products
assert len(content['data']['products']['edges']) == num_products
@pytest.mark.django_db
def test_fetch_unavailable_products(client, product_in_stock):
Product.objects.update(is_published=False)
query = '''
query {
products {
totalCount
edges {
node {
id
}
}
}
}
'''
response = client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
assert content['data']['products']['totalCount'] == 0
assert not content['data']['products']['edges']
def test_product_query(client, product_in_stock):
category = Category.objects.first()
product = category.products.first()
query = '''
query {
category(id: "%(category_id)s") {
products {
edges {
node {
id
name
url
thumbnailUrl
images { url }
variants {
name
stockQuantity
}
availability {
available,
priceRange {
minPrice {
gross
net
currency
}
}
}
}
}
}
}
}
''' % {'category_id': graphene.Node.to_global_id('Category', category.id)}
response = client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
assert content['data']['category'] is not None
product_edges_data = content['data']['category']['products']['edges']
assert len(product_edges_data) == category.products.count()
product_data = product_edges_data[0]['node']
assert product_data['name'] == product.name
assert product_data['url'] == product.get_absolute_url()
gross = product_data['availability']['priceRange']['minPrice']['gross']
assert float(gross) == float(product.price.gross)
def test_filter_product_by_category(client, product_in_stock):
category = product_in_stock.category
query = '''
query getProducts($categoryId: ID) {
products(category: $categoryId) {
edges {
node {
name
}
}
}
}
'''
response = client.post(
reverse('api'),
{
'query': query,
'variables': json.dumps(
{
'categoryId': graphene.Node.to_global_id(
'Category', category.id)}),
'operationName': 'getProducts'})
content = get_graphql_content(response)
assert 'errors' not in content
product_data = content['data']['products']['edges'][0]['node']
assert product_data['name'] == product_in_stock.name
def test_fetch_product_by_id(client, product_in_stock):
query = '''
query ($productId: ID!) {
node(id: $productId) {
... on Product {
name
}
}
}
'''
response = client.post(
reverse('api'),
{
'query': query,
'variables': json.dumps(
{
'productId': graphene.Node.to_global_id(
'Product', product_in_stock.id)})})
content = get_graphql_content(response)
assert 'errors' not in content
product_data = content['data']['node']
assert product_data['name'] == product_in_stock.name
def test_filter_product_by_attributes(client, product_in_stock):
product_attr = product_in_stock.product_type.product_attributes.first()
category = product_in_stock.category
attr_value = product_attr.values.first()
filter_by = '%s:%s' % (product_attr.slug, attr_value.slug)
query = '''
query {
category(id: "%(category_id)s") {
products(attributes: ["%(filter_by)s"]) {
edges {
node {
name
}
}
}
}
}
''' % {
'category_id': graphene.Node.to_global_id('Category', category.id),
'filter_by': filter_by}
response = client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
product_data = content['data']['category']['products']['edges'][0]['node']
assert product_data['name'] == product_in_stock.name
def test_attributes_query(client, product_in_stock):
attributes = ProductAttribute.objects.prefetch_related('values')
query = '''
query {
attributes {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
'''
response = client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
attributes_data = content['data']['attributes']['edges']
assert len(attributes_data) == attributes.count()
def test_attributes_in_category_query(client, product_in_stock):
category = Category.objects.first()
query = '''
query {
attributes(inCategory: "%(category_id)s") {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
''' % {'category_id': graphene.Node.to_global_id('Category', category.id)}
response = client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
attributes_data = content['data']['attributes']['edges']
assert len(attributes_data) == ProductAttribute.objects.count()
def test_real_query(client, product_in_stock):
category = product_in_stock.category
product_attr = product_in_stock.product_type.product_attributes.first()
category = product_in_stock.category
attr_value = product_attr.values.first()
filter_by = '%s:%s' % (product_attr.slug, attr_value.slug)
query = '''
query Root($categoryId: ID!, $sortBy: String, $first: Int, $attributesFilter: [AttributeScalar], $minPrice: Float, $maxPrice: Float) {
category(id: $categoryId) {
...CategoryPageFragmentQuery
__typename
}
attributes(inCategory: $categoryId) {
edges {
node {
...ProductFiltersFragmentQuery
__typename
}
}
}
}
fragment CategoryPageFragmentQuery on Category {
id
name
url
ancestors {
edges {
node {
name
id
url
__typename
}
}
}
children {
edges {
node {
name
id
url
slug
__typename
}
}
}
products(first: $first, sortBy: $sortBy, attributes: $attributesFilter, price_Gte: $minPrice, price_Lte: $maxPrice) {
...ProductListFragmentQuery
__typename
}
__typename
}
fragment ProductListFragmentQuery on ProductCountableConnection {
edges {
node {
...ProductFragmentQuery
__typename
}
__typename
}
pageInfo {
hasNextPage
__typename
}
__typename
}
fragment ProductFragmentQuery on Product {
id
name
price {
currency
gross
grossLocalized
net
__typename
}
availability {
...ProductPriceFragmentQuery
__typename
}
thumbnailUrl1x: thumbnailUrl(size: "255x255")
thumbnailUrl2x: thumbnailUrl(size: "510x510")
url
__typename
}
fragment ProductPriceFragmentQuery on ProductAvailability {
available
discount {
gross
__typename
}
priceRange {
maxPrice {
gross
grossLocalized
currency
__typename
}
minPrice {
gross
grossLocalized
currency
__typename
}
__typename
}
__typename
}
fragment ProductFiltersFragmentQuery on ProductAttribute {
id
name
slug
values {
id
name
slug
color
__typename
}
__typename
}
'''
response = client.post(
'/graphql/', {
'query': query,
'variables': json.dumps(
{
'categoryId': graphene.Node.to_global_id(
'Category', category.id),
'sortBy': 'name',
'first': 1,
'attributesFilter': [filter_by]})})
content = get_graphql_content(response)
assert 'errors' not in content
| 28.393365
| 138
| 0.485144
|
df2e1558ca112a954b404856950bfe5297402869
| 7,436
|
py
|
Python
|
gpvdm_gui/gui/gl_mesh.py
|
roderickmackenzie/gpvdm
|
914fd2ee93e7202339853acaec1d61d59b789987
|
[
"BSD-3-Clause"
] | 12
|
2016-09-13T08:58:13.000Z
|
2022-01-17T07:04:52.000Z
|
gpvdm_gui/gui/gl_mesh.py
|
roderickmackenzie/gpvdm
|
914fd2ee93e7202339853acaec1d61d59b789987
|
[
"BSD-3-Clause"
] | 3
|
2017-11-11T12:33:02.000Z
|
2019-03-08T00:48:08.000Z
|
gpvdm_gui/gui/gl_mesh.py
|
roderickmackenzie/gpvdm
|
914fd2ee93e7202339853acaec1d61d59b789987
|
[
"BSD-3-Clause"
] | 6
|
2019-01-03T06:17:12.000Z
|
2022-01-01T15:59:00.000Z
|
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package gl_mesh
#  The gl_mesh class for the OpenGL display.
#
import sys
from math import fabs
try:
from OpenGL.GL import *
from OpenGL.GLU import *
from PyQt5 import QtOpenGL
from PyQt5.QtOpenGL import QGLWidget
from gl_lib import val_to_rgb
from PyQt5.QtWidgets import QMenu
except:
pass
from PyQt5.QtCore import QTimer
from inp import inp
from epitaxy import get_epi
from mesh import get_mesh
from gl_base_object import gl_base_object
from gpvdm_json import gpvdm_data
class gl_mesh():
def draw_mesh(self):
x=[]
y=[]
z=[]
data=gpvdm_data()
epi=get_epi()
device_start=epi.get_device_start(data)
mesh=get_mesh()
y,temp=mesh.y.calculate_points()
x,temp=mesh.x.calculate_points()
z,temp=mesh.z.calculate_points()
x=self.scale.project_m2screen_x(x)
y=self.scale.project_m2screen_y(y)
z=self.scale.project_m2screen_z(z)
glLineWidth(3)
if mesh.y.circuit_model==False:
self.drift_diffusion_mesh()
def drift_diffusion_mesh(self):
x=[]
y=[]
z=[]
data=gpvdm_data()
epi=get_epi()
device_start=epi.get_device_start(data)
mesh=get_mesh()
y,temp=mesh.y.calculate_points()
x,temp=mesh.x.calculate_points()
z,temp=mesh.z.calculate_points()
for i in range(0,len(y)):
y[i]=y[i]+device_start
x=self.scale.project_m2screen_x(x)
y=self.scale.project_m2screen_y(y)
z=self.scale.project_m2screen_z(z)
glLineWidth(3)
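		# Walk every mesh point: a green ball marks the selected energy-slice point,
		# blue balls mark the selected 1D-slice column, red balls mark all other
		# points; red lines join neighbouring points along x, y and z.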
for zi in range(0,len(z)):
for xi in range(0,len(x)):
for yi in range(0,len(y)):
name="mesh:"+str(xi)+":"+str(yi)+":"+str(zi)
if yi==self.dump_energy_slice_ypos and xi==self.dump_energy_slice_xpos and zi==self.dump_energy_slice_zpos:
a=gl_base_object()
a.id=[name]
a.type="ball"
a.x=x[xi]
a.y=y[yi]
a.z=z[zi]
a.dx=0.08
a.r=0.0
a.g=1.0
a.b=0.0
self.gl_objects_add(a)
elif xi==self.dump_1d_slice_xpos and zi==self.dump_1d_slice_zpos:
a=gl_base_object()
a.id=[name]
a.type="ball"
a.x=x[xi]
a.y=y[yi]
a.z=z[zi]
a.dx=0.05
a.r=0.0
a.g=0.0
a.b=1.0
self.gl_objects_add(a)
else:
a=gl_base_object()
a.id=[name]
a.type="ball"
a.x=x[xi]
a.y=y[yi]
a.z=z[zi]
a.dx=0.05
if self.dump_verbose_electrical_solver_results==False:
a.alpha=0.5
else:
a.alpha=0.9
a.r=1.0
a.g=0.0
a.b=0.0
self.gl_objects_add(a)
if yi!=len(y)-1:
a=gl_base_object()
a.id=["electrical_mesh"]
a.type="line"
a.x=x[xi]
a.y=y[yi]
a.z=z[zi]
a.dx=0.0
a.dy=y[yi+1]-y[yi]
a.dz=0.0
a.r=1.0
a.g=0.0
a.b=0.0
self.gl_objects_add(a)
if xi!=len(x)-1:
a=gl_base_object()
a.id=["electrical_mesh"]
a.type="line"
a.x=x[xi]
a.y=y[yi]
a.z=z[zi]
a.dx=x[xi+1]-x[xi]
a.dy=0.0
a.dz=0.0
a.r=1.0
a.g=0.0
a.b=0.0
self.gl_objects_add(a)
if zi!=len(z)-1:
a=gl_base_object()
a.id=["electrical_mesh"]
a.type="line"
a.x=x[xi]
a.y=y[yi]
a.z=z[zi]
a.dx=0.0
a.dy=0.0
a.dz=z[zi+1]-z[zi]
a.r=1.0
a.g=0.0
a.b=0.0
self.gl_objects_add(a)
def mesh_menu(self,event):
view_menu = QMenu(self)
menu = QMenu(self)
view=menu.addMenu(_("Dump"))
if self.dump_energy_slice_xpos==-1:
action=view.addAction(_("Dump slice in energy space"))
else:
action=view.addAction(_("Don't dump slice in energy space"))
action.triggered.connect(self.menu_energy_slice_dump)
if self.dump_1d_slice_xpos==-1:
action=view.addAction(_("Dump 1D slices"))
else:
action=view.addAction(_("Don't dump 1D slice"))
action.triggered.connect(self.menu_1d_slice_dump)
if self.dump_verbose_electrical_solver_results==False:
action=view.addAction(_("Set verbose electrical solver dumping"))
else:
action=view.addAction(_("Unset verbose electrical solver dumping"))
action.triggered.connect(self.menu_dump_verbose_electrical_solver_results)
menu.exec_(event.globalPos())
def menu_energy_slice_dump(self):
obj=self.gl_objects_get_first_selected()
if obj!=None:
s=obj.id_starts_with("mesh").split(":")
x=int(s[1])
y=int(s[2])
z=int(s[3])
if self.dump_energy_slice_xpos==x and self.dump_energy_slice_ypos==y and self.dump_energy_slice_zpos==z:
self.dump_energy_slice_xpos=-1
self.dump_energy_slice_ypos=-1
self.dump_energy_slice_zpos=-1
else:
self.dump_energy_slice_xpos=x
self.dump_energy_slice_ypos=y
self.dump_energy_slice_zpos=z
mesh=get_mesh()
f=inp()
f.load("dump.inp")
f.replace("#dump_energy_slice_xpos",str(x))
f.replace("#dump_energy_slice_ypos",str(len(mesh.y.points)-1-y))
f.replace("#dump_energy_slice_zpos",str(z))
f.save()
self.gl_objects_remove_regex("mesh")
self.draw_mesh()
self.do_draw()
def menu_1d_slice_dump(self):
obj=self.gl_objects_get_first_selected()
if obj!=None:
s=obj.id_starts_with("mesh").split(":")
x=int(s[1])
y=int(s[2])
z=int(s[3])
if self.dump_1d_slice_xpos==x and self.dump_1d_slice_zpos==z:
self.dump_1d_slice_xpos=-1
self.dump_1d_slice_zpos=-1
else:
self.dump_1d_slice_xpos=x
self.dump_1d_slice_zpos=z
f=inp()
f.load("dump.inp")
f.replace("#dump_1d_slice_xpos",str(self.dump_1d_slice_xpos))
f.replace("#dump_1d_slice_zpos",str(self.dump_1d_slice_zpos))
f.save()
self.gl_objects_remove_regex("mesh")
self.draw_mesh()
self.do_draw()
def menu_dump_verbose_electrical_solver_results(self):
self.dump_verbose_electrical_solver_results = not self.dump_verbose_electrical_solver_results
f=inp()
f.load("dump.inp")
f.replace("#dump_verbose_electrical_solver_results",str(self.dump_verbose_electrical_solver_results))
f.save()
self.gl_objects_remove_regex("mesh")
self.draw_mesh()
self.do_draw()
def project_object_through_electrical_mesh(self,o):
mesh=get_mesh()
mesh_with_gaps=False
for l in mesh.x.layers:
if l.points==0:
mesh_with_gaps=True
break
if mesh_with_gaps==False:
self.gl_objects_add(o)
else:
for l in mesh.x.layers:
if l.points!=0:
new_obj=gl_base_object()
new_obj.copy(o)
#print(layer,l.start,l.end-l.start)
new_obj.xyz.x=self.scale.project_m2screen_x(l.start)
new_obj.dxyz.x=(l.end-l.start)*self.scale.x_mul
#print(layer,o.xyz.x,o.dxyz.x)
self.gl_objects_add(new_obj)
| 24.142857
| 112
| 0.666891
|
daecbcc325dd2460b18cdc548a030152904b1bae
| 3,295
|
py
|
Python
|
opentech/apply/funds/management/commands/migrate_fellowship_proposals.py
|
JakabGy/hypha
|
32634080ba1cb369f07f27f6616041e4eca8dbf2
|
[
"BSD-3-Clause"
] | null | null | null |
opentech/apply/funds/management/commands/migrate_fellowship_proposals.py
|
JakabGy/hypha
|
32634080ba1cb369f07f27f6616041e4eca8dbf2
|
[
"BSD-3-Clause"
] | null | null | null |
opentech/apply/funds/management/commands/migrate_fellowship_proposals.py
|
JakabGy/hypha
|
32634080ba1cb369f07f27f6616041e4eca8dbf2
|
[
"BSD-3-Clause"
] | null | null | null |
from opentech.apply.funds.management.commands.migration_base import MigrateCommand
class Command(MigrateCommand):
CONTENT_TYPE = "fund"
FUND_NAME = "Fellowship (archive fund)"
ROUND_NAME = "Fellowship (archive round)"
APPLICATION_TYPE = "proposal"
STREAMFIELD_MAP = {
"title": {
"id": "title",
"type": "direct",
},
"field_proposal_common_name": {
"id": "full_name",
"type": "value",
"key": "safe_value",
},
"field_proposal_host_text": {
"id": "bc03235e-3c78-4770-9fc2-97feb93c2c8c",
"type": "value",
"key": "safe_value",
},
"field_proposal_start_date": {
"id": "672cb6f1-335c-4005-a0f1-46c414feda06",
"type": "value",
"key": "value",
},
"field_proposal_completion_date": {
"id": "8262f209-f084-4a79-9dfa-2d18137119bb",
"type": "value",
"key": "value",
},
"field_proposal_objectives": {
"id": "af2c5f38-7257-4295-87fa-787060e845ef",
"type": "value",
"key": "safe_value",
},
"field_proposal_activities": {
"id": "3c521847-7642-4cae-aca9-d5336ad8962d",
"type": "value",
"key": "safe_value",
},
"field_proposal_sustainability": {
"id": "fd0eb7ea-e054-4bcf-9580-eb672d44745c",
"type": "value",
"key": "safe_value",
},
"field_proposal_request_questions": {
"id": "b6d71932-98c2-4ce8-a5e6-454a1f800d21",
"type": "merge_value",
"key": "safe_value",
},
"field_proposal_upload": {
"id": "30dfa46e-f656-46c9-9efc-bab9029f2008",
"type": "file",
# TODO: finish mapping
},
}
REQUEST_QUESTION_MAP = {
"3618": {
0: "How will this project leverage the resources made available by the host organization?",
1: "Please detail the specific steps the applicant will take to ensure the project outcomes reach non-technical audiences",
2: "In what ways will this effort advance understanding in the relevant field?",
3: "What risks or variables could jeopardize either the outcomes of the project or the safety of the applicant?",
4: "How is the applicant well equipped to carry out the technical work proposed? (if applicable)",
},
"3681": {
0: "Please detail the specific steps the applicant will take to ensure the project outcomes reach the internet freedom technology community.",
1: "What risks could jeopardize the fellowship project?",
2: "How would those risks be mitigated or addressed?",
3: "How will your work be sustained following the completion of your fellowship?",
4: "If your project includes public gatherings, do you have a code of conduct? If yes, please list below or provide links to where it can be publicly accessed.",
5: "Please include letters of support for the organization or organizations you would be working with (Please attach a file at the bottom of the application)",
},
}
| 40.679012
| 173
| 0.579059
|
804cd0c6674e7b320e0e78c5fa8f8cedf105514a
| 1,872
|
py
|
Python
|
setup.py
|
benevpi/CIRCUITPYTHON_ifttt
|
ec8851c16b2a3bacde02654a104842b6bc1fe3ed
|
[
"MIT"
] | null | null | null |
setup.py
|
benevpi/CIRCUITPYTHON_ifttt
|
ec8851c16b2a3bacde02654a104842b6bc1fe3ed
|
[
"MIT"
] | null | null | null |
setup.py
|
benevpi/CIRCUITPYTHON_ifttt
|
ec8851c16b2a3bacde02654a104842b6bc1fe3ed
|
[
"MIT"
] | 1
|
2020-12-07T13:35:40.000Z
|
2020-12-07T13:35:40.000Z
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='adafruit-circuitpython-ifttt',
use_scm_version=True,
setup_requires=['setuptools_scm'],
description='A simple link to If This Then That (IFTTT) webhooks',
long_description=long_description,
long_description_content_type='text/x-rst',
# The project's main homepage.
url='https://github.com/adafruit/Adafruit_CircuitPython_ifttt',
# Author details
author='Adafruit Industries',
author_email='circuitpython@adafruit.com',
install_requires=[
'Adafruit-Blinka'
],
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Hardware',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='adafruit blinka circuitpython micropython ifttt IoT',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# TODO: IF LIBRARY FILES ARE A PACKAGE FOLDER,
# CHANGE `py_modules=['...']` TO `packages=['...']`
py_modules=['ifttt'],
)
| 29.25
| 72
| 0.674145
|
90873cb905284fd68f19e61070028890ef086897
| 23,972
|
py
|
Python
|
mediagoblin/tests/test_api.py
|
saksham1115/mediagoblin
|
41302ad2b622b340caeb13339338ab3a5d0f7e6b
|
[
"CC0-1.0"
] | null | null | null |
mediagoblin/tests/test_api.py
|
saksham1115/mediagoblin
|
41302ad2b622b340caeb13339338ab3a5d0f7e6b
|
[
"CC0-1.0"
] | null | null | null |
mediagoblin/tests/test_api.py
|
saksham1115/mediagoblin
|
41302ad2b622b340caeb13339338ab3a5d0f7e6b
|
[
"CC0-1.0"
] | null | null | null |
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
try:
import mock
except ImportError:
import unittest.mock as mock
import pytest
from webtest import AppError
from .resources import GOOD_JPG
from mediagoblin import mg_globals
from mediagoblin.db.models import User, Activity, MediaEntry, TextComment
from mediagoblin.tools.routing import extract_url_arguments
from mediagoblin.tests.tools import fixture_add_user
from mediagoblin.moderation.tools import take_away_privileges
class TestAPI(object):
""" Test mediagoblin's pump.io complient APIs """
@pytest.fixture(autouse=True)
def setup(self, test_app):
self.test_app = test_app
self.db = mg_globals.database
self.user = fixture_add_user(privileges=[u'active', u'uploader', u'commenter'])
self.other_user = fixture_add_user(
username="otheruser",
privileges=[u'active', u'uploader', u'commenter']
)
self.active_user = self.user
def _activity_to_feed(self, test_app, activity, headers=None):
""" Posts an activity to the user's feed """
if headers:
headers.setdefault("Content-Type", "application/json")
else:
headers = {"Content-Type": "application/json"}
with self.mock_oauth():
response = test_app.post(
"/api/user/{0}/feed".format(self.active_user.username),
json.dumps(activity),
headers=headers
)
return response, json.loads(response.body.decode())
def _upload_image(self, test_app, image):
""" Uploads and image to MediaGoblin via pump.io API """
data = open(image, "rb").read()
headers = {
"Content-Type": "image/jpeg",
"Content-Length": str(len(data))
}
with self.mock_oauth():
response = test_app.post(
"/api/user/{0}/uploads".format(self.active_user.username),
data,
headers=headers
)
image = json.loads(response.body.decode())
return response, image
def _post_image_to_feed(self, test_app, image):
""" Posts an already uploaded image to feed """
activity = {
"verb": "post",
"object": image,
}
return self._activity_to_feed(test_app, activity)
def mocked_oauth_required(self, *args, **kwargs):
""" Mocks mediagoblin.decorator.oauth_required to always validate """
def fake_controller(controller, request, *args, **kwargs):
request.user = User.query.filter_by(id=self.active_user.id).first()
return controller(request, *args, **kwargs)
def oauth_required(c):
return lambda *args, **kwargs: fake_controller(c, *args, **kwargs)
return oauth_required
def mock_oauth(self):
""" Returns a mock.patch for the oauth_required decorator """
return mock.patch(
target="mediagoblin.decorators.oauth_required",
new_callable=self.mocked_oauth_required
)
def test_can_post_image(self, test_app):
""" Tests that an image can be posted to the API """
# First request we need to do is to upload the image
response, image = self._upload_image(test_app, GOOD_JPG)
# I should have got certain things back
assert response.status_code == 200
assert "id" in image
assert "fullImage" in image
assert "url" in image["fullImage"]
assert "url" in image
assert "author" in image
assert "published" in image
assert "updated" in image
assert image["objectType"] == "image"
# Check that we got the response we're expecting
response, _ = self._post_image_to_feed(test_app, image)
assert response.status_code == 200
def test_unable_to_upload_as_someone_else(self, test_app):
""" Test that can't upload as someoen else """
data = open(GOOD_JPG, "rb").read()
headers = {
"Content-Type": "image/jpeg",
"Content-Length": str(len(data))
}
with self.mock_oauth():
# Will be self.user trying to upload as self.other_user
with pytest.raises(AppError) as excinfo:
test_app.post(
"/api/user/{0}/uploads".format(self.other_user.username),
data,
headers=headers
)
assert "403 FORBIDDEN" in excinfo.value.args[0]
def test_unable_to_post_feed_as_someone_else(self, test_app):
""" Tests that can't post an image to someone else's feed """
response, data = self._upload_image(test_app, GOOD_JPG)
activity = {
"verb": "post",
"object": data
}
headers = {
"Content-Type": "application/json",
}
with self.mock_oauth():
with pytest.raises(AppError) as excinfo:
test_app.post(
"/api/user/{0}/feed".format(self.other_user.username),
json.dumps(activity),
headers=headers
)
assert "403 FORBIDDEN" in excinfo.value.args[0]
def test_only_able_to_update_own_image(self, test_app):
""" Test's that the uploader is the only person who can update an image """
response, data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, data)
activity = {
"verb": "update",
"object": data["object"],
}
headers = {
"Content-Type": "application/json",
}
        # Let's change the image uploader to self.other_user; this is easier than
        # uploading the image as someone else, given how self.mocked_oauth_required
        # and self._upload_image work.
media = MediaEntry.query.filter_by(public_id=data["object"]["id"]).first()
media.actor = self.other_user.id
media.save()
        # Now let's try to edit the image as self.user; this should produce a 403 error.
with self.mock_oauth():
with pytest.raises(AppError) as excinfo:
test_app.post(
"/api/user/{0}/feed".format(self.user.username),
json.dumps(activity),
headers=headers
)
assert "403 FORBIDDEN" in excinfo.value.args[0]
def test_upload_image_with_filename(self, test_app):
""" Tests that you can upload an image with filename and description """
response, data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, data)
image = data["object"]
# Now we need to add a title and description
title = "My image ^_^"
description = "This is my super awesome image :D"
license = "CC-BY-SA"
image["displayName"] = title
image["content"] = description
image["license"] = license
activity = {"verb": "update", "object": image}
with self.mock_oauth():
response = test_app.post(
"/api/user/{0}/feed".format(self.user.username),
json.dumps(activity),
headers={"Content-Type": "application/json"}
)
image = json.loads(response.body.decode())["object"]
# Check everything has been set on the media correctly
media = MediaEntry.query.filter_by(public_id=image["id"]).first()
assert media.title == title
assert media.description == description
assert media.license == license
# Check we're being given back everything we should on an update
assert image["id"] == media.public_id
assert image["displayName"] == title
assert image["content"] == description
assert image["license"] == license
def test_only_uploaders_post_image(self, test_app):
""" Test that only uploaders can upload images """
# Remove uploader permissions from user
take_away_privileges(self.user.username, u"uploader")
# Now try and upload a image
data = open(GOOD_JPG, "rb").read()
headers = {
"Content-Type": "image/jpeg",
"Content-Length": str(len(data)),
}
with self.mock_oauth():
with pytest.raises(AppError) as excinfo:
test_app.post(
"/api/user/{0}/uploads".format(self.user.username),
data,
headers=headers
)
# Assert that we've got a 403
assert "403 FORBIDDEN" in excinfo.value.args[0]
def test_object_endpoint(self, test_app):
""" Tests that object can be looked up at endpoint """
# Post an image
response, data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, data)
# Now lookup image to check that endpoint works.
image = data["object"]
assert "links" in image
assert "self" in image["links"]
# Get URI and strip testing host off
object_uri = image["links"]["self"]["href"]
object_uri = object_uri.replace("http://localhost:80", "")
with self.mock_oauth():
request = test_app.get(object_uri)
image = json.loads(request.body.decode())
entry = MediaEntry.query.filter_by(public_id=image["id"]).first()
assert request.status_code == 200
assert "image" in image
assert "fullImage" in image
assert "pump_io" in image
assert "links" in image
def test_post_comment(self, test_app):
""" Tests that I can post an comment media """
# Upload some media to comment on
response, data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, data)
content = "Hai this is a comment on this lovely picture ^_^"
activity = {
"verb": "post",
"object": {
"objectType": "comment",
"content": content,
"inReplyTo": data["object"],
}
}
response, comment_data = self._activity_to_feed(test_app, activity)
assert response.status_code == 200
# Find the objects in the database
media = MediaEntry.query.filter_by(public_id=data["object"]["id"]).first()
comment = media.get_comments()[0].comment()
# Tests that it matches in the database
assert comment.actor == self.user.id
assert comment.content == content
# Test that the response is what we should be given
assert comment.content == comment_data["object"]["content"]
def test_unable_to_post_comment_as_someone_else(self, test_app):
""" Tests that you're unable to post a comment as someone else. """
# Upload some media to comment on
response, data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, data)
activity = {
"verb": "post",
"object": {
"objectType": "comment",
"content": "comment commenty comment ^_^",
"inReplyTo": data["object"],
}
}
headers = {
"Content-Type": "application/json",
}
with self.mock_oauth():
with pytest.raises(AppError) as excinfo:
test_app.post(
"/api/user/{0}/feed".format(self.other_user.username),
json.dumps(activity),
headers=headers
)
assert "403 FORBIDDEN" in excinfo.value.args[0]
def test_unable_to_update_someone_elses_comment(self, test_app):
""" Test that you're able to update someoen elses comment. """
# Upload some media to comment on
response, data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, data)
activity = {
"verb": "post",
"object": {
"objectType": "comment",
"content": "comment commenty comment ^_^",
"inReplyTo": data["object"],
}
}
headers = {
"Content-Type": "application/json",
}
# Post the comment.
response, comment_data = self._activity_to_feed(test_app, activity)
        # Change who uploaded the comment, as that's easier than posting it as someone else.
comment = TextComment.query.filter_by(public_id=comment_data["object"]["id"]).first()
comment.actor = self.other_user.id
comment.save()
# Update the comment as someone else.
comment_data["object"]["content"] = "Yep"
activity = {
"verb": "update",
"object": comment_data["object"]
}
with self.mock_oauth():
with pytest.raises(AppError) as excinfo:
test_app.post(
"/api/user/{0}/feed".format(self.user.username),
json.dumps(activity),
headers=headers
)
assert "403 FORBIDDEN" in excinfo.value.args[0]
def test_profile(self, test_app):
""" Tests profile endpoint """
uri = "/api/user/{0}/profile".format(self.user.username)
with self.mock_oauth():
response = test_app.get(uri)
profile = json.loads(response.body.decode())
assert response.status_code == 200
assert profile["preferredUsername"] == self.user.username
assert profile["objectType"] == "person"
assert "links" in profile
def test_user(self, test_app):
""" Test the user endpoint """
uri = "/api/user/{0}/".format(self.user.username)
with self.mock_oauth():
response = test_app.get(uri)
user = json.loads(response.body.decode())
assert response.status_code == 200
assert user["nickname"] == self.user.username
assert user["updated"] == self.user.created.isoformat()
assert user["published"] == self.user.created.isoformat()
# Test profile exists but self.test_profile will test the value
assert "profile" in response
def test_whoami_without_login(self, test_app):
""" Test that whoami endpoint returns error when not logged in """
with pytest.raises(AppError) as excinfo:
response = test_app.get("/api/whoami")
assert "401 UNAUTHORIZED" in excinfo.value.args[0]
def test_read_feed(self, test_app):
""" Test able to read objects from the feed """
response, image_data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, image_data)
uri = "/api/user/{0}/feed".format(self.active_user.username)
with self.mock_oauth():
response = test_app.get(uri)
feed = json.loads(response.body.decode())
assert response.status_code == 200
# Check it has the attributes it should
assert "displayName" in feed
assert "objectTypes" in feed
assert "url" in feed
assert "links" in feed
assert "author" in feed
assert "items" in feed
        # Check that the image I uploaded is there
assert feed["items"][0]["verb"] == "post"
assert feed["items"][0]["id"] == data["id"]
assert feed["items"][0]["object"]["objectType"] == "image"
assert feed["items"][0]["object"]["id"] == data["object"]["id"]
default_limit = 20
items_count = default_limit * 2
for i in range(items_count):
response, image_data = self._upload_image(test_app, GOOD_JPG)
self._post_image_to_feed(test_app, image_data)
items_count += 1 # because there already is one
#
# default returns default_limit items
#
with self.mock_oauth():
response = test_app.get(uri)
feed = json.loads(response.body.decode())
assert len(feed["items"]) == default_limit
#
        # silently ignore count and offset values that are
        # not a number
#
with self.mock_oauth():
response = test_app.get(uri + "?count=BAD&offset=WORSE")
feed = json.loads(response.body.decode())
assert len(feed["items"]) == default_limit
#
# if offset is less than default_limit items
# from the end of the feed, return less than
# default_limit
#
with self.mock_oauth():
near_the_end = items_count - default_limit / 2
response = test_app.get(uri + "?offset=%d" % near_the_end)
feed = json.loads(response.body.decode())
assert len(feed["items"]) < default_limit
#
# count=5 returns 5 items
#
with self.mock_oauth():
response = test_app.get(uri + "?count=5")
feed = json.loads(response.body.decode())
assert len(feed["items"]) == 5
def test_read_another_feed(self, test_app):
""" Test able to read objects from someone else's feed """
response, data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, data)
# Change the active user to someone else.
self.active_user = self.other_user
# Fetch the feed
url = "/api/user/{0}/feed".format(self.user.username)
with self.mock_oauth():
response = test_app.get(url)
feed = json.loads(response.body.decode())
assert response.status_code == 200
# Check it has the attributes it ought to.
assert "displayName" in feed
assert "objectTypes" in feed
assert "url" in feed
assert "links" in feed
assert "author" in feed
assert "items" in feed
# Assert the uploaded image is there
assert feed["items"][0]["verb"] == "post"
assert feed["items"][0]["id"] == data["id"]
assert feed["items"][0]["object"]["objectType"] == "image"
assert feed["items"][0]["object"]["id"] == data["object"]["id"]
def test_cant_post_to_someone_elses_feed(self, test_app):
""" Test that can't post to someone elses feed """
response, data = self._upload_image(test_app, GOOD_JPG)
self.active_user = self.other_user
with self.mock_oauth():
with pytest.raises(AppError) as excinfo:
self._post_image_to_feed(test_app, data)
assert "403 FORBIDDEN" in excinfo.value.args[0]
def test_object_endpoint_requestable(self, test_app):
""" Test that object endpoint can be requested """
response, data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, data)
object_id = data["object"]["id"]
with self.mock_oauth():
response = test_app.get(data["object"]["links"]["self"]["href"])
data = json.loads(response.body.decode())
assert response.status_code == 200
assert object_id == data["id"]
assert "url" in data
assert "links" in data
assert data["objectType"] == "image"
def test_delete_media_by_activity(self, test_app):
""" Test that an image can be deleted by a delete activity to feed """
response, data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, data)
object_id = data["object"]["id"]
activity = {
"verb": "delete",
"object": {
"id": object_id,
"objectType": "image",
}
}
response = self._activity_to_feed(test_app, activity)[1]
# Check the media is no longer in the database
media = MediaEntry.query.filter_by(public_id=object_id).first()
assert media is None
# Check we've been given the full delete activity back
assert "id" in response
assert response["verb"] == "delete"
assert "object" in response
assert response["object"]["id"] == object_id
assert response["object"]["objectType"] == "image"
def test_delete_comment_by_activity(self, test_app):
""" Test that a comment is deleted by a delete activity to feed """
# First upload an image to comment against
response, data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, data)
# Post a comment to delete
activity = {
"verb": "post",
"object": {
"objectType": "comment",
"content": "This is a comment.",
"inReplyTo": data["object"],
}
}
comment = self._activity_to_feed(test_app, activity)[1]
# Now delete the image
activity = {
"verb": "delete",
"object": {
"id": comment["object"]["id"],
"objectType": "comment",
}
}
delete = self._activity_to_feed(test_app, activity)[1]
# Verify the comment no longer exists
assert TextComment.query.filter_by(public_id=comment["object"]["id"]).first() is None
comment_id = comment["object"]["id"]
# Check we've got a delete activity back
assert "id" in delete
assert delete["verb"] == "delete"
assert "object" in delete
assert delete["object"]["id"] == comment["object"]["id"]
assert delete["object"]["objectType"] == "comment"
def test_edit_comment(self, test_app):
""" Test that someone can update their own comment """
# First upload an image to comment against
response, data = self._upload_image(test_app, GOOD_JPG)
response, data = self._post_image_to_feed(test_app, data)
# Post a comment to edit
activity = {
"verb": "post",
"object": {
"objectType": "comment",
"content": "This is a comment",
"inReplyTo": data["object"],
}
}
comment = self._activity_to_feed(test_app, activity)[1]
# Now create an update activity to change the content
activity = {
"verb": "update",
"object": {
"id": comment["object"]["id"],
"content": "This is my fancy new content string!",
"objectType": "comment",
},
}
comment = self._activity_to_feed(test_app, activity)[1]
# Verify the comment reflects the changes
model = TextComment.query.filter_by(public_id=comment["object"]["id"]).first()
assert model.content == activity["object"]["content"]
| 35.886228
| 93
| 0.584766
|
4a3648742bf60264b163a477fc1b74b0c0dc8e60
| 4,516
|
py
|
Python
|
benchmark/startQiskit1802.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit1802.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit1802.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=60
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[3]) # number=59
prog.h(input_qubit[4]) # number=21
prog.h(input_qubit[0]) # number=43
prog.cz(input_qubit[4],input_qubit[0]) # number=44
prog.h(input_qubit[0]) # number=45
prog.h(input_qubit[0]) # number=56
prog.cz(input_qubit[4],input_qubit[0]) # number=57
prog.h(input_qubit[0]) # number=58
prog.z(input_qubit[4]) # number=47
prog.cx(input_qubit[4],input_qubit[0]) # number=48
prog.h(input_qubit[0]) # number=37
prog.cz(input_qubit[4],input_qubit[0]) # number=38
prog.h(input_qubit[0]) # number=39
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
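    # Grover-style iteration count used above: floor((pi / 4) * sqrt(N)) with N = 2 ** n.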
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.rx(-1.0430087609918113,input_qubit[4]) # number=36
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=40
prog.cx(input_qubit[1],input_qubit[0]) # number=52
prog.x(input_qubit[0]) # number=53
prog.cx(input_qubit[1],input_qubit[0]) # number=54
prog.h(input_qubit[0]) # number=49
prog.cz(input_qubit[1],input_qubit[0]) # number=50
prog.h(input_qubit[0]) # number=51
prog.x(input_qubit[1]) # number=10
prog.rx(-0.06597344572538572,input_qubit[3]) # number=27
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.x(input_qubit[2]) # number=23
prog.h(input_qubit[2]) # number=28
prog.cz(input_qubit[0],input_qubit[2]) # number=29
prog.h(input_qubit[2]) # number=30
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[4]) # number=35
prog.h(input_qubit[0]) # number=17
prog.rx(2.4912829742967055,input_qubit[2]) # number=26
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[2]) # number=55
prog.h(input_qubit[2]) # number=25
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1802.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 32.489209
| 82
| 0.614925
|
613e5a61b14f569b04c926da6c8a8dca3dd36e2b
| 2,569
|
py
|
Python
|
src/deduplicate.py
|
hiyoung123/NewWordDetection
|
21f685c15ea2e2bb744ba5e8b6ee10bae00e854d
|
[
"MIT"
] | 6
|
2021-06-24T03:58:32.000Z
|
2022-01-21T11:55:36.000Z
|
src/deduplicate.py
|
hiyoung123/NewWordDetection
|
21f685c15ea2e2bb744ba5e8b6ee10bae00e854d
|
[
"MIT"
] | null | null | null |
src/deduplicate.py
|
hiyoung123/NewWordDetection
|
21f685c15ea2e2bb744ba5e8b6ee10bae00e854d
|
[
"MIT"
] | 1
|
2021-06-24T05:19:34.000Z
|
2021-06-24T05:19:34.000Z
|
#!usr/bin/env python
#-*- coding:utf-8 -*-
from src.simhash import SimHash
class DuplicateRemove:
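    """Near-duplicate detector built on SimHash fingerprints.
    The hash_size-bit fingerprint is split into block_num interleaved bands and
    each band is indexed separately; two documents whose fingerprints differ in
    at most block_num - 1 bits must agree on at least one band (pigeonhole), so
    they are recalled as candidates and then verified with the exact
    Hamming-distance check in similar().
    """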
def __init__(self, hash_size, block_num, dict_file='../data/dict.txt', stopwords=[]):
self.hash_size = hash_size
self.block_num = block_num
self.block_size = hash_size/block_num
self.index = [{} for _ in range(self.block_num)]
self.doc_hash_dict = {}
self.hash = SimHash(hash_size, dict_file, stopwords)
def insert(self, doc, doc_id):
encoded = self.encode(doc)
for b in range(self.block_num):
key = ''
            for i in range(b, len(encoded), self.block_num):
                key += encoded[i]
if key not in self.index[b]:
self.index[b][key] = []
self.index[b][key].append(doc_id)
self.doc_hash_dict[doc_id] = encoded
def contains(self, doc):
docs = self.recall(doc)
doc_hash_code = self.encode(doc)
for doc_id in docs:
other_hash_code = self.get_hash_code(doc_id)
if self.similar(other_hash_code, doc_hash_code):
return True
return False
def recall(self, doc):
result = []
encoded = self.encode(doc)
for b in range(self.block_num):
key = ''
            for i in range(b, len(encoded), self.block_num):
                key += encoded[i]
doc_ids = self.index[b].get(key, [])
result.extend(doc_ids)
return result
def similar(self, hash_code1, hash_code2):
t1 = self.covert_str_to_int(hash_code1)
t2 = self.covert_str_to_int(hash_code2)
distance = self.hamming(t1, t2)
return True if distance <= self.block_num-1 else False
def encode(self, text):
return self.hash.encode(text)
def hamming(self, doc1, doc2):
return self.hash.hamming(doc1, doc2, self.hash_size)
def get_hash_code(self, doc_id):
return self.doc_hash_dict.get(doc_id, None)
def covert_str_to_int(self, text):
return self.hash.covert_str_to_int(text)
if __name__ == '__main__':
with open('../data/document1', 'r', encoding='utf-8') as f:
text1 = f.read()
with open('../data/document2', 'r', encoding='utf-8') as f:
text2 = f.read()
with open('../data/document3', 'r', encoding='utf-8') as f:
text3 = f.read()
dr = DuplicateRemove(64, 4)
if not dr.contains(text1):
dr.insert(text1, 1)
print(dr.contains(text1))
print(dr.contains(text2))
print(dr.contains(text3))
| 31.329268
| 89
| 0.591281
|
b942ff3dafb5c886434a478e8bfb0592e83afd1c
| 6,215
|
bzl
|
Python
|
antlir/bzl/image_layer.bzl
|
zeroxoneb/antlir
|
811d88965610d16a5c85d831d317f087797ca732
|
[
"MIT"
] | 28
|
2020-08-11T16:22:46.000Z
|
2022-03-04T15:41:52.000Z
|
antlir/bzl/image_layer.bzl
|
zeroxoneb/antlir
|
811d88965610d16a5c85d831d317f087797ca732
|
[
"MIT"
] | 137
|
2020-08-11T16:07:49.000Z
|
2022-02-27T10:59:05.000Z
|
antlir/bzl/image_layer.bzl
|
zeroxoneb/antlir
|
811d88965610d16a5c85d831d317f087797ca732
|
[
"MIT"
] | 10
|
2020-09-10T00:01:28.000Z
|
2022-03-08T18:00:28.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
An `image.layer` is a set of `feature` with some additional parameters. Its
purpose is to materialize those `feature`s as a btrfs subvolume in the
per-repo `buck-image/out/volume/targets`.
We call the subvolume a "layer" because it can be built on top of a snapshot
of its `parent_layer`, and thus can be represented as a btrfs send-stream for
more efficient storage & distribution.
The Buck output of an `image.layer` target is a JSON file with information
on how to find the resulting layer in the per-repo
`buck-image/out/volume/targets`. See `SubvolumeOnDisk.to_json_file`.
## Implementation notes
The implementation of this converter deliberately minimizes the amount of
business logic in its command. The converter must include **only** our
interactions with the buck target graph. Everything else should be
delegated to subcommands.
### Command
In composing the `bash` command, our core maxim is: make it a hermetic
function of the converter's inputs -- do not read data from disk, do not
insert disk paths into the command, do not do anything that might cause the
bytes of the command to vary between machines or between runs. To achieve
this, we use Buck macros to resolve all paths, including those to helper
scripts. We rely on environment variables or pipes to pass data between the
helper scripts.
Another reason to keep this converter minimal is that `buck test` cannot
make assertions about targets that fail to build. Since we only have the
ability to test the "good" targets, it behooves us to put most logic in
external scripts, so that we can unit-test its successes **and** failures
thoroughly.
### Output
We mark `image.layer` uncacheable, because there's no easy way to teach Buck
to serialize a btrfs subvolume (for that, we have `package.new`).
That said, we should still follow best practices to avoid problems if e.g.
the user renames their repo, or similar. These practices include:
- The output JSON must store no absolute paths.
- Store Buck target paths instead of paths into the output directory.
### Dependency resolution
An `image.layer` consumes a set of `feature` outputs to decide what to put into
the btrfs subvolume. These outputs are actually just JSON files that
reference other targets, and do not contain the data to be written into the
image.
Therefore, `image.layer` has to explicitly tell buck that it needs all
direct dependencies of its `feature`s to be present on disk -- see our
`attrfilter` queries below. Without this, Buck would merrily fetch just
the `feature` JSONs from its cache, and not provide us with any of the
build artifacts that comprise the image.
We do NOT need the direct dependencies of the parent layer's features,
because we treat the parent layer as a black box -- whatever it has laid
down in the image, that's what it provides (and we don't care about how).
The consequences of this information hiding are:
- Better Buck cache efficiency -- we don't have to download
the dependencies of the ancestor layers' features. Doing that would be
wasteful, since those bits are redundant with what's in the parent.
- Ability to use genrule image layers / apply non-pure post-processing to
a layer. In terms of engineering, both of these non-pure approaches are
a terrible idea and a maintainability headache, but they do provide a
useful bridge for transitioning to Buck image builds from legacy
imperative systems.
- The image compiler needs a little extra code to walk the parent layer and
determine what it provides.
- We cannot have "unobservable" dependencies between features. Since
feature dependencies are expected to routinely cross layer boundaries,
feature implementations are forced only to depend on data that can be
inferred from the filesystem -- since this is all that the parent layer
implementation can do. NB: This is easy to relax in the future by
writing a manifest with additional metadata into each layer, and using
that metadata during compilation.
"""
load(":compile_image_features.bzl", "compile_image_features")
load(":image_layer_utils.bzl", "image_layer_utils")
load(":image_utils.bzl", "image_utils")
def image_layer(
name,
parent_layer = None,
features = None,
flavor = None,
flavor_config_override = None,
antlir_rule = "user-internal",
**image_layer_kwargs):
"""
Arguments
- `parent_layer`: The name of another `image_layer` target, on
top of which the current layer will install its features.
- `features`: List of `feature` target paths and/or
nameless structs from `feature.new`.
- `flavor`: Picks default build options for the layer, including
`build_appliance`, RPM installer, and others. See `flavor_helpers.bzl`
for details.
- `flavor_config_override`: A struct that can override the default
values fetched from `REPO_CFG[flavor].flavor_to_config`.
- `mount_config`: Specifies how this layer is mounted in the
`mounts` field of a `feature` of a parent layer. See
the field in `_image_layer_impl` in `image_layer_utils.bzl`
- `runtime`: A list of desired helper buck targets to be emitted.
`container` is always included in the list by default.
See the field in `_image_layer_impl` in `image_layer_utils.bzl` and the
[docs](/docs/tutorials/helper-buck-targets#imagelayer) for the list of
possible helpers, their respective behaviours, and how to invoke them.
"""
image_layer_utils.image_layer_impl(
_rule_type = "image_layer",
_layer_name = name,
# Build a new layer. It may be empty.
_make_subvol_cmd = compile_image_features(
name = name,
current_target = image_utils.current_target(name),
parent_layer = parent_layer,
features = features,
flavor = flavor,
flavor_config_override = flavor_config_override,
),
antlir_rule = antlir_rule,
**image_layer_kwargs
)
| 44.078014
| 79
| 0.740628
|
c1cf6547ed7bfd50ae90d81a89e64888002ed05c
| 654
|
py
|
Python
|
data/transcoder_evaluation_gfg/python/PROGRAM_FOR_FACTORIAL_OF_A_NUMBER_2.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 241
|
2021-07-20T08:35:20.000Z
|
2022-03-31T02:39:08.000Z
|
data/transcoder_evaluation_gfg/python/PROGRAM_FOR_FACTORIAL_OF_A_NUMBER_2.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 49
|
2021-07-22T23:18:42.000Z
|
2022-03-24T09:15:26.000Z
|
data/transcoder_evaluation_gfg/python/PROGRAM_FOR_FACTORIAL_OF_A_NUMBER_2.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 71
|
2021-07-21T05:17:52.000Z
|
2022-03-29T23:49:28.000Z
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n ) :
return 1 if ( n == 1 or n == 0 ) else n * f_gold ( n - 1 )
#TOFILL
if __name__ == '__main__':
param = [
(24,),
(46,),
(47,),
(41,),
(98,),
(69,),
(83,),
(2,),
(12,),
(11,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
| 21.8
| 64
| 0.547401
|
d4c01cd0edc8d2153f80658f1a0ab4766c122f46
| 867
|
py
|
Python
|
other/pom.py
|
bartekpacia/python-training
|
00a1047f70ab44cc5afed8619eb4eac0e406f3e3
|
[
"MIT"
] | null | null | null |
other/pom.py
|
bartekpacia/python-training
|
00a1047f70ab44cc5afed8619eb4eac0e406f3e3
|
[
"MIT"
] | null | null | null |
other/pom.py
|
bartekpacia/python-training
|
00a1047f70ab44cc5afed8619eb4eac0e406f3e3
|
[
"MIT"
] | null | null | null |
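# (English summary added for clarity; prompts and output below stay in Polish.)
# Reads t test cases, each giving A, B and k. For every case it tries each
# single-digit modification of A and keeps the largest candidate that is strictly
# smaller than B; the outer k loop merely repeats the same single-digit search.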
t = int(input("podaj t: "))
wszystkie_liczby = []
for i in range(0, t):
liczby = input("podaj A B k: ")
wszystkie_liczby.append(liczby.split())
print(wszystkie_liczby)
for i in range(0, len(wszystkie_liczby)):
A = wszystkie_liczby[i][0]
B = wszystkie_liczby[i][1]
k = int(wszystkie_liczby[i][2])
print(f"A: {A}, B: {B}, k: {k}")
najwieksze_dobre_C = 0
for ii in range(0, k):
for j in range(0, len(A)):
for l in range(0, 10):
C_list = list(A)
zmieniona_cyfra = str(l)
C_list[j] = zmieniona_cyfra
kandydat_C = "".join(C_list)
if int(kandydat_C) < int(B):
if int(kandydat_C) > int(najwieksze_dobre_C):
najwieksze_dobre_C = kandydat_C
    print(str(najwieksze_dobre_C) + " - najwieksze dobre")
| 28.9
| 65
| 0.54902
|
d611b79120e6d156e719663d34b598736a9cef39
| 287
|
py
|
Python
|
plugins/raid/db/raid.py
|
dwieland/carnibot
|
83d660cac151739b524c6f11e8e7fe0b068869d7
|
[
"Apache-2.0"
] | 1
|
2018-08-02T06:27:37.000Z
|
2018-08-02T06:27:37.000Z
|
plugins/raid/db/raid.py
|
dwieland/carnibot
|
83d660cac151739b524c6f11e8e7fe0b068869d7
|
[
"Apache-2.0"
] | 4
|
2018-08-02T06:35:07.000Z
|
2018-08-02T06:37:14.000Z
|
plugins/raid/db/raid.py
|
dwieland/carnibot
|
83d660cac151739b524c6f11e8e7fe0b068869d7
|
[
"Apache-2.0"
] | null | null | null |
from sqlalchemy import Column, Integer, DateTime, String
from plugins.raid.db import Base
class Raid(Base):
__tablename__ = "RAID"
id = Column(Integer, primary_key=True)
date = Column(DateTime, unique=True)
message_id = Column(String(32))
color = Column(Integer)
| 22.076923
| 56
| 0.710801
|
db9083a55bc6370222cc6fbe21804a38add57a37
| 946
|
py
|
Python
|
pyEpiabm/pyEpiabm/sweep/queue_sweep.py
|
Saketkr21/epiabm
|
3ec0dcbc78d3fd4114ed3c6bdd78ef39f0013d2f
|
[
"BSD-3-Clause"
] | 11
|
2021-12-02T15:24:02.000Z
|
2022-03-10T14:02:13.000Z
|
pyEpiabm/pyEpiabm/sweep/queue_sweep.py
|
Saketkr21/epiabm
|
3ec0dcbc78d3fd4114ed3c6bdd78ef39f0013d2f
|
[
"BSD-3-Clause"
] | 119
|
2021-11-24T13:56:48.000Z
|
2022-03-30T11:52:07.000Z
|
pyEpiabm/pyEpiabm/sweep/queue_sweep.py
|
SABS-R3-Epidemiology/epiabm
|
8eb83fd2de84104f6f77929e3771095f7b033ddc
|
[
"BSD-3-Clause"
] | 3
|
2022-01-13T03:05:19.000Z
|
2022-03-11T22:00:17.000Z
|
#
# Sweeps for enqueued persons to update infection status
#
from pyEpiabm.property import InfectionStatus
from .abstract_sweep import AbstractSweep
class QueueSweep(AbstractSweep):
"""Class to sweep through the enqueued persons
in each cell and update their infection status.
"""
def __call__(self, time: float):
"""Function to run through the queue of people to be exposed.
Parameters
----------
time : float
Simulation time
"""
for cell in self._population.cells:
while not cell.person_queue.empty():
person = cell.person_queue.get()
# Get takes person from the queue and removes them, so clears
# the queue for the next timestep.
# Update the infection status
person.next_infection_status = InfectionStatus.Exposed
person.time_of_status_change = time
| 29.5625
| 77
| 0.627907
|
63c960cd6723083ac7223a2993f19dda5fb19182
| 3,762
|
py
|
Python
|
data/base_dataset.py
|
Uha661/instagan
|
4b49eb85ee1b0ef9c21ca3187604bf9933e0042b
|
[
"BSD-3-Clause"
] | null | null | null |
data/base_dataset.py
|
Uha661/instagan
|
4b49eb85ee1b0ef9c21ca3187604bf9933e0042b
|
[
"BSD-3-Clause"
] | null | null | null |
data/base_dataset.py
|
Uha661/instagan
|
4b49eb85ee1b0ef9c21ca3187604bf9933e0042b
|
[
"BSD-3-Clause"
] | null | null | null |
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
pass
def __len__(self):
return 0
def get_transform(opt):
transform_list = []
# Modify transform to specify width and height
if opt.resize_or_crop == 'resize_and_crop':
osize = [opt.loadSizeH, opt.loadSizeW]
fsize = [opt.fineSizeH, opt.fineSizeW]
transform_list.append(transforms.Resize(osize, Image.BICUBIC))
transform_list.append(transforms.RandomCrop(fsize))
# Original CycleGAN code
# if opt.resize_or_crop == 'resize_and_crop':
# osize = [opt.loadSize, opt.loadSize]
# transform_list.append(transforms.Resize(osize, Image.BICUBIC))
# transform_list.append(transforms.RandomCrop(opt.fineSize))
# elif opt.resize_or_crop == 'crop':
# transform_list.append(transforms.RandomCrop(opt.fineSize))
# elif opt.resize_or_crop == 'scale_width':
# transform_list.append(transforms.Lambda(
# lambda img: __scale_width(img, opt.fineSize)))
# elif opt.resize_or_crop == 'scale_width_and_crop':
# transform_list.append(transforms.Lambda(
# lambda img: __scale_width(img, opt.loadSize)))
# transform_list.append(transforms.RandomCrop(opt.fineSize))
# elif opt.resize_or_crop == 'none':
# transform_list.append(transforms.Lambda(
# lambda img: __adjust(img)))
else:
raise ValueError('--resize_or_crop %s is not a valid option.' % opt.resize_or_crop)
if opt.isTrain and not opt.no_flip:
transform_list.append(transforms.RandomHorizontalFlip())
transform_list += [transforms.ToTensor(),
transforms.Normalize((0.5),
(0.5))]
return transforms.Compose(transform_list)
# just modify the width and height to be multiple of 4
def __adjust(img):
ow, oh = img.size
# the size needs to be a multiple of this number,
# because going through generator network may change img size
# and eventually cause size mismatch error
mult = 4
if ow % mult == 0 and oh % mult == 0:
return img
w = (ow - 1) // mult
w = (w + 1) * mult
h = (oh - 1) // mult
h = (h + 1) * mult
if ow != w or oh != h:
__print_size_warning(ow, oh, w, h)
return img.resize((w, h), Image.BICUBIC)
def __scale_width(img, target_width):
ow, oh = img.size
# the size needs to be a multiple of this number,
# because going through generator network may change img size
# and eventually cause size mismatch error
mult = 4
assert target_width % mult == 0, "the target width needs to be multiple of %d." % mult
if (ow == target_width and oh % mult == 0):
return img
w = target_width
target_height = int(target_width * oh / ow)
m = (target_height - 1) // mult
h = (m + 1) * mult
if target_height != h:
__print_size_warning(target_width, target_height, w, h)
return img.resize((w, h), Image.BICUBIC)
def __print_size_warning(ow, oh, w, h):
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
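# A minimal usage sketch (the option names mirror what get_transform() reads; the
# Namespace values below are illustrative, kept as a comment to avoid side effects):
#
#     from argparse import Namespace
#     opt = Namespace(resize_or_crop='resize_and_crop',
#                     loadSizeH=286, loadSizeW=286, fineSizeH=256, fineSizeW=256,
#                     isTrain=True, no_flip=False)
#     transform = get_transform(opt)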
| 34.2
| 91
| 0.639288
|
3a3e3e25b79c4b70a9cf9c813684e40159e3515c
| 7,884
|
py
|
Python
|
models/Strategy.py
|
Vashiru/pycryptobot
|
a4cf67cbb7467acc1d4aa7aab17cd65e8dbb80fe
|
[
"Apache-2.0"
] | 1
|
2021-06-14T18:21:49.000Z
|
2021-06-14T18:21:49.000Z
|
models/Strategy.py
|
Vashiru/pycryptobot
|
a4cf67cbb7467acc1d4aa7aab17cd65e8dbb80fe
|
[
"Apache-2.0"
] | null | null | null |
models/Strategy.py
|
Vashiru/pycryptobot
|
a4cf67cbb7467acc1d4aa7aab17cd65e8dbb80fe
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
from pandas import DataFrame
from models.PyCryptoBot import PyCryptoBot
from models.AppState import AppState
from models.helper.LogHelper import Logger
class Strategy():
def __init__(self, app: PyCryptoBot=None, state: AppState=AppState, df: DataFrame=DataFrame, iterations: int=0) -> None:
if not isinstance(df, DataFrame):
raise TypeError("'df' not a Pandas dataframe")
if len(df) == 0:
raise ValueError("'df' is empty")
self._action = 'WAIT'
self.app = app
self.state = state
self._df = df
self._df_last = app.getInterval(df, iterations)
def isBuySignal(self, now: datetime = datetime.today().strftime('%Y-%m-%d %H:%M:%S'), price: float=0.0) -> bool:
# required technical indicators or candle sticks for buy signal strategy
required_indicators = [ 'ema12gtema26co', 'macdgtsignal', 'goldencross', 'obv_pc', 'eri_buy' ]
for indicator in required_indicators:
if indicator not in self._df_last:
raise AttributeError(f"'{indicator}' not in Pandas dataframe")
# buy signal exclusion (if disabled, do not buy within 3% of the dataframe close high)
if self.state.last_action == 'SELL' and self.app.disableBuyNearHigh() is True and (price > (self._df['close'].max() * 0.97)):
log_text = str(now) + ' | ' + self.app.getMarket() + ' | ' + self.app.printGranularity() + ' | Ignoring Buy Signal (price ' + str(price) + ' within 3% of high ' + str(self._df['close'].max()) + ')'
Logger.warning(log_text)
return False
# criteria for a buy signal 1
if bool(self._df_last['ema12gtema26co'].values[0]) is True \
and (bool(self._df_last['macdgtsignal'].values[0]) is True or self.app.disableBuyMACD()) \
and (bool(self._df_last['goldencross'].values[0]) is True or self.app.disableBullOnly()) \
and (float(self._df_last['obv_pc'].values[0]) > -5 or self.app.disableBuyOBV()) \
and (bool(self._df_last['eri_buy'].values[0]) is True or self.app.disableBuyElderRay()) \
and self.state.last_action != 'BUY': # required for all strategies
Logger.debug('*** Buy Signal ***')
for indicator in required_indicators:
Logger.debug(f'{indicator}: {self._df_last[indicator].values[0]}')
Logger.debug(f'last_action: {self.state.last_action}')
return True
        # criteria for buy signal 2 (optionally add additional buy signals)
elif bool(self._df_last['ema12gtema26co'].values[0]) is True \
and bool(self._df_last['macdgtsignalco'].values[0]) is True \
and (bool(self._df_last['goldencross'].values[0]) is True or self.app.disableBullOnly()) \
and (float(self._df_last['obv_pc'].values[0]) > -5 or self.app.disableBuyOBV()) \
and (bool(self._df_last['eri_buy'].values[0]) is True or self.app.disableBuyElderRay()) \
and self.state.last_action != 'BUY': # required for all strategies
Logger.debug('*** Buy Signal ***')
for indicator in required_indicators:
Logger.debug(f'{indicator}: {self._df_last[indicator].values[0]}')
Logger.debug(f'last_action: {self.state.last_action}')
return True
return False
def isSellSignal(self) -> bool:
        # required technical indicators or candle sticks for sell signal strategy
required_indicators = [ 'ema12ltema26co', 'macdltsignal' ]
for indicator in required_indicators:
if indicator not in self._df_last:
raise AttributeError(f"'{indicator}' not in Pandas dataframe")
# criteria for a sell signal 1
if bool(self._df_last['ema12ltema26co'].values[0]) is True \
and (bool(self._df_last['macdltsignal'].values[0]) is True or self.app.disableBuyMACD()) \
and self.state.last_action not in ['', 'SELL']:
Logger.debug('*** Sell Signal ***')
for indicator in required_indicators:
Logger.debug(f'{indicator}: {self._df_last[indicator].values[0]}')
Logger.debug(f'last_action: {self.state.last_action}')
return True
return False
def isSellTrigger(self, price: float=0.0, price_exit: float=0.0, margin: float=0.0, change_pcnt_high: float=0.0, obv_pc: float=0.0, macdltsignal: bool=False) -> bool:
# loss failsafe sell at fibonacci band
if self.app.disableFailsafeFibonacciLow() is False and self.app.allowSellAtLoss() and self.app.sellLowerPcnt() is None and self.state.fib_low > 0 and self.state.fib_low >= float(price):
log_text = '! Loss Failsafe Triggered (Fibonacci Band: ' + str(self.state.fib_low) + ')'
Logger.warning(log_text)
self.app.notifyTelegram(self.app.getMarket() + ' (' + self.app.printGranularity() + ') ' + log_text)
return True
# loss failsafe sell at trailing_stop_loss
if self.app.trailingStopLoss() != None and change_pcnt_high < self.app.trailingStopLoss() and (self.app.allowSellAtLoss() or margin > 0):
log_text = '! Trailing Stop Loss Triggered (< ' + str(self.app.trailingStopLoss()) + '%)'
Logger.warning(log_text)
self.app.notifyTelegram(self.app.getMarket() + ' (' + self.app.printGranularity() + ') ' + log_text)
return True
# loss failsafe sell at sell_lower_pcnt
elif self.app.disableFailsafeLowerPcnt() is False and self.app.allowSellAtLoss() and self.app.sellLowerPcnt() != None and margin < self.app.sellLowerPcnt():
log_text = '! Loss Failsafe Triggered (< ' + str(self.app.sellLowerPcnt()) + '%)'
Logger.warning(log_text)
self.app.notifyTelegram(self.app.getMarket() + ' (' + self.app.printGranularity() + ') ' + log_text)
return True
# profit bank at sell_upper_pcnt
if self.app.disableProfitbankUpperPcnt() is False and self.app.sellUpperPcnt() != None and margin > self.app.sellUpperPcnt():
log_text = '! Profit Bank Triggered (> ' + str(self.app.sellUpperPcnt()) + '%)'
Logger.warning(log_text)
self.app.notifyTelegram(self.app.getMarket() + ' (' + self.app.printGranularity() + ') ' + log_text)
return True
# profit bank when strong reversal detected
if self.app.disableProfitbankReversal() is False and margin > 3 and obv_pc < 0 and macdltsignal is True:
log_text = '! Profit Bank Triggered (Strong Reversal Detected)'
Logger.warning(log_text)
self.app.notifyTelegram(self.app.getMarket() + ' (' + self.app.printGranularity() + ') ' + log_text)
return True
        # profit bank when selling at resistance
if self.app.sellAtResistance() is True and margin >= 2 and price > 0 and price != price_exit:
log_text = '! Profit Bank Triggered (Selling At Resistance)'
Logger.warning(log_text)
if not (not self.app.allowSellAtLoss() and margin <= 0):
self.app.notifyTelegram(self.app.getMarket() + ' (' + self.app.printGranularity() + ') ' + log_text)
return True
return False
def isWaitTrigger(self, margin: float=0.0):
# configuration specifies to not sell at a loss
if self.state.action == 'SELL' and not self.app.allowSellAtLoss() and margin <= 0:
log_text = '! Ignore Sell Signal (No Sell At Loss)'
Logger.warning(log_text)
return True
return False
def getAction(self):
if self.isBuySignal():
return 'BUY'
elif self.isSellSignal():
return 'SELL'
else:
return 'WAIT'
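# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal, standalone rendering of the buy-signal combination used above.
# The real Strategy methods read these values from the indicator dataframe and
# the app/state objects, and the disable* configuration toggles are omitted
# here for brevity; plain Python values stand in for all of them.
def simple_buy_signal(ema12gtema26co: bool, macdgtsignal: bool, goldencross: bool,
                      obv_pc: float, eri_buy: bool, last_action: str) -> bool:
    return (ema12gtema26co
            and macdgtsignal
            and goldencross
            and obv_pc > -5
            and eri_buy
            and last_action != 'BUY')
# Example: every indicator agrees and the previous action was a sell, so buy.
assert simple_buy_signal(True, True, True, 1.2, True, 'SELL') is True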
| 51.529412
| 209
| 0.622146
|
dcb3a903d6323b772ed3f8765d19bdc1a890ffc4
| 543
|
py
|
Python
|
phunspell/tests/test__es.py
|
dvwright/phunspell
|
818bbd081f84c570ec304fdc235ca112f9abd869
|
[
"MIT"
] | null | null | null |
phunspell/tests/test__es.py
|
dvwright/phunspell
|
818bbd081f84c570ec304fdc235ca112f9abd869
|
[
"MIT"
] | 1
|
2021-04-19T13:14:24.000Z
|
2021-05-16T04:43:24.000Z
|
phunspell/tests/test__es.py
|
dvwright/phunspell
|
818bbd081f84c570ec304fdc235ca112f9abd869
|
[
"MIT"
] | null | null | null |
import phunspell
import inspect
import unittest
class TestES(unittest.TestCase):
pspell = phunspell.Phunspell('es')
def test_word_found(self):
self.assertTrue(self.pspell.lookup("pianista"))
def test_word_not_found(self):
self.assertFalse(self.pspell.lookup("phunspell"))
def test_lookup_list_return_not_found(self):
words = "pianista borken"
self.assertListEqual(
self.pspell.lookup_list(words.split(" ")), ["borken"]
)
if __name__ == "__main__":
unittest.main()
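# --- Illustrative sketch (not part of the original test file) ----------------
# The same API used outside unittest; 'es' loads the bundled Spanish
# dictionary, mirroring the assertions above.
def example_spellcheck():
    pspell = phunspell.Phunspell('es')
    words = "pianista borken"
    return pspell.lookup_list(words.split(" "))  # expected: ["borken"]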
| 22.625
| 65
| 0.674033
|
1abd108bb22953377a0b8bd40f37ac8a5b5f5abf
| 3,158
|
py
|
Python
|
dags/scripts/spark/algorand_price_lin_regression.py
|
Tcfocus/ml_price_prediction_airflow_pysparkml_emr
|
735f4432398c48e1486715bea4eaf7b620dd1298
|
[
"Apache-2.0"
] | null | null | null |
dags/scripts/spark/algorand_price_lin_regression.py
|
Tcfocus/ml_price_prediction_airflow_pysparkml_emr
|
735f4432398c48e1486715bea4eaf7b620dd1298
|
[
"Apache-2.0"
] | null | null | null |
dags/scripts/spark/algorand_price_lin_regression.py
|
Tcfocus/ml_price_prediction_airflow_pysparkml_emr
|
735f4432398c48e1486715bea4eaf7b620dd1298
|
[
"Apache-2.0"
] | null | null | null |
# pyspark
import argparse
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.sql.window import Window
import pandas as pd
import ta
# function for calculating RSI from price
def getRsi(x):
ta_rsi = ta.momentum.RSIIndicator(close=x, window=14)
return ta_rsi.rsi()
def linear_regression_prediction(input_location, output_location):
"""
    Create a linear regression model that predicts the next price from the current price, volume, market cap, and RSI values
"""
df_input = spark.read.csv(input_location, header=True, inferSchema=True)
# transform to pandas df and calculate RSI column then revert back to spark df
df_input_ta = df_input.toPandas()
df_input_ta['rsi'] = df_input_ta.price.transform(getRsi)
df_input_ta = spark.createDataFrame(df_input_ta)
# calculate 'next_price' column
window = Window.orderBy("date")
a = lead(col("price")).over(window)
final_df = df_input_ta.withColumn("next_price", a).dropna(how="any")
    # Prepare data for Machine Learning by establishing the features and label ("next_price") columns
feature = VectorAssembler(inputCols=['price', 'volume', 'marketCap', 'rsi'], outputCol='features')
final_ml_data = feature.transform(final_df)
# select only 'features' and 'next_price' for the model
final_ml_data = final_ml_data.select('features', 'next_price')
    # split data into train and test sets
    splits = final_ml_data.randomSplit([0.7, 0.3])
    train_df, test_df = splits
# Linear regression
lr = LinearRegression(featuresCol='features', labelCol='next_price')
model = lr.fit(train_df)
# Run model on test set to make predictions
predictions = model.transform(test_df)
    # Clean up final predictions data by extracting each value from the 'features' vector column
    split1_udf = udf(lambda value: value[0].item(), FloatType())
    split2_udf = udf(lambda value: value[1].item(), FloatType())
    split3_udf = udf(lambda value: value[2].item(), FloatType())
    split4_udf = udf(lambda value: value[3].item(), FloatType())
    predictions = predictions.withColumn('price', split1_udf('features')) \
        .withColumn('volume', split2_udf('features')) \
        .withColumn('marketCap', split3_udf('features')) \
        .withColumn('rsi', split4_udf('features'))
predictions = predictions.select("price", "volume", "marketCap", "rsi", "next_price", "prediction")
    # Output data as Parquet, a columnar storage format
predictions.write.mode("overwrite").parquet(output_location)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, help="HDFS input", default="/source")
parser.add_argument("--output", type=str, help="HDFS output", default="/output")
args = parser.parse_args()
spark = SparkSession.builder.appName("Linear Regression Prediction").getOrCreate()
linear_regression_prediction(input_location=args.input, output_location=args.output)
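# --- Illustrative follow-up (not part of the original job) -------------------
# Scoring the test-set predictions with RMSE via pyspark's RegressionEvaluator.
# Assumes the 'predictions' dataframe produced above, which contains the
# 'next_price' label and the model's 'prediction' column.
def score_predictions(predictions_df) -> float:
    from pyspark.ml.evaluation import RegressionEvaluator
    evaluator = RegressionEvaluator(labelCol='next_price',
                                    predictionCol='prediction',
                                    metricName='rmse')
    return evaluator.evaluate(predictions_df)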
| 40.487179
| 120
| 0.722926
|
0e31edc5db0cd67f8a3dd74eecaca5c7baa139e6
| 14,705
|
py
|
Python
|
bin/automated_detection_testing/ci/detection_testing_batch/modules/validate_args.py
|
kirtankhatana-crest/security_content
|
cc63c0d635cb56f4756e209b02e13fd839f1e383
|
[
"Apache-2.0"
] | null | null | null |
bin/automated_detection_testing/ci/detection_testing_batch/modules/validate_args.py
|
kirtankhatana-crest/security_content
|
cc63c0d635cb56f4756e209b02e13fd839f1e383
|
[
"Apache-2.0"
] | null | null | null |
bin/automated_detection_testing/ci/detection_testing_batch/modules/validate_args.py
|
kirtankhatana-crest/security_content
|
cc63c0d635cb56f4756e209b02e13fd839f1e383
|
[
"Apache-2.0"
] | 1
|
2022-01-27T05:29:43.000Z
|
2022-01-27T05:29:43.000Z
|
import argparse
import copy
import io
import json
import modules.jsonschema_errorprinter as jsonschema_errorprinter
import sys
from typing import Union
# If we want, we can easily add a description field to any of the objects here!
ES_APP_NAME = "SPLUNK_ES_CONTENT_UPDATE"
setup_schema = {
"type": "object",
"properties": {
"branch": {
"type": "string",
"default": "develop"
},
"commit_hash": {
"type": ["string", "null"],
"default": None
},
"container_tag": {
"type": "string",
"default": "latest"
},
"no_interactive_failure": {
"type": "boolean",
"default": False
},
"interactive": {
"type": "boolean",
"default": False
},
"detections_list": {
"type": ["array", "null"],
"items": {
"type": "string"
},
"default": None,
},
"detections_file": {
"type": ["string", "null"],
"default": None
},
"apps": {
"type": "object",
"additionalProperties": False,
"patternProperties": {
"^.*$": {
"type": "object",
"additionalProperties": False,
"properties": {
"app_number": {
"type": ["integer","null"]
},
"app_version": {
"type": ["string","null"]
},
"local_path": {
"type": ["string","null"]
},
"http_path": {
"type": ["string", "null"]
}
},
"anyOf": [
{"required": ["local_path"]},
{"required": ["http_path"] },
{"required": ["app_number", "app_version"] },
]
}
},
"default": {
ES_APP_NAME : {
"app_number": 3449,
"app_version": None,
"local_path": None
},
#The default apps below were taken from the attack_range loadout: https://github.com/splunk/attack_range/blob/develop/attack_range.conf.template
"SPLUNK_ADD_ON_FOR_MICROSOFT_WINDOWS": {
"app_number": 742,
"app_version": "8.4.0",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-microsoft-windows_840.tgz"
},
"ADD_ON_FOR_LINUX_SYSMON": {
"app_number": 6176,
"app_version": "1.0.4",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/add-on-for-linux-sysmon_104.tgz"
},
"SPLUNK_ADD_ON_FOR_SYSMON": {
"app_number": 5709,
"app_version": "2.0.0",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-sysmon_200.tgz"
},
"SPLUNK_COMMON_INFORMATION_MODEL": {
"app_number": 1621,
"app_version": "5.0.0",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-common-information-model-cim_500.tgz"
},
#Note - end of life on July 15, 2022 - https://splunkbase.splunk.com/app/1274/
"SPLUNK_APP_FOR_AWS": {
"app_number": 1274,
"app_version": "6.0.3",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-app-for-aws_603.tgz"
},
"PYTHON_FOR_SCIENTIFIC_COMPUTING_FOR_LINUX_64_BIT": {
"app_number": 2882,
"app_version": "3.0.2",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/python-for-scientific-computing-for-linux-64-bit_302.tgz"
},
"SPLUNK_MACHINE_LEARNING_TOOLKIT": {
"app_number": 2890,
"app_version": "5.3.1",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-machine-learning-toolkit_531.tgz"
},
"SPLUNK_APP_FOR_STREAM": {
"app_number": 1809,
"app_version": "8.0.1",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-app-for-stream_801.tgz"
},
"SPLUNK_ADD_ON_FOR_STREAM_WIRE_DATA": {
"app_number": 5234,
"app_version": "8.0.1",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-stream-wire-data_801.tgz"
},
"SPLUNK_ADD_ON_FOR_STREAM_FORWARDERS": {
"app_number": 5238,
"app_version": "8.0.1",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-stream-forwarders_801.tgz"
},
"SPLUNK_SECURITY_ESSENTIALS": {
"app_number": 3435,
"app_version": "3.4.0",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-security-essentials_340.tgz"
},
"SPLUNK_ADD_ON_FOR_AMAZON_KINESIS_FIREHOSE": {
"app_number": 3719,
"app_version": "1.3.2",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-amazon-kinesis-firehose_132.tgz"
},
"SPLUNK_ADD_ON_FOR_MICROSOFT_OFFICE_365": {
"app_number": 4055,
"app_version": "2.2.0",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-microsoft-office-365_220.tgz"
},
"SPLUNK_ADD_ON_FOR_UNIX_AND_LINUX": {
"app_number": 833,
"app_version": "8.4.0",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-unix-and-linux_840.tgz"
},
"SPLUNK_ADD_ON_FOR_NGINX": {
"app_number": 3258,
"app_version": "3.1.0",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-nginx_310.tgz"
},
"SPLUNK_TA_FOR_ZEEK": {
"app_number": 5466,
"app_version": "1.0.5",
"http_path": "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/ta-for-zeek_105.tgz"
},
}
},
"mode": {
"type": "string",
"enum": ["changes", "selected", "all"],
"default": "changes"
},
"num_containers": {
"type": "integer",
"minimum": 1,
"default": 1
},
"persist_security_content": {
"type": "boolean",
"default": False
},
"pr_number": {
"type": ["integer", "null"],
"default": None
},
"reuse_image": {
"type": "boolean",
"default": True
},
"show_splunk_app_password": {
"type": "boolean",
"default": False
},
"splunkbase_username": {
"type": ["string", "null"],
"default": None
},
"splunkbase_password": {
"type": ["string", "null"],
"default": None
},
"splunk_app_password": {
"type": ["string", "null"],
"default": None
},
"splunk_container_apps_directory": {
"type": "string",
"default": "/opt/splunk/etc/apps"
},
"local_base_container_name": {
"type": "string",
"default": "splunk_test_%d"
},
"mock": {
"type": "boolean",
"default": False
},
"folders": {
"type": "array",
"items": {
"type": "string",
"enum": ["endpoint", "cloud", "network","web","experimental"]
},
"default": ["endpoint", "cloud", "network","web"]
},
"types": {
"type": "array",
"items": {
"type": "string",
"enum": ["Anomaly", "Hunting", "TTP"]
},
"default": ["Anomaly", "Hunting", "TTP"]
},
}
}
def validate_file(file: io.TextIOWrapper) -> tuple[Union[dict, None], dict]:
try:
settings = json.loads(file.read())
return validate(settings)
except Exception as e:
raise(e)
def check_dependencies(settings: dict, skip_password_accessibility_check:bool=True) -> bool:
# Check complex mode dependencies
error_free = True
# Make sure that all the mode arguments are sane
if settings['mode'] == 'selected':
# Make sure that exactly one of the following fields is populated
if settings['detections_file'] == None and settings['detections_list'] == None:
print("Error - mode was 'selected' but no detections_list or detections_file were supplied.", file=sys.stderr)
error_free = False
elif settings['detections_file'] != None and settings['detections_list'] != None:
print("Error - mode was 'selected' but detections_list and detections_file were supplied.", file=sys.stderr)
error_free = False
if settings['mode'] != 'selected' and settings['detections_file'] != None:
print("Error - mode was not 'selected' but detections_file was supplied.", file=sys.stderr)
error_free = False
elif settings['mode'] != 'selected' and settings['detections_list'] != None:
print("Error - mode was not 'selected' but detections_list was supplied.", file=sys.stderr)
error_free = False
# Make sure that if we will be in an interactive mode, that either the user has provided the password or the password will be printed
if skip_password_accessibility_check:
pass
elif (settings['interactive'] or not settings['no_interactive_failure']) and settings['show_splunk_app_password'] is False:
print("\n\n******************************************************\n\n")
if settings['splunk_app_password'] is not None:
print("Warning: You have chosen an interactive mode, set show_splunk_app_password False,\n"\
"and provided a password in the config file. We will NOT print this password to\n"\
"stdout. Look in the config file for this password.",file=sys.stderr)
else:
print("Warning: You have chosen an interactive mode, set show_splunk_app_password False,\n"\
"and DID NOT provide a password in the config file. We have updated show_splunk_app_password\n"\
"to True for you. Otherwise, interactive mode login would be impossible.",file=sys.stderr)
settings['show_splunk_app_password'] = True
print("\n\n******************************************************\n\n")
# Returns true if there are not errors
return error_free
def validate_and_write(configuration: dict, output_file: Union[io.TextIOWrapper, None] = None, strip_credentials: bool = False, skip_password_accessibility_check:bool=True) -> tuple[Union[dict, None], dict]:
closeFile = False
if output_file is None:
import datetime
now = datetime.datetime.now()
configname = now.strftime('%Y-%m-%dT%H:%M:%S%z') + '-test-run.json'
output_file = open(configname, "w")
closeFile = True
if strip_credentials:
configuration = copy.deepcopy(configuration)
configuration['splunkbase_password'] = None
configuration['splunkbase_username'] = None
configuration['container_password'] = None
configuration['show_splunk_app_password'] = True
validated_json, setup_schema = validate(configuration,skip_password_accessibility_check)
if validated_json == None:
print("Error in the new settings! No output file written")
else:
print("Settings updated. Writing results to: %s" %
(output_file.name))
try:
output_file.write(json.dumps(
validated_json, sort_keys=True, indent=4))
except Exception as e:
print("Error writing settings to %s: [%s]" % (
output_file.name, str(e)), file=sys.stderr)
sys.exit(1)
if closeFile is True:
output_file.close()
return validated_json, setup_schema
def validate(configuration: dict, skip_password_accessibility_check:bool=True) -> tuple[Union[dict, None], dict]:
# v = jsonschema.Draft201909Validator(argument_schema)
try:
validation_errors, validated_json = jsonschema_errorprinter.check_json(
configuration, setup_schema)
if len(validation_errors) == 0:
# check to make sure there were no complex errors
no_complex_errors = check_dependencies(validated_json,skip_password_accessibility_check)
if no_complex_errors:
return validated_json, setup_schema
else:
print("Validation failed due to error(s) listed above.",
file=sys.stderr)
return None, setup_schema
else:
print("[%d] failures detected during validation of the configuration!" % (
len(validation_errors)), file=sys.stderr)
for error in validation_errors:
print(error, end="\n\n", file=sys.stderr)
return None, setup_schema
except Exception as e:
print("There was an error validation the configuration: [%s]" % (
str(e)), file=sys.stderr)
return None, setup_schema
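# --- Illustrative sketch (not part of the original module) -------------------
# Driving validate() with a minimal configuration. Keys that are omitted are
# filled in from the schema defaults above; a None result means validation or
# the dependency checks failed.
def example_validate_minimal_config():
    config = {"branch": "develop", "mode": "all", "num_containers": 2}
    validated, _ = validate(config)
    if validated is None:
        print("configuration rejected", file=sys.stderr)
    return validated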
| 39.636119
| 207
| 0.521863
|
5c939586932a641c82633706d42d5d43e9f5f1d0
| 37,308
|
py
|
Python
|
numpy/distutils/fcompiler/__init__.py
|
numpy/numpy-refactor
|
6de313865ec3f49bcdd06ccbc879f27e65acf818
|
[
"BSD-3-Clause"
] | 15
|
2015-03-23T09:40:07.000Z
|
2020-04-19T15:14:49.000Z
|
numpy/distutils/fcompiler/__init__.py
|
numpy/numpy-refactor
|
6de313865ec3f49bcdd06ccbc879f27e65acf818
|
[
"BSD-3-Clause"
] | 5
|
2016-07-02T15:34:00.000Z
|
2020-09-01T06:45:13.000Z
|
numpy/distutils/fcompiler/__init__.py
|
numpy/numpy-refactor
|
6de313865ec3f49bcdd06ccbc879f27e65acf818
|
[
"BSD-3-Clause"
] | 17
|
2015-02-12T21:33:40.000Z
|
2021-02-23T06:53:03.000Z
|
"""numpy.distutils.fcompiler
Contains FCompiler, an abstract base class that defines the interface
for the numpy.distutils Fortran compiler abstraction model.
Terminology:
To be consistent, where the term 'executable' is used, it means the single
file, like 'gcc', that is executed, and should be a string. In contrast,
'command' means the entire command line, like ['gcc', '-c', 'file.c'], and
should be a list.
But note that FCompiler.executables is actually a dictionary of commands.
"""
__all__ = ['FCompiler','new_fcompiler','show_fcompilers',
'dummy_fortran_file']
import os
import sys
import re
import types
try:
set
except NameError:
from sets import Set as set
from numpy.compat import open_latin1
from distutils.sysconfig import get_config_var, get_python_lib
from distutils.fancy_getopt import FancyGetopt
from distutils.errors import DistutilsModuleError, \
DistutilsExecError, CompileError, LinkError, DistutilsPlatformError
from distutils.util import split_quoted, strtobool
from numpy.distutils.ccompiler import CCompiler, gen_lib_options
from numpy.distutils import log
from numpy.distutils.misc_util import is_string, all_strings, is_sequence, make_temp_file
from numpy.distutils.environment import EnvironmentConfig
from numpy.distutils.exec_command import find_executable
from numpy.distutils.compat import get_exception
__metaclass__ = type
class CompilerNotFound(Exception):
pass
def flaglist(s):
if is_string(s):
return split_quoted(s)
else:
return s
def str2bool(s):
if is_string(s):
return strtobool(s)
return bool(s)
def is_sequence_of_strings(seq):
return is_sequence(seq) and all_strings(seq)
class FCompiler(CCompiler):
"""Abstract base class to define the interface that must be implemented
by real Fortran compiler classes.
Methods that subclasses may redefine:
update_executables(), find_executables(), get_version()
get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()
get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),
get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),
get_flags_arch_f90(), get_flags_debug_f90(),
get_flags_fix(), get_flags_linker_so()
DON'T call these methods (except get_version) after
constructing a compiler instance or inside any other method.
All methods, except update_executables() and find_executables(),
may call the get_version() method.
After constructing a compiler instance, always call customize(dist=None)
method that finalizes compiler construction and makes the following
attributes available:
compiler_f77
compiler_f90
compiler_fix
linker_so
archiver
ranlib
libraries
library_dirs
"""
# These are the environment variables and distutils keys used.
    # Each configuration description is
# (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>)
# The hook names are handled by the self._environment_hook method.
# - names starting with 'self.' call methods in this class
# - names starting with 'exe.' return the key in the executables dict
# - names like 'flags.YYY' return self.get_flag_YYY()
# convert is either None or a function to convert a string to the
    # appropriate type used.
distutils_vars = EnvironmentConfig(
distutils_section='config_fc',
noopt = (None, None, 'noopt', str2bool),
noarch = (None, None, 'noarch', str2bool),
debug = (None, None, 'debug', str2bool),
verbose = (None, None, 'verbose', str2bool),
)
command_vars = EnvironmentConfig(
distutils_section='config_fc',
compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None),
compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None),
compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None),
version_cmd = ('exe.version_cmd', None, None, None),
linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None),
linker_exe = ('exe.linker_exe', 'LD', 'ld', None),
archiver = (None, 'AR', 'ar', None),
ranlib = (None, 'RANLIB', 'ranlib', None),
)
flag_vars = EnvironmentConfig(
distutils_section='config_fc',
f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist),
f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist),
free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist),
fix = ('flags.fix', None, None, flaglist),
opt = ('flags.opt', 'FOPT', 'opt', flaglist),
opt_f77 = ('flags.opt_f77', None, None, flaglist),
opt_f90 = ('flags.opt_f90', None, None, flaglist),
arch = ('flags.arch', 'FARCH', 'arch', flaglist),
arch_f77 = ('flags.arch_f77', None, None, flaglist),
arch_f90 = ('flags.arch_f90', None, None, flaglist),
debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist),
debug_f77 = ('flags.debug_f77', None, None, flaglist),
debug_f90 = ('flags.debug_f90', None, None, flaglist),
flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist),
linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist),
linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist),
ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist),
)
language_map = {'.f':'f77',
'.for':'f77',
'.F':'f77', # XXX: needs preprocessor
'.ftn':'f77',
'.f77':'f77',
'.f90':'f90',
'.F90':'f90', # XXX: needs preprocessor
'.f95':'f90',
}
language_order = ['f90','f77']
# These will be set by the subclass
compiler_type = None
compiler_aliases = ()
version_pattern = None
possible_executables = []
executables = {
'version_cmd' : ["f77", "-v"],
'compiler_f77' : ["f77"],
'compiler_f90' : ["f90"],
'compiler_fix' : ["f90", "-fixed"],
'linker_so' : ["f90", "-shared"],
'linker_exe' : ["f90"],
'archiver' : ["ar", "-cr"],
'ranlib' : None,
}
# If compiler does not support compiling Fortran 90 then it can
# suggest using another compiler. For example, gnu would suggest
# gnu95 compiler type when there are F90 sources.
suggested_f90_compiler = None
compile_switch = "-c"
object_switch = "-o " # Ending space matters! It will be stripped
# but if it is missing then object_switch
# will be prefixed to object file name by
# string concatenation.
library_switch = "-o " # Ditto!
# Switch to specify where module files are created and searched
# for USE statement. Normally it is a string and also here ending
# space matters. See above.
module_dir_switch = None
# Switch to specify where module files are searched for USE statement.
module_include_switch = '-I'
pic_flags = [] # Flags to create position-independent code
src_extensions = ['.for','.ftn','.f77','.f','.f90','.f95','.F','.F90']
obj_extension = ".o"
shared_lib_extension = get_config_var('SO') # or .dll
static_lib_extension = ".a" # or .lib
static_lib_format = "lib%s%s" # or %s%s
shared_lib_format = "%s%s"
exe_extension = ""
_exe_cache = {}
_executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe', 'archiver',
'ranlib']
# This will be set by new_fcompiler when called in
# command/{build_ext.py, build_clib.py, config.py} files.
c_compiler = None
def __init__(self, *args, **kw):
CCompiler.__init__(self, *args, **kw)
self.distutils_vars = self.distutils_vars.clone(self._environment_hook)
self.command_vars = self.command_vars.clone(self._environment_hook)
self.flag_vars = self.flag_vars.clone(self._environment_hook)
self.executables = self.executables.copy()
for e in self._executable_keys:
if e not in self.executables:
self.executables[e] = None
# Some methods depend on .customize() being called first, so
# this keeps track of whether that's happened yet.
self._is_customised = False
def __copy__(self):
obj = self.__new__(self.__class__)
obj.__dict__.update(self.__dict__)
obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)
obj.command_vars = obj.command_vars.clone(obj._environment_hook)
obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)
obj.executables = obj.executables.copy()
return obj
def copy(self):
return self.__copy__()
# Use properties for the attributes used by CCompiler. Setting them
# as attributes from the self.executables dictionary is error-prone,
# so we get them from there each time.
def _command_property(key):
def fget(self):
assert self._is_customised
return self.executables[key]
return property(fget=fget)
version_cmd = _command_property('version_cmd')
compiler_f77 = _command_property('compiler_f77')
compiler_f90 = _command_property('compiler_f90')
compiler_fix = _command_property('compiler_fix')
linker_so = _command_property('linker_so')
linker_exe = _command_property('linker_exe')
archiver = _command_property('archiver')
ranlib = _command_property('ranlib')
# Make our terminology consistent.
def set_executable(self, key, value):
self.set_command(key, value)
def set_commands(self, **kw):
for k, v in kw.items():
self.set_command(k, v)
def set_command(self, key, value):
if not key in self._executable_keys:
raise ValueError(
"unknown executable '%s' for class %s" %
(key, self.__class__.__name__))
if is_string(value):
value = split_quoted(value)
assert value is None or is_sequence_of_strings(value[1:]), (key, value)
self.executables[key] = value
######################################################################
## Methods that subclasses may redefine. But don't call these methods!
## They are private to FCompiler class and may return unexpected
## results if used elsewhere. So, you have been warned..
def find_executables(self):
"""Go through the self.executables dictionary, and attempt to
        find and assign appropriate executables.
Executable names are looked for in the environment (environment
variables, the distutils.cfg, and command line), the 0th-element of
the command list, and the self.possible_executables list.
Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
or the Fortran 90 compiler executable is used, unless overridden
by an environment setting.
        Subclasses should call this if overridden.
"""
assert self._is_customised
exe_cache = self._exe_cache
def cached_find_executable(exe):
if exe in exe_cache:
return exe_cache[exe]
fc_exe = find_executable(exe)
exe_cache[exe] = exe_cache[fc_exe] = fc_exe
return fc_exe
def verify_command_form(name, value):
if value is not None and not is_sequence_of_strings(value):
raise ValueError(
"%s value %r is invalid in class %s" %
(name, value, self.__class__.__name__))
def set_exe(exe_key, f77=None, f90=None):
cmd = self.executables.get(exe_key, None)
if not cmd:
return None
# Note that we get cmd[0] here if the environment doesn't
# have anything set
exe_from_environ = getattr(self.command_vars, exe_key)
if not exe_from_environ:
possibles = [f90, f77] + self.possible_executables
else:
possibles = [exe_from_environ] + self.possible_executables
seen = set()
unique_possibles = []
for e in possibles:
if e == '<F77>':
e = f77
elif e == '<F90>':
e = f90
if not e or e in seen:
continue
seen.add(e)
unique_possibles.append(e)
for exe in unique_possibles:
fc_exe = cached_find_executable(exe)
if fc_exe:
cmd[0] = fc_exe
return fc_exe
self.set_command(exe_key, None)
return None
ctype = self.compiler_type
f90 = set_exe('compiler_f90')
if not f90:
f77 = set_exe('compiler_f77')
if f77:
log.warn('%s: no Fortran 90 compiler found' % ctype)
else:
raise CompilerNotFound('%s: f90 nor f77' % ctype)
else:
f77 = set_exe('compiler_f77', f90=f90)
if not f77:
log.warn('%s: no Fortran 77 compiler found' % ctype)
set_exe('compiler_fix', f90=f90)
set_exe('linker_so', f77=f77, f90=f90)
set_exe('linker_exe', f77=f77, f90=f90)
set_exe('version_cmd', f77=f77, f90=f90)
set_exe('archiver')
set_exe('ranlib')
    def update_executables(self):
"""Called at the beginning of customisation. Subclasses should
override this if they need to set up the executables dictionary.
Note that self.find_executables() is run afterwards, so the
self.executables dictionary values can contain <F77> or <F90> as
the command, which will be replaced by the found F77 or F90
compiler.
"""
pass
def get_flags(self):
"""List of flags common to all compiler types."""
return [] + self.pic_flags
def _get_command_flags(self, key):
cmd = self.executables.get(key, None)
if cmd is None:
return []
return cmd[1:]
def get_flags_f77(self):
"""List of Fortran 77 specific flags."""
return self._get_command_flags('compiler_f77')
def get_flags_f90(self):
"""List of Fortran 90 specific flags."""
return self._get_command_flags('compiler_f90')
def get_flags_free(self):
"""List of Fortran 90 free format specific flags."""
return []
def get_flags_fix(self):
"""List of Fortran 90 fixed format specific flags."""
return self._get_command_flags('compiler_fix')
def get_flags_linker_so(self):
"""List of linker flags to build a shared library."""
return self._get_command_flags('linker_so')
def get_flags_linker_exe(self):
"""List of linker flags to build an executable."""
return self._get_command_flags('linker_exe')
def get_flags_ar(self):
"""List of archiver flags. """
return self._get_command_flags('archiver')
def get_flags_opt(self):
"""List of architecture independent compiler flags."""
return []
def get_flags_arch(self):
"""List of architecture dependent compiler flags."""
return []
def get_flags_debug(self):
"""List of compiler flags to compile with debugging information."""
return []
get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt
get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch
get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug
def get_libraries(self):
"""List of compiler libraries."""
return self.libraries[:]
def get_library_dirs(self):
"""List of compiler library directories."""
return self.library_dirs[:]
def get_version(self, force=False, ok_status=[0]):
assert self._is_customised
version = CCompiler.get_version(self, force=force, ok_status=ok_status)
if version is None:
raise CompilerNotFound()
return version
############################################################
## Public methods:
def customize(self, dist = None):
"""Customize Fortran compiler.
This method gets Fortran compiler specific information from
(i) class definition, (ii) environment, (iii) distutils config
files, and (iv) command line (later overrides earlier).
This method should be always called after constructing a
compiler instance. But not in __init__ because Distribution
instance is needed for (iii) and (iv).
"""
log.info('customize %s' % (self.__class__.__name__))
self._is_customised = True
self.distutils_vars.use_distribution(dist)
self.command_vars.use_distribution(dist)
self.flag_vars.use_distribution(dist)
self.update_executables()
# find_executables takes care of setting the compiler commands,
# version_cmd, linker_so, linker_exe, ar, and ranlib
self.find_executables()
noopt = self.distutils_vars.get('noopt', False)
noarch = self.distutils_vars.get('noarch', noopt)
debug = self.distutils_vars.get('debug', False)
f77 = self.command_vars.compiler_f77
f90 = self.command_vars.compiler_f90
f77flags = []
f90flags = []
freeflags = []
fixflags = []
if f77:
f77flags = self.flag_vars.f77
if f90:
f90flags = self.flag_vars.f90
freeflags = self.flag_vars.free
# XXX Assuming that free format is default for f90 compiler.
fix = self.command_vars.compiler_fix
if fix:
fixflags = self.flag_vars.fix + f90flags
oflags, aflags, dflags = [], [], []
# examine get_flags_<tag>_<compiler> for extra flags
# only add them if the method is different from get_flags_<tag>
def get_flags(tag, flags):
# note that self.flag_vars.<tag> calls self.get_flags_<tag>()
flags.extend(getattr(self.flag_vars, tag))
this_get = getattr(self, 'get_flags_' + tag)
for name, c, flagvar in [('f77', f77, f77flags),
('f90', f90, f90flags),
('f90', fix, fixflags)]:
t = '%s_%s' % (tag, name)
if c and this_get is not getattr(self, 'get_flags_' + t):
flagvar.extend(getattr(self.flag_vars, t))
if not noopt:
get_flags('opt', oflags)
if not noarch:
get_flags('arch', aflags)
if debug:
get_flags('debug', dflags)
fflags = self.flag_vars.flags + dflags + oflags + aflags
if f77:
self.set_commands(compiler_f77=[f77]+f77flags+fflags)
if f90:
self.set_commands(compiler_f90=[f90]+freeflags+f90flags+fflags)
if fix:
self.set_commands(compiler_fix=[fix]+fixflags+fflags)
#XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
linker_so = self.linker_so
if linker_so:
linker_so_flags = self.flag_vars.linker_so
if sys.platform.startswith('aix'):
python_lib = get_python_lib(standard_lib=1)
ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
python_exp = os.path.join(python_lib, 'config', 'python.exp')
linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
self.set_commands(linker_so=linker_so+linker_so_flags)
linker_exe = self.linker_exe
if linker_exe:
linker_exe_flags = self.flag_vars.linker_exe
self.set_commands(linker_exe=linker_exe+linker_exe_flags)
ar = self.command_vars.archiver
if ar:
arflags = self.flag_vars.ar
self.set_commands(archiver=[ar]+arflags)
self.set_library_dirs(self.get_library_dirs())
self.set_libraries(self.get_libraries())
def dump_properties(self):
"""Print out the attributes of a compiler instance."""
props = []
for key in self.executables.keys() + \
['version','libraries','library_dirs',
'object_switch','compile_switch']:
if hasattr(self,key):
v = getattr(self,key)
props.append((key, None, '= '+repr(v)))
props.sort()
pretty_printer = FancyGetopt(props)
for l in pretty_printer.generate_help("%s instance properties:" \
% (self.__class__.__name__)):
if l[:4]==' --':
l = ' ' + l[4:]
print(l)
###################
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
src_flags = {}
if is_f_file(src) and not has_f90_header(src):
flavor = ':f77'
compiler = self.compiler_f77
src_flags = get_f77flags(src)
elif is_free_format(src):
flavor = ':f90'
compiler = self.compiler_f90
if compiler is None:
raise DistutilsExecError('f90 not supported by %s needed for %s'\
% (self.__class__.__name__,src))
else:
flavor = ':fix'
compiler = self.compiler_fix
if compiler is None:
raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\
% (self.__class__.__name__,src))
if self.object_switch[-1]==' ':
o_args = [self.object_switch.strip(),obj]
else:
o_args = [self.object_switch.strip()+obj]
assert self.compile_switch.strip()
s_args = [self.compile_switch, src]
extra_flags = src_flags.get(self.compiler_type,[])
if extra_flags:
log.info('using compile options from source: %r' \
% ' '.join(extra_flags))
command = compiler + cc_args + extra_flags + s_args + o_args \
+ extra_postargs
display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,
src)
try:
self.spawn(command,display=display)
except DistutilsExecError:
msg = str(get_exception())
raise CompileError(msg)
def module_options(self, module_dirs, module_build_dir):
options = []
if self.module_dir_switch is not None:
if self.module_dir_switch[-1]==' ':
options.extend([self.module_dir_switch.strip(),module_build_dir])
else:
options.append(self.module_dir_switch.strip()+module_build_dir)
else:
print('XXX: module_build_dir=%r option ignored' % (module_build_dir))
print('XXX: Fix module_dir_switch for ',self.__class__.__name__)
if self.module_include_switch is not None:
for d in [module_build_dir]+module_dirs:
options.append('%s%s' % (self.module_include_switch, d))
else:
print('XXX: module_dirs=%r option ignored' % (module_dirs))
print('XXX: Fix module_include_switch for ',self.__class__.__name__)
return options
def library_option(self, lib):
return "-l" + lib
def library_dir_option(self, dir):
return "-L" + dir
def link(self, target_desc, objects,
output_filename, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
libraries, library_dirs, runtime_library_dirs = \
self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
libraries)
if is_string(output_dir):
output_filename = os.path.join(output_dir, output_filename)
elif output_dir is not None:
raise TypeError("'output_dir' must be a string or None")
if self._need_link(objects, output_filename):
if self.library_switch[-1]==' ':
o_args = [self.library_switch.strip(),output_filename]
else:
o_args = [self.library_switch.strip()+output_filename]
if is_string(self.objects):
ld_args = objects + [self.objects]
else:
ld_args = objects + self.objects
ld_args = ld_args + lib_opts + o_args
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
command = linker + ld_args
try:
self.spawn(command)
except DistutilsExecError:
msg = str(get_exception())
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def _environment_hook(self, name, hook_name):
if hook_name is None:
return None
if is_string(hook_name):
if hook_name.startswith('self.'):
hook_name = hook_name[5:]
hook = getattr(self, hook_name)
return hook()
elif hook_name.startswith('exe.'):
hook_name = hook_name[4:]
var = self.executables[hook_name]
if var:
return var[0]
else:
return None
elif hook_name.startswith('flags.'):
hook_name = hook_name[6:]
hook = getattr(self, 'get_flags_' + hook_name)
return hook()
else:
return hook_name()
## class FCompiler
_default_compilers = (
# sys.platform mappings
('win32', ('gnu','intelv','absoft','compaqv','intelev','gnu95','g95',
'intelvem', 'intelem')),
('cygwin.*', ('gnu','intelv','absoft','compaqv','intelev','gnu95','g95')),
('linux.*', ('gnu','intel','lahey','pg','absoft','nag','vast','compaq',
'intele','intelem','gnu95','g95')),
('darwin.*', ('nag', 'absoft', 'ibm', 'intel', 'gnu', 'gnu95', 'g95', 'pg')),
('sunos.*', ('sun','gnu','gnu95','g95')),
('irix.*', ('mips','gnu','gnu95',)),
('aix.*', ('ibm','gnu','gnu95',)),
# os.name mappings
('posix', ('gnu','gnu95',)),
('nt', ('gnu','gnu95',)),
('mac', ('gnu','gnu95','pg')),
)
fcompiler_class = None
fcompiler_aliases = None
def load_all_fcompiler_classes():
"""Cache all the FCompiler classes found in modules in the
numpy.distutils.fcompiler package.
"""
from glob import glob
global fcompiler_class, fcompiler_aliases
if fcompiler_class is not None:
return
pys = os.path.join(os.path.dirname(__file__), '*.py')
fcompiler_class = {}
fcompiler_aliases = {}
for fname in glob(pys):
module_name, ext = os.path.splitext(os.path.basename(fname))
module_name = 'numpy.distutils.fcompiler.' + module_name
__import__ (module_name)
module = sys.modules[module_name]
if hasattr(module, 'compilers'):
for cname in module.compilers:
klass = getattr(module, cname)
desc = (klass.compiler_type, klass, klass.description)
fcompiler_class[klass.compiler_type] = desc
for alias in klass.compiler_aliases:
if alias in fcompiler_aliases:
raise ValueError("alias %r defined for both %s and %s"
% (alias, klass.__name__,
fcompiler_aliases[alias][1].__name__))
fcompiler_aliases[alias] = desc
def _find_existing_fcompiler(compiler_types,
osname=None, platform=None,
requiref90=False,
c_compiler=None):
from numpy.distutils.core import get_distribution
dist = get_distribution(always=True)
for compiler_type in compiler_types:
v = None
try:
c = new_fcompiler(plat=platform, compiler=compiler_type,
c_compiler=c_compiler)
c.customize(dist)
v = c.get_version()
if requiref90 and c.compiler_f90 is None:
v = None
new_compiler = c.suggested_f90_compiler
if new_compiler:
log.warn('Trying %r compiler as suggested by %r '
'compiler for f90 support.' % (compiler_type,
new_compiler))
c = new_fcompiler(plat=platform, compiler=new_compiler,
c_compiler=c_compiler)
c.customize(dist)
v = c.get_version()
if v is not None:
compiler_type = new_compiler
if requiref90 and c.compiler_f90 is None:
raise ValueError('%s does not support compiling f90 codes, '
'skipping.' % (c.__class__.__name__))
except DistutilsModuleError:
log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type)
except CompilerNotFound:
log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type)
if v is not None:
return compiler_type
return None
def available_fcompilers_for_platform(osname=None, platform=None):
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
matching_compiler_types = []
for pattern, compiler_type in _default_compilers:
if re.match(pattern, platform) or re.match(pattern, osname):
for ct in compiler_type:
if ct not in matching_compiler_types:
matching_compiler_types.append(ct)
if not matching_compiler_types:
matching_compiler_types.append('gnu')
return matching_compiler_types
def get_default_fcompiler(osname=None, platform=None, requiref90=False,
c_compiler=None):
"""Determine the default Fortran compiler to use for the given
platform."""
matching_compiler_types = available_fcompilers_for_platform(osname,
platform)
compiler_type = _find_existing_fcompiler(matching_compiler_types,
osname=osname,
platform=platform,
requiref90=requiref90,
c_compiler=c_compiler)
return compiler_type
def new_fcompiler(plat=None,
compiler=None,
verbose=0,
dry_run=0,
force=0,
requiref90=False,
c_compiler = None):
"""Generate an instance of some FCompiler subclass for the supplied
platform/compiler combination.
"""
load_all_fcompiler_classes()
if plat is None:
plat = os.name
if compiler is None:
compiler = get_default_fcompiler(plat, requiref90=requiref90,
c_compiler=c_compiler)
if compiler in fcompiler_class:
module_name, klass, long_description = fcompiler_class[compiler]
elif compiler in fcompiler_aliases:
module_name, klass, long_description = fcompiler_aliases[compiler]
else:
msg = "don't know how to compile Fortran code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler." % compiler
msg = msg + " Supported compilers are: %s)" \
% (','.join(fcompiler_class.keys()))
log.warn(msg)
return None
compiler = klass(verbose=verbose, dry_run=dry_run, force=force)
compiler.c_compiler = c_compiler
return compiler
def show_fcompilers(dist=None):
"""Print list of available compilers (used by the "--help-fcompiler"
option to "config_fc").
"""
if dist is None:
from distutils.dist import Distribution
from numpy.distutils.command.config_compiler import config_fc
dist = Distribution()
dist.script_name = os.path.basename(sys.argv[0])
dist.script_args = ['config_fc'] + sys.argv[1:]
try:
dist.script_args.remove('--help-fcompiler')
except ValueError:
pass
dist.cmdclass['config_fc'] = config_fc
dist.parse_config_files()
dist.parse_command_line()
compilers = []
compilers_na = []
compilers_ni = []
if not fcompiler_class:
load_all_fcompiler_classes()
platform_compilers = available_fcompilers_for_platform()
for compiler in platform_compilers:
v = None
log.set_verbosity(-2)
try:
c = new_fcompiler(compiler=compiler, verbose=dist.verbose)
c.customize(dist)
v = c.get_version()
except (DistutilsModuleError, CompilerNotFound):
e = get_exception()
log.debug("show_fcompilers: %s not found" % (compiler,))
log.debug(repr(e))
if v is None:
compilers_na.append(("fcompiler="+compiler, None,
fcompiler_class[compiler][2]))
else:
c.dump_properties()
compilers.append(("fcompiler="+compiler, None,
fcompiler_class[compiler][2] + ' (%s)' % v))
compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))
compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2])
for fc in compilers_ni]
compilers.sort()
compilers_na.sort()
compilers_ni.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("Fortran compilers found:")
pretty_printer = FancyGetopt(compilers_na)
pretty_printer.print_help("Compilers available for this "
"platform, but not found:")
if compilers_ni:
pretty_printer = FancyGetopt(compilers_ni)
pretty_printer.print_help("Compilers not available on this platform:")
print("For compiler details, run 'config_fc --verbose' setup command.")
def dummy_fortran_file():
fo, name = make_temp_file(suffix='.f')
fo.write(" subroutine dummy()\n end\n")
fo.close()
return name[:-2]
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search
_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]',re.I).match
def is_free_format(file):
"""Check if file is in free format Fortran."""
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
f = open_latin1(file,'r')
line = f.readline()
n = 10000 # the number of non-comment lines to scan for hints
if _has_f_header(line):
n = 0
elif _has_f90_header(line):
n = 0
result = 1
while n>0 and line:
line = line.rstrip()
if line and line[0]!='!':
n -= 1
if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
result = 1
break
line = f.readline()
f.close()
return result
def has_f90_header(src):
f = open_latin1(src,'r')
line = f.readline()
f.close()
return _has_f90_header(line) or _has_fix_header(line)
_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)',re.I)
def get_f77flags(src):
"""
Search the first 20 lines of fortran 77 code for line pattern
`CF77FLAGS(<fcompiler type>)=<f77 flags>`
Return a dictionary {<fcompiler type>:<f77 flags>}.
"""
flags = {}
f = open_latin1(src,'r')
i = 0
for line in f.readlines():
i += 1
if i>20: break
m = _f77flags_re.match(line)
if not m: continue
fcname = m.group('fcname').strip()
fflags = m.group('fflags').strip()
flags[fcname] = split_quoted(fflags)
f.close()
return flags
if __name__ == '__main__':
show_fcompilers()
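# --- Illustrative sketch (not part of the original module) -------------------
# Typical consumer-side flow described in the FCompiler docstring: build an
# instance, customize() it, then query the finalized commands and flags. A
# working Fortran compiler must be installed for this to return anything useful.
def example_probe_fortran_compiler():
    fc = new_fcompiler(requiref90=False)
    if fc is None:
        return None
    try:
        fc.customize()          # finalize from class, environment and config
    except CompilerNotFound:
        return None
    return fc.compiler_f77, fc.get_flags()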
| 38.701245
| 112
| 0.592715
|
5a34c47c4ada998bb1cd75a98c48136412d8322f
| 2,718
|
py
|
Python
|
PLC/Methods/GetSliceTicket.py
|
dreibh/planetlab-lxc-plcapi
|
065dfc54a2b668e99eab343d113f1a31fb154b13
|
[
"BSD-3-Clause"
] | null | null | null |
PLC/Methods/GetSliceTicket.py
|
dreibh/planetlab-lxc-plcapi
|
065dfc54a2b668e99eab343d113f1a31fb154b13
|
[
"BSD-3-Clause"
] | null | null | null |
PLC/Methods/GetSliceTicket.py
|
dreibh/planetlab-lxc-plcapi
|
065dfc54a2b668e99eab343d113f1a31fb154b13
|
[
"BSD-3-Clause"
] | null | null | null |
import time
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Slices import Slice, Slices
from PLC.Peers import Peer
from PLC.Auth import Auth
from PLC.GPG import gpg_sign, gpg_verify
from PLC.InitScripts import InitScript, InitScripts
from PLC.Methods.GetSlivers import get_slivers
class GetSliceTicket(Method):
"""
Returns a ticket for, or signed representation of, the specified
slice. Slice tickets may be used to manually instantiate or update
a slice on a node. Present this ticket to the local Node Manager
interface to redeem it.
If the slice has not been added to a node with AddSliceToNodes,
and the ticket is redeemed on that node, it will be deleted the
next time the Node Manager contacts the API.
Users may only obtain tickets for slices of which they are
members. PIs may obtain tickets for any of the slices at their
sites, or any slices of which they are members. Admins may obtain
tickets for any slice.
Returns 1 if successful, faults otherwise.
"""
roles = ['admin', 'pi', 'user', 'peer']
accepts = [
Auth(),
Mixed(Slice.fields['slice_id'],
Slice.fields['name']),
]
returns = Parameter(str, 'Signed slice ticket')
def call(self, auth, slice_id_or_name):
slices = Slices(self.api, [slice_id_or_name])
if not slices:
raise PLCInvalidArgument("No such slice")
slice = slices[0]
# Allow peers to obtain tickets for their own slices
if slice['peer_id'] is not None:
if not isinstance(self.caller, Peer):
raise PLCInvalidArgument("Not a local slice")
elif slice['peer_id'] != self.caller['peer_id']:
raise PLCInvalidArgument("Only the authoritative peer may obtain tickets for that slice")
# Tickets are the canonicalized XML-RPC methodResponse
# representation of a partial GetSlivers() response, i.e.,
initscripts = InitScripts(self.api, {'enabled': True})
data = {
'timestamp': int(time.time()),
'initscripts': initscripts,
'slivers': get_slivers(self.api, self.caller, auth, [slice['slice_id']]),
}
# Sign ticket
signed_ticket = gpg_sign((data,),
self.api.config.PLC_ROOT_GPG_KEY,
self.api.config.PLC_ROOT_GPG_KEY_PUB,
methodresponse = True,
detach_sign = False)
# Verify ticket
gpg_verify(signed_ticket,
self.api.config.PLC_ROOT_GPG_KEY_PUB)
return signed_ticket
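# --- Illustrative client-side sketch (not part of the original method) -------
# Calling this method over XML-RPC. The URL and the password-auth structure are
# placeholders for whatever the local PLC deployment actually uses.
def example_fetch_ticket(slice_name):
    import xmlrpc.client
    api = xmlrpc.client.ServerProxy("https://plc.example.org/PLCAPI/")
    auth = {"AuthMethod": "password",
            "Username": "user@example.org",
            "AuthString": "secret"}
    return api.GetSliceTicket(auth, slice_name)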
| 34.846154
| 105
| 0.632082
|
6a3289f903f123d63fe70c39d4e66eaba78c75a6
| 8,148
|
py
|
Python
|
python/viz/motifs.py
|
icdm-extract/extract
|
18d6e8509f2f35719535e1de6c88874ec533cfb9
|
[
"MIT"
] | 1
|
2019-03-05T01:17:05.000Z
|
2019-03-05T01:17:05.000Z
|
python/viz/motifs.py
|
icdm-extract/extract
|
18d6e8509f2f35719535e1de6c88874ec533cfb9
|
[
"MIT"
] | null | null | null |
python/viz/motifs.py
|
icdm-extract/extract
|
18d6e8509f2f35719535e1de6c88874ec533cfb9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import TruncatedSVD
from ..algo.motif import findMotif, findAllMotifInstances
from ..utils.subseq import simMatFromDistTensor
from viz_utils import plotRect, plotRanges
def findAndPlotMotif(seq, lengths, **kwargs):
motif = findMotif([seq], lengths)
plotMotif(seq, motif, **kwargs)
def findAndPlotMotifInstances(seq, lengths, truthStartEndPairs=None,
saveas=None, findMotifKwargs=None, **kwargs):
# XXX this func will break if seq is a list of seqs, not just one ndarray
if findMotifKwargs:
startIdxs, instances, motif = findAllMotifInstances([seq], lengths,
**findMotifKwargs)
else:
startIdxs, instances, motif = findAllMotifInstances([seq], lengths)
# ------------------------ plot reported motif instances
endIdxs = startIdxs + motif.length
startEndPairs = np.c_[startIdxs, endIdxs]
ax = plotMotifInstances(seq, startEndPairs, **kwargs)
# ------------------------ plot ground truth
if truthStartEndPairs is not None and len(truthStartEndPairs):
try:
if len(truthStartEndPairs[0]) == 1: # single points, not ranges
color = 'k'
truthStartEndPairs = np.asarray(truthStartEndPairs)
truthStartEndPairs = np.c_[truthStartEndPairs, truthStartEndPairs]
else:
color = 'g'
except: # elements are scalars and so len() throws
color = 'k'
truthStartEndPairs = np.asarray(truthStartEndPairs)
truthStartEndPairs = np.c_[truthStartEndPairs, truthStartEndPairs]
# make a vert line (spanning less than full graph height)
# where the labels are
yMin, yMax = np.min(seq), np.max(seq)
yRange = yMax - yMin
lineMin, lineMax = [yMin + frac * yRange for frac in (.4, .6)]
plotMotifInstances(None, truthStartEndPairs, ax=ax,
color=color, linestyle='-', lw=2,
ymin=lineMin, ymax=lineMax)
if saveas:
plt.savefig(saveas)
else:
plt.show()
def plotMotif(seq, motif, showExtracted=True, color='gray',
title=None, saveas=None):
start1 = motif[3]
start2 = motif[4]
end1 = start1 + len(motif[0]) - 1
end2 = start2 + len(motif[1]) - 1
# just show where the motif is in the original signal
if not showExtracted:
_, ax = plt.subplots()
ax.autoscale(False)
ax.plot(seq)
plotRect(ax, start1, end1, color=color)
plotRect(ax, start2, end2, color=color)
if saveas:
plt.savefig(saveas)
else:
plt.show()
return ax
# set up axes
ax1 = plt.subplot2grid((2,2), (0,0), colspan=2)
ax2 = plt.subplot2grid((2,2), (1,0))
ax3 = plt.subplot2grid((2,2), (1,1))
ax1.autoscale(tight=True)
ax2.autoscale(tight=True)
ax3.autoscale(tight=True)
# plot raw ts on top and motif instances on the bottom
ax1.plot(seq, lw=2)
ax2.plot(motif[0], lw=2)
ax3.plot(motif[1], lw=2)
ax1.set_title('Original Signal')
ax2.set_title('Motif Instance at %d' % start1)
ax3.set_title('Motif Instance at %d' % start2)
# draw rects in the ts where the motif is
plotRect(ax1, start1, end1, color=color)
plotRect(ax1, start2, end2, color=color)
plt.tight_layout()
if saveas:
plt.savefig(saveas)
else:
plt.show()
return ax1, ax2, ax3
def plotMotifInstances(seq, startEndIdxPairs, title=None, ax=None,
saveas=None, **kwargs):
if ax is None:
_, ax = plt.subplots()
# ax.autoscale(False) # makes it not actually work...
if seq is not None and len(seq): # plot original seq if one is provided
ax.plot(seq, **kwargs)
plotRanges(ax, startEndIdxPairs, **kwargs)
if not title:
title = "Motif Instances in Data"
ax.set_title(title)
if seq is not None and len(seq):
ax.set_ylim([np.min(seq), np.max(seq)])
ax.set_xlim([0, len(seq)])
if saveas:
plt.savefig(saveas)
return ax
def showPairwiseSims(origSignal, m, simMat, clamp=True, pruneCorrAbove=-1,
plotMotifs=True, showEigenVect=False, hasPadding=True, saveas=None):
print "origSignal shape", origSignal.shape
# padLen = len(origSignal) - simMat.shape[1]
padLen = m - 1 if hasPadding else 0
subseqLen = m
plt.figure(figsize=(8,10))
if showEigenVect:
ax1 = plt.subplot2grid((20,10), (0,0), colspan=8, rowspan=5)
ax2 = plt.subplot2grid((20,10), (5,0), colspan=8, rowspan=15)
ax3 = plt.subplot2grid((20,10), (5,8), colspan=2, rowspan=15)
ax3.autoscale(tight=True)
ax3.set_title('Extracted')
else:
ax1 = plt.subplot2grid((4,2), (0,0), colspan=2)
ax2 = plt.subplot2grid((4,2), (1,0), colspan=2, rowspan=3)
ax1.autoscale(tight=True)
ax2.autoscale(tight=True)
ax1.set_title('Original Signal')
ax1.set_ylabel('Value')
if pruneCorrAbove > 0:
ax2.set_title('Subsequence Cosine Similarities to Dictionary Sequences')
else:
ax2.set_title('Subsequence Pairwise Cosine Similarities')
ax2.set_xlabel('Subsequence Start Index')
ax2.set_ylabel('"Dictionary" Sequence Number')
seq = origSignal
imgMat = simMat
print "imgMat shape: ", imgMat.shape
# # show magnitude of similarities in each row in descending order; there are
# # only about 60 entries > .01 in *any* row for msrc, and way fewer in most
# # plt.figure()
# # thresh = .5
# # sortedSimsByRow = np.sort(imgMat, axis=1)
# # sortedSimsByRow = sortedSimsByRow[:, ::-1]
# # nonzeroCols = np.sum(sortedSimsByRow, axis=0) > thresh # ignore tiny similarities
# # sortedSimsByRow = sortedSimsByRow[:, nonzeroCols]
# # # plt.imshow(sortedSimsByRow)
# # # plt.plot(np.mean(sortedSimsByRow, axis=1))
# # plt.plot(np.sum(sortedSimsByRow > thresh, axis=1)) # entries > thresh per row
# if pruneCorrAbove > 0.:
# print "ImgMat Shape:"
# print imgMat.shape
# imgMat = removeCorrelatedRows(imgMat, pruneCorrAbove)
# print imgMat.shape
# print "NaNs at:", np.where(np.isnan(imgMat))[0]
# print "Infs at:", np.where(np.isinf(imgMat))[0]
# power iteration to see what we get
if showEigenVect:
width = int(subseqLen * 1.5)
nRows, nCols = imgMat.shape
nPositions = nCols - width + 1
if nPositions > 1:
elementsPerPosition = nRows * width # size of 2d slice
dataMat = np.empty((nPositions, elementsPerPosition))
# for i in range(nPositions): # step by 1
for i in range(0, nPositions, width): # step by width, so non-overlapping
startCol = i
endCol = startCol + width
data = imgMat[:, startCol:endCol]
dataMat[i] = data.flatten()
# note: power iteration applies to a square (covariance) matrix, so a truncated SVD is used instead
# v = np.ones(elementsPerPosition) / elementsPerPosition # uniform start vect
# for i in range(3):
# v = np.dot(dataMat.T, v)
svd = TruncatedSVD(n_components=1, random_state=42)
svd.fit(dataMat)
v = svd.components_[0]
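# The leading right-singular vector is the best rank-1 summary of the stacked
# similarity windows; reshaping it back to (nRows, width) gives a 2D "filter"
# that shows the dominant pattern shared across positions.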
learnedFilt = v.reshape((nRows, width))
ax3.imshow(learnedFilt) # seems to be pretty good
# plt.show()
ax1.plot(seq)
ax2.imshow(imgMat, interpolation='nearest', aspect='auto')
plt.tight_layout()
if plotMotifs:
searchSeq = seq
print "searchSeq shape:", searchSeq.shape
motif = findMotif([searchSeq], subseqLen) # motif of min length
start1 = motif[3]
start2 = motif[4]
end1 = start1 + len(motif[0]) - 1
end2 = start2 + len(motif[1]) - 1
ax2.autoscale(False)
color = 'grey'
plotRect(ax1, start1, end1, color=color)
plotRect(ax2, start1, end1, color=color)
plotRect(ax1, start2, end2, color=color)
plotRect(ax2, start2, end2, color=color)
print "imgMat shape: ", imgMat.shape
print "padLen: ", padLen
if padLen:
searchSeq = imgMat[:,:-padLen].T
else:
searchSeq = imgMat.T
print "searchSeq shape:", searchSeq.shape
print "subseqLen:", subseqLen
motif = findMotif([searchSeq], subseqLen) # motif of min length
start1 = motif[3]
start2 = motif[4]
end1 = start1 + len(motif[0]) - 1
end2 = start2 + len(motif[1]) - 1
print [start1, end1, start2, end2]
color = 'm' # magenta
plotRect(ax1, start1, end1, color=color)
plotRect(ax2, start1, end1, color=color)
plotRect(ax1, start2, end2, color=color)
plotRect(ax2, start2, end2, color=color)
if saveas:
plt.savefig(saveas)
else:
plt.show()
if showEigenVect:
return ax1, ax2, ax3
return ax1, ax2
def showPairwiseDists(origSignal, m, Dtensor, **kwargs):
padLen = len(origSignal) - Dtensor.shape[1]
simMat = simMatFromDistTensor(Dtensor, m, padLen)
showPairwiseSims(origSignal, m, simMat, **kwargs)
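# A minimal, illustrative call of the two entry points above (``sig`` is assumed
# to be a 1-D numpy array and ``Dtensor`` a distance tensor produced elsewhere in
# this module; the subsequence length 64 is arbitrary):
#
#     ax1, ax2 = showPairwiseSims(sig, 64, simMat, plotMotifs=False)
#     showPairwiseDists(sig, 64, Dtensor, plotMotifs=True)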
| 30.402985
| 87
| 0.695508
|
10aa41e19a171f1a075c8c7ccbde004092fa3e9c
| 3,235
|
py
|
Python
|
validator/schemas/specs.py
|
ChauffeurPrive/nestor-api
|
364b5f034eeb929932a5a8c3f3b00d1275a7ae5b
|
[
"Apache-2.0"
] | 2
|
2020-08-17T09:59:03.000Z
|
2020-08-17T09:59:23.000Z
|
validator/schemas/specs.py
|
ChauffeurPrive/nestor-api
|
364b5f034eeb929932a5a8c3f3b00d1275a7ae5b
|
[
"Apache-2.0"
] | 83
|
2020-06-12T14:37:35.000Z
|
2022-01-26T14:10:10.000Z
|
validator/schemas/specs.py
|
ChauffeurPrive/nestor-api
|
364b5f034eeb929932a5a8c3f3b00d1275a7ae5b
|
[
"Apache-2.0"
] | 1
|
2020-07-02T14:33:45.000Z
|
2020-07-02T14:33:45.000Z
|
"""Schemas managed by Nestor"""
SPECS = {
"variables": {
"confSubObjects": {"type": "object", "patternProperties": {"": {"type": "string",},},},
"subObjectSecrets": {
"type": "object",
"patternProperties": {
"": {
"type": "object",
"properties": {"name": {"type": "string",}, "key": {"type": "string",},},
},
},
},
"projectSubObjects": {"type": "object", "patternProperties": {"": {"type": "string",},},},
},
"scales": {
"type": "object",
"patternProperties": {"": {"$ref": "#/definitions/scales/definitions/scaleSubProperty",},},
"definitions": {
"scaleSubProperty": {
"type": "object",
"properties": {
"maxReplicas": {"type": "integer",},
"minReplicas": {"type": "integer",},
"targetCPUUtilizationPercentage": {
"type": "integer",
"maximum": 100,
"minimum": 0,
},
},
"required": ["maxReplicas", "minReplicas", "targetCPUUtilizationPercentage",],
"additionalProperties": False,
},
},
},
"crons": {
"type": "object",
"patternProperties": {"": {"$ref": "#/definitions/crons/definitions/cronProperty",},},
"additionalProperties": False,
"definitions": {
"cronProperty": {
"type": "object",
"properties": {
"schedule": {
"type": "string",
"pattern": "([0-9]{0,2}[*/,-]{0,3}[0-9]*\\s?){5}",
},
"concurrency_policy": {"type": "string",},
"suspend": {"type": "boolean",},
},
"required": ["schedule", "concurrency_policy"],
"additionalProperties": False,
},
},
},
"probes": {
"type": "object",
"patternProperties": {
"": {
"properties": {
"path": {"type": "string",},
"delay": {"type": "integer",},
"timeout": {"type": "integer",},
},
"required": ["path"],
},
},
},
"resources": {
"type": "object",
"patternProperties": {
"": {
"type": "object",
"properties": {
"limits": {"$ref": "#/definitions/resources/definitions/resourcesSubProperty",},
"requests": {
"$ref": "#/definitions/resources/definitions/resourcesSubProperty",
},
},
},
},
"definitions": {
"resourcesSubProperty": {
"type": "object",
"properties": {
"cpu": {"anyOf": [{"type": "string",}, {"type": "number", "minimum": 0,},],},
"memory": {"type": "string",},
},
},
},
},
}
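# A minimal sketch of checking a config fragment against one of these schemas with
# the ``jsonschema`` package (illustrative only; nestor-api's own validator may wire
# the schemas differently). ``SPECS["probes"]`` contains no "$ref" pointers, so it
# can be used directly:
#
#     import jsonschema
#
#     probes_config = {"liveness": {"path": "/health", "delay": 5}}
#     jsonschema.validate(instance=probes_config, schema=SPECS["probes"])
#
# The "scales", "crons" and "resources" entries reference "#/definitions/...", so
# they are meant to be embedded under a parent schema that exposes them in its
# "definitions" section before validation.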
| 34.414894
| 100
| 0.374652
|
2e9b99a9f3b2a8cc525aa4db5abc3f2b3b943010
| 13,922
|
py
|
Python
|
waad/utils/indicators.py
|
ANSSI-FR/WAAD
|
276820be3e1aa45c52351b481105ab95a069b3e0
|
[
"BSD-2-Clause"
] | 13
|
2021-04-08T15:59:57.000Z
|
2022-03-28T14:04:23.000Z
|
waad/utils/indicators.py
|
ANSSI-FR/WAAD
|
276820be3e1aa45c52351b481105ab95a069b3e0
|
[
"BSD-2-Clause"
] | null | null | null |
waad/utils/indicators.py
|
ANSSI-FR/WAAD
|
276820be3e1aa45c52351b481105ab95a069b3e0
|
[
"BSD-2-Clause"
] | 1
|
2022-03-08T19:50:36.000Z
|
2022-03-08T19:50:36.000Z
|
"""This module implements the computation of some indicators on assets."""
import bisect
from datetime import datetime, timedelta
from enum import Enum
from functools import partial
import numpy as np
import pandas as pd
from tqdm import tqdm
from typing import Any, Callable, Dict, List, Optional
from waad.utils.asset import Asset
from waad.utils.config import ANOMALIES_SCORES
from waad.utils.postgreSQL_utils import Table
from waad.utils.rule import Rule
from waad.utils.time_series_utils import StatSeries, TimeSeries
class Indicator:
"""Defines an `Indicator` on a StatSeries and the conditions for which it applies.
Attributes:
name (str): Name of the indicator to compute.
additional_columns (List[str]): Initially, only 'systemtime' is requested along with asset_1 and asset_2 (see `ComputeIndicators` below).
All columns from `additional_columns` will be requested as well if needed in an `Indicator`.
step_by_step_computation (Callable): For each `time_step`, all authentications corresponding to asset_1 are gathered and `step_by_step_computation`
is applied on the corresponding authentications dataframe. Examples of such functions are: lambda window: window.shape[0] (to get the number of asset_1
authentications for every time_step), lambda window: set(window['asset_2'].unique()) (to get all distinct asset_2 reached per `time_step` under
the rule), and get_privileges(window: pd.DataFrame), defined below, to get all distinct privileges granted per `time_step`. `step_by_step_computation`
must be a function taking as input a window of authentications as built in `ComputeIndicators` and return a float for each `time_step`.
intermediary_content_function (Optional[Callable]): After `step_by_step_computation` we are left with a list of float for each `time_step`.
If `intermediary_content_function` is `None`, Timeseries.intermediary_content will be set to None. Else, `intermediary_content_function` is applied
on the list to compute Timeseries.intermediary_content. For example, in the case of 'nb_new_assets_reached', after `step_by_step_computation` we
are left with the list of all asset_2 reached at each `time_step`, with `intermediary_content_function` we want to keep in memory the new assets reached
at each `time_step`. `intermediary_content_function` must be a function taking as input a list of floats and returning something from it (if we want exactly
the output of `step_by_step_computation`, just write identity lambda x: x).
time_series_function (Optional[Callable]): This function populates the attribute `series` of the `Timeseries` returned after computation. If None, it uses
the result of `step_by_step_computation` as a result, else it takes as an input the output of `step_by_step_computation` and returns a list, computed
from it. `time_series_function` must be either `None` or a function taking a list as input and returning a list of the same size.
anomalies_detector (Optional[Callable]): This function defines how to compute possible anomalies on the `StatSeries` corresponding to the indicator. If
`None`, StatSeries.custom_outlier_detection will be used with the parameters corresponding to the indicator defined in config.py. Else, `anomalies_detector`
must be a function taking StatSeries.series as input and returning the detected anomalies as a list of indices.
"""
def __init__(self,
name: str,
additional_columns: List[str] = [],
step_by_step_computation: Callable = lambda window: window.shape[0],
intermediary_content_function: Optional[Callable] = None,
time_series_function: Optional[Callable] = None,
anomalies_detector: Optional[Callable] = None
):
self.name = name
self.additional_columns = additional_columns
self.step_by_step_computation = step_by_step_computation
self.intermediary_content_function = intermediary_content_function
self.time_series_function = time_series_function
self.anomalies_detector = anomalies_detector
def __repr__(self):
return self.name
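# A minimal sketch of building an ``Indicator`` by hand; it mirrors the
# ``Indicators`` enum defined at the bottom of this module, so no new API is
# assumed:
#
#     nb_auth = Indicator(
#         name='nb_authentications',
#         step_by_step_computation=lambda window: window.shape[0],
#     )
#     nb_assets = Indicator(
#         name='nb_assets_reached',
#         step_by_step_computation=lambda window: set(window['asset_2'].unique()),
#         intermediary_content_function=lambda x: x,
#         time_series_function=lambda x: [len(e) for e in x],
#     )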
class ComputeIndicators:
"""This class defines a framework to compute timeseries indicators from a dataset.
Attributes:
table (Table): `Table` object pointing to the postgreSQL dataset.
rule (Rule): `Rule` object defining how to filter dataset rows based on analysts' requirements.
time_step (int): Time step in between each time series tick.
indicator_objects (List[Indicator]): List of all types of indicators we want to compute for each asset_1 object.
"""
def __init__(self, table: Table, rule: Rule, indicator_objects: List[Indicator], time_step: int = 86400):
self.table = table
self.rule = rule
self.indicator_objects = indicator_objects
self.time_step = time_step
self.indicators: Dict[Asset, Dict[Indicator, TimeSeries]] = {}
def run(self):
cache = {}
additional_columns = [e for indicator in self.indicator_objects for e in indicator.additional_columns]
def update_cache(row: List, dataset_columns: List[str], condition: Dict):
row = {col: value for col, value in zip(dataset_columns, row)}
if condition['filter_function'](row):
asset_1 = condition['asset_1'](row)
asset_2 = condition['asset_2'](row)
line_summary = {'systemtime': row['systemtime'], 'asset_2': asset_2}
line_summary.update({col: row[col] for col in additional_columns})
try:
cache[asset_1].append(line_summary)
except Exception:
cache[asset_1] = [line_summary]
for condition in self.rule.conditions:
sql_command = self.table.custom_psql_request({'fields_to_request': '*', 'filters': [condition['pre_filters']]})
cursor = self.table.database.get_iterator_from_command(sql_command, chunk_size=1000000)
row_0 = next(cursor)
columns = [desc[0] for desc in cursor.description]
update_cache(row_0, columns, condition)
for row in cursor:
update_cache(row, columns, condition)
cursor.close()
self.table.database.disconnect()
for asset, asset_authentications in cache.items():
self.indicators[asset] = ComputeIndicators.compute_indicators_over_time(
pd.DataFrame(asset_authentications), indicators=self.indicator_objects, time_step=self.time_step
)
@staticmethod
def compute_new_items(items_sets: List[set], legitimate_model_duration: int = 25, enrich_model=True):
"""Compute the new items reached from a list of ``items_sets``.
This can be used, for example, to compute the number of new computers reached or the number of new privileges granted.
Args:
items_sets: List of all items reached ``time_step`` after ``time_step``.
legitimate_model_duration: Percentage of the dataset used to build the corresponding legitimate model for items reached.
enrich_model: Boolean to know if after ``legitimate_model_duration`` new items are used to enrich the legitimate model.
Returns:
The new items reached ``time_step`` after ``time_step``.
"""
new_items: List[set] = []
legitimate_model = set()
for index, items in enumerate(items_sets):
if index <= int(legitimate_model_duration / 100 * len(items_sets)):
new_items.append(set())
legitimate_model.update(items)
else:
new_items.append(items.difference(legitimate_model))
if enrich_model:
legitimate_model.update(items)
return new_items
@staticmethod
def compute_nb_new_items(items_sets: List[set], legitimate_model_duration: int = 25, enrich_model=True):
"""Compute the number of new items reached from a list of ``items_sets``.
This can be used, for example, to compute the number of new computers reached or the number of new privileges granted.
Args:
items_sets: List of all items reached ``time_step`` after ``time_step``.
legitimate_model_duration: Percentage of the dataset used to build the corresponding legitimate model for items reached.
enrich_model: Boolean to know if after ``legitimate_model_duration`` new items are used to enrich the legitimate model.
Returns:
The number of new items reached ``time_step`` after ``time_step``.
"""
return [len(e) for e in ComputeIndicators.compute_new_items(items_sets=items_sets, legitimate_model_duration=legitimate_model_duration, enrich_model=enrich_model)]
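# Worked example with the default ``legitimate_model_duration`` of 25: the first
# int(0.25 * 4) + 1 = 2 steps build the legitimate model, and later steps report
# only what is new relative to it (the model being enriched along the way):
#
#     sets = [{'a'}, {'a', 'b'}, {'b', 'c'}, {'c'}]
#     ComputeIndicators.compute_new_items(sets)     # [set(), set(), {'c'}, set()]
#     ComputeIndicators.compute_nb_new_items(sets)  # [0, 0, 1, 0]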
@staticmethod
def get_privileges(window: pd.DataFrame):
res = set()
window['privilegelist'].apply(lambda x: res.update(set(x.split(':'))))
try:
res.remove('?')
except Exception:
pass
return res
@staticmethod
def compute_indicators(window: pd.DataFrame, indicators: List[Indicator]) -> Dict:
"""Compute some indicators over ``window``.
Args:
window: Pandas ``Dataframe``, slice of the dataset.
indicators: List containing the ``Indicator`` objects.
Returns:
A dictionary containing the indicators computed on ``window``.
"""
return {indicator: indicator.step_by_step_computation(window) for indicator in indicators}
@staticmethod
def compute_indicators_over_time(data: pd.DataFrame, indicators: List[Indicator], time_step: int = 86400):
"""Compute some indicators over time, should be way faster because it uses `bisect` search.
Args:
data: Pandas ``Dataframe``, part of the dataset.
indicators: List containing the ``Indicator`` objects to compute.
time_step: Time step in seconds desired between each loop of indicators computation.
Returns:
A dictionary containing the indicators computed on each time ``window``.
"""
step_by_step_results = {indicator: [] for indicator in indicators}
# TODO: Implement better inclusion of a posteriori indicators
if data.shape[0] == 0:
return [TimeSeries(name=indicator.name, series=[], time_step=time_step) for indicator in indicators]
# ISO8601 is not handled yet by datetime so as a fix we replace 'Z' by '+00:00'
data = data.sort_values("systemtime")
data.reset_index(drop=True, inplace=True)
is_iso8601 = "Z" in data["systemtime"][0]
if is_iso8601:
data["systemtime"] = data["systemtime"].apply(lambda x: str(x).replace("Z", "+00:00").replace("T", " "))
start_time = data.systemtime.iloc[0]
end_time = data.systemtime.iloc[-1]
current_time = start_time
current_index = 0
while current_time < end_time:
to_insert = (datetime.fromisoformat(current_time) + timedelta(seconds=time_step)).isoformat().replace("T", " ")
index = bisect.bisect_left(data.systemtime, to_insert, lo=current_index)
window = data.iloc[current_index:index]
window_indicators = ComputeIndicators.compute_indicators(window, indicators=indicators)
for indicator, v in window_indicators.items():
step_by_step_results[indicator].append(v)
current_index = index
current_time = to_insert
res = {}
for indicator in indicators:
intermediary_content = None
series = step_by_step_results[indicator]
if indicator.intermediary_content_function is not None:
intermediary_content = indicator.intermediary_content_function(series)
if indicator.time_series_function is not None:
series = indicator.time_series_function(intermediary_content)
res[indicator] = TimeSeries(name=indicator.name, series=series, time_step=time_step, start_time=start_time, intermediary_content=intermediary_content)
return res
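# A minimal usage sketch (the DataFrame columns below match what ``run`` puts in
# the cache; the timestamps and asset names are made up for illustration):
#
#     df = pd.DataFrame({
#         'systemtime': ['2021-01-01 00:00:05', '2021-01-01 00:10:00', '2021-01-02 01:00:00'],
#         'asset_2': ['HOST-A', 'HOST-B', 'HOST-A'],
#     })
#     result = ComputeIndicators.compute_indicators_over_time(
#         df, indicators=[Indicators.NB_AUTHENTICATIONS.value], time_step=86400,
#     )
#     # result maps each Indicator to a TimeSeries of per-day values.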
class Indicators(Enum):
"""Enum to associate an indicator to its corresponding function."""
NB_AUTHENTICATIONS = Indicator(name='nb_authentications', step_by_step_computation=lambda window: window.shape[0])
NB_ASSETS_REACHED = Indicator(
name='nb_assets_reached',
step_by_step_computation=lambda window: set(window['asset_2'].unique()),
intermediary_content_function=lambda x: x,
time_series_function=lambda x: [len(e) for e in x]
)
NB_NEW_ASSETS_REACHED = Indicator(
name='nb_new_assets_reached',
step_by_step_computation=lambda window: set(window['asset_2'].unique()),
intermediary_content_function=lambda x: ComputeIndicators.compute_new_items(x, ANOMALIES_SCORES['nb_new_assets_reached']['legitimate_model_duration']),
time_series_function=lambda x: ComputeIndicators.compute_nb_new_items(x, ANOMALIES_SCORES['nb_new_assets_reached']['legitimate_model_duration'])
)
NB_PRIVILEGES_GRANTED = Indicator(
name='nb_privileges_granted',
additional_columns=['privilegelist'],
step_by_step_computation=ComputeIndicators.get_privileges,
intermediary_content_function=lambda x: x,
time_series_function=lambda x: [len(e) for e in x],
anomalies_detector=lambda series: StatSeries.detect_abnormal_outbreak_static(series, ANOMALIES_SCORES['nb_privileges_granted']['legitimate_model_duration'])
)
| 50.810219
| 171
| 0.690418
|
a4f2d798b1b1b426e04a125f35f8f5ba51c419ad
| 23,477
|
py
|
Python
|
mypy/test/data.py
|
aghast/mypy
|
13ae58ffe8bedb7da9f4c657297f0d61e681d671
|
[
"PSF-2.0"
] | 1
|
2021-01-19T09:59:44.000Z
|
2021-01-19T09:59:44.000Z
|
mypy/test/data.py
|
aghast/mypy
|
13ae58ffe8bedb7da9f4c657297f0d61e681d671
|
[
"PSF-2.0"
] | 1
|
2020-10-24T15:04:11.000Z
|
2020-10-24T15:04:11.000Z
|
mypy/test/data.py
|
aghast/mypy
|
13ae58ffe8bedb7da9f4c657297f0d61e681d671
|
[
"PSF-2.0"
] | 1
|
2020-12-22T10:18:11.000Z
|
2020-12-22T10:18:11.000Z
|
"""Utilities for processing .test files containing test case descriptions."""
import os.path
import os
import tempfile
import posixpath
import re
import shutil
from abc import abstractmethod
import sys
import pytest
from typing import List, Tuple, Set, Optional, Iterator, Any, Dict, NamedTuple, Union
from mypy.test.config import test_data_prefix, test_temp_dir, PREFIX
root_dir = os.path.normpath(PREFIX)
# File modify/create operation: copy module contents from source_path.
UpdateFile = NamedTuple('UpdateFile', [('module', str),
('source_path', str),
('target_path', str)])
# File delete operation: delete module file.
DeleteFile = NamedTuple('DeleteFile', [('module', str),
('path', str)])
FileOperation = Union[UpdateFile, DeleteFile]
def parse_test_case(case: 'DataDrivenTestCase') -> None:
"""Parse and prepare a single case from suite with test case descriptions.
This method is part of the setup phase, just before the test case is run.
"""
test_items = parse_test_data(case.data, case.name)
base_path = case.suite.base_path
if case.suite.native_sep:
join = os.path.join
else:
join = posixpath.join
out_section_missing = case.suite.required_out_section
normalize_output = True
files = [] # type: List[Tuple[str, str]] # path and contents
output_files = [] # type: List[Tuple[str, str]] # path and contents for output files
output = [] # type: List[str] # Regular output errors
output2 = {} # type: Dict[int, List[str]] # Output errors for incremental, runs 2+
deleted_paths = {} # type: Dict[int, Set[str]] # from run number of paths
stale_modules = {} # type: Dict[int, Set[str]] # from run number to module names
rechecked_modules = {} # type: Dict[int, Set[str]] # from run number to module names
triggered = [] # type: List[str] # Active triggers (one line per incremental step)
targets = {} # type: Dict[int, List[str]] # Fine-grained targets (per fine-grained update)
# Process the parsed items. Each item has a header of form [id args],
# optionally followed by lines of text.
item = first_item = test_items[0]
for item in test_items[1:]:
if item.id == 'file' or item.id == 'outfile':
# Record an extra file needed for the test case.
assert item.arg is not None
contents = expand_variables('\n'.join(item.data))
file_entry = (join(base_path, item.arg), contents)
if item.id == 'file':
files.append(file_entry)
else:
output_files.append(file_entry)
elif item.id in ('builtins', 'builtins_py2'):
# Use an alternative stub file for the builtins module.
assert item.arg is not None
mpath = join(os.path.dirname(case.file), item.arg)
fnam = 'builtins.pyi' if item.id == 'builtins' else '__builtin__.pyi'
with open(mpath, encoding='utf8') as f:
files.append((join(base_path, fnam), f.read()))
elif item.id == 'typing':
# Use an alternative stub file for the typing module.
assert item.arg is not None
src_path = join(os.path.dirname(case.file), item.arg)
with open(src_path, encoding='utf8') as f:
files.append((join(base_path, 'typing.pyi'), f.read()))
elif re.match(r'stale[0-9]*$', item.id):
passnum = 1 if item.id == 'stale' else int(item.id[len('stale'):])
assert passnum > 0
modules = (set() if item.arg is None else {t.strip() for t in item.arg.split(',')})
stale_modules[passnum] = modules
elif re.match(r'rechecked[0-9]*$', item.id):
passnum = 1 if item.id == 'rechecked' else int(item.id[len('rechecked'):])
assert passnum > 0
modules = (set() if item.arg is None else {t.strip() for t in item.arg.split(',')})
rechecked_modules[passnum] = modules
elif re.match(r'targets[0-9]*$', item.id):
passnum = 1 if item.id == 'targets' else int(item.id[len('targets'):])
assert passnum > 0
reprocessed = [] if item.arg is None else [t.strip() for t in item.arg.split(',')]
targets[passnum] = reprocessed
elif item.id == 'delete':
# File to delete during a multi-step test case
assert item.arg is not None
m = re.match(r'(.*)\.([0-9]+)$', item.arg)
assert m, 'Invalid delete section: {}'.format(item.arg)
num = int(m.group(2))
assert num >= 2, "Can't delete during step {}".format(num)
full = join(base_path, m.group(1))
deleted_paths.setdefault(num, set()).add(full)
elif re.match(r'out[0-9]*$', item.id):
if item.arg == 'skip-path-normalization':
normalize_output = False
tmp_output = [expand_variables(line) for line in item.data]
if os.path.sep == '\\' and normalize_output:
tmp_output = [fix_win_path(line) for line in tmp_output]
if item.id == 'out' or item.id == 'out1':
output = tmp_output
else:
passnum = int(item.id[len('out'):])
assert passnum > 1
output2[passnum] = tmp_output
out_section_missing = False
elif item.id == 'triggered' and item.arg is None:
triggered = item.data
else:
raise ValueError(
'Invalid section header {} in {} at line {}'.format(
item.id, case.file, item.line))
if out_section_missing:
raise ValueError(
'{}, line {}: Required output section not found'.format(
case.file, first_item.line))
for passnum in stale_modules.keys():
if passnum not in rechecked_modules:
# If the set of rechecked modules isn't specified, make it the same as the set
# of modules with a stale public interface.
rechecked_modules[passnum] = stale_modules[passnum]
if (passnum in stale_modules
and passnum in rechecked_modules
and not stale_modules[passnum].issubset(rechecked_modules[passnum])):
raise ValueError(
('Stale modules after pass {} must be a subset of rechecked '
'modules ({}:{})').format(passnum, case.file, first_item.line))
input = first_item.data
expand_errors(input, output, 'main')
for file_path, contents in files:
expand_errors(contents.split('\n'), output, file_path)
case.input = input
case.output = output
case.output2 = output2
case.lastline = item.line
case.files = files
case.output_files = output_files
case.expected_stale_modules = stale_modules
case.expected_rechecked_modules = rechecked_modules
case.deleted_paths = deleted_paths
case.triggered = triggered or []
case.normalize_output = normalize_output
case.expected_fine_grained_targets = targets
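# For orientation, the data parsed above comes from ``.test`` files whose cases
# look roughly like this (section names correspond to the item ids handled in the
# loop above):
#
#     [case testExample]
#     x = 1 # E: some expected error
#     [file other.py]
#     y = 2
#
# or, with an explicit output section instead of the inline '# E:' comment:
#
#     [case testExample2]
#     x = 1
#     [out]
#     main:1: error: some expected error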
class DataDrivenTestCase(pytest.Item):
"""Holds parsed data-driven test cases, and handles directory setup and teardown."""
# Override parent member type
parent = None # type: DataSuiteCollector
input = None # type: List[str]
output = None # type: List[str] # Output for the first pass
output2 = None # type: Dict[int, List[str]] # Output for runs 2+, indexed by run number
# full path of test suite
file = ''
line = 0
# (file path, file content) tuples
files = None # type: List[Tuple[str, str]]
expected_stale_modules = None # type: Dict[int, Set[str]]
expected_rechecked_modules = None # type: Dict[int, Set[str]]
expected_fine_grained_targets = None # type: Dict[int, List[str]]
# Whether or not we should normalize the output to standardize things like
# forward vs backward slashes in file paths for Windows vs Linux.
normalize_output = True
# Extra attributes used by some tests.
lastline = None # type: int
output_files = None # type: List[Tuple[str, str]] # Path and contents for output files
deleted_paths = None # type: Dict[int, Set[str]] # Mapping run number -> paths
triggered = None # type: List[str] # Active triggers (one line per incremental step)
def __init__(self,
parent: 'DataSuiteCollector',
suite: 'DataSuite',
file: str,
name: str,
writescache: bool,
only_when: str,
platform: Optional[str],
skip: bool,
data: str,
line: int) -> None:
super().__init__(name, parent)
self.suite = suite
self.file = file
self.writescache = writescache
self.only_when = only_when
if ((platform == 'windows' and sys.platform != 'win32')
or (platform == 'posix' and sys.platform == 'win32')):
skip = True
self.skip = skip
self.data = data
self.line = line
self.old_cwd = None # type: Optional[str]
self.tmpdir = None # type: Optional[tempfile.TemporaryDirectory[str]]
def runtest(self) -> None:
if self.skip:
pytest.skip()
suite = self.parent.obj()
suite.setup()
try:
suite.run_case(self)
except Exception:
# As a debugging aid, support copying the contents of the tmp directory somewhere
save_dir = self.config.getoption('--save-failures-to', None) # type: Optional[str]
if save_dir:
assert self.tmpdir is not None
target_dir = os.path.join(save_dir, os.path.basename(self.tmpdir.name))
print("Copying data from test {} to {}".format(self.name, target_dir))
if not os.path.isabs(target_dir):
assert self.old_cwd
target_dir = os.path.join(self.old_cwd, target_dir)
shutil.copytree(self.tmpdir.name, target_dir)
raise
def setup(self) -> None:
parse_test_case(case=self)
self.old_cwd = os.getcwd()
self.tmpdir = tempfile.TemporaryDirectory(prefix='mypy-test-')
os.chdir(self.tmpdir.name)
os.mkdir(test_temp_dir)
for path, content in self.files:
dir = os.path.dirname(path)
os.makedirs(dir, exist_ok=True)
with open(path, 'w', encoding='utf8') as f:
f.write(content)
def teardown(self) -> None:
assert self.old_cwd is not None and self.tmpdir is not None, \
"test was not properly set up"
os.chdir(self.old_cwd)
try:
self.tmpdir.cleanup()
except OSError:
pass
self.old_cwd = None
self.tmpdir = None
def reportinfo(self) -> Tuple[str, int, str]:
return self.file, self.line, self.name
def repr_failure(self, excinfo: Any, style: Optional[Any] = None) -> str:
if excinfo.errisinstance(SystemExit):
# We assume that before doing exit() (which raises SystemExit) we've printed
# enough context about what happened so that a stack trace is not useful.
# In particular, uncaught exceptions during semantic analysis or type checking
# call exit() and they already print out a stack trace.
excrepr = excinfo.exconly()
else:
self.parent._prunetraceback(excinfo) # type: ignore[no-untyped-call]
excrepr = excinfo.getrepr(style='short')
return "data: {}:{}:\n{}".format(self.file, self.line, excrepr)
def find_steps(self) -> List[List[FileOperation]]:
"""Return a list of descriptions of file operations for each incremental step.
The first list item corresponds to the first incremental step, the second for the
second step, etc. Each operation can either be a file modification/creation (UpdateFile)
or deletion (DeleteFile).
Defaults to having two steps if there aren't any operations.
"""
steps = {} # type: Dict[int, List[FileOperation]]
for path, _ in self.files:
m = re.match(r'.*\.([0-9]+)$', path)
if m:
num = int(m.group(1))
assert num >= 2
target_path = re.sub(r'\.[0-9]+$', '', path)
module = module_from_path(target_path)
operation = UpdateFile(module, path, target_path)
steps.setdefault(num, []).append(operation)
for num, paths in self.deleted_paths.items():
assert num >= 2
for path in paths:
module = module_from_path(path)
steps.setdefault(num, []).append(DeleteFile(module, path))
max_step = max(steps) if steps else 2
return [steps.get(num, []) for num in range(2, max_step + 1)]
def module_from_path(path: str) -> str:
path = re.sub(r'\.pyi?$', '', path)
# We can have a mix of Unix-style and Windows-style separators.
parts = re.split(r'[/\\]', path)
assert parts[0] == test_temp_dir
del parts[0]
module = '.'.join(parts)
module = re.sub(r'\.__init__$', '', module)
return module
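# Worked example (assuming ``test_temp_dir`` is 'tmp', as in mypy's test config):
#
#     module_from_path('tmp/pkg/mod.py')       # -> 'pkg.mod'
#     module_from_path('tmp/pkg/__init__.py')  # -> 'pkg'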
class TestItem:
"""Parsed test caseitem.
An item is of the form
[id arg]
.. data ..
"""
id = ''
arg = '' # type: Optional[str]
# Text data, array of 8-bit strings
data = None # type: List[str]
file = ''
line = 0 # Line number in file
def __init__(self, id: str, arg: Optional[str], data: List[str],
line: int) -> None:
self.id = id
self.arg = arg
self.data = data
self.line = line
def parse_test_data(raw_data: str, name: str) -> List[TestItem]:
"""Parse a list of lines that represent a sequence of test items."""
lines = ['', '[case ' + name + ']'] + raw_data.split('\n')
ret = [] # type: List[TestItem]
data = [] # type: List[str]
id = None # type: Optional[str]
arg = None # type: Optional[str]
i = 0
i0 = 0
while i < len(lines):
s = lines[i].strip()
if lines[i].startswith('[') and s.endswith(']'):
if id:
data = collapse_line_continuation(data)
data = strip_list(data)
ret.append(TestItem(id, arg, strip_list(data), i0 + 1))
i0 = i
id = s[1:-1]
arg = None
if ' ' in id:
arg = id[id.index(' ') + 1:]
id = id[:id.index(' ')]
data = []
elif lines[i].startswith('\\['):
data.append(lines[i][1:])
elif not lines[i].startswith('--'):
data.append(lines[i])
elif lines[i].startswith('----'):
data.append(lines[i][2:])
i += 1
# Process the last item.
if id:
data = collapse_line_continuation(data)
data = strip_list(data)
ret.append(TestItem(id, arg, data, i0 + 1))
return ret
def strip_list(l: List[str]) -> List[str]:
"""Return a stripped copy of l.
Strip whitespace at the end of all lines, and strip all empty
lines from the end of the array.
"""
r = [] # type: List[str]
for s in l:
# Strip spaces at end of line
r.append(re.sub(r'\s+$', '', s))
while len(r) > 0 and r[-1] == '':
r.pop()
return r
def collapse_line_continuation(l: List[str]) -> List[str]:
r = [] # type: List[str]
cont = False
for s in l:
ss = re.sub(r'\\$', '', s)
if cont:
r[-1] += re.sub('^ +', '', ss)
else:
r.append(ss)
cont = s.endswith('\\')
return r
def expand_variables(s: str) -> str:
return s.replace('<ROOT>', root_dir)
def expand_errors(input: List[str], output: List[str], fnam: str) -> None:
"""Transform comments such as '# E: message' or
'# E:3: message' in input.
The result is lines like 'fnam:line: error: message'.
"""
for i in range(len(input)):
# The first element of the split is not a comment
for possible_err_comment in input[i].split(' # ')[1:]:
m = re.search(
r'^([ENW]):((?P<col>\d+):)? (?P<message>.*)$',
possible_err_comment.strip())
if m:
if m.group(1) == 'E':
severity = 'error'
elif m.group(1) == 'N':
severity = 'note'
elif m.group(1) == 'W':
severity = 'warning'
col = m.group('col')
message = m.group('message')
message = message.replace('\\#', '#') # adds back escaped # character
if col is None:
output.append(
'{}:{}: {}: {}'.format(fnam, i + 1, severity, message))
else:
output.append('{}:{}:{}: {}: {}'.format(
fnam, i + 1, col, severity, message))
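# Worked example: with fnam='main' and input line 1 being
# "x = '' # E: Incompatible types", this appends
# "main:1: error: Incompatible types" to ``output``.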
def fix_win_path(line: str) -> str:
r"""Changes Windows paths to Linux paths in error messages.
E.g. foo\bar.py -> foo/bar.py.
"""
line = line.replace(root_dir, root_dir.replace('\\', '/'))
m = re.match(r'^([\S/]+):(\d+:)?(\s+.*)', line)
if not m:
return line
else:
filename, lineno, message = m.groups()
return '{}:{}{}'.format(filename.replace('\\', '/'),
lineno or '', message)
def fix_cobertura_filename(line: str) -> str:
r"""Changes filename paths to Linux paths in Cobertura output files.
E.g. filename="pkg\subpkg\a.py" -> filename="pkg/subpkg/a.py".
"""
m = re.search(r'<class .* filename="(?P<filename>.*?)"', line)
if not m:
return line
return '{}{}{}'.format(line[:m.start(1)],
m.group('filename').replace('\\', '/'),
line[m.end(1):])
##
#
# pytest setup
#
##
# This function name is special to pytest. See
# https://docs.pytest.org/en/latest/reference.html#initialization-hooks
def pytest_addoption(parser: Any) -> None:
group = parser.getgroup('mypy')
group.addoption('--update-data', action='store_true', default=False,
help='Update test data to reflect actual output'
' (supported only for certain tests)')
group.addoption('--save-failures-to', default=None,
help='Copy the temp directories from failing tests to a target directory')
group.addoption('--mypy-verbose', action='count',
help='Set the verbose flag when creating mypy Options')
group.addoption('--mypyc-showc', action='store_true', default=False,
help='Display C code on mypyc test failures')
# This function name is special to pytest. See
# http://doc.pytest.org/en/latest/writing_plugins.html#collection-hooks
def pytest_pycollect_makeitem(collector: Any, name: str,
obj: object) -> 'Optional[Any]':
"""Called by pytest on each object in modules configured in conftest.py files.
collector is pytest.Collector, returns Optional[pytest.Class]
"""
if isinstance(obj, type):
# Only classes derived from DataSuite contain test cases, not the DataSuite class itself
if issubclass(obj, DataSuite) and obj is not DataSuite:
# Non-None result means this obj is a test case.
# The collect method of the returned DataSuiteCollector instance will be called later,
# with self.obj being obj.
return DataSuiteCollector.from_parent( # type: ignore[no-untyped-call]
parent=collector, name=name
)
return None
def split_test_cases(parent: 'DataSuiteCollector', suite: 'DataSuite',
file: str) -> Iterator['DataDrivenTestCase']:
"""Iterate over raw test cases in file, at collection time, ignoring sub items.
The collection phase is slow, so any heavy processing should be deferred to after
uninteresting tests are filtered (when using -k PATTERN switch).
"""
with open(file, encoding='utf-8') as f:
data = f.read()
cases = re.split(r'^\[case ([a-zA-Z_0-9]+)'
r'(-writescache)?'
r'(-only_when_cache|-only_when_nocache)?'
r'(-posix|-windows)?'
r'(-skip)?'
r'\][ \t]*$\n',
data,
flags=re.DOTALL | re.MULTILINE)
line_no = cases[0].count('\n') + 1
for i in range(1, len(cases), 6):
name, writescache, only_when, platform_flag, skip, data = cases[i:i + 6]
platform = platform_flag[1:] if platform_flag else None
yield DataDrivenTestCase.from_parent(
parent=parent,
suite=suite,
file=file,
name=add_test_name_suffix(name, suite.test_name_suffix),
writescache=bool(writescache),
only_when=only_when,
platform=platform,
skip=bool(skip),
data=data,
line=line_no,
)
line_no += data.count('\n') + 1
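# Header variants such as "[case testFoo-writescache-only_when_cache-windows-skip]"
# are recognised by the pattern above; each optional group maps to the
# corresponding DataDrivenTestCase flag.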
class DataSuiteCollector(pytest.Class):
def collect(self) -> Iterator[pytest.Item]:
"""Called by pytest on each of the object returned from pytest_pycollect_makeitem"""
# obj is the object for which pytest_pycollect_makeitem returned self.
suite = self.obj # type: DataSuite
for f in suite.files:
yield from split_test_cases(self, suite, os.path.join(suite.data_prefix, f))
def add_test_name_suffix(name: str, suffix: str) -> str:
# Find magic suffix of form "-foobar" (used for things like "-skip").
m = re.search(r'-[-A-Za-z0-9]+$', name)
if m:
# Insert suite-specific test name suffix before the magic suffix
# which must be the last thing in the test case name since we
# are using endswith() checks.
magic_suffix = m.group(0)
return name[:-len(magic_suffix)] + suffix + magic_suffix
else:
return name + suffix
def is_incremental(testcase: DataDrivenTestCase) -> bool:
return 'incremental' in testcase.name.lower() or 'incremental' in testcase.file
def has_stable_flags(testcase: DataDrivenTestCase) -> bool:
if any(re.match(r'# flags[2-9]:', line) for line in testcase.input):
return False
for filename, contents in testcase.files:
if os.path.basename(filename).startswith('mypy.ini.'):
return False
return True
class DataSuite:
# option fields - class variables
files = None # type: List[str]
base_path = test_temp_dir
# Allow external users of the test code to override the data prefix
data_prefix = test_data_prefix
required_out_section = False
native_sep = False
# Name suffix automatically added to each test case in the suite (can be
# used to distinguish test cases in suites that share data files)
test_name_suffix = ''
def setup(self) -> None:
"""Setup fixtures (ad-hoc)"""
pass
@abstractmethod
def run_case(self, testcase: DataDrivenTestCase) -> None:
raise NotImplementedError
| 37.988673
| 98
| 0.579972
|
3a425bf3275d325bab05c4e255cdf8dccbba3cb0
| 518
|
py
|
Python
|
test/test_edit_contact.py
|
Physic67/python_training
|
a19206b8dc2fc642f0cd29781eb8431122501630
|
[
"Apache-2.0"
] | null | null | null |
test/test_edit_contact.py
|
Physic67/python_training
|
a19206b8dc2fc642f0cd29781eb8431122501630
|
[
"Apache-2.0"
] | null | null | null |
test/test_edit_contact.py
|
Physic67/python_training
|
a19206b8dc2fc642f0cd29781eb8431122501630
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_edit_first_contact(app):
if app.contact.count() == 0:
app.contact.add_new_contact(Contact(firstname="Edward", middlename="Vampire", lastname="Kallen", address="Forks, Washington", homephone="495-1234567", mobilephone="901-1234567", email="123@gmail.ru"))
app.contact.edit_first_contact(Contact(firstname="Shrek", middlename="Vampire", lastname="Kallen", address="Forks, Washington", homephone="495", mobilephone="901", email="123"))
| 57.555556
| 208
| 0.727799
|
fe7068afc5aec7d30790d1569645ee0bf22a0ebe
| 17,651
|
py
|
Python
|
release/scripts/modules/bl_keymap_utils/keymap_from_toolbar.py
|
atlantic-crypto/blender
|
65fdf6f0ed592082ead87c76ea9be46cd54dba26
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/modules/bl_keymap_utils/keymap_from_toolbar.py
|
atlantic-crypto/blender
|
65fdf6f0ed592082ead87c76ea9be46cd54dba26
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/modules/bl_keymap_utils/keymap_from_toolbar.py
|
atlantic-crypto/blender
|
65fdf6f0ed592082ead87c76ea9be46cd54dba26
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2019-10-23T12:02:29.000Z
|
2019-10-23T12:02:29.000Z
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Dynamically create a keymap which is used by the popup toolbar
# for accelerator key access.
__all__ = (
"generate",
)
def generate(context, space_type):
"""
Keymap for popup toolbar, currently generated each time.
"""
from bl_ui.space_toolsystem_common import ToolSelectPanelHelper
def modifier_keywords_from_item(kmi):
kw = {}
for (attr, default) in (
("any", False),
("shift", False),
("ctrl", False),
("alt", False),
("oskey", False),
("key_modifier", 'NONE'),
):
val = getattr(kmi, attr)
if val != default:
kw[attr] = val
return kw
def dict_as_tuple(d):
return tuple((k, v) for (k, v) in sorted(d.items()))
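# e.g. dict_as_tuple({"type": 'A', "shift": True}) -> (("shift", True), ("type", 'A')),
# giving a hashable, order-independent key for the kmi_unique_args set below.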
cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
items_all = [
# 0: tool
# 1: keymap item (direct access)
# 2: keymap item (newly calculated for toolbar)
[item, None, None]
for item in ToolSelectPanelHelper._tools_flatten(cls.tools_from_context(context))
if item is not None
]
items_all_id = {item_container[0].idname for item_container in items_all}
# Press the toolbar popup key again to set the default tool,
# this is handy because the select box tool works as a way
# to 'drop' currently active tools (it's basically a 'none' tool),
# so this allows us to quickly go back to a state that allows
# a shortcut-based workflow (before the tool system was added).
use_tap_reset = True
# TODO: support other tools for modes which don't use this tool.
tap_reset_tool = "builtin.cursor"
# Check the tool is available in the current context.
if tap_reset_tool not in items_all_id:
use_tap_reset = False
from bl_operators.wm import use_toolbar_release_hack
# Pie-menu style release to activate.
use_release_confirm = True
# Generate items when no keys are mapped.
use_auto_keymap_alpha = False # Map manually in the default keymap
use_auto_keymap_num = True
# Temporary, only create so we can pass 'properties' to find_item_from_operator.
use_hack_properties = True
km_name_default = "Toolbar Popup"
km_name = km_name_default + " <temp>"
wm = context.window_manager
keyconf_user = wm.keyconfigs.user
keyconf_active = wm.keyconfigs.active
keymap = keyconf_active.keymaps.get(km_name)
if keymap is None:
keymap = keyconf_active.keymaps.new(km_name, space_type='EMPTY', region_type='TEMPORARY')
for kmi in keymap.keymap_items:
keymap.keymap_items.remove(kmi)
keymap_src = keyconf_user.keymaps.get(km_name_default)
if keymap_src is not None:
for kmi_src in keymap_src.keymap_items:
# Skip tools that aren't currently shown.
if (
(kmi_src.idname == "wm.tool_set_by_id") and
(kmi_src.properties.name not in items_all_id)
):
continue
keymap.keymap_items.new_from_item(kmi_src)
del keymap_src
del items_all_id
kmi_unique_args = set()
def kmi_unique_or_pass(kmi_args):
kmi_unique_len = len(kmi_unique_args)
kmi_unique_args.add(dict_as_tuple(kmi_args))
return kmi_unique_len != len(kmi_unique_args)
cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
if use_hack_properties:
kmi_hack = keymap.keymap_items.new("wm.tool_set_by_id", 'NONE', 'PRESS')
kmi_hack_properties = kmi_hack.properties
kmi_hack.active = False
kmi_hack_brush_select = keymap.keymap_items.new("paint.brush_select", 'NONE', 'PRESS')
kmi_hack_brush_select_properties = kmi_hack_brush_select.properties
kmi_hack_brush_select.active = False
if use_release_confirm or use_tap_reset:
kmi_toolbar = wm.keyconfigs.find_item_from_operator(
idname="wm.toolbar",
)[1]
kmi_toolbar_type = None if not kmi_toolbar else kmi_toolbar.type
if use_tap_reset and kmi_toolbar_type is not None:
kmi_toolbar_args_type_only = {"type": kmi_toolbar_type}
kmi_toolbar_args = {**kmi_toolbar_args_type_only, **modifier_keywords_from_item(kmi_toolbar)}
else:
use_tap_reset = False
del kmi_toolbar
if use_tap_reset:
kmi_found = None
if use_hack_properties:
# First check for direct assignment, if this tool already has a key, no need to add a new one.
kmi_hack_properties.name = tap_reset_tool
kmi_found = wm.keyconfigs.find_item_from_operator(
idname="wm.tool_set_by_id",
context='INVOKE_REGION_WIN',
# properties={"name": item.idname},
properties=kmi_hack_properties,
include={'KEYBOARD'},
)[1]
if kmi_found:
use_tap_reset = False
del kmi_found
if use_tap_reset:
use_tap_reset = kmi_unique_or_pass(kmi_toolbar_args)
if use_tap_reset:
items_all[:] = [
item_container
for item_container in items_all
if item_container[0].idname != tap_reset_tool
]
# -----------------------
# Begin Keymap Generation
# -------------------------------------------------------------------------
# Direct Tool Assignment & Brushes
for item_container in items_all:
item = item_container[0]
# Only check the first item in the tools key-map (a little arbitrary).
if use_hack_properties:
# First check for direct assignment.
kmi_hack_properties.name = item.idname
kmi_found = wm.keyconfigs.find_item_from_operator(
idname="wm.tool_set_by_id",
context='INVOKE_REGION_WIN',
# properties={"name": item.idname},
properties=kmi_hack_properties,
include={'KEYBOARD'},
)[1]
if kmi_found is None:
if item.data_block:
# PAINT_OT_brush_select
mode = context.active_object.mode
# See: BKE_paint_get_tool_prop_id_from_paintmode
attr = {
'SCULPT': "sculpt_tool",
'VERTEX_PAINT': "vertex_tool",
'WEIGHT_PAINT': "weight_tool",
'TEXTURE_PAINT': "image_tool",
'PAINT_GPENCIL': "gpencil_tool",
}.get(mode, None)
if attr is not None:
setattr(kmi_hack_brush_select_properties, attr, item.data_block)
kmi_found = wm.keyconfigs.find_item_from_operator(
idname="paint.brush_select",
context='INVOKE_REGION_WIN',
properties=kmi_hack_brush_select_properties,
include={'KEYBOARD'},
)[1]
elif mode in {'PARTICLE_EDIT', 'SCULPT_GPENCIL'}:
# Doesn't use brushes
pass
else:
print("Unsupported mode:", mode)
del mode, attr
else:
kmi_found = None
if kmi_found is not None:
pass
elif item.operator is not None:
kmi_found = wm.keyconfigs.find_item_from_operator(
idname=item.operator,
context='INVOKE_REGION_WIN',
include={'KEYBOARD'},
)[1]
elif item.keymap is not None:
km = keyconf_user.keymaps.get(item.keymap[0])
if km is None:
print("Keymap", repr(item.keymap[0]), "not found for tool", item.idname)
kmi_found = None
else:
kmi_first = km.keymap_items
kmi_first = kmi_first[0] if kmi_first else None
if kmi_first is not None:
kmi_found = wm.keyconfigs.find_item_from_operator(
idname=kmi_first.idname,
# properties=kmi_first.properties, # prevents matches, don't use.
context='INVOKE_REGION_WIN',
include={'KEYBOARD'},
)[1]
if kmi_found is None:
# We need non-keyboard events so that keys set via 'key_modifier' are found.
kmi_found = wm.keyconfigs.find_item_from_operator(
idname=kmi_first.idname,
# properties=kmi_first.properties, # prevents matches, don't use.
context='INVOKE_REGION_WIN',
exclude={'KEYBOARD'},
)[1]
if kmi_found is not None:
if kmi_found.key_modifier == 'NONE':
kmi_found = None
else:
kmi_found = None
del kmi_first
del km
else:
kmi_found = None
item_container[1] = kmi_found
# -------------------------------------------------------------------------
# Single Key Access
# More complex multi-pass test.
for item_container in items_all:
item, kmi_found = item_container[:2]
if kmi_found is None:
continue
kmi_found_type = kmi_found.type
# Only for single keys.
if (
(len(kmi_found_type) == 1) or
# When a tool is being activated instead of running an operator, just copy the shortcut.
(kmi_found.idname in {"wm.tool_set_by_id", "WM_OT_tool_set_by_id"})
):
kmi_args = {"type": kmi_found_type, **modifier_keywords_from_item(kmi_found)}
if kmi_unique_or_pass(kmi_args):
kmi = keymap.keymap_items.new(idname="wm.tool_set_by_id", value='PRESS', **kmi_args)
kmi.properties.name = item.idname
item_container[2] = kmi
# -------------------------------------------------------------------------
# Single Key Modifier
#
#
# Test for key_modifier, where alpha key is used as a 'key_modifier'
# (grease pencil holding 'D' for example).
for item_container in items_all:
item, kmi_found, kmi_exist = item_container
if kmi_found is None or kmi_exist:
continue
kmi_found_type = kmi_found.type
if kmi_found_type in {
'LEFTMOUSE',
'RIGHTMOUSE',
'MIDDLEMOUSE',
'BUTTON4MOUSE',
'BUTTON5MOUSE',
'BUTTON6MOUSE',
'BUTTON7MOUSE',
}:
kmi_found_type = kmi_found.key_modifier
# excludes 'NONE'
if len(kmi_found_type) == 1:
kmi_args = {"type": kmi_found_type, **modifier_keywords_from_item(kmi_found)}
del kmi_args["key_modifier"]
if kmi_unique_or_pass(kmi_args):
kmi = keymap.keymap_items.new(idname="wm.tool_set_by_id", value='PRESS', **kmi_args)
kmi.properties.name = item.idname
item_container[2] = kmi
# -------------------------------------------------------------------------
# Assign A-Z to Keys
#
# When the keys are free.
if use_auto_keymap_alpha:
# Map all unmapped tools to ordered keys;
# while this is a bit strange, it means users will not confuse regular key bindings with ordered bindings.
# First map A-Z.
kmi_type_alpha_char = [chr(i) for i in range(65, 91)]
kmi_type_alpha_args = {c: {"type": c} for c in kmi_type_alpha_char}
kmi_type_alpha_args_tuple = {c: dict_as_tuple(kmi_type_alpha_args[c]) for c in kmi_type_alpha_char}
for item_container in items_all:
item, kmi_found, kmi_exist = item_container
if kmi_exist:
continue
kmi_type = item.label[0].upper()
kmi_tuple = kmi_type_alpha_args_tuple.get(kmi_type)
if kmi_tuple and kmi_tuple not in kmi_unique_args:
kmi_unique_args.add(kmi_tuple)
kmi = keymap.keymap_items.new(
idname="wm.tool_set_by_id",
value='PRESS',
**kmi_type_alpha_args[kmi_type],
)
kmi.properties.name = item.idname
item_container[2] = kmi
del kmi_type_alpha_char, kmi_type_alpha_args, kmi_type_alpha_args_tuple
# -------------------------------------------------------------------------
# Assign Numbers to Keys
if use_auto_keymap_num:
# Free events (last used first).
kmi_type_auto = ('ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE', 'ZERO')
# Map both numbers and num-pad.
kmi_type_dupe = {
'ONE': 'NUMPAD_1',
'TWO': 'NUMPAD_2',
'THREE': 'NUMPAD_3',
'FOUR': 'NUMPAD_4',
'FIVE': 'NUMPAD_5',
'SIX': 'NUMPAD_6',
'SEVEN': 'NUMPAD_7',
'EIGHT': 'NUMPAD_8',
'NINE': 'NUMPAD_9',
'ZERO': 'NUMPAD_0',
}
def iter_free_events():
for mod in ({}, {"shift": True}, {"ctrl": True}, {"alt": True}):
for e in kmi_type_auto:
yield (e, mod)
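# iter_free_events() yields ONE..ZERO with no modifier first, then the same
# ten keys with Shift, Ctrl and Alt, i.e. up to 40 candidate slots in total.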
iter_events = iter(iter_free_events())
for item_container in items_all:
item, kmi_found, kmi_exist = item_container
if kmi_exist:
continue
kmi_args = None
while True:
key, mod = next(iter_events, (None, None))
if key is None:
break
kmi_args = {"type": key, **mod}
kmi_tuple = dict_as_tuple(kmi_args)
if kmi_tuple in kmi_unique_args:
kmi_args = None
else:
break
if kmi_args is not None:
kmi = keymap.keymap_items.new(idname="wm.tool_set_by_id", value='PRESS', **kmi_args)
kmi.properties.name = item.idname
item_container[2] = kmi
kmi_unique_args.add(kmi_tuple)
key = kmi_type_dupe.get(kmi_args["type"])
if key is not None:
kmi_args["type"] = key
kmi_tuple = dict_as_tuple(kmi_args)
if kmi_tuple not in kmi_unique_args:
kmi = keymap.keymap_items.new(idname="wm.tool_set_by_id", value='PRESS', **kmi_args)
kmi.properties.name = item.idname
kmi_unique_args.add(kmi_tuple)
# ---------------------
# End Keymap Generation
if use_hack_properties:
keymap.keymap_items.remove(kmi_hack)
keymap.keymap_items.remove(kmi_hack_brush_select)
# Keep last so we can try to add a key without any modifiers
# in case this toolbar was activated with modifiers.
if use_tap_reset:
if len(kmi_toolbar_args_type_only) == len(kmi_toolbar_args):
kmi_toolbar_args_available = kmi_toolbar_args
else:
# We have modifiers, see if we have a free key w/o modifiers.
kmi_toolbar_tuple = dict_as_tuple(kmi_toolbar_args_type_only)
if kmi_toolbar_tuple not in kmi_unique_args:
kmi_toolbar_args_available = kmi_toolbar_args_type_only
kmi_unique_args.add(kmi_toolbar_tuple)
else:
kmi_toolbar_args_available = kmi_toolbar_args
del kmi_toolbar_tuple
kmi = keymap.keymap_items.new(
"wm.tool_set_by_id",
value='PRESS' if use_toolbar_release_hack else 'DOUBLE_CLICK',
**kmi_toolbar_args_available,
)
kmi.properties.name = tap_reset_tool
if use_release_confirm:
kmi = keymap.keymap_items.new(
"ui.button_execute",
type=kmi_toolbar_type,
value='RELEASE',
any=True,
)
kmi.properties.skip_depressed = True
if use_toolbar_release_hack:
# ... or pass through to let the toolbar know we're released.
# Let the operator know we're released.
kmi = keymap.keymap_items.new(
"wm.tool_set_by_id",
type=kmi_toolbar_type,
value='RELEASE',
any=True,
)
wm.keyconfigs.update()
return keymap
| 38.708333
| 111
| 0.560365
|
02007ba86cb5579cf788732d1ea362c37d877f41
| 28
|
py
|
Python
|
mercadopago/__init__.py
|
amarisi/sdk-python
|
c22edfb19a48e0862e4317a34fff919528824546
|
[
"MIT"
] | null | null | null |
mercadopago/__init__.py
|
amarisi/sdk-python
|
c22edfb19a48e0862e4317a34fff919528824546
|
[
"MIT"
] | null | null | null |
mercadopago/__init__.py
|
amarisi/sdk-python
|
c22edfb19a48e0862e4317a34fff919528824546
|
[
"MIT"
] | null | null | null |
from .mercadopago import MP
| 14
| 27
| 0.821429
|
41675f266c2fac2d598d962d5082d7a9df551ab9
| 942
|
py
|
Python
|
isi_sdk_8_2_0/test/test_storagepool_nodepools.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_0/test/test_storagepool_nodepools.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_0/test/test_storagepool_nodepools.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_0
from isi_sdk_8_2_0.models.storagepool_nodepools import StoragepoolNodepools # noqa: E501
from isi_sdk_8_2_0.rest import ApiException
class TestStoragepoolNodepools(unittest.TestCase):
"""StoragepoolNodepools unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testStoragepoolNodepools(self):
"""Test StoragepoolNodepools"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_0.models.storagepool_nodepools.StoragepoolNodepools() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.97561
| 97
| 0.717622
|
574deb50a852e77e676763ecf9e8b742c4618002
| 4,641
|
py
|
Python
|
readthedocs/builds/views.py
|
Hiroshi18/readthedocs.org
|
95c00d0adec1b3b935ba49068c89375cc129238e
|
[
"MIT"
] | 1
|
2021-07-01T01:31:58.000Z
|
2021-07-01T01:31:58.000Z
|
readthedocs/builds/views.py
|
Hiroshi18/readthedocs.org
|
95c00d0adec1b3b935ba49068c89375cc129238e
|
[
"MIT"
] | null | null | null |
readthedocs/builds/views.py
|
Hiroshi18/readthedocs.org
|
95c00d0adec1b3b935ba49068c89375cc129238e
|
[
"MIT"
] | null | null | null |
"""Views for builds app."""
import logging
import textwrap
from urllib.parse import urlparse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import (
HttpResponseForbidden,
HttpResponseRedirect,
)
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.generic import DetailView, ListView
from requests.utils import quote
from readthedocs.builds.models import Build, Version
from readthedocs.core.permissions import AdminPermission
from readthedocs.core.utils import trigger_build
from readthedocs.doc_builder.exceptions import BuildEnvironmentError
from readthedocs.projects.models import Project
log = logging.getLogger(__name__)
class BuildBase:
model = Build
def get_queryset(self):
self.project_slug = self.kwargs.get('project_slug', None)
self.project = get_object_or_404(
Project.objects.public(self.request.user),
slug=self.project_slug,
)
queryset = Build.objects.public(
user=self.request.user,
project=self.project,
).select_related('project', 'version')
return queryset
class BuildTriggerMixin:
@method_decorator(login_required)
def post(self, request, project_slug):
project = get_object_or_404(Project, slug=project_slug)
if not AdminPermission.is_admin(request.user, project):
return HttpResponseForbidden()
version_slug = request.POST.get('version_slug')
version = get_object_or_404(
Version.internal.all(),
project=project,
slug=version_slug,
)
update_docs_task, build = trigger_build(
project=project,
version=version,
)
if (update_docs_task, build) == (None, None):
# Build was skipped
messages.add_message(
request,
messages.WARNING,
"This project is currently disabled and can't trigger new builds.",
)
return HttpResponseRedirect(
reverse('builds_project_list', args=[project.slug]),
)
return HttpResponseRedirect(
reverse('builds_detail', args=[project.slug, build.pk]),
)
class BuildList(BuildBase, BuildTriggerMixin, ListView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
active_builds = self.get_queryset().exclude(
state='finished',
).values('id')
context['project'] = self.project
context['active_builds'] = active_builds
context['versions'] = Version.internal.public(
user=self.request.user,
project=self.project,
)
context['build_qs'] = self.get_queryset()
return context
class BuildDetail(BuildBase, DetailView):
pk_url_kwarg = 'build_pk'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['project'] = self.project
build = self.get_object()
if build.error != BuildEnvironmentError.GENERIC_WITH_BUILD_ID.format(build_id=build.pk):
# Do not suggest opening an issue if the error is not generic
return context
scheme = (
'https://github.com/rtfd/readthedocs.org/issues/new'
'?title={title}{build_id}'
'&body={body}'
)
# TODO: we could use ``.github/ISSUE_TEMPLATE.md`` here, but we would
        # need to add some variables to it, which could impact the UX when
        # filling an issue from the web
body = """
## Details:
* Project URL: https://readthedocs.org/projects/{project_slug}/
* Build URL(if applicable): https://readthedocs.org{build_path}
* Read the Docs username(if applicable): {username}
## Expected Result
*A description of what you wanted to happen*
## Actual Result
*A description of what actually happened*""".format(
project_slug=self.project,
build_path=self.request.path,
username=self.request.user,
)
scheme_dict = {
'title': quote('Build error with build id #'),
'build_id': context['build'].id,
'body': quote(textwrap.dedent(body)),
}
issue_url = scheme.format(**scheme_dict)
issue_url = urlparse(issue_url).geturl()
context['issue_url'] = issue_url
return context
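# An illustrative sketch of what the code above produces (hypothetical build id):
# for a build with pk 123, `quote` percent-encodes the title and body, so
# `issue_url` ends up roughly like
#   https://github.com/rtfd/readthedocs.org/issues/new?title=Build%20error%20with%20build%20id%20%23123&body=%0A%23%23%20Details%3A%0A...
# i.e. the encoded title, the raw build id appended to it, and the dedented,
# URL-encoded body template.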
| 30.136364
| 96
| 0.636716
|
26857ba235e4fe13904ca4f1334f4662a795f8a8
| 31,754
|
py
|
Python
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state space model infrastructure."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy
from tensorflow.contrib import layers
from tensorflow.contrib.timeseries.python.timeseries import estimators
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries import saved_model_utils
from tensorflow.contrib.timeseries.python.timeseries import state_management
from tensorflow.contrib.timeseries.python.timeseries import test_utils
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import queue_runner_impl
class RandomStateSpaceModel(state_space_model.StateSpaceModel):
def __init__(self,
state_dimension,
state_noise_dimension,
configuration=state_space_model.StateSpaceModelConfiguration()):
self.transition = numpy.random.normal(
size=[state_dimension, state_dimension]).astype(
configuration.dtype.as_numpy_dtype)
self.noise_transform = numpy.random.normal(
size=(state_dimension, state_noise_dimension)).astype(
configuration.dtype.as_numpy_dtype)
# Test batch broadcasting
self.observation_model = numpy.random.normal(
size=(configuration.num_features, state_dimension)).astype(
configuration.dtype.as_numpy_dtype)
super(RandomStateSpaceModel, self).__init__(
configuration=configuration._replace(
covariance_prior_fn=lambda _: 0.))
def get_state_transition(self):
return self.transition
def get_noise_transform(self):
return self.noise_transform
def get_observation_model(self, times):
return self.observation_model
class ConstructionTests(test.TestCase):
def test_initialize_graph_error(self):
with self.assertRaisesRegexp(ValueError, "initialize_graph"):
model = RandomStateSpaceModel(2, 2)
outputs = model.define_loss(
features={
feature_keys.TrainEvalFeatures.TIMES:
constant_op.constant([[1, 2]]),
feature_keys.TrainEvalFeatures.VALUES:
constant_op.constant([[[1.], [2.]]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
def test_initialize_graph_state_manager_error(self):
with self.assertRaisesRegexp(ValueError, "initialize_graph"):
model = RandomStateSpaceModel(2, 2)
state_manager = state_management.ChainingStateManager()
outputs = state_manager.define_loss(
model=model,
features={
feature_keys.TrainEvalFeatures.TIMES:
constant_op.constant([[1, 2]]),
feature_keys.TrainEvalFeatures.VALUES:
constant_op.constant([[[1.], [2.]]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
class GapTests(test.TestCase):
def _gap_test_template(self, times, values):
random_model = RandomStateSpaceModel(
state_dimension=1, state_noise_dimension=1,
configuration=state_space_model.StateSpaceModelConfiguration(
num_features=1))
random_model.initialize_graph()
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader({
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}))
features, _ = input_fn()
times = features[feature_keys.TrainEvalFeatures.TIMES]
values = features[feature_keys.TrainEvalFeatures.VALUES]
model_outputs = random_model.get_batch_loss(
features={
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
},
mode=None,
state=math_utils.replicate_state(
start_state=random_model.get_start_state(),
batch_size=array_ops.shape(times)[0]))
with self.cached_session() as session:
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
model_outputs.loss.eval()
coordinator.request_stop()
coordinator.join()
def test_start_gap(self):
self._gap_test_template(times=[20, 21, 22], values=numpy.arange(3))
def test_mid_gap(self):
self._gap_test_template(times=[2, 60, 61], values=numpy.arange(3))
def test_end_gap(self):
self._gap_test_template(times=[2, 3, 73], values=numpy.arange(3))
def test_all_gaps(self):
self._gap_test_template(times=[2, 4, 8, 16, 32, 64, 128],
values=numpy.arange(7))
class StateSpaceEquivalenceTests(test.TestCase):
def test_savedmodel_state_override(self):
random_model = RandomStateSpaceModel(
state_dimension=5,
state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
exogenous_feature_columns=[layers.real_valued_column("exogenous")],
dtype=dtypes.float64, num_features=1))
estimator = estimators.StateSpaceRegressor(
model=random_model,
optimizer=gradient_descent.GradientDescentOptimizer(0.1))
combined_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader({
feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
"exogenous": [-1., -2., -3., -4.]
}))
estimator.train(combined_input_fn, steps=1)
export_location = estimator.export_saved_model(
self.get_temp_dir(), estimator.build_raw_serving_input_receiver_fn())
with ops.Graph().as_default() as graph:
random_model.initialize_graph()
with self.session(graph=graph) as session:
variables.global_variables_initializer().run()
evaled_start_state = session.run(random_model.get_start_state())
evaled_start_state = [
state_element[None, ...] for state_element in evaled_start_state]
with ops.Graph().as_default() as graph:
with self.session(graph=graph) as session:
signatures = loader.load(
session, [tag_constants.SERVING], export_location)
first_split_filtering = saved_model_utils.filter_continuation(
continue_from={
feature_keys.FilteringResults.STATE_TUPLE: evaled_start_state},
signatures=signatures,
session=session,
features={
feature_keys.FilteringFeatures.TIMES: [1, 2],
feature_keys.FilteringFeatures.VALUES: [1., 2.],
"exogenous": [[-1.], [-2.]]})
second_split_filtering = saved_model_utils.filter_continuation(
continue_from=first_split_filtering,
signatures=signatures,
session=session,
features={
feature_keys.FilteringFeatures.TIMES: [3, 4],
feature_keys.FilteringFeatures.VALUES: [3., 4.],
"exogenous": [[-3.], [-4.]]
})
combined_filtering = saved_model_utils.filter_continuation(
continue_from={
feature_keys.FilteringResults.STATE_TUPLE: evaled_start_state},
signatures=signatures,
session=session,
features={
feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
"exogenous": [[-1.], [-2.], [-3.], [-4.]]
})
split_predict = saved_model_utils.predict_continuation(
continue_from=second_split_filtering,
signatures=signatures,
session=session,
steps=1,
exogenous_features={
"exogenous": [[[-5.]]]})
combined_predict = saved_model_utils.predict_continuation(
continue_from=combined_filtering,
signatures=signatures,
session=session,
steps=1,
exogenous_features={
"exogenous": [[[-5.]]]})
for state_key, combined_state_value in combined_filtering.items():
if state_key == feature_keys.FilteringResults.TIMES:
continue
self.assertAllClose(
combined_state_value, second_split_filtering[state_key])
for prediction_key, combined_value in combined_predict.items():
self.assertAllClose(combined_value, split_predict[prediction_key])
def _equivalent_to_single_model_test_template(self, model_generator):
with self.cached_session() as session:
random_model = RandomStateSpaceModel(
state_dimension=5,
state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtypes.float64, num_features=1))
random_model.initialize_graph()
series_length = 10
model_data = random_model.generate(
number_of_series=1, series_length=series_length,
model_parameters=random_model.random_model_parameters())
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(model_data))
features, _ = input_fn()
model_outputs = random_model.get_batch_loss(
features=features,
mode=None,
state=math_utils.replicate_state(
start_state=random_model.get_start_state(),
batch_size=array_ops.shape(
features[feature_keys.TrainEvalFeatures.TIMES])[0]))
variables.global_variables_initializer().run()
compare_outputs_evaled_fn = model_generator(
random_model, model_data)
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
compare_outputs_evaled = compare_outputs_evaled_fn(session)
model_outputs_evaled = session.run(
(model_outputs.end_state, model_outputs.predictions))
coordinator.request_stop()
coordinator.join()
model_posteriors, model_predictions = model_outputs_evaled
(_, compare_posteriors,
compare_predictions) = compare_outputs_evaled
(model_posterior_mean, model_posterior_var,
model_from_time) = model_posteriors
(compare_posterior_mean, compare_posterior_var,
compare_from_time) = compare_posteriors
self.assertAllClose(model_posterior_mean, compare_posterior_mean[0])
self.assertAllClose(model_posterior_var, compare_posterior_var[0])
self.assertAllClose(model_from_time, compare_from_time)
self.assertEqual(sorted(model_predictions.keys()),
sorted(compare_predictions.keys()))
for prediction_name in model_predictions:
if prediction_name == "loss":
# Chunking means that losses will be different; skip testing them.
continue
        # Compare the last chunk to the corresponding un-chunked model
        # predictions
last_prediction_chunk = compare_predictions[prediction_name][-1]
comparison_values = last_prediction_chunk.shape[0]
model_prediction = (
model_predictions[prediction_name][0, -comparison_values:])
self.assertAllClose(model_prediction,
last_prediction_chunk)
def _model_equivalent_to_chained_model_test_template(self, chunk_size):
def chained_model_outputs(original_model, data):
input_fn = test_utils.AllWindowInputFn(
input_pipeline.NumpyReader(data), window_size=chunk_size)
state_manager = state_management.ChainingStateManager(
state_saving_interval=1)
features, _ = input_fn()
state_manager.initialize_graph(original_model)
model_outputs = state_manager.define_loss(
model=original_model,
features=features,
mode=estimator_lib.ModeKeys.TRAIN)
def _eval_outputs(session):
for _ in range(50):
# Warm up saved state
model_outputs.loss.eval()
(posterior_mean, posterior_var,
priors_from_time) = model_outputs.end_state
posteriors = ((posterior_mean,), (posterior_var,), priors_from_time)
outputs = (model_outputs.loss, posteriors,
model_outputs.predictions)
chunked_outputs_evaled = session.run(outputs)
return chunked_outputs_evaled
return _eval_outputs
self._equivalent_to_single_model_test_template(chained_model_outputs)
def test_model_equivalent_to_chained_model_chunk_size_one(self):
numpy.random.seed(2)
random_seed.set_random_seed(3)
self._model_equivalent_to_chained_model_test_template(1)
def test_model_equivalent_to_chained_model_chunk_size_five(self):
numpy.random.seed(4)
random_seed.set_random_seed(5)
self._model_equivalent_to_chained_model_test_template(5)
class PredictionTests(test.TestCase):
def _check_predictions(
self, predicted_mean, predicted_covariance, window_size):
self.assertAllEqual(predicted_covariance.shape,
[1, # batch
window_size,
1, # num features
1]) # num features
self.assertAllEqual(predicted_mean.shape,
[1, # batch
window_size,
1]) # num features
for position in range(window_size - 2):
self.assertGreater(predicted_covariance[0, position + 2, 0, 0],
predicted_covariance[0, position, 0, 0])
def test_predictions_direct(self):
dtype = dtypes.float64
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
random_model.initialize_graph()
prediction_dict = random_model.predict(features={
feature_keys.PredictionFeatures.TIMES: [[1, 3, 5, 6]],
feature_keys.PredictionFeatures.STATE_TUPLE:
math_utils.replicate_state(
start_state=random_model.get_start_state(), batch_size=1)
})
with self.cached_session():
variables.global_variables_initializer().run()
predicted_mean = prediction_dict["mean"].eval()
predicted_covariance = prediction_dict["covariance"].eval()
self._check_predictions(predicted_mean, predicted_covariance,
window_size=4)
def test_predictions_after_loss(self):
dtype = dtypes.float32
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
features = {
feature_keys.TrainEvalFeatures.TIMES: [[1, 2, 3, 4]],
feature_keys.TrainEvalFeatures.VALUES:
array_ops.ones([1, 4, 1], dtype=dtype)
}
passthrough = state_management.PassthroughStateManager()
random_model.initialize_graph()
passthrough.initialize_graph(random_model)
model_outputs = passthrough.define_loss(
model=random_model,
features=features,
mode=estimator_lib.ModeKeys.EVAL)
predictions = random_model.predict({
feature_keys.PredictionFeatures.TIMES: [[5, 7, 8]],
feature_keys.PredictionFeatures.STATE_TUPLE: model_outputs.end_state
})
with self.cached_session():
variables.global_variables_initializer().run()
predicted_mean = predictions["mean"].eval()
predicted_covariance = predictions["covariance"].eval()
self._check_predictions(predicted_mean, predicted_covariance,
window_size=3)
class ExogenousTests(test.TestCase):
def test_noise_increasing(self):
for dtype in [dtypes.float32, dtypes.float64]:
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
original_covariance = array_ops.diag(array_ops.ones(shape=[5]))
_, new_covariance, _ = random_model._exogenous_noise_increasing(
current_times=[[1]],
exogenous_values=[[5.]],
state=[
array_ops.ones(shape=[1, 5]), original_covariance[None], [0]
])
with self.cached_session() as session:
variables.global_variables_initializer().run()
evaled_new_covariance, evaled_original_covariance = session.run(
[new_covariance[0], original_covariance])
new_variances = numpy.diag(evaled_new_covariance)
original_variances = numpy.diag(evaled_original_covariance)
for i in range(5):
self.assertGreater(new_variances[i], original_variances[i])
def test_noise_decreasing(self):
for dtype in [dtypes.float32, dtypes.float64]:
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
random_model.initialize_graph()
original_covariance = array_ops.diag(
array_ops.ones(shape=[5], dtype=dtype))
_, new_covariance, _ = random_model._exogenous_noise_decreasing(
current_times=[[1]],
exogenous_values=constant_op.constant([[-2.]], dtype=dtype),
state=[
-array_ops.ones(shape=[1, 5], dtype=dtype),
original_covariance[None], [0]
])
with self.cached_session() as session:
variables.global_variables_initializer().run()
evaled_new_covariance, evaled_original_covariance = session.run(
[new_covariance[0], original_covariance])
new_variances = numpy.diag(evaled_new_covariance)
original_variances = numpy.diag(evaled_original_covariance)
for i in range(5):
self.assertLess(new_variances[i], original_variances[i])
class StubStateSpaceModel(state_space_model.StateSpaceModel):
def __init__(self,
transition,
state_noise_dimension,
configuration=state_space_model.StateSpaceModelConfiguration()):
self.transition = transition
self.noise_transform = numpy.random.normal(
size=(transition.shape[0], state_noise_dimension)).astype(numpy.float32)
# Test feature + batch broadcasting
self.observation_model = numpy.random.normal(
size=(transition.shape[0])).astype(numpy.float32)
super(StubStateSpaceModel, self).__init__(
configuration=configuration)
def get_state_transition(self):
return self.transition
def get_noise_transform(self):
return self.noise_transform
def get_observation_model(self, times):
return self.observation_model
GeneratedModel = collections.namedtuple(
"GeneratedModel", ["model", "data", "true_parameters"])
class PosteriorTests(test.TestCase):
def _get_cycle_transition(self, period):
cycle_transition = numpy.zeros([period - 1, period - 1],
dtype=numpy.float32)
cycle_transition[0, :] = -1
cycle_transition[1:, :-1] = numpy.identity(period - 2)
return cycle_transition
_adder_transition = numpy.array([[1, 1],
[0, 1]], dtype=numpy.float32)
def _get_single_model(self):
numpy.random.seed(8)
stub_model = StubStateSpaceModel(
transition=self._get_cycle_transition(5), state_noise_dimension=0)
series_length = 1000
stub_model.initialize_graph()
true_params = stub_model.random_model_parameters()
data = stub_model.generate(
number_of_series=1, series_length=series_length,
model_parameters=true_params)
return GeneratedModel(
model=stub_model, data=data, true_parameters=true_params)
def test_exact_posterior_recovery_no_transition_noise(self):
with self.cached_session() as session:
stub_model, data, true_params = self._get_single_model()
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(data))
features, _ = input_fn()
model_outputs = stub_model.get_batch_loss(
features=features,
mode=None,
state=math_utils.replicate_state(
start_state=stub_model.get_start_state(),
batch_size=array_ops.shape(
features[feature_keys.TrainEvalFeatures.TIMES])[0]))
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
posterior_mean, posterior_var, posterior_times = session.run(
# Feed the true model parameters so that this test doesn't depend on
# the generated parameters being close to the variable initializations
# (an alternative would be training steps to fit the noise values,
# which would be slow).
model_outputs.end_state, feed_dict=true_params)
coordinator.request_stop()
coordinator.join()
self.assertAllClose(numpy.zeros([1, 4, 4]), posterior_var,
atol=1e-2)
self.assertAllClose(
numpy.dot(
numpy.linalg.matrix_power(
stub_model.transition,
data[feature_keys.TrainEvalFeatures.TIMES].shape[1]),
true_params[stub_model.prior_state_mean]),
posterior_mean[0],
rtol=1e-1)
self.assertAllClose(
math_utils.batch_end_time(
features[feature_keys.TrainEvalFeatures.TIMES]).eval(),
posterior_times)
def test_chained_exact_posterior_recovery_no_transition_noise(self):
with self.cached_session() as session:
stub_model, data, true_params = self._get_single_model()
chunk_size = 10
input_fn = test_utils.AllWindowInputFn(
input_pipeline.NumpyReader(data), window_size=chunk_size)
features, _ = input_fn()
state_manager = state_management.ChainingStateManager(
state_saving_interval=1)
state_manager.initialize_graph(stub_model)
model_outputs = state_manager.define_loss(
model=stub_model,
features=features,
mode=estimator_lib.ModeKeys.TRAIN)
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
for _ in range(
data[feature_keys.TrainEvalFeatures.TIMES].shape[1] // chunk_size):
model_outputs.loss.eval()
posterior_mean, posterior_var, posterior_times = session.run(
model_outputs.end_state, feed_dict=true_params)
coordinator.request_stop()
coordinator.join()
self.assertAllClose(numpy.zeros([1, 4, 4]), posterior_var,
atol=1e-2)
self.assertAllClose(
numpy.dot(
numpy.linalg.matrix_power(
stub_model.transition,
data[feature_keys.TrainEvalFeatures.TIMES].shape[1]),
true_params[stub_model.prior_state_mean]),
posterior_mean[0],
rtol=1e-1)
self.assertAllClose(data[feature_keys.TrainEvalFeatures.TIMES][:, -1],
posterior_times)
class TimeDependentStateSpaceModel(state_space_model.StateSpaceModel):
"""A mostly trivial model which predicts values = times + 1."""
def __init__(self, static_unrolling_window_size_threshold=None):
super(TimeDependentStateSpaceModel, self).__init__(
configuration=state_space_model.StateSpaceModelConfiguration(
use_observation_noise=False,
transition_covariance_initial_log_scale_bias=5.,
static_unrolling_window_size_threshold=
static_unrolling_window_size_threshold))
def get_state_transition(self):
return array_ops.ones(shape=[1, 1])
def get_noise_transform(self):
return array_ops.ones(shape=[1, 1])
def get_observation_model(self, times):
return array_ops.reshape(
tensor=math_ops.cast(times + 1, dtypes.float32), shape=[-1, 1, 1])
def make_priors(self):
return (ops.convert_to_tensor([1.]), ops.convert_to_tensor([[0.]]))
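# Clarifying note (not part of the original test): with an identity state
# transition, a prior state mean of 1 with zero prior covariance, and
# observation noise disabled, the observation model above multiplies the
# (roughly unit) state by `times + 1`, which is why this model's predictions
# track `values = times + 1`, as the class docstring states.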
class UnknownShapeModel(TimeDependentStateSpaceModel):
def get_observation_model(self, times):
parent_model = super(UnknownShapeModel, self).get_observation_model(times)
return array_ops.placeholder_with_default(
input=parent_model, shape=tensor_shape.unknown_shape())
class TimeDependentTests(test.TestCase):
def _time_dependency_test_template(self, model_type):
"""Test that a time-dependent observation model influences predictions."""
model = model_type()
estimator = estimators.StateSpaceRegressor(
model=model, optimizer=gradient_descent.GradientDescentOptimizer(0.1))
values = numpy.reshape([1., 2., 3., 4.],
newshape=[1, 4, 1])
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader({
feature_keys.TrainEvalFeatures.TIMES: [[0, 1, 2, 3]],
feature_keys.TrainEvalFeatures.VALUES: values
}))
estimator.train(input_fn=input_fn, max_steps=1)
predicted_values = estimator.evaluate(input_fn=input_fn, steps=1)["mean"]
# Throw out the first value so we don't test the prior
self.assertAllEqual(values[1:], predicted_values[1:])
def test_undefined_shape_time_dependency(self):
self._time_dependency_test_template(UnknownShapeModel)
def test_loop_unrolling(self):
"""Tests running/restoring from a checkpoint with static unrolling."""
model = TimeDependentStateSpaceModel(
# Unroll during training, but not evaluation
static_unrolling_window_size_threshold=2)
estimator = estimators.StateSpaceRegressor(model=model)
times = numpy.arange(100)
values = numpy.arange(100)
dataset = {
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(dataset), batch_size=16, window_size=2)
eval_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(dataset))
estimator.train(input_fn=train_input_fn, max_steps=1)
estimator.evaluate(input_fn=eval_input_fn, steps=1)
class LevelOnlyModel(state_space_model.StateSpaceModel):
def get_state_transition(self):
return linalg_ops.eye(1, dtype=self.dtype)
def get_noise_transform(self):
return linalg_ops.eye(1, dtype=self.dtype)
def get_observation_model(self, times):
return [1]
class MultivariateLevelModel(
state_space_model.StateSpaceCorrelatedFeaturesEnsemble):
def __init__(self, configuration):
univariate_component_configuration = configuration._replace(
num_features=1)
components = []
for feature in range(configuration.num_features):
with variable_scope.variable_scope("feature{}".format(feature)):
components.append(
LevelOnlyModel(configuration=univariate_component_configuration))
super(MultivariateLevelModel, self).__init__(
ensemble_members=components, configuration=configuration)
class MultivariateTests(test.TestCase):
def test_multivariate(self):
dtype = dtypes.float32
num_features = 3
covariance = numpy.eye(num_features)
# A single off-diagonal has a non-zero value in the true transition
# noise covariance.
covariance[-1, 0] = 1.
covariance[0, -1] = 1.
dataset_size = 100
values = numpy.cumsum(
numpy.random.multivariate_normal(
mean=numpy.zeros(num_features),
cov=covariance,
size=dataset_size),
axis=0)
times = numpy.arange(dataset_size)
model = MultivariateLevelModel(
configuration=state_space_model.StateSpaceModelConfiguration(
num_features=num_features,
dtype=dtype,
use_observation_noise=False,
transition_covariance_initial_log_scale_bias=5.))
estimator = estimators.StateSpaceRegressor(
model=model, optimizer=gradient_descent.GradientDescentOptimizer(0.1))
data = {
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(data), batch_size=16, window_size=16)
estimator.train(input_fn=train_input_fn, steps=1)
for component in model._ensemble_members:
# Check that input statistics propagated to component models
self.assertTrue(component._input_statistics)
def test_ensemble_observation_noise(self):
model = MultivariateLevelModel(
configuration=state_space_model.StateSpaceModelConfiguration())
model.initialize_graph()
outputs = model.define_loss(
features={
feature_keys.TrainEvalFeatures.TIMES:
constant_op.constant([[1, 2]]),
feature_keys.TrainEvalFeatures.VALUES:
constant_op.constant([[[1.], [2.]]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
if __name__ == "__main__":
test.main()
| 42.002646
| 96
| 0.692196
|
a05b8c2457a0177569eaa6eb7ee34f15a128436c
| 1,017
|
py
|
Python
|
view/workspace.py
|
LianGee/zed
|
0838eec03733a26705126d96dfb59af6bdf19a9e
|
[
"MIT"
] | null | null | null |
view/workspace.py
|
LianGee/zed
|
0838eec03733a26705126d96dfb59af6bdf19a9e
|
[
"MIT"
] | null | null | null |
view/workspace.py
|
LianGee/zed
|
0838eec03733a26705126d96dfb59af6bdf19a9e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : workspace.py
# @Author: zaoshu
# @Date : 2020-03-04
# @Desc :
from flask import Blueprint, request, g
from common.loged import log_this
from common.login import login_required
from common.response import Response
from service.workspace_service import WorkspaceService
workspace_bp = Blueprint('workspace', __name__)
@workspace_bp.route('/save', methods=['post'])
@login_required
@log_this
def save():
id = request.json.get('id')
name = request.json.get('name')
user_name = g.user.name
return Response.success(WorkspaceService.save(user_name, id, name))
@workspace_bp.route('/list', methods=['get'])
@login_required
@log_this
def get_workspace_list():
return Response.success(WorkspaceService.list(g.user.name))
@workspace_bp.route('/delete', methods=['delete'])
@login_required
@log_this
def delete():
id = request.args.get('id')
user_name = g.user.name
return Response.success(WorkspaceService.delete(user_name, id))
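# A minimal usage sketch (not part of the original module): registering this
# blueprint on a Flask application. The `_example_create_app` helper and the
# '/workspace' url_prefix are assumptions for illustration only.
def _example_create_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(workspace_bp, url_prefix='/workspace')
    return app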
| 24.804878
| 71
| 0.72763
|
52f6f9985bd28239334335b092ab3cfc5dceee78
| 1,058
|
py
|
Python
|
setup.py
|
mechaphish/meister
|
d115688f1a994984748a7d0dee31162ce2b3627f
|
[
"BSD-2-Clause"
] | 9
|
2016-08-20T23:39:45.000Z
|
2020-11-06T23:10:04.000Z
|
setup.py
|
mechaphish/meister
|
d115688f1a994984748a7d0dee31162ce2b3627f
|
[
"BSD-2-Clause"
] | 1
|
2016-11-14T07:16:27.000Z
|
2017-10-24T01:36:46.000Z
|
setup.py
|
mechaphish/meister
|
d115688f1a994984748a7d0dee31162ce2b3627f
|
[
"BSD-2-Clause"
] | 8
|
2016-08-21T13:13:51.000Z
|
2020-11-06T23:10:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Distutil setup scripts for meister and its requirements."""
# pylint: disable=import-error,no-name-in-module
import os
import os.path
import shutil
from distutils.core import setup
REQUIREMENTS, DEPENDENCIES = [], []
with open('requirements.txt', 'r') as req_file:
for r in req_file.readlines():
r_ = r.strip()
if r_.startswith('git+'):
DEPENDENCIES.append(r_)
# We discard the version number of our requirements
REQUIREMENTS.append(r_.rsplit("egg=", 1)[1].rsplit("-", 1)[0])
else:
REQUIREMENTS.append(r_)
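# Illustration of the parsing above (hypothetical requirement line): a line such
# as "git+https://github.com/example/foo.git#egg=foo-1.2" is kept verbatim in
# DEPENDENCIES, while REQUIREMENTS receives just "foo" (the text after "egg="
# with its trailing "-<version>" stripped). Plain lines like "requests" go
# straight into REQUIREMENTS.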
setup(name='meister',
version='1.0.1',
packages=['meister', 'meister.creators', 'meister.schedulers'],
install_requires=REQUIREMENTS,
dependency_links=DEPENDENCIES,
entry_points={
'console_scripts': [
"meister=meister.__main__:main"
],
},
description='Master component of the Shellphish CRS.',
url='https://github.com/mechaphish/meister')
| 28.594595
| 74
| 0.625709
|
d994e9a11094147ac6efb378b9b0dc5725c9344b
| 2,042
|
py
|
Python
|
grr/server/grr_response_server/databases/mysql_artifacts.py
|
oueldz4/grr
|
8c60d9198cc0875a8ea80b90237eb0a8272082ff
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/databases/mysql_artifacts.py
|
oueldz4/grr
|
8c60d9198cc0875a8ea80b90237eb0a8272082ff
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/databases/mysql_artifacts.py
|
oueldz4/grr
|
8c60d9198cc0875a8ea80b90237eb0a8272082ff
|
[
"Apache-2.0"
] | 1
|
2020-07-09T01:08:48.000Z
|
2020-07-09T01:08:48.000Z
|
#!/usr/bin/env python
"""The MySQL database methods for handling artifacts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import MySQLdb
from MySQLdb.constants import ER as mysql_error_constants
from typing import Text
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_server import db
from grr_response_server.databases import mysql_utils
def _RowToArtifact(row):
return rdf_artifacts.Artifact.FromSerializedString(row[0])
class MySQLDBArtifactsMixin(object):
"""An MySQL database mixin with artifact-related methods."""
@mysql_utils.WithTransaction()
def WriteArtifact(self, artifact, cursor=None):
"""Writes new artifact to the database."""
name = Text(artifact.name)
try:
cursor.execute("INSERT INTO artifacts (name, definition) VALUES (%s, %s)",
[name, artifact.SerializeToString()])
except MySQLdb.IntegrityError as error:
if error.args[0] == mysql_error_constants.DUP_ENTRY:
raise db.DuplicatedArtifactError(name, cause=error)
else:
raise
@mysql_utils.WithTransaction()
def ReadArtifact(self, name, cursor=None):
"""Looks up an artifact with given name from the database."""
cursor.execute("SELECT definition FROM artifacts WHERE name = %s", [name])
row = cursor.fetchone()
if row is None:
raise db.UnknownArtifactError(name)
else:
return _RowToArtifact(row)
@mysql_utils.WithTransaction()
def ReadAllArtifacts(self, cursor=None):
"""Lists all artifacts that are stored in the database."""
cursor.execute("SELECT definition FROM artifacts")
return [_RowToArtifact(row) for row in cursor.fetchall()]
@mysql_utils.WithTransaction()
def DeleteArtifact(self, name, cursor=None):
"""Deletes an artifact with given name from the database."""
cursor.execute("DELETE FROM artifacts WHERE name = %s", [name])
if cursor.rowcount == 0:
raise db.UnknownArtifactError(name)
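# A minimal usage sketch (assumptions: `my_db` is an instance of a database
# class that mixes in MySQLDBArtifactsMixin; the artifact name is illustrative):
#
#   artifact = rdf_artifacts.Artifact(name="TestArtifact")
#   my_db.WriteArtifact(artifact)           # a second write raises DuplicatedArtifactError
#   stored = my_db.ReadArtifact("TestArtifact")
#   my_db.DeleteArtifact("TestArtifact")    # raises UnknownArtifactError if absent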
| 32.935484
| 80
| 0.733105
|
61d270fe1a630d504b6900531a9e5c922e1e56f3
| 301
|
py
|
Python
|
7_funktionen/1_drawBotFunktionen.py
|
Coding-for-the-Arts/drawbot-samples
|
e37994f3497aca252312431100b53548b4573f15
|
[
"CC0-1.0"
] | null | null | null |
7_funktionen/1_drawBotFunktionen.py
|
Coding-for-the-Arts/drawbot-samples
|
e37994f3497aca252312431100b53548b4573f15
|
[
"CC0-1.0"
] | null | null | null |
7_funktionen/1_drawBotFunktionen.py
|
Coding-for-the-Arts/drawbot-samples
|
e37994f3497aca252312431100b53548b4573f15
|
[
"CC0-1.0"
] | null | null | null |
"""
DrawBot functions
"""
newPage("1000, 1000")
oval(200, 200, 600, 600)
saveImage("~/Desktop/myImage.pdf")
"""
These are examples of functions that exist only in DrawBot,
but not in plain Python.
You can find more information about them in the DrawBot documentation.
https://www.drawbot.com/
"""
| 17.705882
| 64
| 0.724252
|
edc2bb9df188e52f787cdb79f68c203be8241f6a
| 45,345
|
py
|
Python
|
patsy/build.py
|
neurodebian/patsy
|
de8d1633f8db42a9c68507a7da0a32d62866a283
|
[
"PSF-2.0"
] | null | null | null |
patsy/build.py
|
neurodebian/patsy
|
de8d1633f8db42a9c68507a7da0a32d62866a283
|
[
"PSF-2.0"
] | null | null | null |
patsy/build.py
|
neurodebian/patsy
|
de8d1633f8db42a9c68507a7da0a32d62866a283
|
[
"PSF-2.0"
] | null | null | null |
# This file is part of Patsy
# Copyright (C) 2011-2013 Nathaniel Smith <njs@pobox.com>
# See file LICENSE.txt for license information.
# This file defines the core design matrix building functions.
# These are made available in the patsy.* namespace
__all__ = ["design_matrix_builders", "DesignMatrixBuilder",
"build_design_matrices"]
import itertools
import six
import numpy as np
from patsy import PatsyError
from patsy.categorical import (guess_categorical,
CategoricalSniffer,
categorical_to_int)
from patsy.util import (atleast_2d_column_default,
have_pandas, have_pandas_categorical,
asarray_or_pandas)
from patsy.design_info import DesignMatrix, DesignInfo
from patsy.redundancy import pick_contrasts_for_term
from patsy.desc import ModelDesc
from patsy.eval import EvalEnvironment
from patsy.contrasts import code_contrast_matrix, Treatment
from patsy.compat import OrderedDict
from patsy.missing import NAAction
if have_pandas:
import pandas
class _MockFactor(object):
def __init__(self, name="MOCKMOCK"):
self._name = name
def eval(self, state, env):
return env["mock"]
def name(self):
return self._name
def _max_allowed_dim(dim, arr, factor):
if arr.ndim > dim:
msg = ("factor '%s' evaluates to an %s-dimensional array; I only "
"handle arrays with dimension <= %s"
% (factor.name(), arr.ndim, dim))
raise PatsyError(msg, factor)
def test__max_allowed_dim():
from nose.tools import assert_raises
f = _MockFactor()
_max_allowed_dim(1, np.array(1), f)
_max_allowed_dim(1, np.array([1]), f)
assert_raises(PatsyError, _max_allowed_dim, 1, np.array([[1]]), f)
assert_raises(PatsyError, _max_allowed_dim, 1, np.array([[[1]]]), f)
_max_allowed_dim(2, np.array(1), f)
_max_allowed_dim(2, np.array([1]), f)
_max_allowed_dim(2, np.array([[1]]), f)
assert_raises(PatsyError, _max_allowed_dim, 2, np.array([[[1]]]), f)
class _NumFactorEvaluator(object):
def __init__(self, factor, state, expected_columns):
# This one instance variable is part of our public API:
self.factor = factor
self._state = state
self._expected_columns = expected_columns
# Returns either a 2d ndarray, or a DataFrame, plus is_NA mask
def eval(self, data, NA_action):
result = self.factor.eval(self._state, data)
result = atleast_2d_column_default(result, preserve_pandas=True)
_max_allowed_dim(2, result, self.factor)
if result.shape[1] != self._expected_columns:
raise PatsyError("when evaluating factor %s, I got %s columns "
"instead of the %s I was expecting"
% (self.factor.name(), self._expected_columns,
result.shape[1]),
self.factor)
if not np.issubdtype(np.asarray(result).dtype, np.number):
raise PatsyError("when evaluating numeric factor %s, "
"I got non-numeric data of type '%s'"
% (self.factor.name(), result.dtype),
self.factor)
return result, NA_action.is_numerical_NA(result)
def test__NumFactorEvaluator():
from nose.tools import assert_raises
naa = NAAction()
f = _MockFactor()
nf1 = _NumFactorEvaluator(f, {}, 1)
assert nf1.factor is f
eval123, is_NA = nf1.eval({"mock": [1, 2, 3]}, naa)
assert eval123.shape == (3, 1)
assert np.all(eval123 == [[1], [2], [3]])
assert is_NA.shape == (3,)
assert np.all(~is_NA)
assert_raises(PatsyError, nf1.eval, {"mock": [[[1]]]}, naa)
assert_raises(PatsyError, nf1.eval, {"mock": [[1, 2]]}, naa)
assert_raises(PatsyError, nf1.eval, {"mock": ["a", "b"]}, naa)
assert_raises(PatsyError, nf1.eval, {"mock": [True, False]}, naa)
nf2 = _NumFactorEvaluator(_MockFactor(), {}, 2)
eval123321, is_NA = nf2.eval({"mock": [[1, 3], [2, 2], [3, 1]]}, naa)
assert eval123321.shape == (3, 2)
assert np.all(eval123321 == [[1, 3], [2, 2], [3, 1]])
assert is_NA.shape == (3,)
assert np.all(~is_NA)
assert_raises(PatsyError, nf2.eval, {"mock": [1, 2, 3]}, naa)
assert_raises(PatsyError, nf2.eval, {"mock": [[1, 2, 3]]}, naa)
ev_nan, is_NA = nf1.eval({"mock": [1, 2, np.nan]},
NAAction(NA_types=["NaN"]))
assert np.array_equal(is_NA, [False, False, True])
ev_nan, is_NA = nf1.eval({"mock": [1, 2, np.nan]},
NAAction(NA_types=[]))
assert np.array_equal(is_NA, [False, False, False])
if have_pandas:
eval_ser, _ = nf1.eval({"mock":
pandas.Series([1, 2, 3], index=[10, 20, 30])},
naa)
assert isinstance(eval_ser, pandas.DataFrame)
assert np.array_equal(eval_ser, [[1], [2], [3]])
assert np.array_equal(eval_ser.index, [10, 20, 30])
eval_df1, _ = nf1.eval({"mock":
pandas.DataFrame([[2], [1], [3]],
index=[20, 10, 30])},
naa)
assert isinstance(eval_df1, pandas.DataFrame)
assert np.array_equal(eval_df1, [[2], [1], [3]])
assert np.array_equal(eval_df1.index, [20, 10, 30])
eval_df2, _ = nf2.eval({"mock":
pandas.DataFrame([[2, 3], [1, 4], [3, -1]],
index=[20, 30, 10])},
naa)
assert isinstance(eval_df2, pandas.DataFrame)
assert np.array_equal(eval_df2, [[2, 3], [1, 4], [3, -1]])
assert np.array_equal(eval_df2.index, [20, 30, 10])
assert_raises(PatsyError,
nf2.eval,
{"mock": pandas.Series([1, 2, 3], index=[10, 20, 30])},
naa)
assert_raises(PatsyError,
nf1.eval,
{"mock":
pandas.DataFrame([[2, 3], [1, 4], [3, -1]],
index=[20, 30, 10])},
naa)
class _CatFactorEvaluator(object):
def __init__(self, factor, state, levels):
# This one instance variable is part of our public API:
self.factor = factor
self._state = state
self._levels = tuple(levels)
# returns either a 1d ndarray or a pandas.Series, plus is_NA mask
def eval(self, data, NA_action):
result = self.factor.eval(self._state, data)
result = categorical_to_int(result, self._levels, NA_action,
origin=self.factor)
assert result.ndim == 1
return result, np.asarray(result == -1)
def test__CatFactorEvaluator():
from nose.tools import assert_raises
from patsy.categorical import C
naa = NAAction()
f = _MockFactor()
cf1 = _CatFactorEvaluator(f, {}, ["a", "b"])
assert cf1.factor is f
cat1, _ = cf1.eval({"mock": ["b", "a", "b"]}, naa)
assert cat1.shape == (3,)
assert np.all(cat1 == [1, 0, 1])
assert_raises(PatsyError, cf1.eval, {"mock": ["c"]}, naa)
assert_raises(PatsyError, cf1.eval, {"mock": C(["a", "c"])}, naa)
assert_raises(PatsyError, cf1.eval,
{"mock": C(["a", "b"], levels=["b", "a"])}, naa)
assert_raises(PatsyError, cf1.eval, {"mock": [1, 0, 1]}, naa)
bad_cat = np.asarray(["b", "a", "a", "b"])
bad_cat.resize((2, 2))
assert_raises(PatsyError, cf1.eval, {"mock": bad_cat}, naa)
cat1_NA, is_NA = cf1.eval({"mock": ["a", None, "b"]},
NAAction(NA_types=["None"]))
assert np.array_equal(is_NA, [False, True, False])
assert np.array_equal(cat1_NA, [0, -1, 1])
assert_raises(PatsyError, cf1.eval,
{"mock": ["a", None, "b"]}, NAAction(NA_types=[]))
cf2 = _CatFactorEvaluator(_MockFactor(), {}, [False, True])
cat2, _ = cf2.eval({"mock": [True, False, False, True]}, naa)
assert cat2.shape == (4,)
assert np.all(cat2 == [1, 0, 0, 1])
if have_pandas:
s = pandas.Series(["b", "a"], index=[10, 20])
cat_s, _ = cf1.eval({"mock": s}, naa)
assert isinstance(cat_s, pandas.Series)
assert np.array_equal(cat_s, [1, 0])
assert np.array_equal(cat_s.index, [10, 20])
sbool = pandas.Series([True, False], index=[11, 21])
cat_sbool, _ = cf2.eval({"mock": sbool}, naa)
assert isinstance(cat_sbool, pandas.Series)
assert np.array_equal(cat_sbool, [1, 0])
assert np.array_equal(cat_sbool.index, [11, 21])
def _column_combinations(columns_per_factor):
# For consistency with R, the left-most item iterates fastest:
iterators = [range(n) for n in reversed(columns_per_factor)]
for reversed_combo in itertools.product(*iterators):
yield reversed_combo[::-1]
def test__column_combinations():
assert list(_column_combinations([2, 3])) == [(0, 0),
(1, 0),
(0, 1),
(1, 1),
(0, 2),
(1, 2)]
assert list(_column_combinations([3])) == [(0,), (1,), (2,)]
# This class is responsible for producing some columns in a final design
# matrix output:
class _ColumnBuilder(object):
def __init__(self, factors, num_columns, cat_contrasts):
self._factors = factors
self._num_columns = num_columns
self._cat_contrasts = cat_contrasts
self._columns_per_factor = []
for factor in self._factors:
if factor in self._cat_contrasts:
columns = self._cat_contrasts[factor].matrix.shape[1]
else:
columns = num_columns[factor]
self._columns_per_factor.append(columns)
self.total_columns = np.prod(self._columns_per_factor, dtype=int)
def column_names(self):
if not self._factors:
return ["Intercept"]
column_names = []
for i, column_idxs in enumerate(_column_combinations(self._columns_per_factor)):
name_pieces = []
for factor, column_idx in zip(self._factors, column_idxs):
if factor in self._num_columns:
if self._num_columns[factor] > 1:
name_pieces.append("%s[%s]"
% (factor.name(), column_idx))
else:
assert column_idx == 0
name_pieces.append(factor.name())
else:
contrast = self._cat_contrasts[factor]
suffix = contrast.column_suffixes[column_idx]
name_pieces.append("%s%s" % (factor.name(), suffix))
column_names.append(":".join(name_pieces))
assert len(column_names) == self.total_columns
return column_names
def build(self, factor_values, out):
assert self.total_columns == out.shape[1]
out[:] = 1
for i, column_idxs in enumerate(_column_combinations(self._columns_per_factor)):
for factor, column_idx in zip(self._factors, column_idxs):
if factor in self._cat_contrasts:
contrast = self._cat_contrasts[factor]
if np.any(factor_values[factor] < 0):
raise PatsyError("can't build a design matrix "
"containing missing values", factor)
out[:, i] *= contrast.matrix[factor_values[factor],
column_idx]
else:
assert (factor_values[factor].shape[1]
== self._num_columns[factor])
out[:, i] *= factor_values[factor][:, column_idx]
def test__ColumnBuilder():
from nose.tools import assert_raises
from patsy.contrasts import ContrastMatrix
from patsy.categorical import C
f1 = _MockFactor("f1")
f2 = _MockFactor("f2")
f3 = _MockFactor("f3")
contrast = ContrastMatrix(np.array([[0, 0.5],
[3, 0]]),
["[c1]", "[c2]"])
cb = _ColumnBuilder([f1, f2, f3], {f1: 1, f3: 1}, {f2: contrast})
mat = np.empty((3, 2))
assert cb.column_names() == ["f1:f2[c1]:f3", "f1:f2[c2]:f3"]
cb.build({f1: atleast_2d_column_default([1, 2, 3]),
f2: np.asarray([0, 0, 1]),
f3: atleast_2d_column_default([7.5, 2, -12])},
mat)
assert np.allclose(mat, [[0, 0.5 * 1 * 7.5],
[0, 0.5 * 2 * 2],
[3 * 3 * -12, 0]])
# Check that missing categorical values blow up
assert_raises(PatsyError, cb.build,
{f1: atleast_2d_column_default([1, 2, 3]),
f2: np.asarray([0, -1, 1]),
f3: atleast_2d_column_default([7.5, 2, -12])},
mat)
cb2 = _ColumnBuilder([f1, f2, f3], {f1: 2, f3: 1}, {f2: contrast})
mat2 = np.empty((3, 4))
cb2.build({f1: atleast_2d_column_default([[1, 2], [3, 4], [5, 6]]),
f2: np.asarray([0, 0, 1]),
f3: atleast_2d_column_default([7.5, 2, -12])},
mat2)
assert cb2.column_names() == ["f1[0]:f2[c1]:f3",
"f1[1]:f2[c1]:f3",
"f1[0]:f2[c2]:f3",
"f1[1]:f2[c2]:f3"]
assert np.allclose(mat2, [[0, 0, 0.5 * 1 * 7.5, 0.5 * 2 * 7.5],
[0, 0, 0.5 * 3 * 2, 0.5 * 4 * 2],
[3 * 5 * -12, 3 * 6 * -12, 0, 0]])
# Check intercept building:
cb_intercept = _ColumnBuilder([], {}, {})
assert cb_intercept.column_names() == ["Intercept"]
mat3 = np.empty((3, 1))
cb_intercept.build({f1: [1, 2, 3], f2: [1, 2, 3], f3: [1, 2, 3]}, mat3)
assert np.allclose(mat3, 1)
def _factors_memorize(factors, data_iter_maker):
# First, start off the memorization process by setting up each factor's
# state and finding out how many passes it will need:
factor_states = {}
passes_needed = {}
for factor in factors:
state = {}
which_pass = factor.memorize_passes_needed(state)
factor_states[factor] = state
passes_needed[factor] = which_pass
# Now, cycle through the data until all the factors have finished
# memorizing everything:
memorize_needed = set()
for factor, passes in six.iteritems(passes_needed):
if passes > 0:
memorize_needed.add(factor)
which_pass = 0
while memorize_needed:
for data in data_iter_maker():
for factor in memorize_needed:
state = factor_states[factor]
factor.memorize_chunk(state, which_pass, data)
for factor in list(memorize_needed):
factor.memorize_finish(factor_states[factor], which_pass)
if which_pass == passes_needed[factor] - 1:
memorize_needed.remove(factor)
which_pass += 1
return factor_states
def test__factors_memorize():
class MockFactor(object):
def __init__(self, requested_passes, token):
self._requested_passes = requested_passes
self._token = token
self._chunk_in_pass = 0
self._seen_passes = 0
def memorize_passes_needed(self, state):
state["calls"] = []
state["token"] = self._token
return self._requested_passes
def memorize_chunk(self, state, which_pass, data):
state["calls"].append(("memorize_chunk", which_pass))
assert data["chunk"] == self._chunk_in_pass
self._chunk_in_pass += 1
def memorize_finish(self, state, which_pass):
state["calls"].append(("memorize_finish", which_pass))
self._chunk_in_pass = 0
class Data(object):
CHUNKS = 3
def __init__(self):
self.calls = 0
self.data = [{"chunk": i} for i in range(self.CHUNKS)]
def __call__(self):
self.calls += 1
return iter(self.data)
data = Data()
f0 = MockFactor(0, "f0")
f1 = MockFactor(1, "f1")
f2a = MockFactor(2, "f2a")
f2b = MockFactor(2, "f2b")
factor_states = _factors_memorize(set([f0, f1, f2a, f2b]), data)
assert data.calls == 2
mem_chunks0 = [("memorize_chunk", 0)] * data.CHUNKS
mem_chunks1 = [("memorize_chunk", 1)] * data.CHUNKS
expected = {
f0: {
"calls": [],
"token": "f0",
},
f1: {
"calls": mem_chunks0 + [("memorize_finish", 0)],
"token": "f1",
},
f2a: {
"calls": mem_chunks0 + [("memorize_finish", 0)]
+ mem_chunks1 + [("memorize_finish", 1)],
"token": "f2a",
},
f2b: {
"calls": mem_chunks0 + [("memorize_finish", 0)]
+ mem_chunks1 + [("memorize_finish", 1)],
"token": "f2b",
},
}
assert factor_states == expected
def _examine_factor_types(factors, factor_states, data_iter_maker, NA_action):
num_column_counts = {}
cat_sniffers = {}
examine_needed = set(factors)
for data in data_iter_maker():
for factor in list(examine_needed):
value = factor.eval(factor_states[factor], data)
if factor in cat_sniffers or guess_categorical(value):
if factor not in cat_sniffers:
cat_sniffers[factor] = CategoricalSniffer(NA_action,
factor.origin)
done = cat_sniffers[factor].sniff(value)
if done:
examine_needed.remove(factor)
else:
# Numeric
value = atleast_2d_column_default(value)
_max_allowed_dim(2, value, factor)
column_count = value.shape[1]
num_column_counts[factor] = column_count
examine_needed.remove(factor)
if not examine_needed:
break
# Pull out the levels
cat_levels_contrasts = {}
for factor, sniffer in six.iteritems(cat_sniffers):
cat_levels_contrasts[factor] = sniffer.levels_contrast()
return (num_column_counts, cat_levels_contrasts)
def test__examine_factor_types():
from patsy.categorical import C
class MockFactor(object):
def __init__(self):
# You should check this using 'is', not '=='
from patsy.origin import Origin
self.origin = Origin("MOCK", 1, 2)
def eval(self, state, data):
return state[data]
def name(self):
return "MOCK MOCK"
# This hacky class can only be iterated over once, but it keeps track of
# how far it got.
class DataIterMaker(object):
def __init__(self):
self.i = -1
def __call__(self):
return self
def __iter__(self):
return self
def next(self):
self.i += 1
if self.i > 1:
raise StopIteration
return self.i
__next__ = next
num_1dim = MockFactor()
num_1col = MockFactor()
num_4col = MockFactor()
categ_1col = MockFactor()
bool_1col = MockFactor()
string_1col = MockFactor()
object_1col = MockFactor()
object_levels = (object(), object(), object())
factor_states = {
num_1dim: ([1, 2, 3], [4, 5, 6]),
num_1col: ([[1], [2], [3]], [[4], [5], [6]]),
num_4col: (np.zeros((3, 4)), np.ones((3, 4))),
categ_1col: (C(["a", "b", "c"], levels=("a", "b", "c"),
contrast="MOCK CONTRAST"),
C(["c", "b", "a"], levels=("a", "b", "c"),
contrast="MOCK CONTRAST")),
bool_1col: ([True, True, False], [False, True, True]),
# It has to read through all the data to see all the possible levels:
string_1col: (["a", "a", "a"], ["c", "b", "a"]),
object_1col: ([object_levels[0]] * 3, object_levels),
}
it = DataIterMaker()
(num_column_counts, cat_levels_contrasts,
) = _examine_factor_types(factor_states.keys(), factor_states, it,
NAAction())
assert it.i == 2
iterations = 0
assert num_column_counts == {num_1dim: 1, num_1col: 1, num_4col: 4}
assert cat_levels_contrasts == {
categ_1col: (("a", "b", "c"), "MOCK CONTRAST"),
bool_1col: ((False, True), None),
string_1col: (("a", "b", "c"), None),
object_1col: (tuple(sorted(object_levels, key=id)), None),
}
# Check that it doesn't read through all the data if that's not necessary:
it = DataIterMaker()
no_read_necessary = [num_1dim, num_1col, num_4col, categ_1col, bool_1col]
(num_column_counts, cat_levels_contrasts,
) = _examine_factor_types(no_read_necessary, factor_states, it,
NAAction())
assert it.i == 0
assert num_column_counts == {num_1dim: 1, num_1col: 1, num_4col: 4}
assert cat_levels_contrasts == {
categ_1col: (("a", "b", "c"), "MOCK CONTRAST"),
bool_1col: ((False, True), None),
}
# Illegal inputs:
bool_3col = MockFactor()
num_3dim = MockFactor()
# no such thing as a multi-dimensional Categorical
# categ_3dim = MockFactor()
string_3col = MockFactor()
object_3col = MockFactor()
illegal_factor_states = {
num_3dim: (np.zeros((3, 3, 3)), np.ones((3, 3, 3))),
string_3col: ([["a", "b", "c"]], [["b", "c", "a"]]),
object_3col: ([[[object()]]], [[[object()]]]),
}
from nose.tools import assert_raises
for illegal_factor in illegal_factor_states:
it = DataIterMaker()
try:
_examine_factor_types([illegal_factor], illegal_factor_states, it,
NAAction())
except PatsyError as e:
assert e.origin is illegal_factor.origin
else:
assert False
def _make_term_column_builders(terms,
num_column_counts,
cat_levels_contrasts):
# Sort each term into a bucket based on the set of numeric factors it
# contains:
term_buckets = OrderedDict()
bucket_ordering = []
for term in terms:
num_factors = []
for factor in term.factors:
if factor in num_column_counts:
num_factors.append(factor)
bucket = frozenset(num_factors)
if bucket not in term_buckets:
bucket_ordering.append(bucket)
term_buckets.setdefault(bucket, []).append(term)
# Special rule: if there is a no-numerics bucket, then it always comes
# first:
if frozenset() in term_buckets:
bucket_ordering.remove(frozenset())
bucket_ordering.insert(0, frozenset())
term_to_column_builders = {}
new_term_order = []
# Then within each bucket, work out which sort of contrasts we want to use
# for each term to avoid redundancy
for bucket in bucket_ordering:
bucket_terms = term_buckets[bucket]
# Sort by degree of interaction
bucket_terms.sort(key=lambda t: len(t.factors))
new_term_order += bucket_terms
used_subterms = set()
for term in bucket_terms:
column_builders = []
factor_codings = pick_contrasts_for_term(term,
num_column_counts,
used_subterms)
# Construct one _ColumnBuilder for each subterm
for factor_coding in factor_codings:
builder_factors = []
num_columns = {}
cat_contrasts = {}
# In order to preserve factor ordering information, the
# coding_for_term just returns dicts, and we refer to
# the original factors to figure out which are included in
# each subterm, and in what order
for factor in term.factors:
# Numeric factors are included in every subterm
if factor in num_column_counts:
builder_factors.append(factor)
num_columns[factor] = num_column_counts[factor]
elif factor in factor_coding:
builder_factors.append(factor)
levels, contrast = cat_levels_contrasts[factor]
# This is where the default coding is set to
# Treatment:
coded = code_contrast_matrix(factor_coding[factor],
levels, contrast,
default=Treatment)
cat_contrasts[factor] = coded
column_builder = _ColumnBuilder(builder_factors,
num_columns,
cat_contrasts)
column_builders.append(column_builder)
term_to_column_builders[term] = column_builders
return new_term_order, term_to_column_builders
def design_matrix_builders(termlists, data_iter_maker, NA_action="drop"):
"""Construct several :class:`DesignMatrixBuilders` from termlists.
This is one of Patsy's fundamental functions. This function and
:func:`build_design_matrices` together form the API to the core formula
interpretation machinery.
:arg termlists: A list of termlists, where each termlist is a list of
:class:`Term` objects which together specify a design matrix.
:arg data_iter_maker: A zero-argument callable which returns an iterator
over dict-like data objects. This must be a callable rather than a
simple iterator because sufficiently complex formulas may require
multiple passes over the data (e.g. if there are nested stateful
transforms).
:arg NA_action: An :class:`NAAction` object or string, used to determine
what values count as 'missing' for purposes of determining the levels of
categorical factors.
:returns: A list of :class:`DesignMatrixBuilder` objects, one for each
termlist passed in.
This function performs zero or more iterations over the data in order to
sniff out any necessary information about factor types, set up stateful
transforms, pick column names, etc.
See :ref:`formulas` for details.
.. versionadded:: 0.2.0
The ``NA_action`` argument.
"""
if isinstance(NA_action, str):
NA_action = NAAction(NA_action)
all_factors = set()
for termlist in termlists:
for term in termlist:
all_factors.update(term.factors)
factor_states = _factors_memorize(all_factors, data_iter_maker)
# Now all the factors have working eval methods, so we can evaluate them
# on some data to find out what type of data they return.
(num_column_counts,
cat_levels_contrasts) = _examine_factor_types(all_factors,
factor_states,
data_iter_maker,
NA_action)
# Now we need the factor evaluators, which encapsulate the knowledge of
# how to turn any given factor into a chunk of data:
factor_evaluators = {}
for factor in all_factors:
if factor in num_column_counts:
evaluator = _NumFactorEvaluator(factor,
factor_states[factor],
num_column_counts[factor])
else:
assert factor in cat_levels_contrasts
levels = cat_levels_contrasts[factor][0]
evaluator = _CatFactorEvaluator(factor, factor_states[factor],
levels)
factor_evaluators[factor] = evaluator
# And now we can construct the DesignMatrixBuilder for each termlist:
builders = []
for termlist in termlists:
result = _make_term_column_builders(termlist,
num_column_counts,
cat_levels_contrasts)
new_term_order, term_to_column_builders = result
assert frozenset(new_term_order) == frozenset(termlist)
term_evaluators = set()
for term in termlist:
for factor in term.factors:
term_evaluators.add(factor_evaluators[factor])
builders.append(DesignMatrixBuilder(new_term_order,
term_evaluators,
term_to_column_builders))
return builders
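# --------------------------------------------------------------------------
# Illustrative sketch (editor addition, not part of patsy): the two-step API
# described in the docstring above, driven by chunked data. ``chunks`` is a
# hypothetical list of dict-like data blocks; ModelDesc and EvalEnvironment
# are assumed to be the names already imported at the top of this module.
def _example_incremental_build(formula, chunks):
    # Parse a right-hand-side-only formula (e.g. "x + a") into a termlist.
    desc = ModelDesc.from_formula(formula, EvalEnvironment({}))
    # One or more passes over the data to sniff factor types, set up any
    # stateful transforms, pick column names, etc.:
    builders = design_matrix_builders([desc.rhs_termlist],
                                      lambda: iter(chunks))
    # Afterwards each chunk can be turned into a design matrix independently:
    return [build_design_matrices(builders, chunk)[0] for chunk in chunks]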
class DesignMatrixBuilder(object):
"""An opaque class representing Patsy's knowledge about
how to build a specific design matrix.
You get these objects from :func:`design_matrix_builders`, and pass them
to :func:`build_design_matrices`.
"""
def __init__(self, terms, evaluators, term_to_column_builders):
self._termlist = terms
self._evaluators = evaluators
self._term_to_column_builders = term_to_column_builders
term_column_count = []
self._column_names = []
for term in self._termlist:
column_builders = self._term_to_column_builders[term]
this_count = 0
for column_builder in column_builders:
this_names = column_builder.column_names()
this_count += len(this_names)
self._column_names += this_names
term_column_count.append(this_count)
term_column_starts = np.concatenate(([0], np.cumsum(term_column_count)))
self._term_slices = []
for i, term in enumerate(self._termlist):
span = slice(term_column_starts[i], term_column_starts[i + 1])
self._term_slices.append((term, span))
self.total_columns = np.sum(term_column_count, dtype=int)
# Generate this on demand, to avoid a reference loop:
@property
def design_info(self):
"""A :class:`DesignInfo` object giving information about the design
matrices that this DesignMatrixBuilder can be used to create."""
return DesignInfo(self._column_names, self._term_slices,
builder=self)
def subset(self, which_terms):
"""Create a new :class:`DesignMatrixBuilder` that includes only a
subset of the terms that this object does.
For example, if `builder` has terms `x`, `y`, and `z`, then::
builder2 = builder.subset(["x", "z"])
will return a new builder that will return design matrices with only
the columns corresponding to the terms `x` and `z`. After we do this,
then in general these two expressions will return the same thing (here
we assume that `x`, `y`, and `z` each generate a single column of the
output)::
          build_design_matrices([builder], data)[0][:, [0, 2]]
          build_design_matrices([builder2], data)[0]
However, a critical difference is that in the second case, `data` need
not contain any values for `y`. This is very useful when doing
prediction using a subset of a model, in which situation R usually
forces you to specify dummy values for `y`.
If using a formula to specify the terms to include, remember that like
any formula, the intercept term will be included by default, so use
`0` or `-1` in your formula if you want to avoid this.
:arg which_terms: The terms which should be kept in the new
:class:`DesignMatrixBuilder`. If this is a string, then it is parsed
as a formula, and then the names of the resulting terms are taken as
the terms to keep. If it is a list, then it can contain a mixture of
term names (as strings) and :class:`Term` objects.
        .. versionadded:: 0.2.0
"""
factor_to_evaluators = {}
for evaluator in self._evaluators:
factor_to_evaluators[evaluator.factor] = evaluator
design_info = self.design_info
term_name_to_term = dict(zip(design_info.term_names,
design_info.terms))
if isinstance(which_terms, str):
            # We don't use this EvalEnvironment -- all we want to do is to
            # find matching terms, and we can't do that using == on Term
            # objects, because that calls == on factor objects, which in turn
            # compares EvalEnvironments. So all we do with the parsed formula
            # is pull out the term *names*, which the EvalEnvironment doesn't
            # affect. This is just a placeholder to allow the ModelDesc
# to be created:
env = EvalEnvironment({})
desc = ModelDesc.from_formula(which_terms, env)
if desc.lhs_termlist:
raise PatsyError("right-hand-side-only formula required")
which_terms = [term.name() for term in desc.rhs_termlist]
terms = []
evaluators = set()
term_to_column_builders = {}
for term_or_name in which_terms:
if isinstance(term_or_name, six.string_types):
if term_or_name not in term_name_to_term:
raise PatsyError("requested term %r not found in "
"this DesignMatrixBuilder"
% (term_or_name,))
term = term_name_to_term[term_or_name]
else:
term = term_or_name
if term not in self._termlist:
raise PatsyError("requested term '%s' not found in this "
"DesignMatrixBuilder" % (term,))
for factor in term.factors:
evaluators.add(factor_to_evaluators[factor])
terms.append(term)
column_builder = self._term_to_column_builders[term]
term_to_column_builders[term] = column_builder
return DesignMatrixBuilder(terms,
evaluators,
term_to_column_builders)
def _build(self, evaluator_to_values, dtype):
factor_to_values = {}
need_reshape = False
num_rows = None
for evaluator, value in six.iteritems(evaluator_to_values):
if evaluator in self._evaluators:
factor_to_values[evaluator.factor] = value
if num_rows is not None:
assert num_rows == value.shape[0]
else:
num_rows = value.shape[0]
if num_rows is None:
# We have no dependence on the data -- e.g. an empty termlist, or
# only an intercept term.
num_rows = 1
need_reshape = True
m = DesignMatrix(np.empty((num_rows, self.total_columns), dtype=dtype),
self.design_info)
start_column = 0
for term in self._termlist:
for column_builder in self._term_to_column_builders[term]:
end_column = start_column + column_builder.total_columns
m_slice = m[:, start_column:end_column]
column_builder.build(factor_to_values, m_slice)
start_column = end_column
assert start_column == self.total_columns
return need_reshape, m
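# Illustrative sketch (editor addition, not part of patsy): using
# DesignMatrixBuilder.subset() for prediction when the new data only carries
# some of the original variables. ``builder`` and ``new_data`` are
# hypothetical placeholders, and the term name "x" is assumed to exist.
def _example_predict_with_subset(builder, new_data):
    # Keep only the term named "x"; new_data need not contain values for any
    # other term (e.g. the original response "y").
    sub_builder = builder.subset(["x"])
    return build_design_matrices([sub_builder], new_data)[0]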
class _CheckMatch(object):
def __init__(self, name, eq_fn):
self._name = name
self._eq_fn = eq_fn
self.value = None
self._value_desc = None
self._value_origin = None
def check(self, seen_value, desc, origin):
if self.value is None:
self.value = seen_value
self._value_desc = desc
self._value_origin = origin
else:
if not self._eq_fn(self.value, seen_value):
msg = ("%s mismatch between %s and %s"
% (self._name, self._value_desc, desc))
if isinstance(self.value, int):
msg += " (%r versus %r)" % (self.value, seen_value)
# XX FIXME: this is a case where having discontiguous Origins
# would be useful...
raise PatsyError(msg, origin)
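# Illustrative sketch (editor addition, not part of patsy): _CheckMatch just
# remembers the first value it sees and raises a PatsyError on any later
# mismatch; build_design_matrices() below uses it for row counts and indexes.
def _example_check_match():
    checker = _CheckMatch("Number of rows", lambda a, b: a == b)
    checker.check(10, "factor 'x'", None)
    checker.check(10, "factor 'a'", None)    # consistent -> no error
    # checker.check(7, "factor 'z'", None)   # would raise PatsyError
    return checker.value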
def build_design_matrices(builders, data,
NA_action="drop",
return_type="matrix",
dtype=np.dtype(float)):
"""Construct several design matrices from :class:`DesignMatrixBuilder`
objects.
This is one of Patsy's fundamental functions. This function and
:func:`design_matrix_builders` together form the API to the core formula
interpretation machinery.
:arg builders: A list of :class:`DesignMatrixBuilders` specifying the
design matrices to be built.
:arg data: A dict-like object which will be used to look up data.
:arg NA_action: What to do with rows that contain missing values. You can
``"drop"`` them, ``"raise"`` an error, or for customization, pass an
:class:`NAAction` object. See :class:`NAAction` for details on what
values count as 'missing' (and how to alter this).
:arg return_type: Either ``"matrix"`` or ``"dataframe"``. See below.
:arg dtype: The dtype of the returned matrix. Useful if you want to use
single-precision or extended-precision.
This function returns either a list of :class:`DesignMatrix` objects (for
``return_type="matrix"``) or a list of :class:`pandas.DataFrame` objects
(for ``return_type="dataframe"``). In both cases, all returned design
matrices will have ``.design_info`` attributes containing the appropriate
:class:`DesignInfo` objects.
Note that unlike :func:`design_matrix_builders`, this function takes only
a simple data argument, not any kind of iterator. That's because this
function doesn't need a global view of the data -- everything that depends
on the whole data set is already encapsulated in the `builders`. If you
are incrementally processing a large data set, simply call this function
for each chunk.
Index handling: This function always checks for indexes in the following
places:
* If ``data`` is a :class:`pandas.DataFrame`, its ``.index`` attribute.
* If any factors evaluate to a :class:`pandas.Series` or
:class:`pandas.DataFrame`, then their ``.index`` attributes.
If multiple indexes are found, they must be identical (same values in the
same order). If no indexes are found, then a default index is generated
using ``np.arange(num_rows)``. One way or another, we end up with a single
index for all the data. If ``return_type="dataframe"``, then this index is
used as the index of the returned DataFrame objects. Examining this index
makes it possible to determine which rows were removed due to NAs.
Determining the number of rows in design matrices: This is not as obvious
as it might seem, because it's possible to have a formula like "~ 1" that
doesn't depend on the data (it has no factors). For this formula, it's
obvious what every row in the design matrix should look like (just the
value ``1``); but, how many rows like this should there be? To determine
the number of rows in a design matrix, this function always checks in the
following places:
* If ``data`` is a :class:`pandas.DataFrame`, then its number of rows.
    * The number of entries in any factors present in any of the design
      matrices being built.
    All these values must match. In particular, if this function is called to
generate multiple design matrices at once, then they must all have the
same number of rows.
.. versionadded:: 0.2.0
The ``NA_action`` argument.
"""
if isinstance(NA_action, str):
NA_action = NAAction(NA_action)
if return_type == "dataframe" and not have_pandas:
raise PatsyError("pandas.DataFrame was requested, but pandas "
"is not installed")
if return_type not in ("matrix", "dataframe"):
raise PatsyError("unrecognized output type %r, should be "
"'matrix' or 'dataframe'" % (return_type,))
# Evaluate factors
evaluator_to_values = {}
evaluator_to_isNAs = {}
import operator
rows_checker = _CheckMatch("Number of rows", lambda a, b: a == b)
index_checker = _CheckMatch("Index", lambda a, b: a.equals(b))
if have_pandas and isinstance(data, pandas.DataFrame):
index_checker.check(data.index, "data.index", None)
rows_checker.check(data.shape[0], "data argument", None)
for builder in builders:
# We look at evaluators rather than factors here, because it might
# happen that we have the same factor twice, but with different
# memorized state.
for evaluator in builder._evaluators:
if evaluator not in evaluator_to_values:
value, is_NA = evaluator.eval(data, NA_action)
evaluator_to_isNAs[evaluator] = is_NA
# value may now be a Series, DataFrame, or ndarray
name = evaluator.factor.name()
origin = evaluator.factor.origin
rows_checker.check(value.shape[0], name, origin)
if (have_pandas
and isinstance(value, (pandas.Series, pandas.DataFrame))):
index_checker.check(value.index, name, origin)
# Strategy: we work with raw ndarrays for doing the actual
                # combining; DesignMatrixBuilder objects never see pandas
# objects. Then at the end, if a DataFrame was requested, we
# convert. So every entry in this dict is either a 2-d array
# of floats, or a 1-d array of integers (representing
# categories).
value = np.asarray(value)
evaluator_to_values[evaluator] = value
# Handle NAs
values = list(evaluator_to_values.values())
is_NAs = list(evaluator_to_isNAs.values())
origins = [evaluator.factor.origin for evaluator in evaluator_to_values]
pandas_index = index_checker.value
num_rows = rows_checker.value
# num_rows is None iff evaluator_to_values (and associated sets like
# 'values') are empty, i.e., we have no actual evaluators involved
# (formulas like "~ 1").
if return_type == "dataframe" and num_rows is not None:
if pandas_index is None:
pandas_index = np.arange(num_rows)
values.append(pandas_index)
is_NAs.append(np.zeros(len(pandas_index), dtype=bool))
origins.append(None)
new_values = NA_action.handle_NA(values, is_NAs, origins)
# NA_action may have changed the number of rows.
if new_values:
num_rows = new_values[0].shape[0]
if return_type == "dataframe" and num_rows is not None:
pandas_index = new_values.pop()
evaluator_to_values = dict(zip(evaluator_to_values, new_values))
# Build factor values into matrices
results = []
for builder in builders:
results.append(builder._build(evaluator_to_values, dtype))
matrices = []
for need_reshape, matrix in results:
if need_reshape:
# There is no data-dependence, at all -- a formula like "1 ~ 1".
# In this case the builder just returns a single-row matrix, and
# we have to broadcast it vertically to the appropriate size. If
# we can figure out what that is...
assert matrix.shape[0] == 1
if num_rows is not None:
matrix = DesignMatrix(np.repeat(matrix, num_rows, axis=0),
matrix.design_info)
else:
raise PatsyError(
"No design matrix has any non-trivial factors, "
"the data object is not a DataFrame. "
"I can't tell how many rows the design matrix should "
"have!"
)
matrices.append(matrix)
if return_type == "dataframe":
assert have_pandas
for i, matrix in enumerate(matrices):
di = matrix.design_info
matrices[i] = pandas.DataFrame(matrix,
columns=di.column_names,
index=pandas_index)
matrices[i].design_info = di
return matrices
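# Illustrative sketch (editor addition, not part of patsy): requesting
# pandas output so the surviving index can be inspected after NA handling.
# ``builders`` and ``data`` are hypothetical placeholders; requires pandas.
def _example_dataframe_output(builders, data):
    dfs = build_design_matrices(builders, data,
                                NA_action="drop",
                                return_type="dataframe")
    # Rows dropped because of missing values are exactly those absent from
    # dfs[0].index relative to the original data's index.
    return dfs[0]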
# It should be possible to do just the factors -> factor evaluators stuff
# alone, since that, well, makes logical sense to do, though categorical
# coding has to happen afterwards, hmm.
| 44.368885
| 88
| 0.581145
|
623e4190969b5f361d088a7c2e809afcc439139f
| 507
|
py
|
Python
|
ch04/fig04-01_onboard-led.py
|
ricelee-com/pico-starter-kit
|
f8286fab0bdd868e8fbe9def1dae23bd5d88215c
|
[
"BSD-3-Clause"
] | null | null | null |
ch04/fig04-01_onboard-led.py
|
ricelee-com/pico-starter-kit
|
f8286fab0bdd868e8fbe9def1dae23bd5d88215c
|
[
"BSD-3-Clause"
] | null | null | null |
ch04/fig04-01_onboard-led.py
|
ricelee-com/pico-starter-kit
|
f8286fab0bdd868e8fbe9def1dae23bd5d88215c
|
[
"BSD-3-Clause"
] | 3
|
2021-03-28T15:54:13.000Z
|
2021-03-31T16:08:54.000Z
|
#!/usr/bin/python3
#+-+-+-+-+-+-+-+-+-+-+-+
#|R|i|c|e|L|e|e|.|c|o|m|
#+-+-+-+-+-+-+-+-+-+-+-+
# Copyright (c) 2021, ricelee.com
# All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Origin: p48 at https://hackspace.raspberrypi.org/books/micropython-pico
import machine
import utime
led_onboard = machine.Pin(25, machine.Pin.OUT)
while True:
led_onboard.value(1)
utime.sleep(5)
led_onboard.value(0)
utime.sleep(5)
| 23.045455
| 73
| 0.641026
|
0b3c7500de5fe0a02bfe5e22c21ec8d3f3454166
| 223
|
py
|
Python
|
tests/settings.py
|
codesankalp/django-swappable-models
|
82f3bcda94b5c0b3bd028f2c4c03aecf4fc34e04
|
[
"MIT"
] | null | null | null |
tests/settings.py
|
codesankalp/django-swappable-models
|
82f3bcda94b5c0b3bd028f2c4c03aecf4fc34e04
|
[
"MIT"
] | null | null | null |
tests/settings.py
|
codesankalp/django-swappable-models
|
82f3bcda94b5c0b3bd028f2c4c03aecf4fc34e04
|
[
"MIT"
] | null | null | null |
SECRET_KEY = '1234'
INSTALLED_APPS = (
'tests.default_app',
)
MIDDLEWARE_CLASSES = tuple()
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
SWAP = False
| 17.153846
| 47
| 0.587444
|
a3f27f48e7b564a096deb9d2a5b3f01814ff1e46
| 16,023
|
py
|
Python
|
cep_price_console/unified_upload/view.py
|
zanebclark/cep_price_console
|
2f0e0a487a02a1eeceab9bc326d4cef153bd6781
|
[
"MIT"
] | null | null | null |
cep_price_console/unified_upload/view.py
|
zanebclark/cep_price_console
|
2f0e0a487a02a1eeceab9bc326d4cef153bd6781
|
[
"MIT"
] | 5
|
2021-03-31T19:27:48.000Z
|
2022-01-13T01:57:46.000Z
|
cep_price_console/unified_upload/view.py
|
zanebclark/cep_price_console
|
2f0e0a487a02a1eeceab9bc326d4cef153bd6781
|
[
"MIT"
] | null | null | null |
from cep_price_console.utils.log_utils import CustomAdapter, debug
from cep_price_console.utils import config
from cep_price_console.utils.gui_utils import center_window
from cep_price_console.unified_upload.view_1_file_selection import FileSelection
from cep_price_console.unified_upload.view_2_worksheet_selection import WorksheetSelection
from cep_price_console.unified_upload.view_3_column_mapping import ColumnSelection
from cep_price_console.unified_upload.model import Model
import tkinter as tk
from tkinter import messagebox
import tkinter.ttk as ttk
import logging
"""
Make a list of a available steps. In the model? Maybe. Initially, this list will be the input file,
but there might be branching logic that would append a different step for contract uploads or
essendant cost updates.
1) start with file selection
2) when the filepath is valid, enable the proceed button
3) if the step id isn't the smallest id, grid the previous button
4) if the step id isn't the highest, grid the next button
"""
"""
All of the steps need to be initialisable without data. The application needs to be stateless. Moving the view from
one state to another should require some logic, but the views should exist and be populated with the appropriate data
upon transition
Steps:
1) File selection
Encoding dropdown
Which formats do I accommodate?
3) Header row hinting / Column Hinting/ Ignore empty columns
Some tools have some logic built in to detect this. Feed it in and ask the user to verify it
4) Which vendor is this? Is it a customer?
Use the paned frame approach from the price list along with the tkintertable selection method
5) Column Datatype Definition/Function Mapping
This is the hardest part. Do I upload the data into SQL and then re-upload it with the new data definition when the
user changes it?
Or, do I keep the data in memory and upload post data type selection?
Need:
Universal button status function that calls the current step's get button status method that checks to see if we're
ready to move on
"""
class View(tk.Toplevel):
logger = CustomAdapter(logging.getLogger(str(__name__)), None)
@debug(lvl=logging.DEBUG, prefix='')
def __init__(self, master, *args, **kwargs):
self.master = master
super().__init__(*args, **kwargs)
self.title("Contract Upload Procedure: Step 1")
self.iconbitmap(config.FAVICON)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.model = Model(self)
self.col_mapper = None
self.match_review = None
self.vpn_rev = None
self.pop_frames = False
self.checked_dict = None
self.column_mapping_dataframe = None
self.current_step_index = 0
# Force the notebook to fill the window
self.protocol("WM_DELETE_WINDOW", self.close)
# something = FileSelection(self)
# self.frame_main = ttk.Frame(self,
# style="odd.group.TFrame")
# self.frame_main.grid(row=0, column=0, sticky=tk.NSEW)
self.frame_cmd = ttk.Frame(self,
style="even.group.TFrame")
self.frame_cmd.grid(row=1, column=0, sticky=tk.E + tk.SW)
self.frame_cmd.columnconfigure(0, weight=1)
self.btn_next = ttk.Button(self.frame_cmd,
text="Proceed",
command=self.next)
self.btn_next.grid(row=0, column=1, sticky=tk.SE)
self.btn_prev = ttk.Button(self.frame_cmd,
text="Previous",
command=self.prev)
self.btn_prev.grid(row=0, column=0, sticky=tk.SW)
self.btn_prev.grid_remove()
self.all_steps = {
"file_selection": FileSelection(self),
"sheet_selection": WorksheetSelection(self),
"column_selection": ColumnSelection(self)
}
self.avail_step_list = []
self.testing()
self.avail_step_list.append(self.all_steps["file_selection"])
self.current_step_index = 0
self.avail_step_list[self.current_step_index].open()
# Center window on screen
center_window(win_obj=self, width=1000, height=800)
self.update_next_status()
@debug(lvl=logging.DEBUG, prefix='')
def reset_downstream(self):
close_list = self.avail_step_list[self.current_step_index+1:]
print("Close List: ")
for step in close_list:
print(str(step.__class__))
step.close()
self.avail_step_list = [step for step in self.avail_step_list if step not in close_list]
@debug(lvl=logging.DEBUG, prefix='')
def testing(self):
self.wb_filename = config.MEDIA_PATH / "LAGB_02-25-2019_test3.xlsx"
self.ws_name_selection = "ESSENDANT FEBRUARY"
self.header_row = 1
self.btn_prev.grid()
# region wb_filename ###############################################################################################
@property
@debug(lvl=logging.DEBUG, prefix='')
def wb_filename(self):
return self.model.wb_filename
@wb_filename.setter
@debug(lvl=logging.DEBUG, prefix='')
def wb_filename(self, value):
if value != self.model.wb_filename:
self.reset_downstream()
self.model.wb_filename = value
self.column_mapping_dataframe = None
if self.all_steps["sheet_selection"] not in self.avail_step_list:
self.avail_step_list.append(self.all_steps["sheet_selection"])
self.update_next_status()
else:
raise ValueError
# endregion ########################################################################################################
# region ws_sheet_names ############################################################################################
@property
@debug(lvl=logging.DEBUG, prefix='')
def ws_sheet_names(self):
return self.model.ws_sheet_names
# endregion ########################################################################################################
# region ws_name_selection #########################################################################################
@property
@debug(lvl=logging.DEBUG, prefix='')
def ws_name_selection(self):
return self.model.ws_name_selection
@ws_name_selection.setter
@debug(lvl=logging.DEBUG, prefix='')
def ws_name_selection(self, value):
if value != self.model.ws_name_selection:
self.reset_downstream()
self.model.ws_name_selection = value
self.column_mapping_dataframe = None
if self.all_steps["column_selection"] not in self.avail_step_list:
self.avail_step_list.append(self.all_steps["column_selection"])
self.update_next_status()
# endregion ########################################################################################################
# region header_row #########################################################################################
@property
@debug(lvl=logging.DEBUG, prefix='')
def header_row(self):
return self.model.header_row
@header_row.setter
@debug(lvl=logging.DEBUG, prefix='')
def header_row(self, value):
        if value != self.model.header_row:
self.reset_downstream()
self.model.header_row = value
self.column_mapping_dataframe = None
if self.all_steps["column_selection"] not in self.avail_step_list:
self.avail_step_list.append(self.all_steps["column_selection"])
self.update_next_status()
# endregion ########################################################################################################
# region Directory Section ########################################################################################
@debug(lvl=logging.DEBUG, prefix='')
def flow_manager(self, add=False, subtract=False):
self.avail_step_list[self.current_step_index].close()
if add:
self.current_step_index += 1
elif subtract:
self.current_step_index -= 1
current_step_obj = self.avail_step_list[self.current_step_index]
current_step_obj.open()
if current_step_obj.initial:
self.btn_prev.grid_remove()
else:
self.btn_prev.grid()
if current_step_obj.terminal:
self.btn_next.grid_remove()
else:
self.btn_next.grid()
self.update_next_status()
@debug(lvl=logging.DEBUG, prefix='')
def update_next_status(self):
print("Next Button Disable Logic:")
print(self.current_step_index)
print(len(self.avail_step_list)-1)
if self.current_step_index == len(self.avail_step_list) - 1:
self.btn_next.state(['disabled'])
else:
self.btn_next.state(['!disabled'])
@debug(lvl=logging.DEBUG, prefix='')
def close(self):
msgbox = messagebox.askokcancel("Quit", "Do you want to quit?", parent=self)
if msgbox:
self.destroy()
# noinspection PyUnusedLocal
@debug(lvl=logging.DEBUG, prefix='')
def next(self, *args):
self.flow_manager(add=True)
# noinspection PyUnusedLocal
@debug(lvl=logging.DEBUG, prefix='')
def prev(self, *args):
self.flow_manager(subtract=True)
#
# # TODO: Create some unified function that would accept a step "id" and init/destroy without having separate fns
# # TODO: Each step's next/prev button should run this unified function instead of a step-specific one.
# @debug(lvl=logging.DEBUG, prefix='')
# def flow_manager(self):
# if self.file_selection is None:
# self.file_selection = FileSelection(view=self)
# center_window(win_obj=self)
# elif Step.step_dict.get("FileSelection").complete:
# if self.col_mapper is None:
# self.file_selection.close()
# self.model.sql_import()
# self.col_mapper = ColumnMapping(view=self)
# self.col_mapper.populate_frame()
# center_window(win_obj=self)
# elif Step.step_dict.get("ColumnMapping").complete:
# if self.match_review is None:
# self.col_mapper.close()
# self.match_review = MatchReview(view=self)
# self.match_review.populate_frame()
# center_window(win_obj=self)
# elif Step.step_dict.get("MatchReview").complete:
# self.model.check_upload_table(self.checked_dict)
# output_query = self.model.get_output_query()
#
# filename_options = dict(
# title='Save Output',
# initialdir=str(os.path.expanduser('~')).replace('\\', '/'),
# initialfile=None,
# parent=self.root,
# filetypes=[('Workbook', '.xlsx')])
#
# fullpath_var = str(filedialog.asksaveasfilename(**filename_options)).replace("/", "\\")
# filename, _ = os.path.splitext(fullpath_var)
# workbook = xlsxwriter.Workbook('{}.xlsx'.format(filename))
# worksheet = workbook.add_worksheet()
# col_number = 0
# row_number = 0
# col_list = []
# for desc in output_query.column_descriptions:
# name = desc.get('name').replace("'", "").replace('"', "")
# col_list.append(name)
# worksheet.write(row_number, col_number, name)
#
# col_number += 1
#
# row_number += 1
# for row in output_query.all():
# col_number = 0
# for col_name in col_list:
# # noinspection PyProtectedMember
# value = row._asdict().get(col_name)
# if isinstance(value, str):
# value.replace("{", "").replace("}", "")
# worksheet.write(row_number, col_number, value)
# col_number += 1
# row_number += 1
# workbook.close()
# self.close()
# # if self.vpn_rev is None and \
# # self.model.func_dict.get("Vendor Part Number Revision").upload_col is not None:
# # self.match_review.close()
# # self.model.check_upload_table(self.checked_dict)
# # self.vpn_rev = PartNumRevision(view=self)
# # self.vpn_rev.populate_frame()
# # center_window(win_obj=self)
#
# # if not self.pop_frames:
# # self.process_definition()
# # for next_step in sorted(Step.step_dict.values(), key=lambda x: int(x.order)):
# # if not next_step.complete:
# # print("Here's where you would do something")
# # break
#
# def process_definition(self):
# pass
# # if self.model.func_dict.get("Action Indicator").upload_col is not None:
# # self.action_ind = ActionIndicator(view=self)
# # if self.model.func_dict.get("Primary UOM").upload_col is not None:
# # self.uom = UnitOfMeasure(view=self)
#
#
# class PartNumRevision(Step):
# logger = CustomAdapter(logging.getLogger(str(__name__)), None)
#
# @debug(lvl=logging.DEBUG, prefix='')
# def __init__(self, view, *args, **kwargs):
# super().__init__(namely=str(PartNumRevision.__name__), order=4, view=view, *args, **kwargs)
# self.part_num_rev_hdr = ttk.Label(self.frame_main)
# self.part_num_rev_hdr.config(text="Vendor Part Number Revision", style="heading1.TLabel")
# self.part_num_rev_hdr.grid(row=0, column=0, sticky=tk.NW)
#
# self.part_num_rev_instr_lbl = ttk.Label(self.frame_main)
# self.part_num_rev_instr_lbl.config(text="These are the instructions on how to do this thing. \n"
# "1) You need to do something. \n"
# "2) You need to do something else. \n"
# "3) Finally, you need to do something else. \n"
# "Then you are done!")
# self.part_num_rev_instr_lbl.grid(row=1, column=0)
#
# self.treeview = TreeviewConstructor(self.frame_main)
# self.treeview.grid(row=2,
# column=0,
# sticky=tk.NSEW)
#
# self.btn_next.state(['!disabled'])
#
# @debug(lvl=logging.DEBUG, prefix='')
# def populate_frame(self):
# pass
#
# @debug(lvl=logging.DEBUG, prefix='')
# def next(self, *args):
# self.complete = True
# self.view.flow_manager()
#
#
# class ActionIndicator(Step):
# logger = CustomAdapter(logging.getLogger(str(__name__)), None)
#
# @debug(lvl=logging.DEBUG, prefix='')
# def __init__(self, view, *args, **kwargs):
# super().__init__(namely=str(ActionIndicator.__name__), order=5, view=view, *args, **kwargs)
#
#
# class UnitOfMeasure(Step):
# logger = CustomAdapter(logging.getLogger(str(__name__)), None)
#
# @debug(lvl=logging.DEBUG, prefix='')
# def __init__(self, view, *args, **kwargs):
# super().__init__(namely=str(ColumnMapping.__name__), order=5, view=view, *args, **kwargs)
#
| 42.957105
| 120
| 0.565312
|
326398b3a4041d233b390d4364ffc68e2793bf4a
| 20,474
|
py
|
Python
|
python/ray/tune/ray_trial_executor.py
|
vladfi1/ray
|
3b141b26cd4af491b3c1fb8ce4dbb00265246b1e
|
[
"Apache-2.0"
] | 2
|
2019-03-26T12:40:26.000Z
|
2020-07-23T13:45:39.000Z
|
python/ray/tune/ray_trial_executor.py
|
vladfi1/ray
|
3b141b26cd4af491b3c1fb8ce4dbb00265246b1e
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/ray_trial_executor.py
|
vladfi1/ray
|
3b141b26cd4af491b3c1fb8ce4dbb00265246b1e
|
[
"Apache-2.0"
] | 2
|
2019-04-09T12:30:24.000Z
|
2020-07-23T13:45:40.000Z
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import random
import time
import traceback
import ray
from ray.tune.error import TuneError, AbortTrialExecution
from ray.tune.logger import NoopLogger
from ray.tune.trial import Trial, Resources, Checkpoint
from ray.tune.trial_executor import TrialExecutor
from ray.tune.util import warn_if_slow
logger = logging.getLogger(__name__)
RESOURCE_REFRESH_PERIOD = 0.5 # Refresh resources every 500 ms
BOTTLENECK_WARN_PERIOD_S = 60
NONTRIVIAL_WAIT_TIME_THRESHOLD_S = 1e-3
class _LocalWrapper(object):
def __init__(self, result):
self._result = result
def unwrap(self):
"""Returns the wrapped result."""
return self._result
class RayTrialExecutor(TrialExecutor):
"""An implemention of TrialExecutor based on Ray."""
def __init__(self,
queue_trials=False,
reuse_actors=False,
refresh_period=RESOURCE_REFRESH_PERIOD):
super(RayTrialExecutor, self).__init__(queue_trials)
self._running = {}
# Since trial resume after paused should not run
# trial.train.remote(), thus no more new remote object id generated.
# We use self._paused to store paused trials here.
self._paused = {}
self._reuse_actors = reuse_actors
self._cached_actor = None
self._avail_resources = Resources(cpu=0, gpu=0)
self._committed_resources = Resources(cpu=0, gpu=0)
self._resources_initialized = False
self._refresh_period = refresh_period
self._last_resource_refresh = float("-inf")
self._last_nontrivial_wait = time.time()
if ray.is_initialized():
self._update_avail_resources()
def _setup_runner(self, trial, reuse_allowed):
if (self._reuse_actors and reuse_allowed
and self._cached_actor is not None):
logger.debug("Reusing cached runner {} for {}".format(
self._cached_actor, trial.trial_id))
existing_runner = self._cached_actor
self._cached_actor = None
else:
if self._cached_actor:
logger.debug(
"Cannot reuse cached runner {} for new trial".format(
self._cached_actor))
self._cached_actor.stop.remote()
self._cached_actor.__ray_terminate__.remote()
self._cached_actor = None
existing_runner = None
cls = ray.remote(
num_cpus=trial.resources.cpu,
num_gpus=trial.resources.gpu,
resources=trial.resources.custom_resources)(
trial._get_trainable_cls())
trial.init_logger()
# We checkpoint metadata here to try mitigating logdir duplication
self.try_checkpoint_metadata(trial)
remote_logdir = trial.logdir
if existing_runner:
trial.runner = existing_runner
if not self.reset_trial(trial, trial.config, trial.experiment_tag):
raise AbortTrialExecution(
"Trial runner reuse requires reset_trial() to be "
"implemented and return True.")
return existing_runner
def logger_creator(config):
# Set the working dir in the remote process, for user file writes
if not os.path.exists(remote_logdir):
os.makedirs(remote_logdir)
os.chdir(remote_logdir)
return NoopLogger(config, remote_logdir)
# Logging for trials is handled centrally by TrialRunner, so
# configure the remote runner to use a noop-logger.
return cls.remote(config=trial.config, logger_creator=logger_creator)
def _train(self, trial):
"""Start one iteration of training and save remote id."""
assert trial.status == Trial.RUNNING, trial.status
remote = trial.runner.train.remote()
# Local Mode
if isinstance(remote, dict):
remote = _LocalWrapper(remote)
self._running[remote] = trial
def _start_trial(self, trial, checkpoint=None):
"""Starts trial and restores last result if trial was paused.
Raises:
ValueError if restoring from checkpoint fails.
"""
prior_status = trial.status
self.set_status(trial, Trial.RUNNING)
trial.runner = self._setup_runner(
trial,
reuse_allowed=checkpoint is not None
or trial._checkpoint.value is not None)
if not self.restore(trial, checkpoint):
if trial.status == Trial.ERROR:
raise RuntimeError(
"Restore from checkpoint failed for Trial {}.".format(
str(trial)))
previous_run = self._find_item(self._paused, trial)
if (prior_status == Trial.PAUSED and previous_run):
# If Trial was in flight when paused, self._paused stores result.
self._paused.pop(previous_run[0])
self._running[previous_run[0]] = trial
else:
self._train(trial)
def _stop_trial(self, trial, error=False, error_msg=None,
stop_logger=True):
"""Stops this trial.
Stops this trial, releasing all allocating resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error (bool): Whether to mark this trial as terminated in error.
error_msg (str): Optional error message.
stop_logger (bool): Whether to shut down the trial logger.
"""
if stop_logger:
trial.close_logger()
if error:
self.set_status(trial, Trial.ERROR)
else:
self.set_status(trial, Trial.TERMINATED)
try:
trial.write_error_log(error_msg)
if hasattr(trial, 'runner') and trial.runner:
if (not error and self._reuse_actors
and self._cached_actor is None):
logger.debug("Reusing actor for {}".format(trial.runner))
self._cached_actor = trial.runner
else:
logger.info(
"Destroying actor for trial {}. If your trainable is "
"slow to initialize, consider setting "
"reuse_actors=True to reduce actor creation "
"overheads.".format(trial))
trial.runner.stop.remote()
trial.runner.__ray_terminate__.remote()
except Exception:
logger.exception("Error stopping runner for Trial %s", str(trial))
self.set_status(trial, Trial.ERROR)
finally:
trial.runner = None
def start_trial(self, trial, checkpoint=None):
"""Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial (Trial): Trial to be started.
checkpoint (Checkpoint): A Python object or path storing the state
of trial.
"""
self._commit_resources(trial.resources)
try:
self._start_trial(trial, checkpoint)
except Exception as e:
logger.exception("Error starting runner for Trial %s", str(trial))
error_msg = traceback.format_exc()
time.sleep(2)
self._stop_trial(trial, error=True, error_msg=error_msg)
if isinstance(e, AbortTrialExecution):
return # don't retry fatal Tune errors
try:
# This forces the trial to not start from checkpoint.
trial.clear_checkpoint()
logger.info(
"Trying to start runner for Trial %s without checkpoint.",
str(trial))
self._start_trial(trial)
except Exception:
logger.exception(
"Error starting runner for Trial %s, aborting!",
str(trial))
error_msg = traceback.format_exc()
self._stop_trial(trial, error=True, error_msg=error_msg)
# note that we don't return the resources, since they may
# have been lost
def _find_item(self, dictionary, item):
out = [rid for rid, t in dictionary.items() if t is item]
return out
def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
"""Only returns resources if resources allocated."""
prior_status = trial.status
self._stop_trial(
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
if prior_status == Trial.RUNNING:
logger.debug("Returning resources for Trial %s.", str(trial))
self._return_resources(trial.resources)
out = self._find_item(self._running, trial)
for result_id in out:
self._running.pop(result_id)
def continue_training(self, trial):
"""Continues the training of this trial."""
self._train(trial)
def pause_trial(self, trial):
"""Pauses the trial.
If trial is in-flight, preserves return value in separate queue
before pausing, which is restored when Trial is resumed.
"""
trial_future = self._find_item(self._running, trial)
if trial_future:
self._paused[trial_future[0]] = trial
super(RayTrialExecutor, self).pause_trial(trial)
def reset_trial(self, trial, new_config, new_experiment_tag):
"""Tries to invoke `Trainable.reset_config()` to reset trial.
Args:
trial (Trial): Trial to be reset.
new_config (dict): New configuration for Trial
trainable.
new_experiment_tag (str): New experiment name
for trial.
Returns:
True if `reset_config` is successful else False.
"""
trial.experiment_tag = new_experiment_tag
trial.config = new_config
trainable = trial.runner
with warn_if_slow("reset_config"):
reset_val = ray.get(trainable.reset_config.remote(new_config))
return reset_val
def get_running_trials(self):
"""Returns the running trials."""
return list(self._running.values())
def get_next_available_trial(self):
shuffled_results = list(self._running.keys())
random.shuffle(shuffled_results)
# Note: We shuffle the results because `ray.wait` by default returns
# the first available result, and we want to guarantee that slower
# trials (i.e. trials that run remotely) also get fairly reported.
# See https://github.com/ray-project/ray/issues/4211 for details.
start = time.time()
[result_id], _ = ray.wait(shuffled_results)
wait_time = time.time() - start
if wait_time > NONTRIVIAL_WAIT_TIME_THRESHOLD_S:
self._last_nontrivial_wait = time.time()
if time.time() - self._last_nontrivial_wait > BOTTLENECK_WARN_PERIOD_S:
logger.warn(
"Over the last {} seconds, the Tune event loop has been "
"backlogged processing new results. Consider increasing your "
"period of result reporting to improve performance.".format(
BOTTLENECK_WARN_PERIOD_S))
self._last_nontrivial_wait = time.time()
return self._running[result_id]
def fetch_result(self, trial):
"""Fetches one result of the running trials.
Returns:
Result of the most recent trial training run."""
trial_future = self._find_item(self._running, trial)
if not trial_future:
raise ValueError("Trial was not running.")
self._running.pop(trial_future[0])
with warn_if_slow("fetch_result"):
result = ray.get(trial_future[0])
# For local mode
if isinstance(result, _LocalWrapper):
result = result.unwrap()
return result
def _commit_resources(self, resources):
committed = self._committed_resources
all_keys = set(resources.custom_resources).union(
set(committed.custom_resources))
custom_resources = {
k: committed.get(k) + resources.get_res_total(k)
for k in all_keys
}
self._committed_resources = Resources(
committed.cpu + resources.cpu_total(),
committed.gpu + resources.gpu_total(),
custom_resources=custom_resources)
def _return_resources(self, resources):
committed = self._committed_resources
all_keys = set(resources.custom_resources).union(
set(committed.custom_resources))
custom_resources = {
k: committed.get(k) - resources.get_res_total(k)
for k in all_keys
}
self._committed_resources = Resources(
committed.cpu - resources.cpu_total(),
committed.gpu - resources.gpu_total(),
custom_resources=custom_resources)
assert self._committed_resources.is_nonnegative(), (
"Resource invalid: {}".format(resources))
def _update_avail_resources(self, num_retries=5):
for i in range(num_retries):
try:
resources = ray.global_state.cluster_resources()
except Exception:
# TODO(rliaw): Remove this when local mode is fixed.
# https://github.com/ray-project/ray/issues/4147
logger.debug("Using resources for local machine.")
resources = ray.services.check_and_update_resources(
None, None, None)
if not resources:
logger.warning("Cluster resources not detected. Retrying...")
time.sleep(0.5)
if not resources or "CPU" not in resources:
raise TuneError("Cluster resources cannot be detected. "
"You can resume this experiment by passing in "
"`resume=True` to `run`.")
resources = resources.copy()
num_cpus = resources.pop("CPU")
num_gpus = resources.pop("GPU")
custom_resources = resources
self._avail_resources = Resources(
int(num_cpus), int(num_gpus), custom_resources=custom_resources)
self._last_resource_refresh = time.time()
self._resources_initialized = True
def has_resources(self, resources):
"""Returns whether this runner has at least the specified resources.
This refreshes the Ray cluster resources if the time since last update
has exceeded self._refresh_period. This also assumes that the
cluster is not resizing very frequently.
"""
if time.time() - self._last_resource_refresh > self._refresh_period:
self._update_avail_resources()
currently_available = Resources.subtract(self._avail_resources,
self._committed_resources)
have_space = (
resources.cpu_total() <= currently_available.cpu
and resources.gpu_total() <= currently_available.gpu and all(
resources.get_res_total(res) <= currently_available.get(res)
for res in resources.custom_resources))
if have_space:
return True
can_overcommit = self._queue_trials
if (resources.cpu_total() > 0 and currently_available.cpu <= 0) or \
(resources.gpu_total() > 0 and currently_available.gpu <= 0) or \
any((resources.get_res_total(res_name) > 0
and currently_available.get(res_name) <= 0)
for res_name in resources.custom_resources):
can_overcommit = False # requested resource is already saturated
if can_overcommit:
logger.warning(
"Allowing trial to start even though the "
"cluster does not have enough free resources. Trial actors "
"may appear to hang until enough resources are added to the "
"cluster (e.g., via autoscaling). You can disable this "
"behavior by specifying `queue_trials=False` in "
"ray.tune.run().")
return True
return False
def debug_string(self):
"""Returns a human readable message for printing to the console."""
if self._resources_initialized:
status = "Resources requested: {}/{} CPUs, {}/{} GPUs".format(
self._committed_resources.cpu, self._avail_resources.cpu,
self._committed_resources.gpu, self._avail_resources.gpu)
customs = ", ".join([
"{}/{} {}".format(
self._committed_resources.get_res_total(name),
self._avail_resources.get_res_total(name), name)
for name in self._avail_resources.custom_resources
])
if customs:
status += " ({})".format(customs)
return status
else:
return "Resources requested: ?"
def resource_string(self):
"""Returns a string describing the total resources available."""
if self._resources_initialized:
res_str = "{} CPUs, {} GPUs".format(self._avail_resources.cpu,
self._avail_resources.gpu)
if self._avail_resources.custom_resources:
custom = ", ".join(
"{} {}".format(
self._avail_resources.get_res_total(name), name)
for name in self._avail_resources.custom_resources)
res_str += " ({})".format(custom)
return res_str
else:
return "? CPUs, ? GPUs"
def on_step_begin(self):
"""Before step() called, update the available resources."""
self._update_avail_resources()
def save(self, trial, storage=Checkpoint.DISK):
"""Saves the trial's state to a checkpoint."""
trial._checkpoint.storage = storage
trial._checkpoint.last_result = trial.last_result
if storage == Checkpoint.MEMORY:
trial._checkpoint.value = trial.runner.save_to_object.remote()
else:
with warn_if_slow("save_to_disk"):
trial._checkpoint.value = ray.get(trial.runner.save.remote())
return trial._checkpoint.value
def restore(self, trial, checkpoint=None):
"""Restores training state from a given model checkpoint.
This will also sync the trial results to a new location
if restoring on a different node.
"""
if checkpoint is None or checkpoint.value is None:
checkpoint = trial._checkpoint
if checkpoint is None or checkpoint.value is None:
return True
if trial.runner is None:
logger.error("Unable to restore - no runner.")
self.set_status(trial, Trial.ERROR)
return False
try:
value = checkpoint.value
if checkpoint.storage == Checkpoint.MEMORY:
assert type(value) != Checkpoint, type(value)
trial.runner.restore_from_object.remote(value)
else:
worker_ip = ray.get(trial.runner.current_ip.remote())
trial.sync_logger_to_new_location(worker_ip)
with warn_if_slow("restore_from_disk"):
ray.get(trial.runner.restore.remote(value))
trial.last_result = checkpoint.last_result
return True
except Exception:
logger.exception("Error restoring runner for Trial %s.", trial)
self.set_status(trial, Trial.ERROR)
return False
def export_trial_if_needed(self, trial):
"""Exports model of this trial based on trial.export_formats.
Return:
A dict that maps ExportFormats to successfully exported models.
"""
if trial.export_formats and len(trial.export_formats) > 0:
return ray.get(
trial.runner.export_model.remote(trial.export_formats))
return {}
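# Illustrative sketch (editor addition, not part of Ray): the resource
# bookkeeping above is plain element-wise arithmetic on Resources tuples.
def _example_resource_accounting():
    avail = Resources(4, 1)
    committed = Resources(3, 1)
    free = Resources.subtract(avail, committed)
    # A trial requesting 2 CPUs would not fit, since free.cpu == 1.
    return free.cpu, free.gpu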
| 39.601547
| 79
| 0.605744
|
765e4527947fe9e37849daff30ab175fb2c01db7
| 550
|
py
|
Python
|
202-happy-number/happy_number.py
|
cnluocj/leetcode
|
5b870a63ba1aab3db1e05421c91f404a9aabc489
|
[
"MIT"
] | null | null | null |
202-happy-number/happy_number.py
|
cnluocj/leetcode
|
5b870a63ba1aab3db1e05421c91f404a9aabc489
|
[
"MIT"
] | null | null | null |
202-happy-number/happy_number.py
|
cnluocj/leetcode
|
5b870a63ba1aab3db1e05421c91f404a9aabc489
|
[
"MIT"
] | null | null | null |
"""
70.49%
"""
class Solution(object):
def isHappy(self, n):
"""
:type n: int
:rtype: bool
"""
        # 1 is trivially happy; 0 is handled correctly by the cycle check below.
        if n == 1:
            return True
x = [n]
curr = n
ishappy = False
while True:
digits = list(str(curr))
curr = 0
for d in digits:
curr += int(d) ** 2
x.append(curr)
if curr == 1:
ishappy = True
if x.count(curr) >= 2:
break
return ishappy
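# Illustrative usage sketch (editor addition): 19 is a happy number
# (19 -> 82 -> 68 -> 100 -> 1), while 2 enters a cycle and is not.
if __name__ == "__main__":
    solver = Solution()
    print(solver.isHappy(19))  # True
    print(solver.isHappy(2))   # False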
| 19.642857
| 36
| 0.370909
|
bc1ddea700b49d9bf2b707a5c7ced44846e158e3
| 577
|
py
|
Python
|
trees/bstser.py
|
dhruvsharma1999/data-structures
|
f3b51ebca2f6e28eee3dcb5692f92d2f7f3533e5
|
[
"MIT"
] | null | null | null |
trees/bstser.py
|
dhruvsharma1999/data-structures
|
f3b51ebca2f6e28eee3dcb5692f92d2f7f3533e5
|
[
"MIT"
] | null | null | null |
trees/bstser.py
|
dhruvsharma1999/data-structures
|
f3b51ebca2f6e28eee3dcb5692f92d2f7f3533e5
|
[
"MIT"
] | null | null | null |
#implementing bst search operation using python 3
#Node class
class Node:
def __init__(self,key):
self.right = None
self.left = None
self.val = key
#Utility function to search a given key in BST
def search(root, key):
#base case is root is NULL or key is present at the root
if root is None or root.val == key:
return root
#if key greater than roots key
if root.val < key:
return search(root.right, key)
#if key is smaller than root key
return search(root.left, key)
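# Illustrative usage sketch (editor addition): build a tiny BST by hand
# (this file only defines search, not insert) and look up a key.
if __name__ == "__main__":
    root = Node(8)
    root.left = Node(3)
    root.right = Node(10)
    root.left.left = Node(1)
    root.left.right = Node(6)
    hit = search(root, 6)
    print(hit.val if hit else "not found")  # -> 6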
| 25.086957
| 64
| 0.608319
|
ad228972520e6ef5fe24c9739ecf0cec3daa0122
| 2,239
|
py
|
Python
|
skater/core/visualizer/image_relevance_visualizer.py
|
RPUTHUMA/Skater
|
317460b88065b41eebe6790e9efdbb0595cbe450
|
[
"UPL-1.0"
] | 718
|
2017-05-19T22:49:40.000Z
|
2019-03-27T06:40:54.000Z
|
skater/core/visualizer/image_relevance_visualizer.py
|
quant1729/Skater
|
b46a4abe3465ddc7b19ffc762ad45d1414b060a6
|
[
"UPL-1.0"
] | 114
|
2017-05-24T16:55:59.000Z
|
2019-03-27T12:48:18.000Z
|
skater/core/visualizer/image_relevance_visualizer.py
|
quant1729/Skater
|
b46a4abe3465ddc7b19ffc762ad45d1414b060a6
|
[
"UPL-1.0"
] | 121
|
2017-05-22T17:20:19.000Z
|
2019-03-21T15:06:19.000Z
|
# -*- coding: UTF-8 -*-
from skimage.filters import roberts, sobel
import numpy as np
from skater.util.exceptions import MatplotlibUnavailableError
try:
import matplotlib.pyplot as plt
except ImportError:
raise (MatplotlibUnavailableError("Matplotlib is required but unavailable on your system."))
from skater.util.image_ops import normalize
# helper function to show or hide the plot axes
_enable_axis = lambda ax, flag: ax.axis("off") if flag is True else ax.axis("on")
def visualize(relevance_score, original_input_img=None, edge_detector_type='sobel', cmap='bwr', axis=plt,
percentile=100, alpha_edges=0.8, alpha_bgcolor=1, disable_axis=True):
dx, dy = 0.01, 0.01
xx = np.arange(0.0, relevance_score.shape[1], dx)
yy = np.arange(0.0, relevance_score.shape[0], dy)
x_min, x_max, y_min, y_max = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy)
extent = x_min, x_max, y_min, y_max
xi_cmap = plt.cm.gray
xi_cmap.set_bad(alpha=0)
edges = _edge_detection(original_input_img, edge_detector_type) if original_input_img is not None else None
# draw the edges of the image before overlaying rest of the image
if edges is not None:
axis.imshow(edges, extent=extent, interpolation='nearest', cmap=xi_cmap, alpha=alpha_edges)
abs_max = np.percentile(np.abs(relevance_score), percentile)
abs_min = abs_max
relevance_score = relevance_score[:, :, 0] if len(relevance_score.shape) == 3 else relevance_score
# Plot the image with relevance scores
axis.imshow(relevance_score, extent=extent, interpolation='nearest', cmap=cmap,
vmin=-abs_min, vmax=abs_max, alpha=alpha_bgcolor)
_enable_axis(axis, disable_axis)
return axis
def _edge_detection(original_input_img=None, edge_detector_alg='sobel'):
# Normalize the input image to (0,1)
xi = normalize(original_input_img)
xi_greyscale = xi if len(xi.shape) == 2 else np.mean(xi, axis=-1)
# Applying edge detection ( Roberts or Sobel edge detection )
# Reference: http://scikit-image.org/docs/0.11.x/auto_examples/plot_edge_filter.html
edge_detector = {'robert': roberts, 'sobel': sobel}
return edge_detector[edge_detector_alg](xi_greyscale)
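# Illustrative usage sketch (editor addition, not part of skater): overlay a
# synthetic relevance map on a random grayscale image. Shapes and values here
# are placeholders purely for demonstration.
def _example_visualize():
    img = np.random.rand(28, 28)
    scores = np.random.randn(28, 28)
    visualize(scores, original_input_img=img,
              edge_detector_type='sobel', cmap='bwr')
    plt.show()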
| 41.462963
| 111
| 0.72711
|
38d1cf4c1414cf7c200d72497d11fd3c0e463ea8
| 2,334
|
py
|
Python
|
test/test_task_callbacks_api.py
|
nodeum-io/nodeum-sdk-python
|
205536491bff507dea7be44af46202c17e7121d9
|
[
"MIT"
] | null | null | null |
test/test_task_callbacks_api.py
|
nodeum-io/nodeum-sdk-python
|
205536491bff507dea7be44af46202c17e7121d9
|
[
"MIT"
] | null | null | null |
test/test_task_callbacks_api.py
|
nodeum-io/nodeum-sdk-python
|
205536491bff507dea7be44af46202c17e7121d9
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Nodeum API
# About This document describes the Nodeum API version 2: If you are looking for any information about the product itself, reach the product website https://www.nodeum.io. You can also contact us at this email address : info@nodeum.io # Filter parameters When browsing a list of items, multiple filter parameters may be applied. Some operators can be added to the value as a prefix: - `=` value is equal. Default operator, may be omitted - `!=` value is different - `>` greater than - `>=` greater than or equal - `<` lower than - `>=` lower than or equal - `><` included in list, items should be separated by `|` - `!><` not included in list, items should be separated by `|` - `~` pattern matching, may include `%` (any characters) and `_` (one character) - `!~` pattern not matching, may include `%` (any characters) and `_` (one character) # noqa: E501
The version of the OpenAPI document: 2.1.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import nodeum_sdk
from nodeum_sdk.api.task_callbacks_api import TaskCallbacksApi # noqa: E501
from nodeum_sdk.rest import ApiException
class TestTaskCallbacksApi(unittest.TestCase):
"""TaskCallbacksApi unit test stubs"""
def setUp(self):
self.api = nodeum_sdk.api.task_callbacks_api.TaskCallbacksApi() # noqa: E501
def tearDown(self):
pass
def test_create_task_callback(self):
"""Test case for create_task_callback
Creates a new task callback. # noqa: E501
"""
pass
def test_destroy_task_callback(self):
"""Test case for destroy_task_callback
Destroys a specific task callback. # noqa: E501
"""
pass
def test_index_task_callbacks(self):
"""Test case for index_task_callbacks
Lists all task callbacks. # noqa: E501
"""
pass
def test_show_task_callback(self):
"""Test case for show_task_callback
Displays a specific task callback. # noqa: E501
"""
pass
def test_update_task_callback(self):
"""Test case for update_task_callback
Updates a specific task callback. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 33.826087
| 876
| 0.668809
|
d28a2d764f2fd5496af214b347faac1ebe81578e
| 31,018
|
py
|
Python
|
scripts/bot.py
|
parimple/zagadka
|
1cad1af657d9a621e6c42e3a66fb4dac10770810
|
[
"MIT"
] | null | null | null |
scripts/bot.py
|
parimple/zagadka
|
1cad1af657d9a621e6c42e3a66fb4dac10770810
|
[
"MIT"
] | null | null | null |
scripts/bot.py
|
parimple/zagadka
|
1cad1af657d9a621e6c42e3a66fb4dac10770810
|
[
"MIT"
] | null | null | null |
import asyncio
from datetime import datetime, timedelta
import discord
from datasources.queries import *
import inspect
import datasources.models as models
import datasources.queries as queries
from mappings import BOT, GUILD, COMMANDS, MUSIC_PREFIX, MUSIC_COMMANDS
from datasources import session, engine
from random import randint
from functions import *
from colour import Color
if not engine.dialect.has_table(engine, 'member'):
    models.Base.metadata.create_all(engine)
client = discord.Client()
invites = []
channels = {}
async def presence():
while True:
await client.change_presence(activity=discord.Game(name='R'))
await asyncio.sleep(8)
await client.change_presence(activity=discord.Game(name='G'))
await asyncio.sleep(8)
await client.change_presence(activity=discord.Game(name='B'))
await asyncio.sleep(8)
async def minute():
while True:
guild = client.get_guild(GUILD['id'])
date_db = get_guild_date(GUILD['id'])
date_now = datetime.now()
invites_new = await guild.invites()
diff = list(set(invites_new) - set(invites))
if len(diff) > 0:
invites.extend(diff)
if date_now.minute % 10 == 0:
private_category = guild.get_channel(GUILD['private_category'])
for channel in guild.voice_channels:
if (len(channel.members) < 1) and (channel in private_category.channels):
if channel.id != GUILD['create_channel']:
del channels[channel]
await channel.delete()
continue
for member in channel.members:
if (not member.bot) and (check_member(member.id) is False):
if (date_now - member.joined_at).seconds > 60:
set_member(member.id, member.name, member.discriminator, None, datetime.now())
session.commit()
set_member_scores(member.id, ['week'])
session.commit()
if member.voice.self_mute or member.voice.self_deaf or member.bot:
continue
else:
if len(channel.members) > 1:
add_member_score(member.id, date_now.strftime("%A"), 10)
session.commit()
else:
add_member_score(member.id, date_now.strftime("%A"), 1)
session.commit()
if date_db.strftime("%A") != date_now.strftime("%A"):
role_recruiter = guild.get_role(GUILD['recruiter'])
for member in role_recruiter.members:
await member.remove_roles(role_recruiter)
role_temp_bonus = guild.get_role(GUILD['temp_bonus_id'])
for member in role_temp_bonus.members:
await member.remove_roles(role_temp_bonus)
for role in guild.roles:
if role.name == GUILD['colored_name'] or role.name == GUILD['multi_colored_name']:
await role.delete()
reset_points_global(date_now.strftime("%A"))
if date_now.hour != date_db.hour:
role_everyone = guild.get_role(GUILD['fake_everyone_id'])
role_here = guild.get_role(GUILD['fake_here_id'])
for member in role_everyone.members:
await member.remove_roles(role_everyone)
await member.remove_roles(role_here)
members_top = get_top_members(GUILD['top'])
roles_top = get_top_roles(GUILD['top'])
for (role_top_id,), member_top in zip(roles_top, members_top):
role_top = guild.get_role(role_top_id)
member_top_id, member_top_score = member_top
member = guild.get_member(member_top_id)
if member is None:
reset_points_by_id(member_top_id)
continue
else:
for member_old in role_top.members:
if member_old.id == member.id:
continue
else:
await member_old.remove_roles(role_top, reason='top remove')
await member.add_roles(role_top, reason='top add')
set_guild_date(GUILD['id'], date_now)
session.commit()
await asyncio.sleep(60)
@client.event
async def on_voice_state_update(member, before, after):
guild = member.guild
private_category = guild.get_channel(GUILD['private_category'])
if after.channel:
if before.channel != after.channel:
if after.channel.id == GUILD['create_channel']:
overwrite = get_member_permissions(member.id)
permission_overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=overwrite.host_everyone_view_channel,
connect=overwrite.host_everyone_connect,
speak=overwrite.host_everyone_speak)
}
overwrite_guests = get_member_guests(member.id)
for guest_id, view_channel, connect, speak in overwrite_guests:
guest = guild.get_member(guest_id)
if guest:
if all(p is None for p in [view_channel, connect, speak]):
continue
else:
permission_overwrites[guest] = discord.PermissionOverwrite(read_messages=view_channel,
connect=connect,
speak=speak)
permission_overwrites[member] = discord.PermissionOverwrite(
read_messages=True,
connect=True,
speak=True,
move_members=False)
new_channel = await guild.create_voice_channel(
member.display_name,
category=after.channel.category,
bitrate=GUILD['bitrate'],
user_limit=overwrite.host_channel_limit,
overwrites=permission_overwrites)
await member.move_to(new_channel)
channels[new_channel] = member
if before.channel:
if before.channel != after.channel:
if before.channel in private_category.channels:
if (len(before.channel.members) == 0) and before.channel.id != GUILD['create_channel']:
if before.channel in channels:
del channels[before.channel]
await before.channel.delete()
@client.event
async def on_member_join(member):
member_hosts = get_member_hosts(member.id)
guild = member.guild
print(member_hosts)
if member_hosts:
for host_id in member_hosts:
print(host_id.speak, host_id.connect, host_id.view_channel)
host = guild.get_member(host_id.member_host)
if host:
print(host)
temp_channels = {k: v for k, v in channels.items() if v}
for channel in temp_channels:
if host == temp_channels[channel]:
await channel.set_permissions(member,
speak=host_id.speak,
connect=host_id.connect,
read_messages=host_id.view_channel)
invites_old = invites.copy()
invites.clear()
invites.extend(await member.guild.invites())
try:
invite = discord.utils.find(lambda inv: inv.uses > discord.utils.get(invites_old, id=inv.id).uses, invites)
except AttributeError:
diff = list(set(invites) - set(invites_old))
if len(diff) > 0:
diff.sort(key=lambda inv: inv.created_at, reverse=True)
invite = diff[0]
else:
invite = None
if check_member(member.id) is False:
if invite:
set_member(member.id, member.name, member.discriminator, invite.inviter.id, datetime.now())
inviter = member.guild.get_member(invite.inviter.id)
if (member.joined_at - member.created_at) > timedelta(days=3):
if inviter:
if not inviter.bot:
await inviter.add_roles(member.guild.get_role(GUILD['recruiter']), reason='recruiter')
if (member.joined_at - member.created_at) > timedelta(days=7):
invited_list = get_invited_list(inviter.id)
real_count = 0
for invited_id in invited_list:
invited = guild.get_member(invited_id)
if invited:
if invited.avatar and (invited.joined_at - invited.created_at) > timedelta(days=3):
real_count += 1
if real_count > 1:
await inviter.add_roles(member.guild.get_role(GUILD['dj_id']), reason='dj')
if real_count > 3:
await inviter.add_roles(member.guild.get_role(GUILD['join_id']), reason='join')
if real_count > 7:
await inviter.add_roles(member.guild.get_role(GUILD['temp_bonus_id']), reason='bonus')
if real_count > 15:
await inviter.add_roles(member.guild.get_role(GUILD['bonus_id']), reason='bonus')
if real_count > 31:
pass
if real_count > 63:
if any(role.name == GUILD['colored_name'] for role in inviter.roles):
pass
else:
colored_role = await guild.create_role(name=GUILD['colored_name'])
colored_role_position = guild.get_role(GUILD['colored_role_position'])
print('role position', colored_role_position.position)
await inviter.add_roles(colored_role, reason='colored')
await asyncio.sleep(10)
await colored_role.edit(position=colored_role_position.position+1,reason='position')
if real_count > 128:
pass
else:
set_member(member.id, member.name, member.discriminator, None, datetime.now())
session.commit()
set_member_scores(member.id, ['week'])
session.commit()
join_logs = member.guild.get_channel(GUILD['join_logs_id'])
if invite:
await join_logs.send('member: {}, display_name: {}, inviter: {} <@{}>'
.format(member.mention, member.display_name, invite.inviter, invite.inviter.id))
else:
await join_logs.send('member: {}, display_name: {}, inviter: {}'
.format(member.mention, member.display_name, None))
@client.event
async def on_message(message):
message_save = message
date_now = datetime.now()
if not message.content:
return
if message.author.bot:
return
if message.role_mentions:
await message.author.add_roles(message.guild.get_role(GUILD['fake_everyone_id']), reason='everyone ping')
await message.author.add_roles(message.guild.get_role(GUILD['fake_here_id']), reason='here ping')
args = message.content.split(' ')
if len(message.content) < 2:
return
if message.channel.id != GUILD['bots_channel_id'] and len(args[0]) > 1 and \
args[0][0] in MUSIC_PREFIX and args[0][1:].lower() in MUSIC_COMMANDS:
await message.delete()
return
for command in COMMANDS:
if command in args[0].lower():
return
points = 0
if len(args) > 32:
points += 32
else:
points += len(args)
nitro_booster = message_save.guild.get_role(GUILD['nitro_booster_id'])
if (nitro_booster in message.author.roles) and (randint(1, 100) < GUILD['rand_boost']):
points += len(args)
patreon_2 = message_save.guild.get_role(GUILD['patreon_2_id'])
if (patreon_2 in message.author.roles) and (randint(1, 100) < GUILD['rand_boost']):
points += len(args)
temp_bonus = message_save.guild.get_role(GUILD['temp_bonus_id'])
if (temp_bonus in message.author.roles) and (randint(1, 100) < GUILD['rand_boost']):
points += len(args)
bonus = message_save.guild.get_role(GUILD['bonus_id'])
if (bonus in message.author.roles) and (randint(1, 100) < GUILD['rand_boost']):
points += len(args)
if check_member(message.author.id) is False:
if (date_now - message.author.joined_at).seconds > 60:
set_member(message.author.id, message.author.name, message.author.discriminator, None, datetime.now())
session.commit()
set_member_scores(message.author.id, ['week'])
session.commit()
add_member_score(message.author.id, date_now.strftime("%A"), points)
session.commit()
parent_id = get_member_parent_id(message.author.id)
if parent_id:
if (check_member(parent_id)) and (randint(1, 100) < GUILD['rand_parent']):
add_member_score(parent_id, date_now.strftime("%A"), points)
session.commit()
if not message.attachments and message.content[0] == BOT['prefix']:
command = args.pop(0)[1:]
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), message.author, command, args)
if command in ['color', 'colour']:
if args:
try:
color_string = ''.join(args)
if '#' in color_string:
color_string = color_string.replace('#', '')
new_color = Color(color_string)
print(new_color.hex_l)
hex_string = new_color.hex_l.replace('#', '')
discord_color = discord.Color(int(hex_string, 16))
for role in message.author.roles:
if role.name == GUILD['colored_name']:
await role.edit(colour=discord_color)
return
except ValueError:
pass
try:
color_string = ''.join(args)
if '#' in color_string:
color_string = color_string.replace('#', '')
discord_color = discord.Color(int(color_string, 16))
for role in message.author.roles:
if role.name == GUILD['colored_name']:
await role.edit(colour=discord_color)
return
except ValueError:
pass
if command in ['help', 'h']:
embed = discord.Embed()
embed.add_field(name='connect permission',
value='{}connect - <@{}>'.format(BOT['prefix'], message.author.id), inline=True)
embed.add_field(name='view permission',
value='{}view - <@{}>'.format(BOT['prefix'], message.author.id), inline=True)
embed.add_field(name='speak permission',
value='{}speak - <@{}>'.format(BOT['prefix'], message.author.id), inline=True)
embed.add_field(name='reset permissions',
value='{}reset <@{}>'.format(BOT['prefix'], message.author.id), inline=True)
embed.add_field(name='global connect permission',
value='{}connect -'.format(BOT['prefix'], message.author.id), inline=True)
embed.add_field(name='global view permission',
value='{}view -'.format(BOT['prefix'], message.author.id), inline=True)
embed.add_field(name='global speak permission',
value='{}speak -'.format(BOT['prefix'], message.author.id), inline=True)
embed.add_field(name='reset global permissions',
value='{}reset'.format(BOT['prefix'], message.author.id), inline=True)
embed.add_field(name='user limit',
value='{}limit 2'.format(BOT['prefix'], message.author.id), inline=True)
embed.set_footer(text="to allow permission use +")
await message.channel.send(embed=embed)
return
if command == 'profile':
inviter = message.guild.get_member(parent_id)
if inviter is None:
inviter_name = 'None'
else:
inviter_name = inviter.display_name
invited_count = get_invited_count(message.author.id)
invited_list = get_invited_list(message.author.id)
real_count = 0
for invited_id in invited_list:
invited = message.guild.get_member(invited_id)
if invited:
if invited.avatar and (invited.joined_at - invited.created_at) > timedelta(days=3):
real_count += 1
embed = discord.Embed()
embed.add_field(name='invited people', value="{}({})".format(real_count, invited_count), inline=True)
embed.set_footer(text="invited by {}".format(inviter_name))
await message.channel.send(embed=embed)
return
if command in ['speak', 's', 'connect', 'c', 'view', 'v', 'reset', 'r']:
if (len(args) < 1) and command not in ['reset', 'r']:
return
host = message.author
if command in ['reset', 'r']:
parameter = '+'
else:
parameter = args.pop(0)
allowed = BOT['true'] + BOT['false']
if parameter not in allowed:
parameter = True
else:
if parameter in BOT['true']:
parameter = True
elif parameter in BOT['false']:
parameter = False
if message.mentions:
guest = message.mentions[0]
else:
guest = None
if guest:
overwrites = get_member_member(host.id, guest.id)
if not overwrites:
set_member_member(host.id, guest.id)
session.commit()
if command in ['reset', 'r']:
temp_channels = {k: v for k, v in channels.items() if v}
for channel in temp_channels:
if host == temp_channels[channel]:
await channel.set_permissions(guest, overwrite=None)
if guest in channel.members:
afk_channel = message.guild.get_channel(GUILD['afk_channel_id'])
await guest.move_to(afk_channel)
await guest.move_to(channel)
update_member_member(host.id, guest.id, speak=None, connect=None, view_channel=None)
session.commit()
elif command in ['speak', 's']:
temp_channels = {k: v for k, v in channels.items() if v}
for channel in temp_channels:
if host == temp_channels[channel]:
await channel.set_permissions(guest, speak=parameter)
if guest in channel.members:
afk_channel = message.guild.get_channel(GUILD['afk_channel_id'])
await guest.move_to(afk_channel)
await guest.move_to(channel)
update_member_member(host.id, guest.id, speak=parameter)
session.commit()
elif command in ['connect', 'c']:
temp_channels = {k: v for k, v in channels.items() if v}
for channel in temp_channels:
if host == temp_channels[channel]:
await channel.set_permissions(guest, connect=parameter)
if parameter is False:
if guest in channel.members:
await guest.move_to(None)
update_member_member(host.id, guest.id, connect=parameter)
session.commit()
elif command in ['view', 'v']:
temp_channels = {k: v for k, v in channels.items() if v}
for channel in temp_channels:
if host == temp_channels[channel]:
await channel.set_permissions(guest, view_channel=parameter)
if parameter is False:
if guest in channel.members:
await guest.move_to(None)
update_member_member(host.id, guest.id, view_channel=parameter)
session.commit()
else:
if command in ['reset', 'r']:
create_channel = message.guild.get_channel(GUILD['create_channel'])
update_member(host.id, speak=True, view_channel=True, connect=True, limit=99)
update_member_members(host.id)
session.commit()
try:
await host.move_to(create_channel)
except discord.errors.HTTPException:
return
elif command in ['speak', 's']:
temp_channels = {k: v for k, v in channels.items() if v}
for channel in temp_channels:
if host == temp_channels[channel]:
await channel.set_permissions(message.guild.default_role, speak=parameter)
update_member(host.id, speak=parameter)
session.commit()
elif command in ['connect', 'c']:
temp_channels = {k: v for k, v in channels.items() if v}
for channel in temp_channels:
if host == temp_channels[channel]:
await channel.set_permissions(message.guild.default_role, connect=parameter)
update_member(host.id, connect=parameter)
session.commit()
elif command in ['view', 'v']:
temp_channels = {k: v for k, v in channels.items() if v}
for channel in temp_channels:
if host == temp_channels[channel]:
await channel.set_permissions(message.guild.default_role, view_channel=parameter)
update_member(host.id, view_channel=parameter)
session.commit()
if command in ['limit', 'l']:
if len(args) < 1:
return
host = message.author
limit_str = args.pop(0)
if limit_str.isdigit():
limit = int(limit_str)
if isinstance(limit, int):
if limit < 0:
limit = 0
elif limit > 99:
limit = 99
else:
return
temp_channels = {k: v for k, v in channels.items() if v}
for channel in temp_channels:
if host == temp_channels[channel]:
await channel.edit(user_limit=limit)
update_member(host.id, limit=limit)
session.commit()
if message.author.id == BOT['owner']:
if command == 'ban':
if message.mentions:
to_ban = message.mentions[0]
print(to_ban)
else:
to_ban_id = args.pop(0).strip('<@!>')
to_ban = await client.fetch_user(to_ban_id)
print(to_ban)
if command == 'ban_all':
if message.mentions:
inviter = message.mentions[0]
else:
inviter_id = args.pop(0).strip('<@!>')
inviter = await client.fetch_user(inviter_id)
ban_list = get_invited_list_minutes(inviter.id,
datetime.now() - timedelta(minutes=int(args.pop())))
for member_id in ban_list:
# member = message.guild.get_member(member_id)
member = discord.Object(member_id)
print(member)
try:
await message.channel.send('<@{}> banned'.format(member_id))
await message.guild.ban(member)
except discord.errors.NotFound:
continue
if command == 'eval':
# try:
code = cleanup_code(' '.join(args))
evaled = eval(code)
# print(type(evaled))
if inspect.isawaitable(evaled):
await message.channel.send(await evaled)
else:
await message.channel.send("```{}```".format(evaled))
# except:
# print('eval except')
# if type(evaled) !=
if command == 'dateTime':
print(date_now.strftime("%A"))
elif command == 'addNsfw':
members = message.guild.members
for member in members:
if (date_now - member.created_at).days < 14:
await member.add_roles(message.guild.get_role(GUILD['nsfw_id']), reason='nsfw new account')
elif command == 'resetPoints':
if len(args) > 0:
if message.mentions:
reset_points_by_id(message.mentions[0].id)
else:
reset_points_by_id(args[0])
else:
print(message.content)
return
elif command == 'delTopRoles':
top_roles = get_top_roles(128)
for role_id, in top_roles:
print(role_id)
role = message.guild.get_role(role_id)
await role.delete()
return
elif command == 'say':
await message.channel.send(' '.join(args))
await message.delete()
elif command == 'sayhc':
message_old = await message.channel.fetch_message(592443154058182686)
content = """
**Numeric ranks [1, 2, 3 … 128] on the member list. What are they?**
It is a custom activity ranking system created specifically for this server.
**How do you earn your own rank on the member list? There are several ways:**
1. Activity in text chat.
2. Spending time on voice channels.
3. Activity of the people you invited (10% of the traffic they generate also lands on your account)
4. Nitro Server Boost or Patreon (https://www.patreon.com/zaGadka) increase all other bonuses by 25%
**What do you get for inviting a new person to the server?** (number of invited people: `?profile`)
1 person ♳ - black nickname color until the end of the day
2 people ♴ - DJ, i.e. control over the music bots
4 people ♵ - the ability to create your own voice channel with muting (more commands: `?help`)
8+ people ♶ - +25% points until the end of the day
16 people ♷ - +25% points permanently
**32 people ✪ - joining the zaGadka moderation team for a trial period**
64+ people ♸ - an individual role; you can change its color with a command, e.g. `?color light blue`, until the end of the day
128+ people ♹ - an individual role as above, except that it cycles through the colors you pick
+ means that the next day the role refreshes after one more person joins
**How do you change your nickname color?**
1. Pick <a:zaGadkaRed:592406448302981149> <a:zaGadkaGreen:592406448315564062> <a:zaGadjaBlue:592406448386998289> using the reactions visible at the very bottom
2. To get the unique black color you need to boost the server, be a patron ($2 - black, $4 - any color), or invite a new person"""
await message_old.edit(content=content)
elif command == 'everyone':
await message.channel.send('@everyone')
await message.delete()
elif command == 'rgb':
message_rgb = await message.channel.send(GUILD['roles_rgb'])
await message_rgb.delete()
await message.delete()
elif command == 'editMessage':
message_old = await message.channel.fetch_message(args.pop(0))
content = ' '.join(args)
await message_old.edit(content=content)
@client.event
async def on_member_remove(member):
leave_logs = member.guild.get_channel(GUILD['leave_logs_id'])
await leave_logs.send('member: {}, display_name: {}'.format(member.mention, member.display_name))
@client.event
async def on_ready():
date_now = datetime.now()
guild = client.get_guild(GUILD['id'])
invites.extend(await guild.invites())
if check_score_by_type('week') is False:
set_weeks()
if check_role_by_type('top') is False:
for i in range(1, GUILD['top'] + 1):
print(i)
role = await guild.create_role(name=i, hoist=True)
set_role(role.id, i, 'top')
session.commit()
if check_guild_by_id(GUILD['id']) is False:
set_guild_by_id(GUILD['id'], date_now)
session.commit()
# private_category = guild.get_channel(GUILD['private_category'])
# for channel in private_category.channels:
# if channel.id != GUILD['create_channel']:
# await channel.delete()
print(client.user.id)
print(client.user.name)
print('---------------')
print('This bot is ready for action!')
client.loop.create_task(presence())
client.loop.create_task(minute())
if __name__ == '__main__':
try:
client.run(BOT['token'])
except Exception as e:
print('Could Not Start Bot')
print(e)
finally:
print('Closing Session')
session.close()
| 47.211568
| 157
| 0.536173
|
550ad1d4a3a8dc2ba0d0b2f552a896950c473d5d
| 326
|
py
|
Python
|
portfolio/migrations/0025_remove_data_hits.py
|
ElmanTr/simple-django-web
|
07f9b938e42ade4b515cbb8b41364089a5f9f2b1
|
[
"MIT"
] | 1
|
2020-10-07T14:03:13.000Z
|
2020-10-07T14:03:13.000Z
|
portfolio/migrations/0025_remove_data_hits.py
|
ElmanTr/simple-django-web
|
07f9b938e42ade4b515cbb8b41364089a5f9f2b1
|
[
"MIT"
] | null | null | null |
portfolio/migrations/0025_remove_data_hits.py
|
ElmanTr/simple-django-web
|
07f9b938e42ade4b515cbb8b41364089a5f9f2b1
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-11-22 12:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0024_auto_20201115_1919'),
]
operations = [
migrations.RemoveField(
model_name='data',
name='hits',
),
]
| 18.111111
| 49
| 0.588957
|
efa1ecf696ba58ce944e26cc3a9ac3069b8628d9
| 898
|
py
|
Python
|
azure-keyvault/azure/keyvault/models/deleted_key_item_paged.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure/keyvault/models/deleted_key_item_paged.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure/keyvault/models/deleted_key_item_paged.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class DeletedKeyItemPaged(Paged):
"""
    A paging container for iterating over a list of DeletedKeyItem objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[DeletedKeyItem]'}
}
def __init__(self, *args, **kwargs):
super(DeletedKeyItemPaged, self).__init__(*args, **kwargs)
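# Illustrative usage sketch (not part of the generated file). The client call below is an
# assumption: any azure-keyvault operation that returns a DeletedKeyItemPaged can be
# iterated like this, because the msrest Paged base class follows next_link and fetches
# additional pages transparently while you loop.
def _example_iterate_deleted_keys(client, vault_base_url):
    # `client` is assumed to be a KeyVaultClient; `kid` is the key identifier of each item.
    for deleted_key in client.get_deleted_keys(vault_base_url):
        print(deleted_key.kid)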
| 32.071429
| 76
| 0.569042
|
292fc2dba5da6be1968db15edb858c98bf0c8afe
| 178
|
py
|
Python
|
kuwala/common/python_utils/src/error_handler.py
|
SamDuan/kuwala
|
b417444493b3efcde33747e201b5d0bf13f8e14a
|
[
"Apache-2.0"
] | null | null | null |
kuwala/common/python_utils/src/error_handler.py
|
SamDuan/kuwala
|
b417444493b3efcde33747e201b5d0bf13f8e14a
|
[
"Apache-2.0"
] | null | null | null |
kuwala/common/python_utils/src/error_handler.py
|
SamDuan/kuwala
|
b417444493b3efcde33747e201b5d0bf13f8e14a
|
[
"Apache-2.0"
] | null | null | null |
from quart import jsonify
def general_error(error):
return (
jsonify({"success": False, "error": {"message": error.description}}),
error.status_code,
)
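# Illustrative sketch (not part of the original module): wiring the handler into a Quart
# app. The CustomAPIError class is hypothetical; it only needs the `description` and
# `status_code` attributes that general_error reads.
def _example_register_handler():
    from quart import Quart

    class CustomAPIError(Exception):
        def __init__(self, description, status_code):
            super().__init__(description)
            self.description = description
            self.status_code = status_code

    app = Quart(__name__)
    # Any raised CustomAPIError is turned into a JSON error payload with its status code.
    app.register_error_handler(CustomAPIError, general_error)
    return app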
| 19.777778
| 77
| 0.629213
|
2a30357973cd0fcb2a9e8dbb88984bfc75364b0d
| 213
|
py
|
Python
|
tests/src/__init__.py
|
char-lie/software_design
|
8784cc5f4dea1e1290f8e3ea0425677d1cbeb273
|
[
"MIT"
] | null | null | null |
tests/src/__init__.py
|
char-lie/software_design
|
8784cc5f4dea1e1290f8e3ea0425677d1cbeb273
|
[
"MIT"
] | 4
|
2016-02-23T14:27:43.000Z
|
2016-02-24T19:08:46.000Z
|
tests/src/__init__.py
|
char-lie/software_design
|
8784cc5f4dea1e1290f8e3ea0425677d1cbeb273
|
[
"MIT"
] | null | null | null |
from unittest import defaultTestLoader
from TransactionManager import TransactionManagerBase
test_cases = [TransactionManagerBase]
tests = [defaultTestLoader.loadTestsFromTestCase(test) for test in test_cases]
| 26.625
| 78
| 0.859155
|
ca3c2a8823ade1d2385c16ae7794f178282abe61
| 407
|
py
|
Python
|
torch/distributed/pipeline/sync/__init__.py
|
MagiaSN/pytorch
|
7513455c743d3d644b45a804902c1a0d14b69f45
|
[
"Intel"
] | 7
|
2021-05-29T16:31:51.000Z
|
2022-02-21T18:52:25.000Z
|
torch/distributed/pipeline/sync/__init__.py
|
MagiaSN/pytorch
|
7513455c743d3d644b45a804902c1a0d14b69f45
|
[
"Intel"
] | 1
|
2022-01-18T12:17:29.000Z
|
2022-01-18T12:17:29.000Z
|
torch/distributed/pipeline/sync/__init__.py
|
MagiaSN/pytorch
|
7513455c743d3d644b45a804902c1a0d14b69f45
|
[
"Intel"
] | 2
|
2021-07-02T10:18:21.000Z
|
2021-08-18T10:10:28.000Z
|
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""A Pipe implementation in PyTorch."""
from .checkpoint import is_checkpointing, is_recomputing
from .pipe import Pipe
__all__ = ["Pipe", "is_checkpointing", "is_recomputing"]
| 33.916667
| 71
| 0.764128
|
21913d1641940905c07a6732a1068b272a202e65
| 774
|
py
|
Python
|
get_baseline_auc.py
|
helmersl/patent_similarity_search
|
8d6833607a58257a4e4b2c2c0ce58472d248805a
|
[
"CC-BY-4.0"
] | 36
|
2018-12-10T08:08:21.000Z
|
2022-03-12T21:55:43.000Z
|
get_baseline_auc.py
|
helmersl/patent_similarity_search
|
8d6833607a58257a4e4b2c2c0ce58472d248805a
|
[
"CC-BY-4.0"
] | 3
|
2019-07-18T05:01:02.000Z
|
2021-04-20T14:58:16.000Z
|
get_baseline_auc.py
|
helmersl/patent_similarity_search
|
8d6833607a58257a4e4b2c2c0ce58472d248805a
|
[
"CC-BY-4.0"
] | 10
|
2018-08-09T05:10:19.000Z
|
2021-12-23T05:40:28.000Z
|
import numpy as np
from plot_utils import plot_score_distr, group_combis, calc_auc
binary_label_pairs = np.load('human_eval/corpus_info/binary_label_pairs.npy').item()
human_label_pairs = np.load('human_eval/corpus_info/human_label_pairs.npy').item()
combis = np.load('human_eval/corpus_info/combis.npy')
human_sim_combis, human_diff_combis = group_combis(human_label_pairs)
sim_vals = [binary_label_pairs[combi] for combi in human_sim_combis]
diff_vals = [binary_label_pairs[combi] for combi in human_diff_combis]
fpr, tpr, auc_val = calc_auc(sim_vals, diff_vals)
plot_score_distr('human_eval', 'cited', ['relevant', 'not relevant'],
{'relevant': sim_vals, 'not relevant': diff_vals},
auc_val, ['relevant'], histdir='baseline', bins=10)
| 55.285714
| 84
| 0.760982
|
34c3574c5179689640e3901e226c4b79f06968db
| 2,257
|
py
|
Python
|
tests/unit_testing/test_argument.py
|
pengzhengyi/Streamlined
|
59743f44e349318e51e1db4e72c7d1b5992f25be
|
[
"MIT"
] | null | null | null |
tests/unit_testing/test_argument.py
|
pengzhengyi/Streamlined
|
59743f44e349318e51e1db4e72c7d1b5992f25be
|
[
"MIT"
] | 5
|
2021-09-11T07:46:55.000Z
|
2022-03-12T02:03:54.000Z
|
tests/unit_testing/test_argument.py
|
pengzhengyi/Streamlined
|
59743f44e349318e51e1db4e72c7d1b5992f25be
|
[
"MIT"
] | null | null | null |
from unittest.mock import Mock
import pytest
from streamlined.common import TYPE, VALUE
from streamlined.middlewares import (
ARGPARSE,
ARGS,
ARGTYPE,
ARGUMENT,
ARGUMENTS,
CLEANUP,
NAME,
SKIP,
Argument,
Arguments,
)
@pytest.mark.asyncio
async def test_argument_set_in_scope(simple_executor):
argument = Argument({ARGUMENT: {NAME: "first_name", VALUE: "Alice"}})
scoped = await argument.run(simple_executor)
assert scoped["first_name"] == "Alice"
@pytest.mark.asyncio
async def test_argument_skip(simple_executor):
argument = Argument({ARGUMENT: {NAME: "first_name", VALUE: "Alice", SKIP: True}})
scoped = await argument.run(simple_executor)
with pytest.raises(KeyError):
scoped["first_name"]
@pytest.mark.asyncio
async def test_argument_set_after_action(simple_executor):
mock = Mock()
def is_name_set(name) -> str:
mock(name)
argument = Argument({ARGUMENT: {NAME: "name", VALUE: "Alice", CLEANUP: is_name_set}})
scoped = await argument.run(simple_executor)
mock.assert_called_once_with("Alice")
@pytest.mark.asyncio
async def test_argument_argparse(simple_executor):
argument = Argument(
{
ARGUMENT: {
NAME: "num_processors",
VALUE: {TYPE: ARGPARSE, NAME: "-p", ARGTYPE: int, ARGS: ["-p", "10", "--help"]},
}
}
)
scoped = await argument.run(simple_executor)
assert scoped.get("num_processors") == 10
@pytest.mark.asyncio
async def test_argument_argparse_parsed_argument_not_present(simple_executor):
argument = Argument(
{
ARGUMENT: {
NAME: "num_processors",
VALUE: {TYPE: ARGPARSE, NAME: "-p", ARGTYPE: int, ARGS: ["--foo"]},
}
}
)
scoped = await argument.run(simple_executor)
assert scoped.get("num_processors") is None
@pytest.mark.asyncio
async def test_arguments_set_in_scope(simple_executor):
arguments = Arguments(
{ARGUMENTS: [{NAME: "first_name", VALUE: "John"}, {NAME: "last_name", VALUE: "Doe"}]}
)
scoped = await arguments.run(simple_executor)
assert scoped["first_name"] == "John"
assert scoped["last_name"] == "Doe"
| 25.359551
| 96
| 0.648206
|
06d6ceb93d922c8230fd50384732f9651b40abd3
| 10,987
|
py
|
Python
|
basil/web/oauth2.py
|
stmobo/BasilBot
|
96497786dc9ad6604ade47cac1ad101084da2346
|
[
"MIT"
] | null | null | null |
basil/web/oauth2.py
|
stmobo/BasilBot
|
96497786dc9ad6604ade47cac1ad101084da2346
|
[
"MIT"
] | null | null | null |
basil/web/oauth2.py
|
stmobo/BasilBot
|
96497786dc9ad6604ade47cac1ad101084da2346
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from aiohttp import ClientSession, ClientResponse, ClientResponseError
from aioredis import Redis, RedisError
import logging
import secrets
import time
from typing import Optional, Dict, Iterable, Union, Tuple
from urllib.parse import urljoin, urlencode
from sanic import Sanic
from sanic.request import Request
from sanic.response import redirect, HTTPResponse
from sanic.exceptions import InvalidUsage, ServerError
STATE_START = "unauthenticated"
STATE_INPROGRESS = "in-progress"
STATE_AUTHORIZED = "authorized"
REDIS_BASE_PREFIX = "sessions:auth"
app = Sanic.get_app("basil")
class OAuth2API(object):
def __init__(
self,
authorize_url: str,
token_url: str,
revoke_url: str,
redirect_url: str,
client_id: str,
client_secret: str,
prefix: str,
):
self.authorize_url: str = authorize_url
self.token_url: str = token_url
self.revoke_url: str = revoke_url
self.redirect_url: str = redirect_url
self.client_id: str = client_id
self.client_secret: str = client_secret
self.prefix: str = prefix
def authorization_url(self, nonce: str, scopes: str, **kwargs) -> str:
qstring = urlencode(
{
"client_id": self.client_id,
"redirect_uri": self.redirect_url,
"response_type": "code",
"state": nonce,
"scope": scopes,
**kwargs,
}
)
return urljoin(self.authorize_url, "?" + qstring)
async def exchange_code(
self, session: ClientSession, scopes: str, code: str
) -> ClientResponse:
headers = {"Content-Type": "application/x-www-form-urlencoded"}
data = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"redirect_uri": self.redirect_url,
"scope": scopes,
"grant_type": "authorization_code",
"code": code,
}
return await session.post(
self.token_url, data=data, headers=headers, raise_for_status=True
)
async def refresh_token(
self, session: ClientSession, scopes: str, refresh_token: str
) -> ClientResponse:
headers = {"Content-Type": "application/x-www-form-urlencoded"}
data = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"redirect_uri": self.redirect_url,
"scope": scopes,
"grant_type": "refresh_token",
"refresh_token": refresh_token,
}
return await session.post(
self.token_url, data=data, headers=headers, raise_for_status=True
)
async def revoke_token(self, session: ClientSession, token: str) -> ClientResponse:
headers = {"Content-Type": "application/x-www-form-urlencoded"}
data = {
"token": token,
"client_id": self.client_id,
"client_secret": self.client_secret,
}
return await session.post(
self.revoke_url, data=data, headers=headers, raise_for_status=True
)
async def load_request_context(self, req: Request) -> OAuth2Context:
return await OAuth2Context.load(
req.ctx.session, app.ctx.http_session, app.ctx.redis, self
)
class OAuth2Context(object):
def __init__(
self,
session_id: str,
http_session: ClientSession,
redis: Redis,
api: OAuth2API,
**kwargs
):
self.session_id = session_id
self.http = http_session
self.redis = redis
self.api = api
self.state: str = kwargs.get("state", STATE_START)
self.nonce: Optional[str] = kwargs.get("nonce")
self.landing_page: Optional[str] = kwargs.get("landing")
self.scopes: Optional[str] = kwargs.get("scopes")
self.access_token: Optional[str] = kwargs.get("access_token")
self.token_type: Optional[str] = kwargs.get("token_type")
self.refresh_token: Optional[str] = kwargs.get("refresh_token")
self.expire_time: Optional[float] = None
if "expire_time" in kwargs:
self.expire_time = float(kwargs["expire_time"])
@property
def auth_data_key(self) -> str:
return REDIS_BASE_PREFIX + ":" + self.api.prefix + ":" + self.session_id
@property
def expire_in(self) -> float:
if self.expire_time is None:
return 0
return self.expire_time - time.time()
@staticmethod
def generate_nonce() -> str:
return secrets.token_urlsafe(16)
@classmethod
async def load(
cls, session_id: str, http_session: ClientSession, redis: Redis, api: OAuth2API
) -> OAuth2Context:
data = await redis.hgetall(
REDIS_BASE_PREFIX + ":" + api.prefix + ":" + session_id
)
if data is None:
data = {}
return cls(session_id, http_session, redis, api, **data)
async def reset(self):
"""Completely reset all state associated with this context."""
if self.access_token is not None and self.expire_in > 0:
try:
resp = await self.api.revoke_token(self.http, self.access_token)
async with resp:
msg = await resp.text()
logging.info(
"Revoked token for session {}: {}".format(self.session_id, msg)
)
except ClientResponseError:
logging.error(
"Could not revoke token for session {}".format(self.session_id),
exc_info=True,
)
try:
await self.redis.delete(self.auth_data_key)
except RedisError:
logging.error(
"Could not delete Redis key {} for session {}".format(
self.auth_data_key, self.session_id
),
exc_info=True,
)
self.access_token = None
self.token_type = None
self.refresh_token = None
self.expire_time = None
self.nonce = None
self.landing_page = None
self.scopes = None
self.state = STATE_START
async def start(
self, scopes: Union[str, Iterable[str]], landing_page: str, **kwargs
) -> HTTPResponse:
"""Begin an OAuth2 client authentication flow."""
if isinstance(scopes, str):
scopes = scopes.split()
else:
scopes = list(scopes)
await self.reset()
self.scopes = " ".join(scopes)
self.nonce = self.generate_nonce()
self.landing_page = landing_page
self.state = STATE_INPROGRESS
async with self.redis.pipeline(transaction=True) as tr:
tr.hmset(
self.auth_data_key,
{
"state": STATE_INPROGRESS,
"nonce": self.nonce,
"scopes": self.scopes,
"landing": landing_page,
},
)
tr.expire(self.auth_data_key, 900)
await tr.execute()
return redirect(
self.api.authorization_url(self.nonce, self.scopes, **kwargs), status=303
)
async def _save_token(self, resp: ClientResponse):
async with resp:
token_data: dict = await resp.json()
self.access_token = token_data["access_token"]
self.token_type = token_data["token_type"]
self.refresh_token = token_data.get("refresh_token")
self.expire_time = time.time() + token_data["expires_in"]
if "scope" in token_data:
self.scopes = token_data["scope"]
try:
async with self.redis.pipeline(transaction=True) as tr:
tr.hmset(
self.auth_data_key,
{
"state": STATE_AUTHORIZED,
"access_token": self.access_token,
"token_type": self.token_type,
"refresh_token": self.refresh_token,
"expire_time": str(self.expire_time),
"scopes": self.scopes,
},
)
tr.hdel(self.auth_data_key, "nonce", "landing")
tr.expireat(self.auth_data_key, int(self.expire_time))
await tr.execute()
except RedisError:
raise ServerError("Could not save authorization data to Redis")
async def redirect(self, code: str, state_param: str) -> HTTPResponse:
"""Handle an authorization redirection in the OAuth2 flow."""
if self.state != STATE_INPROGRESS:
raise ServerError(
"Authorization flow in incorrect state for handling redirect"
)
if self.nonce != state_param:
raise InvalidUsage("Incorrect state parameter in redirect")
try:
resp = await self.api.exchange_code(self.http, self.scopes, code)
except ClientResponseError as err:
raise ServerError(
"Could not get access token (error {}): {}".format(
err.status, err.message
)
)
await self._save_token(resp)
return redirect(self.landing_page, status=303)
async def refresh(self):
"""Refresh the access token associated with this context."""
if self.state != STATE_AUTHORIZED:
return
try:
resp = await self.api.refresh_token(
self.http, self.scopes, self.refresh_token
)
except ClientResponseError as err:
raise ServerError(
"Could not get access token (error {}): {}".format(
err.status, err.message
)
)
await self._save_token(resp)
async def credentials(self) -> Optional[Tuple[str, str]]:
"""Get the access token and its type, refreshing as necessary.
If the token is close to expiring, it will automatically be refreshed.
If the token has expired or has not been obtained yet, returns None.
"""
if self.state != STATE_AUTHORIZED:
return None
if self.expire_in <= 0:
await self.reset()
return None
elif self.expire_in <= 86400:
await self.refresh()
if self.token_type is None or self.access_token is None:
return None
return (self.token_type, self.access_token)
async def auth_header(self) -> Optional[Dict[str, str]]:
"""Get the HTTP headers used for authorizing requests in this context."""
creds = await self.credentials()
if creds is None:
return None
return {"Authorization": creds[0] + " " + creds[1]}
| 34.01548
| 87
| 0.572859
|
d354efd0fbdfa0802f732396a8841b6eccb09145
| 320
|
py
|
Python
|
2_viewing_chain.py
|
gadsater/blockchain
|
c22ced0a8b1f5ff01cd928c4e79191f773684238
|
[
"MIT"
] | 2
|
2018-10-16T10:15:57.000Z
|
2019-03-24T05:45:00.000Z
|
2_viewing_chain.py
|
sudar-coder321/BlockChain_For_TKnow
|
278e5d7ea9d34e7acfe795c561fc5fa906c9c8c0
|
[
"MIT"
] | 1
|
2018-10-16T10:33:47.000Z
|
2018-10-16T10:33:47.000Z
|
2_viewing_chain.py
|
sudar-coder321/BlockChain_For_TKnow
|
278e5d7ea9d34e7acfe795c561fc5fa906c9c8c0
|
[
"MIT"
] | 1
|
2018-10-14T16:03:05.000Z
|
2018-10-14T16:03:05.000Z
|
from blockchain import Blockchain
if __name__ == '__main__':
testchain = Blockchain()
for i in range(5):
proof = testchain.proof_of_work(testchain.last_block)
hashval = testchain.hash(testchain.last_block)
block = testchain.new_block(proof,hashval)
print (*testchain.chain,sep="\n")
| 32
| 61
| 0.69375
|
6bc162cb1a22247d03e3efa9d9b015db7d994fbd
| 9,779
|
py
|
Python
|
stubs.min/System/Windows/Media/__init___parts/SkewTransform.py
|
ricardyn/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | 1
|
2021-02-02T13:39:16.000Z
|
2021-02-02T13:39:16.000Z
|
stubs.min/System/Windows/Media/__init___parts/SkewTransform.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
stubs.min/System/Windows/Media/__init___parts/SkewTransform.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
class SkewTransform(Transform,ISealable,IAnimatable,IResource,IFormattable):
"""
 Represents a 2-D skew.
SkewTransform()
SkewTransform(angleX: float,angleY: float)
SkewTransform(angleX: float,angleY: float,centerX: float,centerY: float)
"""
def Clone(self):
"""
Clone(self: SkewTransform) -> SkewTransform
Creates a modifiable copy of this System.Windows.Media.SkewTransform by making
deep copies of its values.
Returns: A modifiable deep copy of the current object. The
System.Windows.Freezable.IsFrozen property of the cloned object returns false
even if the System.Windows.Freezable.IsFrozen property of the source is true.
"""
pass
def CloneCore(self,*args):
"""
CloneCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a clone (deep copy) of the specified
System.Windows.Freezable using base (non-animated) property values.
sourceFreezable: The object to clone.
"""
pass
def CloneCurrentValue(self):
"""
CloneCurrentValue(self: SkewTransform) -> SkewTransform
Creates a modifiable copy of this System.Windows.Media.SkewTransform object by
making deep copies of its values. This method does not copy resource
references,data bindings,or animations,although it does copy their current
values.
Returns: A modifiable deep copy of the current object. The
System.Windows.Freezable.IsFrozen property of the cloned object is false even
if the System.Windows.Freezable.IsFrozen property of the source is true.
"""
pass
def CloneCurrentValueCore(self,*args):
"""
CloneCurrentValueCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a modifiable clone (deep copy) of the specified
System.Windows.Freezable using current property values.
sourceFreezable: The System.Windows.Freezable to be cloned.
"""
pass
def CreateInstance(self,*args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self,*args):
""" CreateInstanceCore(self: SkewTransform) -> Freezable """
pass
def FreezeCore(self,*args):
"""
FreezeCore(self: Animatable,isChecking: bool) -> bool
Makes this System.Windows.Media.Animation.Animatable object unmodifiable or
determines whether it can be made unmodifiable.
isChecking: true if this method should simply determine whether this instance can be
frozen. false if this instance should actually freeze itself when this method
is called.
Returns: If isChecking is true,this method returns true if this
System.Windows.Media.Animation.Animatable can be made unmodifiable,or false if
it cannot be made unmodifiable. If isChecking is false,this method returns
true if the if this System.Windows.Media.Animation.Animatable is now
unmodifiable,or false if it cannot be made unmodifiable,with the side effect
of having begun to change the frozen status of this object.
"""
pass
def GetAsFrozenCore(self,*args):
"""
GetAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a frozen clone of the specified System.Windows.Freezable
using base (non-animated) property values.
sourceFreezable: The instance to copy.
"""
pass
def GetCurrentValueAsFrozenCore(self,*args):
"""
GetCurrentValueAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the current instance a frozen clone of the specified
System.Windows.Freezable. If the object has animated dependency properties,
their current animated values are copied.
sourceFreezable: The System.Windows.Freezable to copy and freeze.
"""
pass
def OnChanged(self,*args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self,*args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure
and is not intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self,*args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPrope
rtyChangedEventArgs) to also invoke any System.Windows.Freezable.Changed
handlers in response to a changing dependency property of type
System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old
and new values.
"""
pass
def ReadPreamble(self,*args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid
thread. Inheritors of System.Windows.Freezable must call this method at the
beginning of any API that reads data members that are not dependency
properties.
"""
pass
def ShouldSerializeProperty(self,*args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize
the value for the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized;
otherwise,false.
ShouldSerializeProperty(self: Window_16$17,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Label_17$18,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: TextBox_18$19,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Button_19$20,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: CheckBox_20$21,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: ComboBox_21$22,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Separator_22$23,dp: DependencyProperty) -> bool
"""
pass
def WritePostscript(self,*args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the
System.Windows.Freezable and invokes its System.Windows.Freezable.OnChanged
method. Classes that derive from System.Windows.Freezable should call this
method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self,*args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being
accessed from a valid threading context. System.Windows.Freezable inheritors
should call this method at the beginning of any API that writes to data members
that are not dependency properties.
"""
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,angleX=None,angleY=None,centerX=None,centerY=None):
"""
__new__(cls: type)
__new__(cls: type,angleX: float,angleY: float)
__new__(cls: type,angleX: float,angleY: float,centerX: float,centerY: float)
"""
pass
def __str__(self,*args):
pass
AngleX=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the x-axis skew angle,which is measured in degrees counterclockwise from the y-axis.
Get: AngleX(self: SkewTransform) -> float
Set: AngleX(self: SkewTransform)=value
"""
AngleY=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the y-axis skew angle,which is measured in degrees counterclockwise from the x-axis.
Get: AngleY(self: SkewTransform) -> float
Set: AngleY(self: SkewTransform)=value
"""
CenterX=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the x-coordinate of the transform center.
Get: CenterX(self: SkewTransform) -> float
Set: CenterX(self: SkewTransform)=value
"""
CenterY=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the y-coordinate of the transform center.
Get: CenterY(self: SkewTransform) -> float
Set: CenterY(self: SkewTransform)=value
"""
Value=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the current transformation value as a System.Windows.Media.Matrix.
Get: Value(self: SkewTransform) -> Matrix
"""
AngleXProperty=None
AngleYProperty=None
CenterXProperty=None
CenterYProperty=None
| 38.199219
| 215
| 0.722262
|
151479260ee96c8163f1b3916b969be1b6582bd4
| 56,278
|
py
|
Python
|
components/isceobj/Alos2Proc/Alos2ProcPublic.py
|
yuankailiu/isce2
|
c2567a33b3f96cc264f49972682c869fa36b9054
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,133
|
2022-01-07T21:24:57.000Z
|
2022-01-07T21:33:08.000Z
|
components/isceobj/Alos2Proc/Alos2ProcPublic.py
|
yuankailiu/isce2
|
c2567a33b3f96cc264f49972682c869fa36b9054
|
[
"ECL-2.0",
"Apache-2.0"
] | 276
|
2019-02-10T07:18:28.000Z
|
2022-03-31T21:45:55.000Z
|
components/isceobj/Alos2Proc/Alos2ProcPublic.py
|
yuankailiu/isce2
|
c2567a33b3f96cc264f49972682c869fa36b9054
|
[
"ECL-2.0",
"Apache-2.0"
] | 235
|
2019-02-10T05:00:53.000Z
|
2022-03-18T07:37:24.000Z
|
#!/usr/bin/env python3
#
# Author: Cunren Liang
# Copyright 2015-present, NASA-JPL/Caltech
#
def runCmd(cmd, silent=0):
import os
if silent == 0:
print("{}".format(cmd))
status = os.system(cmd)
if status != 0:
raise Exception('error when running:\n{}\n'.format(cmd))
def find_vrt_keyword(xmlfile, keyword):
from xml.etree.ElementTree import ElementTree
value = None
xmlx = ElementTree(file=open(xmlfile,'r')).getroot()
#try 10 times
for i in range(10):
path=''
for j in range(i):
path += '*/'
value0 = xmlx.find(path+keyword)
if value0 != None:
value = value0.text
break
return value
def find_vrt_file(xmlfile, keyword, relative_path=True):
'''
find file in vrt in another directory
xmlfile: vrt file
relative_path: True: return relative (to current directory) path of the file
False: return absolute path of the file
'''
import os
#get absolute directory of xmlfile
xmlfile_dir = os.path.dirname(os.path.abspath(xmlfile))
#find source file path
file = find_vrt_keyword(xmlfile, keyword)
#get absolute path of source file
file = os.path.abspath(os.path.join(xmlfile_dir, file))
#get relative path of source file
if relative_path:
file = os.path.relpath(file, './')
return file
def create_xml(fileName, width, length, fileType):
import isceobj
if fileType == 'slc':
image = isceobj.createSlcImage()
elif fileType == 'int':
image = isceobj.createIntImage()
elif fileType == 'amp':
image = isceobj.createAmpImage()
elif fileType == 'cor':
image = isceobj.createOffsetImage()
elif fileType == 'rmg' or fileType == 'unw':
image = isceobj.Image.createUnwImage()
elif fileType == 'byte':
image = isceobj.createImage()
image.setDataType('BYTE')
elif fileType == 'float':
image = isceobj.createImage()
image.setDataType('FLOAT')
elif fileType == 'double':
image = isceobj.createImage()
image.setDataType('DOUBLE')
else:
raise Exception('format not supported yet!\n')
image.setFilename(fileName)
image.extraFilename = fileName + '.vrt'
image.setWidth(width)
image.setLength(length)
#image.setAccessMode('read')
#image.createImage()
image.renderHdr()
#image.finalizeImage()
def multilook_v1(data, nalks, nrlks, mean=True):
'''
    perform multilooking
    ATTENTION: the original array is modified by this function
'''
(length, width)=data.shape
width2 = int(width/nrlks)
length2 = int(length/nalks)
for i in range(1, nalks):
data[0:length2*nalks:nalks, :] += data[i:length2*nalks:nalks, :]
for i in range(1, nrlks):
data[0:length2*nalks:nalks, 0:width2*nrlks:nrlks] += data[0:length2*nalks:nalks, i:width2*nrlks:nrlks]
if mean:
return data[0:length2*nalks:nalks, 0:width2*nrlks:nrlks] / nrlks / nalks
else:
return data[0:length2*nalks:nalks, 0:width2*nrlks:nrlks]
def multilook(data, nalks, nrlks, mean=True):
'''
    perform multilooking
'''
import numpy as np
(length, width)=data.shape
width2 = int(width/nrlks)
length2 = int(length/nalks)
data2=np.zeros((length2, width), dtype=data.dtype)
for i in range(0, nalks):
data2 += data[i:length2*nalks:nalks, :]
for i in range(1, nrlks):
data2[:, 0:width2*nrlks:nrlks] += data2[:, i:width2*nrlks:nrlks]
if mean:
return data2[:, 0:width2*nrlks:nrlks] / nrlks / nalks
else:
return data2[:, 0:width2*nrlks:nrlks]
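# Illustrative sketch (not part of the original module): multilooking a small synthetic
# array. The 4x4 input is hypothetical; with 2 azimuth looks and 2 range looks each
# output pixel is the mean of a 2x2 block of the input.
def _demo_multilook():
    import numpy as np
    data = np.arange(16, dtype=np.float32).reshape(4, 4)
    looked = multilook(data, nalks=2, nrlks=2)
    # looked has shape (2, 2); e.g. looked[0, 0] == (0 + 1 + 4 + 5) / 4.0 == 2.5
    return looked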
def cal_coherence_1(inf, win=5):
'''
Compute coherence using scipy convolve 2D. Same as "def cal_coherence(inf, win=5):" in funcs.py in insarzd
#still use standard coherence estimation equation, but with magnitude removed.
#for example, equation (2) in
#H. Zebker and K. Chen, Accurate Estimation of Correlation in InSAR Observations,
#IEEE GEOSCIENCE AND REMOTE SENSING LETTERS, VOL. 2, NO. 2, APRIL 2005.
'''
import numpy as np
import scipy.signal as ss
filt = np.ones((win,win))/ (1.0*win*win)
flag = ss.convolve2d((inf!=0), filt, mode='same')
angle = inf / (np.absolute(inf)+(inf==0))
cor = ss.convolve2d(angle, filt, mode='same')
cor = np.absolute(cor)
#remove incomplete convolution result
cor[np.nonzero(flag < 0.999)] = 0.0
#print(np.max(cor), np.min(cor))
#cor.astype(np.float32).tofile(f)
return cor
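# Illustrative sketch (not part of the original module): for a constant-phase
# interferogram the estimated coherence is 1 wherever the boxcar window lies fully
# inside the data, and 0 at the borders where the convolution is incomplete. This
# mirrors the magnitude-free estimator described in the docstring above.
def _demo_cal_coherence():
    import numpy as np
    inf = np.exp(1j * 0.5) * np.ones((20, 20), dtype=np.complex64)
    cor = cal_coherence_1(inf, win=5)
    # interior pixels: |mean of identical unit phasors| == 1; border pixels are zeroed
    return cor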
def computeOffsetFromOrbit(referenceSwath, referenceTrack, secondarySwath, secondaryTrack, referenceSample, referenceLine):
'''
compute range and azimuth offsets using orbit. all range/azimuth indexes start with 0
    referenceSample: reference sample where the offset is computed; does not need to be an integer
    referenceLine: reference line where the offset is computed; does not need to be an integer
'''
import datetime
pointingDirection = {'right': -1, 'left' :1}
#compute a pair of range and azimuth offsets using geometry
#using Piyush's code for computing range and azimuth offsets
midRange = referenceSwath.startingRange + referenceSwath.rangePixelSize * referenceSample
midSensingStart = referenceSwath.sensingStart + datetime.timedelta(seconds = referenceLine / referenceSwath.prf)
llh = referenceTrack.orbit.rdr2geo(midSensingStart, midRange, side=pointingDirection[referenceTrack.pointingDirection])
slvaz, slvrng = secondaryTrack.orbit.geo2rdr(llh, side=pointingDirection[referenceTrack.pointingDirection])
###Translate to offsets
#at this point, secondary range pixel size and prf should be the same as those of reference
rgoff = ((slvrng - secondarySwath.startingRange) / referenceSwath.rangePixelSize) - referenceSample
azoff = ((slvaz - secondarySwath.sensingStart).total_seconds() * referenceSwath.prf) - referenceLine
return (rgoff, azoff)
def overlapFrequency(centerfreq1, bandwidth1, centerfreq2, bandwidth2):
startfreq1 = centerfreq1 - bandwidth1 / 2.0
endingfreq1 = centerfreq1 + bandwidth1 / 2.0
startfreq2 = centerfreq2 - bandwidth2 / 2.0
endingfreq2 = centerfreq2 + bandwidth2 / 2.0
overlapfreq = []
if startfreq2 <= startfreq1 <= endingfreq2:
overlapfreq.append(startfreq1)
if startfreq2 <= endingfreq1 <= endingfreq2:
overlapfreq.append(endingfreq1)
if startfreq1 < startfreq2 < endingfreq1:
overlapfreq.append(startfreq2)
if startfreq1 < endingfreq2 < endingfreq1:
overlapfreq.append(endingfreq2)
if len(overlapfreq) != 2:
#no overlap bandwidth
return None
else:
startfreq = min(overlapfreq)
endingfreq = max(overlapfreq)
return [startfreq, endingfreq]
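# Illustrative sketch (not part of the original module): two hypothetical range bands,
# 1.270 GHz / 28 MHz and 1.2575 GHz / 14 MHz, overlap between 1.2560 GHz and 1.2645 GHz;
# bands with no common frequency make the function return None.
def _demo_overlap_frequency():
    overlap = overlapFrequency(1.270e9, 28.0e6, 1.2575e9, 14.0e6)
    # overlap == [1.256e9, 1.2645e9]
    return overlap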
def readOffset(filename):
from isceobj.Location.Offset import OffsetField,Offset
with open(filename, 'r') as f:
lines = f.readlines()
# 0 1 2 3 4 5 6 7
#retstr = "%s %s %s %s %s %s %s %s" % (self.x,self.dx,self.y,self.dy,self.snr, self.sigmax, self.sigmay, self.sigmaxy)
offsets = OffsetField()
for linex in lines:
#linexl = re.split('\s+', linex)
#detect blank lines with only spaces and tabs
if linex.strip() == '':
continue
linexl = linex.split()
offset = Offset()
#offset.setCoordinate(int(linexl[0]),int(linexl[2]))
offset.setCoordinate(float(linexl[0]),float(linexl[2]))
offset.setOffset(float(linexl[1]),float(linexl[3]))
offset.setSignalToNoise(float(linexl[4]))
offset.setCovariance(float(linexl[5]),float(linexl[6]),float(linexl[7]))
offsets.addOffset(offset)
return offsets
def writeOffset(offset, fileName):
offsetsPlain = ''
for offsetx in offset:
offsetsPlainx = "{}".format(offsetx)
offsetsPlainx = offsetsPlainx.split()
offsetsPlain = offsetsPlain + "{:8d} {:10.3f} {:8d} {:12.3f} {:11.5f} {:11.6f} {:11.6f} {:11.6f}\n".format(
int(float(offsetsPlainx[0])),
float(offsetsPlainx[1]),
int(float(offsetsPlainx[2])),
float(offsetsPlainx[3]),
float(offsetsPlainx[4]),
float(offsetsPlainx[5]),
float(offsetsPlainx[6]),
float(offsetsPlainx[7])
)
offsetFile = fileName
with open(offsetFile, 'w') as f:
f.write(offsetsPlain)
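# Illustrative sketch (not part of the original module): a round trip through
# writeOffset/readOffset with a single made-up offset. Assumes an ISCE environment
# where isceobj is importable, as elsewhere in this module.
def _demo_offset_roundtrip(filename='demo.off'):
    from isceobj.Location.Offset import OffsetField, Offset
    offsets = OffsetField()
    offset = Offset()
    offset.setCoordinate(100, 200)
    offset.setOffset(1.5, -0.25)
    offset.setSignalToNoise(22.0)
    offset.setCovariance(0.0003, 0.002, 0.00001)
    offsets.addOffset(offset)
    writeOffset(offsets, filename)
    return readOffset(filename)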
def reformatGeometricalOffset(rangeOffsetFile, azimuthOffsetFile, reformatedOffsetFile, rangeStep=1, azimuthStep=1, maximumNumberOfOffsets=10000):
'''
reformat geometrical offset as ampcor output format
'''
import numpy as np
import isceobj
img = isceobj.createImage()
img.load(rangeOffsetFile+'.xml')
width = img.width
length = img.length
step = int(np.sqrt(width*length/maximumNumberOfOffsets) + 0.5)
if step == 0:
step = 1
rgoff = np.fromfile(rangeOffsetFile, dtype=np.float32).reshape(length, width)
azoff = np.fromfile(azimuthOffsetFile, dtype=np.float32).reshape(length, width)
offsetsPlain = ''
for i in range(0, length, step):
for j in range(0, width, step):
if (rgoff[i][j] == -999999.0) or (azoff[i][j] == -999999.0):
continue
offsetsPlain = offsetsPlain + "{:8d} {:10.3f} {:8d} {:12.3f} {:11.5f} {:11.6f} {:11.6f} {:11.6f}\n".format(
int(j*rangeStep+1),
float(rgoff[i][j])*rangeStep,
int(i*azimuthStep+1),
float(azoff[i][j])*azimuthStep,
float(22.00015),
float(0.000273),
float(0.002126),
float(0.000013)
)
with open(reformatedOffsetFile, 'w') as f:
f.write(offsetsPlain)
return
def cullOffsets(offsets):
import isceobj
from iscesys.StdOEL.StdOELPy import create_writer
distances = (10,5,3,3,3,3,3,3)
#numCullOffsetsLimits = (100, 75, 50, 50, 50, 50, 50, 50)
numCullOffsetsLimits = (50, 40, 30, 30, 30, 30, 30, 30)
refinedOffsets = offsets
for i, (distance, numCullOffsetsLimit) in enumerate(zip(distances, numCullOffsetsLimits)):
cullOff = isceobj.createOffoutliers()
cullOff.wireInputPort(name='offsets', object=refinedOffsets)
cullOff.setSNRThreshold(2.0)
cullOff.setDistance(distance)
        #set the tag used in the outfile. each message is preceded by this tag
        #if the writer is not of "file" type the call has no effect
stdWriter = create_writer("log", "", True, filename="offoutliers.log")
stdWriter.setFileTag("offoutliers", "log")
stdWriter.setFileTag("offoutliers", "err")
stdWriter.setFileTag("offoutliers", "out")
cullOff.setStdWriter(stdWriter)
#run it
cullOff.offoutliers()
refinedOffsets = cullOff.getRefinedOffsetField()
numLeft = len(refinedOffsets._offsets)
print('Number of offsets left after %2dth culling: %5d'%(i, numLeft))
if numLeft < numCullOffsetsLimit:
refinedOffsets = None
stdWriter.finalize()
return refinedOffsets
def cullOffsetsRoipac(offsets, numThreshold=50):
'''
    cull offsets using the fortran program from ROI_PAC
    numThreshold: minimum number of offsets left
'''
import os
from contrib.alos2proc_f.alos2proc_f import fitoff
from isceobj.Alos2Proc.Alos2ProcPublic import readOffset
from isceobj.Alos2Proc.Alos2ProcPublic import writeOffset
offsetFile = 'offset.off'
cullOffsetFile = 'cull.off'
writeOffset(offsets, offsetFile)
#try different parameters to cull offsets
breakFlag = 0
for maxrms in [0.08, 0.16, 0.24]:
for nsig in [1.5, 1.4, 1.3, 1.2, 1.1, 1.0, 0.9]:
fitoff(offsetFile, cullOffsetFile, nsig, maxrms, numThreshold)
#check number of matching points left
with open(cullOffsetFile, 'r') as ff:
numCullOffsets = sum(1 for linex in ff)
if numCullOffsets < numThreshold:
print('offsets culling with nsig {} maxrms {}: {} left after culling, too few points'.format(nsig, maxrms, numCullOffsets))
else:
print('offsets culling with nsig {} maxrms {}: {} left after culling, success'.format(nsig, maxrms, numCullOffsets))
breakFlag = 1
break
if breakFlag == 1:
break
if numCullOffsets < numThreshold:
refinedOffsets = None
else:
refinedOffsets = readOffset(cullOffsetFile)
os.remove(offsetFile)
os.remove(cullOffsetFile)
return refinedOffsets
def meanOffset(offsets):
rangeOffset = 0.0
azimuthOffset = 0.0
i = 0
for offsetx in offsets:
i += 1
rangeOffset += offsetx.dx
azimuthOffset += offsetx.dy
rangeOffset /= i
azimuthOffset /= i
return (rangeOffset, azimuthOffset)
def fitOffset(inputOffset, order=1, axis='range'):
'''fit a polynomial to the offset
order=0 also works, output is mean offset
'''
import numpy as np
index = []
offset = []
for a in inputOffset:
if axis=='range':
index.append(a.x)
offset.append(a.dx)
else:
index.append(a.y)
offset.append(a.dy)
p = np.polyfit(index, offset, order)
return list(p[::-1])
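# Illustrative sketch (not part of the original module): fitOffset only needs objects
# exposing x/dx (or y/dy), so a namedtuple stands in for isceobj Offset entries here.
# The numbers are made up; a linear ramp dx = 0.5 + 0.001*x is recovered as
# [constant, slope] because the polynomial is returned lowest order first.
def _demo_fit_offset():
    from collections import namedtuple
    FakeOffset = namedtuple('FakeOffset', ['x', 'dx', 'y', 'dy'])
    samples = [FakeOffset(x=x, dx=0.5 + 0.001 * x, y=0, dy=0.0) for x in range(0, 1000, 100)]
    coeffs = fitOffset(samples, order=1, axis='range')
    # coeffs ~= [0.5, 0.001]
    return coeffs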
def topo(swath, track, demFile, latFile, lonFile, hgtFile, losFile=None, incFile=None, mskFile=None, numberRangeLooks=1, numberAzimuthLooks=1, multilookTimeOffset=True):
import datetime
import isceobj
from zerodop.topozero import createTopozero
from isceobj.Planet.Planet import Planet
pointingDirection = {'right': -1, 'left' :1}
demImage = isceobj.createDemImage()
demImage.load(demFile + '.xml')
demImage.setAccessMode('read')
#####Run Topo
planet = Planet(pname='Earth')
topo = createTopozero()
topo.slantRangePixelSpacing = numberRangeLooks * swath.rangePixelSize
topo.prf = 1.0 / (numberAzimuthLooks * swath.azimuthLineInterval)
topo.radarWavelength = track.radarWavelength
topo.orbit = track.orbit
topo.width = int(swath.numberOfSamples/numberRangeLooks)
topo.length = int(swath.numberOfLines/numberAzimuthLooks)
topo.wireInputPort(name='dem', object=demImage)
topo.wireInputPort(name='planet', object=planet)
topo.numberRangeLooks = 1 #must be set as 1
topo.numberAzimuthLooks = 1 #must be set as 1 Cunren
topo.lookSide = pointingDirection[track.pointingDirection]
if multilookTimeOffset == True:
topo.sensingStart = swath.sensingStart + datetime.timedelta(seconds=(numberAzimuthLooks-1.0)/2.0/swath.prf)
topo.rangeFirstSample = swath.startingRange + (numberRangeLooks-1.0)/2.0 * swath.rangePixelSize
else:
topo.sensingStart = swath.sensingStart
topo.rangeFirstSample = swath.startingRange
topo.demInterpolationMethod='BIQUINTIC'
topo.latFilename = latFile
topo.lonFilename = lonFile
topo.heightFilename = hgtFile
if losFile != None:
topo.losFilename = losFile
if incFile != None:
topo.incFilename = incFile
if mskFile != None:
topo.maskFilename = mskFile
topo.topo()
return list(topo.snwe)
def geo2rdr(swath, track, latFile, lonFile, hgtFile, rangeOffsetFile, azimuthOffsetFile, numberRangeLooks=1, numberAzimuthLooks=1, multilookTimeOffset=True):
import datetime
import isceobj
from zerodop.geo2rdr import createGeo2rdr
from isceobj.Planet.Planet import Planet
pointingDirection = {'right': -1, 'left' :1}
latImage = isceobj.createImage()
latImage.load(latFile + '.xml')
latImage.setAccessMode('read')
lonImage = isceobj.createImage()
lonImage.load(lonFile + '.xml')
lonImage.setAccessMode('read')
hgtImage = isceobj.createDemImage()
hgtImage.load(hgtFile + '.xml')
hgtImage.setAccessMode('read')
planet = Planet(pname='Earth')
topo = createGeo2rdr()
topo.configure()
topo.slantRangePixelSpacing = numberRangeLooks * swath.rangePixelSize
topo.prf = 1.0 / (numberAzimuthLooks * swath.azimuthLineInterval)
topo.radarWavelength = track.radarWavelength
topo.orbit = track.orbit
topo.width = int(swath.numberOfSamples/numberRangeLooks)
topo.length = int(swath.numberOfLines/numberAzimuthLooks)
topo.demLength = hgtImage.length
topo.demWidth = hgtImage.width
topo.wireInputPort(name='planet', object=planet)
topo.numberRangeLooks = 1
topo.numberAzimuthLooks = 1 #must be set to be 1
topo.lookSide = pointingDirection[track.pointingDirection]
if multilookTimeOffset == True:
topo.sensingStart = swath.sensingStart + datetime.timedelta(seconds=(numberAzimuthLooks-1.0)/2.0*swath.azimuthLineInterval)
topo.rangeFirstSample = swath.startingRange + (numberRangeLooks-1.0)/2.0*swath.rangePixelSize
else:
topo.setSensingStart(swath.sensingStart)
topo.rangeFirstSample = swath.startingRange
topo.dopplerCentroidCoeffs = [0.] #we are using zero doppler geometry
topo.demImage = hgtImage
topo.latImage = latImage
topo.lonImage = lonImage
topo.rangeOffsetImageName = rangeOffsetFile
topo.azimuthOffsetImageName = azimuthOffsetFile
topo.geo2rdr()
return
def waterBodyRadar(latFile, lonFile, wbdFile, wbdOutFile):
'''
    create water body in radar coordinates
'''
import numpy as np
import isceobj
from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
demImage = isceobj.createDemImage()
demImage.load(wbdFile + '.xml')
#demImage.setAccessMode('read')
wbd=np.memmap(wbdFile, dtype='byte', mode='r', shape=(demImage.length, demImage.width))
image = isceobj.createImage()
image.load(latFile+'.xml')
width = image.width
length = image.length
latFp = open(latFile, 'rb')
lonFp = open(lonFile, 'rb')
wbdOutFp = open(wbdOutFile, 'wb')
wbdOutIndex = np.arange(width, dtype=np.int32)
print("create water body in radar coordinates...")
for i in range(length):
if (((i+1)%200) == 0):
print("processing line %6d of %6d" % (i+1, length), end='\r', flush=True)
wbdOut = np.zeros(width, dtype='byte')-2
lat = np.fromfile(latFp, dtype=np.float64, count=width)
lon = np.fromfile(lonFp, dtype=np.float64, count=width)
#indexes start with zero
lineIndex = np.int32((lat - demImage.firstLatitude) / demImage.deltaLatitude + 0.5)
sampleIndex = np.int32((lon - demImage.firstLongitude) / demImage.deltaLongitude + 0.5)
inboundIndex = np.logical_and(
np.logical_and(lineIndex>=0, lineIndex<=demImage.length-1),
np.logical_and(sampleIndex>=0, sampleIndex<=demImage.width-1)
)
#keep SRTM convention. water body. (0) --- land; (-1) --- water; (-2 or other value) --- no data.
wbdOut[(wbdOutIndex[inboundIndex],)] = wbd[(lineIndex[inboundIndex], sampleIndex[inboundIndex])]
wbdOut.astype(np.int8).tofile(wbdOutFp)
print("processing line %6d of %6d" % (length, length))
#create_xml(wbdOutFile, width, length, 'byte')
image = isceobj.createImage()
image.setDataType('BYTE')
image.addDescription('water body. (0) --- land; (-1) --- water; (-2) --- no data.')
image.setFilename(wbdOutFile)
image.extraFilename = wbdOutFile + '.vrt'
image.setWidth(width)
image.setLength(length)
image.renderHdr()
del wbd, demImage, image
latFp.close()
lonFp.close()
wbdOutFp.close()
def renameFile(oldname, newname):
import os
import isceobj
img = isceobj.createImage()
img.load(oldname + '.xml')
img.setFilename(newname)
img.extraFilename = newname+'.vrt'
img.renderHdr()
os.rename(oldname, newname)
os.remove(oldname + '.xml')
os.remove(oldname + '.vrt')
def cal_coherence(inf, win=5, edge=0):
'''
    compute coherence using only the interferogram (phase).
This routine still follows the regular equation for computing coherence,
but assumes the amplitudes of reference and secondary are one, so that coherence
can be computed using phase only.
inf: interferogram
win: window size
edge: 0: remove all non-full convolution samples
1: remove samples computed from less than half convolution
             (win=5 used for illustration below)
* * *
* * *
* * *
* * *
* * *
          2: remove samples computed from less than quarter convolution
             (win=5 used for illustration below)
* * *
* * *
* * *
3: remove non-full convolution samples on image edges
4: keep all samples
'''
import numpy as np
import scipy.signal as ss
if win % 2 != 1:
raise Exception('window size must be odd!')
    hwin = int(np.around((win - 1) / 2))
filt = np.ones((win, win))
amp = np.absolute(inf)
cnt = ss.convolve2d((amp!=0), filt, mode='same')
cor = ss.convolve2d(inf/(amp + (amp==0)), filt, mode='same')
cor = (amp!=0) * np.absolute(cor) / (cnt + (cnt==0))
#trim edges
if edge == 0:
num = win * win
cor[np.nonzero(cnt < num)] = 0.0
elif edge == 1:
num = win * (hwin+1)
cor[np.nonzero(cnt < num)] = 0.0
elif edge == 2:
num = (hwin+1) * (hwin+1)
cor[np.nonzero(cnt < num)] = 0.0
elif edge == 3:
cor[0:hwin, :] = 0.0
cor[-hwin:, :] = 0.0
cor[:, 0:hwin] = 0.0
cor[:, -hwin:] = 0.0
else:
pass
#print("coherence, max: {} min: {}".format(np.max(cor[np.nonzero(cor!=0)]), np.min(cor[np.nonzero(cor!=0)])))
return cor
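#A minimal sketch of how cal_coherence() can be exercised: it only needs a 2-D
#complex interferogram array, so the synthetic phase ramp and no-data stripe
#below are arbitrary demo values.
def exampleCalCoherence():
    import numpy as np
    length, width = 64, 64
    ramp = 0.05 * np.arange(width)[None, :] + 0.02 * np.arange(length)[:, None]
    inf = np.exp(1j * ramp).astype(np.complex64)
    #simulate a no-data stripe on the left edge
    inf[:, :8] = 0
    cor = cal_coherence(inf, win=5, edge=0)
    print('coherence min/max: {} {}'.format(cor.min(), cor.max()))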
def snaphuUnwrap(track, t, wrapName, corName, unwrapName, nrlks, nalks, costMode = 'DEFO',initMethod = 'MST', defomax = 4.0, initOnly = False):
#runUnwrap(self, costMode = 'SMOOTH',initMethod = 'MCF', defomax = 2, initOnly = True)
'''
track: track object
t: time for computing earth radius and altitude, normally mid azimuth time
wrapName: input interferogram
corName: input coherence file
unwrapName: output unwrapped interferogram
nrlks: number of range looks of the interferogram
nalks: number of azimuth looks of the interferogram
'''
import datetime
import numpy as np
import isceobj
from contrib.Snaphu.Snaphu import Snaphu
from isceobj.Planet.Planet import Planet
corImg = isceobj.createImage()
corImg.load(corName + '.xml')
width = corImg.width
length = corImg.length
#get altitude
orbit = track.orbit
peg = orbit.interpolateOrbit(t, method='hermite')
refElp = Planet(pname='Earth').ellipsoid
llh = refElp.xyz_to_llh(peg.getPosition())
hdg = orbit.getENUHeading(t)
refElp.setSCH(llh[0], llh[1], hdg)
earthRadius = refElp.pegRadCur
altitude = llh[2]
rangeLooks = nrlks
azimuthLooks = nalks
azfact = 0.8
rngfact = 0.8
corrLooks = rangeLooks * azimuthLooks/(azfact*rngfact)
maxComponents = 20
snp = Snaphu()
snp.setInitOnly(initOnly)
snp.setInput(wrapName)
snp.setOutput(unwrapName)
snp.setWidth(width)
snp.setCostMode(costMode)
snp.setEarthRadius(earthRadius)
snp.setWavelength(track.radarWavelength)
snp.setAltitude(altitude)
snp.setCorrfile(corName)
snp.setInitMethod(initMethod)
snp.setCorrLooks(corrLooks)
snp.setMaxComponents(maxComponents)
snp.setDefoMaxCycles(defomax)
snp.setRangeLooks(rangeLooks)
snp.setAzimuthLooks(azimuthLooks)
if corImg.bands == 1:
snp.setCorFileFormat('FLOAT_DATA')
snp.prepare()
snp.unwrap()
######Render XML
outImage = isceobj.Image.createUnwImage()
outImage.setFilename(unwrapName)
outImage.setWidth(width)
outImage.setAccessMode('read')
outImage.renderVRT()
outImage.createImage()
outImage.finalizeImage()
outImage.renderHdr()
#####Check if connected components was created
if snp.dumpConnectedComponents:
connImage = isceobj.Image.createImage()
connImage.setFilename(unwrapName+'.conncomp')
connImage.setWidth(width)
connImage.setAccessMode('read')
connImage.setDataType('BYTE')
connImage.renderVRT()
connImage.createImage()
connImage.finalizeImage()
connImage.renderHdr()
del connImage
del corImg
del snp
del outImage
    #remove weird things in the no-data area
amp=np.memmap(unwrapName, dtype='float32', mode='r+', shape=(length*2, width))
wrap = np.fromfile(wrapName, dtype=np.complex64).reshape(length, width)
(amp[0:length*2:2, :])[np.nonzero(wrap==0)]=0
(amp[1:length*2:2, :])[np.nonzero(wrap==0)]=0
del amp
del wrap
return
def snaphuUnwrapOriginal(wrapName, corName, ampName, unwrapName, costMode = 's', initMethod = 'mcf', snaphuConfFile = 'snaphu.conf'):
'''
unwrap interferogram using original snaphu program
'''
import numpy as np
import isceobj
corImg = isceobj.createImage()
corImg.load(corName + '.xml')
width = corImg.width
length = corImg.length
#specify coherence file format in configure file
#snaphuConfFile = 'snaphu.conf'
if corImg.bands == 1:
snaphuConf = '''CORRFILEFORMAT FLOAT_DATA
CONNCOMPFILE {}
MAXNCOMPS 20'''.format(unwrapName+'.conncomp')
else:
snaphuConf = '''CORRFILEFORMAT ALT_LINE_DATA
CONNCOMPFILE {}
MAXNCOMPS 20'''.format(unwrapName+'.conncomp')
with open(snaphuConfFile, 'w') as f:
f.write(snaphuConf)
cmd = 'snaphu {} {} -f {} -{} -o {} -a {} -c {} -v --{}'.format(
wrapName,
width,
snaphuConfFile,
costMode,
unwrapName,
ampName,
corName,
initMethod
)
runCmd(cmd)
create_xml(unwrapName, width, length, 'unw')
connImage = isceobj.Image.createImage()
connImage.setFilename(unwrapName+'.conncomp')
connImage.setWidth(width)
connImage.setAccessMode('read')
connImage.setDataType('BYTE')
connImage.renderVRT()
connImage.createImage()
connImage.finalizeImage()
connImage.renderHdr()
del connImage
    #remove weird things in the no-data area
amp=np.memmap(unwrapName, dtype='float32', mode='r+', shape=(length*2, width))
wrap = np.fromfile(wrapName, dtype=np.complex64).reshape(length, width)
(amp[0:length*2:2, :])[np.nonzero(wrap==0)]=0
(amp[1:length*2:2, :])[np.nonzero(wrap==0)]=0
del amp
del wrap
return
def getBboxGeo(track, useTrackOnly=False, numberOfSamples=1, numberOfLines=1, numberRangeLooks=1, numberAzimuthLooks=1):
'''
get bounding box in geo-coordinate
'''
import numpy as np
pointingDirection = {'right': -1, 'left' :1}
if useTrackOnly:
import datetime
rangeMin = track.startingRange + (numberRangeLooks-1.0)/2.0*track.rangePixelSize
rangeMax = rangeMin + (numberOfSamples-1) * numberRangeLooks * track.rangePixelSize
azimuthTimeMin = track.sensingStart + datetime.timedelta(seconds=(numberAzimuthLooks-1.0)/2.0*track.azimuthLineInterval)
azimuthTimeMax = azimuthTimeMin + datetime.timedelta(seconds=(numberOfLines-1) * numberAzimuthLooks * track.azimuthLineInterval)
bboxRdr = [rangeMin, rangeMax, azimuthTimeMin, azimuthTimeMax]
else:
bboxRdr = getBboxRdr(track)
rangeMin = bboxRdr[0]
rangeMax = bboxRdr[1]
azimuthTimeMin = bboxRdr[2]
azimuthTimeMax = bboxRdr[3]
#get bounding box using Piyush's code
hgtrange=[-500,9000]
ts = [azimuthTimeMin, azimuthTimeMax]
rngs = [rangeMin, rangeMax]
pos = []
for ht in hgtrange:
for tim in ts:
for rng in rngs:
llh = track.orbit.rdr2geo(tim, rng, height=ht, side=pointingDirection[track.pointingDirection])
pos.append(llh)
pos = np.array(pos)
# S N W E
bbox = [np.min(pos[:,0]), np.max(pos[:,0]), np.min(pos[:,1]), np.max(pos[:,1])]
return bbox
def getBboxRdr(track):
'''
get bounding box in radar-coordinate
'''
import datetime
numberOfFrames = len(track.frames)
numberOfSwaths = len(track.frames[0].swaths)
sensingStartList = []
sensingEndList = []
startingRangeList = []
endingRangeList = []
for i in range(numberOfFrames):
for j in range(numberOfSwaths):
swath = track.frames[i].swaths[j]
sensingStartList.append(swath.sensingStart)
sensingEndList.append(swath.sensingStart + datetime.timedelta(seconds=(swath.numberOfLines-1) * swath.azimuthLineInterval))
startingRangeList.append(swath.startingRange)
endingRangeList.append(swath.startingRange + (swath.numberOfSamples - 1) * swath.rangePixelSize)
azimuthTimeMin = min(sensingStartList)
azimuthTimeMax = max(sensingEndList)
azimuthTimeMid = azimuthTimeMin+datetime.timedelta(seconds=(azimuthTimeMax-azimuthTimeMin).total_seconds()/2.0)
rangeMin = min(startingRangeList)
rangeMax = max(endingRangeList)
rangeMid = (rangeMin + rangeMax) / 2.0
bbox = [rangeMin, rangeMax, azimuthTimeMin, azimuthTimeMax]
return bbox
def filterInterferogram(data, alpha, windowSize, stepSize):
'''
a filter wrapper
'''
import os
import numpy as np
from contrib.alos2filter.alos2filter import psfilt1
(length, width)=data.shape
data.astype(np.complex64).tofile('tmp1234.int')
psfilt1('tmp1234.int', 'filt_tmp1234.int', width, alpha, windowSize, stepSize)
data2 = np.fromfile('filt_tmp1234.int', dtype=np.complex64).reshape(length, width)
os.remove('tmp1234.int')
os.remove('filt_tmp1234.int')
return data2
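#A minimal sketch of calling filterInterferogram(): it only needs a 2-D complex64
#array and round-trips the data through temporary flat binary files and psfilt1,
#so ISCE must be built with the contrib.alos2filter extension. The filter
#parameters below are arbitrary demo values, not recommendations.
def exampleFilterInterferogram():
    import numpy as np
    length, width = 128, 128
    ramp = 0.1 * np.arange(width)[None, :] * np.ones((length, 1))
    noisy = np.exp(1j * (ramp + 0.3 * np.random.randn(length, width))).astype(np.complex64)
    filtered = filterInterferogram(noisy, alpha=0.3, windowSize=32, stepSize=4)
    print(filtered.shape)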
###################################################################
# these are routines for burst-by-burst ScanSAR interferometry
###################################################################
def mosaicBurstInterferogram(swath, burstPrefix, outputFile, numberOfLooksThreshold=1):
'''
take a burst sequence and output mosaicked file
'''
import numpy as np
interferogram = np.zeros((swath.numberOfLines, swath.numberOfSamples), dtype=np.complex64)
cnt = np.zeros((swath.numberOfLines, swath.numberOfSamples), dtype=np.int8)
for i in range(swath.numberOfBursts):
burstFile = burstPrefix + '_%02d.int'%(i+1)
burstInterferogram = np.fromfile(burstFile, dtype=np.complex64).reshape(swath.burstSlcNumberOfLines, swath.burstSlcNumberOfSamples)
interferogram[0+swath.burstSlcFirstLineOffsets[i]:swath.burstSlcNumberOfLines+swath.burstSlcFirstLineOffsets[i], :] += burstInterferogram
cnt[0+swath.burstSlcFirstLineOffsets[i]:swath.burstSlcNumberOfLines+swath.burstSlcFirstLineOffsets[i], :] += (burstInterferogram!=0)
#trim upper and lower edges with less number of looks
#############################################################################
firstLine = 0
for i in range(swath.numberOfLines):
if np.sum(cnt[i,:]>=numberOfLooksThreshold) > swath.numberOfSamples/2:
firstLine = i
break
lastLine = swath.numberOfLines - 1
for i in range(swath.numberOfLines):
if np.sum(cnt[swath.numberOfLines-1-i,:]>=numberOfLooksThreshold) > swath.numberOfSamples/2:
lastLine = swath.numberOfLines-1-i
break
interferogram[:firstLine,:]=0
interferogram[lastLine+1:,:]=0
# if numberOfLooksThreshold!= None:
# interferogram[np.nonzero(cnt<numberOfLooksThreshold)] = 0
#############################################################################
interferogram.astype(np.complex64).tofile(outputFile)
create_xml(outputFile, swath.numberOfSamples, swath.numberOfLines, 'int')
def mosaicBurstAmplitude(swath, burstPrefix, outputFile, numberOfLooksThreshold=1):
'''
take a burst sequence and output the magnitude
'''
import numpy as np
amp = np.zeros((swath.numberOfLines, swath.numberOfSamples), dtype=np.float32)
cnt = np.zeros((swath.numberOfLines, swath.numberOfSamples), dtype=np.int8)
for i in range(swath.numberOfBursts):
burstFile = burstPrefix + '_%02d.slc'%(i+1)
#azLineOffset = round((swath.burstSlcStartTimes[i] - swath.burstSlcStartTimes[0]).total_seconds() / swath.azimuthLineInterval)
burstMag = np.absolute(np.fromfile(burstFile, dtype=np.complex64).reshape(swath.burstSlcNumberOfLines, swath.burstSlcNumberOfSamples))
burstPwr = burstMag * burstMag
amp[0+swath.burstSlcFirstLineOffsets[i]:swath.burstSlcNumberOfLines+swath.burstSlcFirstLineOffsets[i], :] += burstPwr
cnt[0+swath.burstSlcFirstLineOffsets[i]:swath.burstSlcNumberOfLines+swath.burstSlcFirstLineOffsets[i], :] += (burstPwr!=0)
#trim upper and lower edges with less number of looks
#############################################################################
firstLine = 0
for i in range(swath.numberOfLines):
if np.sum(cnt[i,:]>=numberOfLooksThreshold) > swath.numberOfSamples/2:
firstLine = i
break
lastLine = swath.numberOfLines - 1
for i in range(swath.numberOfLines):
if np.sum(cnt[swath.numberOfLines-1-i,:]>=numberOfLooksThreshold) > swath.numberOfSamples/2:
lastLine = swath.numberOfLines-1-i
break
amp[:firstLine,:]=0
amp[lastLine+1:,:]=0
# if numberOfLooksThreshold!= None:
# amp[np.nonzero(cnt<numberOfLooksThreshold)] = 0
#############################################################################
np.sqrt(amp).astype(np.float32).tofile(outputFile)
create_xml(outputFile, swath.numberOfSamples, swath.numberOfLines, 'float')
def resampleBursts(referenceSwath, secondarySwath,
referenceBurstDir, secondaryBurstDir, secondaryBurstResampledDir, interferogramDir,
referenceBurstPrefix, secondaryBurstPrefix, secondaryBurstResampledPrefix, interferogramPrefix,
rangeOffset, azimuthOffset, rangeOffsetResidual=0, azimuthOffsetResidual=0):
import os
import datetime
import numpy as np
import numpy.matlib
from contrib.alos2proc.alos2proc import resamp
os.makedirs(secondaryBurstResampledDir, exist_ok=True)
os.makedirs(interferogramDir, exist_ok=True)
#get burst file names
referenceBurstSlc = [referenceBurstPrefix+'_%02d.slc'%(i+1) for i in range(referenceSwath.numberOfBursts)]
secondaryBurstSlc = [secondaryBurstPrefix+'_%02d.slc'%(i+1) for i in range(secondarySwath.numberOfBursts)]
secondaryBurstSlcResampled = [secondaryBurstPrefix+'_%02d.slc'%(i+1) for i in range(referenceSwath.numberOfBursts)]
interferogram = [interferogramPrefix+'_%02d.int'%(i+1) for i in range(referenceSwath.numberOfBursts)]
length = referenceSwath.burstSlcNumberOfLines
width = referenceSwath.burstSlcNumberOfSamples
lengthSecondary = secondarySwath.burstSlcNumberOfLines
widthSecondary = secondarySwath.burstSlcNumberOfSamples
#secondary burst slc start times
secondaryBurstStartTimesSlc = [secondarySwath.firstBurstSlcStartTime + \
datetime.timedelta(seconds=secondarySwath.burstSlcFirstLineOffsets[i]*secondarySwath.azimuthLineInterval) \
for i in range(secondarySwath.numberOfBursts)]
#secondary burst raw start times
secondaryBurstStartTimesRaw = [secondarySwath.firstBurstRawStartTime + \
datetime.timedelta(seconds=i*secondarySwath.burstCycleLength/secondarySwath.prf) \
for i in range(secondarySwath.numberOfBursts)]
for i in range(referenceSwath.numberOfBursts):
##########################################################################
# 1. get offsets and corresponding secondary burst
##########################################################################
#range offset
with open(rangeOffset, 'rb') as f:
f.seek(referenceSwath.burstSlcFirstLineOffsets[i] * width * np.dtype(np.float32).itemsize, 0)
rgoffBurst = np.fromfile(f, dtype=np.float32, count=length*width).reshape(length,width)
if type(rangeOffsetResidual) == np.ndarray:
residual = rangeOffsetResidual[0+referenceSwath.burstSlcFirstLineOffsets[i]:length+referenceSwath.burstSlcFirstLineOffsets[i],:]
rgoffBurst[np.nonzero(rgoffBurst!=-999999.0)] += residual[np.nonzero(rgoffBurst!=-999999.0)]
else:
rgoffBurst[np.nonzero(rgoffBurst!=-999999.0)] += rangeOffsetResidual
#azimuth offset
with open(azimuthOffset, 'rb') as f:
f.seek(referenceSwath.burstSlcFirstLineOffsets[i] * width * np.dtype(np.float32).itemsize, 0)
azoffBurst = np.fromfile(f, dtype=np.float32, count=length*width).reshape(length,width)
if type(azimuthOffsetResidual) == np.ndarray:
residual = azimuthOffsetResidual[0+referenceSwath.burstSlcFirstLineOffsets[i]:length+referenceSwath.burstSlcFirstLineOffsets[i],:]
azoffBurst[np.nonzero(azoffBurst!=-999999.0)] += residual[np.nonzero(azoffBurst!=-999999.0)]
else:
azoffBurst[np.nonzero(azoffBurst!=-999999.0)] += azimuthOffsetResidual
#find the corresponding secondary burst
#get mean offset to use
#remove BAD_VALUE = -999999.0 as defined in geo2rdr.f90
#single precision is not accurate enough to compute mean
azoffBurstMean = np.mean(azoffBurst[np.nonzero(azoffBurst!=-999999.0)], dtype=np.float64)
iSecondary = -1
for j in range(secondarySwath.numberOfBursts):
if abs(referenceSwath.burstSlcFirstLineOffsets[i] + azoffBurstMean - secondarySwath.burstSlcFirstLineOffsets[j]) < (referenceSwath.burstLength / referenceSwath.prf * 2.0) / referenceSwath.azimuthLineInterval:
iSecondary = j
break
#output zero resampled burst/interferogram if no secondary burst found
if iSecondary == -1:
print('\nburst pair, reference: %2d, secondary: no'%(i+1))
#output an interferogram with all pixels set to zero
os.chdir(interferogramDir)
np.zeros((length, width), dtype=np.complex64).astype(np.complex64).tofile(interferogram[i])
create_xml(interferogram[i], width, length, 'int')
os.chdir('../')
#output a resampled secondary image with all pixels set to zero
os.chdir(secondaryBurstResampledDir)
np.zeros((length, width), dtype=np.complex64).astype(np.complex64).tofile(secondaryBurstSlcResampled[i])
create_xml(secondaryBurstSlcResampled[i], width, length, 'slc')
os.chdir('../')
continue
else:
print('\nburst pair, reference: %2d, secondary: %3d'%(i+1, iSecondary+1))
#adjust azimuth offset accordingly, since original azimuth offset assumes reference and secondary start with sensingStart
azoffBurst -= (secondarySwath.burstSlcFirstLineOffsets[iSecondary]-referenceSwath.burstSlcFirstLineOffsets[i])
##########################################################################
# 2. compute deramp and reramp signals
##########################################################################
cj = np.complex64(1j)
tbase = (secondaryBurstStartTimesSlc[iSecondary] - (secondaryBurstStartTimesRaw[iSecondary] + \
datetime.timedelta(seconds=(secondarySwath.burstLength - 1.0) / 2.0 / secondarySwath.prf))).total_seconds()
#compute deramp signal
index1 = np.matlib.repmat(np.arange(widthSecondary), lengthSecondary, 1)
index2 = np.matlib.repmat(np.arange(lengthSecondary).reshape(lengthSecondary, 1), 1, widthSecondary)
ka = secondarySwath.azimuthFmrateVsPixel[3] * index1**3 + secondarySwath.azimuthFmrateVsPixel[2] * index1**2 + \
secondarySwath.azimuthFmrateVsPixel[1] * index1 + secondarySwath.azimuthFmrateVsPixel[0]
#use the convention that ka > 0
ka = -ka
t = tbase + index2*secondarySwath.azimuthLineInterval
deramp = np.exp(cj * np.pi * (-ka) * t**2)
#compute reramp signal
index1 = np.matlib.repmat(np.arange(width), length, 1) + rgoffBurst
index2 = np.matlib.repmat(np.arange(length).reshape(length, 1), 1, width) + azoffBurst
ka = secondarySwath.azimuthFmrateVsPixel[3] * index1**3 + secondarySwath.azimuthFmrateVsPixel[2] * index1**2 + \
secondarySwath.azimuthFmrateVsPixel[1] * index1 + secondarySwath.azimuthFmrateVsPixel[0]
#use the convention that ka > 0
ka = -ka
t = tbase + index2*secondarySwath.azimuthLineInterval
reramp = np.exp(cj * np.pi * (ka) * t**2)
##########################################################################
# 3. resample secondary burst
##########################################################################
#go to secondary directory to do resampling
os.chdir(secondaryBurstDir)
#output offsets
rgoffBurstFile = "burst_rg.off"
azoffBurstFile = "burst_az.off"
rgoffBurst.astype(np.float32).tofile(rgoffBurstFile)
azoffBurst.astype(np.float32).tofile(azoffBurstFile)
#deramp secondary burst
secondaryBurstDerampedFile = "secondary.slc"
sburst = np.fromfile(secondaryBurstSlc[iSecondary], dtype=np.complex64).reshape(lengthSecondary, widthSecondary)
(deramp * sburst).astype(np.complex64).tofile(secondaryBurstDerampedFile)
create_xml(secondaryBurstDerampedFile, widthSecondary, lengthSecondary, 'slc')
#resampled secondary burst
secondaryBurstResampFile = 'secondary_resamp.slc'
#resample secondary burst
        #doppler now has a bigger impact, as its value is about 35 Hz (the azimuth resampling frequency is now only 1/20 * PRF)
        #we don't know whether this doppler value is accurate, so we set it to zero, which seems to give the best resampling result
        #otherwise, if it is not accurate and we still use it, it will significantly affect the resampling result
dopplerVsPixel = secondarySwath.dopplerVsPixel
dopplerVsPixel = [0.0, 0.0, 0.0, 0.0]
resamp(secondaryBurstDerampedFile, secondaryBurstResampFile, rgoffBurstFile, azoffBurstFile, width, length, 1.0/secondarySwath.azimuthLineInterval, dopplerVsPixel,
rgcoef=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
azcoef=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
azpos_off=0.0)
#read resampled secondary burst and reramp
sburstResamp = reramp * (np.fromfile(secondaryBurstResampFile, dtype=np.complex64).reshape(length, width))
#clear up
os.remove(rgoffBurstFile)
os.remove(azoffBurstFile)
os.remove(secondaryBurstDerampedFile)
os.remove(secondaryBurstDerampedFile+'.vrt')
os.remove(secondaryBurstDerampedFile+'.xml')
os.remove(secondaryBurstResampFile)
os.remove(secondaryBurstResampFile+'.vrt')
os.remove(secondaryBurstResampFile+'.xml')
os.chdir('../')
##########################################################################
# 4. dump results
##########################################################################
#dump resampled secondary burst
os.chdir(secondaryBurstResampledDir)
sburstResamp.astype(np.complex64).tofile(secondaryBurstSlcResampled[i])
create_xml(secondaryBurstSlcResampled[i], width, length, 'slc')
os.chdir('../')
#dump burst interferogram
mburst = np.fromfile(os.path.join(referenceBurstDir, referenceBurstSlc[i]), dtype=np.complex64).reshape(length, width)
os.chdir(interferogramDir)
(mburst * np.conj(sburstResamp)).astype(np.complex64).tofile(interferogram[i])
create_xml(interferogram[i], width, length, 'int')
os.chdir('../')
def create_multi_index(width, rgl):
import numpy as np
#create index after multilooking
    #assuming the original index starts with 0
#applies to both range and azimuth direction
widthm = int(width/rgl)
#create range index: This applies to both odd and even cases, "rgl = 1" case, and "rgl = 2" case
start_rgindex = (rgl - 1.0) / 2.0
rgindex0 = start_rgindex + np.arange(widthm) * rgl
return rgindex0
def create_multi_index2(width2, l1, l2):
import numpy as np
#for number of looks of l1 and l2
    #calculate the corresponding index number of l2 in the l1 array
#applies to both range and azimuth direction
return ((l2 - l1) / 2.0 + np.arange(width2) * l2) / l1
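#A small numeric check of the two index helpers: create_multi_index() gives the
#original (single-look) index of each multilooked sample center, and
#create_multi_index2() expresses l2-look sample positions in an l1-look index
#system.
def exampleMultiIndex():
    #4-look cell centers in the original index: [1.5, 5.5, 9.5, 13.5]
    print(create_multi_index(16, 4))
    #8-look positions expressed in a 2-look index system: [1.5, 5.5, 9.5, 13.5]
    print(create_multi_index2(4, 2, 8))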
def computePhaseDiff(data1, data22, coherenceWindowSize=5, coherenceThreshold=0.85):
import copy
import numpy as np
from isceobj.Alos2Proc.Alos2ProcPublic import cal_coherence_1
#data22 will be changed in the processing, so make a copy here
data2 = copy.deepcopy(data22)
dataDiff = data1 * np.conj(data2)
cor = cal_coherence_1(dataDiff, win=coherenceWindowSize)
index = np.nonzero(np.logical_and(cor>coherenceThreshold, dataDiff!=0))
#check if there are valid pixels
if index[0].size == 0:
phaseDiff = 0.0
numberOfValidSamples = 0
return (phaseDiff, numberOfValidSamples)
else:
numberOfValidSamples = index[0].size
#in case phase difference is around PI, sum of +PI and -PI is zero, which affects the following
#mean phase difference computation.
#remove magnitude before doing sum?
dataDiff = dataDiff / (np.absolute(dataDiff)+(dataDiff==0))
phaseDiff0 = np.angle(np.sum(dataDiff[index], dtype=np.complex128))
#now the phase difference values are mostly centered at 0
data2 *= np.exp(np.complex64(1j) * phaseDiff0)
phaseDiff = phaseDiff0
#compute phase difference
numberOfIterations = 1000000
threshold = 0.000001
for k in range(numberOfIterations):
dataDiff = data1 * np.conj(data2)
angle = np.mean(np.angle(dataDiff[index]), dtype=np.float64)
phaseDiff += angle
data2 *= np.exp(np.complex64(1j) * angle)
print('phase offset: %15.12f rad after iteration: %3d'%(phaseDiff, k+1))
if (k+1 >= 5) and (angle <= threshold):
break
        #wrap the value into [-pi, pi]
if phaseDiff > np.pi:
phaseDiff -= 2.0 * np.pi
if phaseDiff < -np.pi:
phaseDiff += 2.0 * np.pi
# mean phase difference
# number of valid samples to compute the phase difference
return (phaseDiff, numberOfValidSamples)
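#A synthetic check of computePhaseDiff() with a known 0.5 rad offset injected
#between two constant-amplitude images. This assumes an ISCE environment where
#cal_coherence_1() is importable, exactly as computePhaseDiff() itself requires.
def exampleComputePhaseDiff():
    import numpy as np
    length, width = 64, 64
    ramp = 0.03 * np.arange(width)[None, :] * np.ones((length, 1))
    data1 = np.exp(1j * ramp).astype(np.complex64)
    data2 = (data1 * np.exp(1j * 0.5)).astype(np.complex64)
    phaseDiff, numberOfValidSamples = computePhaseDiff(data1, data2)
    #phaseDiff should be close to -0.5 rad, since the difference is data1 * conj(data2)
    print(phaseDiff, numberOfValidSamples)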
def snap(inputValue, fixedValues, snapThreshold):
'''
fixedValues can be a list or numpy array
'''
import numpy as np
diff = np.absolute(np.absolute(np.array(fixedValues)) - np.absolute(inputValue))
indexMin = np.argmin(diff)
if diff[indexMin] < snapThreshold:
outputValue = np.sign(inputValue) * np.absolute(fixedValues[indexMin])
snapped = True
else:
outputValue = inputValue
snapped = False
return (outputValue, snapped)
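#A minimal sketch of snap(): it pulls a measured value onto the closest entry of
#a list of expected values when it lies within snapThreshold. The numbers below
#are arbitrary demo values.
def exampleSnap():
    import numpy as np
    expected = [0.0, np.pi/4, np.pi/2, np.pi]
    value, snapped = snap(0.80, expected, snapThreshold=0.05)
    #value is now pi/4 (about 0.7854) and snapped is True
    print(value, snapped)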
modeProcParDict = {
'ALOS-2': {
#All SPT (SBS) modes are the same
'SBS': {
'numberRangeLooks1': 2,
'numberAzimuthLooks1': 4,
'numberRangeLooks2': 4,
'numberAzimuthLooks2': 4,
'numberRangeLooksIon': 16,
'numberAzimuthLooksIon': 16,
'filterStdIon': 0.015
},
#All SM1 (UBS, UBD) modes are the same
'UBS': {
'numberRangeLooks1': 2,
'numberAzimuthLooks1': 3,
'numberRangeLooks2': 4,
'numberAzimuthLooks2': 4,
'numberRangeLooksIon': 32,
'numberAzimuthLooksIon': 32,
'filterStdIon': 0.015
},
'UBD': {
'numberRangeLooks1': 2,
'numberAzimuthLooks1': 3,
'numberRangeLooks2': 4,
'numberAzimuthLooks2': 4,
'numberRangeLooksIon': 32,
'numberAzimuthLooksIon': 32,
'filterStdIon': 0.015
},
#All SM2 (HBS, HBD, HBQ) modes are the same
'HBS': {
'numberRangeLooks1': 2,
'numberAzimuthLooks1': 4,
'numberRangeLooks2': 4,
'numberAzimuthLooks2': 4,
'numberRangeLooksIon': 16,
'numberAzimuthLooksIon': 16,
'filterStdIon': 0.035
},
'HBD': {
'numberRangeLooks1': 2,
'numberAzimuthLooks1': 4,
'numberRangeLooks2': 4,
'numberAzimuthLooks2': 4,
'numberRangeLooksIon': 16,
'numberAzimuthLooksIon': 16,
'filterStdIon': 0.035
},
'HBQ': {
'numberRangeLooks1': 2,
'numberAzimuthLooks1': 4,
'numberRangeLooks2': 4,
'numberAzimuthLooks2': 4,
'numberRangeLooksIon': 16,
'numberAzimuthLooksIon': 16,
'filterStdIon': 0.035
},
#All SM3 (FBS, FBD, FBQ) modes are the same
'FBS': {
'numberRangeLooks1': 2,
'numberAzimuthLooks1': 4,
'numberRangeLooks2': 4,
'numberAzimuthLooks2': 4,
'numberRangeLooksIon': 16,
'numberAzimuthLooksIon': 16,
'filterStdIon': 0.075
},
'FBD': {
'numberRangeLooks1': 2,
'numberAzimuthLooks1': 4,
'numberRangeLooks2': 4,
'numberAzimuthLooks2': 4,
'numberRangeLooksIon': 16,
'numberAzimuthLooksIon': 16,
'filterStdIon': 0.075
},
'FBQ': {
'numberRangeLooks1': 2,
'numberAzimuthLooks1': 4,
'numberRangeLooks2': 4,
'numberAzimuthLooks2': 4,
'numberRangeLooksIon': 16,
'numberAzimuthLooksIon': 16,
'filterStdIon': 0.075
},
#All WD1 (WBS, WBD) modes are the same
'WBS': {
'numberRangeLooks1': 1,
'numberAzimuthLooks1': 14,
'numberRangeLooks2': 5,
'numberAzimuthLooks2': 2,
'numberRangeLooksIon': 80,
'numberAzimuthLooksIon': 32,
'filterStdIon': 0.1
},
'WBD': {
'numberRangeLooks1': 1,
'numberAzimuthLooks1': 14,
'numberRangeLooks2': 5,
'numberAzimuthLooks2': 2,
'numberRangeLooksIon': 80,
'numberAzimuthLooksIon': 32,
'filterStdIon': 0.1
},
#All WD1 (WWS, WWD) modes are the same
'WWS': {
'numberRangeLooks1': 2,
'numberAzimuthLooks1': 14,
'numberRangeLooks2': 5,
'numberAzimuthLooks2': 2,
'numberRangeLooksIon': 80,
'numberAzimuthLooksIon': 32,
'filterStdIon': 0.075
},
'WWD': {
'numberRangeLooks1': 2,
'numberAzimuthLooks1': 14,
'numberRangeLooks2': 5,
'numberAzimuthLooks2': 2,
'numberRangeLooksIon': 80,
'numberAzimuthLooksIon': 32,
'filterStdIon': 0.075
},
#All WD2 (VBS, VBD) modes are the same
'VBS': {
'numberRangeLooks1': 1,
'numberAzimuthLooks1': 14,
'numberRangeLooks2': 5,
'numberAzimuthLooks2': 2,
'numberRangeLooksIon': 80,
'numberAzimuthLooksIon': 32,
'filterStdIon': 0.1
},
'VBD': {
'numberRangeLooks1': 1,
'numberAzimuthLooks1': 14,
'numberRangeLooks2': 5,
'numberAzimuthLooks2': 2,
'numberRangeLooksIon': 80,
'numberAzimuthLooksIon': 32,
'filterStdIon': 0.1
}
}
}
import numpy as np
filterStdPolyIon = np.array([ 2.31536879e-05, -3.41687763e-03, 1.39904121e-01])
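#A minimal look-up sketch: modeProcParDict is keyed by sensor and then by
#operation mode. filterStdPolyIon holds polynomial coefficients (highest order
#first) that callers evaluate with np.polyval; the argument value 50.0 below is
#purely hypothetical and only illustrates the call pattern.
def exampleModeProcPar():
    par = modeProcParDict['ALOS-2']['FBD']
    print(par['numberRangeLooks1'], par['numberAzimuthLooks1'], par['filterStdIon'])
    print(np.polyval(filterStdPolyIon, 50.0))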
| 38.362645
| 220
| 0.588187
|
53d04637c56f193d7be9471a32e495d1b4383694
| 1,693
|
py
|
Python
|
data/p2DJ/New/R2/benchmark/startCirq63.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/R2/benchmark/startCirq63.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/R2/benchmark/startCirq63.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=9
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.X.on(input_qubit[1])) # number=2
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=4
c.append(cirq.X.on(input_qubit[1])) # number=5
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=6
c.append(cirq.Y.on(input_qubit[1])) # number=7
c.append(cirq.Y.on(input_qubit[1])) # number=8
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq63.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 27.306452
| 77
| 0.694625
|
7881c3d3f282e4d8b1baaad205eabb2cbc17501e
| 1,165
|
py
|
Python
|
limite/telaMonstro.py
|
Castagnna/dungeons-and-dragons
|
9821a47febb0669ff0a073585e53f286fe27bf0a
|
[
"CC0-1.0"
] | null | null | null |
limite/telaMonstro.py
|
Castagnna/dungeons-and-dragons
|
9821a47febb0669ff0a073585e53f286fe27bf0a
|
[
"CC0-1.0"
] | null | null | null |
limite/telaMonstro.py
|
Castagnna/dungeons-and-dragons
|
9821a47febb0669ff0a073585e53f286fe27bf0a
|
[
"CC0-1.0"
] | null | null | null |
from limite.telaGenerica import TelaGenerica
import PySimpleGUI as sg
class TelaMonstro(TelaGenerica):
def __init__(self, controlador):
super(TelaMonstro, self).__init__(controlador)
self.init_components()
def init_components(self):
sg.ChangeLookAndFeel('Reddit')
layout = [
[sg.Button('Novo monstro', key='NOVO_MONSTRO')],
[sg.Button('Listar monstros', key='LISTA_MONSTROS')],
[sg.Button('Excluir monstro', key='EXCLUIR_MONSTRO')],
[sg.Button('Cadastrar ataque do monstro', key='CADASTRA_ATAQUE')],
[sg.Button('Excluir ataque do monstro', key='EXCLUIR_ATAQUE')],
[sg.Button('Atacar jogador', key='ATACAR_JOGADOR')],
[sg.Button('Movimentar monstro', key='MOVIMENTAR_MONSTRO')],
[sg.Button('Mostra atributos do monstro', key='MOSTRA_ATRIBUTOS_MONSTRO')],
[sg.Button('Cria monstro teste', key='CRIA_MONSTRO_TESTE')],
[sg.Button('Voltar', key='VOLTAR')]
]
janela = sg.Window('Menu Monstro', default_element_size=(40,50)).Layout(layout)
super(TelaMonstro, self).cria_janela(janela)
| 44.807692
| 87
| 0.639485
|
fb1fd41ff3b393911c9b94ef3462a9fa9a0721b1
| 880
|
py
|
Python
|
практика/K1/numb1.py
|
Tamerlanchiques/usb
|
a6f5dd74a6209363bdfc820774509104e708700f
|
[
"MIT"
] | null | null | null |
практика/K1/numb1.py
|
Tamerlanchiques/usb
|
a6f5dd74a6209363bdfc820774509104e708700f
|
[
"MIT"
] | null | null | null |
практика/K1/numb1.py
|
Tamerlanchiques/usb
|
a6f5dd74a6209363bdfc820774509104e708700f
|
[
"MIT"
] | null | null | null |
print('')
print('Добро пожаловать в мой мир, сдесь мы с тобой сыграем в игру "угадай число"')
print('Введи случайное число от 1 до 100')
print('')
mas = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100]
point = int(input('Введите число и нажмите Enter: '))
print('')
length = len(mas)
for i in range(length):
if mas[i] == point:
print("В ячейке массива номер " + str(i) + " записано число " +
str(mas[i])+" == " + str(point)+" УСПЕХ")
break
else:
print("В ячейке массива номер " + str(i) + " записано число " +
str(mas[i])+" != " + str(point))
| 51.764706
| 302
| 0.578409
|
cc05d40cc2423afdbaa7166fbe005035acefd191
| 287
|
py
|
Python
|
PicoCTF 2019/Forensics/WhitePages/solve.py
|
p-g-krish/CTF-Writeups
|
05ad6a9ecbc19ceb8890f4581dfee36f16d164aa
|
[
"MIT"
] | 51
|
2018-06-26T09:49:42.000Z
|
2019-09-14T00:06:35.000Z
|
PicoCTF 2019/Forensics/WhitePages/solve.py
|
p-g-krish/CTF-Writeups
|
05ad6a9ecbc19ceb8890f4581dfee36f16d164aa
|
[
"MIT"
] | 1
|
2018-06-29T18:40:59.000Z
|
2018-07-09T20:29:41.000Z
|
PicoCTF 2019/Forensics/WhitePages/solve.py
|
p-g-krish/CTF-Writeups
|
05ad6a9ecbc19ceb8890f4581dfee36f16d164aa
|
[
"MIT"
] | 22
|
2019-10-03T14:52:43.000Z
|
2022-01-17T08:55:10.000Z
|
#!/usr/bin/env python3
from Cryptodome.Util.number import long_to_bytes
f = open('whitepages.txt','rb').read()
out = ""
i = 0
while i < len(f):
if f[i:i+1] == b'\x20':
out+="1"
elif f[i:i+3] == b'\xe2\x80\x83':
out+="0"
i+=2
i+=1
print(long_to_bytes(int(out,2)).decode('utf-8'))
| 22.076923
| 48
| 0.602787
|
53221fe74dfb3f8d887863e6244d048a8f18ef47
| 1,241
|
py
|
Python
|
tests/conftest.py
|
m1009d/scrapli_cfg
|
0d967f22be802ba57446f83b02a0703606546ec7
|
[
"MIT"
] | 15
|
2021-03-14T18:27:15.000Z
|
2022-02-08T17:05:33.000Z
|
tests/conftest.py
|
m1009d/scrapli_cfg
|
0d967f22be802ba57446f83b02a0703606546ec7
|
[
"MIT"
] | 21
|
2021-05-23T13:58:40.000Z
|
2022-03-02T14:38:00.000Z
|
tests/conftest.py
|
m1009d/scrapli_cfg
|
0d967f22be802ba57446f83b02a0703606546ec7
|
[
"MIT"
] | 1
|
2021-10-04T12:48:14.000Z
|
2021-10-04T12:48:14.000Z
|
from pathlib import Path
import pytest
from devices import CONFIG_REPLACER, DEVICES
import scrapli_cfg
TEST_DATA_PATH = f"{Path(scrapli_cfg.__file__).parents[1]}/tests/test_data"
@pytest.fixture(scope="session")
def test_data_path():
"""Fixture to provide path to test data files"""
return TEST_DATA_PATH
@pytest.fixture(scope="session")
def test_devices_dict():
"""Fixture to return test devices dict"""
return DEVICES
@pytest.fixture(scope="session")
def expected_configs():
"""Fixture to provide expected configs"""
return {
"arista_eos": open(f"{TEST_DATA_PATH}/expected/arista_eos").read(),
"cisco_iosxe": open(f"{TEST_DATA_PATH}/expected/cisco_iosxe").read(),
"cisco_nxos": open(f"{TEST_DATA_PATH}/expected/cisco_nxos").read(),
"cisco_iosxr": open(f"{TEST_DATA_PATH}/expected/cisco_iosxr").read(),
"juniper_junos": open(f"{TEST_DATA_PATH}/expected/juniper_junos").read(),
}
@pytest.fixture(scope="session")
def config_replacer_dict():
"""Fixture to return dict of config replacer helper functions"""
return CONFIG_REPLACER
| 27.577778
| 81
| 0.714746
|
a2d2ba6abea83b3492e08e6698a7c86663780e66
| 11,495
|
py
|
Python
|
src/packagedcode/pubspec.py
|
Siddhant-K-code/scancode-toolkit
|
d1e725d3603a8f96c25f7e3f7595c68999b92a67
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 1,511
|
2015-07-01T15:29:03.000Z
|
2022-03-30T13:40:05.000Z
|
src/packagedcode/pubspec.py
|
Siddhant-K-code/scancode-toolkit
|
d1e725d3603a8f96c25f7e3f7595c68999b92a67
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 2,695
|
2015-07-01T16:01:35.000Z
|
2022-03-31T19:17:44.000Z
|
src/packagedcode/pubspec.py
|
Siddhant-K-code/scancode-toolkit
|
d1e725d3603a8f96c25f7e3f7595c68999b92a67
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 540
|
2015-07-01T15:08:19.000Z
|
2022-03-31T12:13:11.000Z
|
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import logging
import sys
import warnings
import attr
import saneyaml
from commoncode import filetype
from packageurl import PackageURL
from packagedcode import models
from packagedcode.utils import combine_expressions
TRACE = False
def logger_debug(*args):
pass
logger = logging.getLogger(__name__)
if TRACE:
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(''.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Collect data from Dart pub packages.
See https://dart.dev/tools/pub/pubspec
"""
"""
TODO:
- license is only in a LICENSE file
https://dart.dev/tools/pub/publishing#preparing-to-publish
See https://dart.dev/tools/pub/publishing#important-files
The API has these URLs (it is limited and only returns all versions of a package):
- feeds https://pub.dev/feed.atom
- all packages, paginated: https://pub.dev/api/packages
- one package, all version: https://pub.dev/api/packages/painter
- one version: https://pub.dev/api/packages/painter/versions/0.3.1
See https://github.com/dart-lang/pub/blob/master/doc/repository-spec-v2.md
"""
@attr.s()
class PubspecPackage(models.Package):
default_type = 'pubspec'
default_primary_language = 'dart'
default_web_baseurl = 'https://pub.dev/packages'
default_download_baseurl = 'https://pub.dartlang.org/packages'
default_api_baseurl = 'https://pub.dev/api/packages'
def repository_homepage_url(self, baseurl=default_web_baseurl):
return f'{baseurl}/{self.name}/versions/{self.version}'
def repository_download_url(self, baseurl=default_download_baseurl):
# A URL should be in the form of:
# https://pub.dartlang.org/packages/url_launcher/versions/6.0.9.tar.gz
# And it may resolve to:
# https://storage.googleapis.com/pub-packages/packages/http-0.13.2.tar.gz
# as seen in the pub.dev web pages
return f'{baseurl}/{self.name}/versions/{self.version}.tar.gz'
def api_data_url(self, baseurl=default_api_baseurl):
return f'{baseurl}/{self.name}/versions/{self.version}'
def compute_normalized_license(self):
return compute_normalized_license(self.declared_license)
def compute_normalized_license(declared_license, location=None):
"""
Return a normalized license expression string detected from a list of
declared license items.
    The pub specification demands a LICENSE file side-by-side and nothing else.
    See https://dart.dev/tools/pub/publishing#preparing-to-publish
"""
    # FIXME: we need a location to find the LICENSE file
# Approach:
# Find the LICENSE file
# detect on the text
# combine all expressions
if not declared_license:
return
detected_licenses = []
if detected_licenses:
return combine_expressions(detected_licenses)
@attr.s()
class PubspecYaml(PubspecPackage, models.PackageManifest):
file_patterns = ('pubspec.yaml',)
extensions = ('.yaml',)
@classmethod
def is_manifest(cls, location):
"""
Return True if the file at ``location`` is likely a manifest of this type.
"""
return file_endswith(location, 'pubspec.yaml')
@classmethod
def recognize(cls, location, compute_normalized_license=False):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
with open(location) as inp:
package_data = saneyaml.load(inp.read())
package = build_package(cls, package_data)
if package and compute_normalized_license:
package.compute_normalized_license()
yield package
def file_endswith(location, endswith):
"""
Check if the file at ``location`` ends with ``endswith`` string or tuple.
"""
return filetype.is_file(location) and location.endswith(endswith)
@attr.s()
class PubspecLock(PubspecPackage, models.PackageManifest):
file_patterns = ('pubspec.lock',)
extensions = ('.lock',)
@classmethod
def is_manifest(cls, location):
"""
Return True if the file at ``location`` is likely a manifest of this type.
"""
return file_endswith(location, 'pubspec.lock')
@classmethod
def recognize(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
with open(location) as inp:
locks_data = saneyaml.load(inp.read())
yield cls(dependencies=list(collect_locks(locks_data)))
def collect_locks(locks_data):
"""
Yield DependentPackage from locks data
The general form is
packages:
_fe_analyzer_shared:
dependency: transitive
description:
name: _fe_analyzer_shared
url: "https://pub.dartlang.org"
source: hosted
version: "22.0.0"
sdks:
dart: ">=2.12.0 <3.0.0"
"""
    # FIXME: we treat all as non-optional for now
sdks = locks_data.get('sdks') or {}
for name, version in sdks.items():
dep = build_dep(
name,
version,
scope='sdk',
is_runtime=True,
is_optional=False,
)
yield dep
packages = locks_data.get('packages') or {}
for name, details in packages.items():
version = details.get('version')
# FIXME: see https://github.com/dart-lang/pub/blob/2a08832e0b997ff92de65571b6d79a9b9099faa0/lib/src/lock_file.dart#L344
# transitive, direct main, direct dev, direct overridden.
# they do not map exactly to the pubspec scopes since transitive can be
# either main or dev
scope = details.get('dependency')
if scope == 'direct dev':
is_runtime = False
else:
is_runtime = True
desc = details.get('description') or {}
known_desc = isinstance(desc, dict)
# issue a warning for unknown data structure
warn = False
if not known_desc:
if not (isinstance(desc, str) and desc == 'flutter'):
warn = True
else:
dname = desc.get('name')
durl = desc.get('url')
dsource = details.get('source')
if (
(dname and dname != name)
or (durl and durl != 'https://pub.dartlang.org')
or (dsource and dsource not in ['hosted', 'sdk', ])
):
warn = True
if warn:
warnings.warn(
f'Dart pubspec.locks with unsupported external repo '
f'description or source: {details}',
stacklevel=1,
)
dep = build_dep(
name,
version,
scope=scope,
is_runtime=is_runtime,
is_optional=False,
)
yield dep
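# A minimal sketch of collect_locks() with an inline lock mapping. The package
# entry below is invented for demonstration and assumes the scancode-toolkit
# models (DependentPackage) are importable, as this module already requires.
def example_collect_locks():
    locks_data = {
        'sdks': {'dart': '>=2.12.0 <3.0.0'},
        'packages': {
            'path': {
                'dependency': 'direct main',
                'description': {'name': 'path', 'url': 'https://pub.dartlang.org'},
                'source': 'hosted',
                'version': '1.8.0',
            },
        },
    }
    for dep in collect_locks(locks_data):
        print(dep.purl, dep.scope, dep.requirement)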
def collect_deps(data, dependency_field_name, is_runtime=True, is_optional=False):
"""
Yield DependentPackage found in the ``dependency_field_name`` of ``data``.
Use is_runtime and is_optional in created DependentPackage.
The shape of the data is:
dependencies:
path: 1.7.0
meta: ^1.2.4
yaml: ^3.1.0
environment:
sdk: '>=2.12.0 <3.0.0'
"""
# TODO: these can be more complex for SDKs
# https://dart.dev/tools/pub/dependencies#dependency-sources
dependencies = data.get(dependency_field_name) or {}
for name, version in dependencies.items():
dep = build_dep(
name,
version,
scope=dependency_field_name,
is_runtime=is_runtime,
is_optional=is_optional,
)
yield dep
def build_dep(name, version, scope, is_runtime=True, is_optional=False):
"""
Return DependentPackage from the provided data.
"""
# TODO: these can be more complex for SDKs
# https://dart.dev/tools/pub/dependencies#dependency-sources
if isinstance(version, dict) and 'sdk' in version:
# {'sdk': 'flutter'} type of deps....
        # which is a wart that we keep as a requirement
version = ', '.join(': '.join([k, str(v)]) for k, v in version.items())
if version.replace('.', '').isdigit():
# version is pinned exactly if it is only made of dots and digits
purl = PackageURL(type='pubspec', name=name, version=version)
is_resolved = True
else:
purl = PackageURL(type='pubspec', name=name)
is_resolved = False
dep = models.DependentPackage(
purl=purl.to_string(),
requirement=version,
scope=scope,
is_runtime=is_runtime,
is_optional=is_optional,
is_resolved=is_resolved,
)
return dep
def build_package(cls, pubspec_data):
"""
Return a package object from a package data mapping or None
"""
name = pubspec_data.get('name')
version = pubspec_data.get('version')
description = pubspec_data.get('description')
homepage_url = pubspec_data.get('homepage')
declared_license = pubspec_data.get('license')
vcs_url = pubspec_data.get('repository')
download_url = pubspec_data.get('archive_url')
# Author and authors are deprecated
authors = []
author = pubspec_data.get('author')
if author:
authors.append(author)
authors.extend(pubspec_data.get('authors') or [])
parties = []
for auth in authors:
parties.append(models.Party(
type=models.party_person,
role='author',
name=auth
))
package_dependencies = []
dependencies = collect_deps(
pubspec_data,
'dependencies',
is_runtime=True,
is_optional=False,
)
package_dependencies.extend(dependencies)
dev_dependencies = collect_deps(
pubspec_data,
'dev_dependencies',
is_runtime=False,
is_optional=True,
)
package_dependencies.extend(dev_dependencies)
env_dependencies = collect_deps(
pubspec_data,
'environment',
is_runtime=True,
is_optional=False,
)
package_dependencies.extend(env_dependencies)
extra_data = {}
def add_to_extra_if_present(_key):
_value = pubspec_data.get(_key)
if _value:
extra_data[_key] = _value
add_to_extra_if_present('issue_tracker')
add_to_extra_if_present('documentation')
add_to_extra_if_present('dependencies_overrides')
add_to_extra_if_present('executables')
add_to_extra_if_present('publish_to')
package = cls(
name=name,
version=version,
vcs_url=vcs_url,
description=description,
declared_license=declared_license,
parties=parties,
homepage_url=homepage_url,
dependencies=package_dependencies,
extra_data=extra_data,
)
if not download_url:
package.download_url = package.repository_download_url()
return package
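# A minimal sketch of build_package() fed with the kind of mapping saneyaml
# produces from a pubspec.yaml. The metadata below is invented for demonstration
# and assumes the packagedcode models are importable, as this module already
# requires.
def example_build_package():
    pubspec_data = {
        'name': 'painter',
        'version': '0.3.1',
        'description': 'A demo package.',
        'homepage': 'https://example.com/painter',
        'dependencies': {'path': '^1.8.0'},
        'environment': {'sdk': '>=2.12.0 <3.0.0'},
    }
    package = build_package(PubspecYaml, pubspec_data)
    print(package.name, package.version, len(package.dependencies))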
| 29.474359
| 128
| 0.639321
|
31a235f6b095da37ba59015335d956eb2b8c258e
| 12,259
|
py
|
Python
|
QS-AD/old/Visualization_sunburst_v.0.9.py
|
miguelbarretosanz/QS-AgregartorDashboard
|
59a317e16e771b190e3d27e2aff0a3da9a553aea
|
[
"MIT"
] | null | null | null |
QS-AD/old/Visualization_sunburst_v.0.9.py
|
miguelbarretosanz/QS-AgregartorDashboard
|
59a317e16e771b190e3d27e2aff0a3da9a553aea
|
[
"MIT"
] | null | null | null |
QS-AD/old/Visualization_sunburst_v.0.9.py
|
miguelbarretosanz/QS-AgregartorDashboard
|
59a317e16e771b190e3d27e2aff0a3da9a553aea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 07:16:35 2018
@author: MiguelArturo
"""
__author__ = "Miguel Barreto Sanz"
__copyright__ = "Copyright 2018, Miguel Barreto Sanz"
__credits__ = ["Miguel Barreto Sanz"]
__license__ = "MIT"
__version__ = "0.0.9"
__maintainer__ = "Miguel Barreto Sanz"
__email__ = "miguelbarretosanz@gmail.com"
__status__ = "Development"
from math import log, sqrt
import numpy as np
import pandas as pd
from bokeh.plotting import figure
#Import modules for interactive graphics
from bokeh.layouts import row, column
from bokeh.models import HoverTool, ColumnDataSource, Select
from bokeh.io import curdoc
#Import modules for conversions to radians.
import math
#Import modules for time management and time zones
import time, datetime
#Color palette
import seaborn as sns
def make_plot(source):
"""
Plot the annular wedges
Parameters
----------
source : ColumnDataSources
Returns
-------
return : Figure
"""
hover = HoverTool(
names=["anular_wedges"],
tooltips=[
("Activity", "@Name"),
("color", "@color"),
])
plot = figure(width=700, height=700,tools=[hover], title="",x_axis_type=None, y_axis_type=None, x_range=(-420, 420), y_range=(-420, 420),
min_border=0, outline_line_color="white", background_fill_color="#ffffff",)
plot.annular_wedge(x=0, y=0, inner_radius='inner_radius', outer_radius='outer_radius',start_angle='start_angle', end_angle='end_angle',
color='color', alpha=0.9, hover_color='color',hover_line_color="black", hover_alpha = 0.5, source=source,name="anular_wedges",legend='Name')
#Fixed attributes
plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None
#plot clock
angles = 2*np.pi/24*pd.Series(list(range(0,24)))
plot.annular_wedge(0, 0, fr_inner_radius, tr_outer_radius, angles, angles, color="lightgrey")
# Plot clock labels (24 hours)
labels = np.power(10.0, np.arange(-3, 4))
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = ((tr_outer_radius + 10) - fr_inner_radius) / (minr - maxr)
b = fr_inner_radius - a * maxr
radii = a * np.sqrt(np.log(labels * 1E4)) + b
xr = radii[0]*np.cos(np.array(angles))
yr = radii[0]*np.sin(np.array(angles))
label_angle=np.array(angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
labels_24h_clock = list(range(6,-1,-1)) + list(range(23,6,-1))
plot.text(xr, yr, pd.Series(labels_24h_clock), angle=label_angle, text_font_size="9pt", text_align="center", text_baseline="middle",
text_color="lightgrey")
return plot
def get_dataset (src, unique_days_list, selected_day, df_activity_colors):
def calculate_angles(start_time, duration):
ts = time.strptime(start_time, "%H:%M:%S")
hour = (ts[3] + (ts[4]/60) + (ts[5]/3600))
hour_rad = math.radians(hour * 15.0)
        #add "pi/2" to transform radians to a 24-hour clock form
        hour_in_radians_to_plot = -hour_rad + np.pi/2
        #use the duration and convert seconds to radians
sec_rad = time.gmtime(duration)
hour_duration = (sec_rad[3] + (sec_rad[4]/60) + (sec_rad[5]/3600))
hour_rad_duration = math.radians(hour_duration * 15.0)
duration_in_radians_to_plot = (hour_in_radians_to_plot + hour_rad_duration)
start_angle= hour_in_radians_to_plot - hour_rad_duration
end_angle= duration_in_radians_to_plot - hour_rad_duration
return start_angle, end_angle
#Group all the events from the same day
index_hours_same_day = np.where(unique_days_list== datetime.datetime.strptime(selected_day, "%Y-%m-%d").date())
events_at_day = src.Start_Time_Local[list(index_hours_same_day[0][:])]
events_at_day = pd.to_datetime(events_at_day)
end_time_events_at_day = src.End_time_Local[list(index_hours_same_day[0][:])]
end_time_events_at_day = pd.to_datetime(end_time_events_at_day)
#Select start time from timestamp
start_time_list_to_plot = events_at_day.dt.time
start_time_list_to_plot_dt = start_time_list_to_plot.to_frame()
start_date = events_at_day.dt.date.to_frame()
start_date.columns = ['start_date']
#get durations and events
    #TODO: replace iloc with the column name
duration_list_to_plot = src.iloc[events_at_day.index[:],[0]]
events_list_to_plot = src.iloc[events_at_day.index[:],[3]]
#Select end time from timestamp
end_time_events_at_day_to_plot = end_time_events_at_day.dt.time
end_time_events_at_day_to_plot_dt = end_time_events_at_day_to_plot.to_frame()
end_date = end_time_events_at_day.dt.date.to_frame()
end_date.columns = ['end_date']
#Dataframe with "event duration" and "start time"
duration_list_to_plot.reset_index(drop=True, inplace=True)
events_list_to_plot.reset_index(drop=True, inplace=True)
start_time_list_to_plot_dt.reset_index(drop=True, inplace=True)
end_time_events_at_day_to_plot_dt.reset_index(drop=True, inplace=True)
start_date.reset_index(drop=True, inplace=True)
end_date.reset_index(drop=True, inplace=True)
result2 = pd.concat([duration_list_to_plot, events_list_to_plot, start_time_list_to_plot_dt,
end_time_events_at_day_to_plot_dt, start_date, end_date] , axis=1)
df_start_end_angle = pd.DataFrame(index=range(0,result2.index.size),columns=['start_angle','end_angle'])
for i in range(0, result2.index.size):
s_d = str(result2.iloc[i]['Start_Time_Local'])
du = result2.iloc[i]['Duration']
start_date = str(result2.iloc[i]['start_date'])
end_date = str(result2.iloc[i]['end_date'])
angles = calculate_angles(s_d, du)
df_start_end_angle['start_angle'][i]= angles[0]
df_start_end_angle['end_angle'][i] = angles[1]
df_inner_outer_radius = pd.DataFrame(index=range(0,result2.index.size),columns=['inner_radius','outer_radius'])
for i in range(0, result2.index.size):
df_inner_outer_radius['inner_radius'][i]= fr_inner_radius
df_inner_outer_radius['outer_radius'][i] = fr_outer_radius
#Match events with its respective color code
df_colors = pd.DataFrame(index=range(0,events_list_to_plot.index.size),columns=['color'])
for i in range(0,events_list_to_plot.index.size):
df_colors.color[i] = df_activity_colors.Colors[np.where(events_list_to_plot.Name[i] == df_activity_colors.Activities)[0][0]]
final_df = pd.concat([df_start_end_angle,df_colors,df_inner_outer_radius,events_list_to_plot] , axis=1)
return ColumnDataSource(data=final_df)
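#A standalone sketch of the hour-to-angle conversion used by calculate_angles()
#above: local time is mapped to radians at 15 degrees per hour and rotated by
#pi/2 so that 0h points up, matching the clock drawn in make_plot(). The default
#arguments are arbitrary demo values.
def example_time_to_angle(start_time="08:30:00", duration_seconds=3600):
    ts = time.strptime(start_time, "%H:%M:%S")
    hour = ts[3] + ts[4] / 60 + ts[5] / 3600
    end_angle = -math.radians(hour * 15.0) + np.pi / 2
    start_angle = end_angle - math.radians(duration_seconds / 3600.0 * 15.0)
    return start_angle, end_angle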
def update_plot(attrname, old, new):
selected_day = select_day.value
src = get_dataset(LC_data_r,unique_days_list,selected_day,df_activity_colors)
source.data.update(src.data)
def activities_color_table (array_activities):
df_activity_colors = pd.DataFrame(index=range(0,array_activities.size,1),columns=['Activities','Colors'])
#create palette
pal2 = sns.color_palette('pastel').as_hex()
pal3 = sns.color_palette("Set1", 10).as_hex()
pal4 = sns.color_palette("Set2", 10).as_hex()
pal5 = sns.color_palette("Set3", 10).as_hex()
pal6 = sns.color_palette("BrBG", 7).as_hex()
pal7 = sns.color_palette("RdBu_r", 7).as_hex()
pal8 = sns.color_palette("coolwarm", 7).as_hex()
pal9 = sns.diverging_palette(10, 220, sep=80, n=7).as_hex()
palette = np.concatenate((pal2,pal3,pal4,pal5,pal6,pal7,pal8,pal9), axis=0)
for i in range(0,array_activities.size,1):
df_activity_colors['Activities'][i]=array_activities[i]
df_activity_colors['Colors'][i] = palette[i]
return df_activity_colors
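#Example (illustrative, with made-up activity names): calling
#   activities_color_table(np.array(['Sleep', 'Work', 'Commute']))
#returns a three-row DataFrame pairing each activity with one hex colour taken, in order,
#from the concatenated seaborn palettes above.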
def until_midnidnight_dataset(LC_data):
df1 = pd.DataFrame(index=range(1,1,1),columns=['Start_Time_Local','End_time_Local'
,'Duration', 'Name','Location' ])
for i in LC_data.Start_Time_Local.index:
start = str(LC_data.Start_Time_Local[i]).split(" ")
end = str(LC_data.End_time_Local[i]).split(" ")
if start[1] != end[1]:
format = '%H:%M:%S'
sh = start[2]
eh = end[2]
hor,min,sec = eh.split(":")
eh_dur = int(hor)*3600 + 60*int(min) + int(sec)
mn = '23:59:59'
hours_one_day = datetime.datetime.strptime(mn, format) - datetime.datetime.strptime(sh, format)
row_a_Start_Time_Local =(start[1] + " " + sh)
row_a_End_time_Local = (start[1] + " " +mn)
row_a_Duration = hours_one_day.seconds
row_a_Name = LC_data.Name[i]
row_a_Location = LC_data.Location[i]
row_b_Start_Time_Local =(end[1] + " " + "00:00:00")
row_b_End_time_Local = (end[1] + " " + end[2])
row_b_Duration = eh_dur
row_b_Name = LC_data.Name[i]
row_b_Location = LC_data.Location[i]
df = pd.DataFrame({'Start_Time_Local': [row_a_Start_Time_Local,row_b_Start_Time_Local],
'End_time_Local' : [row_a_End_time_Local,row_b_End_time_Local],
'Duration' : [row_a_Duration,row_b_Duration],
'Name' : [row_a_Name,row_b_Name ],
'Location' : [row_a_Location,row_b_Location]
})
df1 = pd.concat([df,df1] , axis=0)
else:
sh = start[2]
eh = end[2]
row_a_Start_Time_Local = (start[1] + " " + sh)
row_a_End_time_Local = (end[1] + " " + eh )
row_a_Duration = LC_data.Duration[i]
row_a_Name = LC_data.Name[i]
row_a_Location = LC_data.Location[i]
df = pd.DataFrame({'Start_Time_Local': [row_a_Start_Time_Local],
'End_time_Local' : [row_a_End_time_Local],
'Duration' : [row_a_Duration],
'Name' : [row_a_Name],
'Location' : [row_a_Location]
})
df1 = pd.concat([df,df1] , axis=0)
return df1
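#Worked example (illustrative): an event that starts at 22:00:00 and ends at 01:30:00 on the
#next day is split into two rows, 22:00:00-23:59:59 on the first date (Duration 7199 s) and
#00:00:00-01:30:00 on the second (Duration 5400 s), so no row crosses midnight.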
#Fixed plot attributes
fr_inner_radius = 140 #First ring (fr) parameters
fr_outer_radius = 200
sr_inner_radius = fr_outer_radius+2 #Second ring (sr) parameters
sr_outer_radius = fr_outer_radius+52
tr_inner_radius = fr_outer_radius+52+2 #Third ring (tr) parameters
tr_outer_radius = fr_outer_radius+52+2+42
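#With these values the three rings span radii 140-200, 202-252 and 254-296 respectively.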
#LIFE CYCLE DATA
#Load Life Cycle data
LC_data = pd.read_csv('../data/Life Cycle/example/LC_export 3.csv')
#Column names were changed because the originals contain spaces and special characters
#that complicate string manipulation, for instance ' NAME' or 'START DATE(UTC)'.
LC_data.columns = ['Start_Date_UTC', 'End_Date_UTC','Start_Time_Local','End_time_Local','Duration','Name','Location']
#Create a new dataframe in which no event spans more than one calendar day (events crossing midnight are split).
LC_data_r = until_midnidnight_dataset(LC_data)
LC_data_r['Start_Time_Local'] = pd.to_datetime(LC_data_r.Start_Time_Local)
#Extract the calendar date of each event's start timestamp
unique_days_list = LC_data_r.Start_Time_Local.dt.date
#Create a dataframe to store unique_days_list
columns_ud = ['Unique_Days']
New_data_days_unique = pd.DataFrame(unique_days_list.index,columns=columns_ud)
for i in New_data_days_unique.index:
New_data_days_unique['Unique_Days'][i] = unique_days_list[i].strftime('%Y-%m-%d')
#Color per activity
df_activity_colors = activities_color_table(LC_data_r.Name.unique())
#ACTIVITY WATCH DATA
#List to be shown in the "select button"
List_to_select_days = sorted(list(set(New_data_days_unique['Unique_Days'])))
selected_day='2017-01-26'
source=get_dataset(LC_data_r,unique_days_list,selected_day,df_activity_colors)
plot = make_plot(source)
#Day selection widget
select_day = Select(title="Day", value=selected_day, options=List_to_select_days)
select_day.on_change('value', update_plot)
controls = column(select_day)
curdoc().add_root(row(plot, controls))
curdoc().title = "Sunburst"
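#Usage note: because the layout is attached to curdoc(), this script is intended to run as a
#Bokeh server application, e.g. with a command of the form
#   bokeh serve --show <this_script>.py
#where <this_script>.py is a placeholder for however this file is saved.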
| avg_line_length: 42.272414 | max_line_length: 163 | alphanum_fraction: 0.669549 |
hexsha: 22d8b41a9d71f01b3c6fc17684ebc2eef9c41ba8 | size: 1,446 | ext: bzl | lang: Python
max_stars_repo_path: packer/deps.bzl | max_stars_repo_name: zacker330/bazel-distribution | max_stars_repo_head_hexsha: 148f7052f6b63cdfa035e95f8a140a2844696112 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 97 | max_stars_repo_stars_event_min_datetime: 2019-02-12T12:56:55.000Z | max_stars_repo_stars_event_max_datetime: 2021-04-10T13:15:50.000Z
max_issues_repo_path: packer/deps.bzl | max_issues_repo_name: zacker330/bazel-distribution | max_issues_repo_head_hexsha: 148f7052f6b63cdfa035e95f8a140a2844696112 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 110 | max_issues_repo_issues_event_min_datetime: 2019-01-18T12:57:19.000Z | max_issues_repo_issues_event_max_datetime: 2021-04-12T19:53:23.000Z
max_forks_repo_path: packer/deps.bzl | max_forks_repo_name: zacker330/bazel-distribution | max_forks_repo_head_hexsha: 148f7052f6b63cdfa035e95f8a140a2844696112 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 29 | max_forks_repo_forks_event_min_datetime: 2019-07-15T16:48:17.000Z | max_forks_repo_forks_event_max_datetime: 2021-04-09T16:11:19.000Z
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
def deps():
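    # Download prebuilt Packer 1.7.4 release archives and expose the extracted
    # `packer` binary as @packer_osx//:packer and @packer_linux//:packer.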
http_archive(
name = "packer_osx",
url = "https://releases.hashicorp.com/packer/1.7.4/packer_1.7.4_darwin_amd64.zip",
sha256 = "7fe8e59d9c456e98e52c9dcbca53009659da31f4fa2bf0dda5af43ebcc688685",
build_file_content = 'exports_files(["packer"])'
)
http_archive(
name = "packer_linux",
url = "https://releases.hashicorp.com/packer/1.7.4/packer_1.7.4_linux_amd64.zip",
sha256 = "3660064a56a174a6da5c37ee6b36107098c6b37e35cc84feb2f7f7519081b1b0",
build_file_content = 'exports_files(["packer"])'
)
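# Illustrative usage sketch (an assumption, not taken from this file): a WORKSPACE in the
# same repository would typically wire these archives up with
#
#   load("//packer:deps.bzl", "deps")
#   deps()
#
# after which build rules can reference the binaries via @packer_osx//:packer or
# @packer_linux//:packer.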
| avg_line_length: 40.166667 | max_line_length: 90 | alphanum_fraction: 0.733057 |