hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
12c28cc46a7dcf0d0ebc53795dbe76d01de5679c | 1,254 | py | Python | tests/test_typing.py | lschmelzeisen/nasty-utils | d2daf2faed35d7028bf0adc7ae5a321ca3b9b4ed | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-23T19:18:45.000Z | 2020-05-23T19:18:45.000Z | tests/test_typing.py | lschmelzeisen/nasty-utils | d2daf2faed35d7028bf0adc7ae5a321ca3b9b4ed | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-05-09T08:09:20.000Z | 2021-05-09T08:09:20.000Z | tests/test_typing.py | lschmelzeisen/nasty-utils | d2daf2faed35d7028bf0adc7ae5a321ca3b9b4ed | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #
# Copyright 2019-2020 Lukas Schmelzeisen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from logging import FileHandler, Handler
from sys import version_info
from typing import Sequence
from pytest import raises
from nasty_utils import checked_cast, safe_issubclass
| 29.162791 | 74 | 0.733652 | #
# Copyright 2019-2020 Lukas Schmelzeisen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from logging import FileHandler, Handler
from sys import version_info
from typing import Sequence
from pytest import raises
from nasty_utils import checked_cast, safe_issubclass
def test_checked_cast() -> None:
    """checked_cast returns the value unchanged when the type matches and asserts otherwise."""
    value: object = 5
    assert checked_cast(int, value) == value
    # A float is not an int, so the cast must fail loudly.
    with raises(AssertionError):
        checked_cast(int, 3.5)
def test_safe_issubclass() -> None:
    """safe_issubclass mirrors issubclass but returns False instead of raising."""
    assert safe_issubclass(FileHandler, Handler)
    assert not safe_issubclass(Handler, FileHandler)
    if version_info < (3, 7):
        return
    # On 3.7+ plain issubclass raises on subscripted generics ...
    generic = Sequence[int]
    with raises(TypeError):
        issubclass(generic, Sequence)
    # ... while the safe variant simply answers False.
    assert not safe_issubclass(generic, Sequence)
| 434 | 0 | 46 |
ec2866ff5024ac73b3d31a1f531f8625cde8f60f | 485 | py | Python | froide/foirequest/migrations/0009_foirequest_closed.py | xenein/froide | 59bd3eeded3c3ed00fbc858fe20bfea99c8dbefa | [
"MIT"
] | 198 | 2016-12-03T22:42:55.000Z | 2022-03-25T15:08:36.000Z | froide/foirequest/migrations/0009_foirequest_closed.py | xenein/froide | 59bd3eeded3c3ed00fbc858fe20bfea99c8dbefa | [
"MIT"
] | 264 | 2016-11-30T18:53:17.000Z | 2022-03-17T11:34:18.000Z | froide/foirequest/migrations/0009_foirequest_closed.py | xenein/froide | 59bd3eeded3c3ed00fbc858fe20bfea99c8dbefa | [
"MIT"
] | 42 | 2016-12-22T04:08:27.000Z | 2022-02-26T08:30:38.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-01 14:49
from __future__ import unicode_literals
from django.db import migrations, models
| 23.095238 | 79 | 0.630928 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-01 14:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a boolean ``closed`` field (default ``False``) to the ``foirequest`` model."""
    dependencies = [
        ("foirequest", "0008_auto_20171124_1508"),
    ]
    operations = [
        migrations.AddField(
            model_name="foirequest",
            name="closed",
            field=models.BooleanField(default=False, verbose_name="is closed"),
        ),
    ]
| 0 | 306 | 23 |
826a663afa3c9227a3c7b8143bc752352b87b5f3 | 2,244 | py | Python | pipedown/Tools/grpc_cisco_python/tests/test_cisco_grpc.py | cisco-ie/Pipedown | 5f37435f63ccb5b1f157e333fdd441bdc0b1c8f7 | [
"Apache-2.0"
] | 1 | 2017-05-22T11:26:32.000Z | 2017-05-22T11:26:32.000Z | pipedown/Tools/grpc_cisco_python/tests/test_cisco_grpc.py | cisco-ie/Pipedown | 5f37435f63ccb5b1f157e333fdd441bdc0b1c8f7 | [
"Apache-2.0"
] | null | null | null | pipedown/Tools/grpc_cisco_python/tests/test_cisco_grpc.py | cisco-ie/Pipedown | 5f37435f63ccb5b1f157e333fdd441bdc0b1c8f7 | [
"Apache-2.0"
] | 1 | 2018-01-17T21:44:52.000Z | 2018-01-17T21:44:52.000Z | import unittest
import sys
sys.path.insert(0, '../')
from client.cisco_grpc_client_insecure import CiscoGRPCClient
import json
if __name__ == '__main__':
unittest.main()
| 48.782609 | 478 | 0.62656 | import unittest
import sys
sys.path.insert(0, '../')
from client.cisco_grpc_client_insecure import CiscoGRPCClient
import json
class CiscoGRPCClientcase(unittest.TestCase):
    """Integration tests for CiscoGRPCClient.

    NOTE(review): these are live integration tests -- they require a
    gRPC-enabled router reachable at 192.168.1.2:57777 with the
    vagrant/vagrant credentials, and are not runnable in isolation.
    """
    def setUp(self):
        # Capture the current static-routing config so tearDown can restore it.
        self.client = CiscoGRPCClient('192.168.1.2', 57777, 10, 'vagrant', 'vagrant')
        self.maxDiff = None
        path = '{"Cisco-IOS-XR-ip-static-cfg:router-static": [null]}'
        self._result = self.client.getconfig(path)
    def test_get(self):
        """getconfig should return a JSON-parsable string."""
        path = '{"Cisco-IOS-XR-ip-static-cfg:router-static": [null]}'
        self._result = self.client.getconfig(path)
        try:
            json_object = json.loads(self._result)
        except ValueError as e:
            self.assertTrue(False, e)
        self.assertTrue(True)
    def test_replace(self):
        """replaceconfig should apply a full static-route config with no errors."""
        yangjsonreplace = '{"Cisco-IOS-XR-ip-static-cfg:router-static": {"default-vrf": {"address-family": {"vrfipv4": {"vrf-unicast": {"vrf-prefixes": {"vrf-prefix": [{"prefix": "0.0.0.0", "vrf-route": {"vrf-next-hop-table": {"vrf-next-hop-next-hop-address": [{"next-hop-address": "10.0.2.2"}]}}, "prefix-length": 0}, {"prefix": "1.2.3.5", "vrf-route": {"vrf-next-hop-table": {"vrf-next-hop-next-hop-address": [{"next-hop-address": "10.0.2.2"}]}}, "prefix-length": 32}]}}}}}}}'
        response = self.client.replaceconfig(yangjsonreplace)
        self.assertEqual(response.errors, u'')
    def test_merge(self):
        """mergeconfig should merge a single static route with no errors."""
        yangjsonmerge = '{"Cisco-IOS-XR-ip-static-cfg:router-static": {"default-vrf": {"address-family": {"vrfipv4": {"vrf-unicast": {"vrf-prefixes": {"vrf-prefix": [{"prefix": "1.2.3.6", "vrf-route": {"vrf-next-hop-table": {"vrf-next-hop-next-hop-address": [{"next-hop-address": "10.0.2.2"}]}}, "prefix-length": 32}]}}}}}}}'
        response = self.client.mergeconfig(yangjsonmerge)
        self.assertEqual(response.errors, u'')
    def test_get_oper(self):
        """getoper should return JSON-parsable operational (CDP) data."""
        path = '{"Cisco-IOS-XR-cdp-oper:cdp": [null]}'
        self._result = self.client.getoper(path)
        try:
            json_object = json.loads(self._result)
        except ValueError as e:
            self.assertTrue(False, e)
        self.assertTrue(True)
    def tearDown(self):
        # Restore the configuration that was captured in setUp.
        response = self.client.replaceconfig(self._result)
# Allow running this suite directly as a script.
if __name__ == '__main__':
    unittest.main()
| 1,862 | 24 | 183 |
5367f5bc6cd6abd77796f8aca47609a14b59d7cf | 2,331 | py | Python | exchange/rest/async_client.py | raphaelvrosa/md2-nfv | b97e28fad3236fb76344ac3ac5194ef4a94494d0 | [
"Apache-2.0"
] | null | null | null | exchange/rest/async_client.py | raphaelvrosa/md2-nfv | b97e28fad3236fb76344ac3ac5194ef4a94494d0 | [
"Apache-2.0"
] | null | null | null | exchange/rest/async_client.py | raphaelvrosa/md2-nfv | b97e28fad3236fb76344ac3ac5194ef4a94494d0 | [
"Apache-2.0"
] | null | null | null | import logging
logger = logging.getLogger(__name__)
import requests
import _thread
# Manual smoke test: post a JSON config message to a locally running peer
# endpoint, then give the fire-and-forget worker thread time to run.
if __name__ == '__main__':
    import json
    import time
    client = WebClient()
    # Sample inter-domain configuration payload.
    msg = {
        'type': 'config',
        'domains': [
            {'id': 'B',
             'address': 'http://127.0.0.1:8882'},
        ]
    }
    msg_json = json.dumps(msg)
    params = {'message-id': '', 'call-back': ''}
    suffix = '/' + 'A' + '/peer'
    url = 'http://127.0.0.1:9090' + suffix
    kwargs = {'params': params}
    print('send msg')
    # send_msg always returns True; the HTTP request runs on a worker thread.
    print(client.send_msg('post', url, msg_json, **kwargs))
    time.sleep(1)
| 28.084337 | 94 | 0.590305 | import logging
logger = logging.getLogger(__name__)
import requests
import _thread
def request(_type, url, message, kwargs):
    """Dispatch one HTTP request and return the response body.

    :param _type: HTTP verb as a lowercase string ('post', 'put', 'get');
        anything else falls through to DELETE.
    :param url: target URL.
    :param message: request body (sent as ``data=``).
    :param kwargs: extra keyword arguments for requests, passed as a plain
        dict (this function is invoked via ``_thread.start_new_thread``,
        which only takes a positional tuple).
    :return: ``response.text`` on a processable response, otherwise None.
    """
    logger.debug('sending msg %s to url %s', _type, url)
    response = None
    try:
        if _type == 'post':
            response = requests.post(url, headers=WebClient.headers, data=message, **kwargs)
        elif _type == 'put':
            response = requests.put(url, headers=WebClient.headers, data=message, **kwargs)
        elif _type == 'get':
            response = requests.get(url, headers=WebClient.headers, data=message, **kwargs)
        else:
            response = requests.delete(url, headers=WebClient.headers, data=message, **kwargs)
    except requests.RequestException as exception:
        logger.info('Requests fail - exception %s', exception)
        response = None
    # Fix: the post-processing used to live in a `finally:` block that
    # contained a `return` -- returning from `finally` silently suppresses
    # any in-flight exception (pylint W0150). Running it after try/except
    # keeps the success/RequestException behavior identical.
    reply = __process_msg_response(response)
    logger.info('Requests - response %s', response)
    if reply:
        return reply.text
    return reply
def __process_msg_response(response):
    """Validate a requests response object.

    :param response: a requests Response (or None / falsy).
    :return: the response if it exists and its status is OK, else None.
    """
    try:
        if response:
            # Raises requests.HTTPError on 4xx/5xx status codes.
            response.raise_for_status()
        else:
            response = None
    except Exception as exception:
        logging.info("Response exception %s", exception)
        response = None
    # Fix: previously this was `finally: return response`, which would also
    # swallow exceptions that escape the handler (e.g. KeyboardInterrupt).
    return response
class WebClient():
    """Fire-and-forget HTTP client: each message is sent on its own thread."""
    # Default headers for every request issued through this client.
    headers = {'Content-Type': 'application/json'}
    def send_msg(self, _type, url, message, **kwargs):
        """Spawn a worker thread that performs the request via ``request``.

        :param _type: HTTP verb string understood by ``request``.
        :param url: target URL.
        :param message: request body.
        :return: always True (the request outcome is only logged, not returned).
        """
        try:
            _thread.start_new_thread(request, (_type, url, message, kwargs))
        # Fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        except Exception:
            logger.debug("Error: unable to start thread")
        return True
# Manual smoke test: post a JSON config message to a locally running peer
# endpoint, then give the fire-and-forget worker thread time to run.
if __name__ == '__main__':
    import json
    import time
    client = WebClient()
    # Sample inter-domain configuration payload.
    msg = {
        'type': 'config',
        'domains': [
            {'id': 'B',
             'address': 'http://127.0.0.1:8882'},
        ]
    }
    msg_json = json.dumps(msg)
    params = {'message-id': '', 'call-back': ''}
    suffix = '/' + 'A' + '/peer'
    url = 'http://127.0.0.1:9090' + suffix
    kwargs = {'params': params}
    print('send msg')
    # send_msg always returns True; the HTTP request runs on a worker thread.
    print(client.send_msg('post', url, msg_json, **kwargs))
    time.sleep(1)
| 1,573 | 75 | 69 |
7dc7845e99e4bdaa8364fbeffae232be1212558e | 12,092 | py | Python | cogdl/datasets/kg_data.py | Aliang-CN/cogdl | 01b1dbf7528240457a5fbe8c24b8271e805dc7ec | [
"MIT"
] | 1 | 2021-03-17T07:23:51.000Z | 2021-03-17T07:23:51.000Z | cogdl/datasets/kg_data.py | yingyukexiansheng/cogdl | cf594cdb3a97f45333d08c937205d1a691828a33 | [
"MIT"
] | null | null | null | cogdl/datasets/kg_data.py | yingyukexiansheng/cogdl | cf594cdb3a97f45333d08c937205d1a691828a33 | [
"MIT"
] | null | null | null | import os.path as osp
import numpy as np
import torch
from cogdl.data import Data, Dataset
from cogdl.utils import download_url
from cogdl.datasets import register_dataset
@register_dataset("fb13")
@register_dataset("fb15k")
@register_dataset("fb15k237")
@register_dataset("wn18")
@register_dataset("wn18rr")
@register_dataset("fb13s")
| 33.871148 | 114 | 0.61702 | import os.path as osp
import numpy as np
import torch
from cogdl.data import Data, Dataset
from cogdl.utils import download_url
from cogdl.datasets import register_dataset
class BidirectionalOneShotIterator(object):
    """Endless iterator that alternates between two dataloaders.

    Odd steps yield from the tail-batch loader, even steps from the
    head-batch loader; each underlying loader is cycled forever.
    """
    def __init__(self, dataloader_head, dataloader_tail):
        self.iterator_head = self.one_shot_iterator(dataloader_head)
        self.iterator_tail = self.one_shot_iterator(dataloader_tail)
        self.step = 0
    def __iter__(self):
        # Fix: the class defined __next__ but not __iter__, so it could only
        # be consumed via next() and not in for-loops / zip(); completing the
        # iterator protocol is backward-compatible.
        return self
    def __next__(self):
        self.step += 1
        if self.step % 2 == 0:
            data = next(self.iterator_head)
        else:
            data = next(self.iterator_tail)
        return data
    @staticmethod
    def one_shot_iterator(dataloader):
        """
        Transform a PyTorch Dataloader into python iterator
        """
        # Restart the loader whenever it is exhausted (infinite stream).
        while True:
            for data in dataloader:
                yield data
class TestDataset(torch.utils.data.Dataset):
    """Evaluation dataset for link prediction: for each test triple it ranks
    one true entity against every candidate entity, masking other known
    true triples with a -1 filter bias."""
    def __init__(self, triples, all_true_triples, nentity, nrelation, mode):
        # mode is 'head-batch' (corrupt head) or 'tail-batch' (corrupt tail).
        self.len = len(triples)
        self.triple_set = set(all_true_triples)
        self.triples = triples
        self.nentity = nentity
        self.nrelation = nrelation
        self.mode = mode
    def __len__(self):
        return self.len
    def __getitem__(self, idx):
        """Return (positive_sample, all candidate entities, filter bias, mode)."""
        head, relation, tail = self.triples[idx]
        if self.mode == "head-batch":
            # (-1, head) marks candidates that form another true triple so
            # they can be filtered out of the ranking.
            tmp = [
                (0, rand_head) if (rand_head, relation, tail) not in self.triple_set else (-1, head)
                for rand_head in range(self.nentity)
            ]
            # The true head itself must not be filtered (overwrite after the fact).
            tmp[head] = (0, head)
        elif self.mode == "tail-batch":
            tmp = [
                (0, rand_tail) if (head, relation, rand_tail) not in self.triple_set else (-1, tail)
                for rand_tail in range(self.nentity)
            ]
            tmp[tail] = (0, tail)
        else:
            raise ValueError("negative batch mode %s not supported" % self.mode)
        tmp = torch.LongTensor(tmp)
        filter_bias = tmp[:, 0].float()
        negative_sample = tmp[:, 1]
        positive_sample = torch.LongTensor((head, relation, tail))
        return positive_sample, negative_sample, filter_bias, self.mode
    @staticmethod
    def collate_fn(data):
        """Stack per-sample tensors into batch tensors; mode is shared."""
        positive_sample = torch.stack([_[0] for _ in data], dim=0)
        negative_sample = torch.stack([_[1] for _ in data], dim=0)
        filter_bias = torch.stack([_[2] for _ in data], dim=0)
        mode = data[0][3]
        return positive_sample, negative_sample, filter_bias, mode
class TrainDataset(torch.utils.data.Dataset):
    """Training dataset for link prediction with negative sampling.

    Each item yields one positive triple plus ``negative_sample_size``
    corrupted entities (heads or tails depending on ``mode``) that are
    guaranteed not to form true triples, along with a word2vec-style
    subsampling weight.
    """
    def __init__(self, triples, nentity, nrelation, negative_sample_size, mode):
        self.len = len(triples)
        self.triples = triples
        self.triple_set = set(triples)
        self.nentity = nentity
        self.nrelation = nrelation
        self.negative_sample_size = negative_sample_size
        self.mode = mode
        # Frequency counts and true head/tail lookup tables are precomputed
        # once so sampling in __getitem__ stays cheap.
        self.count = self.count_frequency(triples)
        self.true_head, self.true_tail = self.get_true_head_and_tail(self.triples)
    def __len__(self):
        return self.len
    def __getitem__(self, idx):
        """Return (positive_sample, negative_sample, subsampling_weight, mode)."""
        positive_sample = self.triples[idx]
        head, relation, tail = positive_sample
        # Inverse-sqrt-frequency weight (see count_frequency for the
        # (tail, -relation-1) encoding of the reverse direction).
        subsampling_weight = self.count[(head, relation)] + self.count[(tail, -relation - 1)]
        subsampling_weight = torch.sqrt(1 / torch.Tensor([subsampling_weight]))
        negative_sample_list = []
        negative_sample_size = 0
        # Rejection sampling: oversample by 2x, drop candidates that form a
        # known true triple, and repeat until enough negatives are collected.
        while negative_sample_size < self.negative_sample_size:
            negative_sample = np.random.randint(self.nentity, size=self.negative_sample_size * 2)
            if self.mode == "head-batch":
                mask = np.in1d(negative_sample, self.true_head[(relation, tail)], assume_unique=True, invert=True)
            elif self.mode == "tail-batch":
                mask = np.in1d(negative_sample, self.true_tail[(head, relation)], assume_unique=True, invert=True)
            else:
                raise ValueError("Training batch mode %s not supported" % self.mode)
            negative_sample = negative_sample[mask]
            negative_sample_list.append(negative_sample)
            negative_sample_size += negative_sample.size
        negative_sample = np.concatenate(negative_sample_list)[: self.negative_sample_size]
        negative_sample = torch.LongTensor(negative_sample)
        positive_sample = torch.LongTensor(positive_sample)
        return positive_sample, negative_sample, subsampling_weight, self.mode
    @staticmethod
    def collate_fn(data):
        """Stack per-sample tensors into batch tensors; mode is shared."""
        positive_sample = torch.stack([_[0] for _ in data], dim=0)
        negative_sample = torch.stack([_[1] for _ in data], dim=0)
        subsample_weight = torch.cat([_[2] for _ in data], dim=0)
        mode = data[0][3]
        return positive_sample, negative_sample, subsample_weight, mode
    @staticmethod
    def count_frequency(triples, start=4):
        """
        Get frequency of a partial triple like (head, relation) or (relation, tail)
        The frequency will be used for subsampling like word2vec
        """
        # `start` smooths the counts; (tail, -relation-1) encodes the
        # reverse direction without colliding with forward keys.
        count = {}
        for head, relation, tail in triples:
            if (head, relation) not in count:
                count[(head, relation)] = start
            else:
                count[(head, relation)] += 1
            if (tail, -relation - 1) not in count:
                count[(tail, -relation - 1)] = start
            else:
                count[(tail, -relation - 1)] += 1
        return count
    @staticmethod
    def get_true_head_and_tail(triples):
        """
        Build a dictionary of true triples that will
        be used to filter these true triples for negative sampling
        """
        true_head = {}
        true_tail = {}
        for head, relation, tail in triples:
            if (head, relation) not in true_tail:
                true_tail[(head, relation)] = []
            true_tail[(head, relation)].append(tail)
            if (relation, tail) not in true_head:
                true_head[(relation, tail)] = []
            true_head[(relation, tail)].append(head)
        # Deduplicate and convert to arrays for fast np.in1d membership tests.
        for relation, tail in true_head:
            true_head[(relation, tail)] = np.array(list(set(true_head[(relation, tail)])))
        for head, relation in true_tail:
            true_tail[(head, relation)] = np.array(list(set(true_tail[(head, relation)])))
        return true_head, true_tail
def read_triplet_data(folder):
    """Parse OpenKE-style train/valid/test triple files from *folder*.

    Each file starts with a count line, followed by lines of
    ``head tail relation`` ids.  Returns a Data graph (edge_index/edge_attr
    plus boolean split masks), the triples as (head, relation, tail)
    tuples, the start index of each split within ``triples``, and the
    number of distinct entities and relations.
    """
    filenames = ["train2id.txt", "valid2id.txt", "test2id.txt"]
    count = 0
    edge_index = []
    edge_attr = []
    count_list = []  # cumulative triple count after each split file
    triples = []
    num_entities = 0
    num_relations = 0
    entity_dic = {}
    relation_dic = {}
    for filename in filenames:
        with open(osp.join(folder, filename), "r") as f:
            # First line is the declared triple count; the parser ignores it.
            _ = int(f.readline().strip())
            if "train" in filename:
                train_start_idx = len(triples)
            elif "valid" in filename:
                valid_start_idx = len(triples)
            elif "test" in filename:
                test_start_idx = len(triples)
            for line in f:
                # File columns are: head entity, tail entity, relation.
                items = line.strip().split()
                edge_index.append([int(items[0]), int(items[1])])
                edge_attr.append(int(items[2]))
                triples.append((int(items[0]), int(items[2]), int(items[1])))
                if items[0] not in entity_dic:
                    entity_dic[items[0]] = num_entities
                    num_entities += 1
                if items[1] not in entity_dic:
                    entity_dic[items[1]] = num_entities
                    num_entities += 1
                if items[2] not in relation_dic:
                    relation_dic[items[2]] = num_relations
                    num_relations += 1
                count += 1
        count_list.append(count)
    edge_index = torch.LongTensor(edge_index).t()
    edge_attr = torch.LongTensor(edge_attr)
    data = Data()
    data.edge_index = edge_index
    data.edge_attr = edge_attr
    def generate_mask(start, end):
        # Boolean mask over all triples, True only on [start, end).
        mask = torch.BoolTensor(count)
        mask[:] = False
        mask[start:end] = True
        return mask
    data.train_mask = generate_mask(0, count_list[0])
    data.val_mask = generate_mask(count_list[0], count_list[1])
    data.test_mask = generate_mask(count_list[1], count_list[2])
    return data, triples, train_start_idx, valid_start_idx, test_start_idx, num_entities, num_relations
class KnowledgeGraphDataset(Dataset):
    """Base class for OpenKE-hosted knowledge-graph benchmarks.

    Downloads the raw triple files on demand, parses them via
    read_triplet_data in process(), and caches the graph plus split/size
    metadata as two torch files.
    """
    # Base URL of the OpenKE benchmark repository; subclasses may override.
    url = "https://raw.githubusercontent.com/thunlp/OpenKE/OpenKE-PyTorch/benchmarks"
    def __init__(self, root, name):
        self.name = name
        super(KnowledgeGraphDataset, self).__init__(root)
        # Load the cached artifacts produced by process().
        self.data = torch.load(self.processed_paths[0])
        triple_config = torch.load(self.processed_paths[1])
        self.triples = triple_config["triples"]
        self._train_start_index = triple_config["train_start_index"]
        self._valid_start_index = triple_config["valid_start_index"]
        self._test_start_index = triple_config["test_start_index"]
        self._num_entities = triple_config["num_entities"]
        self._num_relations = triple_config["num_relations"]
    @property
    def raw_file_names(self):
        names = ["train2id.txt", "valid2id.txt", "test2id.txt"]
        return names
    @property
    def processed_file_names(self):
        return ["data.pt", "triple_config.pt"]
    @property
    def train_start_idx(self):
        # Index into self.triples where the training split begins.
        return self._train_start_index
    @property
    def valid_start_idx(self):
        return self._valid_start_index
    @property
    def test_start_idx(self):
        return self._test_start_index
    @property
    def num_entities(self):
        return self._num_entities
    @property
    def num_relations(self):
        return self._num_relations
    def get(self, idx):
        # The whole graph is a single Data object.
        assert idx == 0
        return self.data
    def download(self):
        # Fetch each raw triple file from <url>/<dataset name>/<file>.
        for name in self.raw_file_names:
            download_url("{}/{}/{}".format(self.url, self.name, name), self.raw_dir)
    def process(self):
        """Parse the raw files and cache graph + triple metadata to disk."""
        (
            data,
            triples,
            train_start_index,
            valid_start_index,
            test_start_index,
            num_entities,
            num_relations,
        ) = read_triplet_data(self.raw_dir)
        torch.save(data, self.processed_paths[0])
        triple_config = {
            "triples": triples,
            "train_start_index": train_start_index,
            "valid_start_index": valid_start_index,
            "test_start_index": test_start_index,
            "num_entities": num_entities,
            "num_relations": num_relations,
        }
        torch.save(triple_config, self.processed_paths[1])
@register_dataset("fb13")
class FB13Datset(KnowledgeGraphDataset):
    """FB13 knowledge-graph benchmark, cached under ``data/FB13``."""
    def __init__(self):
        name = "FB13"
        super(FB13Datset, self).__init__(osp.join("data", name), name)
@register_dataset("fb15k")
class FB15kDatset(KnowledgeGraphDataset):
    """FB15K knowledge-graph benchmark, cached under ``data/FB15K``."""
    def __init__(self):
        name = "FB15K"
        super(FB15kDatset, self).__init__(osp.join("data", name), name)
@register_dataset("fb15k237")
class FB15k237Datset(KnowledgeGraphDataset):
    """FB15K-237 knowledge-graph benchmark, cached under ``data/FB15K237``."""
    def __init__(self):
        name = "FB15K237"
        super(FB15k237Datset, self).__init__(osp.join("data", name), name)
@register_dataset("wn18")
class WN18Datset(KnowledgeGraphDataset):
    """WN18 knowledge-graph benchmark, cached under ``data/WN18``."""
    def __init__(self):
        name = "WN18"
        super(WN18Datset, self).__init__(osp.join("data", name), name)
@register_dataset("wn18rr")
class WN18RRDataset(KnowledgeGraphDataset):
    """WN18RR knowledge-graph benchmark, cached under ``data/WN18RR``."""
    def __init__(self):
        name = "WN18RR"
        super(WN18RRDataset, self).__init__(osp.join("data", name), name)
@register_dataset("fb13s")
class FB13SDatset(KnowledgeGraphDataset):
    """FB13-S benchmark, cached under ``data/FB13-S`` and downloaded from a
    different mirror than the other OpenKE datasets."""
    url = "https://raw.githubusercontent.com/cenyk1230/test-data/main"
    def __init__(self):
        name = "FB13-S"
        super(FB13SDatset, self).__init__(osp.join("data", name), name)
bfe905b1326f3ed89fb3548f48848ac7a0505c7b | 916 | py | Python | submissions/4sum/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | submissions/4sum/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | 1 | 2022-03-04T20:24:32.000Z | 2022-03-04T20:31:58.000Z | submissions/4sum/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/4sum
from collections import defaultdict
| 28.625 | 69 | 0.434498 | # https://leetcode.com/problems/4sum
from collections import defaultdict
class Solution:
    """LeetCode 18 (4Sum)."""
    def fourSum(self, nums, target):
        """Return all unique quadruplets of values in *nums* summing to *target*.

        Sort + two-pointer scan: O(n^3) time, O(1) extra space, replacing
        the previous pair-hash approach that used O(n^2) memory and could
        scan candidate pair lists in up to O(n^4) time.
        Quadruplet order in the result is unspecified (as in the original).
        """
        nums = sorted(nums)
        n = len(nums)
        res = []
        for i in range(n - 3):
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # skip duplicate choice of the first value
            for j in range(i + 1, n - 2):
                if j > i + 1 and nums[j] == nums[j - 1]:
                    continue  # skip duplicate choice of the second value
                want = target - nums[i] - nums[j]
                lo, hi = j + 1, n - 1
                while lo < hi:
                    s = nums[lo] + nums[hi]
                    if s < want:
                        lo += 1
                    elif s > want:
                        hi -= 1
                    else:
                        res.append([nums[i], nums[j], nums[lo], nums[hi]])
                        lo += 1
                        while lo < hi and nums[lo] == nums[lo - 1]:
                            lo += 1
                        hi -= 1
                        while lo < hi and nums[hi] == nums[hi + 1]:
                            hi -= 1
        return res
| 798 | -6 | 49 |
bb14aeb9451aa32c364f0d1ac1c3e17ae0fffd0a | 16,324 | py | Python | training.py | Ashraf-T/DriftSurf | 1501f7e7ee1df94e750b6e33da1c7c2d2c4be914 | [
"MIT"
] | null | null | null | training.py | Ashraf-T/DriftSurf | 1501f7e7ee1df94e750b6e33da1c7c2d2c4be914 | [
"MIT"
] | null | null | null | training.py | Ashraf-T/DriftSurf | 1501f7e7ee1df94e750b6e33da1c7c2d2c4be914 | [
"MIT"
] | null | null | null | import models
import read_data as data
import hyperparameters
from drift_detection.__init__ import *
import random
import numpy
import logging
| 40.009804 | 238 | 0.588642 | import models
import read_data as data
import hyperparameters
from drift_detection.__init__ import *
import random
import numpy
import logging
class Training:
    """Drives streaming drift-adaptation experiments over one dataset.

    Holds the data stream, hyperparameters and one learner per algorithm,
    and dispatches per-batch work to the ``setup_<algo>`` /
    ``process_<algo>`` / ``update_<opt>_model`` methods by name.
    """
    # Optimizer tags mirrored from models.Opt so that
    # getattr(Training, base_learner) resolves the chosen base learner.
    STRSAGA = models.Opt.STRSAGA
    SGD = models.Opt.SGD
    # Computation modes: LIMITED shares the rho budget across all of an
    # algorithm's models (weighted per model); UNLIMITED gives each model
    # the full rho budget.
    LIMITED = 'all_models'
    UNLIMITED = 'each_model'
    # Whether Aware resets Before or After the loss is measured at a drift.
    BEFORE = 'B'
    AFTER = 'A'
    def __init__(self, dataset, computation='each_model', rate=2, base_learner = models.Opt.STRSAGA.upper(), algo_names=['Aware', 'SGD', 'OBL', 'MDDM', 'AUE', 'DriftSurf']):
        """
        :param dataset: str
            name of the dataset
        :param computation: str (algorithm / model)
            algorithm: rho computational power is given to each algorithm and needs to be divided between learners
            model: rho computational power is given to each model in each algorithm
        :param rate: int
            rate = (rho/lam)
        :param base_learner: (STRSAGA / SGD)
            the base learner used by each algorithm can be either strsaga or sgd
        :param algo_names: ['Aware', 'SGD', 'OBL', 'MDDM', 'AUE', 'DriftSurf']
            list of algorithms we want to train over the given dataset
            Aware: have oracle knowledge about times of drifts
            SGD: single-pass SGD
            OBL: oblivious to drift
            MDDM: presented in 'Pesaranghader, A., Viktor, H. L., and Paquet, E. Mcdiarmiddrift detection methods for evolving data streams. InIJCNN, pp. 1–9, 2018.'
            AUE: presented in 'Brzezinski, D. and Stefanowski, J. Reacting to differenttypes of concept drift: The accuracy updated ensemblealgorithm.IEEE Trans. Neural Netw. Learn. Syst, 25(1):81–94, 2013.'
            DriftSurf: our proposed algorithm

        NOTE(review): algo_names uses a mutable list as default value; it is
        only read here, but if self.algorithms is ever mutated, all Training
        instances created with the default would share the same list.
        """
        self.algorithms = algo_names
        self.loss = {}
        self.computation = computation.lower()
        self.rate = rate
        # Resolve 'STRSAGA'/'SGD' to the matching class-level optimizer tag.
        self.opt = getattr(Training, base_learner)
        self.dataset_name = dataset
        read_data = data.read_dataset()
        self.X, self.Y, self.n, self.d, self.drift_times = read_data.read(self.dataset_name)
        # All sea* variants share one set of hyperparameters under 'sea'.
        if self.dataset_name.startswith('sea'):
            self.dataset_name = 'sea'
        self.mu = hyperparameters.MU[self.dataset_name]
        self.step_size = hyperparameters.STEP_SIZE[self.dataset_name]
        self.b = hyperparameters.b[self.dataset_name] if self.dataset_name in hyperparameters.b.keys() else hyperparameters.b['default']
        # lam: points per batch; rho: per-step computational budget.
        self.lam = int(self.n//self.b)
        self.rho = self.lam * self.rate
@staticmethod
def load_dataset(dataset):
"""
load the given dataset
:param dataset: str
name of the dataset to be loaded
:return:
features, labels, #records, #dimension, drift_times
"""
name = dataset
if dataset.startswith('sea'):
name = 'sea'
read_data = data.read_dataset()
X, Y, n, d, drift_times = read_data.read(dataset)
return name, X, Y, n, d, drift_times
@staticmethod
def setup_experiment(dataset, rate, n):
"""
setup the hyperparameters for the experiment
:param dataset: str
name of the dataset
:param rate: int
rho/lam
:param n: int
# of data points in the given dataset
:return:
regularization term (mu), step_size (eta), total number of batches (b), size of each batch (lam), computational power (rho)
"""
mu = hyperparameters.MU[dataset]
step_size = hyperparameters.STEP_SIZE[dataset]
b = hyperparameters.b[dataset] if dataset in hyperparameters.b.keys() else hyperparameters.b['default']
lam = int(n//b)
rho = lam * rate
return mu, step_size, b, lam, rho
    def update_loss(self, test_set, time):
        """
        computes and updates the loss of algorithms at time t over the given test set
        :param test_set: batch of (index, x, y) points to evaluate on
        :param time: int time step used as the key in self.loss[algo]
        """
        for algo in self.algorithms:
            # getattr(self, algo) resolves the learner object named after the algorithm.
            self.loss[algo][time] = getattr(self, algo).zero_one_loss(test_set)
def setup_algorithms(self, delta, loss_fn, detector=MDDM_G(), Aware_reset = 'B', r=None, reactive_method=models.LogisticRegression_DriftSurf.GREEDY):
"""
set parameters of algorithms in the begining of the training
:param delta: float
DriftSurf's parameter for drift detection
:param loss_fn: str (zero-one / reg)
loss function DriftSurf check for performance degrading
:param detector:
drift detector method for MDDM
:param Aware_reset: str
when to reset Aware: before or after computing the loss at drift times
:param r: int
length of the reactive state in DriftSurf
"""
for algo in self.algorithms:
if algo == 'DriftSurf': self.setup_DriftSurf(delta, loss_fn, r, reactive_method)
elif algo == 'MDDM': self.setup_MDDM(detector)
elif algo == 'Aware': self.setup_Aware(Aware_reset)
else: getattr(self, 'setup_{0}'.format(algo))()
    def setup_DriftSurf(self, delta, loss_fn, r=None, method=models.LogisticRegression_DriftSurf.GREEDY):
        """
        setup parameters of DriftSurf
        :param delta: float
            delta-degration in performance is considered as a sign of drift
        :param loss_fn: str (reg, zero-one)
            loss function that DriftSurf check for performance degration
        :param r: int
            length of the reactive state; falls back to the per-dataset
            value in hyperparameters.r (or its 'default' entry)
        :param method:
            adaptation strategy used while in the reactive state
        """
        # DriftSurf_t counts steps spent inside the current reactive state.
        self.DriftSurf_t = 0
        self.DriftSurf_r = r if r else (hyperparameters.r[self.dataset_name] if self.dataset_name in hyperparameters.r.keys() else hyperparameters.r['default'])
        self.DriftSurf = models.LogisticRegression_DriftSurf(self.d, self.opt, delta, loss_fn, method)
def setup_MDDM(self, detector=MDDM_G()):
"""
setup parameters of MDDM
:param detector: (MDDM-A, MDDM-E, MDDM-G)
drift detector of MDDM, defualt is set to be MDDM-D()
"""
self.MDDM = models.LogisticRegression_expert(numpy.random.rand(self.d), self.opt)
self.MDDM_drift_detector = detector
def setup_AUE(self):
"""
setup AUE
"""
self.AUE = models.LogisticRegression_AUE(self.d, self.opt)
def setup_Aware(self, reset='B'):
"""
setup Aware
:param reset: str (B / A)
when to reset parameters of the predictive model in Aware: before computing loss or after - default is set to be before
"""
self.Aware_reset = reset
self.Aware = models.LogisticRegression_expert(numpy.random.rand(self.d), self.opt, self.S)
def setup_SGD(self):
"""
setup single-pass SGD
"""
self.SGD = models.LogisticRegression_expert(numpy.random.rand(self.d), Training.SGD)
def setup_OBL(self):
"""
setup oblivious algorithm
"""
self.OBL = models.LogisticRegression_expert(numpy.random.rand(self.d), Training.STRSAGA)
def setup_Candor(self):
"""
setup Candor
"""
self.Candor = models.LogisticRegression_Candor(self.d, self.opt)
    def update_strsaga_model(self, model):
        """
        update the given model based on strsaga algorithm presented in 'Jothimurugesan, E., Tahmasbi, A., Gibbons, P., and Tirtha-pura, S. Variance-reduced stochastic gradient descent onstreaming data. InNeurIPS, pp. 9906-9915, 2018.'
        :param model:
            the model to be updated (skipped when falsy)
        """
        if model:
            # Under LIMITED computation, rho is split across an algorithm's
            # models according to each model's weight.
            weight = model.get_weight() if self.computation == Training.LIMITED else 1
            lst = list(model.T_pointers)  # [start, end) of the effective sample set
            for s in range(int(self.rho * weight)):
                # Alternate: even steps grow the effective set with the next
                # unseen point of the new batch; odd steps (or once the batch
                # is exhausted) revisit a uniformly sampled seen point.
                if s % 2 == 0 and lst[1] < self.S + self.lam:
                    j = lst[1]
                    lst[1] += 1
                else:
                    j = random.randrange(lst[0], lst[1])
                point = (j, self.X[j], self.Y[j])
                model.update_step(point, self.step_size, self.mu)
            model.update_effective_set(lst[1])
    def update_strsaga_model_biased(self, model, wp):
        """
        update the given model with biased STRSAGA steps (regularized toward
        a prior model, used by Candor).
        :param model:
            the model to be updated (skipped when falsy)
        :param wp:
            weighted combination of previous experts used as the bias prior
        """
        if model:
            weight = model.get_weight() if self.computation == Training.LIMITED else 1
            lst = list(model.T_pointers)
            for s in range(int(self.rho * weight)):
                # Same sampling schedule as update_strsaga_model: even steps
                # consume fresh points, odd steps revisit seen points.
                if s % 2 == 0 and lst[1] < self.S + self.lam:
                    j = lst[1]
                    lst[1] += 1
                else:
                    j = random.randrange(lst[0], lst[1])
                point = (j, self.X[j], self.Y[j])
                model.strsaga_step_biased(point, self.step_size, self.mu, wp)
            model.update_effective_set(lst[1])
    def update_sgd_model(self, model):
        """
        update the given model based on SGD algorithm
        :param model:
            the model to be updated (skipped when falsy)
        """
        if model:
            weight = model.get_weight() if self.computation == Training.LIMITED else 1
            lst = list(model.T_pointers)
            for s in range(int(self.rho * weight)):
                # Sample uniformly over seen points plus the new batch.
                j = random.randrange(lst[0], lst[1] + self.lam)
                point = (j, self.X[j], self.Y[j])
                model.update_step(point, self.step_size, self.mu)
            # The whole new batch becomes part of the effective set.
            model.update_effective_set(lst[1] + self.lam)
    def update_sgd_model_biased(self, model, wp):
        """
        update the given model with biased SGD steps regularized toward a
        prior model (used by Candor).
        :param model:
            the model to be updated (skipped when falsy)
        :param wp:
            weighted combination of previous experts used as the bias prior
        """
        if model:
            weight = model.get_weight() if self.computation == Training.LIMITED else 1
            lst = list(model.T_pointers)
            for s in range(int(self.rho * weight)):
                j = random.randrange(lst[0], lst[1] + self.lam)
                point = (j, self.X[j], self.Y[j])
                # NOTE(review): the strsaga counterpart calls
                # `strsaga_step_biased`; confirm `step_step_biased` is the
                # intended method name here. Also this passes the constant
                # models.LogisticRegression_Candor.MU instead of self.mu,
                # unlike every other update method -- verify that is deliberate.
                model.step_step_biased(point, self.step_size, models.LogisticRegression_Candor.MU, wp)
            model.update_effective_set(lst[1] + self.lam)
    # single-pass SGD
    def update_sgd_SP_model(self, model):
        """
        update the given model based on a single-pass SGD algorithm
        :param model:
            given model to be updated (skipped when falsy)
        """
        if model:
            # Consume the new batch sequentially, one step per point, capped
            # by the computational budget rho.
            sgdOnline_T = self.S
            for s in range(min(self.lam, self.rho)):
                # The guard is effectively always true: the loop runs at most
                # lam iterations starting from S, so sgdOnline_T < S + lam.
                if sgdOnline_T < self.S + self.lam:
                    j = sgdOnline_T
                    sgdOnline_T += 1
                point = (j, self.X[j], self.Y[j])
                model.update_step(point, self.step_size, self.mu)
    def process_MDDM(self, time, new_batch):
        """
        MDDM's process at time t given a newly arrived batch of data points
        :param time: int
            time step
        :param new_batch:
            newly arrived batch of data points
        """
        # Replace the expert with a fresh one whenever the detector signals
        # drift (never at t=0, since there is no history yet).
        if (self.MDDM_drift_detector.test(self.MDDM, new_batch) and time != 0):
            self.MDDM = models.LogisticRegression_expert(numpy.random.rand(self.d), self.opt, self.S)
            self.MDDM_drift_detector.reset()
            logging.info('MDDM drift detected, reset model : {0}'.format(time))
        # Train the (possibly reset) expert with the configured base learner.
        getattr(self, 'update_{0}_model'.format(self.opt))(self.MDDM)
    def process_AUE(self, time, new_batch):
        """
        AUE's process at time t given a newly arrived batch of data points
        :param time: int
            time step
        :param new_batch:
            newly arrived batch of data points
        """
        # Re-weight the ensemble members based on the newly arrived batch.
        self.AUE.update_weights(new_batch)
        # Expert keys are divided by lam here, so they are presumably starting
        # sample indices converted to batch numbers for logging.
        logging.info('AUE Experts at time {0}: {1}'.format(time, [int(k / self.lam) for k in self.AUE.experts.keys()]))
        # Every surviving expert gets updated with the configured optimizer.
        for index, expert in self.AUE.experts.items():
            getattr(self, 'update_{0}_model'.format(self.opt))(expert)
    def process_Candor(self, time, new_batch):
        """
        Candor's process at time t given a newly arrived batch of data points
        :param time: int
            time step
        :param new_batch:
            newly arrived batch of data points
        """
        # update_all = False
        # Bias point: the weighted combination of all previous experts.
        wp = self.Candor.get_weighted_combination()
        expert = models.LogisticRegression_expert(numpy.random.rand(self.d), self.opt, self.S) # alt: first arg is wp
        if time == 0:
            # No history yet -- train the first expert unbiased.
            getattr(self, 'update_{0}_model'.format(self.opt))(expert)
        else:
            # Train the new expert biased toward the weighted predictor wp.
            getattr(self, 'update_{0}_model_biased'.format(self.opt))(expert, wp)
        self.Candor.experts.append((expert, wp))
        self.Candor.reset_weights()
    def process_DriftSurf(self, time, new_batch):
        """
        DriftSurf's process at time t given a newly arrived batch of data points
        :param time: int
            time step
        :param new_batch:
            newly arrived batch of data points
        """
        self.DriftSurf.update_perf_all(new_batch, self.mu)
        if self.DriftSurf.stable:
            if self.DriftSurf.enter_reactive(self.S, new_batch, self.mu):
                # Drift suspected: the reactive phase starts; reset its timer.
                self.DriftSurf_t = 0
                logging.info('DriftSurf enters reactive state : {0}'.format(time))
            else:
                # update models
                getattr(self, 'update_{0}_model'.format(self.opt))(self.DriftSurf.expert_predictive)
                getattr(self, 'update_{0}_model'.format(self.opt))(self.DriftSurf.expert_stable)
        # NOTE: a second `if` rather than `elif` -- a batch that just triggered
        # the reactive state (assuming enter_reactive cleared .stable) is also
        # processed by the reactive branch below; appears intentional.
        if not self.DriftSurf.stable:
            # update models
            self.DriftSurf.update_reactive_sample_set(new_batch)
            getattr(self, 'update_{0}_model'.format(self.opt))(self.DriftSurf.expert_predictive)
            getattr(self, 'update_{0}_model'.format(self.opt))(self.DriftSurf.expert_reactive)
            self.DriftSurf_t += 1
            if self.DriftSurf_t == self.DriftSurf_r :
                # Reactive window of length DriftSurf_r elapsed: leave the
                # reactive state at the next batch boundary (S + lam).
                self.DriftSurf.exit_reactive(self.S+self.lam, self.mu)
    def process_Aware(self):
        """Update the drift-aware baseline model using the configured optimizer."""
        getattr(self, 'update_{0}_model'.format(self.opt))(self.Aware)
def process_OBL(self):
"""
oblivious algorithm's process
"""
lst = list(self.OBL.T_pointers)
for s in range(self.rho):
if s % 2 == 0 and lst[1] < self.S + self.lam:
lst[1] += 1
j = random.randrange(lst[0], lst[1])
point = (j, self.X[j], self.Y[j])
self.OBL.update_step(point, self.step_size, self.mu)
self.OBL.update_effective_set(lst[1])
    def process_SGD(self):
        """Update the single-pass SGD baseline model."""
        self.update_sgd_SP_model(self.SGD)
def process(self, delta=0.1, loss_fn='reg', drift_detectr=MDDM_G(), Aware_reset='B', r=None, reactive_method=models.LogisticRegression_DriftSurf.GREEDY):
"""
Train algorithms over the given dataset arrivin in streaming setting over b batches
:param delta:
DriftSurf's parameter for drift detection
:param loss_fn:
DriftSurf's parameter for drift detection
:param drift_detectr:
MDDM's drift detector
:param Aware_reset:
When to reset Aware
:param r:
Length of the reactive state in DriftSurf
"""
self.S = 0
self.setup_algorithms(delta, loss_fn, drift_detectr, Aware_reset, r, reactive_method)
for algo in self.algorithms:
self.loss[algo] = [0] * self.b
logging.info('dataset : {0}, n : {1}, b : {2}'.format(self.dataset_name, self.n, self.b))
for time in range(self.b):
print(time)
if time in self.drift_times and 'Aware' in self.algorithms and self.Aware_reset == Training.BEFORE:
self.setup_Aware()
# measure accuracy over upcoming batch
test_set = [(i, self.X[i], self.Y[i]) for i in range(self.S, self.S + self.lam)]
self.update_loss(test_set, time)
if time in self.drift_times and 'Aware' in self.algorithms and self.Aware_reset == Training.AFTER:
self.setup_Aware()
for algo in self.algorithms:
if algo in ['SGD', 'OBL', 'Aware']: getattr(self, 'process_{0}'.format(algo))()
else: getattr(self, 'process_{0}'.format(algo))(time, test_set)
self.S += self.lam
return self.loss
| 1,220 | 14,942 | 23 |
272d1410d32bdb30de2d29d444f7d53461fbc254 | 994 | py | Python | openstates/openstates-master/openstates/in/utils.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/in/utils.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/in/utils.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | import requests
| 43.217391 | 147 | 0.68008 | import requests
def get_with_increasing_timeout(scraper, link, fail=False, kwargs=None):
    """Fetch *link* via scraper.get, retrying with increasing timeouts.

    The timeout starts at 2 seconds and is squared after each timeout
    (2 -> 4 -> 16 -> 256; the loop stops once it would exceed 64), so at
    most three attempts are made.

    :param scraper: object exposing .get(link, timeout=..., **kwargs) and .logger
    :param link: the URL to fetch
    :param fail: if True, raise AssertionError when every attempt times out;
        if False, log a warning and return None instead
    :param kwargs: optional dict of extra keyword arguments for scraper.get
    :return: the response on success, otherwise None (when fail is False)
    """
    # BUG FIX: the default used to be the shared mutable ``kwargs={}``;
    # normalize a per-call empty dict here instead.
    if kwargs is None:
        kwargs = {}
    timeout_length = 2
    html = None
    while timeout_length < 65 and html is None:
        try:
            html = scraper.get(link, timeout=timeout_length, **kwargs)
        except (requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout):
            old_length = timeout_length
            timeout_length **= 2  # this squares the result. awesome.
            scraper.logger.debug("Timed out after {now} seconds, increasing to {next} and trying again".format(now=old_length, next=timeout_length))
        else:
            return html
    if fail:
        raise AssertionError("Link failed after waiting over a minute, giving up and failing.")
    else:
        scraper.logger.warning("Link failed after waiting over a minute, giving up and moving on.")
| 954 | 0 | 23 |
fe7fc7706419bfb0980b5755b2b030e96b17e2b8 | 1,026 | py | Python | frappe/website/serve.py | naderelabed/frappe | 4d6fefaf6b5af594180c0f9f31c7e28e6f514348 | [
"MIT"
] | 3,755 | 2015-01-06T07:47:43.000Z | 2022-03-31T20:54:23.000Z | frappe/website/serve.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 7,369 | 2015-01-01T19:59:41.000Z | 2022-03-31T23:02:05.000Z | frappe/website/serve.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 2,685 | 2015-01-07T17:51:03.000Z | 2022-03-31T23:16:24.000Z | import frappe
from frappe.website.page_renderers.error_page import ErrorPage
from frappe.website.page_renderers.not_permitted_page import NotPermittedPage
from frappe.website.page_renderers.redirect_page import RedirectPage
from frappe.website.path_resolver import PathResolver
def get_response(path=None, http_status_code=200):
"""Resolves path and renders page"""
response = None
path = path or frappe.local.request.path
endpoint = path
try:
path_resolver = PathResolver(path)
endpoint, renderer_instance = path_resolver.resolve()
response = renderer_instance.render()
except frappe.Redirect:
return RedirectPage(endpoint or path, http_status_code).render()
except frappe.PermissionError as e:
response = NotPermittedPage(endpoint, http_status_code, exception=e).render()
except Exception as e:
response = ErrorPage(exception=e).render()
return response
| 34.2 | 79 | 0.807992 | import frappe
from frappe.website.page_renderers.error_page import ErrorPage
from frappe.website.page_renderers.not_permitted_page import NotPermittedPage
from frappe.website.page_renderers.redirect_page import RedirectPage
from frappe.website.path_resolver import PathResolver
def get_response(path=None, http_status_code=200):
	"""Resolve *path* to a renderer and return the rendered page.

	Falls through: redirect -> not-permitted page -> generic error page.

	:param path: request path; defaults to the current request's path
	:param http_status_code: status code to render with (default 200)
	"""
	response = None
	path = path or frappe.local.request.path
	endpoint = path
	try:
		path_resolver = PathResolver(path)
		endpoint, renderer_instance = path_resolver.resolve()
		response = renderer_instance.render()
	except frappe.Redirect:
		# Redirect is raised as flow control during resolution/rendering.
		return RedirectPage(endpoint or path, http_status_code).render()
	except frappe.PermissionError as e:
		response = NotPermittedPage(endpoint, http_status_code, exception=e).render()
	except Exception as e:
		# Any other failure renders the generic error page for the exception.
		response = ErrorPage(exception=e).render()
	return response
def get_response_content(path=None, http_status_code=200):
	"""Return the rendered page for *path* as a UTF-8 decoded string."""
	response = get_response(path, http_status_code)
	return str(response.data, 'utf-8')
| 122 | 0 | 23 |
db46d261fd1ce7029b0a21c7afe2c9b4339b3375 | 2,398 | py | Python | source/addition/lstm_trainer.py | smalik169/bachelor_thesis | 5aa481d6e3a2f2ac82c8588ea8a9931024fa8000 | [
"MIT"
] | null | null | null | source/addition/lstm_trainer.py | smalik169/bachelor_thesis | 5aa481d6e3a2f2ac82c8588ea8a9931024fa8000 | [
"MIT"
] | null | null | null | source/addition/lstm_trainer.py | smalik169/bachelor_thesis | 5aa481d6e3a2f2ac82c8588ea8a9931024fa8000 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import rnn_cell, rnn
train_set = np.load('train_set.npy')
test_set = np.load('test_set.npy')
x = tf.placeholder(tf.float32, shape=[None, None, 2])
y = tf.placeholder(tf.float32, shape=[1, None])
activation = lambda x: tf.maximum(x, .01*x)
with tf.variable_scope('lstm'):
cell_size = 64
lstm_cell = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
_, states = rnn.dynamic_rnn(lstm_cell, x, dtype=tf.float32, time_major=True)
final_states = states[1]
W = weight_variable([1, cell_size])
b = bias_variable([1])
y_pred = activation( tf.matmul(W, tf.transpose(final_states)) + b )
mean_square_error = tf.reduce_mean( (y - y_pred)**2 )
accuracy = tf.reduce_mean( tf.cast( tf.less( tf.abs(y_pred - y), 0.04), tf.float32 ) )
#train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(mean_square_error)
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
grads_and_vars = optimizer.compute_gradients(mean_square_error)
clipped_grads_and_vars = [(tf.clip_by_norm(grad, 1.), var) for (grad, var) in grads_and_vars]
train_step = optimizer.apply_gradients(clipped_grads_and_vars)
if __name__ == "__main__":
session = tf.Session()
for i in xrange(5):
session.run(tf.initialize_all_variables())
train_log = []
for j in xrange(15000):
X, Y = train_set[j%2000]
train_log.append((session.run(accuracy, feed_dict={x: X, y: Y}), \
session.run(mean_square_error, feed_dict={x: X, y: Y})))
session.run(train_step, feed_dict={x: X, y: Y})
if j%100 == 0:
print 'run', i, 'step', j, train_log[-1]
print 'testing...'
err_list = []
for (Xt, Yt) in test_set:
err_list.append( session.run(mean_square_error, feed_dict={x: Xt, y: Yt}) )
print 'saving results...'
np.save('logs/lstm_train_log' + str(i) + '.npy', np.array(train_log))
np.save('logs/lstm_test_err' + str(i) + '.npy', np.array(err_list))
print
session.close()
| 31.552632 | 97 | 0.635113 | import tensorflow as tf
import numpy as np
import rnn_cell, rnn
def weight_variable(shape):
    """Create a TF weight variable, truncated-normal init with stddev 1/sqrt(fan-in)."""
    initial = tf.truncated_normal(shape, stddev=1.0/np.sqrt(shape[0]))
    return tf.Variable(initial)
def bias_variable(shape):
    """Create a TF bias variable initialized to zeros."""
    initial = tf.constant(0., shape=shape)
    return tf.Variable(initial)
# Pre-generated (inputs, targets) pairs saved as numpy arrays.
train_set = np.load('train_set.npy')
test_set = np.load('test_set.npy')
# Time-major input: [time, batch, 2]; one scalar target per sequence.
x = tf.placeholder(tf.float32, shape=[None, None, 2])
y = tf.placeholder(tf.float32, shape=[1, None])
# Leaky ReLU with slope 0.01 on the negative side.
activation = lambda x: tf.maximum(x, .01*x)
with tf.variable_scope('lstm'):
    cell_size = 64
    lstm_cell = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
    _, states = rnn.dynamic_rnn(lstm_cell, x, dtype=tf.float32, time_major=True)
    # With state_is_tuple=True, states is (c, h); keep the final hidden state h.
    final_states = states[1]
# Linear readout from the final hidden state to a single output.
W = weight_variable([1, cell_size])
b = bias_variable([1])
y_pred = activation( tf.matmul(W, tf.transpose(final_states)) + b )
mean_square_error = tf.reduce_mean( (y - y_pred)**2 )
# Counted "accurate" when the prediction is within 0.04 of the target.
accuracy = tf.reduce_mean( tf.cast( tf.less( tf.abs(y_pred - y), 0.04), tf.float32 ) )
#train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(mean_square_error)
# Adam with per-gradient norm clipping at 1.0.
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
grads_and_vars = optimizer.compute_gradients(mean_square_error)
clipped_grads_and_vars = [(tf.clip_by_norm(grad, 1.), var) for (grad, var) in grads_and_vars]
train_step = optimizer.apply_gradients(clipped_grads_and_vars)
if __name__ == "__main__":
    # NOTE: Python 2 / TF1-era script (print statements, tf.Session,
    # initialize_all_variables).
    session = tf.Session()
    for i in xrange(5):
        # Fresh random initialization for each of the 5 independent runs.
        session.run(tf.initialize_all_variables())
        train_log = []
        for j in xrange(15000):
            # Cycle through the first 2000 training examples.
            X, Y = train_set[j%2000]
            # Logged BEFORE the train step below, so these are pre-update
            # metrics (two run() calls evaluate the graph twice).
            train_log.append((session.run(accuracy, feed_dict={x: X, y: Y}), \
                              session.run(mean_square_error, feed_dict={x: X, y: Y})))
            session.run(train_step, feed_dict={x: X, y: Y})
            if j%100 == 0:
                print 'run', i, 'step', j, train_log[-1]
        print 'testing...'
        err_list = []
        for (Xt, Yt) in test_set:
            err_list.append( session.run(mean_square_error, feed_dict={x: Xt, y: Yt}) )
        print 'saving results...'
        np.save('logs/lstm_train_log' + str(i) + '.npy', np.array(train_log))
        np.save('logs/lstm_test_err' + str(i) + '.npy', np.array(err_list))
        print
        # NOTE(review): session.close() sits inside the run loop, so runs 1..4
        # would operate on a closed session -- likely meant to be dedented.
        session.close()
| 188 | 0 | 46 |
0dd6de400dc9d79847f2f3ffb22a3df3d3ebcefd | 2,087 | py | Python | demo/test/api/demo.py | hyjiacan/restfx | 8ba70bc099e6ace0c9b3afe8909ea61a5ff82dec | [
"MIT",
"BSD-3-Clause"
] | 5 | 2021-01-25T11:09:41.000Z | 2021-04-28T07:17:21.000Z | demo/test/api/demo.py | hyjiacan/restfx | 8ba70bc099e6ace0c9b3afe8909ea61a5ff82dec | [
"MIT",
"BSD-3-Clause"
] | null | null | null | demo/test/api/demo.py | hyjiacan/restfx | 8ba70bc099e6ace0c9b3afe8909ea61a5ff82dec | [
"MIT",
"BSD-3-Clause"
] | 1 | 2021-01-28T00:53:37.000Z | 2021-01-28T00:53:37.000Z | from test.tools.enums import OpTypes
from restfx import val
from restfx import route
from restfx.http import HttpFile
from restfx.http import HttpRequest
@route(module='测试名称-模块', name='测试名称-GET', extname='jsp', auth=False, op_type=OpTypes.Query, validators=(
val('param1').range(2, 2)
))
def get(request, _injection, param1=(1, 2), param2=5):
"""
:param request: HttpRequest
:param _injection:
:param param1:第1个参数
:param param2:第2个参数
:return: 返回值为参数字典
"""
# request 会是 HttpRequest
temp = []
for i in range(20):
temp.append({
'i': i,
'p': i ^ i,
'o': i & i
})
data = {
'injection': _injection,
'param2': param1,
'param3': param2,
'temp': temp
}
return data
@route(module='测试名称-模块', name='测试名称-GET_APPEND_PARAM')
def get_param(param1, req: HttpRequest, from_=None, param3=5):
"""
:param param1:第1个参数
:param req:第2个参数
:param from_:第3个参数
:param param3:第4个参数
:return: 返回值为参数字典
"""
# req 会是 HttpRequest
return {
'param1': param1,
'from': from_,
'param3': param3,
}
@route(module='测试名称-模块', name='测试名称-PUT_PARAM', auth=False)
def put(request: int, file: HttpFile):
"""
:param request:第1个参数
:param file:需要上传一个文件
:return: 返回值为参数字典
"""
# request 会是请求参数,参数列表中没有 HttpRequest
return {
'request': request,
'param1': {
'filename': file.stream.filename if file else None,
'type': file.mimetype
}
}
@route(module='测试名称-模块', name='测试名称-DELETE_PARAM')
def delete(request, param1, from_=None, param3=5, **kwargs):
"""
:param request:第1个参数
:param param1:第2个参数
:param from_:第3个参数
:param param3:第4个参数
:return: 返回值为参数字典
"""
# 未在函数的参数列表中声明的请求参数,会出现在 kwargs 中
return {
'param1': param1,
'from': from_,
'param3': param3,
'variable_args': kwargs
}
| 22.44086 | 105 | 0.555343 | from test.tools.enums import OpTypes
from restfx import val
from restfx import route
from restfx.http import HttpFile
from restfx.http import HttpRequest
@route(module='测试名称-模块', name='测试名称-GET', extname='jsp', auth=False, op_type=OpTypes.Query, validators=(
    val('param1').range(2, 2)
))
def get(request, _injection, param1=(1, 2), param2=5):
    """
    :param request: HttpRequest
    :param _injection: framework-injected argument (leading underscore) -- confirm against restfx docs
    :param param1: the first parameter
    :param param2: the second parameter
    :return: a dict echoing the parameters
    """
    # request will be the HttpRequest
    temp = []
    for i in range(20):
        temp.append({
            'i': i,
            'p': i ^ i,
            'o': i & i
        })
    data = {
        'injection': _injection,
        'param2': param1,
        'param3': param2,
        'temp': temp
    }
    return data
@route(module='测试名称-模块', name='测试名称-GET_APPEND_PARAM')
def get_param(param1, req: HttpRequest, from_=None, param3=5):
    """
    :param param1: the first parameter
    :param req: the second parameter (the request object)
    :param from_: the third parameter
    :param param3: the fourth parameter
    :return: a dict echoing the parameters
    """
    # req will be the HttpRequest
    return {
        'param1': param1,
        'from': from_,
        'param3': param3,
    }
@route(module='测试名称-模块', name='测试名称-PUT_PARAM', auth=False)
def put(request: int, file: HttpFile):
    """
    :param request: the first parameter
    :param file: a file to be uploaded
    :return: a dict echoing the parameters
    """
    # Annotated as int, so 'request' is a plain request parameter here; the
    # HttpRequest object is not in this parameter list.
    return {
        'request': request,
        'param1': {
            'filename': file.stream.filename if file else None,
            'type': file.mimetype
        }
    }
@route(module='测试名称-模块', name='测试名称-DELETE_PARAM')
def delete(request, param1, from_=None, param3=5, **kwargs):
    """
    :param request: the first parameter
    :param param1: the second parameter
    :param from_: the third parameter
    :param param3: the fourth parameter
    :return: a dict echoing the parameters
    """
    # Request parameters not declared in the signature are collected in kwargs.
    return {
        'param1': param1,
        'from': from_,
        'param3': param3,
        'variable_args': kwargs
    }
| 0 | 0 | 0 |
0716acadda46db3006308151bf26891cc22aae36 | 3,656 | py | Python | HubblePi/viewer/DirBrowser_ui.py | scriptorron/hubblepi | 402af74b537a40d0764b11d105aa8d3b0dd242f3 | [
"MIT"
] | null | null | null | HubblePi/viewer/DirBrowser_ui.py | scriptorron/hubblepi | 402af74b537a40d0764b11d105aa8d3b0dd242f3 | [
"MIT"
] | null | null | null | HubblePi/viewer/DirBrowser_ui.py | scriptorron/hubblepi | 402af74b537a40d0764b11d105aa8d3b0dd242f3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'DirBrowser_ui.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from HubblePi.viewer.DirBrowserTableView import DirBrowserTableView
| 50.082192 | 100 | 0.72128 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'DirBrowser_ui.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(801, 440)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setObjectName("gridLayout")
self.pushButton_ReloadDir = QtWidgets.QPushButton(Form)
self.pushButton_ReloadDir.setMaximumSize(QtCore.QSize(100, 16777215))
self.pushButton_ReloadDir.setObjectName("pushButton_ReloadDir")
self.gridLayout.addWidget(self.pushButton_ReloadDir, 0, 5, 1, 1)
self.label_Dir = QtWidgets.QLabel(Form)
self.label_Dir.setText("")
self.label_Dir.setObjectName("label_Dir")
self.gridLayout.addWidget(self.label_Dir, 0, 1, 1, 1)
self.pushButton_Dir = QtWidgets.QPushButton(Form)
self.pushButton_Dir.setMaximumSize(QtCore.QSize(100, 16777215))
self.pushButton_Dir.setObjectName("pushButton_Dir")
self.gridLayout.addWidget(self.pushButton_Dir, 0, 0, 1, 1)
self.tableView_Files = DirBrowserTableView(Form)
self.tableView_Files.setObjectName("tableView_Files")
self.gridLayout.addWidget(self.tableView_Files, 1, 0, 1, 6)
self.checkBox_DNG = QtWidgets.QCheckBox(Form)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.checkBox_DNG.sizePolicy().hasHeightForWidth())
self.checkBox_DNG.setSizePolicy(sizePolicy)
self.checkBox_DNG.setChecked(True)
self.checkBox_DNG.setObjectName("checkBox_DNG")
self.gridLayout.addWidget(self.checkBox_DNG, 0, 3, 1, 1)
self.checkBox_JPG = QtWidgets.QCheckBox(Form)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.checkBox_JPG.sizePolicy().hasHeightForWidth())
self.checkBox_JPG.setSizePolicy(sizePolicy)
self.checkBox_JPG.setChecked(True)
self.checkBox_JPG.setObjectName("checkBox_JPG")
self.gridLayout.addWidget(self.checkBox_JPG, 0, 2, 1, 1)
self.checkBox_NPY = QtWidgets.QCheckBox(Form)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.checkBox_NPY.sizePolicy().hasHeightForWidth())
self.checkBox_NPY.setSizePolicy(sizePolicy)
self.checkBox_NPY.setChecked(True)
self.checkBox_NPY.setObjectName("checkBox_NPY")
self.gridLayout.addWidget(self.checkBox_NPY, 0, 4, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.pushButton_ReloadDir.setText(_translate("Form", "Reload"))
self.pushButton_Dir.setText(_translate("Form", "Directory"))
self.checkBox_DNG.setText(_translate("Form", "DNG"))
self.checkBox_JPG.setText(_translate("Form", "JPG"))
self.checkBox_NPY.setText(_translate("Form", "NPY/NPZ"))
from HubblePi.viewer.DirBrowserTableView import DirBrowserTableView
| 3,266 | 1 | 76 |
d4f7200717212225a1a93918ba5c5d4276b670ea | 156 | py | Python | dataent/patches/v6_20x/set_allow_draft_for_print.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | null | null | null | dataent/patches/v6_20x/set_allow_draft_for_print.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | 6 | 2020-03-24T17:15:56.000Z | 2022-02-10T18:41:31.000Z | dataent/patches/v6_20x/set_allow_draft_for_print.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import dataent | 31.2 | 85 | 0.801282 | from __future__ import unicode_literals
import dataent
def execute():
	"""Patch: enable 'Allow Print for Draft' in the Print Settings singleton."""
	dataent.db.set_value("Print Settings", "Print Settings", "allow_print_for_draft", 1)
86bd6d50ace21215c2a04e7bfaf386521066d518 | 5,419 | py | Python | src/python/__init__.py | andyjost/Sprite | 7ecd6fc7d48d7f62da644e48c12c7b882e1a2929 | [
"MIT"
] | 1 | 2022-03-16T16:37:11.000Z | 2022-03-16T16:37:11.000Z | src/python/__init__.py | andyjost/Sprite | 7ecd6fc7d48d7f62da644e48c12c7b882e1a2929 | [
"MIT"
] | null | null | null | src/python/__init__.py | andyjost/Sprite | 7ecd6fc7d48d7f62da644e48c12c7b882e1a2929 | [
"MIT"
] | null | null | null | '''
The Sprite Curry system.
This package contains everything one needs to compile and execute Curry code.
The topmost module provides an instance of the Curry system and an API to
interact with it.
To perform a soft reset call :func:`reset`. This returns the system to its
original state, with no definitions and no modules imported. The configuration
is taken from environment variable SPRITE_INTERPRETER_FLAGS. To change that at
runtime, call :func:`reload` and specify new flags.
Use :func:`import_` to import Curry modules, :func:`compile` to compile Curry
code, :func:`expr` to build Curry expressions, and :func:`eval` to evaluate
them. :data:`path` determines where Sprite searches for Curry code. Loaded
modules can be found under :data:`modules`. Use :func:`topython` to convert
Curry values to Python objects.
Example:
>>> mymodule = curry.compile("""
... data Item = A | B
... rotate A = B
... rotate B = A
... main :: Item
... main = rotate A ? rotate B
... """)
>>> for value in curry.eval(mymodule.main):
... print(value)
B
A
'''
__all__ = [
# Global API
'getInterpreter'
, 'reload'
, 'show_value'
# Methods of the global interpreter.
, 'compile'
, 'currytype'
, 'eval'
, 'expr'
, 'flags'
, 'import_'
, 'load'
, 'module'
, 'modules'
, 'path'
, 'raw_expr'
, 'reset'
, 'save'
, 'symbol'
, 'topython'
, 'type'
# Wrappers to control expression building.
, 'choice'
, 'cons'
, 'fail'
, 'free'
, 'nil'
, 'ref'
, 'unboxed'
]
# Install breakpoint() into __builtins__.
import os
if os.environ.get('SPRITE_ENABLE_BREAKPOINT', False):
from .utility import breakpoint
del breakpoint
# Validate SPRITE_HOME.
if 'SPRITE_HOME' not in os.environ:
raise ImportError('SPRITE_HOME is not set in the environment')
if not os.path.isdir(os.environ['SPRITE_HOME']):
raise ImportError('SPRITE_HOME is not a directory')
if not os.access(os.environ['SPRITE_HOME'], os.O_RDONLY):
raise ImportError('SPRITE_HOME is not readable')
del os
from .exceptions import *
from . import interpreter, lib
from .interpreter import flags as _flags
from .utility import visitation as _visitation
import collections as _collections
import six as _six
from . import expressions as _expressions
choice = _expressions.choice
cons = _expressions.cons
fail = _expressions.fail
'''
Places a failure into a Curry expression.
:meta hide-value:
'''
free = _expressions.free
nil = _expressions.nil
'''
Places a list terminator into a Curry expression.
:meta hide-value:
'''
ref = _expressions.ref
unboxed = _expressions.unboxed
del _expressions
_interpreter_ = interpreter.Interpreter(flags=_flags.getflags())
compile = _interpreter_.compile
currytype = _interpreter_.currytype
eval = _interpreter_.eval
expr = _interpreter_.expr
flags = _interpreter_.flags
'''
The ``flags`` attribute of the global interpreter. Modify this to reconfigure
the interpreter.
:meta hide-value:
'''
import_ = _interpreter_.import_
load = _interpreter_.load
module = _interpreter_.module
modules = _interpreter_.modules
'''
The ``modules`` attribute of the global interpreter. This is a ``dict`` that
contains the imported Curry modules.
:meta hide-value:
'''
path = _interpreter_.path
'''
The ``path`` attribute of the global interpreter. Initialized from environment
variable CURRYPATH. Modify this to dynamically adjust the Curry search path.
:meta hide-value:
'''
raw_expr = _interpreter_.raw_expr
reset = _interpreter_.reset
save = _interpreter_.save
symbol = _interpreter_.symbol
topython = _interpreter_.topython
type = _interpreter_.type
def getInterpreter():
'''Get the global interpreter instance.'''
return _interpreter_
def reload(flags={}):
'''
Hard-resets the interpreter. Any flags supplied will be forwarded to the
constructor, overriding flags supplied via the environment variable
SPRITE_INTERPRETER_FLAGS.
'''
interpreter.reload(__name__, flags)
def show_value(value):
'''
Converts a Python Curry value to a string in Curry format. This does a few
things, such as lowering one-tuples; adjusting containers, such as tuples and
lists, to print elements as with ``str`` rather than ``repr``; and converting
free variable to friendly names _a, _b, etc. The output should match other
Curry systems.
'''
return ShowValue()(value)
| 25.92823 | 79 | 0.708987 | '''
The Sprite Curry system.
This package contains everything one needs to compile and execute Curry code.
The topmost module provides an instance of the Curry system and an API to
interact with it.
To perform a soft reset call :func:`reset`. This returns the system to its
original state, with no definitions and no modules imported. The configuration
is taken from environment variable SPRITE_INTERPRETER_FLAGS. To change that at
runtime, call :func:`reload` and specify new flags.
Use :func:`import_` to import Curry modules, :func:`compile` to compile Curry
code, :func:`expr` to build Curry expressions, and :func:`eval` to evaluate
them. :data:`path` determines where Sprite searches for Curry code. Loaded
modules can be found under :data:`modules`. Use :func:`topython` to convert
Curry values to Python objects.
Example:
>>> mymodule = curry.compile("""
... data Item = A | B
... rotate A = B
... rotate B = A
... main :: Item
... main = rotate A ? rotate B
... """)
>>> for value in curry.eval(mymodule.main):
... print(value)
B
A
'''
__all__ = [
# Global API
'getInterpreter'
, 'reload'
, 'show_value'
# Methods of the global interpreter.
, 'compile'
, 'currytype'
, 'eval'
, 'expr'
, 'flags'
, 'import_'
, 'load'
, 'module'
, 'modules'
, 'path'
, 'raw_expr'
, 'reset'
, 'save'
, 'symbol'
, 'topython'
, 'type'
# Wrappers to control expression building.
, 'choice'
, 'cons'
, 'fail'
, 'free'
, 'nil'
, 'ref'
, 'unboxed'
]
# Install breakpoint() into __builtins__.
import os
if os.environ.get('SPRITE_ENABLE_BREAKPOINT', False):
from .utility import breakpoint
del breakpoint
# Validate SPRITE_HOME.
if 'SPRITE_HOME' not in os.environ:
raise ImportError('SPRITE_HOME is not set in the environment')
if not os.path.isdir(os.environ['SPRITE_HOME']):
raise ImportError('SPRITE_HOME is not a directory')
if not os.access(os.environ['SPRITE_HOME'], os.O_RDONLY):
raise ImportError('SPRITE_HOME is not readable')
del os
from .exceptions import *
from . import interpreter, lib
from .interpreter import flags as _flags
from .utility import visitation as _visitation
import collections as _collections
import six as _six
from . import expressions as _expressions
choice = _expressions.choice
cons = _expressions.cons
fail = _expressions.fail
'''
Places a failure into a Curry expression.
:meta hide-value:
'''
free = _expressions.free
nil = _expressions.nil
'''
Places a list terminator into a Curry expression.
:meta hide-value:
'''
ref = _expressions.ref
unboxed = _expressions.unboxed
del _expressions
_interpreter_ = interpreter.Interpreter(flags=_flags.getflags())
compile = _interpreter_.compile
currytype = _interpreter_.currytype
eval = _interpreter_.eval
expr = _interpreter_.expr
flags = _interpreter_.flags
'''
The ``flags`` attribute of the global interpreter. Modify this to reconfigure
the interpreter.
:meta hide-value:
'''
import_ = _interpreter_.import_
load = _interpreter_.load
module = _interpreter_.module
modules = _interpreter_.modules
'''
The ``modules`` attribute of the global interpreter. This is a ``dict`` that
contains the imported Curry modules.
:meta hide-value:
'''
path = _interpreter_.path
'''
The ``path`` attribute of the global interpreter. Initialized from environment
variable CURRYPATH. Modify this to dynamically adjust the Curry search path.
:meta hide-value:
'''
raw_expr = _interpreter_.raw_expr
reset = _interpreter_.reset
save = _interpreter_.save
symbol = _interpreter_.symbol
topython = _interpreter_.topython
type = _interpreter_.type
def getInterpreter():
  '''Get the global interpreter instance used by the module-level API.'''
  return _interpreter_
def reload(flags=None):
  '''
  Hard-resets the interpreter.  Any flags supplied will be forwarded to the
  constructor, overriding flags supplied via the environment variable
  SPRITE_INTERPRETER_FLAGS.

  :param flags: optional mapping of interpreter flags; defaults to empty.
  '''
  # BUG FIX: the default used to be the mutable ``flags={}`` shared across
  # calls; normalize a per-call empty dict here instead.
  interpreter.reload(__name__, {} if flags is None else flags)
class ShowValue(object):
  '''
  Callable that formats a Python value the way other Curry systems print it.

  ``__call__`` is overloaded per argument type via ``_visitation.dispatch``:
  tuples, lists, generic sequences, strings, and mappings each get their own
  handler; everything else falls through to ``show.show``.
  '''
  def __init__(self):
    from . import show
    # Renders free variables with friendly names (_a, _b, ...).
    self.stringifier = show.ReplStringifier()
  @_visitation.dispatch.on('value')
  def __call__(self, value):
    # Default case: defer to the show module with the REPL stringifier.
    from . import show
    return show.show(value, stringifier=self.stringifier)
  @__call__.when(tuple)
  def __call__(self, value):
    # One-tuples are lowered to their sole element.
    if len(value) == 1:
      return self(value[0])
    else:
      return '(%s)' % ','.join(map(self, value))
  @__call__.when(list)
  def __call__(self, value):
    return '[%s]' % ','.join(map(self, value))
  # NOTE(review): ``Sequence``/``Mapping`` live in ``collections.abc``; the
  # top-level aliases used below were removed in Python 3.10 -- migrate when
  # dropping older Pythons.
  @__call__.when(_collections.Sequence, no=str)
  def __call__(self, value):
    # Any other sequence (except str) is formatted like a list.
    return self(list(value))
  @__call__.when(_six.string_types)
  def __call__(self, value):
    # We need to add a single quote to the string to trick Python into
    # surrounding it with double quotes.
    value = repr(value + "'")
    value = value[:-2] + value[-1]
    return value
  @__call__.when(_collections.Mapping)
  def __call__(self, value):
    return {self(k): self(v) for k,v in _six.iteritems(value)}
def show_value(value):
  '''
  Converts a Python Curry value to a string in Curry format. This does a few
  things, such as lowering one-tuples; adjusting containers, such as tuples and
  lists, to print elements as with ``str`` rather than ``repr``; and converting
  free variables to friendly names _a, _b, etc. The output should match other
  Curry systems.

  :param value: the value to format.
  :return: the formatted string.
  '''
  return ShowValue()(value)
| 622 | 383 | 23 |
102015a1ef62f8d52ecf5f73f79c9434dae540b2 | 546 | py | Python | src/Advent_of_Code/2020/Day_25/Day_25_1.py | phungj/MSOE_Comp_Prog_Py | 95e7521b28d3dbcb6279e7baf03067ca27acbe37 | [
"MIT"
] | null | null | null | src/Advent_of_Code/2020/Day_25/Day_25_1.py | phungj/MSOE_Comp_Prog_Py | 95e7521b28d3dbcb6279e7baf03067ca27acbe37 | [
"MIT"
] | null | null | null | src/Advent_of_Code/2020/Day_25/Day_25_1.py | phungj/MSOE_Comp_Prog_Py | 95e7521b28d3dbcb6279e7baf03067ca27acbe37 | [
"MIT"
] | null | null | null | card_pub_key = 15335876
door_pub_key = 15086442
card_loop_size = find_loop_size(7, card_pub_key)
door_loop_size = find_loop_size(7, door_pub_key)
print(find_enc_key(door_pub_key, card_loop_size)) | 18.2 | 49 | 0.673993 | card_pub_key = 15335876
door_pub_key = 15086442
def find_loop_size(subject_num, pub_key):
    """Brute-force the secret loop size: count how many times *subject_num*
    must be multiplied in (mod 20201227) before reaching *pub_key*."""
    value, loops = 1, 0
    while value != pub_key:
        value = (value * subject_num) % 20201227
        loops += 1
    return loops
def find_enc_key(pub_key, loop_size):
    """Return the encryption key: *pub_key* transformed *loop_size* times.

    The transform is pub_key**loop_size mod 20201227, so use Python's
    three-argument pow for O(log loop_size) modular exponentiation instead
    of the original O(loop_size) multiply loop.
    """
    return pow(pub_key, loop_size, 20201227)
# Recover each device's secret loop size from its public key; transforming the
# other device's public key with one loop size yields the shared key.
card_loop_size = find_loop_size(7, card_pub_key)
door_loop_size = find_loop_size(7, door_pub_key)  # unused; kept for symmetry
print(find_enc_key(door_pub_key, card_loop_size)) | 300 | 0 | 46 |
7cd531ad1c6711dfe1dafaa01f04d7892914958b | 607 | py | Python | search_page_export_example/urls.py | Cantemo/SearchPageExportExample | eaa4222cf12a887b0ba0a008855aaede5e1d49e1 | [
"BSD-3-Clause"
] | null | null | null | search_page_export_example/urls.py | Cantemo/SearchPageExportExample | eaa4222cf12a887b0ba0a008855aaede5e1d49e1 | [
"BSD-3-Clause"
] | null | null | null | search_page_export_example/urls.py | Cantemo/SearchPageExportExample | eaa4222cf12a887b0ba0a008855aaede5e1d49e1 | [
"BSD-3-Clause"
] | 1 | 2021-08-10T13:47:43.000Z | 2021-08-10T13:47:43.000Z | """
This file defined the URLs (end-points) for the plugin.
"""
from django.conf.urls import url
from . import views
# The plugin handles the request to the URL by responding with the view which is loaded
# from views.py. Inside "views" is a class which responses to the
# request. "name" is a shortcut name for the urls.
urlpatterns = [
# This is http://<portal_server_url>/search_page_export_example/
url(r'^$', views.ExportFormView.as_view(), name='form'),
# This is http://<portal_server_url>/search_page_export_example/csv/
url(r'^csv/$', views.CsvExportView.as_view(), name='csv'),
]
| 37.9375 | 87 | 0.719934 | """
This file defined the URLs (end-points) for the plugin.
"""
from django.conf.urls import url
from . import views
# The plugin handles the request to the URL by responding with the view which is loaded
# from views.py. Inside "views" is a class which responses to the
# request. "name" is a shortcut name for the urls.
urlpatterns = [
# This is http://<portal_server_url>/search_page_export_example/
url(r'^$', views.ExportFormView.as_view(), name='form'),
# This is http://<portal_server_url>/search_page_export_example/csv/
url(r'^csv/$', views.CsvExportView.as_view(), name='csv'),
]
| 0 | 0 | 0 |
5c51f23f4ff469a9bbe46d0d69a34b5b89598e91 | 1,121 | py | Python | Python-Algorithms/sorting-algorithms/test.py | ravi5175/Algorithm-Analysis | 9597ed60b8ba910a26fc9e386aa3c7ea8257cbe1 | [
"MIT"
] | null | null | null | Python-Algorithms/sorting-algorithms/test.py | ravi5175/Algorithm-Analysis | 9597ed60b8ba910a26fc9e386aa3c7ea8257cbe1 | [
"MIT"
] | null | null | null | Python-Algorithms/sorting-algorithms/test.py | ravi5175/Algorithm-Analysis | 9597ed60b8ba910a26fc9e386aa3c7ea8257cbe1 | [
"MIT"
] | null | null | null | import random
import datetime
from bubbleSort import BubbleSort | 32.028571 | 82 | 0.596789 | import random
import datetime
from bubbleSort import BubbleSort
class Test:
    """
    Generates random sample data, optionally pre-sorts it, then runs the
    sorting algorithm under test and collects its metrics.
    """
    # res = ['name', 'length', 'order': rand/sort/reversed, time_taken, passes]
    sample_data = []
    passes = 0
    available_sort = ['bubble_sort']
    def __init__(self, length=100, sample_range=None, sorted=False, reverse=False):
        """
        :param length: number of samples to draw.
        :param sample_range: draw from range(sample_range); defaults to range(100, 1000).
        :param sorted: pre-sort the data before testing (name kept for
            interface compatibility, although it shadows the builtin).
        :param reverse: when pre-sorting, sort in descending order.
        """
        # The original two branches differed only in the sampled range;
        # everything else was duplicated, so they are merged here.
        pool = range(100, 1000) if sample_range is None else range(sample_range)
        self.sample_data = random.sample(pool, length)
        if sorted:
            self.sample_data.sort(reverse=reverse)
        self.call_test()
    def call_test(self):
        """Run the sort under test and forward its pass count to export_metrics."""
        res = BubbleSort(self.sample_data)
        self.export_metrics(res.passes)
    def export_metrics(self, res):
        """Hook for exporting metrics; intentionally a no-op placeholder."""
        pass
e6357e8a67f7f2c18a86d82e6d55d2cc0e11652c | 8,218 | py | Python | Chemistry/PDB/pdb_chain.py | continuous-symmetry-measure/pdb_prep | 6cbd2ca43b4a52c998fdbd7e50a6ac48f991862f | [
"BSD-2-Clause"
] | null | null | null | Chemistry/PDB/pdb_chain.py | continuous-symmetry-measure/pdb_prep | 6cbd2ca43b4a52c998fdbd7e50a6ac48f991862f | [
"BSD-2-Clause"
] | 4 | 2018-12-06T08:57:24.000Z | 2019-04-10T19:53:50.000Z | Chemistry/PDB/pdb_chain.py | continuous-symmetry-measure/pdb_prep | 6cbd2ca43b4a52c998fdbd7e50a6ac48f991862f | [
"BSD-2-Clause"
] | 1 | 2021-11-21T10:18:17.000Z | 2021-11-21T10:18:17.000Z | from Chemistry.PDB.pdb_atom import pdb_atom
from Chemistry.PDB.pdb_constants import pdb_constants
"""
* pdb is list of pdb_models
* pdb_model is a list of pdb_chains objects
* pdb _chain is a list of pdb_atom objects
* atom ahs attributes such as:
- atom_serial_number
- atom_name
- resname
- resseq
- chain_id
...
* pdb_residue *is not* being used in the pdb object (use int in chain_utils)
* use chain_utils to preform operations on a given pdb_chain objects
"""
class pdb_chain(list):
"""
pdb_chain is a list of pdb_atoms objects
it has chain_id attribute
"""
@classmethod
@classmethod
def create_ter_line(cls, last_atom_line):
"""
this method will create TER line according to the last atom of the chain
COLUMNS DATA TYPE FIELD DEFINITION
-------------------------------------------------------------------------
1 - 6 Record name "TER "
7 - 11 Integer serial Serial number.
18 - 20 Residue name resName Residue name.
22 Character chainID Chain identifier.
23 - 26 Integer resSeq Residue sequence number.
27 AChar iCode Insertion code.
:param last_atom_line:
:return:
"""
atom = pdb_atom(last_atom_line)
# v v v
# 12345678901234567890123456
# TER 25 GLY 3
# 12345678901234567890123456
# ATOM 24 HA3 GLY 3
ter_serial_number = int(atom.atom_serial_number) + 1
# TER 2100 ALA A 775
st = "{:<5}{:>6} {:>3} {:>1}{:>4}{:>1}"
ter_line = st.format('TER', ter_serial_number, atom.resname, atom.chain_id,
atom.resseq, atom.icode)
return ter_line
class pdb_residue(list):
"""
pdb_residue is *not* part of the pdb object
This class represent a residue (single amino acid)
the pdb_residue is a list of atoms which has the following attributes:
resname,resseq,icode
"""
@classmethod
def is_eqvivalent_residues(self, residue1, residue2):
"""
the residues are eqvivalent if bothe have the same resname and resseq
:param residue1:
:param residue2:
:return:
"""
ret_val = residue1.resname == residue2.resname and \
residue1.resseq == residue2.resseq
return ret_val
# noinspection PyGlobalUndefined
class chain_utils:
"""
the chain utils is a clas which will help TODO
"""
def chain2residues_list(self):
"""
expexted_number_of_residues: the expexted_number_of_residues
(if we know it from othe chins in the model)
:return: list of lists of atoms - the internal lists are residues (aminos)
"""
chain = self.chain
if len(chain) < 1:
return []
if len(self.residues) != 0:
return self.residues # this method alredy ran
first_atom = chain[0]
curr_residue = []
for i, curr_atom in enumerate(chain):
if first_atom.has_the_same_residue_as(curr_atom):
curr_residue.append(curr_atom)
else:
self.residues.append(pdb_residue(curr_residue))
curr_residue = [curr_atom]
first_atom = curr_atom
self.residues.append(pdb_residue(curr_residue))
return self.residues
def get_atoms_gaps(self):
"""TODO"""
return []
# def get_chain_gaps(self, expexted_number_of_residues=None):
# """TODO"""
# chain = self.chain
# self.residues_gaps = self.get_residues_gaps()
def remove_residues_by_resseqs(self, resseqs_list):
"""remove residues of the chain acoording to resseqs list"""
not_with_resseq = lambda a: a.resseq not in list(map(str, resseqs_list))
chain = pdb_chain(list(filter(not_with_resseq, self.chain)))
chain.ter_line = self.chain.ter_line
return chain
def remove_atoms_by_atom_resseq_and_name(self, atoms_to_remove):
"""
:param atoms_to_remove: [(resname,resseq,atom_name), (resname,resseq,atom_name)...]
:return:
"""
global _chain
atoms_indexes_to_delete = []
chain = self.chain
for ai, atom in enumerate(chain):
if (atom.resname, atom.resseq, atom.atom_name) in atoms_to_remove:
# print ("{:>22} will be deleted from chain {}".format(
# str((atom.resname,atom.resseq,atom.atom_name)),self.chain.chain_id))
atoms_indexes_to_delete.append(ai)
# else:
# print("{:>22} not missing chain {}".format(
# str((atom.resname, atom.resseq, atom.atom_name)),self.chain.chain_id))
atoms = [atom for ai, atom in enumerate(chain) if ai not in atoms_indexes_to_delete]
_chain = pdb_chain(atoms, ter_line=pdb_chain.create_ter_line(str(atoms[-1])))
return _chain
def fix_atoms_serial_number(self, start=1):
"""fix_atoms_serial_number TODO _Tests"""
index = start
for i, stom in enumerate(self.chain):
self.chain[i].atom_serial_number = index
index += 1
| 37.870968 | 121 | 0.578851 | from Chemistry.PDB.pdb_atom import pdb_atom
from Chemistry.PDB.pdb_constants import pdb_constants
"""
* pdb is list of pdb_models
* pdb_model is a list of pdb_chains objects
* pdb _chain is a list of pdb_atom objects
* atom ahs attributes such as:
- atom_serial_number
- atom_name
- resname
- resseq
- chain_id
...
* pdb_residue *is not* being used in the pdb object (use int in chain_utils)
* use chain_utils to preform operations on a given pdb_chain objects
"""
class pdb_chain(list):
    """
    pdb_chain is a list of pdb_atom objects for a single chain.
    It carries a chain_id attribute (taken from the first atom) and an
    optional ter_line (the TER record string appended when rendering).
    """
    @classmethod
    def from_pdb_atoms_lines(cls, pdb_atoms_lines, ter_line=None):
        """Parse raw ATOM record lines into pdb_atom objects and build a chain."""
        pdb_atoms = [pdb_atom(line) for line in pdb_atoms_lines]
        return cls(pdb_atoms, ter_line)
    def __init__(self, pdb_atoms, ter_line=None):
        """
        :param pdb_atoms: non-empty sequence of pdb_atom objects; every atom
            must carry the same chain_id, otherwise ValueError is raised.
        :param ter_line: optional TER record string used by __str__.
        """
        self.pdb_const_str = pdb_constants()
        self.chain_id = pdb_atoms[0].chain_id
        for pdbatom in pdb_atoms:
            if not self.chain_id == pdbatom.chain_id:
                raise ValueError("expecting chain_id:'{}', but I got:'{}' in atom:'{}'".
                                 format(self.chain_id, pdbatom.chain_id, pdbatom.pdb_line))
            self.append(pdbatom)
        self.ter_line = ter_line
    def __str__(self):
        """Render the chain as newline-joined atom lines, plus the TER line if set."""
        chain_str = "\n".join(map(str, self))
        if self.ter_line:
            chain_str += "\n" + self.ter_line
        return chain_str
    def get_number_of_atoms(self):
        """Return how many atoms this chain holds."""
        return len(self)
    @classmethod
    def create_ter_line(cls, last_atom_line):
        """
        this method will create TER line according to the last atom of the chain
        COLUMNS        DATA  TYPE    FIELD           DEFINITION
        -------------------------------------------------------------------------
         1 -  6        Record name   "TER   "
         7 - 11        Integer       serial          Serial number.
        18 - 20        Residue name  resName         Residue name.
        22             Character     chainID         Chain identifier.
        23 - 26        Integer       resSeq          Residue sequence number.
        27             AChar         iCode           Insertion code.
        :param last_atom_line:
        :return:
        """
        atom = pdb_atom(last_atom_line)
        #                      v    v  v
        # 12345678901234567890123456
        # TER      25      GLY     3
        # 12345678901234567890123456
        # ATOM     24  HA3 GLY     3
        # TER serial is the last atom's serial + 1
        ter_serial_number = int(atom.atom_serial_number) + 1
        # TER    2100      ALA A 775
        st = "{:<5}{:>6}      {:>3} {:>1}{:>4}{:>1}"
        ter_line = st.format('TER', ter_serial_number, atom.resname, atom.chain_id,
                             atom.resseq, atom.icode)
        return ter_line
class pdb_residue(list):
    """
    pdb_residue is *not* part of the pdb object.
    Represents a single residue (amino acid): a list of atoms that all share
    the attributes resname, resseq and icode.
    """
    def __init__(self, residue_atoms):
        """
        :param residue_atoms: non-empty sequence of atoms; all must share the
            same resname and resseq, otherwise ValueError is raised.
        """
        self.atoms = residue_atoms
        # collect the per-atom residue identifiers to validate consistency
        reslst_names = [a.resname for a in residue_atoms]
        reslst_seqs = [a.resseq for a in residue_atoms]
        if len(set(reslst_names)) > 1:
            raise ValueError("resname differ - Not a residue: {} ressec={}".format(set(reslst_names), set(reslst_seqs)))
        if len(set(reslst_seqs)) > 1:
            raise ValueError("resseq differ - Not a residue: {}".format(set(reslst_seqs)))
        self.resname = residue_atoms[0].resname
        self.resseq = int(residue_atoms[0].resseq)
        self.icode = residue_atoms[0].icode
        self.extend(residue_atoms)
    def short_str(self):
        """One-line summary: '<resname> <resseq> <icode>'."""
        return "{} {} {}".format(self.resname, self.resseq, self.icode)
    def __str__(self):
        """Header with resname/resseq followed by one line per atom."""
        chain_str = "\n".join(map(str, self))
        # BUG FIX: the original format string had only two placeholders for
        # three arguments, so chain_str was silently dropped from the output.
        return "{} {}->\n{}".format(self.resname, self.resseq, chain_str)
    @classmethod
    def is_eqvivalent_residues(self, residue1, residue2):
        """
        The residues are equivalent if both have the same resname and resseq.
        (Method name keeps the original spelling for backward compatibility.)
        :param residue1:
        :param residue2:
        :return:
        """
        ret_val = residue1.resname == residue2.resname and \
                  residue1.resseq == residue2.resseq
        return ret_val
class chain_utils:
    """
    Helper with operations on a single pdb_chain: grouping atoms into
    residues, detecting gaps, and removing atoms/residues.
    """
    def __init__(self, chain):
        """
        :param chain: the pdb_chain instance this helper operates on.
        """
        self.chain = chain
        self.atoms_gaps = self.get_atoms_gaps()
        self.residues_gaps = []
        self.residues = []
    def chain2residues_list(self):
        """
        Group the chain's atoms into pdb_residue objects.
        The result is computed once and cached in self.residues.
        :return: list of pdb_residue objects (each a list of atoms)
        """
        chain = self.chain
        if len(chain) < 1:
            return []
        if len(self.residues) != 0:
            return self.residues  # this method already ran; return cached result
        first_atom = chain[0]
        curr_residue = []
        for curr_atom in chain:
            if first_atom.has_the_same_residue_as(curr_atom):
                curr_residue.append(curr_atom)
            else:
                # residue boundary: flush the current group and start a new one
                self.residues.append(pdb_residue(curr_residue))
                curr_residue = [curr_atom]
                first_atom = curr_atom
        self.residues.append(pdb_residue(curr_residue))
        return self.residues
    def get_residues_gaps(self, expexted_number_of_residues=None):
        """
        Return the sorted resseq numbers missing from 1..expected count.
        (Parameter name keeps the original spelling for compatibility.)
        :param expexted_number_of_residues: expected residue count; defaults
            to the last residue's resseq.
        """
        residues = self.chain2residues_list()
        if expexted_number_of_residues is None:
            expexted_number_of_residues = residues[-1].resseq
        resseqs = [r.resseq for r in residues]
        expexted_resseq_list = list(range(1, expexted_number_of_residues + 1))
        resseqs_gaps = sorted(set(expexted_resseq_list) - set(resseqs))
        return resseqs_gaps
    def get_atoms_gaps(self):
        """TODO"""
        return []
    # def get_chain_gaps(self, expexted_number_of_residues=None):
    #     """TODO"""
    #     chain = self.chain
    #     self.residues_gaps = self.get_residues_gaps()
    def remove_residues_by_resseqs(self, resseqs_list):
        """remove residues of the chain according to resseqs list"""
        not_with_resseq = lambda a: a.resseq not in list(map(str, resseqs_list))
        chain = pdb_chain(list(filter(not_with_resseq, self.chain)))
        chain.ter_line = self.chain.ter_line
        return chain
    def remove_atoms_by_atom_resseq_and_name(self, atoms_to_remove):
        """
        :param atoms_to_remove: [(resname,resseq,atom_name), (resname,resseq,atom_name)...]
        :return: a new pdb_chain without the listed atoms (assumes at least
            one atom remains, since a pdb_chain cannot be empty)
        """
        # BUG FIX: dropped the needless `global _chain`, which leaked the
        # result into a module-level variable as a side effect.
        atoms_indexes_to_delete = []
        chain = self.chain
        for ai, atom in enumerate(chain):
            if (atom.resname, atom.resseq, atom.atom_name) in atoms_to_remove:
                atoms_indexes_to_delete.append(ai)
        atoms = [atom for ai, atom in enumerate(chain) if ai not in atoms_indexes_to_delete]
        new_chain = pdb_chain(atoms, ter_line=pdb_chain.create_ter_line(str(atoms[-1])))
        return new_chain
    def fix_atoms_serial_number(self, start=1):
        """Renumber the chain's atoms sequentially starting at `start` (in place)."""
        for index, atom in enumerate(self.chain, start):
            atom.atom_serial_number = index
| 2,332 | 0 | 259 |
dc0d3c12e18f1dcce50a37e49185dad081433687 | 2,660 | py | Python | pyalgotrade/optimizer/server.py | cdyfng/pyalgotrade | c9e8d950c8d911d5f1bed7d821c4cf6fd37f3a3c | [
"Apache-2.0"
] | 1,000 | 2016-01-26T12:10:11.000Z | 2022-03-01T23:59:50.000Z | pyalgotrade/optimizer/server.py | cdyfng/pyalgotrade | c9e8d950c8d911d5f1bed7d821c4cf6fd37f3a3c | [
"Apache-2.0"
] | 22 | 2016-01-26T15:14:09.000Z | 2019-01-30T02:36:38.000Z | pyalgotrade/optimizer/server.py | cdyfng/pyalgotrade | c9e8d950c8d911d5f1bed7d821c4cf6fd37f3a3c | [
"Apache-2.0"
] | 613 | 2016-01-27T01:02:30.000Z | 2022-03-21T01:38:58.000Z | # PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import pyalgotrade.logger
from pyalgotrade.optimizer import base
from pyalgotrade.optimizer import xmlrpcserver
logger = pyalgotrade.logger.getLogger(__name__)
class Results(object):
"""The results of the strategy executions."""
def getParameters(self):
"""Returns a sequence of parameter values."""
return self.__parameters
def getResult(self):
"""Returns the result for a given set of parameters."""
return self.__result
def serve(barFeed, strategyParameters, address, port):
"""Executes a server that will provide bars and strategy parameters for workers to use.
:param barFeed: The bar feed that each worker will use to backtest the strategy.
:type barFeed: :class:`pyalgotrade.barfeed.BarFeed`.
:param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
:param address: The address to listen for incoming worker connections.
:type address: string.
:param port: The port to listen for incoming worker connections.
:type port: int.
:rtype: A :class:`Results` instance with the best results found or None if no results were obtained.
"""
paramSource = base.ParameterSource(strategyParameters)
resultSinc = base.ResultSinc()
s = xmlrpcserver.Server(paramSource, resultSinc, barFeed, address, port)
logger.info("Starting server")
s.serve()
logger.info("Server finished")
ret = None
bestResult, bestParameters = resultSinc.getBest()
if bestResult is not None:
logger.info("Best final result %s with parameters %s" % (bestResult, bestParameters.args))
ret = Results(bestParameters.args, bestResult)
else:
logger.error("No results. All jobs failed or no jobs were processed.")
return ret
| 37.464789 | 158 | 0.727444 | # PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import pyalgotrade.logger
from pyalgotrade.optimizer import base
from pyalgotrade.optimizer import xmlrpcserver
logger = pyalgotrade.logger.getLogger(__name__)
class Results(object):
    """Immutable holder for the outcome of the strategy executions."""
    def __init__(self, parameters, result):
        self._parameters = parameters
        self._result = result
    def getParameters(self):
        """Returns a sequence of parameter values."""
        return self._parameters
    def getResult(self):
        """Returns the result for a given set of parameters."""
        return self._result
def serve(barFeed, strategyParameters, address, port):
    """Executes a server that will provide bars and strategy parameters for workers to use.
    :param barFeed: The bar feed that each worker will use to backtest the strategy.
    :type barFeed: :class:`pyalgotrade.barfeed.BarFeed`.
    :param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
    :param address: The address to listen for incoming worker connections.
    :type address: string.
    :param port: The port to listen for incoming worker connections.
    :type port: int.
    :rtype: A :class:`Results` instance with the best results found or None if no results were obtained.
    """
    # Wrap the raw parameter tuples and create the sink the workers report into.
    paramSource = base.ParameterSource(strategyParameters)
    resultSinc = base.ResultSinc()
    s = xmlrpcserver.Server(paramSource, resultSinc, barFeed, address, port)
    logger.info("Starting server")
    # NOTE(review): presumably blocks until serving completes - confirm in xmlrpcserver.Server.
    s.serve()
    logger.info("Server finished")
    ret = None
    # Pick the best (result, parameters) pair collected from the workers.
    bestResult, bestParameters = resultSinc.getBest()
    if bestResult is not None:
        logger.info("Best final result %s with parameters %s" % (bestResult, bestParameters.args))
        ret = Results(bestParameters.args, bestResult)
    else:
        logger.error("No results. All jobs failed or no jobs were processed.")
    return ret
| 88 | 0 | 26 |
77e9bc4463add5349ce1d5ad272d47f7ead0d6f3 | 8,026 | py | Python | template.py | Ajasra/stylegan2 | c52440e927c60d14c73185f33d26be8c99944d07 | [
"BSD-Source-Code"
] | 1 | 2021-09-02T09:41:16.000Z | 2021-09-02T09:41:16.000Z | template.py | Ajasra/stylegan2 | c52440e927c60d14c73185f33d26be8c99944d07 | [
"BSD-Source-Code"
] | null | null | null | template.py | Ajasra/stylegan2 | c52440e927c60d14c73185f33d26be8c99944d07 | [
"BSD-Source-Code"
] | null | null | null | # Add relative directory Library to import path, so we can import the SpoutSDK.pyd library.
# Feel free to remove these if you put the SpoutSDK.pyd file in the same directory as the python scripts.
import sys
sys.path.append('Library')
import numpy as np
import argparse
import time
import SpoutSDK
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GL.framebufferobjects import *
from OpenGL.GLU import *
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import re
import sys
import time
import socket
import cv2
import pretrained_networks
spout_type = "output"
spout_size = [512, 512]
silent = True
spout_name = "outputGan"
""" here your functions """
"""main"""
if __name__ == '__main__':
main()
| 36.481818 | 212 | 0.672315 | # Add relative directory Library to import path, so we can import the SpoutSDK.pyd library.
# Feel free to remove these if you put the SpoutSDK.pyd file in the same directory as the python scripts.
import sys
sys.path.append('Library')
import numpy as np
import argparse
import time
import SpoutSDK
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GL.framebufferobjects import *
from OpenGL.GLU import *
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import re
import sys
import time
import socket
import cv2
import pretrained_networks
spout_type = "output"
spout_size = [512, 512]
silent = True
spout_name = "outputGan"
def msg_to_bytes(msg):
    """Encode a text message as UTF-8 bytes for sending over the UDP socket."""
    return bytes(msg, 'utf-8')
""" here your functions """
def main_pipeline(data):
    """Per-frame processing placeholder: currently returns its input unchanged."""
    return data
"""main"""
def main():
    """
    Entry point: sets up UDP signalling, loads StyleGAN2 networks, creates a
    pygame/OpenGL window plus Spout sender/receiver, then streams frames
    through main_pipeline() in an endless loop.
    NOTE(review): `network_pkl` is used below but never defined in this file -
    it must be injected (e.g. as a module global) before main() runs, or the
    network-loading lines will raise NameError.
    """
    # setup UDP
    udp_ip = "127.0.0.1"
    udp_port = 7000
    rec_port = 6000
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        print('Setting up UDP on ip={} and port={}'.format(udp_ip, udp_port))
    except:  # NOTE(review): bare except hides the actual socket error
        print('Failed to create socket')
        sys.exit()
    try:
        # bind on all interfaces to receive control messages
        sock.bind(('', rec_port))
        print('Listening on ip={} and port={}'.format(udp_ip, rec_port))
    except:  # NOTE(review): bare except hides the actual bind error
        print('Bind failed')
        sys.exit()
    # tell the controller we are up
    starting_msg = "Ready"
    sock.sendto( msg_to_bytes(starting_msg), (udp_ip, udp_port))
    # load network and prepare to generate
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    background = True
    print()
    print('LISTENING')
    seed = 1
    # window details (taken from the module-level spout configuration)
    width = spout_size[0]
    height = spout_size[1]
    display = (width,height)
    req_type = spout_type
    receiverName = "none"
    senderName = spout_name
    #silent = args.silent
    # window setup
    pygame.init()
    pygame.display.set_caption(senderName)
    pygame.display.set_mode(display, DOUBLEBUF|OPENGL)
    # OpenGL init: orthographic 2D projection matching the window size
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(0,width,height,0,1,-1)
    glMatrixMode(GL_MODELVIEW)
    glDisable(GL_DEPTH_TEST)
    glClearColor(0.0,0.0,0.0,0.0)
    glEnable(GL_TEXTURE_2D)
    if req_type == 'input' or req_type == 'input-output':
        # init spout receiver
        spoutReceiverWidth = width
        spoutReceiverHeight = height
        # create spout receiver
        spoutReceiver = SpoutSDK.SpoutReceiver()
        # Its signature in c++ looks like this: bool pyCreateReceiver(const char* theName, unsigned int theWidth, unsigned int theHeight, bool bUseActive);
        spoutReceiver.pyCreateReceiver(receiverName,spoutReceiverWidth,spoutReceiverHeight, False)
        # create textures for spout receiver and spout sender
        textureReceiveID = glGenTextures(1)
        # initalise receiver texture
        glBindTexture(GL_TEXTURE_2D, textureReceiveID)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        # copy data into texture
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, spoutReceiverWidth, spoutReceiverHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, None )
        glBindTexture(GL_TEXTURE_2D, 0)
    if req_type == 'output' or req_type == 'input-output':
        # init spout sender
        spoutSender = SpoutSDK.SpoutSender()
        spoutSenderWidth = width
        spoutSenderHeight = height
        # Its signature in c++ looks like this: bool CreateSender(const char *Sendername, unsigned int width, unsigned int height, DWORD dwFormat = 0);
        spoutSender.CreateSender(senderName, spoutSenderWidth, spoutSenderHeight, 0)
        # create textures for spout receiver and spout sender
        textureSendID = glGenTextures(1)
    # loop for graph frame by frame
    while(True):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # NOTE(review): spoutReceiver only exists in 'input' modes;
                # quitting in 'output' mode would raise NameError here.
                spoutReceiver.ReleaseReceiver()
                pygame.quit()
                quit()
        if req_type == 'input' or req_type == 'input-output':
            # receive texture
            # Its signature in c++ looks like this: bool pyReceiveTexture(const char* theName, unsigned int theWidth, unsigned int theHeight, GLuint TextureID, GLuint TextureTarget, bool bInvert, GLuint HostFBO);
            if sys.version_info[1] != 7:
                spoutReceiver.pyReceiveTexture(receiverName, spoutReceiverWidth, spoutReceiverHeight, textureReceiveID, GL_TEXTURE_2D, False, 0)
            else:
                spoutReceiver.pyReceiveTexture(receiverName, spoutReceiverWidth, spoutReceiverHeight, textureReceiveID.item(), GL_TEXTURE_2D, False, 0)
            glBindTexture(GL_TEXTURE_2D, textureReceiveID)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
            # copy pixel byte array from received texture
            data = glGetTexImage(GL_TEXTURE_2D, 0, GL_RGB, GL_UNSIGNED_BYTE, outputType=None) #Using GL_RGB can use GL_RGBA
            glBindTexture(GL_TEXTURE_2D, 0)
            # swap width and height data around due to oddness with glGetTextImage. http://permalink.gmane.org/gmane.comp.python.opengl.user/2423
            data.shape = (data.shape[1], data.shape[0], data.shape[2])
        else:
            # no input source: feed the pipeline a solid white frame
            data = np.ones((width,height,3))*255
        # call our main function
        output = main_pipeline(data)
        # setup the texture so we can load the output into it
        glBindTexture(GL_TEXTURE_2D, textureSendID);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        # copy output into texture
        glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, output )
        # setup window to draw to screen
        glActiveTexture(GL_TEXTURE0)
        # clean start
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
        # reset drawing perspective
        glLoadIdentity()
        # draw texture on screen as a full-window quad
        glBegin(GL_QUADS)
        glTexCoord(0,0)
        glVertex2f(0,0)
        glTexCoord(1,0)
        glVertex2f(width,0)
        glTexCoord(1,1)
        glVertex2f(width,height)
        glTexCoord(0,1)
        glVertex2f(0,height)
        glEnd()
        if silent:
            pygame.display.iconify()
        # update window
        pygame.display.flip()
        if req_type == 'output' or req_type == 'input-output':
            # Send texture to spout...
            # Its signature in C++ looks like this: bool SendTexture(GLuint TextureID, GLuint TextureTarget, unsigned int width, unsigned int height, bool bInvert=true, GLuint HostFBO = 0);
            if sys.version_info[1] != 6:
                spoutSender.SendTexture(textureSendID, GL_TEXTURE_2D, spoutSenderWidth, spoutSenderHeight, False, 0)
            else:
                spoutSender.SendTexture(textureSendID.item(), GL_TEXTURE_2D, spoutSenderWidth, spoutSenderHeight, False, 0)
if __name__ == '__main__':
main()
| 7,206 | 0 | 67 |
a3c1fb21e65b08f6014be032e36d657b8b8b251f | 42,834 | py | Python | tests/actor_tests.py | pythonprobr/pythonbirds-en | 439e10828dd5241de2807f48a4b0c64ce8be69a9 | [
"MIT"
] | 2 | 2016-05-31T01:22:24.000Z | 2021-09-11T18:54:05.000Z | tests/actor_tests.py | eliceiacrepaldi/pythonbirds-en | 439e10828dd5241de2807f48a4b0c64ce8be69a9 | [
"MIT"
] | null | null | null | tests/actor_tests.py | eliceiacrepaldi/pythonbirds-en | 439e10828dd5241de2807f48a4b0c64ce8be69a9 | [
"MIT"
] | 5 | 2015-04-09T19:32:17.000Z | 2022-02-09T19:15:22.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest.case import TestCase
from actors import Actor, DESTROYED, ACTIVE, Obstacle, Pig, YellowBird, RedBird
class BirdBaseTests(TestCase):
    """
    Shared base class for bird test cases.
    Defines no tests itself; it only bundles the position/status assertion
    helper that concrete bird test classes reuse.
    """
    def assert_bird_position(self, x_expected, y_expected, status_expected, bird, time):
        """
        Check a bird's rounded position and its status at a given game time.
        WARNING: this is not a test method because it has not the suffix "test".
        :param x_expected: position x expected
        :param y_expected: position y expected
        :param status_expected: status expected
        :param bird: bird under test
        :param time: game's time
        """
        position = bird.calculate_position(time)
        real_x = position[0]
        real_y = position[1]
        self.assertEqual(x_expected, round(real_x), 'x real value = %s' % real_x)
        self.assertEqual(y_expected, round(real_y), 'y real value = %s' % real_y)
        self.assertEqual(status_expected, bird.status, '(x = %s, y = %s)' % (real_x, real_y))
| 59.32687 | 115 | 0.694588 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest.case import TestCase
from actors import Actor, DESTROYED, ACTIVE, Obstacle, Pig, YellowBird, RedBird
class ActorTestes(TestCase):
    """Unit tests for the base Actor: default values, movement, clashing and rendering."""
    def test_default_values(self):
        'Test initial values of Actor'
        ator = Actor()
        self.assertEqual(0, ator.x)
        self.assertEqual(0, ator.y)
        self.assertEqual(ACTIVE, ator.status)
        self.assertEqual('A', ator.character())
    def test_stored_values(self):
        'Tests if initial values are stored in Actor'
        ator = Actor(1, 2)
        self.assertEqual(1, ator.x)
        self.assertEqual(2, ator.y)
        self.assertEqual(ACTIVE, ator.status)
        self.assertEqual('A', ator.character())
    def test_actor_position(self):
        "Test that an ordinary actor doesn't move."
        ator = Actor()
        x, y = ator.calculate_position(0)
        self.assertEqual(0, x)
        self.assertEqual(0, y)
        ator = Actor(0.3, 0.5)
        x, y = ator.calculate_position(10)
        self.assertEqual(0.3, x)
        self.assertEqual(0.5, y)
    def test_clash_between_active_actors(self):
        """
        Initially actors have ACTIVE status. After clashing it must be changed to DESTROYED.
        assert_active_actors_clash asserts that 2 active actors clash when they are neighbors.
        """
        ator = Actor(2, 2)  # Actor created has ACTIVE status
        ator2 = Actor(2, 2)
        self.assert_active_actors_clash(ator, ator2)
        self.assert_active_actors_clash(Actor(2, 2), Actor(2, 3))
        self.assert_active_actors_clash(Actor(2, 2), Actor(3, 3))
        self.assert_active_actors_clash(Actor(2, 2), Actor(3, 2))
        self.assert_active_actors_clash(Actor(2, 2), Actor(3, 1))
        self.assert_active_actors_clash(Actor(2, 2), Actor(2, 1))
        self.assert_active_actors_clash(Actor(2, 2), Actor(1, 1))
        self.assert_active_actors_clash(Actor(2, 2), Actor(1, 2))
        self.assert_active_actors_clash(Actor(2, 2), Actor(1, 3))
    def test_active_actors_clash_with_non_default_interval(self):
        # With interval equal to 2, not the default value 1, this clash must occur
        self.assert_active_actors_clash(Actor(2, 2), Actor(2, 4), 2)
    def test_active_actors_not_neighbors_do_not_clash(self):
        'Tests that actors which are not neighbors do not clash'
        self.assert_not_clashing(Actor(2, 2), Actor(2, 4))
        self.assert_not_clashing(Actor(2, 2), Actor(3, 4))
        self.assert_not_clashing(Actor(2, 2), Actor(4, 2))
        self.assert_not_clashing(Actor(2, 2), Actor(3, 0))
        self.assert_not_clashing(Actor(2, 2), Actor(2, 0))
        self.assert_not_clashing(Actor(2, 2), Actor(0, 1))
        self.assert_not_clashing(Actor(2, 2), Actor(0, 2))
        self.assert_not_clashing(Actor(2, 2), Actor(0, 4))
    def test_actor_destroyed_not_clashing(self):
        "Tests that a destroyed actor can't clash, even if it is neighbor of another actor"
        ator = Actor(2, 2)
        ator.clash(ator, 0)  # clashing an actor with itself sets its status to DESTROYED
        ator2 = Actor(2, 2)
        self.assert_not_clashing(ator, ator2)
        self.assert_not_clashing(Actor(2, 3), ator)
        self.assert_not_clashing(Actor(3, 3), ator)
        self.assert_not_clashing(Actor(3, 2), ator)
        self.assert_not_clashing(Actor(3, 1), ator)
        self.assert_not_clashing(Actor(2, 1), ator)
        self.assert_not_clashing(Actor(1, 1), ator)
        self.assert_not_clashing(Actor(1, 2), ator)
        self.assert_not_clashing(Actor(1, 3), ator)
        self.assert_not_clashing(ator2, ator)
        self.assert_not_clashing(Actor(2, 3), ator)
        self.assert_not_clashing(Actor(3, 3), ator)
        self.assert_not_clashing(Actor(3, 2), ator)
        self.assert_not_clashing(Actor(3, 1), ator)
        self.assert_not_clashing(Actor(2, 1), ator)
        self.assert_not_clashing(Actor(1, 1), ator)
        self.assert_not_clashing(Actor(1, 2), ator)
        self.assert_not_clashing(Actor(1, 3), ator)
    def test_character(self):
        'Testing char for ACTIVE and DESTROYED status'
        ator = Actor()
        self.assertEqual('A', ator.character())
        actor_on_same_position = Actor()
        ator.clash(actor_on_same_position)
        self.assertEqual(' ', ator.character())
    def assert_active_actors_clash(self, actor, another_actor, interval=1):
        """
        Asserts clash happens between 2 active actors.
        WARNING: this is not a test method because it has not the suffix "test".
        Its purpose is to encapsulate logic to be reused in tests.
        """
        # Checking actors status just before clash
        self.assertEqual(actor.status, ACTIVE, 'Status should be ACTIVE before clashing')
        self.assertEqual(another_actor.status, ACTIVE, 'Status should be ACTIVE before clashing')
        actor.clash(another_actor, interval)
        # Checking actors status after clash
        self.assertEqual(another_actor.status, DESTROYED, 'Status should be DESTROYED after clashing')
        self.assertEqual(actor.status, DESTROYED, 'Status should be DESTROYED after clashing')
    def assert_not_clashing(self, actor, another_actor):
        """
        Asserts actors do not clash.
        WARNING: this is not a test method because it has not the suffix "test".
        Its purpose is to encapsulate logic to be reused in tests.
        """
        # storing statuses before clashing
        actor_status = actor.status
        another_actor_status = another_actor.status
        actor.clash(another_actor)
        # Asserting statuses don't change
        self.assertEqual(actor_status, actor.status, 'Status should be the same after clashing')
        self.assertEqual(another_actor_status, another_actor.status, 'Status should be the same after clashing')
class ObstacleTestes(TestCase):
    """Tests for the Obstacle actor's drawing character before and after a clash."""

    def teste_status(self):
        """An intact obstacle draws as 'O'; once hit it draws as ' '."""
        obstacle = Obstacle()
        self.assertEqual('O', obstacle.character())
        other_on_same_position = Actor()
        obstacle.clash(other_on_same_position)
        self.assertEqual(' ', obstacle.character())
class PigTestes(TestCase):
    """Tests for the Pig actor's drawing character before and after a clash."""

    def test_status(self):
        """A living pig draws as '@'; once hit it draws as '+'."""
        pig = Pig()
        self.assertEqual('@', pig.character())
        other_on_same_position = Actor()
        pig.clash(other_on_same_position)
        self.assertEqual('+', pig.character())
class BirdBaseTests(TestCase):
    """
    Base test class for birds.

    It defines no tests of its own; its only purpose is to encapsulate the
    assertion logic shared by the concrete bird test classes.
    """

    def assert_bird_position(self, x_expected, y_expected, status_expected, bird, time):
        """
        Assert the bird's rounded position and its status at a given game time.

        WARNING: this is not a test method because it has not the suffix "test".

        :param x_expected: position x expected
        :param y_expected: position y expected
        :param status_expected: status expected
        :param bird: bird under test
        :param time: game's time
        """
        x_real, y_real = bird.calculate_position(time)
        self.assertEqual(x_expected, round(x_real), 'x real value = %s' % x_real)
        self.assertEqual(y_expected, round(y_real), 'y real value = %s' % y_real)
        self.assertEqual(status_expected, bird.status, '(x = %s, y = %s)' % (x_real, y_real))
class RedBirdTests(BirdBaseTests):
    """Tests for RedBird: drawing characters, velocity, launch state and ground clash."""

    def test_status(self):
        """An active red bird draws as 'R'; after a clash it draws as 'r'."""
        red_bird = RedBird(1, 1)
        self.assertEqual('R', red_bird.character())
        other_on_same_position = Actor()
        red_bird.clash(other_on_same_position)
        self.assertEqual('r', red_bird.character())

    def test_velocity(self):
        """A red bird's launch velocity is 20."""
        self.assertEqual(20, RedBird.velocity)

    def test_launched(self):
        """launched() flips from False to True once launch() is executed."""
        red_bird = RedBird(1, 1)
        self.assertFalse(red_bird.launched(),
                         'Once launch method was not executed, should return False')
        red_bird.launch(0, 0)
        self.assertTrue(red_bird.launched(),
                        'Once launch method was executed on previous line, should return True')

    def teste_ground_clash(self):
        """A bird is destroyed by a ground clash whenever its y coordinate is <= 0."""
        for x, y in ((0, 0), (1, 0), (2, 0), (2, -0.1), (2, -5)):
            red_bird = RedBird(x, y)
            red_bird.ground_clash()
            self.assertEqual(DESTROYED, red_bird.status, 'Must clash with ground once y<=0')
class YellowBirdTests(BirdBaseTests):
def test_status(self):
yellow_bird = YellowBird(1, 1)
self.assertEqual('Y', yellow_bird.character())
actor_on_same_position = Actor()
yellow_bird.clash(actor_on_same_position)
self.assertEqual('y', yellow_bird.character())
def test_velocity(self):
self.assertEqual(30, YellowBird.velocity)
def test_lacamento_vertical(self):
"""
Tests a vertical launch. So only y changes while x keep the same value during the flight
:return:
"""
yellow_bird = YellowBird(1, 1)
yellow_bird.launch(90, 2) # bird launched at 90 grad on time 2 seconds
# moving up
self.assert_vertical_position(1, 2.0, yellow_bird)
self.assert_vertical_position(1, 2.01, yellow_bird)
self.assert_vertical_position(2, 2.02, yellow_bird)
self.assert_vertical_position(2, 2.03, yellow_bird)
self.assert_vertical_position(2, 2.04, yellow_bird)
self.assert_vertical_position(2, 2.05, yellow_bird)
# moving down
self.assert_vertical_position(46, 5.26, yellow_bird)
self.assert_vertical_position(46, 5.27, yellow_bird)
self.assert_vertical_position(46, 5.279999999999999, yellow_bird)
self.assert_vertical_position(46, 5.29, yellow_bird)
self.assert_vertical_position(46, 5.3, yellow_bird)
self.assert_vertical_position(46, 5.3100000000000005, yellow_bird)
self.assert_vertical_position(45, 5.32, yellow_bird)
self.assert_vertical_position(45, 5.33, yellow_bird)
self.assert_vertical_position(45, 5.34, yellow_bird)
self.assert_vertical_position(45, 5.35, yellow_bird)
self.assert_vertical_position(45, 5.359999999999999, yellow_bird)
self.assert_vertical_position(45, 5.37, yellow_bird)
self.assert_vertical_position(45, 5.38, yellow_bird)
self.assert_vertical_position(45, 5.390000000000001, yellow_bird)
self.assert_vertical_position(45, 5.4, yellow_bird)
self.assert_vertical_position(45, 5.41, yellow_bird)
self.assert_vertical_position(45, 5.42, yellow_bird)
self.assert_vertical_position(45, 5.43, yellow_bird)
self.assert_vertical_position(45, 5.4399999999999995, yellow_bird)
self.assert_vertical_position(45, 5.45, yellow_bird)
self.assert_vertical_position(45, 5.46, yellow_bird)
self.assert_vertical_position(45, 5.470000000000001, yellow_bird)
self.assert_vertical_position(45, 5.48, yellow_bird)
# preparing for ground impact
self.assert_vertical_position(1, 8.0, yellow_bird)
self.assert_vertical_position(1, 8.01, yellow_bird)
# clashing
self.assert_vertical_position(0, 8.04, yellow_bird)
yellow_bird.ground_clash()
self.assertEqual(DESTROYED, yellow_bird.status)
def test_45_grads_launch(self):
yellow_bird = YellowBird(1, 1)
yellow_bird.launch(45, 2) # bird launched with 45 grads on time 2 seconds
self.assert_bird_position(1, 1, ACTIVE, yellow_bird, 2.0)
self.assert_bird_position(1, 1, ACTIVE, yellow_bird, 2.01)
self.assert_bird_position(1, 1, ACTIVE, yellow_bird, 2.02)
self.assert_bird_position(2, 2, ACTIVE, yellow_bird, 2.03)
self.assert_bird_position(2, 2, ACTIVE, yellow_bird, 2.04)
self.assert_bird_position(2, 2, ACTIVE, yellow_bird, 2.05)
self.assert_bird_position(2, 2, ACTIVE, yellow_bird, 2.06)
self.assert_bird_position(2, 2, ACTIVE, yellow_bird, 2.07)
self.assert_bird_position(3, 3, ACTIVE, yellow_bird, 2.08)
self.assert_bird_position(3, 3, ACTIVE, yellow_bird, 2.09)
self.assert_bird_position(3, 3, ACTIVE, yellow_bird, 2.1)
self.assert_bird_position(3, 3, ACTIVE, yellow_bird, 2.11)
self.assert_bird_position(4, 3, ACTIVE, yellow_bird, 2.12)
self.assert_bird_position(4, 4, ACTIVE, yellow_bird, 2.13)
self.assert_bird_position(4, 4, ACTIVE, yellow_bird, 2.14)
self.assert_bird_position(4, 4, ACTIVE, yellow_bird, 2.15)
self.assert_bird_position(4, 4, ACTIVE, yellow_bird, 2.16)
self.assert_bird_position(5, 4, ACTIVE, yellow_bird, 2.17)
self.assert_bird_position(5, 5, ACTIVE, yellow_bird, 2.18)
self.assert_bird_position(5, 5, ACTIVE, yellow_bird, 2.19)
self.assert_bird_position(5, 5, ACTIVE, yellow_bird, 2.2)
self.assert_bird_position(5, 5, ACTIVE, yellow_bird, 2.21)
self.assert_bird_position(6, 5, ACTIVE, yellow_bird, 2.22)
self.assert_bird_position(6, 6, ACTIVE, yellow_bird, 2.23)
self.assert_bird_position(6, 6, ACTIVE, yellow_bird, 2.24)
self.assert_bird_position(6, 6, ACTIVE, yellow_bird, 2.25)
self.assert_bird_position(7, 6, ACTIVE, yellow_bird, 2.26)
self.assert_bird_position(7, 6, ACTIVE, yellow_bird, 2.27)
self.assert_bird_position(7, 7, ACTIVE, yellow_bird, 2.2800000000000002)
self.assert_bird_position(7, 7, ACTIVE, yellow_bird, 2.29)
self.assert_bird_position(7, 7, ACTIVE, yellow_bird, 2.3)
self.assert_bird_position(8, 7, ACTIVE, yellow_bird, 2.31)
self.assert_bird_position(8, 7, ACTIVE, yellow_bird, 2.32)
self.assert_bird_position(8, 7, ACTIVE, yellow_bird, 2.33)
self.assert_bird_position(8, 8, ACTIVE, yellow_bird, 2.34)
self.assert_bird_position(8, 8, ACTIVE, yellow_bird, 2.35)
self.assert_bird_position(9, 8, ACTIVE, yellow_bird, 2.36)
self.assert_bird_position(9, 8, ACTIVE, yellow_bird, 2.37)
self.assert_bird_position(9, 8, ACTIVE, yellow_bird, 2.38)
self.assert_bird_position(9, 9, ACTIVE, yellow_bird, 2.39)
self.assert_bird_position(9, 9, ACTIVE, yellow_bird, 2.4)
self.assert_bird_position(10, 9, ACTIVE, yellow_bird, 2.41)
self.assert_bird_position(10, 9, ACTIVE, yellow_bird, 2.42)
self.assert_bird_position(10, 9, ACTIVE, yellow_bird, 2.43)
self.assert_bird_position(10, 9, ACTIVE, yellow_bird, 2.44)
self.assert_bird_position(11, 10, ACTIVE, yellow_bird, 2.45)
self.assert_bird_position(11, 10, ACTIVE, yellow_bird, 2.46)
self.assert_bird_position(11, 10, ACTIVE, yellow_bird, 2.4699999999999998)
self.assert_bird_position(11, 10, ACTIVE, yellow_bird, 2.48)
self.assert_bird_position(11, 10, ACTIVE, yellow_bird, 2.49)
self.assert_bird_position(12, 10, ACTIVE, yellow_bird, 2.5)
self.assert_bird_position(12, 11, ACTIVE, yellow_bird, 2.51)
self.assert_bird_position(12, 11, ACTIVE, yellow_bird, 2.52)
self.assert_bird_position(12, 11, ACTIVE, yellow_bird, 2.5300000000000002)
self.assert_bird_position(12, 11, ACTIVE, yellow_bird, 2.54)
self.assert_bird_position(13, 11, ACTIVE, yellow_bird, 2.55)
self.assert_bird_position(13, 11, ACTIVE, yellow_bird, 2.56)
self.assert_bird_position(13, 11, ACTIVE, yellow_bird, 2.57)
self.assert_bird_position(13, 12, ACTIVE, yellow_bird, 2.58)
self.assert_bird_position(14, 12, ACTIVE, yellow_bird, 2.59)
self.assert_bird_position(14, 12, ACTIVE, yellow_bird, 2.6)
self.assert_bird_position(14, 12, ACTIVE, yellow_bird, 2.61)
self.assert_bird_position(14, 12, ACTIVE, yellow_bird, 2.62)
self.assert_bird_position(14, 12, ACTIVE, yellow_bird, 2.63)
self.assert_bird_position(15, 13, ACTIVE, yellow_bird, 2.64)
self.assert_bird_position(15, 13, ACTIVE, yellow_bird, 2.65)
self.assert_bird_position(15, 13, ACTIVE, yellow_bird, 2.66)
self.assert_bird_position(15, 13, ACTIVE, yellow_bird, 2.67)
self.assert_bird_position(15, 13, ACTIVE, yellow_bird, 2.68)
self.assert_bird_position(16, 13, ACTIVE, yellow_bird, 2.69)
self.assert_bird_position(16, 13, ACTIVE, yellow_bird, 2.7)
self.assert_bird_position(16, 14, ACTIVE, yellow_bird, 2.71)
self.assert_bird_position(16, 14, ACTIVE, yellow_bird, 2.7199999999999998)
self.assert_bird_position(16, 14, ACTIVE, yellow_bird, 2.73)
self.assert_bird_position(17, 14, ACTIVE, yellow_bird, 2.74)
self.assert_bird_position(17, 14, ACTIVE, yellow_bird, 2.75)
self.assert_bird_position(17, 14, ACTIVE, yellow_bird, 2.76)
self.assert_bird_position(17, 14, ACTIVE, yellow_bird, 2.77)
self.assert_bird_position(18, 15, ACTIVE, yellow_bird, 2.7800000000000002)
self.assert_bird_position(18, 15, ACTIVE, yellow_bird, 2.79)
self.assert_bird_position(18, 15, ACTIVE, yellow_bird, 2.8)
self.assert_bird_position(18, 15, ACTIVE, yellow_bird, 2.81)
self.assert_bird_position(18, 15, ACTIVE, yellow_bird, 2.82)
self.assert_bird_position(19, 15, ACTIVE, yellow_bird, 2.83)
self.assert_bird_position(19, 15, ACTIVE, yellow_bird, 2.84)
self.assert_bird_position(19, 15, ACTIVE, yellow_bird, 2.85)
self.assert_bird_position(19, 16, ACTIVE, yellow_bird, 2.86)
self.assert_bird_position(19, 16, ACTIVE, yellow_bird, 2.87)
self.assert_bird_position(20, 16, ACTIVE, yellow_bird, 2.88)
self.assert_bird_position(20, 16, ACTIVE, yellow_bird, 2.89)
self.assert_bird_position(20, 16, ACTIVE, yellow_bird, 2.9)
self.assert_bird_position(20, 16, ACTIVE, yellow_bird, 2.91)
self.assert_bird_position(21, 16, ACTIVE, yellow_bird, 2.92)
self.assert_bird_position(21, 16, ACTIVE, yellow_bird, 2.93)
self.assert_bird_position(21, 17, ACTIVE, yellow_bird, 2.94)
self.assert_bird_position(21, 17, ACTIVE, yellow_bird, 2.95)
self.assert_bird_position(21, 17, ACTIVE, yellow_bird, 2.96)
self.assert_bird_position(22, 17, ACTIVE, yellow_bird, 2.9699999999999998)
self.assert_bird_position(22, 17, ACTIVE, yellow_bird, 2.98)
self.assert_bird_position(22, 17, ACTIVE, yellow_bird, 2.99)
self.assert_bird_position(22, 17, ACTIVE, yellow_bird, 3.0)
self.assert_bird_position(22, 17, ACTIVE, yellow_bird, 3.01)
self.assert_bird_position(23, 17, ACTIVE, yellow_bird, 3.02)
self.assert_bird_position(23, 18, ACTIVE, yellow_bird, 3.0300000000000002)
self.assert_bird_position(23, 18, ACTIVE, yellow_bird, 3.04)
self.assert_bird_position(23, 18, ACTIVE, yellow_bird, 3.05)
self.assert_bird_position(23, 18, ACTIVE, yellow_bird, 3.06)
self.assert_bird_position(24, 18, ACTIVE, yellow_bird, 3.0700000000000003)
self.assert_bird_position(24, 18, ACTIVE, yellow_bird, 3.08)
self.assert_bird_position(24, 18, ACTIVE, yellow_bird, 3.09)
self.assert_bird_position(24, 18, ACTIVE, yellow_bird, 3.1)
self.assert_bird_position(25, 18, ACTIVE, yellow_bird, 3.1100000000000003)
self.assert_bird_position(25, 18, ACTIVE, yellow_bird, 3.12)
self.assert_bird_position(25, 19, ACTIVE, yellow_bird, 3.13)
self.assert_bird_position(25, 19, ACTIVE, yellow_bird, 3.1399999999999997)
self.assert_bird_position(25, 19, ACTIVE, yellow_bird, 3.15)
self.assert_bird_position(26, 19, ACTIVE, yellow_bird, 3.16)
self.assert_bird_position(26, 19, ACTIVE, yellow_bird, 3.17)
self.assert_bird_position(26, 19, ACTIVE, yellow_bird, 3.1799999999999997)
self.assert_bird_position(26, 19, ACTIVE, yellow_bird, 3.19)
self.assert_bird_position(26, 19, ACTIVE, yellow_bird, 3.2)
self.assert_bird_position(27, 19, ACTIVE, yellow_bird, 3.21)
self.assert_bird_position(27, 19, ACTIVE, yellow_bird, 3.2199999999999998)
self.assert_bird_position(27, 20, ACTIVE, yellow_bird, 3.23)
self.assert_bird_position(27, 20, ACTIVE, yellow_bird, 3.24)
self.assert_bird_position(28, 20, ACTIVE, yellow_bird, 3.25)
self.assert_bird_position(28, 20, ACTIVE, yellow_bird, 3.26)
self.assert_bird_position(28, 20, ACTIVE, yellow_bird, 3.27)
self.assert_bird_position(28, 20, ACTIVE, yellow_bird, 3.2800000000000002)
self.assert_bird_position(28, 20, ACTIVE, yellow_bird, 3.29)
self.assert_bird_position(29, 20, ACTIVE, yellow_bird, 3.3)
self.assert_bird_position(29, 20, ACTIVE, yellow_bird, 3.31)
self.assert_bird_position(29, 20, ACTIVE, yellow_bird, 3.3200000000000003)
self.assert_bird_position(29, 20, ACTIVE, yellow_bird, 3.33)
self.assert_bird_position(29, 20, ACTIVE, yellow_bird, 3.34)
self.assert_bird_position(30, 21, ACTIVE, yellow_bird, 3.35)
self.assert_bird_position(30, 21, ACTIVE, yellow_bird, 3.3600000000000003)
self.assert_bird_position(30, 21, ACTIVE, yellow_bird, 3.37)
self.assert_bird_position(30, 21, ACTIVE, yellow_bird, 3.38)
self.assert_bird_position(30, 21, ACTIVE, yellow_bird, 3.3899999999999997)
self.assert_bird_position(31, 21, ACTIVE, yellow_bird, 3.4)
self.assert_bird_position(31, 21, ACTIVE, yellow_bird, 3.41)
self.assert_bird_position(31, 21, ACTIVE, yellow_bird, 3.42)
self.assert_bird_position(31, 21, ACTIVE, yellow_bird, 3.4299999999999997)
self.assert_bird_position(32, 21, ACTIVE, yellow_bird, 3.44)
self.assert_bird_position(32, 21, ACTIVE, yellow_bird, 3.45)
self.assert_bird_position(32, 21, ACTIVE, yellow_bird, 3.46)
self.assert_bird_position(32, 21, ACTIVE, yellow_bird, 3.4699999999999998)
self.assert_bird_position(32, 21, ACTIVE, yellow_bird, 3.48)
self.assert_bird_position(33, 22, ACTIVE, yellow_bird, 3.49)
self.assert_bird_position(33, 22, ACTIVE, yellow_bird, 3.5)
self.assert_bird_position(33, 22, ACTIVE, yellow_bird, 3.51)
self.assert_bird_position(33, 22, ACTIVE, yellow_bird, 3.52)
self.assert_bird_position(33, 22, ACTIVE, yellow_bird, 3.5300000000000002)
self.assert_bird_position(34, 22, ACTIVE, yellow_bird, 3.54)
self.assert_bird_position(34, 22, ACTIVE, yellow_bird, 3.55)
self.assert_bird_position(34, 22, ACTIVE, yellow_bird, 3.56)
self.assert_bird_position(34, 22, ACTIVE, yellow_bird, 3.5700000000000003)
self.assert_bird_position(35, 22, ACTIVE, yellow_bird, 3.58)
self.assert_bird_position(35, 22, ACTIVE, yellow_bird, 3.59)
self.assert_bird_position(35, 22, ACTIVE, yellow_bird, 3.6)
self.assert_bird_position(35, 22, ACTIVE, yellow_bird, 3.6100000000000003)
self.assert_bird_position(35, 22, ACTIVE, yellow_bird, 3.62)
self.assert_bird_position(36, 22, ACTIVE, yellow_bird, 3.63)
self.assert_bird_position(36, 22, ACTIVE, yellow_bird, 3.6399999999999997)
self.assert_bird_position(36, 22, ACTIVE, yellow_bird, 3.65)
self.assert_bird_position(36, 22, ACTIVE, yellow_bird, 3.66)
self.assert_bird_position(36, 22, ACTIVE, yellow_bird, 3.67)
self.assert_bird_position(37, 23, ACTIVE, yellow_bird, 3.6799999999999997)
self.assert_bird_position(37, 23, ACTIVE, yellow_bird, 3.69)
self.assert_bird_position(37, 23, ACTIVE, yellow_bird, 3.7)
self.assert_bird_position(37, 23, ACTIVE, yellow_bird, 3.71)
self.assert_bird_position(37, 23, ACTIVE, yellow_bird, 3.7199999999999998)
self.assert_bird_position(38, 23, ACTIVE, yellow_bird, 3.73)
self.assert_bird_position(38, 23, ACTIVE, yellow_bird, 3.74)
self.assert_bird_position(38, 23, ACTIVE, yellow_bird, 3.75)
self.assert_bird_position(38, 23, ACTIVE, yellow_bird, 3.76)
self.assert_bird_position(39, 23, ACTIVE, yellow_bird, 3.77)
self.assert_bird_position(39, 23, ACTIVE, yellow_bird, 3.7800000000000002)
self.assert_bird_position(39, 23, ACTIVE, yellow_bird, 3.79)
self.assert_bird_position(39, 23, ACTIVE, yellow_bird, 3.8)
self.assert_bird_position(39, 23, ACTIVE, yellow_bird, 3.81)
self.assert_bird_position(40, 23, ACTIVE, yellow_bird, 3.8200000000000003)
self.assert_bird_position(40, 23, ACTIVE, yellow_bird, 3.83)
self.assert_bird_position(40, 23, ACTIVE, yellow_bird, 3.84)
self.assert_bird_position(40, 23, ACTIVE, yellow_bird, 3.85)
self.assert_bird_position(40, 23, ACTIVE, yellow_bird, 3.8600000000000003)
self.assert_bird_position(41, 23, ACTIVE, yellow_bird, 3.87)
self.assert_bird_position(41, 23, ACTIVE, yellow_bird, 3.88)
self.assert_bird_position(41, 23, ACTIVE, yellow_bird, 3.8899999999999997)
self.assert_bird_position(41, 23, ACTIVE, yellow_bird, 3.9)
self.assert_bird_position(42, 23, ACTIVE, yellow_bird, 3.91)
self.assert_bird_position(42, 23, ACTIVE, yellow_bird, 3.92)
self.assert_bird_position(42, 23, ACTIVE, yellow_bird, 3.9299999999999997)
self.assert_bird_position(42, 23, ACTIVE, yellow_bird, 3.94)
self.assert_bird_position(42, 23, ACTIVE, yellow_bird, 3.95)
self.assert_bird_position(43, 23, ACTIVE, yellow_bird, 3.96)
self.assert_bird_position(43, 23, ACTIVE, yellow_bird, 3.9699999999999998)
self.assert_bird_position(43, 23, ACTIVE, yellow_bird, 3.98)
self.assert_bird_position(43, 23, ACTIVE, yellow_bird, 3.99)
self.assert_bird_position(43, 23, ACTIVE, yellow_bird, 4.0)
self.assert_bird_position(44, 23, ACTIVE, yellow_bird, 4.01)
self.assert_bird_position(44, 23, ACTIVE, yellow_bird, 4.02)
self.assert_bird_position(44, 23, ACTIVE, yellow_bird, 4.029999999999999)
self.assert_bird_position(44, 23, ACTIVE, yellow_bird, 4.04)
self.assert_bird_position(44, 23, ACTIVE, yellow_bird, 4.05)
self.assert_bird_position(45, 23, ACTIVE, yellow_bird, 4.0600000000000005)
self.assert_bird_position(45, 23, ACTIVE, yellow_bird, 4.07)
self.assert_bird_position(45, 23, ACTIVE, yellow_bird, 4.08)
self.assert_bird_position(45, 23, ACTIVE, yellow_bird, 4.09)
self.assert_bird_position(46, 23, ACTIVE, yellow_bird, 4.1)
self.assert_bird_position(46, 23, ACTIVE, yellow_bird, 4.109999999999999)
self.assert_bird_position(46, 23, ACTIVE, yellow_bird, 4.12)
self.assert_bird_position(46, 23, ACTIVE, yellow_bird, 4.13)
self.assert_bird_position(46, 23, ACTIVE, yellow_bird, 4.140000000000001)
self.assert_bird_position(47, 23, ACTIVE, yellow_bird, 4.15)
self.assert_bird_position(47, 23, ACTIVE, yellow_bird, 4.16)
self.assert_bird_position(47, 23, ACTIVE, yellow_bird, 4.17)
self.assert_bird_position(47, 23, ACTIVE, yellow_bird, 4.18)
self.assert_bird_position(47, 23, ACTIVE, yellow_bird, 4.1899999999999995)
self.assert_bird_position(48, 23, ACTIVE, yellow_bird, 4.2)
self.assert_bird_position(48, 23, ACTIVE, yellow_bird, 4.21)
self.assert_bird_position(48, 23, ACTIVE, yellow_bird, 4.220000000000001)
self.assert_bird_position(48, 23, ACTIVE, yellow_bird, 4.23)
self.assert_bird_position(49, 23, ACTIVE, yellow_bird, 4.24)
self.assert_bird_position(49, 23, ACTIVE, yellow_bird, 4.25)
self.assert_bird_position(49, 23, ACTIVE, yellow_bird, 4.26)
self.assert_bird_position(49, 23, ACTIVE, yellow_bird, 4.27)
self.assert_bird_position(49, 23, ACTIVE, yellow_bird, 4.279999999999999)
self.assert_bird_position(50, 23, ACTIVE, yellow_bird, 4.29)
self.assert_bird_position(50, 23, ACTIVE, yellow_bird, 4.3)
self.assert_bird_position(50, 23, ACTIVE, yellow_bird, 4.3100000000000005)
self.assert_bird_position(50, 23, ACTIVE, yellow_bird, 4.32)
self.assert_bird_position(50, 23, ACTIVE, yellow_bird, 4.33)
self.assert_bird_position(51, 23, ACTIVE, yellow_bird, 4.34)
self.assert_bird_position(51, 23, ACTIVE, yellow_bird, 4.35)
self.assert_bird_position(51, 23, ACTIVE, yellow_bird, 4.359999999999999)
self.assert_bird_position(51, 23, ACTIVE, yellow_bird, 4.37)
self.assert_bird_position(51, 23, ACTIVE, yellow_bird, 4.38)
self.assert_bird_position(52, 23, ACTIVE, yellow_bird, 4.390000000000001)
self.assert_bird_position(52, 23, ACTIVE, yellow_bird, 4.4)
self.assert_bird_position(52, 23, ACTIVE, yellow_bird, 4.41)
self.assert_bird_position(52, 23, ACTIVE, yellow_bird, 4.42)
self.assert_bird_position(53, 23, ACTIVE, yellow_bird, 4.43)
self.assert_bird_position(53, 23, ACTIVE, yellow_bird, 4.4399999999999995)
self.assert_bird_position(53, 23, ACTIVE, yellow_bird, 4.45)
self.assert_bird_position(53, 23, ACTIVE, yellow_bird, 4.46)
self.assert_bird_position(53, 23, ACTIVE, yellow_bird, 4.470000000000001)
self.assert_bird_position(54, 23, ACTIVE, yellow_bird, 4.48)
self.assert_bird_position(54, 23, ACTIVE, yellow_bird, 4.49)
self.assert_bird_position(54, 23, ACTIVE, yellow_bird, 4.5)
self.assert_bird_position(54, 23, ACTIVE, yellow_bird, 4.51)
self.assert_bird_position(54, 23, ACTIVE, yellow_bird, 4.52)
self.assert_bird_position(55, 23, ACTIVE, yellow_bird, 4.529999999999999)
self.assert_bird_position(55, 23, ACTIVE, yellow_bird, 4.54)
self.assert_bird_position(55, 23, ACTIVE, yellow_bird, 4.55)
self.assert_bird_position(55, 23, ACTIVE, yellow_bird, 4.5600000000000005)
self.assert_bird_position(56, 22, ACTIVE, yellow_bird, 4.57)
self.assert_bird_position(56, 22, ACTIVE, yellow_bird, 4.58)
self.assert_bird_position(56, 22, ACTIVE, yellow_bird, 4.59)
self.assert_bird_position(56, 22, ACTIVE, yellow_bird, 4.6)
self.assert_bird_position(56, 22, ACTIVE, yellow_bird, 4.609999999999999)
self.assert_bird_position(57, 22, ACTIVE, yellow_bird, 4.62)
self.assert_bird_position(57, 22, ACTIVE, yellow_bird, 4.63)
self.assert_bird_position(57, 22, ACTIVE, yellow_bird, 4.640000000000001)
self.assert_bird_position(57, 22, ACTIVE, yellow_bird, 4.65)
self.assert_bird_position(57, 22, ACTIVE, yellow_bird, 4.66)
self.assert_bird_position(58, 22, ACTIVE, yellow_bird, 4.67)
self.assert_bird_position(58, 22, ACTIVE, yellow_bird, 4.68)
self.assert_bird_position(58, 22, ACTIVE, yellow_bird, 4.6899999999999995)
self.assert_bird_position(58, 22, ACTIVE, yellow_bird, 4.7)
self.assert_bird_position(58, 22, ACTIVE, yellow_bird, 4.71)
self.assert_bird_position(59, 22, ACTIVE, yellow_bird, 4.720000000000001)
self.assert_bird_position(59, 22, ACTIVE, yellow_bird, 4.73)
self.assert_bird_position(59, 22, ACTIVE, yellow_bird, 4.74)
self.assert_bird_position(59, 22, ACTIVE, yellow_bird, 4.75)
self.assert_bird_position(60, 21, ACTIVE, yellow_bird, 4.76)
self.assert_bird_position(60, 21, ACTIVE, yellow_bird, 4.77)
self.assert_bird_position(60, 21, ACTIVE, yellow_bird, 4.779999999999999)
self.assert_bird_position(60, 21, ACTIVE, yellow_bird, 4.79)
self.assert_bird_position(60, 21, ACTIVE, yellow_bird, 4.8)
self.assert_bird_position(61, 21, ACTIVE, yellow_bird, 4.8100000000000005)
self.assert_bird_position(61, 21, ACTIVE, yellow_bird, 4.82)
self.assert_bird_position(61, 21, ACTIVE, yellow_bird, 4.83)
self.assert_bird_position(61, 21, ACTIVE, yellow_bird, 4.84)
self.assert_bird_position(61, 21, ACTIVE, yellow_bird, 4.85)
self.assert_bird_position(62, 21, ACTIVE, yellow_bird, 4.859999999999999)
self.assert_bird_position(62, 21, ACTIVE, yellow_bird, 4.87)
self.assert_bird_position(62, 21, ACTIVE, yellow_bird, 4.88)
self.assert_bird_position(62, 21, ACTIVE, yellow_bird, 4.890000000000001)
self.assert_bird_position(63, 20, ACTIVE, yellow_bird, 4.9)
self.assert_bird_position(63, 20, ACTIVE, yellow_bird, 4.91)
self.assert_bird_position(63, 20, ACTIVE, yellow_bird, 4.92)
self.assert_bird_position(63, 20, ACTIVE, yellow_bird, 4.93)
self.assert_bird_position(63, 20, ACTIVE, yellow_bird, 4.9399999999999995)
self.assert_bird_position(64, 20, ACTIVE, yellow_bird, 4.95)
self.assert_bird_position(64, 20, ACTIVE, yellow_bird, 4.96)
self.assert_bird_position(64, 20, ACTIVE, yellow_bird, 4.970000000000001)
self.assert_bird_position(64, 20, ACTIVE, yellow_bird, 4.98)
self.assert_bird_position(64, 20, ACTIVE, yellow_bird, 4.99)
self.assert_bird_position(65, 20, ACTIVE, yellow_bird, 5.0)
self.assert_bird_position(65, 20, ACTIVE, yellow_bird, 5.01)
self.assert_bird_position(65, 19, ACTIVE, yellow_bird, 5.02)
self.assert_bird_position(65, 19, ACTIVE, yellow_bird, 5.029999999999999)
self.assert_bird_position(65, 19, ACTIVE, yellow_bird, 5.04)
self.assert_bird_position(66, 19, ACTIVE, yellow_bird, 5.05)
self.assert_bird_position(66, 19, ACTIVE, yellow_bird, 5.0600000000000005)
self.assert_bird_position(66, 19, ACTIVE, yellow_bird, 5.07)
self.assert_bird_position(66, 19, ACTIVE, yellow_bird, 5.08)
self.assert_bird_position(67, 19, ACTIVE, yellow_bird, 5.09)
self.assert_bird_position(67, 19, ACTIVE, yellow_bird, 5.1)
self.assert_bird_position(67, 19, ACTIVE, yellow_bird, 5.109999999999999)
self.assert_bird_position(67, 19, ACTIVE, yellow_bird, 5.12)
self.assert_bird_position(67, 18, ACTIVE, yellow_bird, 5.13)
self.assert_bird_position(68, 18, ACTIVE, yellow_bird, 5.140000000000001)
self.assert_bird_position(68, 18, ACTIVE, yellow_bird, 5.15)
self.assert_bird_position(68, 18, ACTIVE, yellow_bird, 5.16)
self.assert_bird_position(68, 18, ACTIVE, yellow_bird, 5.17)
self.assert_bird_position(68, 18, ACTIVE, yellow_bird, 5.18)
self.assert_bird_position(69, 18, ACTIVE, yellow_bird, 5.1899999999999995)
self.assert_bird_position(69, 18, ACTIVE, yellow_bird, 5.2)
self.assert_bird_position(69, 18, ACTIVE, yellow_bird, 5.21)
self.assert_bird_position(69, 17, ACTIVE, yellow_bird, 5.220000000000001)
self.assert_bird_position(70, 17, ACTIVE, yellow_bird, 5.23)
self.assert_bird_position(70, 17, ACTIVE, yellow_bird, 5.24)
self.assert_bird_position(70, 17, ACTIVE, yellow_bird, 5.25)
self.assert_bird_position(70, 17, ACTIVE, yellow_bird, 5.26)
self.assert_bird_position(70, 17, ACTIVE, yellow_bird, 5.27)
self.assert_bird_position(71, 17, ACTIVE, yellow_bird, 5.279999999999999)
self.assert_bird_position(71, 17, ACTIVE, yellow_bird, 5.29)
self.assert_bird_position(71, 17, ACTIVE, yellow_bird, 5.3)
self.assert_bird_position(71, 16, ACTIVE, yellow_bird, 5.3100000000000005)
self.assert_bird_position(71, 16, ACTIVE, yellow_bird, 5.32)
self.assert_bird_position(72, 16, ACTIVE, yellow_bird, 5.33)
self.assert_bird_position(72, 16, ACTIVE, yellow_bird, 5.34)
self.assert_bird_position(72, 16, ACTIVE, yellow_bird, 5.35)
self.assert_bird_position(72, 16, ACTIVE, yellow_bird, 5.359999999999999)
self.assert_bird_position(72, 16, ACTIVE, yellow_bird, 5.37)
self.assert_bird_position(73, 16, ACTIVE, yellow_bird, 5.38)
self.assert_bird_position(73, 15, ACTIVE, yellow_bird, 5.390000000000001)
self.assert_bird_position(73, 15, ACTIVE, yellow_bird, 5.4)
self.assert_bird_position(73, 15, ACTIVE, yellow_bird, 5.41)
self.assert_bird_position(74, 15, ACTIVE, yellow_bird, 5.42)
self.assert_bird_position(74, 15, ACTIVE, yellow_bird, 5.43)
self.assert_bird_position(74, 15, ACTIVE, yellow_bird, 5.4399999999999995)
self.assert_bird_position(74, 15, ACTIVE, yellow_bird, 5.45)
self.assert_bird_position(74, 15, ACTIVE, yellow_bird, 5.46)
self.assert_bird_position(75, 14, ACTIVE, yellow_bird, 5.470000000000001)
self.assert_bird_position(75, 14, ACTIVE, yellow_bird, 5.48)
self.assert_bird_position(75, 14, ACTIVE, yellow_bird, 5.49)
self.assert_bird_position(75, 14, ACTIVE, yellow_bird, 5.5)
self.assert_bird_position(75, 14, ACTIVE, yellow_bird, 5.51)
self.assert_bird_position(76, 14, ACTIVE, yellow_bird, 5.52)
self.assert_bird_position(76, 14, ACTIVE, yellow_bird, 5.529999999999999)
self.assert_bird_position(76, 13, ACTIVE, yellow_bird, 5.54)
self.assert_bird_position(76, 13, ACTIVE, yellow_bird, 5.55)
self.assert_bird_position(77, 13, ACTIVE, yellow_bird, 5.5600000000000005)
self.assert_bird_position(77, 13, ACTIVE, yellow_bird, 5.57)
self.assert_bird_position(77, 13, ACTIVE, yellow_bird, 5.58)
self.assert_bird_position(77, 13, ACTIVE, yellow_bird, 5.59)
self.assert_bird_position(77, 13, ACTIVE, yellow_bird, 5.6)
self.assert_bird_position(78, 12, ACTIVE, yellow_bird, 5.609999999999999)
self.assert_bird_position(78, 12, ACTIVE, yellow_bird, 5.62)
self.assert_bird_position(78, 12, ACTIVE, yellow_bird, 5.63)
self.assert_bird_position(78, 12, ACTIVE, yellow_bird, 5.640000000000001)
self.assert_bird_position(78, 12, ACTIVE, yellow_bird, 5.65)
self.assert_bird_position(79, 12, ACTIVE, yellow_bird, 5.66)
self.assert_bird_position(79, 12, ACTIVE, yellow_bird, 5.67)
self.assert_bird_position(79, 11, ACTIVE, yellow_bird, 5.68)
self.assert_bird_position(79, 11, ACTIVE, yellow_bird, 5.6899999999999995)
self.assert_bird_position(79, 11, ACTIVE, yellow_bird, 5.7)
self.assert_bird_position(80, 11, ACTIVE, yellow_bird, 5.71)
self.assert_bird_position(80, 11, ACTIVE, yellow_bird, 5.720000000000001)
self.assert_bird_position(80, 11, ACTIVE, yellow_bird, 5.73)
self.assert_bird_position(80, 10, ACTIVE, yellow_bird, 5.74)
self.assert_bird_position(81, 10, ACTIVE, yellow_bird, 5.75)
self.assert_bird_position(81, 10, ACTIVE, yellow_bird, 5.76)
self.assert_bird_position(81, 10, ACTIVE, yellow_bird, 5.77)
self.assert_bird_position(81, 10, ACTIVE, yellow_bird, 5.779999999999999)
self.assert_bird_position(81, 10, ACTIVE, yellow_bird, 5.79)
self.assert_bird_position(82, 9, ACTIVE, yellow_bird, 5.8)
self.assert_bird_position(82, 9, ACTIVE, yellow_bird, 5.8100000000000005)
self.assert_bird_position(82, 9, ACTIVE, yellow_bird, 5.82)
self.assert_bird_position(82, 9, ACTIVE, yellow_bird, 5.83)
self.assert_bird_position(82, 9, ACTIVE, yellow_bird, 5.84)
self.assert_bird_position(83, 9, ACTIVE, yellow_bird, 5.85)
self.assert_bird_position(83, 8, ACTIVE, yellow_bird, 5.859999999999999)
self.assert_bird_position(83, 8, ACTIVE, yellow_bird, 5.87)
self.assert_bird_position(83, 8, ACTIVE, yellow_bird, 5.88)
self.assert_bird_position(84, 8, ACTIVE, yellow_bird, 5.890000000000001)
self.assert_bird_position(84, 8, ACTIVE, yellow_bird, 5.9)
self.assert_bird_position(84, 8, ACTIVE, yellow_bird, 5.91)
self.assert_bird_position(84, 7, ACTIVE, yellow_bird, 5.92)
self.assert_bird_position(84, 7, ACTIVE, yellow_bird, 5.93)
self.assert_bird_position(85, 7, ACTIVE, yellow_bird, 5.9399999999999995)
self.assert_bird_position(85, 7, ACTIVE, yellow_bird, 5.95)
self.assert_bird_position(85, 7, ACTIVE, yellow_bird, 5.96)
self.assert_bird_position(85, 6, ACTIVE, yellow_bird, 5.970000000000001)
self.assert_bird_position(85, 6, ACTIVE, yellow_bird, 5.98)
self.assert_bird_position(86, 6, ACTIVE, yellow_bird, 5.99)
self.assert_bird_position(86, 6, ACTIVE, yellow_bird, 6.0)
self.assert_bird_position(86, 6, ACTIVE, yellow_bird, 6.01)
self.assert_bird_position(86, 5, ACTIVE, yellow_bird, 6.02)
self.assert_bird_position(86, 5, ACTIVE, yellow_bird, 6.03)
self.assert_bird_position(87, 5, ACTIVE, yellow_bird, 6.04)
self.assert_bird_position(87, 5, ACTIVE, yellow_bird, 6.05)
self.assert_bird_position(87, 5, ACTIVE, yellow_bird, 6.06)
self.assert_bird_position(87, 5, ACTIVE, yellow_bird, 6.07)
self.assert_bird_position(88, 4, ACTIVE, yellow_bird, 6.08)
self.assert_bird_position(88, 4, ACTIVE, yellow_bird, 6.09)
self.assert_bird_position(88, 4, ACTIVE, yellow_bird, 6.1)
self.assert_bird_position(88, 4, ACTIVE, yellow_bird, 6.11)
self.assert_bird_position(88, 4, ACTIVE, yellow_bird, 6.12)
self.assert_bird_position(89, 3, ACTIVE, yellow_bird, 6.13)
self.assert_bird_position(89, 3, ACTIVE, yellow_bird, 6.14)
self.assert_bird_position(89, 3, ACTIVE, yellow_bird, 6.15)
self.assert_bird_position(89, 3, ACTIVE, yellow_bird, 6.16)
self.assert_bird_position(89, 3, ACTIVE, yellow_bird, 6.17)
self.assert_bird_position(90, 2, ACTIVE, yellow_bird, 6.18)
self.assert_bird_position(90, 2, ACTIVE, yellow_bird, 6.19)
self.assert_bird_position(90, 2, ACTIVE, yellow_bird, 6.2)
self.assert_bird_position(90, 2, ACTIVE, yellow_bird, 6.21)
self.assert_bird_position(91, 1, ACTIVE, yellow_bird, 6.22)
self.assert_bird_position(91, 1, ACTIVE, yellow_bird, 6.23)
self.assert_bird_position(91, 1, ACTIVE, yellow_bird, 6.24)
self.assert_bird_position(91, 1, ACTIVE, yellow_bird, 6.25)
self.assert_bird_position(91, 1, ACTIVE, yellow_bird, 6.26)
self.assert_bird_position(92, 0, ACTIVE, yellow_bird, 6.29)
yellow_bird.ground_clash()
self.assertEqual(DESTROYED, yellow_bird.status)
def assert_vertical_position(self, y, time, bird):
    """
    Helper that asserts the bird sits at x == 1 with ACTIVE status, so only
    the vertical position varies with time.
    NOTE: not collected by the test runner because its name lacks the "test" prefix.
    """
    self.assert_bird_position(1, y, ACTIVE, bird, time)
| 32,943 | 8,453 | 275 |
da781fe202b4a328cfa0731144f22ebe1d299307 | 3,231 | py | Python | designs/peng_robinson.py | poppyi-domain/eng_app_designs | e2b9ab29346a196226c6000d4d1aa6a0d5195d2d | [
"MIT"
] | 3 | 2021-12-25T12:56:26.000Z | 2022-03-28T13:36:00.000Z | designs/peng_robinson.py | poppyi-domain/eng_app_designs | e2b9ab29346a196226c6000d4d1aa6a0d5195d2d | [
"MIT"
] | null | null | null | designs/peng_robinson.py | poppyi-domain/eng_app_designs | e2b9ab29346a196226c6000d4d1aa6a0d5195d2d | [
"MIT"
] | null | null | null | """
Peng-robinson equation of state calculator.
As used: https://www.poppyi.com/app_design_form/public_render/peng%20robinson%20eq%20of%20state
"""
import numpy as np
R = 8.31446 # Pa.m3/K
| 28.095652 | 117 | 0.547818 | """
Peng-robinson equation of state calculator.
As used: https://www.poppyi.com/app_design_form/public_render/peng%20robinson%20eq%20of%20state
"""
import numpy as np
R = 8.31446 # Pa.m3/K
def validate_numeric(form_data, key):
    """Return ``form_data[key]`` coerced to float.

    Raises:
        Warning: when the key is missing or its value cannot be parsed as a
            number (the poppyi framework shows Warning text to the user).
            Previously only ValueError was caught, so a missing key
            (KeyError) or a None value (TypeError) crashed instead of
            producing the user-facing message.
    """
    try:
        return float(form_data[key])
    except (KeyError, TypeError, ValueError):
        raise Warning('Numeric input required for: {}'.format(key))
def main(form_data):
    """Solve the Peng-Robinson cubic EOS for the compressibility factor Z.

    Reads temperature, pressure, critical properties and the acentric factor
    from *form_data* (all parsed via validate_numeric), solves the cubic in Z
    and returns a dict of poppyi field-update actions containing the min/max
    physical roots plus every intermediate quantity for debugging.

    Raises:
        Warning: on non-numeric input, or when no physical root is found.
    """
    # validate_numeric already returns floats, so no re-conversion is needed.
    temperature_kelvin = validate_numeric(form_data, 'temp_kelvin')
    pressure_pascal = validate_numeric(form_data, 'pressure_pascal')
    temperature_critical = validate_numeric(form_data, 'critical_temp_kelvin')
    pressure_critical = validate_numeric(form_data, 'critical_pressure_pa')
    acentric_factor = validate_numeric(form_data, 'acentric_factor')

    # Pure-component attraction (a) and co-volume (b) parameters.
    a = (0.457235 * (R * temperature_critical)**2) / pressure_critical
    b = 0.077796 * R * temperature_critical / pressure_critical

    # kappa correlation; the cubic form is the extension for heavier
    # components (acentric factor > 0.49).
    if acentric_factor <= 0.49:
        kappa = 0.37464 + 1.54226 * acentric_factor - 0.26992 * acentric_factor**2
    else:
        kappa = 0.379642 + 1.48503 * acentric_factor - 0.164423 * acentric_factor**2 + 0.0166666 * acentric_factor**3

    reduced_temp = temperature_kelvin / temperature_critical
    alpha = (1 + kappa * (1 - reduced_temp**0.5))**2

    # Dimensionless A, B for the cubic in Z:
    #   Z^3 - (1 - B) Z^2 + (A - 2B - 3B^2) Z - (AB - B^2 - B^3) = 0
    A = alpha * a * pressure_pascal / (R * temperature_kelvin)**2
    B = b * pressure_pascal / (R * temperature_kelvin)
    k3 = 1
    k2 = 1 - B
    k1 = A - 2 * B - 3 * B**2
    k0 = A * B - B**2 - B**3
    z_roots = np.roots([k3, -k2, k1, -k0])
    # Keep (numerically) real roots and discard unphysical near-zero ones.
    z = z_roots.real[z_roots.imag < 1e-5]
    z_filtered = [float(z_) for z_ in z if z_ >= 1e-3]
    if len(z_filtered) == 0:
        raise Warning('peng robinson eq of state error: no solutions found (no roots)')
    return {
        'compressibility_max': {
            'action': 'update',
            'value': max(z_filtered)
        },
        'compressibility_min': {
            'action': 'update',
            'value': min(z_filtered)
        },
        'peng_calc_output': {
            'action': 'update'
        },
        'debug_outputs': {
            'action': 'update'
        },
        'critical_temp': {
            'action': 'update',
            'value': temperature_critical
        },
        'critical_pressure': {
            'action': 'update',
            'value': pressure_critical
        },
        'acentric_factor': {
            'action': 'update',
            'value': acentric_factor
        },
        'reduced_temp': {
            'action': 'update',
            'value': reduced_temp
        },
        'a': {
            'action': 'update',
            'value': a
        },
        'b': {
            'action': 'update',
            'value': b
        },
        'k': {
            'action': 'update',
            'value': kappa
        },
        'alpha': {
            'action': 'update',
            'value': alpha
        },
        'A': {
            'action': 'update',
            'value': A
        },
        'B': {
            'action': 'update',
            'value': B
        },
    }
| 2,991 | 0 | 46 |
6cbea770a05081c0aeff593d5edafdf80c804f66 | 1,757 | py | Python | core/forms.py | dishad/ADD | 51455c493a4eb433eb1d8dde44771e917efcb500 | [
"MIT"
] | null | null | null | core/forms.py | dishad/ADD | 51455c493a4eb433eb1d8dde44771e917efcb500 | [
"MIT"
] | 4 | 2016-11-26T19:10:01.000Z | 2016-12-24T10:42:16.000Z | core/forms.py | dishad/deanslist | 51455c493a4eb433eb1d8dde44771e917efcb500 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
from core.models import Post, Category
# forms will go here. create account has been added, new post form will probably go here
| 33.788462 | 103 | 0.692089 | from django import forms
from django.contrib.auth.models import User
from core.models import Post, Category
# forms will go here. create account has been added, new post form will probably go here
class CreateAccForm(forms.ModelForm):
    """Account sign-up form backed by the built-in auth User model.

    NOTE(review): ModelForm.save() stores the password field verbatim;
    confirm the view hashes it (set_password()/create_user()) before saving.
    """

    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'username', 'email', 'password')
        # Bootstrap styling on every input; password rendered masked.
        widgets = {
            'first_name': forms.TextInput(attrs={'class': 'form-control'}),
            'last_name': forms.TextInput(attrs={'class': 'form-control'}),
            'username': forms.TextInput(attrs={'class': 'form-control'}),
            'email': forms.TextInput(attrs={'class': 'form-control'}),
            'password': forms.PasswordInput(attrs={'class': 'form-control'}),
        }
class ForgotPasswordForm(forms.ModelForm):
    """Password-recovery form: username + email plus a security question.

    The question label text is user-facing copy and is left untouched here.
    """

    secquestion = forms.CharField(label="What is your mother's maiden credit card number?", max_length=50)

    class Meta:
        model = User
        fields = ('username', 'email')
        widgets = {
            'username': forms.TextInput(attrs={'class': 'form-control'}),
            'email': forms.TextInput(attrs={'class': 'form-control'}),
        }
class PostForm(forms.ModelForm):
    """Form for creating/editing a Post (title, price, description)."""

    class Meta:
        model = Post
        fields = ('title', 'price', 'description')
        widgets = {
            'title': forms.TextInput(attrs={'class': 'form-control'}),
            'price': forms.TextInput(attrs={'class': 'form-control'}),
            'description': forms.Textarea(attrs={'class': 'form-control', 'rows': '10'}),
            # TODO: add a 'category' widget if that field is ever exposed here.
        }
| 0 | 1,488 | 69 |
c63206845224ebcbad493a1c28d57ccb8b042d9a | 259 | py | Python | pycon/urls.py | tylerdave/PyCon-Website | 684c7a92776e61eb2e04951199ce344616c8d814 | [
"BSD-3-Clause"
] | null | null | null | pycon/urls.py | tylerdave/PyCon-Website | 684c7a92776e61eb2e04951199ce344616c8d814 | [
"BSD-3-Clause"
] | null | null | null | pycon/urls.py | tylerdave/PyCon-Website | 684c7a92776e61eb2e04951199ce344616c8d814 | [
"BSD-3-Clause"
] | 1 | 2020-09-30T18:09:16.000Z | 2020-09-30T18:09:16.000Z | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
url('program_export/', views.program_export, name='program_export'),
url(r'^special_event/(?P<slug>.*)/$', views.special_event, name='special_event'),
)
| 23.545455 | 85 | 0.694981 | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
url('program_export/', views.program_export, name='program_export'),
url(r'^special_event/(?P<slug>.*)/$', views.special_event, name='special_event'),
)
| 0 | 0 | 0 |
4cd82bb81321c3d8774ae3cf43cbc6b214b27871 | 5,118 | py | Python | magma_ff/parser.py | dbmi-bgm/cgap-wfl_utils | b22147555636e1b9194d4322ca148abe9fc3b8e5 | [
"MIT"
] | null | null | null | magma_ff/parser.py | dbmi-bgm/cgap-wfl_utils | b22147555636e1b9194d4322ca148abe9fc3b8e5 | [
"MIT"
] | 1 | 2022-03-19T17:55:54.000Z | 2022-03-19T17:55:54.000Z | magma_ff/parser.py | dbmi-bgm/magma | b22147555636e1b9194d4322ca148abe9fc3b8e5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
################################################
#
# Parser to handle compatibility between
# magma and portal json formats
#
# Michele Berselli
# berselli.michele@gmail.com
#
################################################
################################################
# Libraries
################################################
import sys, os
import json
################################################
# ParserFF
################################################
class ParserFF(object):
    """Parser handling compatibility between magma and portal json formats.

    Converts portal-style string-encoded argument values inside a
    meta-workflow or meta-workflow-run json into native json values,
    mutating the stored json in place.
    """

    def __init__(self, input_json):
        """
        input_json is a meta-workflow or meta-workflow-run in json format
        """
        self.in_json = input_json
    #end def

    def arguments_to_json(self):
        """
        parse meta-workflow or meta-workflow-run json stored as self.in_json
        if input, convert and replace arguments in input from string to json
        if workflows, for each step-workflow convert and replace arguments in input from string to json
        returns the (mutated) self.in_json
        """
        if self.in_json.get('input'):
            self._input_to_json(self.in_json['input'])
        #end if
        if self.in_json.get('workflows'):
            self._workflows_to_json(self.in_json['workflows'])
        #end if
        return self.in_json
    #end def

    def _workflows_to_json(self, workflows):
        """
        convert the 'input' arguments of every step-workflow in place
        """
        for workflow in workflows:
            self._input_to_json(workflow['input'])
        #end for
    #end def

    def _input_to_json(self, input):
        """
        loop through arguments in input
        call appropriate conversion function for argument_type
        """
        for arg in input:
            if arg['argument_type'] == 'file':
                self._file_to_json(arg)
            else: # is parameter
                self._parameter_to_json(arg)
            #end if
        #end for
    #end def

    def _file_to_json(self, arg):
        """
        _files_to_json to convert file argument from string to json
        replace the argument value (files)
        """
        if arg.get('files'):
            arg['files'] = self._files_to_json(arg['files'])
        #end if
    #end def

    def _files_to_json(self, files, sep=','):
        """
        convert file argument from string to json
        return the argument value (files)
        files, is a list of dictionaries representing files
        and information on their dimensional structure
        e.g. "files": [
                {
                  "file": "A",
                  "dimension": "0"
                  # 0D missing, 1D X, 2D X:X, 3D X:X:X, ...
                },
                {
                  "file": "B",
                  "dimension": "1"
                }
             ]
        NOTE: a file without 'dimension' short-circuits the whole argument
        to that single file value.
        """
        list_ = []
        # Get max dimensions needed
        for file in files:
            dimension = file.get('dimension')
            if not dimension:
                return file.get('file')
            #end if
            dimension_ = list(map(int, dimension.split(sep)))
            # Expand input list based on dimensions
            self._init_list(list_, dimension_)
            # Add element: walk to the innermost list, then set the leaf slot
            tmp_list = list_
            for i in dimension_[:-1]:
                tmp_list = tmp_list[i]
            #end for
            tmp_list[dimension_[-1]] = file.get('file')
        #end for
        return list_
    #end def

    def _init_list(self, list_, dimension_):
        """
        grow list_ in place so the index path dimension_ is addressable,
        padding intermediate levels with [] and the leaf level with None
        """
        tmp_list = list_
        for i in dimension_[:-1]:
            try: # index exists
                tmp_list[i]
            except IndexError: # create index
                for _ in range(i-len(tmp_list)+1):
                    tmp_list.append([])
            #end try
            tmp_list = tmp_list[i]
        #end for
        for _ in range(dimension_[-1]-len(tmp_list)+1):
            tmp_list.append(None)
        #end for
    #end def

    def _parameter_to_json(self, arg):
        """
        convert parameter argument from string to json
        replace the argument value (value)
        value_type, json | string | integer | boolean | float
        NOTE: an argument with a falsy 'value' (missing or empty string) is
        left untouched and keeps its 'value_type' key.
        """
        if not arg.get('value'):
            return
        #end if
        value = arg['value']
        value_type = arg['value_type']
        if value_type == 'json':
            value = json.loads(value)
        elif value_type == 'integer':
            value = int(value)
        elif value_type == 'float':
            value = float(value)
        elif value_type == 'boolean':
            if value.lower() == 'true':
                value = True
            else: value = False
            #end if
        #end if
        arg['value'] = value
        del arg['value_type']
    #end def

#end class
| 30.646707 | 107 | 0.454279 | #!/usr/bin/env python3
################################################
#
# Parser to handle compatibility between
# magma and portal json formats
#
# Michele Berselli
# berselli.michele@gmail.com
#
################################################
################################################
# Libraries
################################################
import sys, os
import json
################################################
# ParserFF
################################################
class ParserFF(object):
"""
"""
def __init__(self, input_json):
"""
input_json is a meta-workflow or meta-workflow-run in json format
"""
self.in_json = input_json
#end def
def arguments_to_json(self):
"""
parse meta-workflow or meta-workflow-run json stored as self.in_json
if input, convert and replace arguments in input from string to json
if workflows, for each step-workflow convert and replace arguments in input from string to json
"""
if self.in_json.get('input'):
self._input_to_json(self.in_json['input'])
#end if
if self.in_json.get('workflows'):
self._workflows_to_json(self.in_json['workflows'])
#end if
return self.in_json
#end def
def _workflows_to_json(self, workflows):
"""
"""
for workflow in workflows:
self._input_to_json(workflow['input'])
#end for
#end def
def _input_to_json(self, input):
"""
loop through arguments in input
call appropriate conversion function for argument_type
"""
for arg in input:
if arg['argument_type'] == 'file':
self._file_to_json(arg)
else: # is parameter
self._parameter_to_json(arg)
#end if
#end for
#end def
def _file_to_json(self, arg):
"""
_files_to_json to convert file argument from string to json
replace the argument value (files)
"""
if arg.get('files'):
arg['files'] = self._files_to_json(arg['files'])
#end if
#end def
def _files_to_json(self, files, sep=','):
"""
convert file argument from string to json
return the argument value (files)
files, is a list of dictionaries representing files
and information on their dimensional structure
e.g. "files": [
{
"file": "A",
"dimension": "0"
# 0D missing, 1D X, 2D X:X, 3D X:X:X, ...
},
{
"file": "B",
"dimension": "1"
}
]
"""
list_ = []
# Get max dimensions needed
for file in files:
dimension = file.get('dimension')
if not dimension:
return file.get('file')
#end if
dimension_ = list(map(int, dimension.split(sep)))
# Expand input list based on dimensions
self._init_list(list_, dimension_)
# Add element
tmp_list = list_
for i in dimension_[:-1]:
tmp_list = tmp_list[i]
#end for
tmp_list[dimension_[-1]] = file.get('file')
#end for
return list_
#end def
def _init_list(self, list_, dimension_):
"""
"""
tmp_list = list_
for i in dimension_[:-1]:
try: # index esist
tmp_list[i]
except IndexError: # create index
for _ in range(i-len(tmp_list)+1):
tmp_list.append([])
#end try
tmp_list = tmp_list[i]
#end for
for _ in range(dimension_[-1]-len(tmp_list)+1):
tmp_list.append(None)
#end for
#end def
def _parameter_to_json(self, arg):
"""
convert parameter argument from string to json
replace the argument value (value)
value_type, json | string | integer | boolean | float
"""
if not arg.get('value'):
return
#end if
value = arg['value']
value_type = arg['value_type']
if value_type == 'json':
value = json.loads(value)
elif value_type == 'integer':
value = int(value)
elif value_type == 'float':
value = float(value)
elif value_type == 'boolean':
if value.lower() == 'true':
value = True
else: value = False
#end if
#end if
arg['value'] = value
del arg['value_type']
#end def
#end class
| 0 | 0 | 0 |
4436746d4cc5c235785c9ac64cd85a23db4d6d26 | 360 | py | Python | CSC-291/Notes/Lists_9.6.14.py | FrancesCoronel/cs-hu | ecd103a525fd312146d3b6c69ee7c1452548c5e2 | [
"MIT"
] | 2 | 2016-12-05T06:15:34.000Z | 2016-12-15T10:56:50.000Z | CSC-291/Notes/Lists_9.6.14.py | fvcproductions/CS-HU | ecd103a525fd312146d3b6c69ee7c1452548c5e2 | [
"MIT"
] | null | null | null | CSC-291/Notes/Lists_9.6.14.py | fvcproductions/CS-HU | ecd103a525fd312146d3b6c69ee7c1452548c5e2 | [
"MIT"
] | 3 | 2019-04-06T01:45:54.000Z | 2020-04-24T16:55:32.000Z | '''
FVCproductions
CSC291_4
'''
# list
numbers = [3, 7, 9484, 46, 67, 2.3]
total = 0
for num in numbers:
total += num
# concanetation
print "The total is", total
print "hi!"*4
# tuple
things = ("one", "two")
# dictionary
age = {"Dryfuss": 6, "Yana": 30, "Lucy", 6}
#set
print numbers[2]
print age["Yana"]
(first,second) = things
#dictionary | 9.72973 | 44 | 0.608333 | '''
FVCproductions
CSC291_4
'''
# list
numbers = [3, 7, 9484, 46, 67, 2.3]
total = 0
for num in numbers:
total += num
# concanetation
print "The total is", total
print "hi!"*4
# tuple
things = ("one", "two")
# dictionary
age = {"Dryfuss": 6, "Yana": 30, "Lucy", 6}
#set
print numbers[2]
print age["Yana"]
(first,second) = things
#dictionary | 0 | 0 | 0 |
ca4567f7905e3458f70f2047be789ac11f75eda6 | 604 | py | Python | bundle-workflow/src/build_workflow/build_artifact_check.py | xuezhou25/opensearch-build | 24ebc3e444949b4582680edc76c29a684e486120 | [
"Apache-2.0"
] | 1 | 2022-01-11T17:47:01.000Z | 2022-01-11T17:47:01.000Z | bundle-workflow/src/build_workflow/build_artifact_check.py | xuezhou25/opensearch-build | 24ebc3e444949b4582680edc76c29a684e486120 | [
"Apache-2.0"
] | 13 | 2021-10-02T00:22:47.000Z | 2022-02-08T17:49:38.000Z | bundle-workflow/src/build_workflow/build_artifact_check.py | xuezhou25/opensearch-build | 24ebc3e444949b4582680edc76c29a684e486120 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import os
from abc import ABC, abstractmethod
| 26.26087 | 88 | 0.693709 | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import os
from abc import ABC, abstractmethod
class BuildArtifactCheck(ABC):
    """Abstract base for validators that check a build artifact for a target."""

    class BuildArtifactInvalidError(Exception):
        """Raised when the artifact at *path* fails validation."""

        def __init__(self, path, message):
            self.path = path
            super().__init__(f"Artifact {os.path.basename(path)} is invalid. {message}")

    def __init__(self, target):
        # target: the build target whose artifacts are being checked
        self.target = target

    @abstractmethod
    def check(self, path):
        """Validate the artifact at *path*; raise BuildArtifactInvalidError on failure."""
        pass
| 180 | 161 | 23 |
bd5cca04b4b2bf6803e08c2267e8c51eca8218e9 | 4,935 | py | Python | examples/synth03_quickfit.py | cwhanse/pvpro | 1e96897e70605c4a185cb93eec9075e0d92047a5 | [
"BSD-3-Clause"
] | null | null | null | examples/synth03_quickfit.py | cwhanse/pvpro | 1e96897e70605c4a185cb93eec9075e0d92047a5 | [
"BSD-3-Clause"
] | null | null | null | examples/synth03_quickfit.py | cwhanse/pvpro | 1e96897e70605c4a185cb93eec9075e0d92047a5 | [
"BSD-3-Clause"
] | null | null | null | """
Example running quick estimate algorithm on synthetic data.
@author: toddkarin
"""
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import datetime
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import seaborn as sns
from pvpro import PvProHandler
import pvpro
from pvlib.pvsystem import singlediode
# Import synthetic data
df = pd.read_pickle('synth01_out.pkl')
save_figs_directory = 'figures/synth03'
# Make PvProHandler object to store data.
# Column-name keys map the dataframe's measured channels into pvpro.
pvp = PvProHandler(df,
                   system_name='synthetic',
                   delta_T=3,
                   use_clear_times=False,
                   cells_in_series=60,
                   resistance_shunt_ref=df['resistance_shunt_ref'].mean(),
                   alpha_isc=0.001,
                   voltage_key='v_dc',
                   current_key='i_dc',
                   temperature_module_key='temperature_module_meas',
                   irradiance_poa_key='poa_meas',
                   modules_per_string=1,
                   parallel_strings=1,
                   )
# Preprocess
pvp.run_preprocess()
# Quick (non-iterative) parameter estimate, one fit per month ('M').
ret = pvp.quick_parameter_extraction(freq='M',
                                     verbose=False,
                                     figure=True
                                     )
# print(ret['p'])
# pfit: fitted single-diode parameters per time window.
pfit = ret['p']
# n indexes the subplot grid; panel 1 is reserved for the text summary.
n = 2
figure = plt.figure(0, figsize=(7.5, 5.5))
plt.clf()
figure.subplots(nrows=4, ncols=3, sharex='all')
# plt.subplots(sharex='all')
plt.subplots_adjust(wspace=0.6, hspace=0.1)
# df['conductance_shunt_ref'] = 1/df['resistance_shunt_ref']
# Axis label for each fitted parameter key.
ylabel = {'diode_factor': 'Diode factor',
          'photocurrent_ref': 'Photocurrent ref (A)',
          'saturation_current_ref': 'I sat ref (nA)',
          'resistance_series_ref': 'R series ref (Ohm)',
          'resistance_shunt_ref': 'R shunt ref (Ohm)',
          'conductance_shunt_ref': 'G shunt ref (1/Ohm)',
          'conductance_shunt_extra': 'G shunt extra (1/Ohm)',
          'i_sc_ref': 'I sc ref (A)',
          'v_oc_ref': 'V oc ref (V)',
          'i_mp_ref': 'I mp ref (A)',
          'p_mp_ref': 'P mp ref (W)',
          'i_x_ref': 'I x ref (A)',
          'v_mp_ref': 'V mp ref (V)',
          'residual': 'Residual (AU)',
          }
# First panel: run metadata as text instead of a plot.
plt.subplot(4, 3, 1)
ax = plt.gca()
plt.axis('off')
plt.text(-0.2, 0,
         'QUICK ESTIMATE\n' + \
         'System: {}\n'.format(pvp.system_name) + \
         'Use clear times: {}\n'.format(pvp.use_clear_times) + \
         'Temp: {}\n'.format(pvp.temperature_module_key) + \
         'Irrad: {}\n'.format(pvp.irradiance_poa_key)
         , fontsize=8
         )
# One subplot per fitted parameter: pvpro estimate (dots) vs the synthetic
# ground truth from df (dashed), when the column exists.
for k in ['diode_factor', 'photocurrent_ref', 'saturation_current_ref',
          'resistance_series_ref',
          'conductance_shunt_extra', 'i_mp_ref',
          'v_mp_ref',
          'p_mp_ref', 'i_sc_ref', 'v_oc_ref', ]:
    ax = plt.subplot(4, 3, n)

    # Unit scaling: saturation current plotted in nA, residual in milli-units.
    if k == 'saturation_current_ref':
        scale = 1e9
    elif k == 'residual':
        scale = 1e3
    else:
        scale = 1

    plt.plot(pfit.index, pfit[k] * scale, '.',
             color=[0, 0, 0.8],
             label='pvpro')

    ylims = scale * np.array([np.nanmax(pfit[k]), np.nanmin(pfit[k])])
    if k in df.keys():
        # Overlay the true (synthetic) parameter trajectory.
        plt.plot(df.index, df[k] * scale, '--',
                 color=[1, 0.2, 0.2],
                 label=True)
        ylims[0] = np.min([ylims[0], df[k].min() * scale])
        ylims[1] = np.max([ylims[1], df[k].max() * scale])
    plt.ylabel(ylabel[k], fontsize=9)
    # plt.gca().fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')

    # Nearly-flat series get a fixed +/-10% band; otherwise pad by 10%.
    if np.nanmax(pfit[k]) < np.nanmin(pfit[k])*1.2:
        plt.ylim(pfit[k].mean() * np.array([0.9, 1.1]))
    else:
        ylims = ylims + 0.1 * np.array([-1, 1]) * (ylims.max() - ylims.min())
        plt.ylim(ylims)

    date_form = matplotlib.dates.DateFormatter("%Y")
    plt.gca().xaxis.set_major_formatter(date_form)

    plt.yticks(fontsize=9)
    plt.xticks(fontsize=9, rotation=90)

    if n == 3:
        plt.legend(loc=[0, 1.2])

    # for y in [df.index.]:
    #     # plt.plot([y,y], [pfit[k].min()*scale, pfit[k].max()*scale] ,'--')
    #     plt.axvline(y,'--')
    # mask = np.logical_and(df.index.month == 1, df.index.day == 1)
    # day_ticks = np.arange(len(df))[mask]
    # plt.xticks(ticks=df.index[day_ticks].year,
    #            labels=df.index[day_ticks].year)

    n = n + 1
# figure.tight_layout(pad=5)
plt.show()

# Flip to True to export the summary figure plus the per-parameter
# estimate figures (20-24) as PDFs under save_figs_directory.
save_figs = False
if save_figs:
    plt.savefig(
        '{}/synth01_quickfit_degradation_{}.pdf'.format(save_figs_directory,
                                                        pvp.system_name),
        bbox_inches='tight')
    for f in range(20,25):
        plt.figure(f)
        plt.savefig(
            '{}/synth03_estimate_{}.pdf'.format(save_figs_directory,
                                                f),
            bbox_inches='tight')
| 29.909091 | 77 | 0.551165 | """
Example running quick estimate algorithm on synthetic data.
@author: toddkarin
"""
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import datetime
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import seaborn as sns
from pvpro import PvProHandler
import pvpro
from pvlib.pvsystem import singlediode
# Import synthetic data
df = pd.read_pickle('synth01_out.pkl')
save_figs_directory = 'figures/synth03'
# Make PvProHandler object to store data.
pvp = PvProHandler(df,
system_name='synthetic',
delta_T=3,
use_clear_times=False,
cells_in_series=60,
resistance_shunt_ref=df['resistance_shunt_ref'].mean(),
alpha_isc=0.001,
voltage_key='v_dc',
current_key='i_dc',
temperature_module_key='temperature_module_meas',
irradiance_poa_key='poa_meas',
modules_per_string=1,
parallel_strings=1,
)
# Preprocess
pvp.run_preprocess()
ret = pvp.quick_parameter_extraction(freq='M',
verbose=False,
figure=True
)
# print(ret['p'])
pfit = ret['p']
n = 2
figure = plt.figure(0, figsize=(7.5, 5.5))
plt.clf()
figure.subplots(nrows=4, ncols=3, sharex='all')
# plt.subplots(sharex='all')
plt.subplots_adjust(wspace=0.6, hspace=0.1)
# df['conductance_shunt_ref'] = 1/df['resistance_shunt_ref']
ylabel = {'diode_factor': 'Diode factor',
'photocurrent_ref': 'Photocurrent ref (A)',
'saturation_current_ref': 'I sat ref (nA)',
'resistance_series_ref': 'R series ref (Ohm)',
'resistance_shunt_ref': 'R shunt ref (Ohm)',
'conductance_shunt_ref': 'G shunt ref (1/Ohm)',
'conductance_shunt_extra': 'G shunt extra (1/Ohm)',
'i_sc_ref': 'I sc ref (A)',
'v_oc_ref': 'V oc ref (V)',
'i_mp_ref': 'I mp ref (A)',
'p_mp_ref': 'P mp ref (W)',
'i_x_ref': 'I x ref (A)',
'v_mp_ref': 'V mp ref (V)',
'residual': 'Residual (AU)',
}
plt.subplot(4, 3, 1)
ax = plt.gca()
plt.axis('off')
plt.text(-0.2, 0,
'QUICK ESTIMATE\n' + \
'System: {}\n'.format(pvp.system_name) + \
'Use clear times: {}\n'.format(pvp.use_clear_times) + \
'Temp: {}\n'.format(pvp.temperature_module_key) + \
'Irrad: {}\n'.format(pvp.irradiance_poa_key)
, fontsize=8
)
for k in ['diode_factor', 'photocurrent_ref', 'saturation_current_ref',
'resistance_series_ref',
'conductance_shunt_extra', 'i_mp_ref',
'v_mp_ref',
'p_mp_ref', 'i_sc_ref', 'v_oc_ref', ]:
ax = plt.subplot(4, 3, n)
if k == 'saturation_current_ref':
scale = 1e9
elif k == 'residual':
scale = 1e3
else:
scale = 1
plt.plot(pfit.index, pfit[k] * scale, '.',
color=[0, 0, 0.8],
label='pvpro')
ylims = scale * np.array([np.nanmax(pfit[k]), np.nanmin(pfit[k])])
if k in df.keys():
plt.plot(df.index, df[k] * scale, '--',
color=[1, 0.2, 0.2],
label=True)
ylims[0] = np.min([ylims[0], df[k].min() * scale])
ylims[1] = np.max([ylims[1], df[k].max() * scale])
plt.ylabel(ylabel[k], fontsize=9)
# plt.gca().fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
if np.nanmax(pfit[k]) < np.nanmin(pfit[k])*1.2:
plt.ylim(pfit[k].mean() * np.array([0.9, 1.1]))
else:
ylims = ylims + 0.1 * np.array([-1, 1]) * (ylims.max() - ylims.min())
plt.ylim(ylims)
date_form = matplotlib.dates.DateFormatter("%Y")
plt.gca().xaxis.set_major_formatter(date_form)
plt.yticks(fontsize=9)
plt.xticks(fontsize=9, rotation=90)
if n == 3:
plt.legend(loc=[0, 1.2])
# for y in [df.index.]:
# # plt.plot([y,y], [pfit[k].min()*scale, pfit[k].max()*scale] ,'--')
# plt.axvline(y,'--')
# mask = np.logical_and(df.index.month == 1, df.index.day == 1)
# day_ticks = np.arange(len(df))[mask]
# plt.xticks(ticks=df.index[day_ticks].year,
# labels=df.index[day_ticks].year)
n = n + 1
# figure.tight_layout(pad=5)
plt.show()
save_figs = False
if save_figs:
plt.savefig(
'{}/synth01_quickfit_degradation_{}.pdf'.format(save_figs_directory,
pvp.system_name),
bbox_inches='tight')
for f in range(20,25):
plt.figure(f)
plt.savefig(
'{}/synth03_estimate_{}.pdf'.format(save_figs_directory,
f),
bbox_inches='tight')
| 0 | 0 | 0 |
b6e3d8d4624010bc7aa016b4db6b27f097c10049 | 6,776 | py | Python | home/views.py | darkun7/Evaluate-CS-Student-as-Lecture-Assistant | 55ccec16c2b4146c32d5cf27f3287d0d3393478b | [
"MIT"
] | null | null | null | home/views.py | darkun7/Evaluate-CS-Student-as-Lecture-Assistant | 55ccec16c2b4146c32d5cf27f3287d0d3393478b | [
"MIT"
] | null | null | null | home/views.py | darkun7/Evaluate-CS-Student-as-Lecture-Assistant | 55ccec16c2b4146c32d5cf27f3287d0d3393478b | [
"MIT"
] | null | null | null | from django.urls import reverse
from django.db.models import Count
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.forms import formset_factory, modelformset_factory
from .models import *
from .models import TrainingValue
from .models import Training as TrainingModel
from .forms import *
# Create your views here.
# TRAINING #
# DATA TRAINING
# LAB #
| 31.516279 | 88 | 0.617621 | from django.urls import reverse
from django.db.models import Count
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.forms import formset_factory, modelformset_factory
from .models import *
from .models import TrainingValue
from .models import Training as TrainingModel
from .forms import *
# Create your views here.
def pakarKNN(request):
    """Expert-system view: classify the submitted attribute values with KNN
    (Manhattan distance against the stored training rows).

    GET renders an empty form; POST computes the classification (``hasil``).
    """
    trainings = TrainingModel.objects.all()
    amount_training = trainings.count()
    attr = Attribute.objects.all()
    amount_attr = attr.count()
    # raw = [[attribute values...], result] per training row, attribute
    # values in TrainingValue query order.
    raw = []
    for training in trainings:
        dtrain = TrainingValue.objects.filter(training_id=training.id)
        attribute = []
        for train in dtrain:
            attribute.append(train.value)
        raw.append([attribute, training.result])
    print("raw: ",raw)
    if request.method == "POST":
        # Collect the submitted form values, one per attribute.
        # NOTE(review): float('invalid') raises an uncaught ValueError when a
        # field is missing or non-numeric — confirm upstream validation.
        insert = []
        for atr in attr:
            insert.append(float(request.POST.get('value-'+str(atr.id),'invalid')))
        print("insert: ",insert)
        bound = []
        # Manhattan distance from the input to every training row.
        manhattan = []
        for i in range(amount_training): # training rows
            distance = 0
            for j in range(amount_attr): # attributes
                distance += abs(insert[j]-raw[i][0][j])
            bound.append({"d":distance,"r":raw[i][1]})
            manhattan.append(distance)
        print('manhattan: ', manhattan)
        # KNN vote over the nearest neighbours.
        hasil = knn(bound, manhattan, 9, amount_training)
        print(hasil)
    else:
        hasil = ''
    return render(request, 'pakar/knn.html', {'attr':attr, 'hasil':hasil})
def knn(data, manhattan, k, max_k):
    """Classify by k-nearest-neighbours vote.

    Args:
        data: list of {'d': distance, 'r': object with a .name} candidates.
        manhattan: list of the same distances; sorted IN PLACE to find the
            k-th smallest (cut-off) distance.
        k: number of neighbours that vote.
        max_k: upper bound on k when a tie forces a retry with k + 4.

    Returns:
        dict with the winning class name ('result'), per-class vote counts
        sorted descending ('detail'), and the k actually used ('k').

    Bug fixes vs. the original: the tie-break recursion computed a new
    result but DISCARDED it (its return value was never used); and looking
    up the runner-up crashed with IndexError when only one class voted.
    Debug prints removed.
    """
    manhattan.sort()
    # Cut-off distance: the k-th smallest (clamped to the list length).
    cutoff = manhattan[min(k, len(manhattan)) - 1]

    votes = {}
    for candidate in data:
        if candidate['d'] <= cutoff:
            label = candidate['r'].name
            votes[label] = votes.get(label, 0) + 1

    # Order classes by vote count, descending.
    ranked = {name: votes[name] for name in sorted(votes, key=votes.get, reverse=True)}
    names = list(ranked.keys())

    # Tie between the two leading classes: retry with a larger k and
    # actually return that refined result.
    if len(names) > 1 and ranked[names[0]] == ranked[names[1]] and k + 4 < max_k:
        return knn(data, manhattan, k + 4, max_k)

    return {
        'result': names[0],
        'detail': ranked,
        'k': k,
    }
def index(request):
    """Public landing page."""
    return render(request, 'front/landing.html')

def dasbor(request):
    """Dashboard page."""
    return render(request, 'front/dasbor.html')

def akun(request):
    """Account overview page."""
    return render(request, 'akun/index.html')

def editAkun(request):
    """Account edit page."""
    return render(request, 'akun/editakun.html')
# TRAINING #
def dataTraining(request):
    """List all training rows."""
    trainings = TrainingModel.objects.all()
    data = {'trainings': trainings}
    return render(request, 'training/index.html', data)
def createTraining(request):
    """Create a new training row from a submitted TrainingForm."""
    form = TrainingForm()
    if request.method == "POST":
        form = TrainingForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect(reverse('training'))
    data = {'form':form}
    return render(request, 'training/create.html', data)
def updateTraining(request, id):
    """Edit an existing training row.

    Redirects to the create page when *id* does not exist, and back to the
    training list after a successful save. (Leftover debug ``print`` removed.)
    """
    train = TrainingModel.objects.filter(pk=id).first()
    form = TrainingForm(instance=train)
    if not train:
        return redirect(reverse('create_training'))
    if request.method == "POST":
        form = TrainingForm(request.POST, instance=train)
        if form.is_valid():
            form.save()
            return redirect(reverse('training'))
    data = {'training': train, 'form': form}
    return render(request, 'training/update.html', data)
def deleteTraining(request, id):
    """Delete the training row with primary key *id* (if it exists) and
    return to the training list."""
    victim = TrainingModel.objects.filter(pk=id).first()
    if victim:
        victim.delete()
    return redirect(reverse('training'))
# DATA TRAINING
def seeDataTraining(request, id):
    """Show the stored attribute values for training *id*."""
    dtrain = TrainingValue.objects.filter(training_id=id)
    attr = Attribute.objects.all()
    data = {'dtrain':dtrain, 'attr':attr}
    return render(request, 'training/see_training.html', data)
def updateDataTraining(request, id):
    """Edit a single TrainingValue.

    Redirects to the training list when *id* does not exist or after a
    successful save.
    """
    dtrain = TrainingValue.objects.filter(id=id).first()
    form = DataTrainingForm(instance=dtrain)
    if not dtrain:
        return redirect(reverse('training'))
    else:
        if request.method == "POST":
            form = DataTrainingForm(request.POST ,instance=dtrain)
            if form.is_valid():
                form.save()
                return redirect(reverse('training'))
    data = {'dtrain':dtrain, 'form':form}
    return render(request, 'training/update_training.html', data)
def createDataTraining(request, id):
    """Create the TrainingValue entries for training *id*.

    Renders one formset row per Attribute (pre-filled with the training and
    attribute ids) and saves every row on a valid POST. Removed an unused
    TrainingModel query and an unused ``amount_attr`` count that only cost
    database round-trips.
    """
    attr = Attribute.objects.all()
    init_val = []
    for val in attr:
        init_val.append({
            'training_id': id,
            'attribute_id': val.id
        })
    DataTrainingFormSet = formset_factory(DataTrainingForm, extra=0)
    formset = DataTrainingFormSet(initial=init_val)
    # formset = modelformset_factory(TrainingValue, exclude=()) # batch update from the model
    if request.method == "POST":
        formset = DataTrainingFormSet(request.POST)
        if formset.is_valid():
            for form in formset:
                form.save()
            return redirect(reverse('training'))
    data = {'formset': formset}
    return render(request, 'training/create_training.html', data)
def createAttribute(request):
    """Render the (static) page for defining a new training attribute."""
    return render(request, 'training/create_attribute.html')
# LAB #
def lab(request):
    """List all labs."""
    labs = Lab.objects.all()
    data = {'labs': labs}
    return render(request, 'lab/index.html', data)
def createLab(request):
    """Create a new lab from a submitted LabForm."""
    form = LabForm()
    if request.method == "POST":
        form = LabForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect(reverse('lab'))
    data = {'form':form}
    return render(request, 'lab/create.html', data)
def updateLab(request, id):
    """Edit an existing lab.

    Redirects to the create page when *id* does not exist, and back to the
    lab list after a successful save. (Leftover debug ``print`` and stale
    commented-out query removed.)
    """
    lab = Lab.objects.filter(pk=id).first()
    form = LabForm(instance=lab)
    if not lab:
        return redirect(reverse('create_lab'))
    if request.method == "POST":
        form = LabForm(request.POST, instance=lab)
        if form.is_valid():
            form.save()
            return redirect(reverse('lab'))
    data = {'lab': lab, 'form': form}
    return render(request, 'lab/update.html', data)
def deleteLab(request, id):
    """Delete the lab with primary key *id* (if it exists) and return to
    the lab list."""
    victim = Lab.objects.filter(pk=id).first()
    if victim:
        victim.delete()
    return redirect(reverse('lab'))
| 5,962 | 0 | 410 |
4fa89a64771d27994619ff2fffa1c136934e7e1f | 6,773 | py | Python | target_bigquery/schema.py | tarsisazevedo/target-bigquery | da8d21305fecc1f2782eca6e5edb8f25f0c2eea3 | [
"Apache-2.0"
] | null | null | null | target_bigquery/schema.py | tarsisazevedo/target-bigquery | da8d21305fecc1f2782eca6e5edb8f25f0c2eea3 | [
"Apache-2.0"
] | null | null | null | target_bigquery/schema.py | tarsisazevedo/target-bigquery | da8d21305fecc1f2782eca6e5edb8f25f0c2eea3 | [
"Apache-2.0"
] | null | null | null | import datetime
import re
import simplejson as json
import singer
from google.cloud.bigquery import SchemaField
from jsonschema import validate
from jsonschema.exceptions import ValidationError
# StitchData compatible timestamp meta data
# https://www.stitchdata.com/docs/data-structure/system-tables-and-columns
BATCH_TIMESTAMP = "_sdc_batched_at"
JSONSCHEMA_TYPES = ["object", "array", "string", "integer", "number", "boolean"]
MAX_WARNING = 20
logger = singer.get_logger()
| 32.099526 | 88 | 0.584232 | import datetime
import re
import simplejson as json
import singer
from google.cloud.bigquery import SchemaField
from jsonschema import validate
from jsonschema.exceptions import ValidationError
# StitchData compatible timestamp meta data
# https://www.stitchdata.com/docs/data-structure/system-tables-and-columns
BATCH_TIMESTAMP = "_sdc_batched_at"
JSONSCHEMA_TYPES = ["object", "array", "string", "integer", "number", "boolean"]
MAX_WARNING = 20
logger = singer.get_logger()
def _get_schema_type_mode(property_, numeric_type):
    """Map a JSON-schema property to a ``(BigQuery type, mode)`` pair.

    ``property_["type"]`` may be a plain string or a list (commonly
    ``["null", <type>]``). ``numeric_type`` is the BigQuery type used for
    JSON-schema ``number`` fields ("NUMERIC" or "FLOAT64").
    """
    declared = property_.get("type")
    mode = "NULLABLE"
    if isinstance(declared, list):
        if declared[0] != "null":
            mode = "REQUIRED"
        if len(declared) < 2 or declared[1] not in JSONSCHEMA_TYPES:
            # Some major taps put the concrete type first :(
            json_type = declared[0]
            if len(declared) > 2 and declared[1] != "null":
                mode = "NULLABLE"
        else:
            json_type = declared[1]
    elif isinstance(declared, str):
        json_type = declared
    else:
        raise Exception("type must be given as string or list")
    json_type = json_type.lower()
    if json_type not in JSONSCHEMA_TYPES:
        raise Exception(f"{json_type} is not a valid jsonschema type")
    # Translate the JSON-schema type into its BigQuery counterpart.
    if json_type == "object":
        bq_type = "RECORD"
    elif json_type == "array":
        # The element type is resolved later from the "items" schema.
        bq_type = None
        mode = "REPEATED"
    elif json_type == "string":
        bq_type = "TIMESTAMP" if property_.get("format") == "date-time" else "STRING"
    elif json_type == "integer":
        bq_type = "INT64"
    elif json_type == "number":
        bq_type = numeric_type
    else:
        # Only "boolean" remains after the validity check above.
        bq_type = "BOOL"
    return bq_type, mode
def _parse_property(key, property_, numeric_type="NUMERIC"):
    """Convert one JSON-schema property into the argument tuple for a
    BigQuery ``SchemaField``: (name, type, mode, description, nested fields).
    """
    if numeric_type not in ["NUMERIC", "FLOAT64"]:
        raise ValueError("Unknown numeric type %s" % numeric_type)
    schema_name = key
    schema_description = None
    schema_fields = tuple()
    if "type" not in property_ and "anyOf" in property_:
        # Pick the last non-null alternative from "anyOf" and treat it as the
        # property's schema.
        # NOTE(review): the NULLABLE assignment below is dead — schema_mode is
        # overwritten by _get_schema_type_mode() right after this block.
        # Confirm whether a null alternative was meant to force NULLABLE mode.
        for types in property_["anyOf"]:
            if types["type"] == "null":
                schema_mode = "NULLABLE"
            else:
                property_ = types
    schema_type, schema_mode = _get_schema_type_mode(property_, numeric_type)
    if schema_type == "RECORD":
        # Nested object: recurse to build the child field tuple.
        schema_fields = tuple(parse_schema(property_, numeric_type))
    if schema_mode == "REPEATED":
        # Arrays take their element type (and nested fields) from "items".
        schema_type, _ = _get_schema_type_mode(property_.get("items"), numeric_type)
        if schema_type == "RECORD":
            schema_fields = tuple(parse_schema(property_.get("items"), numeric_type))
    return (schema_name, schema_type, schema_mode, schema_description, schema_fields)
def parse_schema(schema, numeric_type="NUMERIC"):
    """Convert a JSON schema's properties into a list of BigQuery SchemaFields.

    An object with no properties is replaced by a single nullable STRING field
    named "dummy" (emitted via recursion), since an empty RECORD is not useful.
    """
    bq_schema = []
    # Iterate items() instead of keys() + indexing; same order, one lookup.
    for key, property_ in schema["properties"].items():
        (
            schema_name,
            schema_type,
            schema_mode,
            schema_description,
            schema_fields,
        ) = _parse_property(key, property_, numeric_type)
        schema_field = SchemaField(
            schema_name, schema_type, schema_mode, schema_description, schema_fields
        )
        bq_schema.append(schema_field)
    if not bq_schema:
        # Logger.warn is a deprecated alias of Logger.warning.
        logger.warning(
            "RECORD type does not have properties." + " Inserting a dummy string object"
        )
        return parse_schema(
            {"properties": {"dummy": {"type": ["null", "string"]}}}, numeric_type
        )
    return bq_schema
def clean_and_validate(message, schemas, invalids, on_invalid_record, json_dumps=False):
    """Validate a singer RECORD message against its stream's JSON schema.

    Returns ``(record, invalids)``: the (optionally JSON-serialized) record
    and the running count of invalid records. When validation fails,
    ``on_invalid_record == "abort"`` raises; otherwise the failure is counted
    and logged (up to MAX_WARNING messages). Number-typed fields holding
    number-convertible strings are tolerated; non-convertible values are
    nulled out so the row can still be persisted.
    """
    batch_tstamp = datetime.datetime.utcnow()
    batch_tstamp = batch_tstamp.replace(tzinfo=datetime.timezone.utc)
    if message.stream not in schemas:
        raise Exception(
            (
                "A record for stream {} was encountered"
                + "before a corresponding schema"
            ).format(message.stream)
        )
    schema = schemas[message.stream]
    try:
        validate(message.record, schema)
    except ValidationError as e:
        cur_validation = False
        error_message = str(e)
        # It's a bit hacky and fragile here...
        # Scrape the offending field name and its expected type out of the
        # formatted jsonschema error text by fixed line index — this depends
        # on jsonschema's error layout staying stable.
        instance = re.sub(
            r".*instance\[\'(.*)\'\].*", r"\1", error_message.split("\n")[5]
        )
        type_ = re.sub(
            r".*\{\'type\'\: \[\'.*\', \'(.*)\'\]\}.*",
            r"\1",
            error_message.split("\n")[3],
        )
        # Save number-convertible strings...
        if type_ in ["integer", "number"]:
            n = None
            try:
                n = float(message.record[instance])
            except Exception:
                # In case we want to persist the rows with partially
                # invalid value
                message.record[instance] = None
                pass
            if n is not None:
                cur_validation = True
        # TODO:
        # Convert to BigQuery timestamp type (iso 8601)
        # if type_ == "string" and format_ == "date-time":
        #     n = None
        #     try:
        #         n = float(message.record[instance])
        #         d = datetime.datetime.fromtimestamp(n)
        #         d = d.replace(tzinfo=datetime.timezone.utc)
        #         message.record[instance] = d.isoformat()
        #     except Exception:
        #         # In case we want to persist the rows with partially
        #         # invalid value
        #         message.record[instance] = None
        #         pass
        #     if d is not None:
        #         cur_validation = True
        if cur_validation is False:
            invalids = invalids + 1
            if invalids < MAX_WARNING:
                logger.warn(
                    ("Validation error in record [%s]" + " :: %s :: %s :: %s")
                    % (instance, type_, str(message.record), str(e))
                )
            elif invalids == MAX_WARNING:
                logger.warn("Max validation warning reached.")
            if on_invalid_record == "abort":
                raise ValidationError("Validation required and failed.")
    if BATCH_TIMESTAMP in schema["properties"].keys():
        # Stamp the StitchData-style batch timestamp if the schema defines it.
        message.record[BATCH_TIMESTAMP] = batch_tstamp.isoformat()
    record = message.record
    if json_dumps:
        try:
            record = bytes(json.dumps(record) + "\n", "UTF-8")
        except TypeError as e:
            logger.warning(record)
            raise
    return record, invalids
| 6,197 | 0 | 92 |
b3ce6c9f9389a83f38d0f1128fd23d73ec7da285 | 5,508 | py | Python | sgl/tests/api_test.py | RieksJ/sgl | 49ad41b6db7c02a22046d7a20471b0d5eac1a961 | [
"Apache-2.0"
] | 4 | 2019-06-26T21:03:36.000Z | 2021-10-05T10:40:10.000Z | sgl/tests/api_test.py | RieksJ/sgl | 49ad41b6db7c02a22046d7a20471b0d5eac1a961 | [
"Apache-2.0"
] | 4 | 2019-07-01T21:12:09.000Z | 2020-11-30T12:53:55.000Z | sgl/tests/api_test.py | RieksJ/sgl | 49ad41b6db7c02a22046d7a20471b0d5eac1a961 | [
"Apache-2.0"
] | 3 | 2019-06-26T21:01:38.000Z | 2021-06-06T15:43:19.000Z | import pytest
from ..api import *
from .examples import *
from ..condition import Condition
# This test is the simplest one I could imagine that exercises the logic where
# disjoint subsets are calculated. The next test is similar, but does much more
# complex work. Hopefully the debugging can take place on this one.
| 32.023256 | 100 | 0.705701 | import pytest
from ..api import *
from .examples import *
from ..condition import Condition
# Basic satisfies() behaviour for single principals and small groups.
def test_bob_satisfies_id_bob():
    """An id-matched principal satisfies both the condition and the rule."""
    assert satisfies(p.bob, c.bob)
    assert satisfies(p.bob, r.enter_to_bob)
def test_carl_doesnt_satisfy_id_bob():
    """A principal with a different id does not satisfy an id condition."""
    assert not satisfies(p.grandpa_carl, c.bob)
def test_bob_matches_1_and_with_id_bob():
    """A singleton group satisfies an 'all' condition requiring one id."""
    assert satisfies([p.bob], c.all_with_1_id)
def test_emily_matches_1_or_with_id_emily():
    """A single principal satisfies an 'any' condition requiring one id."""
    assert satisfies(p.sister_emily, c.any_with_1_id)
def test_group_with_bob_matches_1_or_with_id_bob():
    """A group satisfies an 'any' condition when at least one member matches."""
    assert satisfies([p.sister_emily, p.grandma_carol, p.bob], c.any_with_1_id)
def test_empty_rule_fails():
    """Passing None as the rule raises PreconditionViolation."""
    with pytest.raises(PreconditionViolation):
        satisfies(p.bob, None)
def test_grandma_satisfies_grandparent():
    """A grandparent-role principal satisfies the grandparent rule."""
    assert satisfies(p.grandma_extra, r.three_privs_to_grandparent)
def test_2_grandparents_satisfies_1():
    """Two grandparents satisfy a rule that needs only one."""
    assert satisfies([p.grandma_carol, p.grandpa_carl], r.three_privs_to_grandparent)
def test_1_grandparent_doesnt_satisfy_2():
    """A single grandparent cannot satisfy a rule requiring two."""
    assert not satisfies([p.grandma_carol], r.spoil_child_to_2_grandparents)
def test_2_grandparents_satisfies_2():
    """Two distinct grandparents satisfy a two-grandparent rule."""
    assert satisfies([p.grandma_carol, p.grandpa_carl], r.spoil_child_to_2_grandparents)
def test_multirole_satisfies_1():
    """A principal holding several roles satisfies a single-role rule."""
    assert satisfies(p.employee_and_investor, r.enter_to_employee)
def test_others_dont_satisfy_grandparent():
    """Principals lacking the grandparent role do not satisfy the rule."""
    assert not satisfies([
        p.sister_emily,
        p.tribal_council,
        p.bob,
        p.employee
    ], r.three_privs_to_grandparent)
def test_same_2_grandparents():
    """The same principal listed twice does not count as two grandparents."""
    assert not satisfies([p.grandpa_carl, p.grandpa_carl], c.two_grandparents)
def test_either_sibling_or_investor():
    """An 'any' of two role conditions is met by either role in the group."""
    assert satisfies([p.investor, p.grandma_extra], Condition(any=[c.sibling, c.grandparent]))
    assert satisfies([p.brother_extra, p.investor], Condition(any=[c.sibling, c.grandparent]))
    assert satisfies([p.brother_extra, p.grandma_extra], Condition(any=[c.sibling, c.grandparent]))
def test_2_grandparents_trusted():
    """Two grandparents meet the 'trusted' condition, extras don't hurt."""
    assert satisfies([p.grandma_carol, p.grandpa_carl], c.trusted)
    assert satisfies([p.grandma_carol, p.bob, p.grandpa_carl], c.trusted)
def test_1_grandparent_3_tribal_council_trusted():
    """One grandparent plus three tribal-council members are also 'trusted'."""
    extra = Principal.from_dict({"roles": ["tribal_council"]})
    assert satisfies([
        p.grandma_carol,
        p.sister_emily,
        p.investor,
        p.tribal_council,
        p.tribal_council_fatima,
        extra],
        c.trusted)
def check_disjoint_and_not(group, c, expected_when_disjoint = False):
    """Assert ``group`` satisfies ``c`` without disjointness, and that with
    disjoint subsets the result equals ``expected_when_disjoint``."""
    assert satisfies(group, c, disjoint=False)
    assert satisfies(group, c, disjoint=True) == expected_when_disjoint
def test_same_person_for_all_not_disjoint():
    """One multi-role principal covers both 'all' branches when not disjoint."""
    check_disjoint_and_not(p.employee_and_investor, r.call_meeting_to_employee_and_investor)
def test_overlap_for_all_disjoint():
    """Overlapping role holders fail an 'all' that needs disjoint subsets."""
    check_disjoint_and_not([
        p.employee_and_investor,
        p.investor
    ], Condition(all=[c.employee_and_investor, Condition.from_dict({"n": 2, "roles": "investor"})]))
def test_same_person_for_all_disjoint():
    # NOTE(review): identical body to test_same_person_for_all_not_disjoint
    # above — confirm whether this was meant to use a different rule or
    # expected_when_disjoint value.
    check_disjoint_and_not(p.employee_and_investor, r.call_meeting_to_employee_and_investor)
# This test is the simplest one I could imagine that exercises the logic where
# disjoint subsets are calculated. The next test is similar, but does much more
# complex work. Hopefully the debugging can take place on this one.
def test_easiest_all_disjoint():
    """2 employees + 2 investors cannot be covered by 3 overlapping people."""
    x = Condition(all=[
        Condition(n=2, roles="employee"),
        Condition(n=2, roles="investor"),
    ])
    # The list of p should not satisfy the c listed above, because
    # we're asking for two employees and two investors -- and what we have is one
    # employee, one investor, and one employee+investor.
    assert not satisfies(p.objs, x)
def test_without_disjoint_3_satisfies_2_plus_2():
    """Without disjointness the overlapping trio does satisfy 2+2 roles."""
    x = Condition(all=[
        Condition(n=2, roles="employee"),
        Condition(n=2, roles="investor"),
    ])
    assert satisfies(p.objs, x, disjoint=False)
def test_complex_all_disjoint():
    """Nested 'all' conditions require disjoint subsets at every level."""
    x = Condition(all=[
        c.bob,
        Condition(n=2, roles="sibling"),
        Condition(all=[
            c.trusted,
            Condition(all=[
                Condition(n=2, roles="employee"),
                Condition(n=2, roles="investor"),
            ])
        ])
    ])
    # The list of p should not satisfy the c listed above, because
    # we're asking for two employees and two investors -- and what we have is one
    # employee, one investor, and one employee+investor.
    assert not satisfies(p.objs, x)
    # All 3 ways to fix the problem should result in a positive answer.
    assert satisfies(p.objs + [Principal(roles=["investor"])], x)
    assert satisfies(p.objs + [Principal(roles=["employee"])], x)
    assert satisfies(p.objs + [Principal(roles=["employee", "investor"])], x)
def test_satisfies_tolerates_dicts():
    """satisfies() accepts plain dicts for principals, conditions and rules."""
    satisfies({"id": "Fred"}, c.bob)
    satisfies(p.bob, {"id": "Bob"})
    satisfies(p.bob, {"grant": "enter", "when": {"id": "Bob"}})
def donttest_any_with_n_3():
    """Disabled test (prefixed so pytest skips it): an 'any' with n=3 needs
    three distinct matching principals across the alternatives."""
    x = Condition.from_dict(
        {"any": [
            {"roles": "grandparent"},
            {"roles": "sibling"}
        ], "n": 3}
    )
    # Removed the no-op self-assignment "p = p" left over from a rename.
    assert not satisfies([p.grandpa_carl, p.grandma_carol, p.investor], x)
    assert not satisfies([p.grandpa_carl, p.sister_emily, p.investor], x)
    assert satisfies([p.grandpa_carl, p.grandma_carol, p.sister_emily, p.investor], x)
    assert satisfies([p.grandpa_carl, p.sister_emily, p.brother_extra, p.investor], x)
2c866933e977776d72cb1f582327bc68c0c36f0d | 1,782 | py | Python | tests/test_accuracy.py | uk-gov-mirror/ONSdigital.companies-house-big-data-project | be74293b4398976696d07c6b2329d6121c9e5c6a | [
"MIT"
] | null | null | null | tests/test_accuracy.py | uk-gov-mirror/ONSdigital.companies-house-big-data-project | be74293b4398976696d07c6b2329d6121c9e5c6a | [
"MIT"
] | null | null | null | tests/test_accuracy.py | uk-gov-mirror/ONSdigital.companies-house-big-data-project | be74293b4398976696d07c6b2329d6121c9e5c6a | [
"MIT"
] | null | null | null | import unittest
# Custom import
from src.performance_metrics.binary_classifier_metrics import BinaryClassifierMetrics
class TestAccuracy(unittest.TestCase):
    """Unit tests for BinaryClassifierMetrics.accuracy: the computed value,
    argument type checking, and rejection of None arguments.
    """
    def test_accuracy_pos(self):
        """
        Positive test case for the accuracy function.
        """
        # Assume
        metrics = BinaryClassifierMetrics()
        # Assert
        self.assertAlmostEqual(metrics.accuracy(tp=1, tn=1, fp=1, fn=1), 0.5)
    def test_accuracy_neg(self):
        """
        Negative test case for the accuracy function.
        """
        # Assume
        metrics = BinaryClassifierMetrics()
        # Assert
        self.assertNotEqual(metrics.accuracy(tp=1, tn=1, fp=1, fn=1), 0.8)
    def test_types(self):
        """
        Each float (non-int) argument position raises TypeError.
        """
        # Assume
        metrics = BinaryClassifierMetrics()
        # Assert
        with self.assertRaises(TypeError):
            metrics.accuracy(1.0, 2, 3, 4)
        with self.assertRaises(TypeError):
            metrics.accuracy(1, 2.0, 3, 4)
        with self.assertRaises(TypeError):
            metrics.accuracy(1, 2, 3.0, 4)
        with self.assertRaises(TypeError):
            metrics.accuracy(1, 2, 3, 4.0)
    def test_values(self):
        """
        Each None argument position raises ValueError.
        """
        # Assume
        metrics = BinaryClassifierMetrics()
        # Assert
        with self.assertRaises(ValueError):
            metrics.accuracy(None, 2, 3, 4)
        with self.assertRaises(ValueError):
            metrics.accuracy(1, None, 3, 4)
        with self.assertRaises(ValueError):
            metrics.accuracy(1, 2, None, 4)
        with self.assertRaises(ValueError):
            metrics.accuracy(1, 2, 3, None)
| 25.457143 | 85 | 0.585297 | import unittest
# Custom import
from src.performance_metrics.binary_classifier_metrics import BinaryClassifierMetrics
class TestAccuracy(unittest.TestCase):
    """Unit tests for the BinaryClassifierMetrics.accuracy method."""
    def setUp(self):
        # A fresh metrics instance for every test case.
        self.metrics = BinaryClassifierMetrics()
    def test_accuracy_pos(self):
        """accuracy returns 0.5 for one of each of tp/tn/fp/fn."""
        self.assertAlmostEqual(self.metrics.accuracy(tp=1, tn=1, fp=1, fn=1), 0.5)
    def test_accuracy_neg(self):
        """accuracy does not return an unrelated value."""
        self.assertNotEqual(self.metrics.accuracy(tp=1, tn=1, fp=1, fn=1), 0.8)
    def test_types(self):
        """A float in any argument position raises TypeError."""
        for args in [(1.0, 2, 3, 4), (1, 2.0, 3, 4), (1, 2, 3.0, 4), (1, 2, 3, 4.0)]:
            with self.assertRaises(TypeError):
                self.metrics.accuracy(*args)
    def test_values(self):
        """None in any argument position raises ValueError."""
        for args in [(None, 2, 3, 4), (1, None, 3, 4), (1, 2, None, 4), (1, 2, 3, None)]:
            with self.assertRaises(ValueError):
                self.metrics.accuracy(*args)
| 0 | 0 | 0 |
4e67d1ef0355b54239ebe56111fc3b16c5ffc504 | 6,594 | py | Python | Utils/transforms.py | Jack-XHP/LabPicV2-MaskRCNN | b0586b2827000c7b7337d5110b2b1fd6185053a8 | [
"MIT"
] | null | null | null | Utils/transforms.py | Jack-XHP/LabPicV2-MaskRCNN | b0586b2827000c7b7337d5110b2b1fd6185053a8 | [
"MIT"
] | null | null | null | Utils/transforms.py | Jack-XHP/LabPicV2-MaskRCNN | b0586b2827000c7b7337d5110b2b1fd6185053a8 | [
"MIT"
] | null | null | null | import random
import torch
import numbers
from torchvision.transforms import Lambda, functional as F
class ColorJitter(object):
    """Randomly change the brightness, contrast and saturation of an image.
    Args:
        brightness (float or tuple of float (min, max)): How much to jitter brightness.
            brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
            or the given [min, max]. Should be non negative numbers.
        contrast (float or tuple of float (min, max)): How much to jitter contrast.
            contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
            or the given [min, max]. Should be non negative numbers.
        saturation (float or tuple of float (min, max)): How much to jitter saturation.
            saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
            or the given [min, max]. Should be non negative numbers.
        hue (float or tuple of float (min, max)): How much to jitter hue.
            hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
            Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
    """
    @staticmethod
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.
        Arguments are same as that of __init__.
        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []
        # Each non-None range contributes one Lambda with a factor sampled
        # uniformly from that range.
        if brightness is not None:
            brightness_factor = random.uniform(brightness[0], brightness[1])
            transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
        if contrast is not None:
            contrast_factor = random.uniform(contrast[0], contrast[1])
            transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
        if saturation is not None:
            saturation_factor = random.uniform(saturation[0], saturation[1])
            transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
        if hue is not None:
            hue_factor = random.uniform(hue[0], hue[1])
            transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
        random.shuffle(transforms)
        # NOTE(review): this module's Compose calls t(image, target) while the
        # Lambdas above take a single image argument — confirm the returned
        # transform is meant for torchvision's Compose, or is unused.
        transform = Compose(transforms)
        return transform
| 40.703704 | 103 | 0.601153 | import random
import torch
import numbers
from torchvision.transforms import Lambda, functional as F
def _flip_coco_person_keypoints(kps, width):
flip_inds = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
flipped_data = kps[:, flip_inds]
flipped_data[..., 0] = width - flipped_data[..., 0]
# Maintain COCO convention that if visibility == 0, then x, y = 0
inds = flipped_data[..., 2] == 0
flipped_data[inds] = 0
return flipped_data
class Compose(object):
    """Chain (image, target) transforms, applying them in order."""
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, image, target):
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target
class RandomHorizontalFlip(object):
    """Mirror image, boxes, masks and keypoints left-right with probability ``prob``."""
    def __init__(self, prob):
        self.prob = prob
    def __call__(self, image, target):
        if random.random() >= self.prob:
            return image, target
        _, width = image.shape[-2:]
        image = image.flip(-1)
        boxes = target["boxes"]
        # Mirror x-coordinates; swap x1/x2 so each box stays well-formed.
        boxes[:, [0, 2]] = width - boxes[:, [2, 0]]
        target["boxes"] = boxes
        if "masks" in target:
            target["masks"] = target["masks"].flip(-1)
        if "keypoints" in target:
            target["keypoints"] = _flip_coco_person_keypoints(target["keypoints"], width)
        return image, target
class RandomVerticalFlip(object):
    """Mirror image, boxes and masks top-bottom with probability ``prob``."""
    def __init__(self, prob):
        self.prob = prob
    def __call__(self, image, target):
        if random.random() >= self.prob:
            return image, target
        height, _ = image.shape[-2:]
        image = image.flip(-2)
        boxes = target["boxes"]
        # Mirror y-coordinates; swap y1/y2 so each box stays well-formed.
        boxes[:, [1, 3]] = height - boxes[:, [3, 1]]
        target["boxes"] = boxes
        if "masks" in target:
            target["masks"] = target["masks"].flip(-2)
        return image, target
class ColorJitter(object):
    """Randomly change the brightness, contrast and saturation of an image.
    Args:
        brightness (float or tuple of float (min, max)): How much to jitter brightness.
            brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
            or the given [min, max]. Should be non negative numbers.
        contrast (float or tuple of float (min, max)): How much to jitter contrast.
            contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
            or the given [min, max]. Should be non negative numbers.
        saturation (float or tuple of float (min, max)): How much to jitter saturation.
            saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
            or the given [min, max]. Should be non negative numbers.
        hue (float or tuple of float (min, max)): How much to jitter hue.
            hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
            Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
    """
    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        self.brightness = self._check_input(brightness, 'brightness')
        self.contrast = self._check_input(contrast, 'contrast')
        self.saturation = self._check_input(saturation, 'saturation')
        # Hue is centered on 0 and bounded to [-0.5, 0.5]; negative minima
        # are legal, so the lower bound is not clipped at zero.
        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
                                     clip_first_on_zero=False)
    def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
        # Normalize a scalar jitter amount into a [min, max] range around
        # `center`, or validate a user-supplied 2-element range. Returns None
        # when the range collapses to the identity (no jitter requested).
        if isinstance(value, numbers.Number):
            if value < 0:
                raise ValueError("If {} is a single number, it must be non negative.".format(name))
            value = [center - value, center + value]
            if clip_first_on_zero:
                value[0] = max(value[0], 0)
        elif isinstance(value, (tuple, list)) and len(value) == 2:
            if not bound[0] <= value[0] <= value[1] <= bound[1]:
                raise ValueError("{} values should be between {}".format(name, bound))
        else:
            raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
        # if value is 0 or (1., 1.) for brightness/contrast/saturation
        # or (0., 0.) for hue, do nothing
        if value[0] == value[1] == center:
            value = None
        return value
    @staticmethod
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.
        Arguments are same as that of __init__.
        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []
        if brightness is not None:
            brightness_factor = random.uniform(brightness[0], brightness[1])
            transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
        if contrast is not None:
            contrast_factor = random.uniform(contrast[0], contrast[1])
            transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
        if saturation is not None:
            saturation_factor = random.uniform(saturation[0], saturation[1])
            transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
        if hue is not None:
            hue_factor = random.uniform(hue[0], hue[1])
            transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
        random.shuffle(transforms)
        # NOTE(review): this module's Compose calls t(image, target) while the
        # Lambdas above take a single image argument — confirm get_params is
        # meant for torchvision's Compose, or is unused (``__call__`` below
        # does not use it).
        transform = Compose(transforms)
        return transform
    def __call__(self, image, target):
        # Apply each enabled jitter with an independently sampled factor;
        # the target is passed through untouched.
        if self.brightness is not None:
            brightness_factor = random.uniform(self.brightness[0], self.brightness[1])
            image = F.adjust_brightness(image, brightness_factor)
        if self.contrast is not None:
            contrast_factor = random.uniform(self.contrast[0], self.contrast[1])
            image = F.adjust_contrast(image, contrast_factor)
        if self.saturation is not None:
            saturation_factor = random.uniform(self.saturation[0], self.saturation[1])
            image = F.adjust_saturation(image, saturation_factor)
        if self.hue is not None:
            hue_factor = random.uniform(self.hue[0], self.hue[1])
            image = F.adjust_hue(image, hue_factor)
        return image, target
class ToTensor(object):
    """Convert the image via ``F.to_tensor``; the target passes through unchanged."""
    def __call__(self, image, target):
        image = F.to_tensor(image)
        return image, target
| 3,692 | 29 | 380 |
e70554a4eb14bbf62db7f17c3d12988f3fce621e | 5,747 | py | Python | repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py | AsM0DeUz/leapp-repository | b67a395ee3d67d3d628037c250a210bb52e9187c | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py | AsM0DeUz/leapp-repository | b67a395ee3d67d3d628037c250a210bb52e9187c | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py | AsM0DeUz/leapp-repository | b67a395ee3d67d3d628037c250a210bb52e9187c | [
"Apache-2.0"
] | null | null | null | import os
from leapp.libraries.common.config import mock_configs
from leapp.models import (
DNFWorkaround,
InstalledRPM,
Module,
RepositoriesSetupTasks,
RPM,
RpmTransactionTasks,
SatelliteFacts
)
from leapp.snactor.fixture import current_actor_context
RH_PACKAGER = 'Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>'
FOREMAN_RPM = fake_package('foreman')
FOREMAN_PROXY_RPM = fake_package('foreman-proxy')
KATELLO_INSTALLER_RPM = fake_package('foreman-installer-katello')
KATELLO_RPM = fake_package('katello')
POSTGRESQL_RPM = fake_package('rh-postgresql12-postgresql-server')
| 40.188811 | 110 | 0.792761 | import os
from leapp.libraries.common.config import mock_configs
from leapp.models import (
DNFWorkaround,
InstalledRPM,
Module,
RepositoriesSetupTasks,
RPM,
RpmTransactionTasks,
SatelliteFacts
)
from leapp.snactor.fixture import current_actor_context
RH_PACKAGER = 'Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>'
def fake_package(pkg_name):
    """Build a noarch RPM model named ``pkg_name`` with fixed fake metadata."""
    return RPM(
        name=pkg_name,
        version='0.1',
        release='1.sm01',
        epoch='1',
        packager=RH_PACKAGER,
        arch='noarch',
        pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51',
    )
FOREMAN_RPM = fake_package('foreman')
FOREMAN_PROXY_RPM = fake_package('foreman-proxy')
KATELLO_INSTALLER_RPM = fake_package('foreman-installer-katello')
KATELLO_RPM = fake_package('katello')
POSTGRESQL_RPM = fake_package('rh-postgresql12-postgresql-server')
def test_no_satellite_present(current_actor_context):
    """No SatelliteFacts are produced when no Satellite packages are installed."""
    current_actor_context.feed(InstalledRPM(items=[]))
    current_actor_context.run(config_model=mock_configs.CONFIG)
    message = current_actor_context.consume(SatelliteFacts)
    assert not message
def test_satellite_present(current_actor_context):
    """The foreman package sets has_foreman in the produced facts."""
    current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM]))
    current_actor_context.run(config_model=mock_configs.CONFIG)
    message = current_actor_context.consume(SatelliteFacts)[0]
    assert message.has_foreman
def test_wrong_arch(current_actor_context):
    """No SatelliteFacts are produced under the s390x mock configuration."""
    current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM]))
    current_actor_context.run(config_model=mock_configs.CONFIG_S390X)
    message = current_actor_context.consume(SatelliteFacts)
    assert not message
def test_satellite_capsule_present(current_actor_context):
    """foreman-proxy (a Capsule) also sets has_foreman."""
    current_actor_context.feed(InstalledRPM(items=[FOREMAN_PROXY_RPM]))
    current_actor_context.run(config_model=mock_configs.CONFIG)
    message = current_actor_context.consume(SatelliteFacts)[0]
    assert message.has_foreman
def test_no_katello_installer_present(current_actor_context):
    """has_katello_installer is False without foreman-installer-katello."""
    current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM]))
    current_actor_context.run(config_model=mock_configs.CONFIG)
    message = current_actor_context.consume(SatelliteFacts)[0]
    assert not message.has_katello_installer
def test_katello_installer_present(current_actor_context):
    """has_katello_installer is True when foreman-installer-katello exists."""
    current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM, KATELLO_INSTALLER_RPM]))
    current_actor_context.run(config_model=mock_configs.CONFIG)
    message = current_actor_context.consume(SatelliteFacts)[0]
    assert message.has_katello_installer
def test_enables_ruby_module(current_actor_context):
    """A foreman install requests enabling the ruby 2.7 module stream."""
    current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM]))
    current_actor_context.run(config_model=mock_configs.CONFIG)
    message = current_actor_context.consume(RpmTransactionTasks)[0]
    assert Module(name='ruby', stream='2.7') in message.modules_to_enable
def test_enables_pki_modules(current_actor_context):
    """A katello install requests the pki-core/pki-deps 10.6 module streams."""
    current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM, KATELLO_RPM]))
    current_actor_context.run(config_model=mock_configs.CONFIG)
    message = current_actor_context.consume(RpmTransactionTasks)[0]
    assert Module(name='pki-core', stream='10.6') in message.modules_to_enable
    assert Module(name='pki-deps', stream='10.6') in message.modules_to_enable
def test_detects_local_postgresql(monkeypatch, current_actor_context):
    """An existing local PostgreSQL data dir enables the postgresql module,
    sets local_postgresql in the facts, and emits a DNFWorkaround."""
    def mock_stat():
        # Pretend the rh-postgresql12 data directory exists by redirecting
        # its stat() lookup to '/'.
        orig_stat = os.stat
        def mocked_stat(path):
            if path == '/var/opt/rh/rh-postgresql12/lib/pgsql/data/':
                path = '/'
            return orig_stat(path)
        return mocked_stat
    monkeypatch.setattr("os.stat", mock_stat())
    current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM, POSTGRESQL_RPM]))
    current_actor_context.run(config_model=mock_configs.CONFIG)
    rpmmessage = current_actor_context.consume(RpmTransactionTasks)[0]
    assert Module(name='postgresql', stream='12') in rpmmessage.modules_to_enable
    satellitemsg = current_actor_context.consume(SatelliteFacts)[0]
    assert satellitemsg.postgresql.local_postgresql
    assert current_actor_context.consume(DNFWorkaround)
def test_detects_remote_postgresql(current_actor_context):
    """Without a local data dir, no postgresql module, facts flag, or workaround."""
    current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM]))
    current_actor_context.run(config_model=mock_configs.CONFIG)
    rpmmessage = current_actor_context.consume(RpmTransactionTasks)[0]
    assert Module(name='postgresql', stream='12') not in rpmmessage.modules_to_enable
    satellitemsg = current_actor_context.consume(SatelliteFacts)[0]
    assert not satellitemsg.postgresql.local_postgresql
    assert not current_actor_context.consume(DNFWorkaround)
def test_enables_right_repositories_on_satellite(current_actor_context):
    """A Satellite server enables maintenance + satellite repos, not capsule."""
    current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM]))
    current_actor_context.run(config_model=mock_configs.CONFIG)
    rpmmessage = current_actor_context.consume(RepositoriesSetupTasks)[0]
    assert 'satellite-maintenance-6.11-for-rhel-8-x86_64-rpms' in rpmmessage.to_enable
    assert 'satellite-6.11-for-rhel-8-x86_64-rpms' in rpmmessage.to_enable
    assert 'satellite-capsule-6.11-for-rhel-8-x86_64-rpms' not in rpmmessage.to_enable
def test_enables_right_repositories_on_capsule(current_actor_context):
    """A Capsule enables maintenance + capsule repos, not the satellite repo."""
    current_actor_context.feed(InstalledRPM(items=[FOREMAN_PROXY_RPM]))
    current_actor_context.run(config_model=mock_configs.CONFIG)
    rpmmessage = current_actor_context.consume(RepositoriesSetupTasks)[0]
    assert 'satellite-maintenance-6.11-for-rhel-8-x86_64-rpms' in rpmmessage.to_enable
    assert 'satellite-6.11-for-rhel-8-x86_64-rpms' not in rpmmessage.to_enable
    assert 'satellite-capsule-6.11-for-rhel-8-x86_64-rpms' in rpmmessage.to_enable
| 4,824 | 0 | 299 |
98cfd62b53f13307a1586c84e4d32bb05f5372a1 | 5,496 | py | Python | cowait/tasks/task.py | emilwareus/cowa | c6df182aea488a129e517415e74a5164dcdeea9d | [
"Apache-2.0"
] | 2 | 2021-08-11T08:51:42.000Z | 2021-08-11T08:55:19.000Z | cowait/tasks/task.py | emilwareus/cowait | c6df182aea488a129e517415e74a5164dcdeea9d | [
"Apache-2.0"
] | null | null | null | cowait/tasks/task.py | emilwareus/cowait | c6df182aea488a129e517415e74a5164dcdeea9d | [
"Apache-2.0"
] | null | null | null | import sys
import inspect
from typing import Any
from cowait.types import serialize
from .definition import TaskDefinition
from .components import TaskManager, RpcComponent, rpc
from .parent_task import ParentTask
| 28.625 | 94 | 0.561135 | import sys
import inspect
from typing import Any
from cowait.types import serialize
from .definition import TaskDefinition
from .components import TaskManager, RpcComponent, rpc
from .parent_task import ParentTask
class Task(object):
    """
    Base class for cowait tasks.

    Note: constructing a Task subclass while another task is active in this
    process does NOT create a local instance — ``__new__`` diverts the call
    and spawns the task remotely via :meth:`spawn`. Real instances are built
    only by the executor, which passes exactly taskdef/node/cluster.
    """

    # Process-wide reference to the currently active task (see get/set_current).
    __current__ = None

    def __init__(self, **inputs):
        """
        Creates a new instance of the task. Pass inputs as keyword arguments.
        """
        # We are using **inputs keyword arguments so that in-IDE tooltips will be more helpful
        # (at least less confusing) when invoking subtasks using constructor syntax.
        # However, subtasks will actually never be instantiated. The constructor call is
        # diverted by the runtime in Task.__new__().
        # Tasks should only be constructed by the executor, and it will pass these 3 arguments:
        if 'taskdef' not in inputs or 'node' not in inputs or \
           'cluster' not in inputs or len(inputs) != 3:
            raise RuntimeError('Invalid task class instantiation')

        self.node = inputs['node']
        self.cluster = inputs['cluster']
        self.taskdef = inputs['taskdef']
        self.parent = ParentTask(self.node)
        self.subtasks = TaskManager(self)
        self.rpc = RpcComponent(self)

        # Set this task as the current active task
        Task.set_current(self)

    def __new__(cls, *args, **inputs):
        current = Task.get_current()
        if current is None:
            # There is no active task. Continue normal instantiation.
            return object.__new__(cls)
        else:
            # There is already an active task in this process, so we should spawn a subtask.
            # Divert constructor behaviour to instead spawn a remote task and return it.
            if len(args) > 0:
                raise TypeError('Tasks do not accept positional arguments')
            return current.spawn(cls, inputs=inputs)

    @property
    def id(self) -> str:
        """Unique id of this task instance (from the task definition)."""
        return self.taskdef.id

    @property
    def image(self) -> str:
        """Container image the task runs in."""
        return self.taskdef.image

    @property
    def meta(self) -> dict:
        """Arbitrary task metadata from the task definition."""
        return self.taskdef.meta

    def __str__(self) -> str:
        # NOTE(review): relies on a ``name`` attribute that is not assigned in
        # this class — presumably provided elsewhere; verify before relying on it.
        return f'Task({self.id}, {self.name})'

    def init(self):
        """Synchronous initialization hook; override in subclasses."""
        pass

    async def before(self, inputs: dict) -> dict:
        """Pre-run hook; may transform and must return the task inputs."""
        return inputs

    async def run(self, **inputs: dict) -> Any:
        """Task entry point; override with the actual work."""
        pass

    async def after(self, inputs: dict) -> Any:
        """Post-run hook, called after run() completes."""
        pass

    @rpc
    async def stop(self) -> None:
        """
        Abort task execution.
        """
        print('\n~~ STOPPED ~~')

        # send a stop status
        await self.node.parent.send_stop()

        # stop subtasks
        for task in self.subtasks.values():
            await task.stop()

        # schedule exit on the next event loop.
        # allows the RPC call to return before exit.
        async def _quit():
            sys.exit(1)
        self.node.io.create_task(_quit())

    def spawn(
        self,
        name: str,
        id: str = None,
        image: str = None,
        ports: dict = {},
        routes: dict = {},
        inputs: dict = {},
        meta: dict = {},
        env: dict = {},
        volumes: dict = {},
        cpu: str = None,
        cpu_limit: str = None,
        memory: str = None,
        memory_limit: str = None,
        owner: str = '',
        affinity: dict = {},
        **kwargs: dict,
    ) -> 'Task':
        """
        Spawn a subtask.

        Arguments:
            name (str): Task name, or a Task subclass (its module path is used).
            image (str): Task image. Defaults to the current task image.
            kwargs (dict): Extra input arguments, merged into ``inputs``.

        Returns:
            Task: a handle to the spawned remote task.

        Raises:
            TypeError: for unsupported ``name`` values, positional arguments,
                or inputs that are still un-awaited coroutines.
        """
        # merge inputs with remaining kwargs
        inputs = {
            **inputs,
            **kwargs,
        }

        if isinstance(name, str):
            pass
        elif inspect.isclass(name) and issubclass(name, Task):
            # isclass() guard: issubclass() itself raises TypeError on
            # non-class values, which previously bypassed the error below.
            name = name.__module__
        else:
            # str() is required: concatenating the type object directly raised
            # an unrelated TypeError and hid this message.
            raise TypeError('Unsupported task type: ' + str(type(name)))

        # throw error if any input is a coroutine
        for key, value in inputs.items():
            if inspect.iscoroutine(value):
                raise TypeError(f'Input {key} must be awaited first')

        taskdef = TaskDefinition(
            id=id,
            name=name,
            parent=self.id,
            image=image if image else self.image,
            upstream=self.node.get_url(),
            meta=meta,
            ports=ports,
            routes=routes,
            cpu=cpu if cpu else self.taskdef.cpu,
            cpu_limit=cpu_limit if cpu_limit else self.taskdef.cpu_limit,
            memory=memory if memory else self.taskdef.memory,
            memory_limit=memory_limit if memory_limit else self.taskdef.memory_limit,
            owner=owner,
            inputs=serialize(inputs),
            storage=self.taskdef.storage,
            volumes={
                **self.taskdef.volumes,
                **volumes,
            },
            env={
                **self.taskdef.env,
                **env,
            },
            affinity=affinity if affinity != {} else self.taskdef.affinity,
        )

        # authorize id
        self.node.http.auth.add_token(taskdef.id)

        # spawn task
        task = self.cluster.spawn(taskdef)

        # register with subtask manager
        self.subtasks.watch(task)

        return task

    @staticmethod
    def get_current() -> 'Task':
        """Return the task currently active in this process, or None."""
        return Task.__current__

    @staticmethod
    def set_current(task: 'Task'):
        """Mark *task* as the process-wide active task."""
        Task.__current__ = task
| 920 | 4,338 | 23 |
5072b17af5bb82ed5d711708659f63dcf1074494 | 6,653 | py | Python | gwaripper/download.py | nilfoer/gwaripper | 28492b9894973633612471094d24907b2bc47728 | [
"MIT"
] | 6 | 2021-03-12T08:57:18.000Z | 2022-03-27T00:28:17.000Z | gwaripper/download.py | nilfoer/gwaripper | 28492b9894973633612471094d24907b2bc47728 | [
"MIT"
] | 1 | 2020-10-05T04:25:53.000Z | 2020-10-05T14:20:07.000Z | gwaripper/download.py | nilfoer/gwaripper | 28492b9894973633612471094d24907b2bc47728 | [
"MIT"
] | 2 | 2021-03-12T11:05:46.000Z | 2021-09-12T22:53:58.000Z | import sys
import os
import urllib.request
import urllib.error
import logging
import subprocess
from typing import Optional, Dict
from urllib.error import ContentTooShortError
logger = logging.getLogger(__name__)
DEFAULT_HEADERS = {
'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0'
}
def download(url: str, dl_path: str):
    """
    Download ``url`` to ``dl_path``, creating parent directories as needed.

    :param url: Source URL to retrieve.
    :param dl_path: Destination file path.
    :return: Tuple ``(success, headers)``; ``headers`` is the response header
             object on success and ``None`` on failure.
    """
    parent_dir = os.path.dirname(dl_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    try:
        _, response_headers = urllib.request.urlretrieve(url, dl_path)  # reporthook=prog_bar_dl)
    except urllib.error.HTTPError as err:
        # HTTPError is handled first since it is a subclass of URLError
        logger.warning("HTTP Error %s: %s: \"%s\"", err.code, err.reason, url)
        return False, None
    except urllib.error.URLError as err:
        logger.warning("URL Error %s: \"%s\"", err.reason, url)
        return False, None
    return True, response_headers
def get_url_file_size(url: str) -> int:
    """Return the file size in bytes as reported by the Content-Length header."""
    with urllib.request.urlopen(url) as resp:
        return int(resp.info()["Content-Length"])
def prog_bar_dl(blocknum: int, blocksize: int, totalsize: int) -> None:
    """
    Write a textual download progress bar to sys.stdout.

    Intended as a ``reporthook`` for :func:`urllib.request.urlretrieve`:
    ``blocknum * blocksize`` is the number of bytes transferred so far.
    When the total size is unknown (<= 0) only the MB count is shown.

    Based on http://stackoverflow.com/questions/13881092 (J.F. Sebastian)
    combined with http://stackoverflow.com/questions/3160699 (Brian Khuu),
    modified.

    :param blocknum: Count of blocks transferred so far.
    :param blocksize: Block size in bytes.
    :param totalsize: Total size of the file in bytes (or -1 if unknown).
    :return: None
    """
    bar_len = 25  # length of the rendered progress bar
    bytes_read = blocknum * blocksize
    if totalsize > 0:
        percent = bytes_read * 1e2 / totalsize
        filled = int(round(bar_len * bytes_read / totalsize))
        bar = "#" * filled + "-" * (bar_len - filled)
        line = "\rDownloading: {:4.1f}% [{}] {:4.2f} / {:.2f} MB".format(
            percent, bar, bytes_read / 1024**2, totalsize / 1024**2)
        sys.stdout.write(line)
        if bytes_read >= totalsize:  # final block: terminate the line
            sys.stdout.write("\n")
    else:
        # total size unknown: show plain MB counter
        sys.stdout.write("\rDownloading: %.2f MB" % (bytes_read / 1024**2,))
    # stdout is buffered; flush so progress is visible immediately
    sys.stdout.flush()
| 41.322981 | 118 | 0.640764 | import sys
import os
import urllib.request
import urllib.error
import logging
import subprocess
from typing import Optional, Dict
from urllib.error import ContentTooShortError
logger = logging.getLogger(__name__)
DEFAULT_HEADERS = {
'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0'
}
def download(url: str, dl_path: str):
    """
    Download ``url`` to ``dl_path`` with urllib, creating parent directories.

    :param url: Source URL to retrieve.
    :param dl_path: Destination file path.
    :return: Tuple ``(success, headers)``; ``headers`` is the response header
             object on success and ``None`` on failure.
    """
    # get head (everythin b4 last part of path ("/" last -> tail empty,
    # filename or dir(without /) -> tail)) of path; no slash in path -> head empty
    dirpath, fn = os.path.split(dl_path)
    if dirpath:
        os.makedirs(dirpath, exist_ok=True)
    try:
        _, headers = urllib.request.urlretrieve(url, dl_path)  # reporthook=prog_bar_dl)
    except urllib.error.HTTPError as err:
        # catch this more detailed first then broader one (HTTPError is subclass of URLError)
        logger.warning("HTTP Error %s: %s: \"%s\"", err.code, err.reason, url)
        return False, None
    except urllib.error.URLError as err:
        logger.warning("URL Error %s: \"%s\"", err.reason, url)
        return False, None
    else:
        return True, headers
def download_in_chunks(url: str, filename: str,
                       headers: Optional[Dict[str, str]] = None,
                       prog_bar: bool = False) -> int:
    """
    Download ``url`` to ``filename`` by streaming fixed-size chunks.

    Parent directories of ``filename`` are created if missing.

    :param url: Source URL.
    :param filename: Destination file path.
    :param headers: Request headers; defaults to ``DEFAULT_HEADERS``.
    :param prog_bar: Show a progress bar on stdout while downloading.
    :return: Number of bytes written to disk.
    :raises ContentTooShortError: if fewer bytes were received than announced
        by the Content-Length header (e.g. an interrupted download).
    """
    # no slash in path -> dirpath empty -> write to the current directory
    dirpath, _ = os.path.split(filename)
    if dirpath:
        os.makedirs(dirpath, exist_ok=True)

    req = urllib.request.Request(url, headers=DEFAULT_HEADERS if headers is None else headers)
    # Before response.read() is called, the contents are not downloaded.
    with urllib.request.urlopen(req) as response:
        meta = response.info()
        reported_file_size = int(meta["Content-Length"])

        file_size_dl = 0
        chunk_size = 8192  # same block size urlretrieve uses
        block_num = 0
        with open(filename, 'wb') as w:
            while True:
                chunk = response.read(chunk_size)
                if not chunk:
                    break
                # use len(chunk): the final chunk is normally smaller than chunk_size
                file_size_dl += len(chunk)
                w.write(chunk)
                block_num += 1
                if prog_bar:
                    # mimic urlretrieve's reporthook behaviour
                    prog_bar_dl(block_num, chunk_size, reported_file_size)

    # Mirror urlretrieve: Content-Length is treated as a lower bound; if less
    # data arrived the download was probably interrupted.
    if file_size_dl < reported_file_size:
        raise ContentTooShortError(
            f"Downloaded file's size is smaller than the reported size for \"{url}\"",
            None)
    return file_size_dl
def get_url_file_size(url: str) -> int:
    """Returns file size in bytes that is reported in Content-Length Header"""
    # NOTE(review): if the server omits Content-Length this raises
    # (int(None)); callers appear to rely on the header being present.
    with urllib.request.urlopen(url) as response:
        reported_file_size = int(response.info()["Content-Length"])
    return reported_file_size
def prog_bar_dl(blocknum: int, blocksize: int, totalsize: int) -> None:
    """
    Displays a progress bar to sys.stdout

    Intended as a ``reporthook`` for urllib.request.urlretrieve.
    blocknum * blocksize == bytes read so far
    Only display MB read when total size is -1
    Calc percentage of file download, number of blocks to display is bar length * percent/100
    String to display is Downloading: xx.x% [#*block_nr + "-"*(bar_len-block_nr)] xx.xx MB

    http://stackoverflow.com/questions/13881092/download-progressbar-for-python-3
    by J.F. Sebastian
    combined with:
    http://stackoverflow.com/questions/3160699/python-progress-bar
    by Brian Khuu
    and modified

    :param blocknum: Count of blocks transferred so far
    :param blocksize: Block size in bytes
    :param totalsize: Total size of the file in bytes
    :return: None
    """
    bar_len = 25  # Modify this to change the length of the progress bar
    # blocknum is current block, blocksize the size of each block in bytes
    readsofar = blocknum * blocksize
    if totalsize > 0:
        percent = readsofar * 1e2 / totalsize  # 1e2 == 100.0
        # nr of blocks
        block_nr = int(round(bar_len*readsofar/totalsize))
        # %5.1f: pad to 5 chars and display one decimal, type float, %% -> escaped %sign
        # %*d -> Parametrized, width -> len(str(totalsize)), value -> readsofar
        # s = "\rDownloading: %5.1f%% %*d / %d" % (percent, len(str(totalsize)), readsofar, totalsize)
        sn = "\rDownloading: {:4.1f}% [{}] {:4.2f} / {:.2f} MB".format(percent, "#"*block_nr + "-"*(bar_len-block_nr),
                                                                      readsofar / 1024**2, totalsize / 1024**2)
        sys.stdout.write(sn)
        if readsofar >= totalsize:  # near the end
            sys.stdout.write("\n")
    else:  # total size is unknown
        sys.stdout.write("\rDownloading: %.2f MB" % (readsofar / 1024**2,))
    # Python's standard out is buffered (meaning that it collects some of the data "written" to standard out before
    # it writes it to the terminal). flush() forces it to "flush" the buffer, meaning that it will write everything
    # in the buffer to the terminal, even if normally it would wait before doing so.
    sys.stdout.flush()
def download_hls_ffmpeg(m3u8_url, filename, prob_bar: bool = False) -> bool:
    """
    Download the audio of an HLS (m3u8) stream using the external ffmpeg binary.

    Requires ``ffmpeg`` to be available on PATH.

    :param m3u8_url: URL of the HLS playlist.
    :param filename: Output filename passed to ffmpeg.
    :param prob_bar: Currently unused; kept for signature compatibility.
    :return: True if ffmpeg exited successfully, False otherwise.
    """
    # -vn: no video; -acodec copy: copy the audio codec without re-encoding
    args = ['ffmpeg', '-hide_banner', '-i', m3u8_url, '-vn', '-acodec', 'copy', filename]
    try:
        proc = subprocess.run(args, capture_output=True, check=True)
    except subprocess.CalledProcessError as err:
        logger.error("HLS download FFmpeg error: %s", str(err))
        logger.debug("FFmpeg stdout: %s", err.stdout)
        logger.debug("FFmpeg stderr: %s", err.stderr)
        return False
    else:
        return True
| 2,768 | 0 | 46 |
05dd1c658adfe935e1e7b1d4e297a0645c0809df | 67 | py | Python | dregel-7623/test.py | NTI-Gymnasieingenjor/AdventOfCode2020 | ea74c06a2b220e227618ed841c4eb853f08d5c84 | [
"MIT"
] | 1 | 2020-12-08T12:33:36.000Z | 2020-12-08T12:33:36.000Z | johan/test.py | NTI-Gymnasieingenjor/AdventOfCode2020 | ea74c06a2b220e227618ed841c4eb853f08d5c84 | [
"MIT"
] | null | null | null | johan/test.py | NTI-Gymnasieingenjor/AdventOfCode2020 | ea74c06a2b220e227618ed841c4eb853f08d5c84 | [
"MIT"
] | 1 | 2021-01-20T15:08:12.000Z | 2021-01-20T15:08:12.000Z | for count in range(6):
name = input()
print("hello ", name) | 22.333333 | 25 | 0.58209 | for count in range(6):
name = input()
print("hello ", name) | 0 | 0 | 0 |
fc5bdfbf1b2237de516549522094811569272771 | 24,209 | py | Python | src/openprocurement/api/utils.py | JrooTJunior/openprocurement.api | 9f932d96c4ba096a8e0a0b570f737620c1cc95db | [
"Apache-2.0"
] | null | null | null | src/openprocurement/api/utils.py | JrooTJunior/openprocurement.api | 9f932d96c4ba096a8e0a0b570f737620c1cc95db | [
"Apache-2.0"
] | null | null | null | src/openprocurement/api/utils.py | JrooTJunior/openprocurement.api | 9f932d96c4ba096a8e0a0b570f737620c1cc95db | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import json
import decimal
import simplejson
import couchdb.json
from couchdb import util
from logging import getLogger
from datetime import datetime, timedelta
from base64 import b64encode, b64decode
from cornice.resource import resource, view
from email.header import decode_header
from functools import partial
from jsonpatch import make_patch, apply_patch as _apply_patch
from openprocurement.api.traversal import factory
from rfc6266 import build_header
from time import time as ttime
from urllib import quote, unquote, urlencode
from urlparse import urlparse, urlunsplit, parse_qsl
from uuid import uuid4
from webob.multidict import NestedMultiDict
from binascii import hexlify, unhexlify
from Crypto.Cipher import AES
from cornice.util import json_error
from json import dumps
from schematics.exceptions import ValidationError
from couchdb_schematics.document import SchematicsDocument
from openprocurement.api.events import ErrorDesctiptorEvent
from openprocurement.api.constants import LOGGER
from openprocurement.api.constants import (
ADDITIONAL_CLASSIFICATIONS_SCHEMES, DOCUMENT_BLACKLISTED_FIELDS,
DOCUMENT_WHITELISTED_FIELDS, ROUTE_PREFIX, TZ, SESSION
)
from openprocurement.api.interfaces import IOPContent
from openprocurement.api.interfaces import IContentConfigurator
json_view = partial(view, renderer='simplejson')
def get_root(item):
    """Walk ``__parent__`` links up to the root OP content object
    (plan, tender, contract, etc.)."""
    current = item
    while not IOPContent.providedBy(current):
        current = current.__parent__
    return current
def raise_operation_error(request, message):
    """
    Register a 403 access error on the request and raise it.

    Mostly used in view validators when the requested operation is forbidden.
    """
    errors = request.errors
    errors.add('body', 'data', message)
    errors.status = 403
    raise error_handler(errors)
# cornice resource decorator preconfigured with the project's error handler
# and traversal factory.
opresource = partial(resource, error_handler=error_handler, factory=factory)
| 40.483278 | 169 | 0.628651 | # -*- coding: utf-8 -*-
import os
import json
import decimal
import simplejson
import couchdb.json
from couchdb import util
from logging import getLogger
from datetime import datetime, timedelta
from base64 import b64encode, b64decode
from cornice.resource import resource, view
from email.header import decode_header
from functools import partial
from jsonpatch import make_patch, apply_patch as _apply_patch
from openprocurement.api.traversal import factory
from rfc6266 import build_header
from time import time as ttime
from urllib import quote, unquote, urlencode
from urlparse import urlparse, urlunsplit, parse_qsl
from uuid import uuid4
from webob.multidict import NestedMultiDict
from binascii import hexlify, unhexlify
from Crypto.Cipher import AES
from cornice.util import json_error
from json import dumps
from schematics.exceptions import ValidationError
from couchdb_schematics.document import SchematicsDocument
from openprocurement.api.events import ErrorDesctiptorEvent
from openprocurement.api.constants import LOGGER
from openprocurement.api.constants import (
ADDITIONAL_CLASSIFICATIONS_SCHEMES, DOCUMENT_BLACKLISTED_FIELDS,
DOCUMENT_WHITELISTED_FIELDS, ROUTE_PREFIX, TZ, SESSION
)
from openprocurement.api.interfaces import IOPContent
from openprocurement.api.interfaces import IContentConfigurator
json_view = partial(view, renderer='simplejson')
def validate_dkpp(items, *args):
    """Ensure at least one additional classification uses a recognised scheme."""
    if not items:
        return
    if not any(i.scheme in ADDITIONAL_CLASSIFICATIONS_SCHEMES for i in items):
        raise ValidationError(u"One of additional classifications should be one of [{0}].".format(', '.join(ADDITIONAL_CLASSIFICATIONS_SCHEMES)))
def get_now():
    """Return the current timezone-aware datetime in the service timezone (TZ)."""
    return datetime.now(tz=TZ)
def request_get_now(request):
    """Request-method wrapper around :func:`get_now`; *request* is unused."""
    return get_now()
def set_parent(item, parent):
    """Attach *parent* as ``item.__parent__`` when the slot exists and is unset."""
    has_parent_slot = hasattr(item, '__parent__')
    if has_parent_slot and item.__parent__ is None:
        item.__parent__ = parent
def get_root(item):
    """ traverse back to root op content object (plan, tender, contract, etc.)
    """
    # NOTE(review): assumes the __parent__ chain always terminates at an
    # IOPContent provider; a detached item would end in AttributeError.
    while not IOPContent.providedBy(item):
        item = item.__parent__
    return item
def generate_id():
    """Return a random 32-character hexadecimal id (uuid4 without dashes)."""
    uid = uuid4()
    return uid.hex
def get_filename(data):
    """
    Return the filename of an uploaded multipart field, decoding RFC 2047
    encoded-words (e.g. ``=?utf-8?...?=``) when present.
    """
    try:
        decoded = decode_header(data.filename)
    except Exception:
        decoded = None
    if not decoded:
        return data.filename
    value, charset = decoded[0]
    if charset:
        return value.decode(charset)
    return value
def get_schematics_document(model):
    """Climb ``__parent__`` links until the enclosing SchematicsDocument is found."""
    doc = model
    while not isinstance(doc, SchematicsDocument):
        doc = doc.__parent__
    return doc
def generate_docservice_url(request, doc_id, temporary=True, prefix=None):
    """
    Build a signed download URL for *doc_id* on the document service.

    :param temporary: when True the URL carries an Expires timestamp
        (now + 300s) that is also covered by the signature.
    :param prefix: optional path prefix included in the signed message and
        exposed as the Prefix query parameter.
    :return: absolute ``/get/<doc_id>`` URL with Signature/KeyID query args.
    """
    docservice_key = getattr(request.registry, 'docservice_key', None)
    parsed_url = urlparse(request.registry.docservice_url)
    query = {}
    if temporary:
        expires = int(ttime()) + 300  # EXPIRES
        mess = "{}\0{}".format(doc_id, expires)
        query['Expires'] = expires
    else:
        mess = doc_id
    if prefix:
        mess = '{}/{}'.format(prefix, mess)
        query['Prefix'] = prefix
    # sign the message with the registry's docservice key; KeyID identifies
    # the verification key (first 8 hex chars of the public key)
    query['Signature'] = quote(b64encode(docservice_key.signature(mess.encode("utf-8"))))
    query['KeyID'] = docservice_key.hex_vk()[:8]
    return urlunsplit((parsed_url.scheme, parsed_url.netloc, '/get/{}'.format(doc_id), urlencode(query), ''))
def error_handler(errors, request_params=True):
    """
    Log and journal a request error collection, then render it as JSON.

    :param errors: cornice ``errors`` object carrying the request.
    :param request_params: include ROLE/PARAMS of the request in the journal
        context (disabled when the params themselves failed to decode).
    :return: the JSON error response produced by cornice's ``json_error``.
    """
    params = {
        'ERROR_STATUS': errors.status
    }
    if request_params:
        params['ROLE'] = str(errors.request.authenticated_role)
        if errors.request.params:
            params['PARAMS'] = str(dict(errors.request.params))
    if errors.request.matchdict:
        # expose route matchdict entries (tender_id etc.) as upper-case keys
        for x, j in errors.request.matchdict.items():
            params[x.upper()] = j
    # notify subscribers (e.g. metrics/journaling) about the error descriptor
    errors.request.registry.notify(ErrorDesctiptorEvent(errors, params))
    LOGGER.info('Error on processing request "{}"'.format(dumps(errors, indent=4)),
                extra=context_unpack(errors.request, {'MESSAGE_ID': 'error_handler'}, params))
    return json_error(errors)
def raise_operation_error(request, message):
    """
    This function mostly used in views validators to add access errors and
    raise exceptions if requested operation is forbidden.

    :param request: the current pyramid request.
    :param message: human-readable reason added to the 403 error body.
    :raises: the rendered error response produced by :func:`error_handler`.
    """
    request.errors.add('body', 'data', message)
    request.errors.status = 403
    raise error_handler(request.errors)
def upload_file(request, blacklisted_fields=DOCUMENT_BLACKLISTED_FIELDS, whitelisted_fields=DOCUMENT_WHITELISTED_FIELDS):
    """
    Build a document model from an upload request and store its payload.

    Two storage backends are supported:
      * document service (``request.registry.docservice_url`` set): POST the
        file to the docservice with up to 10 retries, then record url/hash;
      * couchdb attachment: base64-encode the body into
        ``db_doc['_attachments']``.

    When a previous version of the document exists
    (``request.validated['documents'][-1]``), its whitelisted /
    non-blacklisted attributes are copied onto the new version.

    :return: the populated document model instance.
    """
    first_document = request.validated['documents'][-1] if 'documents' in request.validated and request.validated['documents'] else None
    # JSON body upload (document already validated into a model)
    if 'data' in request.validated and request.validated['data']:
        document = request.validated['document']
        check_document(request, document, 'body')
        if first_document:
            for attr_name in type(first_document)._fields:
                if attr_name in whitelisted_fields:
                    setattr(document, attr_name, getattr(first_document, attr_name))
                elif attr_name not in blacklisted_fields and attr_name not in request.validated['json_data']:
                    setattr(document, attr_name, getattr(first_document, attr_name))
        document_route = request.matched_route.name.replace("collection_", "")
        document = update_document_url(request, document, document_route, {})
        return document
    if request.content_type == 'multipart/form-data':
        data = request.validated['file']
        filename = get_filename(data)
        content_type = data.type
        in_file = data.file
    else:
        # raw body upload: reuse the previous version's title
        filename = first_document.title
        content_type = request.content_type
        in_file = request.body_file
    if hasattr(request.context, "documents"):
        # upload new document
        model = type(request.context).documents.model_class
    else:
        # update document
        model = type(request.context)
    document = model({'title': filename, 'format': content_type})
    document.__parent__ = request.context
    if 'document_id' in request.validated:
        document.id = request.validated['document_id']
    if first_document:
        for attr_name in type(first_document)._fields:
            if attr_name not in blacklisted_fields:
                setattr(document, attr_name, getattr(first_document, attr_name))
    if request.registry.docservice_url:
        parsed_url = urlparse(request.registry.docservice_url)
        url = request.registry.docservice_upload_url or urlunsplit((parsed_url.scheme, parsed_url.netloc, '/upload', '', ''))
        files = {'file': (filename, in_file, content_type)}
        doc_url = None
        # retry the upload up to 10 times; while/else runs only when all
        # attempts fail without a break
        index = 10
        while index:
            try:
                r = SESSION.post(url,
                    files=files,
                    headers={'X-Client-Request-ID': request.environ.get('REQUEST_ID', '')},
                    auth=(request.registry.docservice_username, request.registry.docservice_password)
                )
                json_data = r.json()
            except Exception, e:
                LOGGER.warning("Raised exception '{}' on uploading document to document service': {}.".format(type(e), e),
                               extra=context_unpack(request, {'MESSAGE_ID': 'document_service_exception'}, {'file_size': in_file.tell()}))
            else:
                if r.status_code == 200 and json_data.get('data', {}).get('url'):
                    doc_url = json_data['data']['url']
                    doc_hash = json_data['data']['hash']
                    break
                else:
                    LOGGER.warning("Error {} on uploading document to document service '{}': {}".format(r.status_code, url, r.text),
                                   extra=context_unpack(request, {'MESSAGE_ID': 'document_service_error'}, {'ERROR_STATUS': r.status_code, 'file_size': in_file.tell()}))
            # rewind the body so the next attempt re-sends the full file
            in_file.seek(0)
            index -= 1
        else:
            request.errors.add('body', 'data', "Can't upload document to document service.")
            request.errors.status = 422
            raise error_handler(request.errors)
        document.hash = doc_hash
        key = urlparse(doc_url).path.split('/')[-1]
    else:
        # couchdb fallback: store the raw body as a base64 attachment
        key = generate_id()
        filename = "{}_{}".format(document.id, key)
        request.validated['db_doc']['_attachments'][filename] = {
            "content_type": document.format,
            "data": b64encode(in_file.read())
        }
    document_route = request.matched_route.name.replace("collection_", "")
    document_path = request.current_route_path(_route_name=document_route, document_id=document.id, _query={'download': key})
    document.url = '/' + '/'.join(document_path.split('/')[3:])
    update_logging_context(request, {'file_size': in_file.tell()})
    return document
def update_file_content_type(request):  # XXX TODO
    """Placeholder: updating a stored file's content type is not implemented."""
    pass
def get_file(request):
    """
    Serve a document download identified by the ``download`` query key.

    Docservice-backed documents are answered with a 302 redirect to a
    (re-)signed docservice URL; couchdb-attached documents are streamed
    directly from the database. Unknown keys produce a 404 error.
    """
    db_doc_id = request.validated['db_doc'].id
    document = request.validated['document']
    key = request.params.get('download')
    if not any([key in i.url for i in request.validated['documents']]):
        request.errors.add('url', 'download', 'Not Found')
        request.errors.status = 404
        return
    filename = "{}_{}".format(document.id, key)
    if request.registry.docservice_url and filename not in request.validated['db_doc']['_attachments']:
        # pick the latest document version whose url carries this key
        document = [i for i in request.validated['documents'] if key in i.url][-1]
        if 'Signature=' in document.url and 'KeyID' in document.url:
            # already a signed docservice URL — redirect as-is
            url = document.url
        else:
            if 'download=' not in document.url:
                key = urlparse(document.url).path.replace('/get/', '')
            if not document.hash:
                url = generate_docservice_url(request, key, prefix='{}/{}'.format(db_doc_id, document.id))
            else:
                url = generate_docservice_url(request, key)
        request.response.content_type = document.format.encode('utf-8')
        request.response.content_disposition = build_header(document.title, filename_compat=quote(document.title.encode('utf-8')))
        request.response.status = '302 Moved Temporarily'
        request.response.location = url
        return url
    else:
        # couchdb attachment: stream the stored payload
        data = request.registry.db.get_attachment(db_doc_id, filename)
        if data:
            request.response.content_type = document.format.encode('utf-8')
            request.response.content_disposition = build_header(document.title, filename_compat=quote(document.title.encode('utf-8')))
            request.response.body_file = data
            return request.response
        request.errors.add('url', 'download', 'Not Found')
        request.errors.status = 404
def prepare_patch(changes, orig, patch, basepath=''):
    """
    Recursively accumulate JSON-patch operations turning *orig* into *patch*.

    Operations are appended (mutated in place) to the *changes* list with
    paths rooted at *basepath*; scalars are delegated to jsonpatch.make_patch.
    """
    if isinstance(patch, dict):
        for i in patch:
            if i in orig:
                # key exists on both sides: recurse to diff the values
                prepare_patch(changes, orig[i], patch[i], '{}/{}'.format(basepath, i))
            else:
                changes.append({'op': 'add', 'path': '{}/{}'.format(basepath, i), 'value': patch[i]})
    elif isinstance(patch, list):
        if len(patch) < len(orig):
            # remove surplus tail elements from highest index down so the
            # remaining removal paths stay valid
            for i in reversed(range(len(patch), len(orig))):
                changes.append({'op': 'remove', 'path': '{}/{}'.format(basepath, i)})
        for i, j in enumerate(patch):
            if len(orig) > i:
                prepare_patch(changes, orig[i], patch[i], '{}/{}'.format(basepath, i))
            else:
                changes.append({'op': 'add', 'path': '{}/{}'.format(basepath, i), 'value': j})
    else:
        # leaf values: let jsonpatch compute the ops, then re-root the paths
        for x in make_patch(orig, patch).patch:
            x['path'] = '{}{}'.format(basepath, x['path'])
            changes.append(x)
def apply_data_patch(item, changes):
    """Diff *item* against *changes* and apply the resulting JSON patch;
    return ``{}`` when there is nothing to change."""
    ops = []
    prepare_patch(ops, item, changes)
    if not ops:
        return {}
    return _apply_patch(item, ops)
def get_revision_changes(dst, src):
    """Return the JSON-patch operation list that transforms *dst* into *src*."""
    patch = make_patch(dst, src)
    return patch.patch
def set_ownership(item, request):
    """Assign owner and a fresh owner_token from the authenticated user,
    unless the item already has an owner."""
    if item.get('owner'):
        return
    item.owner = request.authenticated_userid
    item.owner_token = generate_id()
def check_document(request, document, document_container):
    """
    Validate that ``document.url`` is a properly signed docservice URL.

    Checks, in order: the URL points at the configured document service with
    exactly Signature/KeyID query args (403 otherwise); a hash is present
    (422); the KeyID is a known verification key (422); the signature decodes
    and verifies against ``"<key>\\0<hash>"`` (422). Each failure is raised
    via :func:`error_handler`.
    """
    url = document.url
    parsed_url = urlparse(url)
    parsed_query = dict(parse_qsl(parsed_url.query))
    if not url.startswith(request.registry.docservice_url) or \
            len(parsed_url.path.split('/')) != 3 or \
            set(['Signature', 'KeyID']) != set(parsed_query):
        request.errors.add(document_container, 'url', "Can add document only from document service.")
        request.errors.status = 403
        raise error_handler(request.errors)
    if not document.hash:
        request.errors.add(document_container, 'hash', "This field is required.")
        request.errors.status = 422
        raise error_handler(request.errors)
    keyid = parsed_query['KeyID']
    if keyid not in request.registry.keyring:
        request.errors.add(document_container, 'url', "Document url expired.")
        request.errors.status = 422
        raise error_handler(request.errors)
    dockey = request.registry.keyring[keyid]
    signature = parsed_query['Signature']
    # the signed message covers the doc key (last path segment) and the hash
    key = urlparse(url).path.split('/')[-1]
    try:
        signature = b64decode(unquote(signature))
    except TypeError:
        request.errors.add(document_container, 'url', "Document url signature invalid.")
        request.errors.status = 422
        raise error_handler(request.errors)
    mess = "{}\0{}".format(key, document.hash.split(':', 1)[-1])
    try:
        if mess != dockey.verify(signature + mess.encode("utf-8")):
            raise ValueError
    except ValueError:
        request.errors.add(document_container, 'url', "Document url invalid.")
        request.errors.status = 422
        raise error_handler(request.errors)
def update_document_url(request, document, document_route, route_kwargs):
    """Rewrite ``document.url`` into an API-relative download path for the
    given *document_route*; mutates *route_kwargs* and returns the document."""
    download_key = urlparse(document.url).path.split('/')[-1]
    route_kwargs.update(
        _route_name=document_route,
        document_id=document.id,
        _query={'download': download_key},
    )
    full_path = request.current_route_path(**route_kwargs)
    # drop scheme/host/prefix segments so the stored url stays relative
    document.url = '/' + '/'.join(full_path.split('/')[3:])
    return document
def check_document_batch(request, document, document_container, route_kwargs):
    """
    Validate a batched document upload and rewrite its url.

    Runs :func:`check_document` first, then derives the concrete document
    route name before delegating to :func:`update_document_url`.
    """
    check_document(request, document, document_container)

    document_route = request.matched_route.name.replace("collection_", "")
    # When the matched route is not itself a "... Documents" route (e.g. a bid
    # created together with its documents), rebuild the real document route
    # name from the container name, e.g. "Bids" + "financial documents" ->
    # "Bid Financial Documents".
    if "Documents" not in document_route:
        specified_document_route_end = (document_container.lower().rsplit('documents')[0] + ' documents').lstrip().title()
        document_route = ' '.join([document_route[:-1], specified_document_route_end])
    return update_document_url(request, document, document_route, route_kwargs)
def request_params(request):
    """
    Return GET+POST parameters as one NestedMultiDict.

    Malformed parameters (undecodable bytes or any other parsing failure)
    are converted into a 422 error raised via :func:`error_handler` with
    request-param logging disabled (the params are what failed).
    """
    try:
        params = NestedMultiDict(request.GET, request.POST)
    except UnicodeDecodeError:
        request.errors.add('body', 'data', 'could not decode params')
        request.errors.status = 422
        raise error_handler(request.errors, False)
    except Exception, e:
        request.errors.add('body', str(e.__class__.__name__), str(e))
        request.errors.status = 422
        raise error_handler(request.errors, False)
    return params
# cornice resource decorator preconfigured with the project's error handler
# and traversal factory.
opresource = partial(resource, error_handler=error_handler, factory=factory)
class APIResource(object):
    """Base class for API resource views: caches the request, its couchdb
    handle, the server id and a per-module logger."""

    def __init__(self, request, context):
        self.context = context
        self.request = request
        registry = request.registry
        self.db = registry.db
        self.server_id = registry.server_id
        self.LOGGER = getLogger(type(self).__module__)
class APIResourceListing(APIResource):
    """
    Base listing view over a couchdb design view.

    Subclasses are expected to provide: FEED / VIEW_MAP / CHANGES_VIEW_MAP
    (mode -> view callables), FIELDS, serialize_func, object_name_for_listing
    and log_message_id. Supports opt_fields, limit, descending, offset, feed
    and mode query parameters with cursor-style next/prev page links.
    """

    def __init__(self, request, context):
        super(APIResourceListing, self).__init__(request, context)
        self.server = request.registry.couchdb_server
        self.update_after = request.registry.update_after

    @json_view(permission='view_listing')
    def get(self):
        """Return one page of listing results plus next/prev page links."""
        # params / pparams accumulate the query strings for the next and
        # previous page links respectively
        params = {}
        pparams = {}
        fields = self.request.params.get('opt_fields', '')
        if fields:
            params['opt_fields'] = fields
            pparams['opt_fields'] = fields
            fields = fields.split(',')
            view_fields = fields + ['dateModified', 'id']
        limit = self.request.params.get('limit', '')
        if limit:
            params['limit'] = limit
            pparams['limit'] = limit
        # cap the page size: 100 with opt_fields, 1000 otherwise; default 100
        limit = int(limit) if limit.isdigit() and (100 if fields else 1000) >= int(limit) > 0 else 100
        descending = bool(self.request.params.get('descending'))
        offset = self.request.params.get('offset', '')
        if descending:
            params['descending'] = 1
        else:
            pparams['descending'] = 1
        feed = self.request.params.get('feed', '')
        view_map = self.FEED.get(feed, self.VIEW_MAP)
        # the "changes" feed uses encrypted numeric offsets (couch seq numbers)
        changes = view_map is self.CHANGES_VIEW_MAP
        if feed and feed in self.FEED:
            params['feed'] = feed
            pparams['feed'] = feed
        mode = self.request.params.get('mode', '')
        if mode and mode in view_map:
            params['mode'] = mode
            pparams['mode'] = mode
        # fetch one extra row when resuming from an offset so the boundary
        # row can be dropped below
        view_limit = limit + 1 if offset else limit
        if changes:
            if offset:
                view_offset = decrypt(self.server.uuid, self.db.name, offset)
                if view_offset and view_offset.isdigit():
                    view_offset = int(view_offset)
                else:
                    self.request.errors.add('params', 'offset', 'Offset expired/invalid')
                    self.request.errors.status = 404
                    raise error_handler(self.request.errors)
            if not offset:
                view_offset = 'now' if descending else 0
        else:
            if offset:
                view_offset = offset
            else:
                # '9' / '' are the extreme startkeys for date-keyed views
                view_offset = '9' if descending else ''
        list_view = view_map.get(mode, view_map[u''])
        if self.update_after:
            view = partial(list_view, self.db, limit=view_limit, startkey=view_offset, descending=descending, stale='update_after')
        else:
            view = partial(list_view, self.db, limit=view_limit, startkey=view_offset, descending=descending)
        if fields:
            # NOTE(review): ``x.value.items() + [...]`` relies on Python 2
            # dict.items() returning a list — confirm before porting.
            if not changes and set(fields).issubset(set(self.FIELDS)):
                results = [
                    (dict([(i, j) for i, j in x.value.items() + [('id', x.id), ('dateModified', x.key)] if i in view_fields]), x.key)
                    for x in view()
                ]
            elif changes and set(fields).issubset(set(self.FIELDS)):
                results = [
                    (dict([(i, j) for i, j in x.value.items() + [('id', x.id)] if i in view_fields]), x.key)
                    for x in view()
                ]
            elif fields:
                # custom fields require loading full docs and serializing them
                self.LOGGER.info('Used custom fields for {} list: {}'.format(self.object_name_for_listing, ','.join(sorted(fields))),
                                 extra=context_unpack(self.request, {'MESSAGE_ID': self.log_message_id}))

                results = [
                    (self.serialize_func(self.request, i[u'doc'], view_fields), i.key)
                    for i in view(include_docs=True)
                ]
        else:
            results = [
                ({'id': i.id, 'dateModified': i.value['dateModified']} if changes else {'id': i.id, 'dateModified': i.key}, i.key)
                for i in view()
            ]
        if results:
            params['offset'], pparams['offset'] = results[-1][1], results[0][1]
            if offset and view_offset == results[0][1]:
                # drop the boundary row duplicated from the previous page
                results = results[1:]
            elif offset and view_offset != results[0][1]:
                results = results[:limit]
                params['offset'], pparams['offset'] = results[-1][1], view_offset
            results = [i[0] for i in results]
            if changes:
                params['offset'] = encrypt(self.server.uuid, self.db.name, params['offset'])
                pparams['offset'] = encrypt(self.server.uuid, self.db.name, pparams['offset'])
        else:
            params['offset'] = offset
            pparams['offset'] = offset
        data = {
            'data': results,
            'next_page': {
                "offset": params['offset'],
                "path": self.request.route_path(self.object_name_for_listing, _query=params),
                "uri": self.request.route_url(self.object_name_for_listing, _query=params)
            }
        }
        if descending or offset:
            data['prev_page'] = {
                "offset": pparams['offset'],
                "path": self.request.route_path(self.object_name_for_listing, _query=pparams),
                "uri": self.request.route_url(self.object_name_for_listing, _query=pparams)
            }
        return data
def forbidden(request):
    """Record a 403 'Forbidden' permission error on *request* and return the standard error response."""
    errors = request.errors
    errors.add('url', 'permission', 'Forbidden')
    errors.status = 403
    return error_handler(errors)
def update_logging_context(request, params):
    """Merge *params* into request.logging_context, upper-casing every key."""
    # Create the per-request context dict on first use (or when it is empty/falsy).
    if not request.__dict__.get('logging_context'):
        request.logging_context = {}
    context = request.logging_context
    for key, value in params.items():
        context[key.upper()] = value
def context_unpack(request, msg, params=None):
    """Extend *msg* in place with JOURNAL_-prefixed entries from request.logging_context.

    If *params* is given it is first folded into the logging context via
    update_logging_context(). Returns the (mutated) *msg* dict.
    """
    if params:
        update_logging_context(request, params)
    journal_context = msg
    journal_context.update(
        ("JOURNAL_" + key, value)
        for key, value in request.logging_context.items()
    )
    return journal_context
def get_content_configurator(request):
    """Return the IContentConfigurator adapter for the request's content object, or None if it is not constructed yet."""
    # Path looks like ROUTE_PREFIX/<plural-resource>/...; strip the trailing
    # 's' to get the request attribute name (e.g. "tenders" -> "tender").
    # NOTE(review): assumes the resource segment is a simple plural — confirm.
    resource_segment = request.path[len(ROUTE_PREFIX) + 1:].split('/', 1)[0]
    content_type = resource_segment[:-1]
    if hasattr(request, content_type):  # content is constructed
        context = getattr(request, content_type)
        return request.registry.queryMultiAdapter(
            (context, request), IContentConfigurator)
def fix_url(item, app_url):
    """Recursively rewrite download URLs inside a nested dict/list structure.

    Any dict that has both "format" and "url" keys and whose url contains
    '?download=' gets its url replaced by an absolute one rooted at
    app_url + ROUTE_PREFIX; other dicts and lists are descended into.
    Mutates *item* in place and returns None.
    """
    # The original used list comprehensions purely for side effects; plain
    # loops express the recursion without building throwaway lists.
    if isinstance(item, list):
        for element in item:
            if isinstance(element, (dict, list)):
                fix_url(element, app_url)
    elif isinstance(item, dict):
        if "format" in item and "url" in item and '?download=' in item['url']:
            # Keep an absolute path as-is; otherwise drop the scheme/host
            # portion (first 5 '/'-separated pieces) of the stored url.
            path = item["url"] if item["url"].startswith('/') else '/' + '/'.join(item['url'].split('/')[5:])
            item["url"] = app_url + ROUTE_PREFIX + path
            return
        for value in item.values():
            if isinstance(value, (dict, list)):
                fix_url(value, app_url)
def encrypt(uuid, name, key):
    """AES-CBC-encrypt *key* and return it hex-encoded.

    The server *uuid* is used as the AES key, and the database *name*
    (centered and truncated to AES.block_size) serves as the IV, so the
    ciphertext is only decryptable for the same server/database pair.
    NOTE(review): assumes *key* fits in one AES block after centering —
    confirm against callers (used for list-offset tokens above).
    """
    iv = "{:^{}.{}}".format(name, AES.block_size, AES.block_size)
    text = "{:^{}}".format(key, AES.block_size)
    return hexlify(AES.new(uuid, AES.MODE_CBC, iv).encrypt(text))
def decrypt(uuid, name, key):
    """Inverse of encrypt(): hex-decode *key* and AES-CBC-decrypt it.

    Returns the stripped plaintext, or '' when *key* is not valid
    ciphertext (best-effort: callers treat an undecryptable offset token
    as absent).
    """
    iv = "{:^{}.{}}".format(name, AES.block_size, AES.block_size)
    try:
        text = AES.new(uuid, AES.MODE_CBC, iv).decrypt(unhexlify(key)).strip()
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; malformed hex or cipher input yields ''.
        text = ''
    return text
def set_modetest_titles(item):
    """Prefix item's title, title_en and title_ru with test-mode markers, unless already present."""
    markers = (
        ('title', u'[ТЕСТУВАННЯ]'),
        ('title_en', u'[TESTING]'),
        ('title_ru', u'[ТЕСТИРОВАНИЕ]'),
    )
    for attr, marker in markers:
        current = getattr(item, attr)
        if not current or marker not in current:
            setattr(item, attr, u'{} {}'.format(marker, current or u''))
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that serializes decimal.Decimal values as strings."""

    def default(self, obj):
        """Render Decimals via str(); defer everything else to the base class."""
        if not isinstance(obj, decimal.Decimal):
            return super(DecimalEncoder, self).default(obj)
        return str(obj)
def couchdb_json_decode():
    """Install JSON hooks on couchdb so floats round-trip as decimal.Decimal.

    Encoding goes through DecimalEncoder (Decimals serialized as strings);
    decoding parses every JSON float into decimal.Decimal.
    """
    # PEP 8 (E731): use def instead of assigning a lambda to a name.
    def my_encode(obj, dumps=dumps):
        return dumps(obj, cls=DecimalEncoder)

    def my_decode(string_):
        if isinstance(string_, util.btype):
            string_ = string_.decode("utf-8")
        return json.loads(string_, parse_float=decimal.Decimal)

    couchdb.json.use(decode=my_decode, encode=my_encode)
| 21,324 | 136 | 789 |
4668e45e7b4f7aee6a860a6306fc3156761aca36 | 24 | py | Python | interface/__init__.py | fgaspar/myhome | 9942364f913870d3eed3a0a6536872ebf4c83a0c | [
"MIT"
] | null | null | null | interface/__init__.py | fgaspar/myhome | 9942364f913870d3eed3a0a6536872ebf4c83a0c | [
"MIT"
] | null | null | null | interface/__init__.py | fgaspar/myhome | 9942364f913870d3eed3a0a6536872ebf4c83a0c | [
"MIT"
] | null | null | null | __all__ = ['interface']
| 12 | 23 | 0.666667 | __all__ = ['interface']
| 0 | 0 | 0 |
8bd55d55ac9289b7d0cb32cc7e44955b4be77eda | 1,688 | py | Python | stock_portfolio/stock_portfolio/tests/test_models_account.py | zarkle/pyramid-stocks | 493ad5a5b77e99dcff8e8234bf0616db1fbb4c98 | [
"MIT"
] | null | null | null | stock_portfolio/stock_portfolio/tests/test_models_account.py | zarkle/pyramid-stocks | 493ad5a5b77e99dcff8e8234bf0616db1fbb4c98 | [
"MIT"
] | 4 | 2019-12-26T16:42:42.000Z | 2020-01-06T18:53:34.000Z | stock_portfolio/stock_portfolio/tests/test_models_account.py | zarkle/pyramid-stocks | 493ad5a5b77e99dcff8e8234bf0616db1fbb4c98 | [
"MIT"
] | null | null | null | def test_account_model(db_session):
"""test make a new user account"""
from ..models import Account
assert len(db_session.query(Account).all()) == 0
user = Account(
username='me',
password='me',
email='me@me.com',
)
db_session.add(user)
assert len(db_session.query(Account).all()) == 1
# def test_make_user_no_password(db_session):
# """test can't make new user with no password"""
# from ..models import Account
# import pytest
# from sqlalchemy.exc import DBAPIError
# assert len(db_session.query(Account).all()) == 0
# user = Account(
# username='me',
# password=None,
# email='me@me.com',
# )
# with pytest.raises(DBAPIError):
# db_session.add(user)
# assert len(db_session.query(Account).all()) == 0
# assert db_session.query(Account).one_or_none() is None
def test_make_user_no_email(db_session):
    """A new user can be created with an empty email."""
    from ..models import Account
    assert len(db_session.query(Account).all()) == 0
    new_account = Account(username='me', password='me', email='')
    db_session.add(new_account)
    assert len(db_session.query(Account).all()) == 1
def test_new_user_in_database(db_session):
    """A newly added user is retrievable from the database."""
    from ..models import Account
    assert len(db_session.query(Account).all()) == 0
    new_account = Account(username='me', password='me', email='')
    db_session.add(new_account)
    accounts = db_session.query(Account)
    assert accounts.filter(Account.username == 'me').first()
    assert len(accounts.all()) == 1
def test_account_model(db_session):
    """A new user account can be created and persisted."""
    from ..models import Account
    assert len(db_session.query(Account).all()) == 0
    new_account = Account(username='me', password='me', email='me@me.com')
    db_session.add(new_account)
    assert len(db_session.query(Account).all()) == 1
# def test_make_user_no_password(db_session):
# """test can't make new user with no password"""
# from ..models import Account
# import pytest
# from sqlalchemy.exc import DBAPIError
# assert len(db_session.query(Account).all()) == 0
# user = Account(
# username='me',
# password=None,
# email='me@me.com',
# )
# with pytest.raises(DBAPIError):
# db_session.add(user)
# assert len(db_session.query(Account).all()) == 0
# assert db_session.query(Account).one_or_none() is None
def test_make_user_no_email(db_session):
    """A new user can be created with an empty email."""
    from ..models import Account
    assert len(db_session.query(Account).all()) == 0
    new_account = Account(username='me', password='me', email='')
    db_session.add(new_account)
    assert len(db_session.query(Account).all()) == 1
def test_new_user_in_database(db_session):
    """A newly added user is retrievable from the database."""
    from ..models import Account
    assert len(db_session.query(Account).all()) == 0
    new_account = Account(username='me', password='me', email='')
    db_session.add(new_account)
    accounts = db_session.query(Account)
    assert accounts.filter(Account.username == 'me').first()
    assert len(accounts.all()) == 1
| 0 | 0 | 0 |
a9322a09be39e0a72be52dbf707ba16161da58c8 | 933 | py | Python | fetch_images.py | UoA-eResearch/NZTA_traffic_cameras | ceeb1d328cb17a0f57116b2b3cbdfe61bf6e7366 | [
"MIT"
] | 1 | 2021-09-29T09:39:38.000Z | 2021-09-29T09:39:38.000Z | fetch_images.py | imyhxy/NZTA_traffic_cameras | 9952774db0eda4c30286833a41022fc740c3610b | [
"MIT"
] | null | null | null | fetch_images.py | imyhxy/NZTA_traffic_cameras | 9952774db0eda4c30286833a41022fc740c3610b | [
"MIT"
] | 1 | 2021-09-29T09:39:33.000Z | 2021-09-29T09:39:33.000Z | #!/usr/bin/env python3
import os
import json
import pprint
import requests
from dateutil.parser import parse as parsedate
# Camera index (GeoJSON feature collection) and the placeholder image the
# server returns for an offline camera feed.
with open("cameras.json") as f:
    cameras = json.load(f)
with open("unavailable.jpg", "rb") as f:
    unavailable = f.read()
camIds = sorted([int(c["properties"]["id"]) for c in cameras["features"]])
for camId in camIds:
    # One directory per camera; each snapshot is named by its Last-Modified time.
    path = f"images/{camId}/"
    os.makedirs(path, exist_ok=True)
    imageUrl = f"https://www.trafficnz.info/camera/{camId}.jpg"
    try:
        r = requests.get(imageUrl, timeout=5)
        # Skip cameras currently serving the byte-identical placeholder image.
        if r.content == unavailable:
            print(camId, "unavailable")
            continue
        lastModified = r.headers["Last-Modified"]
        lastModified = parsedate(lastModified).strftime("%Y-%m-%d-%H%M%S")
        print(camId, lastModified)
        with open(path + lastModified + ".jpg", "wb") as f:
            f.write(r.content)
    # Best-effort crawl: report and continue on any per-camera failure.
    except Exception as e:
print(camId, e) | 31.1 | 74 | 0.631297 | #!/usr/bin/env python3
import os
import json
import pprint
import requests
from dateutil.parser import parse as parsedate
# Camera index (GeoJSON feature collection) and the placeholder image the
# server returns for an offline camera feed.
with open("cameras.json") as f:
    cameras = json.load(f)
with open("unavailable.jpg", "rb") as f:
    unavailable = f.read()
camIds = sorted([int(c["properties"]["id"]) for c in cameras["features"]])
for camId in camIds:
    # One directory per camera; each snapshot is named by its Last-Modified time.
    path = f"images/{camId}/"
    os.makedirs(path, exist_ok=True)
    imageUrl = f"https://www.trafficnz.info/camera/{camId}.jpg"
    try:
        r = requests.get(imageUrl, timeout=5)
        # Skip cameras currently serving the byte-identical placeholder image.
        if r.content == unavailable:
            print(camId, "unavailable")
            continue
        lastModified = r.headers["Last-Modified"]
        lastModified = parsedate(lastModified).strftime("%Y-%m-%d-%H%M%S")
        print(camId, lastModified)
        with open(path + lastModified + ".jpg", "wb") as f:
            f.write(r.content)
    # Best-effort crawl: report and continue on any per-camera failure.
    except Exception as e:
print(camId, e) | 0 | 0 | 0 |
d0a01a00f070faee611561d1b2339d1a3ff3d521 | 8,322 | py | Python | tests/test_result.py | ageitgey/pyflight | 3d1624e1455e98b011ccaba1510da818f46a6e1d | [
"MIT"
] | 4 | 2018-01-19T07:24:25.000Z | 2021-05-01T04:45:36.000Z | tests/test_result.py | ageitgey/pyflight | 3d1624e1455e98b011ccaba1510da818f46a6e1d | [
"MIT"
] | null | null | null | tests/test_result.py | ageitgey/pyflight | 3d1624e1455e98b011ccaba1510da818f46a6e1d | [
"MIT"
] | 2 | 2018-04-02T04:10:35.000Z | 2019-08-13T13:51:26.000Z | # Tests the various Containers / Classes found in results.py
import os
import sys
import util
from pyflight.result import *
from pyflight.models.flight_data import FlightData
# Test the FlightData Container
# Test the Airport Container
# Shared fixtures: two real QPX Express sample responses, downloaded once
# and cached on disk by util.download_file_if_not_exists().
first_result = Result(util.download_file_if_not_exists(
    url="https://developers.google.com/qpx-express/v1/json.samples/SFOLAX.out.json",
    filename="response_1.json"
))
second_result = Result(util.download_file_if_not_exists(
    url="https://developers.google.com/qpx-express/v1/json.samples/OGGNCE.out.json",
    filename="response_2.json"
))
# Test the Entry grabbing from the Result Container
# Test correct Grabbing of Aircraft
# Test correct Grabbing of Airports
# Test correct Grabbing of Carriers
# Test correct Grabbing of Taxes
# Test correct Grabbing of Trips
# Test correct Grabbing of Routes
# Test correct Grabbing of Route Segments
# Test correct Grabbing of Route Segment Flights
# Test correct Grabbing of Pricing Data
| 35.412766 | 108 | 0.696467 | # Tests the various Containers / Classes found in results.py
import os
import sys
import util
from pyflight.result import *
from pyflight.models.flight_data import FlightData
# Test the FlightData Container
def test_flight_data():
    """Check FlightData equality, string conversion and dict serialization."""
    data_a = FlightData('9B1', 'Example Data')
    data_b = FlightData('7B3', 'Another Example Data')
    data_c = FlightData('9B1', 'Example Data')

    # Equality is based on contents, not identity, and is symmetric.
    assert data_a != data_b
    assert data_a == data_c
    assert data_c == data_a

    # str() yields the name.
    for data, name in ((data_a, 'Example Data'),
                       (data_b, 'Another Example Data'),
                       (data_c, 'Example Data')):
        assert str(data) == name

    # as_dict() round-trips code and name.
    assert data_a.as_dict() == {'code': '9B1', 'name': 'Example Data'}
    assert data_b.as_dict() == {'code': '7B3', 'name': 'Another Example Data'}
    assert data_c.as_dict() == {'code': '9B1', 'name': 'Example Data'}
# Test the Airport Container
def test_airport():
    """Check Airport equality, string conversion and dict serialization."""
    specs = [
        {'code': '13', 'city': 'C83', 'name': 'Some Airport'},
        {'code': '58', 'city': '337', 'name': 'Another Airport'},
        {'code': '31', 'city': '958', 'name': 'Airport Airport'},
    ]
    airport_a, airport_b, airport_c = (Airport(spec) for spec in specs)

    # Each airport equals itself and differs from the others.
    assert airport_a == airport_a
    assert airport_a != airport_b
    assert airport_a != airport_c
    assert airport_b == airport_b
    assert airport_b != airport_c
    assert airport_c == airport_c

    # str() yields the name.
    assert str(airport_a) == 'Some Airport'
    assert str(airport_b) == 'Another Airport'
    assert str(airport_c) == 'Airport Airport'

    # as_dict() round-trips the construction dict.
    for airport, spec in zip((airport_a, airport_b, airport_c), specs):
        assert airport.as_dict() == spec
# Shared fixtures: two real QPX Express sample responses, downloaded once
# and cached on disk by util.download_file_if_not_exists().
first_result = Result(util.download_file_if_not_exists(
    url="https://developers.google.com/qpx-express/v1/json.samples/SFOLAX.out.json",
    filename="response_1.json"
))
second_result = Result(util.download_file_if_not_exists(
    url="https://developers.google.com/qpx-express/v1/json.samples/OGGNCE.out.json",
    filename="response_2.json"
))
# Test the Entry grabbing from the Result Container
def test_result_grab_all_entries():
    """Verify both sample responses expose the expected top-level entries."""
    assert first_result.request_id == 'eBJXPDdjvK4zDogeE0JJp3'
    assert second_result.request_id == 'hRI7zJ7vwhikqNiwU0JKDA'
    expected_counts = (
        ('aircraft', 1, 13),
        ('airports', 2, 9),
        ('carriers', 1, 5),
        ('taxes', 4, 14),
        ('trips', 1, 8),
    )
    for attribute, first_count, second_count in expected_counts:
        assert len(getattr(first_result, attribute)) == first_count
        assert len(getattr(second_result, attribute)) == second_count
# Test correct Grabbing of Aircraft
def test_result_aircraft():
    """Verify aircraft entries are parsed into code/name pairs."""
    def code_and_name(aircraft):
        return aircraft.code, aircraft.name

    assert code_and_name(first_result.aircraft[0]) == ('320', 'Airbus A320')
    assert code_and_name(second_result.aircraft[0]) == ('319', 'Airbus A319')
    assert code_and_name(second_result.aircraft[1]) == ('320', 'Airbus A320')
    assert code_and_name(second_result.aircraft[2]) == ('321', 'Airbus A321')
    assert code_and_name(second_result.aircraft[12]) == ('76W', 'Boeing 767')
# Test correct Grabbing of Airports
def test_result_airport():
    """Verify airport entries carry code, name and city."""
    def airport_fields(airport):
        return airport.code, airport.name, airport.city

    assert airport_fields(first_result.airports[0]) == ('LAX', 'Los Angeles International', 'LAX')
    assert airport_fields(first_result.airports[1]) == ('SFO', 'San Francisco International', 'SFO')
    assert airport_fields(second_result.airports[0]) == ('CDG', 'Paris Charles de Gaulle', 'PAR')
    assert airport_fields(second_result.airports[1]) == ('FRA', 'Frankfurt International', 'FRA')
# Test correct Grabbing of Carriers
def test_result_carrier():
    """Verify carrier entries are parsed into code/name pairs."""
    assert (first_result.carriers[0].code, first_result.carriers[0].name) == ('VX', 'Virgin America Inc.')
    assert (second_result.carriers[0].code, second_result.carriers[0].name) == ('AF', 'Air France')
    assert (second_result.carriers[1].code, second_result.carriers[1].name) == ('DL', 'Delta Air Lines Inc.')
# Test correct Grabbing of Taxes
def test_result_taxes():
    """Verify tax entries are parsed into code/name pairs."""
    expected = (
        (first_result.taxes[0], 'ZP', 'US Flight Segment Tax'),
        (first_result.taxes[1], 'XF', 'US Passenger Facility Charge'),
        (second_result.taxes[0], 'DE_1', 'German Airport Security Tax'),
        (second_result.taxes[1], 'XY', 'US Immigration Fee'),
    )
    for tax, code, name in expected:
        assert tax.code == code
        assert tax.name == name
# Test correct Grabbing of Trips
def test_result_trips():
    """Verify trip ids and total prices."""
    assert (first_result.trips[0].id, first_result.trips[0].total_price) == ('faqkIcj6Te2V3Sll2SskwJ001', 'USD69.00')
    assert (second_result.trips[0].id, second_result.trips[0].total_price) == ('43z22eKyiiCSeB8K7CaOB8001', 'USD3275.60')
    assert (second_result.trips[1].id, second_result.trips[1].total_price) == ('43z22eKyiiCSeB8K7CaOB8002', 'USD3345.60')
# Test correct Grabbing of Routes
def test_result_routes():
    """Verify route counts per trip and route durations."""
    assert len(first_result.trips[0].routes) == 1
    for trip_index in range(3):
        assert len(second_result.trips[trip_index].routes) == 2
    assert first_result.trips[0].routes[0].duration == 75
    assert second_result.trips[0].routes[0].duration == 1670
    assert second_result.trips[0].routes[1].duration == 1352
# Test correct Grabbing of Route Segments
def test_result_segments():
    """Verify segment counts and the first segment's parsed fields."""
    assert len(first_result.trips[0].routes[0].segments) == 1
    assert len(second_result.trips[0].routes[0].segments) == 3
    assert len(second_result.trips[0].routes[1].segments) == 3
    segment = first_result.trips[0].routes[0].segments[0]
    assert segment.id == 'G4Yqn7Md2QltVrzT'
    assert segment.flight_carrier == 'VX'
    assert segment.cabin == 'COACH'
    assert segment.booking_code_count == 7
# Test correct Grabbing of Route Segment Flights
def test_result_segment_flights():
    """Verify flight entries attached to the first segment of each response."""
    first_flights = first_result.trips[0].routes[0].segments[0].flights
    second_flights = second_result.trips[0].routes[0].segments[0].flights
    assert len(first_flights) == 1
    assert len(second_flights) == 1
    assert (first_flights[0].id, first_flights[0].aircraft) == ('LFaJowO2NvJzM2Vd', '320')
    assert (second_flights[0].id, second_flights[0].aircraft) == ('LACncSVM+gmtx9mJ', '738')
    assert second_flights[0].meal == 'Food and Beverages for Purchase'
# Test correct Grabbing of Pricing Data
def test_result_pricing():
    """Verify pricing counts and the first trip's pricing details."""
    assert len(first_result.trips[0].pricing) == 1
    assert len(second_result.trips[0].pricing) == 2
    assert len(second_result.trips[2].pricing) == 2
    pricing = first_result.trips[0].pricing[0]
    assert len(pricing.segment_pricing) == 1
    assert pricing.sale_total == 'USD69.00'
    assert pricing.adults == 1
    assert pricing.fares[0].id == 'A+yi0+pn2eL1pf3nKwZazHIVDvsw2Ru8zx5LByC/kQaA'
    assert pricing.segment_pricing[0].segment_id == 'G4Yqn7Md2QltVrzT'
| 7,051 | 0 | 264 |
c62dc71cf1abc0549697f8aa7b80d4482d3dc8b5 | 413 | py | Python | authentise_services/errors.py | DoWhileGeek/authentise-services | ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d | [
"MIT"
] | 3 | 2015-10-06T21:07:33.000Z | 2015-12-31T21:33:52.000Z | authentise_services/errors.py | DoWhileGeek/authentise-services | ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d | [
"MIT"
] | 5 | 2015-06-21T03:45:09.000Z | 2015-08-15T17:00:30.000Z | authentise_services/errors.py | DoWhileGeek/authentise-services | ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d | [
"MIT"
] | null | null | null | """Error classes for authentise_services"""
class ResourceError(Exception):
    """Arbitrary error raised whenever a call to an Authentise resource doesn't go according to plan."""
    pass
class ResourceStillProcessing(Exception):
    """Most Authentise resources have a status property to tell the user what state it's in.
    Whenever the resource isn't ready in some way or another, throw one of these."""
    pass
| 31.769231 | 94 | 0.743341 | """Error classes for authentise_services"""
class ResourceError(Exception):
    """Arbitrary error raised whenever a call to an Authentise resource doesn't go according to plan."""
    pass
class ResourceStillProcessing(Exception):
    """Most Authentise resources have a status property to tell the user what state it's in.
    Whenever the resource isn't ready in some way or another, throw one of these."""
    pass
| 0 | 0 | 0 |
7e51fad58a661946b39af373553cbe440b365721 | 3,535 | py | Python | corehq/apps/users/middleware.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | 1 | 2015-02-10T23:26:39.000Z | 2015-02-10T23:26:39.000Z | corehq/apps/users/middleware.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/users/middleware.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | from redis_cache.exceptions import ConnectionInterrumped
from django.conf import settings
import django.core.exceptions
from dimagi.utils.couch.cache import cache_core
rcache = cache_core.get_redis_default_cache()
############################################################################################################
from corehq.apps.users.models import CouchUser, PublicUser, InvalidUser
from corehq.apps.domain.models import Domain
SESSION_USER_KEY_PREFIX = "session_user_doc_%s"
#def process_request(self, request):
############################################################################################################
| 44.746835 | 108 | 0.539745 | from redis_cache.exceptions import ConnectionInterrumped
from django.conf import settings
import django.core.exceptions
from dimagi.utils.couch.cache import cache_core
rcache = cache_core.get_redis_default_cache()
############################################################################################################
from corehq.apps.users.models import CouchUser, PublicUser, InvalidUser
from corehq.apps.domain.models import Domain
SESSION_USER_KEY_PREFIX = "session_user_doc_%s"
class UsersMiddleware(object):
    """Django middleware that attaches a CouchUser to each request.

    Resolves the couch user for the authenticated Django user (with a
    redis-backed session->doc_id cache) and, for domain-scoped views,
    falls back to PublicUser/InvalidUser depending on domain visibility.
    """
    def __init__(self):
        """Disable the middleware entirely when no 'users' app is installed."""
        # Normally we'd expect this class to be pulled out of the middleware list, too,
        # but in case someone forgets, this will stop this class from being used.
        found_domain_app = False
        for app_name in settings.INSTALLED_APPS:
            if app_name == "users" or app_name.endswith(".users"):
                found_domain_app = True
                break
        if not found_domain_app:
            raise django.core.exceptions.MiddlewareNotUsed
    #def process_request(self, request):
    def process_view(self, request, view_func, view_args, view_kwargs):
        """Populate request.domain/org/couch_user before the view runs."""
        # Expose URL kwargs early so the cache-lookup code below can use them.
        if 'domain' in view_kwargs:
            request.domain = view_kwargs['domain']
        if 'org' in view_kwargs:
            request.org = view_kwargs['org']
        if request.user and hasattr(request.user, 'get_profile'):
            sessionid = request.COOKIES.get('sessionid', None)
            if sessionid:
                # roundabout way to keep doc_id based caching consistent.
                # get user doc_id from session_id
                # Sentinels distinguish "not cached" from "redis unreachable";
                # on a redis outage we rebuild the user but skip the cache write.
                MISSING = object()
                INTERRUPTED = object()
                try:
                    cached_user_doc_id = rcache.get(SESSION_USER_KEY_PREFIX % sessionid, MISSING)
                except ConnectionInterrumped:
                    cached_user_doc_id = INTERRUPTED
                # disable session based couch user caching - to be enabled later.
                if cached_user_doc_id not in (MISSING, INTERRUPTED):
                    # cache hit
                    couch_user = CouchUser.wrap_correctly(
                        cache_core.cached_open_doc(
                            CouchUser.get_db(), cached_user_doc_id
                        )
                    )
                else:
                    # cache miss, write to cache
                    couch_user = CouchUser.from_django_user(request.user)
                    if couch_user:
                        cache_core.do_cache_doc(couch_user.to_json())
                        if cached_user_doc_id is not INTERRUPTED:
                            rcache.set(SESSION_USER_KEY_PREFIX % sessionid, couch_user.get_id)
                request.couch_user = couch_user
        if 'domain' in view_kwargs:
            domain = request.domain
            # NOTE(review): request.couch_user is only assigned above when a
            # session cookie exists — presumably a default exists elsewhere;
            # confirm this cannot raise AttributeError here.
            if not request.couch_user:
                couch_domain = Domain.view("domain/domains",
                    key=domain,
                    reduce=False,
                    include_docs=True,
                ).one()
                if couch_domain and couch_domain.is_public:
                    request.couch_user = PublicUser(domain)
                else:
                    request.couch_user = InvalidUser()
            if request.couch_user:
                request.couch_user.current_domain = domain
        return None
############################################################################################################
| 2,799 | 9 | 75 |
0ec83fe7fc7dd6e6e433cfe711c2b981da1ad9e9 | 1,003 | py | Python | tests/fixtures/noqa/noqa_pre38.py | Kvm99/wemake-python-styleguide | a415339ed47de3ff03754bddd9b57316f494dfb1 | [
"MIT"
] | 1 | 2020-02-21T18:58:44.000Z | 2020-02-21T18:58:44.000Z | tests/fixtures/noqa/noqa_pre38.py | Kvm99/wemake-python-styleguide | a415339ed47de3ff03754bddd9b57316f494dfb1 | [
"MIT"
] | 15 | 2020-02-22T11:09:46.000Z | 2020-02-27T16:36:54.000Z | tests/fixtures/noqa/noqa_pre38.py | Kvm99/wemake-python-styleguide | a415339ed47de3ff03754bddd9b57316f494dfb1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This file represents how AST worked before python3.8 release.
We used to have violations on the first decorator
that wraps function, method, or a class.
We also store here things that are ``SyntaxError`` in python3.8 and above.
"""
@first # noqa: WPS216
@second
@third(param='a')
@fourth
@fifth()
@error
iters = list((yield letter) for letter in 'ab') # noqa: WPS416
| 20.06 | 75 | 0.642074 | # -*- coding: utf-8 -*-
"""
This file represents how AST worked before python3.8 release.
We used to have violations on the first decorator
that wraps function, method, or a class.
We also store here things that are ``SyntaxError`` in python3.8 and above.
"""
class WithStatic(object):
@staticmethod # noqa: WPS602
def some_static(arg1):
anti_wps428 = 1
@staticmethod # noqa: WPS602
async def some_async_static(arg1):
anti_wps428 = 1
@first # noqa: WPS216
@second
@third(param='a')
@fourth
@fifth()
@error
def decorated():
anti_wps428 = 1
iters = list((yield letter) for letter in 'ab') # noqa: WPS416
def wrong_comprehension1():
return [
node for node in 'ab' if node != 'a' if node != 'b' # noqa: WPS307
]
def wrong_comprehension2():
return [
target # noqa: WPS224
for assignment in range(hex_number)
for target in range(assignment)
for _ in range(10)
if isinstance(target, int)
]
| 380 | 125 | 91 |
cbf5c9870a2ba2de6edbbfa1dd3821eaa4520ae8 | 368 | py | Python | dataentry/migrations/0006_alter_user_content.py | abrehman90/Integrate-SummerNote-in-Django | a588578f007e153b85e1b18e71fa37d05bdef7ef | [
"MIT"
] | 1 | 2021-12-15T03:47:19.000Z | 2021-12-15T03:47:19.000Z | dataentry/migrations/0006_alter_user_content.py | abrehman90/Integrate-SummerNote-in-Django | a588578f007e153b85e1b18e71fa37d05bdef7ef | [
"MIT"
] | null | null | null | dataentry/migrations/0006_alter_user_content.py | abrehman90/Integrate-SummerNote-in-Django | a588578f007e153b85e1b18e71fa37d05bdef7ef | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-12 15:38
from django.db import migrations, models
| 19.368421 | 47 | 0.581522 | # Generated by Django 3.2.6 on 2021-08-12 15:38
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header comment): alters User.content to a
    # plain TextField. Generated migrations should not be edited by hand.
    dependencies = [
        ('dataentry', '0005_user_content'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='content',
            field=models.TextField(),
        ),
    ]
| 0 | 254 | 23 |
6831d22807efd0f098528d5f871a514617ea8537 | 34,692 | py | Python | vibro_estparam/mixednoise.py | VibroSim/vibro_estparam | 1c1c4cadeee4fbe38bb153161ebd62ba7a137111 | [
"MIT"
] | null | null | null | vibro_estparam/mixednoise.py | VibroSim/vibro_estparam | 1c1c4cadeee4fbe38bb153161ebd62ba7a137111 | [
"MIT"
] | null | null | null | vibro_estparam/mixednoise.py | VibroSim/vibro_estparam | 1c1c4cadeee4fbe38bb153161ebd62ba7a137111 | [
"MIT"
] | null | null | null | import sys
import os
import os.path
import glob
import collections
import re
import numpy as np
import scipy as sp
import scipy.integrate
import scipy.special
import theano
import theano.tensor as tt
from theano.printing import Print
from theano import gof
use_accel = True
if use_accel:
from . import mixednoise_accel
pass
import pymc3 as pm
#import pandas as pd
from theano.compile.ops import as_op
from theano.gradient import grad_not_implemented
if __name__=="__main__":
pass
| 59.813793 | 510 | 0.633143 | import sys
import os
import os.path
import glob
import collections
import re
import numpy as np
import scipy as sp
import scipy.integrate
import scipy.special
import theano
import theano.tensor as tt
from theano.printing import Print
from theano import gof
use_accel = True
if use_accel:
from . import mixednoise_accel
pass
import pymc3 as pm
#import pandas as pd
from theano.compile.ops import as_op
from theano.gradient import grad_not_implemented
class mixednoise_op(gof.Op):
__props__ = ()
itypes = None
otypes = None
observed = None # Note that "observed" MUST NOT BE CHANGED unless you clear the evaluation_cache
evaluation_cache = None
inhibit_accel_pid = None # Set this to a pid to prevent acceleration from happening in this pid. Used to prevent openmp parallelism in main process that causes python multiprocessing (used by pymc3) to bork.
    def __init__(self,observed,inhibit_accel_pid=None):
        """Build the op for a fixed vector of observations.

        observed -- 1-D array of measured values; must not be mutated
                    afterwards, since evaluation_cache memoizes results
                    keyed on it (see class comment).
        inhibit_accel_pid -- pid in which the C acceleration is skipped
                    (prevents OpenMP parallelism in the parent process from
                    borking pymc3's multiprocessing).
        """
        self.observed=observed
        self.inhibit_accel_pid=inhibit_accel_pid
        # Theano signature: (sigma_additive, sigma_multiplicative, prediction) -> dvector
        self.itypes = [tt.dscalar,tt.dscalar,tt.dvector] # sigma_additive, sigma_multiplicative, prediction
        self.otypes = [tt.dvector]
        # Gradient helpers wrapped as standalone theano ops via as_op.
        self.grad_sigma_additive_op = as_op(itypes=[tt.dscalar,tt.dscalar,tt.dvector],otypes=[tt.dvector])(self.grad_sigma_additive,) # infer_shape=lambda node,input_shapes: [ )
        self.grad_sigma_multiplicative_op = as_op(itypes=[tt.dscalar,tt.dscalar,tt.dvector],otypes=[tt.dvector])(self.grad_sigma_multiplicative)
        self.grad_prediction_op = as_op(itypes=[tt.dscalar,tt.dscalar,tt.dvector],otypes=[tt.dvector])(self.grad_prediction)
        self.evaluation_cache = {}
        pass
    def infer_shape(self,node,input_shapes):
        # Output always has one entry per observed data point, regardless of inputs.
        return [ (self.observed.shape[0],) ]
@staticmethod
def lognormal_normal_convolution_kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y):
# y is variable of integration
# Formula is for pdf of (a0*n1 + n2) evaluated at x = observed value for a where a0 = prediction
# n1 ~ lognormal(0,sigma_multiplicative^2)
# a0n1 ~ lognormal(ln(a0),sigma_multiplicative^2)
# n2 ~ normal(0,sigma_additive^2)
ret = (1.0/(y*sigma_multiplicative*np.sqrt(2.0*np.pi)))*np.exp(-((np.log(y)-np.log(prediction_indexed))**2.0)/(2.0*sigma_multiplicative**2.0))*(1.0/(sigma_additive*np.sqrt(2.0*np.pi)))*np.exp(-((observed_indexed-y)**2.0)/(2.0*sigma_additive**2.0))
#print("kernel(%g,%g,%g,%g,%g) returns %g\n" % (y,sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,ret))
return ret
    @staticmethod
    def lognormal_normal_convolution_integral_y_zero_to_eps(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,eps):
        """Closed form of the convolution integral over y in [0, eps].

        For small y the normal factor is approximated by its value at y=0
        (y presumed small relative to observed), so the remaining lognormal
        integral has the closed form (erf antiderivative of the lognormal pdf):
          (1/(2*pi*s_m*s_a)) * exp(-observed^2/(2*s_a^2))
            * (1/2)*sqrt(2*pi)*s_m * (erf((ln(eps)-ln(prediction))/(sqrt(2)*s_m)) + 1)
        using erf(-inf) = -1 at the lower limit y=0.
        """
        if sigma_additive < 1e-20 or sigma_multiplicative < 1e-20:
            # ridiculously small.... treat derivative as zero
            return 0.0
        #if prediction_indexed==0.0:
        #    return 0.0 ... not correct!
        return (1.0/(sigma_multiplicative*sigma_additive*2.0*np.pi)) *np.exp(-((observed_indexed)**2.0)/(2.0*sigma_additive**2.0)) * (1.0/2.0)*np.sqrt(np.pi)*np.sqrt(2.0)*sigma_multiplicative*( scipy.special.erf((np.log(eps)-np.log(prediction_indexed))/(np.sqrt(2.0)*sigma_multiplicative)) + 1.0)
    @classmethod
    def lognormal_normal_convolution_kernel_deriv_sigma_additive(cls,sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y):
        """Partial derivative of the convolution kernel w.r.t. sigma_additive.

        Differentiating the normal factor multiplies the kernel by
        ((observed - y)^2 / sigma_additive^3 - 1/sigma_additive);
        ridiculously small sigmas short-circuit to 0 to avoid the division.
        """
        if sigma_additive < 1e-20 or sigma_multiplicative < 1e-20:
            # ridiculously small.... treat derivative as zero
            return 0.0
        res = cls.lognormal_normal_convolution_kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y)*( (((observed_indexed-y)**2.0)/(sigma_additive**3.0)) - (1.0/sigma_additive))
        #print("kernel_dsa_unaccel(%g,%g,%g,%g,%g) returns %g\n" % (y,sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,res))
        return res
@classmethod
def lognormal_normal_convolution_integral_y_zero_to_eps_deriv_sigma_additive(cls,sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,eps):
# ... Treating y=0 in additive noise exponent (because y presumed small relative to observed):
# Integral as y = 0...eps of (1.0/(y*sigma_multiplicative*np.sqrt(2.0*np.pi)))*np.exp(-((np.log(y)-np.log(prediction))**2.0)/(2.0*sigma_multiplicative**2.0))*(1.0/(sigma_additive*np.sqrt(2.0*np.pi)))*np.exp(-((observed)**2.0)/(2.0*sigma_additive**2.0)) * ( (((observed_indexed)**2.0)/(sigma_additive**3.0)) - (1.0/sigma_additive) )
# = (1.0/(sigma_multiplicative*sigma_additive*2*pi)) *np.exp(-((observed)**2.0)/(2.0*sigma_additive**2.0)) * ( (((observed_indexed)**2.0)/(sigma_additive**3.0)) - (1.0/sigma_additive) ) * Integral as y = 0...eps of (1.0/y)*np.exp(-((np.log(y)-np.log(prediction))**2.0)/(2.0*sigma_multiplicative**2.0))
# By Wolfram Alpha
# = (1.0/(sigma_multiplicative*sigma_additive*2*pi)) *np.exp(-((observed)**2.0)/(2.0*sigma_additive**2.0)) * ( (((observed_indexed)**2.0)/(sigma_additive**3.0)) - (1.0/sigma_additive) ) * (1/2)*sqrt(pi)*sqrt(2)*sigma_multiplicative*erf((log(y)-log(prediction))/(sqrt(2)*sigma_multiplicative)) evaluated from y=0...eps
# = (1.0/(sigma_multiplicative*sigma_additive*2*pi)) *np.exp(-((observed)**2.0)/(2.0*sigma_additive**2.0)) * ( (((observed_indexed)**2.0)/(sigma_additive**3.0)) - (1.0/sigma_additive) ) * (1/2)*sqrt(pi)*sqrt(2)*sigma_multiplicative*( erf((log(eps)-log(prediction))/(sqrt(2)*sigma_multiplicative))- erf((log(0)-log(prediction))/(sqrt(2)*sigma_multiplicative))) ... where log(0) is -inf and erf(-inf)= -1
# = (1.0/(sigma_multiplicative*sigma_additive*2*pi)) *np.exp(-((observed)**2.0)/(2.0*sigma_additive**2.0)) * ( (((observed_indexed)**2.0)/(sigma_additive**3.0)) - (1.0/sigma_additive) ) * (1/2)*sqrt(pi)*sqrt(2)*sigma_multiplicative*( erf((log(eps)-log(prediction))/(sqrt(2)*sigma_multiplicative)) + 1)
# ... reduces to ( (((observed_indexed)**2.0)/(sigma_additive**3.0)) - (1.0/sigma_additive) ) * lognormal_normal_convolution_integral_y_zero_to_eps()
if sigma_additive < 1e-20 or sigma_multiplicative < 1e-20:
# ridiculously small.... treat derivative as zero
return 0.0
return ( (((observed_indexed)**2.0)/(sigma_additive**3.0)) - (1.0/sigma_additive) ) * cls.lognormal_normal_convolution_integral_y_zero_to_eps(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,eps)
@classmethod
def lognormal_normal_convolution_kernel_deriv_sigma_multiplicative(cls,sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y):
# y is variable of integration
# Formula is for pdf of (a0*n1 + n2) evaluated at x = observed value for a where a0 = prediction
# n1 ~ lognormal(0,sigma_multiplicative^2)
# a0n1 ~ lognormal(ln(a0),sigma_multiplicative^2)
# n2 ~ normal(0,sigma_additive^2)
if sigma_additive < 1e-20 or sigma_multiplicative < 1e-20:
# ridiculously small.... treat derivative as zero
return 0.0
if prediction_indexed == 0.0:
return 0.0
res = cls.lognormal_normal_convolution_kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y)*( (((np.log(y) - np.log(prediction_indexed))**2.0)/(sigma_multiplicative**3.0)) - (1.0/sigma_multiplicative))
if not np.isfinite(res):
# res becomes nan if prediction==0, but this is OK because the derivative is forced to zero by the exponential in the normal convolution kernel in that case
assert(prediction_indexed==0.0)
res=0.0
pass
return res
    @classmethod
    def lognormal_normal_convolution_integral_y_zero_to_eps_deriv_sigma_multiplicative(cls,sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,eps):
        """Analytic integral over y in [0, eps] of the sigma_multiplicative derivative.

        Splits the derivative's multiplier
        ((log y - log a0)^2 / s^3 - 1/s) into its two terms: the constant
        -1/s term just rescales the base singular integral, while the
        y-dependent term is integrated in closed form (Wolfram Alpha
        derivation below), again treating y ~ 0 inside the additive-noise
        exponent because eps is small relative to the observed value.
        """
        if sigma_additive < 1e-20 or sigma_multiplicative < 1e-20:
            # ridiculously small.... treat derivative as zero
            return 0.0
        if prediction_indexed == 0.0:
            return 0.0
        # The -1/sigma_multiplicative term is a constant multiplier of the
        # base kernel, so it is handled exactly like the sigma_additive case:
        one_over_sigmamultiplicative_term = - (1.0/sigma_multiplicative) * cls.lognormal_normal_convolution_integral_y_zero_to_eps(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,eps)
        # The (log(y)-log(a0))^2/s^3 term diverges as y->0 (log(y)->-inf), so
        # it must be integrated separately.  Pulling the additive-noise
        # Gaussian out front (y treated as 0 in its exponent) and integrating
        # (1/y)exp(-(ln(y)-ln(a0))^2/s)*(ln(y)-ln(a0))^2 via Wolfram Alpha,
        # then taking the y->0 limit (erf -> -1; the other boundary term -> 0
        # by l'Hopital), gives:
        # (1/(sigma_additive*sqrt(2*pi)))*exp(-x^2/(2*sigma_additive^2)) * (1/(sigma_multiplicative^2*sqrt(2pi)))* [ (1/2)sqrt(2*pi)*sigma_multiplicative*erf((ln(epsilon)-ln(a0))/(sigma_multiplicative*sqrt(2))) - (ln(epsilon)-ln(a0))*(a0^(ln(epsilon)/sigma_multiplicative^2))*exp(-(ln^2(a0) + ln^2(epsilon))/(2*sigma_multiplicative^2)) + (1/2)sqrt(2pi)*sigma_multiplicative ]
        additive_factor = (1.0/(sigma_additive*np.sqrt(2.0*np.pi)))*np.exp(-observed_indexed**2.0/(2.0*sigma_additive**2.0))
        # NOTE: the direct transcription of the formula above contains
        # prediction_indexed**(np.log(eps)/sigma_multiplicative**2.0), which is
        # numerically problematic (overflow from very large numbers).  It and
        # the following exp() are merged using (a^b)*exp(c) === exp(c + b*log(a)):
        integration_term = additive_factor * (1.0/(sigma_multiplicative**2.0*np.sqrt(2.0*np.pi)))* ( (1.0/2.0)*np.sqrt(2.0*np.pi)*sigma_multiplicative*scipy.special.erf((np.log(eps)-np.log(prediction_indexed))/(sigma_multiplicative*np.sqrt(2.0))) - (np.log(eps)-np.log(prediction_indexed))*np.exp(-(np.log(prediction_indexed)**2.0 + np.log(eps)**2.0)/(2.0*sigma_multiplicative**2.0) + (np.log(eps)/sigma_multiplicative**2.0)*np.log(prediction_indexed) ) + (1.0/2.0)*np.sqrt(2.0*np.pi)*sigma_multiplicative )
        if (not np.isfinite(integration_term)) or (not np.isfinite(one_over_sigmamultiplicative_term)):
            assert(prediction_indexed==0.0) # Know this happens in this case and it is OK because derivative is indeed zero
            return 0.0
        return integration_term + one_over_sigmamultiplicative_term
@staticmethod
def lognormal_normal_convolution_integral_y_zero_to_eps_deriv_prediction(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,eps):
# ... Treating y=0 in additive noise exponent (because y presumed small relative to observed):
if prediction_indexed==0.0:
return 0.0
if sigma_additive < 1e-20 or sigma_multiplicative < 1e-20:
# ridiculously small.... treat derivative as zero
return 0.0
# The effect of the derivative is to multiply the original kernel by (((np.log(y) - np.log(prediction_indexed))**2.0)/((sigma_multiplicative**2.0)*prediction_indexed))
# As usual the additive factor in the integral can be pulled out as a constant by neglecting y relative to observed:
additive_factor = (1.0/(sigma_additive*np.sqrt(2.0*np.pi)))*np.exp(-observed_indexed**2.0/(2.0*sigma_additive**2.0))
# The remaining integral has the form: integral of (1/y)*exp(-(log(y)-log(c))^2/s)*(log(y)-log(c)) dy
# by Wolfram Alpha this integrates to -(1/2)*s*exp(-((log(c)-log(y))^2)/s)
# As worked out on paper, we get
integration = -1.0/(sigma_multiplicative*prediction_indexed*np.sqrt(2.0*np.pi))*np.exp(-((np.log(prediction_indexed)-np.log(eps))**2.0)/(2.0*sigma_multiplicative**2.0))
if not np.isfinite(integration):
# integration becomes nan if prediction==0, but this is OK because the derivative is forced to zero by the exponential in the normal convolution kernel in that case
assert(prediction_indexed==0.0)
integration=0.0
pass
return additive_factor*integration
@classmethod
def lognormal_normal_convolution_kernel_deriv_prediction(cls,sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y):
# y is variable of integration
# Formula is for pdf of (a0*n1 + n2) evaluated at x = observed value for a where a0 = prediction
# n1 ~ lognormal(0,sigma_multiplicative^2)
# a0n1 ~ lognormal(ln(a0),sigma_multiplicative^2)
# n2 ~ normal(0,sigma_additive^2)
res = cls.lognormal_normal_convolution_kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y)* ((np.log(y) - np.log(prediction_indexed))/((sigma_multiplicative**2.0)*prediction_indexed))
if not np.isfinite(res):
# res becomes nan if prediction==0, but this is OK because the derivative is forced to zero by the exponential in the normal convolution kernel in that case
assert(prediction_indexed==0.0)
res=0.0
pass
return res
#@classmethod
#def integrate_lognormal_normal_kernel(cls,sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed):
# # Integration of kernel singularity: Kernel is singular at y=0
# # Perform integration from y=0 to y=eps analytically.
# # where eps presumed small relative to observed.
# eps = observed_indexed/100.0
#
#
# singular_portion = cls.lognormal_normal_convolution_integral_y_zero_to_eps(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,eps)
# # Break integration into singular portion, portion up to observed value, portion to infinity to help make sure quadrature is accurate.
# p1 = scipy.integrate.quad(lambda y: cls.lognormal_normal_convolution_kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y),eps,observed_indexed)[0]
# p2 = scipy.integrate.quad(lambda y: cls.lognormal_normal_convolution_kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y),observed_indexed,np.inf)[0]
# return singular_portion + p1 + p2
@staticmethod
def integrate_kernel(integral_y_zero_to_eps,kernel,sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed):
# Integration of kernel singularity: Kernel is singular at y=0
# Perform integration from y=0 to y=eps analytically.
# where eps presumed small relative to observed.
eps = observed_indexed/100.0
bounds = np.array((eps,
observed_indexed-sigma_additive,
observed_indexed+sigma_additive,
prediction_indexed*np.exp(-sigma_multiplicative),
prediction_indexed*np.exp(sigma_multiplicative)),dtype='d')
bounds.sort()
bounds[bounds < eps] = eps
assert(bounds.shape[0]==5)
singular_portion = integral_y_zero_to_eps(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,eps)
# Break integration into singular portion, portion up to observed value, portion to infinity to help make sure quadrature is accurate.
#print("Integration from y=%g... %g" % (bounds[0],bounds[1]))
if bounds[0] < bounds[1]:
(p1,p1err) = scipy.integrate.quad(lambda y: kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y),bounds[0],bounds[1],epsabs=3e-15)
#print("integral from %g to %g, sa=%g, sm=%g, pi=%g, oi=%g,ea=%g" %(bounds[0],bounds[1],sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,3e-15))
pass
else:
p1=0.0
p1err=0.0
pass
#print("Integration from y=%g... %g" % (bounds[1],bounds[2]))
if bounds[1] < bounds[2]:
(p2,p2err) = scipy.integrate.quad(lambda y: kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y),bounds[1],bounds[2],epsabs=3e-15)
#print("integral from %g to %g, sa=%g, sm=%g, pi=%g, oi=%g,ea=%g" %(bounds[1],bounds[2],sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,3e-15))
pass
else:
p2=0.0
p2err=0.0
pass
#print("Integration from y=%g... %g" % (bounds[2],bounds[3]))
if bounds[2] < bounds[3]:
(p3,p3err) = scipy.integrate.quad(lambda y: kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y),bounds[2],bounds[3],epsabs=3e-15)
pass
#print("integral from %g to %g, sa=%g, sm=%g, pi=%g, oi=%g,ea=%g" %(bounds[2],bounds[3],sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,3e-15))
else:
p3=0.0
p3err=0.0
pass
#print("Integration from y=%g... %g" % (bounds[3],bounds[4]))
if bounds[3] < bounds[4]:
(p4,p4err) = scipy.integrate.quad(lambda y: kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y),bounds[3],bounds[4],epsabs=3e-15)
#print("integral from %g to %g, sa=%g, sm=%g, pi=%g, oi=%g,ea=%g" %(bounds[3],bounds[4],sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,3e-15))
pass
else:
p4=0.0
p4err=0.0
pass
#print("Integration from y=%g... inf" % (bounds[4]))
(p5,p5err) = scipy.integrate.quad(lambda y: kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed,y),bounds[4],np.inf,epsabs=1e-24)
if not np.isfinite(singular_portion+p1+p2+p3+p4+p5):
print("integrate_kernel returns %s from %s, %s, %s, %s, %s, and %s; p1err=%g, p2err=%g,p3err=%g,p4err=%g,p5err=%g" % (str(singular_portion+p1+p2+p3+p4+p5),str(singular_portion),str(p1),str(p2),str(p3),str(p4),str(p5),p1err,p2err,p3err,p4err,p5err))
import pdb
pdb.set_trace()
pass
#print("kernel(1,1,1,1,1)=%g" % (kernel(1,1,1,1,1)))
return singular_portion + p1 + p2 + p3 + p4 + p5
def evaluate_p_from_cache(self,sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed):
# Evaluating the baseline probability is required both in each call to perform() and in the
# gradient along each axis. This uses a dictionary as a cache so that they don't need to be
# recomputed
key = (float(sigma_additive),float(sigma_multiplicative),float(prediction_indexed),float(observed_indexed))
if not key in self.evaluation_cache:
#p = self.integrate_lognormal_normal_kernel(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed)
p = self.integrate_kernel(self.lognormal_normal_convolution_integral_y_zero_to_eps,
self.lognormal_normal_convolution_kernel,
sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed)
self.evaluation_cache[key]=p
return p
return self.evaluation_cache[key]
def perform(self,node,inputs_storage,outputs_storage):
(sigma_additive, sigma_multiplicative, prediction) = inputs_storage
logp = np.zeros(self.observed.shape[0],dtype='d')
if use_accel and self.inhibit_accel_pid != os.getpid():
p = mixednoise_accel.integrate_lognormal_normal_convolution(self.lognormal_normal_convolution_integral_y_zero_to_eps,
self.evaluation_cache,
sigma_additive,sigma_multiplicative,prediction,self.observed)
logp=np.log(p)
pass
else:
for index in range(self.observed.shape[0]):
prediction_indexed=prediction[index]
observed_indexed = self.observed[index]
p = self.evaluate_p_from_cache(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed)
logp[index]=np.log(p)
pass
pass
outputs_storage[0][0]=logp
pass
def grad_sigma_additive(self,sigma_additive,sigma_multiplicative,prediction):
# gradient of log p is (1/p) dp
dlogp = np.zeros(self.observed.shape[0],dtype='d')
if use_accel and self.inhibit_accel_pid != os.getpid():
p = mixednoise_accel.integrate_lognormal_normal_convolution(self.lognormal_normal_convolution_integral_y_zero_to_eps,
self.evaluation_cache,
sigma_additive,sigma_multiplicative,prediction,self.observed)
dp = mixednoise_accel.integrate_deriv_sigma_additive(self.lognormal_normal_convolution_integral_y_zero_to_eps_deriv_sigma_additive,
sigma_additive,sigma_multiplicative,prediction,self.observed)
dlogp = dp/p
assert((np.isfinite(dp)).all())
assert((np.isfinite(p)).all())
dlogp[~np.isfinite(dlogp)]=0.0
#print("accel: p=%s; dp=%s; dlogp = %s" % (str(p),str(dp),str(dlogp)))
pass
else:
for index in range(self.observed.shape[0]):
prediction_indexed=prediction[index]
observed_indexed = self.observed[index]
p = self.evaluate_p_from_cache(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed)
dp = self.integrate_kernel(self.lognormal_normal_convolution_integral_y_zero_to_eps_deriv_sigma_additive,
self.lognormal_normal_convolution_kernel_deriv_sigma_additive,
sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed)
dlogp[index] = (1.0/p) * dp
assert(np.isfinite(dp))
assert(np.isfinite(p))
if not np.isfinite(dlogp[index]): # Would come from overflow (div/0) which I think should just be treated as zero
dlogp[index]=0.0
pass
pass
#print("unaccel: p=%s; dp=%s; dlogp = %s" % (str(p),str(dp),str(dlogp)))
pass
#print("grad_sigma_additive() returns %s from p = %s and dp = %s" % (str(dlogp),str(p),str(dp)))
if (~np.isfinite(dlogp)).any():
import pdb
pdb.set_trace()
pass
return dlogp
def grad_sigma_multiplicative(self,sigma_additive,sigma_multiplicative,prediction):
# gradient of log p is (1/p) dp
dlogp = np.zeros(self.observed.shape[0],dtype='d')
if use_accel and self.inhibit_accel_pid != os.getpid():
p = mixednoise_accel.integrate_lognormal_normal_convolution(self.lognormal_normal_convolution_integral_y_zero_to_eps,
self.evaluation_cache,
sigma_additive,sigma_multiplicative,prediction,self.observed)
dp = mixednoise_accel.integrate_deriv_sigma_multiplicative(self.lognormal_normal_convolution_integral_y_zero_to_eps_deriv_sigma_multiplicative,
sigma_additive,sigma_multiplicative,prediction,self.observed)
dlogp = dp/p
assert((np.isfinite(dp)).all())
assert((np.isfinite(p)).all())
dlogp[~np.isfinite(dlogp)]=0.0
pass
else:
for index in range(self.observed.shape[0]):
prediction_indexed=prediction[index]
observed_indexed = self.observed[index]
p = self.evaluate_p_from_cache(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed)
dp = self.integrate_kernel(self.lognormal_normal_convolution_integral_y_zero_to_eps_deriv_sigma_multiplicative,
self.lognormal_normal_convolution_kernel_deriv_sigma_multiplicative,
sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed)
dlogp[index] = (1.0/p) * dp
assert(np.isfinite(dp))
assert(np.isfinite(p))
if ~np.isfinite(dlogp[index]): # Would come from overflow (div/0) which I think should just be treated as zero
dlogp[index]=0.0
pass
pass
pass
#print("grad_sigma_multiplicative: use_accel=%s p=%s dp=%s dlogp=%s" % (str(use_accel),str(p),str(dp),str(dlogp)))
if (~np.isfinite(dlogp)).any():
import pdb
pdb.set_trace()
pass
return dlogp
    def grad_prediction(self,sigma_additive,sigma_multiplicative,prediction):
        """Per-observation d(log p)/d(prediction_i) = (1/p_i) * dp_i.

        Accelerated whole-vector path when the C extension is usable,
        otherwise a cached per-observation loop; non-finite ratios
        (overflow/div-by-zero) are treated as zero gradient.
        """
        # gradient of log p is (1/p) dp
        dlogp = np.zeros(self.observed.shape[0],dtype='d')
        if use_accel and self.inhibit_accel_pid != os.getpid():
            p = mixednoise_accel.integrate_lognormal_normal_convolution(self.lognormal_normal_convolution_integral_y_zero_to_eps,
                                                                        self.evaluation_cache,
                                                                        sigma_additive,sigma_multiplicative,prediction,self.observed)
            dp = mixednoise_accel.integrate_deriv_prediction(self.lognormal_normal_convolution_integral_y_zero_to_eps_deriv_prediction,
                                                             sigma_additive,sigma_multiplicative,prediction,self.observed)
            dlogp = dp/p
            assert((np.isfinite(dp)).all())
            assert((np.isfinite(p)).all())
            # overflow (division by ~0) is treated as a zero gradient
            dlogp[~np.isfinite(dlogp)]=0.0
            pass
        else:
            for index in range(self.observed.shape[0]):
                prediction_indexed=prediction[index]
                observed_indexed = self.observed[index]
                p = self.evaluate_p_from_cache(sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed)
                dp = self.integrate_kernel(self.lognormal_normal_convolution_integral_y_zero_to_eps_deriv_prediction,
                                           self.lognormal_normal_convolution_kernel_deriv_prediction,
                                           sigma_additive,sigma_multiplicative,prediction_indexed,observed_indexed)
                dlogp[index] = (1.0/p) * dp
                assert(np.isfinite(dp))
                assert(np.isfinite(p))
                if not np.isfinite(dlogp[index]): # Would come from overflow (div/0) which I think should just be treated as zero
                    dlogp[index]=0.0
                    pass
                pass
            #print("grad_prediction() returns %s from p = %s and dp = %s" % (str(dlogp),str(p),str(dp)))
            pass
        if (~np.isfinite(dlogp)).any():
            # Should be unreachable after the masking above; deliberate debugger hook.
            import pdb
            pdb.set_trace()
            pass
        return dlogp
def grad(self,inputs,output_grads):
(sigma_additive, sigma_multiplicative, prediction) = inputs
# output_grads has a single element output_grads[0] corresponding to our single vector output
#
# ... In evaluating grad_x G(x),
# where G(x) is representable as C(f(x)) and f(x) is predict_crackheating_op.
#
# Then grad_x G(x) = dC/df * df/dx
# If f is a vector
# Then grad_x G(x) = sum_i dC/df_i * df_i/dx
# If x is also a vector
# Then grad_xj G(x) = sum_i dC/df_i * df_i/dxj
# And if xj can be x0 (mu) or x1 (log_msqrtR)
# Then grad_xj G(x) = (sum_i dC/df_i * df_i/dx0, sum_i dC/df_i * df_i/dx1),
#
# We are supposed to return the tensor product dC/df_i * df_i/dxj where
# * dC/df_i is output_gradients[0],
# * df_i/dx0 is predict_crackheating_grad_mu_op, and
# * df_i/dx1 is predict_crackheating_grad_log_msqrtR_op.
# Since f is a vectors, dC_df is also a vector
# and returning the tensor product means summing over the elements i.
# From the Theano documentation "The grad method must return a list containing
# one Variable for each input. Each returned Variable
# represents the gradient with respect to that input
# computed based on the symbolic gradients with
# respect to each output."
# So we return a list indexed over input j
#import pdb
#pdb.set_trace()
return [ (self.grad_sigma_additive_op(*inputs)*output_grads[0]).sum(), (self.grad_sigma_multiplicative_op(*inputs)*output_grads[0]).sum(), (self.grad_prediction_op(*inputs)*output_grads[0]) ]
pass
def CreateMixedNoise(name,
                     sigma_additive,
                     sigma_multiplicative,
                     prediction,
                     observed,inhibit_accel_pid=None):
    """Construct the mixed-noise Op and bind it into a PyMC3 DensityDist.

    Returns (op, density_dist): op is the mixednoise_op instance over the
    observed data, density_dist is the named pm.DensityDist whose log-
    probability is that Op applied to the given variables.
    """
    op = mixednoise_op(observed,inhibit_accel_pid=inhibit_accel_pid)
    def MixedNoiseLogP(sigma_additive,
                       sigma_multiplicative,
                       prediction):
        # Closure over "op"; Print() echoes the log-probability while sampling.
        return Print('MixedNoiseLogP')(op(sigma_additive,sigma_multiplicative,prediction))
    density = pm.DensityDist(name,
                             MixedNoiseLogP,
                             observed={
                                 "sigma_additive": sigma_additive,
                                 "sigma_multiplicative": sigma_multiplicative,
                                 "prediction": prediction
                             })
    return (op, density)
if __name__=="__main__":
    # Library module: no standalone command-line behavior is defined.
    pass
| 31,836 | 2,247 | 46 |
31d053562a5479ed0dba3716178c09ed5e5ad9ba | 11,337 | py | Python | evernode/bin/create.py | AtomHash/evernode | f70d82e78cf9f6d58208438e26485e9565abd875 | [
"MIT"
] | 1 | 2017-09-23T16:44:10.000Z | 2017-09-23T16:44:10.000Z | evernode/bin/create.py | AtomHash/evernode | f70d82e78cf9f6d58208438e26485e9565abd875 | [
"MIT"
] | 9 | 2017-09-25T14:44:17.000Z | 2020-09-16T01:47:43.000Z | evernode/bin/create.py | AtomHash/evernode | f70d82e78cf9f6d58208438e26485e9565abd875 | [
"MIT"
] | 1 | 2020-10-05T20:36:24.000Z | 2020-10-05T20:36:24.000Z | #!/usr/bin/env python
import os
import sys
import click
import yaml
from urllib import request
from evernode.classes import Json, Security
class Create:
""" Easy evernode app creation"""
app_name = None
dir_name = None
config_file = None
uwsgi_file = None
app_file = None
http_messages_file = None
branch = None
| 44.11284 | 80 | 0.538061 | #!/usr/bin/env python
import os
import sys
import click
import yaml
from urllib import request
from evernode.classes import Json, Security
class Create:
    """Scaffolds a new evernode application.

    Downloads template files (config.json, uwsgi.ini, app.py, language
    resources) from the evernode GitHub repository at the chosen branch,
    then optionally sets up a docker development environment and a mock
    module.
    """
    # Populated in __init__; declared here for discoverability.
    app_name = None
    dir_name = None
    config_file = None
    uwsgi_file = None
    app_file = None
    http_messages_file = None
    branch = None
    def __init__(self, app_name, branch='master'):
        """Create the full app skeleton under ./evernode_<app_name>.

        :param app_name: suffix of the generated application name
        :param branch: git branch of the evernode repo to pull templates from
        """
        self.app_name = 'evernode_%s' % (app_name)
        self.dir_name = './%s' % (self.app_name)
        self.branch = branch
        self.app_file = os.path.join(self.dir_name, 'app', 'app.py')
        self.http_messages_file = os.path.join(
            self.dir_name, 'app', 'resources',
            'lang', 'en', 'http_messages.lang')
        self.config_file = os.path.join(self.dir_name, 'app', 'config.json')
        self.uwsgi_file = os.path.join(self.dir_name, 'uwsgi.ini')
        print('Making folder structure.')
        self.make_structure()
        print('Downloading config.json...')
        self.configure_config()
        print('Downloading sample uwsgi.ini...')
        self.download_sample_uwsgi()
        print('Downloading sample app.py...')
        self.download_sample_app()
        print('Downloading sample resources/lang/en/http_messages.lang...')
        self.download_sample_http_errors()
        if click.confirm(
                'Use a docker development enviroment? [Default=Yes]',
                default=True):
            self.configure_docker()
        if click.confirm(
                'Create a mock module? [Default=Yes]', default=True):
            self.configure_module()
        print("""
        Done!
        You can now start using EverNode.
        %s folder created.
        1. Navigate into the EverNode app
            `$ cd %s`
        2. If you downloaded the docker files
            `$ cd docker`
            `$ docker-compose up --build`
        3. If you downloaded the mock module,
            goto https://api.localhost/v1/hello-world
            once the docker image has started.
        4. If using a database, please init!
            `$ cd app`
            `$ flask db init`
            `$ flask db migrate`
            `$ flask db upgrade`
        Notes:
            Add `127.0.0.1 api.localhost`
            to your hosts file.
        """ % (self.app_name, self.app_name))
    def __touch(self, path):
        """Create an empty file at path (or update its mtime if it exists)."""
        with open(path, 'a'):
            os.utime(path, None)
    def download_file(self, url, file_name):
        """Download url to the local path file_name."""
        request.urlretrieve(url, file_name)
    def configure_config(self):
        """Download the template config.json and personalize name/keys."""
        # download current config file from github
        self.download_file(
            ('https://raw.githubusercontent.com/AtomHash/evernode/'
             '%s/app/app/config.json' % (self.branch)),
            self.config_file)
        config = Json.from_file(self.config_file)
        config['NAME'] = self.app_name
        # Fresh per-app secrets so templates never share keys.
        config['SECRET'] = Security.generate_key()
        config['KEY'] = Security.generate_key()
        config['SQLALCHEMY_BINDS']['DEFAULT'] = \
            'mysql://<db_user>:<password>@<host>/<db_name>'
        Json.save_file(self.config_file, config)
    def download_sample_uwsgi(self):
        """Download the sample uwsgi.ini."""
        self.download_file(
            ('https://raw.githubusercontent.com/AtomHash/evernode/'
             '%s/app/uwsgi.ini' % (self.branch)),
            self.uwsgi_file)
    def download_sample_app(self):
        """Download the sample app.py entry point."""
        self.download_file(
            ('https://raw.githubusercontent.com/AtomHash/evernode/'
             '%s/app/app/app.py' % (self.branch)),
            self.app_file)
    def download_sample_http_errors(self):
        """Download the default English http_messages.lang resource."""
        self.download_file(
            ('https://raw.githubusercontent.com/AtomHash/evernode/'
             '%s/app/app/resources/lang/en/http_messages.lang'
             % (self.branch)),
            self.http_messages_file)
    def configure_module(self):
        """Download and lay out the mock module (routes/controllers/models)."""
        mock_module_path = os.path.join(
            self.dir_name, 'app', 'modules', 'mock_module')
        mock_module_files = [
            {'mock_module': [
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/app/modules/mock_module/routes.py') % (self.branch)]},
            {'controllers': [
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/app/modules/mock_module/controllers/'
                 '__init__.py') % (self.branch),
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/app/modules/mock_module/'
                 'controllers/mock_controller.py' % (self.branch))]},
            {'models': [
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/app/modules/mock_module/'
                 'models/__init__.py' % (self.branch)),
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/app/modules/mock_module/'
                 'models/hello_world_model.py' % (self.branch))]}
        ]
        for folder in mock_module_files:
            for key, value in folder.items():
                root = ''
                # FIX: compare strings with ==, not identity ("is"), which
                # only worked by accident via CPython string interning.
                if key == 'mock_module':
                    root = 'app/modules/mock_module'
                elif key == 'controllers':
                    root = 'app/modules/mock_module/controllers'
                elif key == 'models':
                    root = 'app/modules/mock_module/models'
                os.mkdir(os.path.join(self.dir_name, root))
                for file in value:
                    self.easy_file_download(root, file)
        os.mkdir(os.path.join(mock_module_path, 'resources'))
        os.mkdir(os.path.join(mock_module_path, 'resources', 'lang'))
        os.mkdir(os.path.join(mock_module_path, 'resources', 'lang', 'en'))
        os.mkdir(os.path.join(mock_module_path, 'resources', 'templates'))
        self.__touch(os.path.join(mock_module_path, '__init__.py'))
    def configure_docker(self):
        """Download docker files and personalize docker-compose/Dockerfile."""
        needed_files = [
            {'docker': [
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/docker/docker-compose.yml' % (self.branch))
            ]},
            {'build': [
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/docker/build/supervisord.conf' % (self.branch)),
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/docker/build/Dockerfile' % (self.branch)),
            ]},
            {'nginx': [
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/docker/build/nginx/nginx.conf' % (self.branch))
            ]},
            {'ssls': [
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/docker/build/nginx/ssls/'
                 'api.localhost.crt' % (self.branch)),
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/docker/build/nginx/ssls/'
                 'api.localhost.key' % (self.branch))
            ]},
            {'conf.d': [
                ('https://raw.githubusercontent.com/AtomHash/evernode/'
                 '%s/app/docker/build/nginx/conf.d/'
                 'api.localhost.conf' % (self.branch))
            ]}]
        for folder in needed_files:
            for key, value in folder.items():
                root = ''
                # FIX: use == instead of "is" for string comparison.
                if key == 'docker':
                    root = 'docker'
                elif key == 'build':
                    root = 'docker/build'
                elif key == 'nginx':
                    root = 'docker/build/nginx'
                elif key == 'ssls':
                    root = 'docker/build/nginx/ssls'
                elif key == 'conf.d':
                    root = 'docker/build/nginx/conf.d'
                os.mkdir(os.path.join(self.dir_name, root))
                for file in value:
                    self.easy_file_download(root, file)
        docker_compose = os.path.join(
            self.dir_name, 'docker', 'docker-compose.yml')
        with open(docker_compose, 'r') as docker_compose_opened:
            try:
                # FIX: safe_load -- the template is plain YAML and bare
                # yaml.load (no Loader) is deprecated and unsafe.
                yml = yaml.safe_load(docker_compose_opened)
                # Rename the template service/container after this app and
                # drop the development-only volume mount.
                yml['services'][self.app_name] = \
                    yml['services'].pop('evernode-development', None)
                yml['services'][self.app_name]['container_name'] = \
                    self.app_name
                del yml['services'][self.app_name]['volumes'][-1]
                with open(docker_compose, 'w') as new_docker_compose:
                    yaml.dump(yml, new_docker_compose,
                              default_flow_style=False, allow_unicode=True)
            except yaml.YAMLError as exc:
                # FIX: surface the parser error instead of discarding it.
                print('Error: Cannot parse docker-compose.yml (%s)' % (exc))
        dockerfile = os.path.join(
            self.dir_name, 'docker', 'build', 'Dockerfile')
        with open(dockerfile, 'r') as dockerfile_opened:
            lines = dockerfile_opened.readlines()
            # Replace the entrypoint so dependencies install on container start.
            lines[-1] = ('ENTRYPOINT pip3.6 install --upgrade -r /srv/app/'
                         'requirements.txt && python2.7 /usr/bin/supervisord')
            with open(dockerfile, 'w') as df_opened_writable:
                df_opened_writable.writelines(lines)
    def easy_file_download(self, root, file):
        """Download file (a URL) into <dir_name>/<root>/<basename>."""
        file_name = file.rsplit('/', 1)[-1]
        print('Downloading %s...' % (file_name))
        self.download_file(file, os.path.join(
            self.dir_name, root, file_name))
    def make_structure(self):
        """Create the bare app/logs/uploads/public directory skeleton.

        Exits with status 1 if the target project folder already exists.
        """
        if os.path.isdir(self.dir_name):
            print('Error: Projects already exists.')
            sys.exit(1)
        # make root folder
        os.mkdir(self.dir_name)
        # make app folder
        os.mkdir(os.path.join(self.dir_name, 'app'))
        requirements_file = os.path.join(
            self.dir_name, 'app', 'requirements.txt')
        self.__touch(requirements_file)
        with open(requirements_file, 'w') as requirements_file_writable:
            requirements_file_writable.write('evernode')
        # make logs folder
        os.mkdir(os.path.join(self.dir_name, 'logs'))
        # make uploads folder
        os.mkdir(os.path.join(self.dir_name, 'uploads'))
        # make public folder
        os.mkdir(os.path.join(self.dir_name, 'public'))
        # make public static folder
        os.mkdir(os.path.join(self.dir_name, 'public', 'static'))
        # make app root modules folder (importable package)
        os.mkdir(os.path.join(self.dir_name, 'app', 'modules'))
        self.__touch(
            os.path.join(self.dir_name, 'app', 'modules', '__init__.py'))
        # make app root resources folder
        os.mkdir(os.path.join(self.dir_name, 'app', 'resources'))
        os.mkdir(os.path.join(self.dir_name, 'app', 'resources', 'lang'))
        os.mkdir(os.path.join(self.dir_name, 'app', 'resources', 'lang', 'en'))
        os.mkdir(os.path.join(self.dir_name, 'app', 'resources', 'templates'))
| 10,650 | 0 | 319 |
a00c718ab07d7d811a4543592751e2c1d6258509 | 758 | py | Python | nanobrew/core/domain/option_type/text.py | nanobrew/nanobrew-core | ef180faa1e33af58ca7b7ff76a4ae016becb6cfc | [
"MIT"
] | 1 | 2020-04-02T08:54:11.000Z | 2020-04-02T08:54:11.000Z | nanobrew/core/domain/option_type/text.py | nanobrew/nanobrew-core | ef180faa1e33af58ca7b7ff76a4ae016becb6cfc | [
"MIT"
] | 19 | 2020-05-02T10:04:07.000Z | 2020-06-01T09:59:13.000Z | nanobrew/core/domain/option_type/text.py | nanobrew/nanobrew-core | ef180faa1e33af58ca7b7ff76a4ae016becb6cfc | [
"MIT"
] | 1 | 2020-03-13T15:59:19.000Z | 2020-03-13T15:59:19.000Z | from ..option import Option
| 25.266667 | 59 | 0.55277 | from ..option import Option
class Text(Option):
    """A free-form text option.

    Holds a label/description pair and a ``required`` flag used when
    validating user-supplied values.
    """

    def __init__(self, required: bool, label, description):
        self._required = required
        self._label = label
        self._description = description

    @classmethod
    def from_dict(cls, option):
        """Build an instance from a dict with 'required', 'label' and
        'description' keys."""
        # Use ``cls`` rather than hard-coding ``Text`` so subclasses
        # deserialize to their own type.
        return cls(
            option['required'],
            option['label'],
            option['description']
        )

    def validate(self, value):
        """Validate ``value``.

        Returns a 2-tuple ``(is_valid, errors)`` -- note it is never a
        bare bool, so the return annotation was dropped rather than kept
        as the misleading ``-> bool``.
        """
        errors = []
        if self._required and value is None:
            errors.append('Value can not be empty')
        return len(errors) == 0, errors

    def to_dict(self) -> dict:
        """Serialize this option for transport.

        NOTE(review): 'required' is not emitted here although
        ``from_dict`` expects it -- confirm whether a full round trip is
        intended before relying on ``from_dict(to_dict())``.
        """
        return {
            'option_type': 'text',
            'label': self._label,
            'description': self._description
        }
08c10500fbb3553cf3932a78df86b5fb9fbfa9be | 522 | py | Python | models/constants.py | couatl/http-server | 8fe92f4fa556d966cc3942ccea6a89a32af70962 | [
"MIT"
] | null | null | null | models/constants.py | couatl/http-server | 8fe92f4fa556d966cc3942ccea6a89a32af70962 | [
"MIT"
] | null | null | null | models/constants.py | couatl/http-server | 8fe92f4fa556d966cc3942ccea6a89a32af70962 | [
"MIT"
] | null | null | null | from enum import Enum
# Server identity string (presumably emitted in the HTTP "Server" response
# header -- confirm at the response-building call site).
ServerName = 'Http Technopark Highload server'
# Protocol version string (presumably used when composing the status line).
HttpVersion = '1.1'
| 20.076923 | 46 | 0.626437 | from enum import Enum
# Human-readable server name; looks like it is meant for the HTTP "Server"
# header -- verify against the response writer.
ServerName = 'Http Technopark Highload server'
# HTTP protocol version string advertised by this server.
HttpVersion = '1.1'
# File-extension -> MIME content-type table, declared with the functional
# Enum API. ``jpeg`` carries the same value as ``jpg`` and is therefore an
# enum alias for it: iteration yields nine canonical members, while lookup
# by either name or by the shared value still works.
# NOTE(review): 'text/txt' is not a registered MIME type (plain text is
# 'text/plain') -- confirm with consumers before changing the value.
ContentTypes = Enum(
    'ContentTypes',
    [
        ('html', 'text/html'),
        ('css', 'text/css'),
        ('js', 'text/javascript'),
        ('txt', 'text/txt'),
        ('plain', 'text/plain'),
        ('jpg', 'image/jpeg'),
        ('jpeg', 'image/jpeg'),
        ('png', 'image/png'),
        ('gif', 'image/gif'),
        ('swf', 'application/x-shockwave-flash'),
    ],
)
class ResponseStatus(Enum):
    """HTTP status lines: numeric code plus canonical reason phrase."""
    # RFC 7231 section 6.3.1 spells the 200 reason phrase "OK", not "Ok";
    # the member *name* stays ``Ok`` so existing references keep working.
    Ok = '200 OK'
    NotFound = '404 Not Found'
    NotAllowed = '405 Method Not Allowed'
    Forbidden = '403 Forbidden'
| 0 | 385 | 46 |
4c23ae3939c5011c19cd5681cefa7780e4551346 | 12,680 | py | Python | tests/unit/stream_alert_rule_processor/test_firehose.py | opsbay/streamalert | 557fb3f604661cdd9bd36486cccc8ce3a34bd1f1 | [
"Apache-2.0"
] | null | null | null | tests/unit/stream_alert_rule_processor/test_firehose.py | opsbay/streamalert | 557fb3f604661cdd9bd36486cccc8ce3a34bd1f1 | [
"Apache-2.0"
] | 1 | 2020-02-07T18:08:22.000Z | 2020-02-07T18:08:22.000Z | tests/unit/stream_alert_rule_processor/test_firehose.py | opsbay/streamalert | 557fb3f604661cdd9bd36486cccc8ce3a34bd1f1 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mock import patch
from moto import mock_kinesis
from nose.tools import assert_equal, assert_false, assert_true
from stream_alert.rule_processor.firehose import FirehoseClient
from stream_alert.shared.config import load_config
@patch('stream_alert.rule_processor.firehose.FirehoseClient.MAX_BACKOFF_ATTEMPTS', 1)
class TestFirehoseClient(object):
    """Test class for FirehoseClient"""
    # pylint: disable=protected-access,no-self-use,attribute-defined-outside-init

    def setup(self):
        """Setup before each method"""
        self.sa_firehose = FirehoseClient(region='us-east-1')

    def teardown(self):
        """Teardown after each method"""
        FirehoseClient._ENABLED_LOGS.clear()

    @staticmethod
    def _sample_categorized_payloads():
        """Return sample records for two log types, keyed by log name.

        This helper is called by several tests below but was missing; its
        body is restored from the identical sibling copy of this suite.
        The stray ``@staticmethod`` it left behind had attached itself to
        ``_mock_delivery_streams`` (which takes ``self``) and is now back
        in the right place.
        """
        return {
            'unit_test_simple_log': [{
                'unit_key_01': 1,
                'unit_key_02': 'test'
            }, {
                'unit_key_01': 2,
                'unit_key_02': 'test'
            }],
            'test_log_type_json_nested': [{
                'date': 'January 01, 3005',
                'unixtime': '32661446400',
                'host': 'my-host.name.website.com',
                'data': {
                    'super': 'secret'
                }
            }]
        }

    @mock_kinesis
    def _mock_delivery_streams(self, delivery_stream_names):
        """Mock Kinesis Delivery Streams for tests"""
        for delivery_stream in delivery_stream_names:
            self.sa_firehose._client.create_delivery_stream(
                DeliveryStreamName=delivery_stream,
                S3DestinationConfiguration={
                    'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
                    'BucketARN': 'arn:aws:s3:::kinesis-test',
                    'Prefix': '{}/'.format(delivery_stream),
                    'BufferingHints': {
                        'SizeInMBs': 123,
                        'IntervalInSeconds': 124
                    },
                    'CompressionFormat': 'Snappy',
                })

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery_failed_put_count(self, mock_logging):
        """FirehoseClient - Record Delivery - Failed Put Count"""
        # Add sample categorized payloads
        for payload_type, logs in self._sample_categorized_payloads().iteritems():
            self.sa_firehose._categorized_payloads[payload_type].extend(logs)

        # Setup mocked Delivery Streams
        self._mock_delivery_streams(
            ['streamalert_data_test_log_type_json_nested', 'streamalert_data_unit_test_simple_log'])

        # Two fully-failed batches followed by one success; with
        # MAX_BACKOFF_ATTEMPTS patched to 1 this exercises the retry path.
        with patch.object(self.sa_firehose._client, 'put_record_batch') as firehose_mock:
            firehose_mock.side_effect = [{
                'FailedPutCount':
                    3,
                'RequestResponses': [{
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }, {
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }, {
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }]
            }, {
                'FailedPutCount':
                    3,
                'RequestResponses': [{
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }, {
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }, {
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }]
            }, {
                'FailedPutCount':
                    0,
                'RequestResponses': [{
                    "RecordId": "12345678910",
                    "ErrorCode": "None",
                    "ErrorMessage": "None"
                }, {
                    "RecordId": "12345678910",
                    "ErrorCode": "None",
                    "ErrorMessage": "None"
                }, {
                    "RecordId": "12345678910",
                    "ErrorCode": "None",
                    "ErrorMessage": "None"
                }]
            }]
            self.sa_firehose.send()

            firehose_mock.assert_called()
            assert_true(mock_logging.info.called)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery(self, mock_logging):
        """FirehoseClient - Record Delivery"""
        # Add sample categorized payloads
        for payload_type, logs in self._sample_categorized_payloads().iteritems():
            self.sa_firehose._categorized_payloads[payload_type].extend(logs)

        # Setup mocked Delivery Streams
        self._mock_delivery_streams(
            ['streamalert_data_test_log_type_json_nested', 'streamalert_data_unit_test_simple_log'])

        # Send the records
        with patch.object(self.sa_firehose._client, 'put_record_batch') as firehose_mock:
            firehose_mock.return_value = {'FailedPutCount': 0}
            self.sa_firehose.send()

            firehose_mock.assert_called()
            assert_true(mock_logging.info.called)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery_failure(self, mock_logging):
        """FirehoseClient - Record Delivery - Failed PutRecord"""
        # Add sample categorized payloads
        for payload_type, logs in self._sample_categorized_payloads().iteritems():
            self.sa_firehose._categorized_payloads[payload_type].extend(logs)

        # Setup mocked Delivery Streams
        self._mock_delivery_streams(
            ['streamalert_data_test_log_type_json_nested', 'streamalert_data_unit_test_simple_log'])

        # Send the records
        with patch.object(self.sa_firehose._client, 'put_record_batch') as firehose_mock:
            firehose_mock.return_value = {
                'FailedPutCount':
                    3,
                'RequestResponses': [
                    {
                        'RecordId': '12345',
                        'ErrorCode': '300',
                        'ErrorMessage': 'Bad message!!!'
                    },
                ]
            }
            self.sa_firehose.send()

            firehose_mock.assert_called()
            assert_true(mock_logging.error.called)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery_client_error(self, mock_logging):
        """FirehoseClient - Record Delivery - Client Error"""
        test_events = [
            # unit_test_simple_log
            {
                'unit_key_01': 2,
                'unit_key_02': 'testtest'
            } for _ in range(10)
        ]

        self.sa_firehose._firehose_request_helper('invalid_stream', test_events)

        # Expected log line (for reference): 'Client Error ... An error
        # occurred (ResourceNotFoundException) when calling the
        # PutRecordBatch operation: Stream invalid_stream under account
        # 123456789012 not found.'
        # NOTE: ``Mock.called_with`` is not an assertion method -- it
        # auto-creates an always-truthy child mock, so asserting its result
        # can never fail. Assert that an error was actually logged instead.
        assert_true(mock_logging.error.called)

    @mock_kinesis
    def test_load_enabled_sources(self):
        """FirehoseClient - Load Enabled Sources"""
        config = load_config('tests/unit/conf')
        firehose_config = {
            'enabled_logs': ['json:regex_key_with_envelope', 'test_cloudtrail', 'cloudwatch']
        }  # expands to 2 logs

        enabled_logs = FirehoseClient.load_enabled_log_sources(firehose_config, config['logs'])
        assert_equal(len(enabled_logs), 4)

        # Make sure the subtitution works properly
        assert_true(all([':' not in log for log in enabled_logs]))
        assert_false(FirehoseClient.enabled_log_source('test_inspec'))

    @patch('stream_alert.rule_processor.firehose.LOGGER.error')
    @mock_kinesis
    def test_load_enabled_sources_invalid_log(self, mock_logging):
        """FirehoseClient - Load Enabled Sources - Invalid Log"""
        config = load_config('tests/unit/conf')
        firehose_config = {'enabled_logs': ['log-that-doesnt-exist']}

        sa_firehose = FirehoseClient(
            region='us-east-1', firehose_config=firehose_config, log_sources=config['logs'])

        assert_equal(len(sa_firehose._ENABLED_LOGS), 0)
        mock_logging.assert_called_with(
            'Enabled Firehose log %s not declared in logs.json', 'log-that-doesnt-exist'
        )

    def test_strip_successful_records(self):
        """FirehoseClient - Strip Successful Records"""
        batch = [{'test': 'success'}, {'test': 'data'}, {'other': 'failure'}, {'other': 'info'}]
        response = {
            'FailedPutCount': 1,
            'RequestResponses': [
                {'RecordId': 'rec_id_00'},
                {'RecordId': 'rec_id_01'},
                {'ErrorCode': 10, 'ErrorMessage': 'foo'},
                {'RecordId': 'rec_id_03'}
            ]
        }

        expected_batch = [{'other': 'failure'}]
        FirehoseClient._strip_successful_records(batch, response)

        assert_equal(batch, expected_batch)

    def test_segment_records_by_size(self):
        """FirehoseClient - Segment Large Records"""
        record_batch = [
            # unit_test_simple_log
            {
                'unit_key_01': 2,
                'unit_key_02': 'testtest' * 10000
            } for _ in range(100)
        ]

        sized_batches = []
        for sized_batch in FirehoseClient._segment_records_by_size(record_batch):
            sized_batches.append(sized_batch)

        assert_true(len(str(sized_batches[0])) < 4000000)
        assert_equal(len(sized_batches), 4)
        assert_true(isinstance(sized_batches[3][0], dict))

    def test_sanitize_keys(self):
        """FirehoseClient - Sanitize Keys"""
        # test_log_type_json_nested
        test_event = {
            'date': 'January 01, 3005',
            'unixtime': '32661446400',
            'host': 'my-host.name.website.com',
            'data': {
                'super-duper': 'secret',
                'sanitize_me': 1,
                'example-key': 1,
                'moar**data': 2,
                'even.more': 3
            }
        }

        expected_sanitized_event = {
            'date': 'January 01, 3005',
            'unixtime': '32661446400',
            'host': 'my-host.name.website.com',
            'data': {
                'super_duper': 'secret',
                'sanitize_me': 1,
                'example_key': 1,
                'moar__data': 2,
                'even_more': 3
            }
        }

        sanitized_event = FirehoseClient.sanitize_keys(test_event)
        assert_equal(sanitized_event, expected_sanitized_event)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    def test_limit_record_size(self, mock_logging):
        """FirehoseClient - Record Size Check"""
        test_events = [
            # unit_test_simple_log
            {
                'unit_key_01': 1,
                'unit_key_02': 'test' * 250001  # is 4 bytes higher than max
            },
            {
                'unit_key_01': 2,
                'unit_key_02': 'test'
            },
            # test_log_type_json_nested
            {
                'date': 'January 01, 3005',
                'unixtime': '32661446400',
                'host': 'my-host.name.website.com',
                'data': {
                    'super': 'secret'
                }
            },
            # add another unit_test_sample_log to verify in a different position
            {
                'unit_key_01': 1,
                'unit_key_02': 'test' * 250001  # is 4 bytes higher than max
            },
            {
                'test': 1
            }
        ]

        FirehoseClient._limit_record_size(test_events)

        # The two oversized records should be dropped, leaving three.
        # ``assert_true(len(test_events), 3)`` treats 3 as the failure message
        # and passes for any non-empty list; an equality check is the intended
        # assertion. (TODO confirm records are dropped rather than truncated
        # if this ever fails.)
        assert_equal(len(test_events), 3)
        assert_true(mock_logging.error.called)
| 37.40413 | 100 | 0.568375 | """
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mock import patch
from moto import mock_kinesis
from nose.tools import assert_equal, assert_false, assert_true
from stream_alert.rule_processor.firehose import FirehoseClient
from stream_alert.shared.config import load_config
@patch('stream_alert.rule_processor.firehose.FirehoseClient.MAX_BACKOFF_ATTEMPTS', 1)
class TestFirehoseClient(object):
    """Test class for FirehoseClient"""
    # pylint: disable=protected-access,no-self-use,attribute-defined-outside-init

    def setup(self):
        """Setup before each method"""
        self.sa_firehose = FirehoseClient(region='us-east-1')

    def teardown(self):
        """Teardown after each method"""
        FirehoseClient._ENABLED_LOGS.clear()

    @staticmethod
    def _sample_categorized_payloads():
        """Return sample records for two log types, keyed by log name."""
        return {
            'unit_test_simple_log': [{
                'unit_key_01': 1,
                'unit_key_02': 'test'
            }, {
                'unit_key_01': 2,
                'unit_key_02': 'test'
            }],
            'test_log_type_json_nested': [{
                'date': 'January 01, 3005',
                'unixtime': '32661446400',
                'host': 'my-host.name.website.com',
                'data': {
                    'super': 'secret'
                }
            }]
        }

    @mock_kinesis
    def _mock_delivery_streams(self, delivery_stream_names):
        """Mock Kinesis Delivery Streams for tests"""
        for delivery_stream in delivery_stream_names:
            self.sa_firehose._client.create_delivery_stream(
                DeliveryStreamName=delivery_stream,
                S3DestinationConfiguration={
                    'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
                    'BucketARN': 'arn:aws:s3:::kinesis-test',
                    'Prefix': '{}/'.format(delivery_stream),
                    'BufferingHints': {
                        'SizeInMBs': 123,
                        'IntervalInSeconds': 124
                    },
                    'CompressionFormat': 'Snappy',
                })

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery_failed_put_count(self, mock_logging):
        """FirehoseClient - Record Delivery - Failed Put Count"""
        # Add sample categorized payloads
        for payload_type, logs in self._sample_categorized_payloads().iteritems():
            self.sa_firehose._categorized_payloads[payload_type].extend(logs)

        # Setup mocked Delivery Streams
        self._mock_delivery_streams(
            ['streamalert_data_test_log_type_json_nested', 'streamalert_data_unit_test_simple_log'])

        # Two fully-failed batches followed by one success; with
        # MAX_BACKOFF_ATTEMPTS patched to 1 this exercises the retry path.
        with patch.object(self.sa_firehose._client, 'put_record_batch') as firehose_mock:
            firehose_mock.side_effect = [{
                'FailedPutCount':
                    3,
                'RequestResponses': [{
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }, {
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }, {
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }]
            }, {
                'FailedPutCount':
                    3,
                'RequestResponses': [{
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }, {
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }, {
                    "ErrorCode": "ServiceUnavailableException",
                    "ErrorMessage": "Slow down."
                }]
            }, {
                'FailedPutCount':
                    0,
                'RequestResponses': [{
                    "RecordId": "12345678910",
                    "ErrorCode": "None",
                    "ErrorMessage": "None"
                }, {
                    "RecordId": "12345678910",
                    "ErrorCode": "None",
                    "ErrorMessage": "None"
                }, {
                    "RecordId": "12345678910",
                    "ErrorCode": "None",
                    "ErrorMessage": "None"
                }]
            }]
            self.sa_firehose.send()

            firehose_mock.assert_called()
            assert_true(mock_logging.info.called)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery(self, mock_logging):
        """FirehoseClient - Record Delivery"""
        # Add sample categorized payloads
        for payload_type, logs in self._sample_categorized_payloads().iteritems():
            self.sa_firehose._categorized_payloads[payload_type].extend(logs)

        # Setup mocked Delivery Streams
        self._mock_delivery_streams(
            ['streamalert_data_test_log_type_json_nested', 'streamalert_data_unit_test_simple_log'])

        # Send the records
        with patch.object(self.sa_firehose._client, 'put_record_batch') as firehose_mock:
            firehose_mock.return_value = {'FailedPutCount': 0}
            self.sa_firehose.send()

            firehose_mock.assert_called()
            assert_true(mock_logging.info.called)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery_failure(self, mock_logging):
        """FirehoseClient - Record Delivery - Failed PutRecord"""
        # Add sample categorized payloads
        for payload_type, logs in self._sample_categorized_payloads().iteritems():
            self.sa_firehose._categorized_payloads[payload_type].extend(logs)

        # Setup mocked Delivery Streams
        self._mock_delivery_streams(
            ['streamalert_data_test_log_type_json_nested', 'streamalert_data_unit_test_simple_log'])

        # Send the records
        with patch.object(self.sa_firehose._client, 'put_record_batch') as firehose_mock:
            firehose_mock.return_value = {
                'FailedPutCount':
                    3,
                'RequestResponses': [
                    {
                        'RecordId': '12345',
                        'ErrorCode': '300',
                        'ErrorMessage': 'Bad message!!!'
                    },
                ]
            }
            self.sa_firehose.send()

            firehose_mock.assert_called()
            assert_true(mock_logging.error.called)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery_client_error(self, mock_logging):
        """FirehoseClient - Record Delivery - Client Error"""
        test_events = [
            # unit_test_simple_log
            {
                'unit_key_01': 2,
                'unit_key_02': 'testtest'
            } for _ in range(10)
        ]

        self.sa_firehose._firehose_request_helper('invalid_stream', test_events)

        # Expected log line (for reference): 'Client Error ... An error
        # occurred (ResourceNotFoundException) when calling the
        # PutRecordBatch operation: Stream invalid_stream under account
        # 123456789012 not found.'
        # NOTE: ``Mock.called_with`` is not an assertion method -- it
        # auto-creates an always-truthy child mock, so asserting its result
        # can never fail. Assert that an error was actually logged instead.
        assert_true(mock_logging.error.called)

    @mock_kinesis
    def test_load_enabled_sources(self):
        """FirehoseClient - Load Enabled Sources"""
        config = load_config('tests/unit/conf')
        firehose_config = {
            'enabled_logs': ['json:regex_key_with_envelope', 'test_cloudtrail', 'cloudwatch']
        }  # expands to 2 logs

        enabled_logs = FirehoseClient.load_enabled_log_sources(firehose_config, config['logs'])
        assert_equal(len(enabled_logs), 4)

        # Make sure the subtitution works properly
        assert_true(all([':' not in log for log in enabled_logs]))
        assert_false(FirehoseClient.enabled_log_source('test_inspec'))

    @patch('stream_alert.rule_processor.firehose.LOGGER.error')
    @mock_kinesis
    def test_load_enabled_sources_invalid_log(self, mock_logging):
        """FirehoseClient - Load Enabled Sources - Invalid Log"""
        config = load_config('tests/unit/conf')
        firehose_config = {'enabled_logs': ['log-that-doesnt-exist']}

        sa_firehose = FirehoseClient(
            region='us-east-1', firehose_config=firehose_config, log_sources=config['logs'])

        assert_equal(len(sa_firehose._ENABLED_LOGS), 0)
        mock_logging.assert_called_with(
            'Enabled Firehose log %s not declared in logs.json', 'log-that-doesnt-exist'
        )

    def test_strip_successful_records(self):
        """FirehoseClient - Strip Successful Records"""
        batch = [{'test': 'success'}, {'test': 'data'}, {'other': 'failure'}, {'other': 'info'}]
        response = {
            'FailedPutCount': 1,
            'RequestResponses': [
                {'RecordId': 'rec_id_00'},
                {'RecordId': 'rec_id_01'},
                {'ErrorCode': 10, 'ErrorMessage': 'foo'},
                {'RecordId': 'rec_id_03'}
            ]
        }

        expected_batch = [{'other': 'failure'}]
        FirehoseClient._strip_successful_records(batch, response)

        assert_equal(batch, expected_batch)

    def test_segment_records_by_size(self):
        """FirehoseClient - Segment Large Records"""
        record_batch = [
            # unit_test_simple_log
            {
                'unit_key_01': 2,
                'unit_key_02': 'testtest' * 10000
            } for _ in range(100)
        ]

        sized_batches = []
        for sized_batch in FirehoseClient._segment_records_by_size(record_batch):
            sized_batches.append(sized_batch)

        assert_true(len(str(sized_batches[0])) < 4000000)
        assert_equal(len(sized_batches), 4)
        assert_true(isinstance(sized_batches[3][0], dict))

    def test_sanitize_keys(self):
        """FirehoseClient - Sanitize Keys"""
        # test_log_type_json_nested
        test_event = {
            'date': 'January 01, 3005',
            'unixtime': '32661446400',
            'host': 'my-host.name.website.com',
            'data': {
                'super-duper': 'secret',
                'sanitize_me': 1,
                'example-key': 1,
                'moar**data': 2,
                'even.more': 3
            }
        }

        expected_sanitized_event = {
            'date': 'January 01, 3005',
            'unixtime': '32661446400',
            'host': 'my-host.name.website.com',
            'data': {
                'super_duper': 'secret',
                'sanitize_me': 1,
                'example_key': 1,
                'moar__data': 2,
                'even_more': 3
            }
        }

        sanitized_event = FirehoseClient.sanitize_keys(test_event)
        assert_equal(sanitized_event, expected_sanitized_event)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    def test_limit_record_size(self, mock_logging):
        """FirehoseClient - Record Size Check"""
        test_events = [
            # unit_test_simple_log
            {
                'unit_key_01': 1,
                'unit_key_02': 'test' * 250001  # is 4 bytes higher than max
            },
            {
                'unit_key_01': 2,
                'unit_key_02': 'test'
            },
            # test_log_type_json_nested
            {
                'date': 'January 01, 3005',
                'unixtime': '32661446400',
                'host': 'my-host.name.website.com',
                'data': {
                    'super': 'secret'
                }
            },
            # add another unit_test_sample_log to verify in a different position
            {
                'unit_key_01': 1,
                'unit_key_02': 'test' * 250001  # is 4 bytes higher than max
            },
            {
                'test': 1
            }
        ]

        FirehoseClient._limit_record_size(test_events)

        # The two oversized records should be dropped, leaving three.
        # ``assert_true(len(test_events), 3)`` treats 3 as the failure message
        # and passes for any non-empty list; an equality check is the intended
        # assertion. (TODO confirm records are dropped rather than truncated
        # if this ever fails.)
        assert_equal(len(test_events), 3)
        assert_true(mock_logging.error.called)
| 537 | 0 | 26 |
bf46b0f8d343d438e6b89354701e2ac2d3c1bca7 | 655 | py | Python | netdevice/migrations/0002_auto_20180511_0619.py | lkmhaqer/gtools-python | cff6d80525b78a4fadfb686566489fbe1687d889 | [
"MIT"
] | 5 | 2016-10-31T17:46:17.000Z | 2022-02-02T00:40:49.000Z | netdevice/migrations/0002_auto_20180511_0619.py | lkmhaqer/gtools-python | cff6d80525b78a4fadfb686566489fbe1687d889 | [
"MIT"
] | 33 | 2018-05-09T06:07:50.000Z | 2021-09-22T17:39:56.000Z | netdevice/migrations/0002_auto_20180511_0619.py | lkmhaqer/gtools-python | cff6d80525b78a4fadfb686566489fbe1687d889 | [
"MIT"
] | 1 | 2020-05-14T21:44:25.000Z | 2020-05-14T21:44:25.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-05-11 06:19
from __future__ import unicode_literals
from django.db import migrations
| 21.833333 | 46 | 0.564885 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-05-11 06:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('netdevice', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='ipv4_address',
name='interface',
),
migrations.RemoveField(
model_name='ipv6_address',
name='interface',
),
migrations.DeleteModel(
name='ipv4_address',
),
migrations.DeleteModel(
name='ipv6_address',
),
]
| 0 | 486 | 23 |
b1d6075f1cad4b78485b993cebfa8d98fe3abffb | 2,167 | py | Python | tests/test_utilities.py | yohokuno/dl4nlp | 818db943835195397cd999e98806cabdc3499c19 | [
"MIT"
] | 41 | 2016-08-10T11:26:10.000Z | 2020-02-13T19:01:12.000Z | tests/test_utilities.py | nokuno/dl4nlp | 818db943835195397cd999e98806cabdc3499c19 | [
"MIT"
] | null | null | null | tests/test_utilities.py | nokuno/dl4nlp | 818db943835195397cd999e98806cabdc3499c19 | [
"MIT"
] | 8 | 2016-02-21T17:40:10.000Z | 2016-06-24T03:23:27.000Z | import unittest
import numpy as np
from math import exp
from scipy.special import expit
from dl4nlp.utilities import sigmoid_gradient, softmax
if __name__ == '__main__':
unittest.main()
| 38.696429 | 85 | 0.601754 | import unittest
import numpy as np
from math import exp
from scipy.special import expit
from dl4nlp.utilities import sigmoid_gradient, softmax
class TestUtilities(unittest.TestCase):
def assertDistribution(self, distribution):
self.assertTrue(all(distribution >= 0.0))
self.assertTrue(all(distribution <= 1.0))
self.assertEqual(1.0, np.sum(distribution))
def assertNumpyEqual(self, expect, actual):
self.assertEqual(expect.shape, actual.shape)
if expect.shape == (): # This is scalar!
self.assertAlmostEqual(expect, actual)
else: # This is array
for e, a in zip(expect, actual):
self.assertNumpyEqual(e, a)
def test_softmax(self):
# softmax should receive numpy array and return normalized vector
expect = np.array([exp(1) / (exp(1) + exp(2)), exp(2) / (exp(1) + exp(2))])
actual = softmax(np.array([1, 2]))
self.assertDistribution(actual)
self.assertNumpyEqual(expect, actual)
# softmax should be invariant to constant offsets in the input
# softmax should be able to handle very large or small values
actual = softmax(np.array([1001, 1002]))
self.assertNumpyEqual(expect, actual)
actual = softmax(np.array([-1002, -1001]))
self.assertNumpyEqual(expect, actual)
# softmax should receive matrix and return matrix of same size
expect = np.array([[exp(1) / (exp(1) + exp(2)), exp(2) / (exp(1) + exp(2))],
[exp(1) / (exp(1) + exp(2)), exp(2) / (exp(1) + exp(2))]])
actual = softmax(np.array([[1, 2], [3, 4]]))
self.assertNumpyEqual(expect, actual)
def test_sigmoid(self):
x = np.array([[1, 2], [-1, -2]])
f = expit(x)
g = sigmoid_gradient(f)
expected = np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])
self.assertNumpyEqual(expected, f)
expected = np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])
self.assertNumpyEqual(expected, g)
if __name__ == '__main__':
unittest.main()
| 1,827 | 18 | 130 |
84fce317899af0d32f08b19c33ca9515797bc5c0 | 7,563 | py | Python | bleu/bleu.py | SnehaMondal/bleu | dddeac9bddf5f3eca98d7e68f16870eee5e2bfbf | [
"BSD-3-Clause"
] | 4 | 2019-11-25T06:22:50.000Z | 2019-12-24T06:27:44.000Z | bleu/bleu.py | SnehaMondal/bleu | dddeac9bddf5f3eca98d7e68f16870eee5e2bfbf | [
"BSD-3-Clause"
] | 3 | 2019-12-24T13:19:12.000Z | 2021-03-16T11:53:19.000Z | bleu/bleu.py | zhijing-jin/nlp-evaluation | a129eb67ec844cdbf0e6c7bc0ab8a611b07ef667 | [
"BSD-3-Clause"
] | 2 | 2020-03-12T10:42:29.000Z | 2020-06-20T07:22:45.000Z | '''
author = Zhijing Jin (zhijing.jin@connect.hku.hk)
date = Aug 24, 2019
How to Run:
python bleu.py \
-refs data/ref0.txt data/ref1.txt -hyps data/hyp0.txt
'''
from __future__ import print_function, division
import os
import json
import argparse
from .download import TMP_DIR, DETOK_FILE, BLEU_DETOK_FILE, BLEU_FILE
def multi_file_bleu(ref_files, hyp_files, detok=True, verbose=False):
'''
This is to get the average BLEU for hyp among ref0, ref1, ref2, ...
:param hyp_files: a list of filenames for hypothesis
:param ref_files: a list of filenames for references
:return: print a bleu score
'''
from efficiency.function import shell
# check for wrong input of ref_list, and correct it
if isinstance(ref_files, str):
ref_files = [ref_files]
ref_files, hyp_files = \
preprocess_files(ref_files, hyp_files, verbose=verbose)
outputs = []
script = BLEU_DETOK_FILE if detok else BLEU_FILE
for hyp in hyp_files:
cmd = 'perl {script} {refs} < {hyp} '.format(
script=script,refs=' '.join(ref_files), hyp=hyp)
if verbose: print('[cmd]', cmd)
stdout, stderr = shell(cmd)
bleu_prefix = 'BLEU = '
if verbose and not stdout.startswith(bleu_prefix):
print(stdout)
if bleu_prefix in stdout:
num = stdout.split(bleu_prefix, 1)[-1].split(',')[0]
output = float(num)
else:
# if stdout.startswith('Illegal division by zero'):
output = -1
outputs += [output]
if verbose:
print('{}-ref bleu for {}: {}'.format(len(ref_files), hyp, output))
return outputs
def detok_files(files_in, tmp_dir=TMP_DIR, file_prefix='detok', verbose=False):
'''
This is to detokenize all files
:param files: a list of filenames
:return: a list of files after detokenization
'''
files_out = []
if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir)
for ix, f_in in enumerate(files_in):
f_out = os.path.join(tmp_dir, '{}{}.txt'.format(file_prefix, ix))
files_out.append(f_out)
cmd = 'perl {DETOK_FILE} -l en < {f_in} > {f_out} 2>/dev/null'.format(
DETOK_FILE=DETOK_FILE, f_in=f_in, f_out=f_out)
if verbose: print('[cmd]', cmd)
os.system(cmd)
return files_out
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-refs', default=['data/ref0.txt', 'data/ref1.txt'],
nargs='+', type=str,
help='a list of filenames for reference files, separated by space')
parser.add_argument('-hyps', default=['data/hyp0.txt'], nargs='+', type=str,
help='a list of filenames for hypothesis files, separated by space')
parser.add_argument('-data_dir', default='', type=str,
help='directory to save temporary outputs')
parser.add_argument('-verbose', action='store_true',
help='whether to allow printing out logs')
args = parser.parse_args()
main(args)
| 33.171053 | 133 | 0.622108 | '''
author = Zhijing Jin (zhijing.jin@connect.hku.hk)
date = Aug 24, 2019
How to Run:
python bleu.py \
-refs data/ref0.txt data/ref1.txt -hyps data/hyp0.txt
'''
from __future__ import print_function, division
import os
import json
import argparse
from .download import TMP_DIR, DETOK_FILE, BLEU_DETOK_FILE, BLEU_FILE
def file_bleu(ref_files, hyp_file, detok=True, verbose=False):
bleus = multi_file_bleu(ref_files, [hyp_file], detok=detok, verbose=verbose)
return bleus[0]
def multi_file_bleu(ref_files, hyp_files, detok=True, verbose=False):
'''
This is to get the average BLEU for hyp among ref0, ref1, ref2, ...
:param hyp_files: a list of filenames for hypothesis
:param ref_files: a list of filenames for references
:return: print a bleu score
'''
from efficiency.function import shell
# check for wrong input of ref_list, and correct it
if isinstance(ref_files, str):
ref_files = [ref_files]
ref_files, hyp_files = \
preprocess_files(ref_files, hyp_files, verbose=verbose)
outputs = []
script = BLEU_DETOK_FILE if detok else BLEU_FILE
for hyp in hyp_files:
cmd = 'perl {script} {refs} < {hyp} '.format(
script=script,refs=' '.join(ref_files), hyp=hyp)
if verbose: print('[cmd]', cmd)
stdout, stderr = shell(cmd)
bleu_prefix = 'BLEU = '
if verbose and not stdout.startswith(bleu_prefix):
print(stdout)
if bleu_prefix in stdout:
num = stdout.split(bleu_prefix, 1)[-1].split(',')[0]
output = float(num)
else:
# if stdout.startswith('Illegal division by zero'):
output = -1
outputs += [output]
if verbose:
print('{}-ref bleu for {}: {}'.format(len(ref_files), hyp, output))
return outputs
def list_bleu(refs, hyp, detok=True, tmp_dir=TMP_DIR, verbose=False, return_files=False):
# check for wrong input of ref_list, and correct it
for ref_list in refs:
if isinstance(ref_list, str):
refs = [refs]
break
import uuid
uid = str(uuid.uuid4())
folder = os.path.join(tmp_dir, uid)
try:
os.mkdir(folder)
ref_files, hyp_files = lists2files(refs, [hyp], tmp_dir=folder)
bleus = multi_file_bleu(ref_files=ref_files, hyp_files=hyp_files,
detok=detok, verbose=verbose)
bleu = bleus[0]
finally:
if not return_files:
import shutil
shutil.rmtree(folder)
if return_files:
hyp_file = hyp_files[0]
return bleu, ref_files, hyp_file
else:
return bleu
def multi_list_bleu(refs, hyps, detok=True, tmp_dir=TMP_DIR, verbose=False, return_files=False):
# check for wrong input of ref_list, and correct it
for ref_list in refs:
if isinstance(ref_list, str):
refs = [refs]
break
import uuid
uid = str(uuid.uuid4())
folder = os.path.join(tmp_dir, uid)
try:
os.mkdir(folder)
ref_files, hyp_files = lists2files(refs, hyps, tmp_dir=folder)
bleus = multi_file_bleu(ref_files=ref_files, hyp_files=hyp_files,
detok=detok, verbose=verbose)
finally:
if not return_files:
import shutil
shutil.rmtree(folder)
if return_files:
return bleus, ref_files, hyp_files
else:
return bleus
def lists2files(refs, hyps, tmp_dir=TMP_DIR):
def _list2file(sents, file):
writeout = '\n'.join(sents) + '\n'
with open(file, 'w') as f:
f.write(writeout)
ref_files = [os.path.join(tmp_dir, 'ref{}.txt'.format(ref_ix))
for ref_ix, _ in enumerate(refs)]
hyp_files = [os.path.join(tmp_dir, 'hyp{}.txt'.format(hyp_ix))
for hyp_ix, _ in enumerate(hyps)]
_ = [_list2file(*item) for item in zip(refs, ref_files)]
_ = [_list2file(*item) for item in zip(hyps, hyp_files)]
return ref_files, hyp_files
def preprocess_files(ref_files, hyp_files, verbose=False):
# Step 1. Check whether all files exist
valid_refs = [f for f in ref_files if os.path.isfile(f)]
valid_hyps = [f for f in hyp_files if os.path.isfile(f)]
if verbose:
print('[Info] Valid Reference Files: {}'.format(str(valid_refs)))
print('[Info] Valid Hypothesis Files: {}'.format(str(valid_hyps)))
# Step 2. Check whether all files has the same num of lines
num_lines = []
files = valid_refs + valid_hyps
for file in files:
with open(file) as f:
lines = [line.strip() for line in f]
num_lines += [len(lines)]
if len(set(num_lines)) != 1:
raise RuntimeError("[Error] File lengths are different! list(zip(files, num_lines)): {}".format(list(zip(files, num_lines))))
if verbose:
print("[Info] #lines in each file: {}".format(num_lines[0]))
# Step 3. detokenization
valid_refs = detok_files(valid_refs, tmp_dir=TMP_DIR, file_prefix='ref_dtk', verbose=verbose)
valid_hyps = detok_files(valid_hyps, tmp_dir=TMP_DIR, file_prefix='hyp_dtk', verbose=verbose)
return valid_refs, valid_hyps
def detok_files(files_in, tmp_dir=TMP_DIR, file_prefix='detok', verbose=False):
    '''
    Detokenize every file in *files_in* with the Moses detokenizer script.

    :param files_in: a list of input filenames
    :param tmp_dir: directory for the detokenized copies (created, including
                    missing parents, if it does not exist)
    :param file_prefix: basename prefix for the output files
    :param verbose: if True, print each shell command before running it
    :return: a list of output filenames, parallel to *files_in*
    '''
    import shlex  # local import: only used to shell-quote the paths below
    files_out = []
    # os.makedirs with exist_ok=True also creates missing parent directories,
    # which the previous plain os.mkdir call would fail on.
    os.makedirs(tmp_dir, exist_ok=True)
    for ix, f_in in enumerate(files_in):
        f_out = os.path.join(tmp_dir, '{}{}.txt'.format(file_prefix, ix))
        files_out.append(f_out)
        # Quote all paths so filenames containing spaces or shell
        # metacharacters cannot break (or inject into) the shell command.
        cmd = 'perl {DETOK_FILE} -l en < {f_in} > {f_out} 2>/dev/null'.format(
            DETOK_FILE=shlex.quote(DETOK_FILE), f_in=shlex.quote(f_in),
            f_out=shlex.quote(f_out))
        if verbose: print('[cmd]', cmd)
        os.system(cmd)
    return files_out
def main(args=None):
    """Smoke-test the BLEU helpers on a tiny built-in example, then
    (optionally) score the files named on the command line.

    :param args: parsed argparse namespace with ``.refs``, ``.hyps`` and
                 ``.verbose``, or None to run only the built-in example.
    """
    refs = [['it is a white cat .',
             'wow , this dog is huge .'],
            ['This cat is white .',
             'wow , this is a huge dog .']]
    hyp = ['it is a white kitten .',
           'wowww , the dog is huge !']
    hyp2 = ["it 's a white kitten .",
            'wow , this dog is huge !']
    # BUGFIX: removed a leftover `import pdb;pdb.set_trace()` breakpoint that
    # halted every invocation of this function at this point.
    bleus, ref_files, hyp_files = multi_list_bleu(refs, [hyp, hyp2], detok=True, verbose=True, return_files=True)
    bleu = list_bleu(refs, hyp, detok=True, verbose=False)
    bleus = multi_file_bleu(ref_files, hyp_files, detok=True, verbose=True)
    bleu = file_bleu(ref_files, hyp_files[0], detok=True, verbose=True)
    if args is not None:
        if args.verbose:
            print(json.dumps(vars(args), indent=4, sort_keys=True))
        outputs = multi_file_bleu(args.refs, args.hyps, verbose=args.verbose)
        print("All BLEUs:", outputs)
if __name__ == "__main__":
    # CLI entry point: score one or more hypothesis files against one or
    # more reference files and print the resulting BLEU scores.
    parser = argparse.ArgumentParser()
    parser.add_argument('-refs', default=['data/ref0.txt', 'data/ref1.txt'],
                        nargs='+', type=str,
                        help='a list of filenames for reference files, separated by space')
    parser.add_argument('-hyps', default=['data/hyp0.txt'], nargs='+', type=str,
                        help='a list of filenames for hypothesis files, separated by space')
    # NOTE(review): -data_dir is accepted but never read by main() — confirm
    # whether it is still needed or should be wired through.
    parser.add_argument('-data_dir', default='', type=str,
                        help='directory to save temporary outputs')
    parser.add_argument('-verbose', action='store_true',
                        help='whether to allow printing out logs')
    args = parser.parse_args()
    main(args)
| 4,325 | 0 | 138 |
21c669d69edbbac66ebd85a74a2aa9f1421d4b4a | 170 | py | Python | pages/PendingPage.py | jasonlfchen/python_framework | b174cb02fe0b62e872e2f3bffed834d9f89e5584 | [
"MIT"
] | 1 | 2019-05-15T05:57:29.000Z | 2019-05-15T05:57:29.000Z | pages/PendingPage.py | jasonlfchen/python_framework | b174cb02fe0b62e872e2f3bffed834d9f89e5584 | [
"MIT"
] | null | null | null | pages/PendingPage.py | jasonlfchen/python_framework | b174cb02fe0b62e872e2f3bffed834d9f89e5584 | [
"MIT"
] | null | null | null | __author__ = 'Yunxi Lin'
from pages.BasePage import BasePage | 28.333333 | 50 | 0.747059 | __author__ = 'Yunxi Lin'
from pages.BasePage import BasePage
class PendingPage(BasePage):
def print_display(self):
self.logger.info('Pending Page Displayed') | 54 | 7 | 48 |
7ce271ff191ba5eeb0e9f491ae0a3c57439fec19 | 7,467 | py | Python | venv/Lib/site-packages/PySide6/examples/widgets/state-machine/rogue.py | gabrielcervante/live-sharer | 6fb33d5663849734caddcba8d439244c03693e63 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/PySide6/examples/widgets/state-machine/rogue.py | gabrielcervante/live-sharer | 6fb33d5663849734caddcba8d439244c03693e63 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/PySide6/examples/widgets/state-machine/rogue.py | gabrielcervante/live-sharer | 6fb33d5663849734caddcba8d439244c03693e63 | [
"MIT"
] | null | null | null | #############################################################################
##
## Copyright (C) 2010 velociraptor Genjix <aphidia@hotmail.com>
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
from PySide6.QtWidgets import *
from PySide6.QtGui import *
from PySide6.QtCore import *
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
mainWin = MainWindow()
sys.exit(app.exec_())
| 36.783251 | 87 | 0.602384 | #############################################################################
##
## Copyright (C) 2010 velociraptor Genjix <aphidia@hotmail.com>
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
from PySide6.QtWidgets import *
from PySide6.QtGui import *
from PySide6.QtCore import *
class MovementTransition(QEventTransition):
    """Key-press transition that moves the rogue with the numeric pad:
    4 = left, 8 = up, 6 = right, 2 = down. All other keys are rejected."""

    _MOVE_KEYS = (Qt.Key_2, Qt.Key_4, Qt.Key_6, Qt.Key_8)

    def __init__(self, window):
        super(MovementTransition, self).__init__(window, QEvent.KeyPress)
        self.window = window

    def eventTest(self, event):
        # Only wrapped key-press events for one of the four movement keys
        # may trigger this transition.
        if event.type() != QEvent.StateMachineWrapped:
            return False
        inner = event.event()
        return inner.type() == QEvent.KeyPress and inner.key() in self._MOVE_KEYS

    def onTransition(self, event):
        # Dispatch table instead of an if-chain; eventTest() guarantees the
        # key is one of the four entries below.
        directions = {
            Qt.Key_4: self.window.Left,
            Qt.Key_8: self.window.Up,
            Qt.Key_6: self.window.Right,
            Qt.Key_2: self.window.Down,
        }
        self.window.movePlayer(directions[event.event().key()])
class Custom(QState):
    """QState subclass that prints the main window's status text on entry
    (used to trace state-machine activity in this demo)."""
    def __init__(self, parent, mw):
        super(Custom, self).__init__(parent)
        self.mw = mw  # MainWindow whose `status` property is echoed
    def onEntry(self, e):
        # Called by the state machine each time this state becomes active.
        print(self.mw.status)
class MainWindow(QMainWindow):
    """Minimal rogue-like demo: draws an ASCII map and moves '@' around it.

    A QStateMachine drives the UI: the input state handles the movement keys,
    'q' enters a confirm-quit state, 'y' reaches the final state (quitting the
    application) and 'n' returns to the input state.
    """

    def __init__(self):
        super(MainWindow, self).__init__()
        self.pX = 5          # player column
        self.pY = 5          # player row
        self.width = 35      # map size in cells (shadows class attrs Width/Height)
        self.height = 20
        self.statusStr = ''
        database = QFontDatabase()
        font = QFont()
        # Prefer a fixed-pitch font so the map columns line up.
        if 'Monospace' in database.families():
            font = QFont('Monospace', 12)
        else:
            for family in database.families():
                if database.isFixedPitch(family):
                    font = QFont(family, 12)
        self.setFont(font)
        self.setupMap()
        self.buildMachine()
        self.show()

    def setupMap(self):
        """Generate a random map: '#' walls on the border and on roughly
        1/40 of the interior cells, '.' floor everywhere else."""
        self.map = []
        qsrand(QTime(0, 0, 0).secsTo(QTime.currentTime()))
        for x in range(self.width):
            column = []
            for y in range(self.height):
                if x == 0 or x == self.width - 1 or y == 0 or \
                        y == self.height - 1 or qrand() % 40 == 0:
                    column.append('#')
                else:
                    column.append('.')
            self.map.append(column)

    def buildMachine(self):
        """Build and start the state machine (input -> confirm-quit -> final)."""
        machine = QStateMachine(self)
        inputState = Custom(machine, self)
        # this line sets the status
        self.status = 'hello!'
        # however this line does not
        inputState.assignProperty(self, 'status', 'Move the rogue with 2, 4, 6, and 8')
        transition = MovementTransition(self)
        inputState.addTransition(transition)
        quitState = QState(machine)
        quitState.assignProperty(self, 'status', 'Really quit(y/n)?')
        yesTransition = QKeyEventTransition(self, QEvent.KeyPress, Qt.Key_Y)
        self.finalState = QFinalState(machine)
        yesTransition.setTargetState(self.finalState)
        quitState.addTransition(yesTransition)
        noTransition = QKeyEventTransition(self, QEvent.KeyPress, Qt.Key_N)
        noTransition.setTargetState(inputState)
        quitState.addTransition(noTransition)
        quitTransition = QKeyEventTransition(self, QEvent.KeyPress, Qt.Key_Q)
        quitTransition.setTargetState(quitState)
        inputState.addTransition(quitTransition)
        # BUGFIX: the machine used to be started once *before* the transitions
        # above were added, and then started a second time here (the second
        # start() on a running machine is a no-op). Configure fully, then
        # start exactly once.
        machine.setInitialState(inputState)
        machine.finished.connect(qApp.quit)
        machine.start()

    def sizeHint(self):
        """Size the window to fit the map plus one status line."""
        metrics = QFontMetrics(self.font())
        return QSize(metrics.horizontalAdvance('X') * self.width,
                     metrics.height() * (self.height + 1))

    def paintEvent(self, event):
        """Draw the status line, the map, and the player glyph '@'."""
        metrics = QFontMetrics(self.font())
        painter = QPainter(self)
        fontHeight = metrics.height()
        fontWidth = metrics.horizontalAdvance('X')
        painter.fillRect(self.rect(), Qt.black)
        painter.setPen(Qt.white)
        yPos = fontHeight
        painter.drawText(QPoint(0, yPos), self.status)
        for y in range(self.height):
            yPos += fontHeight
            xPos = 0
            for x in range(self.width):
                # Skip the player's cell; '@' is drawn separately below.
                if y == self.pY and x == self.pX:
                    xPos += fontWidth
                    continue
                painter.drawText(QPoint(xPos, yPos), self.map[x][y])
                xPos += fontWidth
        painter.drawText(QPoint(self.pX * fontWidth, (self.pY + 2) * fontHeight), '@')

    def movePlayer(self, direction):
        """Move the player one cell in `direction` unless a wall blocks it."""
        if direction == self.Left:
            if self.map[self.pX - 1][self.pY] != '#':
                self.pX -= 1
        elif direction == self.Right:
            if self.map[self.pX + 1][self.pY] != '#':
                self.pX += 1
        elif direction == self.Up:
            if self.map[self.pX][self.pY - 1] != '#':
                self.pY -= 1
        elif direction == self.Down:
            if self.map[self.pX][self.pY + 1] != '#':
                self.pY += 1
        self.repaint()

    def getStatus(self):
        return self.statusStr

    def setStatus(self, status):
        # Repaint so the new status line becomes visible immediately.
        self.statusStr = status
        self.repaint()

    # Qt property backing the status line; assignProperty() targets this.
    status = Property(str, getStatus, setStatus)

    # Direction constants used by movePlayer()/MovementTransition.
    Up = 0
    Down = 1
    Left = 2
    Right = 3
    Width = 35
    Height = 20
if __name__ == '__main__':
    # Launch the rogue-like demo: create the window and enter the Qt loop.
    import sys
    app = QApplication(sys.argv)
    mainWin = MainWindow()
    sys.exit(app.exec_())
| 4,643 | 372 | 200 |
c17238e3ea6d5c3bbcb53e0f74878691432e6488 | 617 | py | Python | Auto Correct/image.py | uttamkeshri786/Communication-and-Education-Hub-For-Specially-Abled | d20f1f0c0c7fb367267643e702b9f3c5d70e9004 | [
"MIT"
] | null | null | null | Auto Correct/image.py | uttamkeshri786/Communication-and-Education-Hub-For-Specially-Abled | d20f1f0c0c7fb367267643e702b9f3c5d70e9004 | [
"MIT"
] | null | null | null | Auto Correct/image.py | uttamkeshri786/Communication-and-Education-Hub-For-Specially-Abled | d20f1f0c0c7fb367267643e702b9f3c5d70e9004 | [
"MIT"
] | null | null | null | from google_images_download import google_images_download
# creating object
response = google_images_download.googleimagesdownload()
search_queries =['apple','cat','appropriate','exorbitant']
for query in search_queries:
downloadimages(query)
print()
| 24.68 | 60 | 0.589951 | from google_images_download import google_images_download
# creating object
response = google_images_download.googleimagesdownload()
search_queries =['apple','cat','appropriate','exorbitant']
def downloadimages(query):
    # Download images for `query` via the module-level `response` downloader.
    arguments = {"keywords": query}
    try:
        response.download(arguments)
    except FileNotFoundError:
        # NOTE(review): this "retry" re-runs the download with *identical*
        # arguments, so it will usually fail the same way, and any error from
        # the retry is silently swallowed by the bare except. Confirm whether
        # the retry was meant to change e.g. the output directory.
        arguments = {"keywords": query}
        try:
            response.download(arguments)
        except:
            pass
# Fetch images for every configured search query, one query at a time.
for query in search_queries:
    downloadimages(query)
    print()
| 299 | 0 | 27 |
d2c270b297614b45177ca27bf5f9f77a7a6584d4 | 6,825 | py | Python | indy_node/test/auth_rule/test_auth_txn_with_deprecated_key.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 627 | 2017-07-06T12:38:08.000Z | 2022-03-30T13:18:43.000Z | indy_node/test/auth_rule/test_auth_txn_with_deprecated_key.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 580 | 2017-06-29T17:59:57.000Z | 2022-03-29T21:37:52.000Z | indy_node/test/auth_rule/test_auth_txn_with_deprecated_key.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 704 | 2017-06-29T17:45:34.000Z | 2022-03-30T07:08:58.000Z | import shutil
from contextlib import contextmanager
import pytest
from indy_common.config_helper import NodeConfigHelper
from indy_node.test.helper import TestNode
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.test_node import ensureElectionsDone, ensure_node_disconnected, checkNodesConnected
from indy_node.test.auth_rule.helper import sdk_send_and_check_auth_rule_request, sdk_send_and_check_get_auth_rule_request
from indy_common.authorize.auth_actions import ADD_PREFIX, AuthActionAdd
from indy_common.authorize.auth_constraints import AuthConstraint, ROLE
from indy_common.constants import CONSTRAINT, AUTH_TYPE, CONFIG_LEDGER_ID, NYM
from indy_common.authorize.auth_map import one_trustee_constraint
from plenum.common.constants import STEWARD, DATA
from plenum.common.exceptions import RequestNackedException
@contextmanager
def extend_auth_map(nodes, key, constraint):
    """
    Context manager to add a new auth rule to the auth map and remove it on exit.

    :param nodes: nodes list which auth maps should be changed
    :param key: str gotten from AuthActionAdd(...).get_action_id()
    :param constraint: AuthConstraint
    """
    for node in nodes:
        node.write_req_validator.auth_map[key] = constraint
    try:
        yield
    finally:
        # BUGFIX: the temporary rule is now removed even when the body of the
        # `with` block raises; previously an exception left the fake rule in
        # every node's auth map, leaking state into subsequent tests.
        for node in nodes:
            node.write_req_validator.auth_map.pop(key, None)
def test_auth_txn_with_deprecated_key(tconf, tdir, allPluginsPath,
                                      txnPoolNodeSet,
                                      looper,
                                      sdk_wallet_trustee,
                                      sdk_pool_handle):
    """
    Add to the auth_map a fake rule
    Send AUTH_RULE txn to change this fake rule (and set the fake key to the config state)
    Send GET_AUTH_RULE txn and check that the fake rule was changed
    Remove the fake auth rule from the map
    Check that we can't get the fake auth rule
    Restart the last node with its state regeneration
    Check that nodes data is equal after changing the existing auth rule (restarted node regenerate config state)
    """
    # A txn type that is not present in the real built-in auth map.
    fake_txn_type = "100002"
    fake_key = AuthActionAdd(txn_type=fake_txn_type,
                             field="*",
                             value="*").get_action_id()
    fake_constraint = one_trustee_constraint
    new_auth_constraint = AuthConstraint(role=STEWARD, sig_count=1, need_to_be_owner=False).as_dict
    # Add to the auth_map a fake rule
    with extend_auth_map(txnPoolNodeSet,
                         fake_key,
                         fake_constraint):
        # Send AUTH_RULE txn to change this fake rule (and set the fake key to the config state)
        sdk_send_and_check_auth_rule_request(looper,
                                             sdk_pool_handle,
                                             sdk_wallet_trustee,
                                             auth_action=ADD_PREFIX,
                                             auth_type=fake_txn_type,
                                             field='*',
                                             new_value='*',
                                             constraint=new_auth_constraint)
        # Send GET_AUTH_RULE txn and check that the fake rule was changed
        result = sdk_send_and_check_get_auth_rule_request(
            looper,
            sdk_pool_handle,
            sdk_wallet_trustee,
            auth_type=fake_txn_type,
            auth_action=ADD_PREFIX,
            field="*",
            new_value="*"
        )[0][1]["result"][DATA][0]
        assert result[AUTH_TYPE] == fake_txn_type
        assert result[CONSTRAINT] == new_auth_constraint
    # Remove the fake auth rule from the map
    # Check that we can't get the fake auth rule
    # (outside the `with`, the fake key is gone from every node's auth map,
    # so both writing and reading the rule must be NACKed)
    with pytest.raises(RequestNackedException, match="not found in authorization map"):
        sdk_send_and_check_auth_rule_request(looper,
                                             sdk_pool_handle,
                                             sdk_wallet_trustee,
                                             auth_action=ADD_PREFIX,
                                             auth_type=fake_txn_type,
                                             field='*',
                                             new_value='*',
                                             constraint=AuthConstraint(role=STEWARD, sig_count=2,
                                                                       need_to_be_owner=False).as_dict)
    resp = sdk_send_and_check_get_auth_rule_request(looper,
                                                    sdk_pool_handle,
                                                    sdk_wallet_trustee)
    assert all(rule[AUTH_TYPE] != fake_txn_type for rule in resp[0][1]["result"][DATA])
    with pytest.raises(RequestNackedException, match="not found in authorization map"):
        sdk_send_and_check_get_auth_rule_request(
            looper,
            sdk_pool_handle,
            sdk_wallet_trustee,
            auth_type=fake_txn_type,
            auth_action=ADD_PREFIX,
            field="*",
            new_value="*"
        )
    # Restart the last node with its state regeneration
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    node_to_stop = txnPoolNodeSet[-1]
    node_state = node_to_stop.states[CONFIG_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    # Keep the node's data on disk so only the config state DB is wiped below.
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet[:-1])
    # Delete the config state so the restarted node must rebuild it from the ledger.
    shutil.rmtree(state_db_path)
    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(
        node_to_stop.name,
        config_helper=config_helper,
        config=tconf,
        pluginPaths=allPluginsPath,
        ha=node_to_stop.nodestack.ha,
        cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    txnPoolNodeSet[-1] = restarted_node
    # Check that nodes data is equal (restarted node regenerate config state)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)
    sdk_send_and_check_auth_rule_request(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         auth_action=ADD_PREFIX,
                                         auth_type=NYM,
                                         field=ROLE,
                                         new_value=STEWARD,
                                         constraint=AuthConstraint(role=STEWARD, sig_count=2,
                                                                   need_to_be_owner=False).as_dict)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=20)
| 45.198675 | 122 | 0.606886 | import shutil
from contextlib import contextmanager
import pytest
from indy_common.config_helper import NodeConfigHelper
from indy_node.test.helper import TestNode
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.test_node import ensureElectionsDone, ensure_node_disconnected, checkNodesConnected
from indy_node.test.auth_rule.helper import sdk_send_and_check_auth_rule_request, sdk_send_and_check_get_auth_rule_request
from indy_common.authorize.auth_actions import ADD_PREFIX, AuthActionAdd
from indy_common.authorize.auth_constraints import AuthConstraint, ROLE
from indy_common.constants import CONSTRAINT, AUTH_TYPE, CONFIG_LEDGER_ID, NYM
from indy_common.authorize.auth_map import one_trustee_constraint
from plenum.common.constants import STEWARD, DATA
from plenum.common.exceptions import RequestNackedException
@contextmanager
def extend_auth_map(nodes, key, constraint):
"""
Context manager to add a new auth rule to the auth map and remove it on exit.
:param nodes: nodes list which auth maps should be changed
:param key: str gotten from AuthActionAdd(...).get_action_id()
:param constraint: AuthConstraint
"""
for node in nodes:
node.write_req_validator.auth_map[key] = constraint
yield
for node in nodes:
node.write_req_validator.auth_map.pop(key, None)
def test_auth_txn_with_deprecated_key(tconf, tdir, allPluginsPath,
txnPoolNodeSet,
looper,
sdk_wallet_trustee,
sdk_pool_handle):
"""
Add to the auth_map a fake rule
Send AUTH_RULE txn to change this fake rule (and set the fake key to the config state)
Send GET_AUTH_RULE txn and check that the fake rule was changed
Remove the fake auth rule from the map
Check that we can't get the fake auth rule
Restart the last node with its state regeneration
Check that nodes data is equal after changing the existing auth rule (restarted node regenerate config state)
"""
fake_txn_type = "100002"
fake_key = AuthActionAdd(txn_type=fake_txn_type,
field="*",
value="*").get_action_id()
fake_constraint = one_trustee_constraint
new_auth_constraint = AuthConstraint(role=STEWARD, sig_count=1, need_to_be_owner=False).as_dict
# Add to the auth_map a fake rule
with extend_auth_map(txnPoolNodeSet,
fake_key,
fake_constraint):
# Send AUTH_RULE txn to change this fake rule (and set the fake key to the config state)
sdk_send_and_check_auth_rule_request(looper,
sdk_pool_handle,
sdk_wallet_trustee,
auth_action=ADD_PREFIX,
auth_type=fake_txn_type,
field='*',
new_value='*',
constraint=new_auth_constraint)
# Send GET_AUTH_RULE txn and check that the fake rule was changed
result = sdk_send_and_check_get_auth_rule_request(
looper,
sdk_pool_handle,
sdk_wallet_trustee,
auth_type=fake_txn_type,
auth_action=ADD_PREFIX,
field="*",
new_value="*"
)[0][1]["result"][DATA][0]
assert result[AUTH_TYPE] == fake_txn_type
assert result[CONSTRAINT] == new_auth_constraint
# Remove the fake auth rule from the map
# Check that we can't get the fake auth rule
with pytest.raises(RequestNackedException, match="not found in authorization map"):
sdk_send_and_check_auth_rule_request(looper,
sdk_pool_handle,
sdk_wallet_trustee,
auth_action=ADD_PREFIX,
auth_type=fake_txn_type,
field='*',
new_value='*',
constraint=AuthConstraint(role=STEWARD, sig_count=2,
need_to_be_owner=False).as_dict)
resp = sdk_send_and_check_get_auth_rule_request(looper,
sdk_pool_handle,
sdk_wallet_trustee)
assert all(rule[AUTH_TYPE] != fake_txn_type for rule in resp[0][1]["result"][DATA])
with pytest.raises(RequestNackedException, match="not found in authorization map"):
sdk_send_and_check_get_auth_rule_request(
looper,
sdk_pool_handle,
sdk_wallet_trustee,
auth_type=fake_txn_type,
auth_action=ADD_PREFIX,
field="*",
new_value="*"
)
# Restart the last node with its state regeneration
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
node_to_stop = txnPoolNodeSet[-1]
node_state = node_to_stop.states[CONFIG_LEDGER_ID]
assert not node_state.isEmpty
state_db_path = node_state._kv.db_path
node_to_stop.cleanupOnStopping = False
node_to_stop.stop()
looper.removeProdable(node_to_stop)
ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet[:-1])
shutil.rmtree(state_db_path)
config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
restarted_node = TestNode(
node_to_stop.name,
config_helper=config_helper,
config=tconf,
pluginPaths=allPluginsPath,
ha=node_to_stop.nodestack.ha,
cliha=node_to_stop.clientstack.ha)
looper.add(restarted_node)
txnPoolNodeSet[-1] = restarted_node
# Check that nodes data is equal (restarted node regenerate config state)
looper.run(checkNodesConnected(txnPoolNodeSet))
ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)
sdk_send_and_check_auth_rule_request(looper,
sdk_pool_handle,
sdk_wallet_trustee,
auth_action=ADD_PREFIX,
auth_type=NYM,
field=ROLE,
new_value=STEWARD,
constraint=AuthConstraint(role=STEWARD, sig_count=2,
need_to_be_owner=False).as_dict)
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=20)
| 0 | 0 | 0 |
09849d19c6f8c069a16069f6143c86c920b9a180 | 1,304 | py | Python | src/unordered_pair.py | martgarden/pp-time-analysis | 6cf0e03b0d2d9039cc200c831ee171022cea4e26 | [
"MIT"
] | null | null | null | src/unordered_pair.py | martgarden/pp-time-analysis | 6cf0e03b0d2d9039cc200c831ee171022cea4e26 | [
"MIT"
] | null | null | null | src/unordered_pair.py | martgarden/pp-time-analysis | 6cf0e03b0d2d9039cc200c831ee171022cea4e26 | [
"MIT"
] | 1 | 2019-12-12T15:01:05.000Z | 2019-12-12T15:01:05.000Z | # -*- coding: utf-8 -*-
# If y != None, constructs the unordered pair (x, y)
# If y == None, constructs an unordered pair from iterable x, e.g. a tuple
| 24.148148 | 78 | 0.509202 | # -*- coding: utf-8 -*-
class upair:
    """An unordered pair: ``upair(x, y) == upair(y, x)``.

    Construct from two values ``upair(x, y)`` or from a 2-element iterable
    ``upair((x, y))``. Instances are hashable (order-insensitive), iterable
    (yields both elements in construction order) and support ``in``/``len``.
    """
    # If y != None, constructs the unordered pair (x, y)
    # If y == None, constructs an unordered pair from iterable x, e.g. a tuple
    def __init__(self, x, y=None):
        if y is not None:
            self._x = x
            self._y = y
        else:
            self._x, self._y = tuple(x)
    def some(self):
        # Return one (arbitrary but fixed) element of the pair.
        return self._x
    def other(self, x):
        # Return the element paired with x.
        if (self._x == x):
            return self._y
        else:
            return self._x
    def count(self, x):
        # How many of the two slots hold x: 0, 1 or 2.
        if x not in self:
            return 0
        elif self.other(x) != x:
            return 1
        else:
            return 2
    def __eq__(self, other):
        # BUGFIX: comparing against a non-upair used to raise AttributeError;
        # returning NotImplemented lets Python fall back to its defaults.
        if not isinstance(other, upair):
            return NotImplemented
        return ((self._x == other._x and self._y == other._y) or
                (self._x == other._y and self._y == other._x))
    def __ne__(self, other):
        # BUGFIX: this used to be `return (self != other)`, which recursed
        # into __ne__ itself and crashed with RecursionError on any `!=`.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __len__(self):
        return 2
    def __contains__(self, elem):
        return (self._x == elem or self._y == elem)
    def __iter__(self):
        yield self._x
        yield self._y
    def __hash__(self):
        # Symmetric in (_x, _y) so equal (swapped) pairs hash equally.
        return hash(hash((self._x, self._y)) + hash((self._y, self._x)))
    def __str__(self):
        return "⟅{}, {}⟆".format(self._x, self._y)
    def __repr__(self):
        return str(self)
f08712809580ee92fb03e643dd9e48ab44d626b8 | 1,288 | py | Python | lib/python2.7/site-packages/leginon/checkapps.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | lib/python2.7/site-packages/leginon/checkapps.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | lib/python2.7/site-packages/leginon/checkapps.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | 1 | 2019-09-05T20:58:37.000Z | 2019-09-05T20:58:37.000Z | #!/usr/bin/env python
import leginon.leginondata
import sys
import getpass
import sets
# Python 2 report script: list Leginon applications launched within the last
# N days versus those that were not.
days = int(raw_input('Days: '))
## make set of all application names
appquery = leginon.leginondata.ApplicationData()
apps = appquery.query()
print 'APPS', len(apps)
allapps = sets.Set()
allappsdict = {}   # name -> list of application records sharing that name
for app in apps:
	appname = app['name']
	allapps.add(appname)
	if appname in allappsdict:
		allappsdict[appname].append(app)
	else:
		allappsdict[appname] = [app]
print 'ALL', len(allapps)
## make set off apps launched in last n days
launchquery = leginon.leginondata.LaunchedApplicationData()
# Query time window: everything newer than `days` days ago.
timelimit = '-%d 0:0:0' % (days,)
launchedapps = launchquery.query(timelimit=timelimit)
recentapps = []
for launchedapp in launchedapps:
	try:
		appname = launchedapp['application']['name']
	except:
		# Skip launch records with no resolvable application reference.
		continue
	if appname not in recentapps:
		recentapps.append(appname)
print 'RECENT', len(recentapps)
## make set off apps not launched in last n days
notrecentapps = allapps - sets.Set(recentapps)
print 'NOTRECENT', len(notrecentapps)
print 'Most Recently Launched (last %d days = %d apps):' % (days,len(recentapps))
for recent in recentapps:
	print '\t%s' % (recent,)
print 'Others Sorted Alphabetically'
others = list(notrecentapps)
others.sort()
for other in others:
	print '\t%s' % (other,)
| 24.769231 | 81 | 0.733696 | #!/usr/bin/env python
import leginon.leginondata
import sys
import getpass
import sets
days = int(raw_input('Days: '))
## make set of all application names
appquery = leginon.leginondata.ApplicationData()
apps = appquery.query()
print 'APPS', len(apps)
allapps = sets.Set()
allappsdict = {}
for app in apps:
appname = app['name']
allapps.add(appname)
if appname in allappsdict:
allappsdict[appname].append(app)
else:
allappsdict[appname] = [app]
print 'ALL', len(allapps)
## make set off apps launched in last n days
launchquery = leginon.leginondata.LaunchedApplicationData()
timelimit = '-%d 0:0:0' % (days,)
launchedapps = launchquery.query(timelimit=timelimit)
recentapps = []
for launchedapp in launchedapps:
try:
appname = launchedapp['application']['name']
except:
continue
if appname not in recentapps:
recentapps.append(appname)
print 'RECENT', len(recentapps)
## make set off apps not launched in last n days
notrecentapps = allapps - sets.Set(recentapps)
print 'NOTRECENT', len(notrecentapps)
print 'Most Recently Launched (last %d days = %d apps):' % (days,len(recentapps))
for recent in recentapps:
print '\t%s' % (recent,)
print 'Others Sorted Alphabetically'
others = list(notrecentapps)
others.sort()
for other in others:
print '\t%s' % (other,)
| 0 | 0 | 0 |
1b7001b06418ddc532a256eb22664ba369d7d7e4 | 291 | py | Python | test/scripts/testSubMatrix.py | Gibbsdavidl/miergolf | 1728bd9459d6067fb45d777d06ad440ab603d2e7 | [
"BSD-3-Clause"
] | 8 | 2016-03-04T07:51:43.000Z | 2018-09-19T03:18:45.000Z | test/scripts/testSubMatrix.py | Gibbsdavidl/miergolf | 1728bd9459d6067fb45d777d06ad440ab603d2e7 | [
"BSD-3-Clause"
] | null | null | null | test/scripts/testSubMatrix.py | Gibbsdavidl/miergolf | 1728bd9459d6067fb45d777d06ad440ab603d2e7 | [
"BSD-3-Clause"
] | 3 | 2017-03-24T02:52:38.000Z | 2022-01-22T00:14:55.000Z | import timeit
# Benchmark setup, executed once per timeit repetition: build a random sparse
# matrix with the project-local `submatrix` helper and fix the index list.
setup = '''
import scipy.sparse as sp
import numpy as np
from bisect import bisect
from numpy.random import rand, randint
import submatrix as s
r = [10,20,30]
A = s.randomMatrix()
'''
# Time s.subMatrix(r, r, A): 3 repetitions of 10 calls each (Python 2 print).
t = timeit.Timer("s.subMatrix(r,r,A)", setup).repeat(3, 10)
print t
#print t.timeit()
| 17.117647 | 59 | 0.697595 | import timeit
setup = '''
import scipy.sparse as sp
import numpy as np
from bisect import bisect
from numpy.random import rand, randint
import submatrix as s
r = [10,20,30]
A = s.randomMatrix()
'''
t = timeit.Timer("s.subMatrix(r,r,A)", setup).repeat(3, 10)
print t
#print t.timeit()
| 0 | 0 | 0 |
937d699f29de9bf65385d4f5c1da52e203206dc2 | 4,595 | py | Python | services/traction/acapy_wrapper/apis/wallet_api.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 12 | 2022-01-29T20:30:03.000Z | 2022-03-29T11:46:14.000Z | services/traction/acapy_wrapper/apis/wallet_api.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 38 | 2021-11-22T17:52:50.000Z | 2022-03-31T17:52:00.000Z | services/traction/acapy_wrapper/apis/wallet_api.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 9 | 2021-11-22T18:05:48.000Z | 2022-03-29T11:25:08.000Z | # coding: utf-8
from typing import Dict, List # noqa: F401
from fastapi import ( # noqa: F401
APIRouter,
Body,
Cookie,
Depends,
Form,
Header,
Path,
Query,
Request,
Response,
Security,
status,
)
from acapy_wrapper.models.extra_models import TokenModel # noqa: F401
from acapy_wrapper.models.did_create import DIDCreate
from acapy_wrapper.models.did_endpoint import DIDEndpoint
from acapy_wrapper.models.did_endpoint_with_type import DIDEndpointWithType
from acapy_wrapper.models.did_list import DIDList
from acapy_wrapper.models.did_result import DIDResult
from api import acapy_utils as au
router = APIRouter()
@router.post(
"/wallet/did/create",
responses={
200: {"model": DIDResult, "description": ""},
},
tags=["wallet"],
summary="Create a local DID",
)
@router.get(
"/wallet/did",
responses={
200: {"model": DIDList, "description": ""},
},
tags=["wallet"],
summary="List wallet DIDs",
)
@router.patch(
"/wallet/did/local/rotate-keypair",
responses={
200: {"model": dict, "description": ""},
},
tags=["wallet"],
summary="Rotate keypair for a DID not posted to the ledger",
)
@router.get(
"/wallet/did/public",
responses={
200: {"model": DIDResult, "description": ""},
},
tags=["wallet"],
summary="Fetch the current public DID",
)
@router.post(
"/wallet/did/public",
responses={
200: {"model": DIDResult, "description": ""},
},
tags=["wallet"],
summary="Assign the current public DID",
)
@router.get(
"/wallet/get-did-endpoint",
responses={
200: {"model": DIDEndpoint, "description": ""},
},
tags=["wallet"],
summary="Query DID endpoint in wallet",
)
@router.post(
"/wallet/set-did-endpoint",
responses={
200: {"model": dict, "description": ""},
},
tags=["wallet"],
summary="Update endpoint in wallet and on ledger if posted to it",
)
| 26.715116 | 171 | 0.666376 | # coding: utf-8
from typing import Dict, List # noqa: F401
from fastapi import ( # noqa: F401
APIRouter,
Body,
Cookie,
Depends,
Form,
Header,
Path,
Query,
Request,
Response,
Security,
status,
)
from acapy_wrapper.models.extra_models import TokenModel # noqa: F401
from acapy_wrapper.models.did_create import DIDCreate
from acapy_wrapper.models.did_endpoint import DIDEndpoint
from acapy_wrapper.models.did_endpoint_with_type import DIDEndpointWithType
from acapy_wrapper.models.did_list import DIDList
from acapy_wrapper.models.did_result import DIDResult
from api import acapy_utils as au
router = APIRouter()
@router.post(
"/wallet/did/create",
responses={
200: {"model": DIDResult, "description": ""},
},
tags=["wallet"],
summary="Create a local DID",
)
async def wallet_did_create_post(
request: Request,
body: DIDCreate = Body(None, description=""),
) -> DIDResult:
resp_text = await au.acapy_admin_request_from_request(request)
return resp_text
@router.get(
"/wallet/did",
responses={
200: {"model": DIDList, "description": ""},
},
tags=["wallet"],
summary="List wallet DIDs",
)
async def wallet_did_get(
request: Request,
did: str = Query(
None,
description="DID of interest",
regex=r"^did:key:z[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]+$|^(did:sov:)?[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}$",
),
key_type: str = Query(None, description="Key type to query for."),
method: str = Query(
None,
description="DID method to query for. e.g. sov to only fetch indy/sov DIDs",
),
posture: str = Query(
None,
description="Whether DID is current public DID, posted to ledger but current public DID, or local to the wallet",
),
verkey: str = Query(
None,
description="Verification key of interest",
regex=r"^[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{43,44}$",
),
) -> DIDList:
resp_text = await au.acapy_admin_request_from_request(request)
return resp_text
@router.patch(
"/wallet/did/local/rotate-keypair",
responses={
200: {"model": dict, "description": ""},
},
tags=["wallet"],
summary="Rotate keypair for a DID not posted to the ledger",
)
async def wallet_did_local_rotate_keypair_patch(
request: Request,
did: str = Query(
None,
description="DID of interest",
regex=r"^(did:sov:)?[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}$",
),
) -> dict:
resp_text = await au.acapy_admin_request_from_request(request)
return resp_text
@router.get(
"/wallet/did/public",
responses={
200: {"model": DIDResult, "description": ""},
},
tags=["wallet"],
summary="Fetch the current public DID",
)
async def wallet_did_public_get(
request: Request,
) -> DIDResult:
resp_text = await au.acapy_admin_request_from_request(request)
return resp_text
@router.post(
"/wallet/did/public",
responses={
200: {"model": DIDResult, "description": ""},
},
tags=["wallet"],
summary="Assign the current public DID",
)
async def wallet_did_public_post(
request: Request,
did: str = Query(
None,
description="DID of interest",
regex=r"^(did:sov:)?[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}$",
),
) -> DIDResult:
resp_text = await au.acapy_admin_request_from_request(request)
return resp_text
@router.get(
"/wallet/get-did-endpoint",
responses={
200: {"model": DIDEndpoint, "description": ""},
},
tags=["wallet"],
summary="Query DID endpoint in wallet",
)
async def wallet_get_did_endpoint_get(
request: Request,
did: str = Query(
None,
description="DID of interest",
regex=r"^(did:sov:)?[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}$",
),
) -> DIDEndpoint:
resp_text = await au.acapy_admin_request_from_request(request)
return resp_text
@router.post(
"/wallet/set-did-endpoint",
responses={
200: {"model": dict, "description": ""},
},
tags=["wallet"],
summary="Update endpoint in wallet and on ledger if posted to it",
)
async def wallet_set_did_endpoint_post(
request: Request,
body: DIDEndpointWithType = Body(None, description=""),
) -> dict:
resp_text = await au.acapy_admin_request_from_request(request)
return resp_text
| 2,437 | 0 | 154 |
2ab8baacea67bdd028c291e9f1cb7d788577d7fe | 128 | py | Python | Desafios/desafio014.py | MaxGabrielima/Python-Codes | 195c13821a937f0b792b0d214d19840d57068279 | [
"MIT"
] | 3 | 2021-07-31T21:10:32.000Z | 2021-11-17T02:21:28.000Z | Desafios/desafio014.py | MaxGabrielima/Python-Codes | 195c13821a937f0b792b0d214d19840d57068279 | [
"MIT"
] | null | null | null | Desafios/desafio014.py | MaxGabrielima/Python-Codes | 195c13821a937f0b792b0d214d19840d57068279 | [
"MIT"
] | null | null | null | c = float(input('Digite a temperatura em C°: '))
f = c * 1.8 + 32
print('{:.0f} C° é igual a {:.0f}° Fahrenheight'.format(c, f)) | 42.666667 | 62 | 0.585938 | c = float(input('Digite a temperatura em C°: '))
f = c * 1.8 + 32
print('{:.0f} C° é igual a {:.0f}° Fahrenheight'.format(c, f)) | 0 | 0 | 0 |
e1f67f7b91bd8c32c0cfdb45a2077ae8c8ff3cd8 | 2,272 | py | Python | sleeplog/tests.py | jepaynedev/sleeplog | 7e79e9d36521ac03b49bf5984bb4a2759d8015ca | [
"MIT"
] | null | null | null | sleeplog/tests.py | jepaynedev/sleeplog | 7e79e9d36521ac03b49bf5984bb4a2759d8015ca | [
"MIT"
] | null | null | null | sleeplog/tests.py | jepaynedev/sleeplog | 7e79e9d36521ac03b49bf5984bb4a2759d8015ca | [
"MIT"
] | null | null | null | import transaction
import unittest
import unittest.mock as mock
from pyramid import testing
from .models import User
| 32 | 121 | 0.629842 | import transaction
import unittest
import unittest.mock as mock
from pyramid import testing
from .models import User
class SleepLogViewTests(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_default(self):
from .views.default import SleepLogViews
request = testing.DummyRequest()
request.user = User(
sub='105578945702061677132',
email='jepayne1138@gmail.com',
verified=1,
name='James Payne',
given='James',
family='James',
locale='en',
picture='https://lh3.googleusercontent.com/-cc2Wq0RJZ7g/AAAAAAAAAAI/AAAAAAAAAFc/mx6nAXHHaOc/s96-c/photo.jpg',
)
inst = SleepLogViews(request)
response = inst.default()
# Not ideal, but I want to quickly have a way to keep the id secret
self.assertTrue(response['client_id'].endswith('.apps.googleusercontent.com'))
self.assertEqual(response['given'], 'James')
class SleepLogFunctionalTests(unittest.TestCase):
def setUp(self):
from pyramid.paster import get_app
app = get_app('production.ini')
from webtest import TestApp
self.testapp = TestApp(app)
def tearDown(self):
transaction.abort()
@mock.patch('sleeplog.views.default.verify_google_token', return_value='105578945702061677132', autospec=True)
def _login(self, *args):
redirect_res = self.testapp.post(
'/login',
params={'form.submitted': 'Log In', 'token': 'dummy_token'},
status=302,
)
redirect_res.follow(status=200)
@mock.patch('sleeplog.views.default.verify_google_token', return_value='105578945702061677132', autospec=True)
def test_login(self, mock_verify):
self.testapp.post(
'/login',
params={'form.submitted': 'Log In', 'token': 'dummy_token'},
status=302,
)
self.assertTrue(mock_verify.called)
def test_default(self):
self._login()
res = self.testapp.get('/', status=200)
self.assertIn(b'Welcome, James!', res.body)
self.assertIn(b'.apps.googleusercontent.com', res.body)
| 1,611 | 414 | 126 |
afc39d0c21a7cdf47359b933ab387e9408704987 | 23,211 | py | Python | catkin_ws/src/folding_control/src/moveit_interface.py | roop-pal/robotic-folding | a0e062ac6d23cd07fe10e3f45abc4ba50e533141 | [
"RSA-MD"
] | 17 | 2015-10-21T16:09:18.000Z | 2021-07-23T03:15:55.000Z | catkin_ws/src/folding_control/src/moveit_interface.py | roop-pal/robotic-folding | a0e062ac6d23cd07fe10e3f45abc4ba50e533141 | [
"RSA-MD"
] | 1 | 2017-12-17T04:39:38.000Z | 2017-12-17T04:39:38.000Z | catkin_ws/src/folding_control/src/moveit_interface.py | roop-pal/robotic-folding | a0e062ac6d23cd07fe10e3f45abc4ba50e533141 | [
"RSA-MD"
] | 8 | 2016-03-18T14:13:58.000Z | 2020-01-15T15:03:51.000Z | #!/usr/bin/env python
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import signal
import math
import tf
from copy import deepcopy
import transformation
signal.signal(signal.SIGINT, signal_handler)
pose_list = \
{
'left_open': [0.6451299414163874, 0.48308154941436016, 0.688241579565718, 0.11442618882303603, 0.9926119706997465, 0.028043380270852243, 0.02901192441022871],
'right_open': [0.6462863106297506, -0.48390075862008375, 0.6859732922530206, -0.15210562530968288, 0.9881075743671162, 0.0028034494110560793, 0.02234817439712085]
}
joint_list = \
{
'left_open': [-0.04525243319091797, -1.185383652484131, -0.6304661031005859, 0.9851991598937989, 0.2657621711975098, 1.7587089713012696, 0.44638840876464847],
'right_open': [-0.024543692578125, -1.1293933537902834, 0.7673738882629395, 0.9560535249572755, -0.2922233397583008, 1.8089468420471193, -0.4793689956665039]
}
def distance():
"""
Calculate the distance between two points.
return:
Distance.
"""
return lambda a, b: math.sqrt((a.x-b.x)*(a.x-b.x)+(a.y-b.y)*+(a.y-b.y)+(a.z-b.z)*(a.z-b.z))
class MoveitInterface:
"""
A wrapper class that calls functions from the MoveIt.
"""
gripper_translation_dist = 0.22
print_flag = False
def move_predef_position(self, limb_name, pos_name, speed=0.3,
timeout = 15.0):
"""
Move to some pre-defined pose.
Args:
limb_name: Joint name.
pos_name: refer to the dict() defined at the beginning
"""
self.move_limb(limb_name, pose_list[limb_name+'_'+pos_name], False,
speed)
def move_predef_joint_position(self, limb_name, pose_name, speed = 0.3,
timeout = 15.0):
"""
Move to some pre-defined joints.
Args:
limb_name: Both arm or left or right.
pos_name: refer to the dict() defined at the beginning
"""
if limb_name == 'both':
self.move_limb_joint(limb_name, joint_list['left_'+pose_name]+
joint_list['right_'+pose_name], speed)
else:
self.move_limb_joint(limb_name, joint_list[limb_name+'_'+pose_name],
speed)
def move_limb_height(self, limb_name, target_height_displacement,
do_displacement, speed=0.1):
"""
Move the arm vertically.
Args:
limb_name: Left or right arm.
target_height_displacement: Move offset.
do_displacement: True if move from the current position.
speed: Move speed.
Return:
"""
# Both arm not implemented.
print target_height_displacement
if limb_name == 'left':
target_pos = self.left_arm.get_current_pose().pose
elif limb_name == 'right':
target_pos = self.right_arm.get_current_pose().pose
if do_displacement:
target_height_displacement += target_pos.position.z
print [target_pos.position.x, target_pos.position.y,
target_height_displacement]
self.move_limb(limb_name, [target_pos.position.x, target_pos.position.y,
target_height_displacement])
def move_limb_joint_single(self, limb_name, joint_index, joint_position):
"""
Move a joint of the arm.
Args:
limb_name: Left or right arm.
joint_index: Joint index. There are 7 joints on each arm.
joint_position: joint angle.
Return:
"""
# Both arm not implemented.
joint_val = self.query_joint(limb_name)
joint_val[joint_index] = joint_position
self.move_limb_joint(limb_name, joint_val)
def move_limb_joint(self, limb_name, joint_positions, speed=0.1):
"""
Move the arm with 7 joints in a whole.
Args:
limb_name: Left or right arm.
joint_index: Joint index of 7. There are 7 joints on each arm.
joint_position: joint angle.
Return:
"""
if limb_name == 'both':
print joint_positions
self.both_arms.set_joint_value_target(joint_positions)
self.both_arms.go()
return
joint_pos = {limb_name+'_s0': joint_positions[0],
limb_name+'_s1': joint_positions[1],
limb_name+'_e0': joint_positions[2],
limb_name+'_e1': joint_positions[3],
limb_name+'_w0': joint_positions[4],
limb_name+'_w1': joint_positions[5],
limb_name+'_w2': joint_positions[6]}
if limb_name == 'left':
self.left_arm.set_joint_value_target(joint_pos)
self.left_arm.go()
if limb_name == 'right':
self.right_arm.set_joint_value_target(joint_pos)
self.right_arm.go()
def move_limb_position(self, limb_name, position, speed=0.1):
"""
Move a single arm by position.
Args:
limb_name: Left or right arm.
position: 3D position in terms of the robot base.
speed: Moving speed.
Return:
"""
if limb_name == 'left':
self.left_arm.set_position_target(position[0], position[1],
position[2])
self.left_arm.go()
if limb_name == 'right':
self.right_arm.set_position_target(position[0], position[1],
position[2])
self.right_arm.go()
def move_limb(self, limb_name, position, do_displacement = False,
sync_option = 'wait', speed=0.3, timeout = 15.0):
"""
Move a limb to the target position & orientation
---position (float list):
- The first 3 elements are cartesian coordinate of the target position.
- THe 4 following elements are the quaternion of the end effector
(optional)
Args:
limb_name: Left or right arm.
position: 3D position in terms of the robot base.
do_displacement: True if move from the current position.
sync_option:
speed: Moving speed.
timeout: If not reachable, time to release.
Return:
"""
if limb_name == 'left':
self.left_arm.clear_pose_targets()
pose = self.generate_pose(limb_name, position, do_displacement)
self.left_arm.set_pose_target(pose)
self.left_arm.go()
if limb_name == 'right':
self.right_arm.clear_pose_targets()
pose = self.generate_pose(limb_name, position, do_displacement)
self.right_arm.set_pose_target(pose)
self.right_arm.go()
if limb_name == 'both':
self.left_arm.clear_pose_targets()
self.right_arm.clear_pose_targets()
if len(position) == 6:
# Position only, same orientation.
pose_left = self.generate_pose(
'left', position[0:3], do_displacement)
pose_right = self.generate_pose(
'right', position[3:], do_displacement)
elif len(position) == 14: # Pose.
pose_left = self.generate_pose(
'left', position[0:7], do_displacement)
pose_right = self.generate_pose(
'right', position[7:], do_displacement)
elif len(position) == 2:
# For the case that the parameter is a tuple of two lists.
pose_left = self.generate_pose(
'left', position[0], do_displacement)
pose_right = self.generate_pose(
'right', position[1], do_displacement)
self.right_arm.set_pose_target(pose_right)
self.left_arm.set_pose_target(pose_left)
pa = self.right_arm.plan()
pb = self.left_arm.plan()
pc = self.merge_plans(pa, pb, sync_option)
self.both_arms.execute(pc)
return 1
def generate_pose(self, side, position, do_displacement=False):
"""
Generate arm pose for a target moving position.
Args:
side: Left or right arm.
position: Target moving position.
do_displacement: True if move from the current position.
Return: The pose of a list of 3 or 7 elements for the target position.
"""
if side == 'left':
limb = self.left_arm
else:
limb = self.right_arm
target_pose = limb.get_current_pose()
if do_displacement:
target_pose.pose.position.x += position[0]
target_pose.pose.position.y += position[1]
target_pose.pose.position.z += position[2]
else:
target_pose.pose.position.x = position[0]
target_pose.pose.position.y = position[1]
target_pose.pose.position.z = position[2]
if len(position) == 7:
print "size of 7! Including orientation."
target_pose.pose.orientation.w = position[3]
target_pose.pose.orientation.x = position[4]
target_pose.pose.orientation.y = position[5]
target_pose.pose.orientation.z = position[6]
target_pose = self.transform_gripper_to_wrist(side, target_pose)
return target_pose.pose
def move_cartesian_path(self, limb_name, way_points,
sync_option = 'wait'):
"""
Move the arm in cartesian path defined by a set of way points.
Args:
limb_name: Left or right arm.
way_points: A set of 3D points to move the arm.
sync_option: 'wait' if move the arm one by one.
Return:
"""
if limb_name == 'both':
assert len(way_points) == 2
pa = self.generate_cartesian_path_plan('left', way_points[0])
pb = self.generate_cartesian_path_plan('right', way_points[1])
if pa and pb: #if both are valid
plan = self.merge_plans(pa, pb, sync_option)
self.both_arms.execute(plan)
else:
print "Invalid Cartesian Path"
elif limb_name == 'left':
plan = self.generate_cartesian_path_plan(limb_name, way_points)
if plan:
self.left_arm.execute(plan)
else:
print "Invalid Cartesian Path"
exit()
elif limb_name == 'right':
plan = self.generate_cartesian_path_plan(limb_name, way_points)
if plan:
self.right_arm.execute(plan)
else:
print "Invalid Cartesian Path"
exit()
def generate_cartesian_path_plan(self, side, way_points,
eef_step = 0.01, jump_threshold = 0):
"""
Generate a plan based on a sequence of cartesian way-points
Args:
group: move group
way_points: a list of cartesian way points, can be either x,y,z
(position) or x,y,z,w,x,y,z (pose)
eef_step: end effector step constraint
jump_threshold:
Return:a MoveIt plan.
"""
if side == 'left':
group = self.left_arm
else:
group = self.right_arm
# Always start with the current pose.
way_point_poses = []
for w in way_points:
way_point_poses.append(self.generate_pose(side, w))
(plan, fraction) = group.compute_cartesian_path(
way_point_poses, # Way points to follow.
eef_step, # eef_step.
jump_threshold) # jump_threshold.
if fraction == -1: # Error.
return False
return plan
def merge_plans(self, pa, pb, sync_option='wait'):
"""
Merge two MoveIt plans.
Args:
pa: Plan a.
pb: Plan b.
sync_option: wait
Return:
Merged plan.
"""
# Merge two (left & right) plans.
self.merge_trajectories(pa.joint_trajectory, pb.joint_trajectory,
sync_option)
if len(pa.joint_trajectory.points) == 0:
return pb
else:
return pa
def merge_points(self, target, source):
"""
Merge trajectory data points.
Args:
target:
source:
Return:
"""
target.joint_names.extend(source.joint_names)
for i in range(len(target.points)):
target.points[i].positions = target.points[i].positions + \
source.points[i].positions
target.points[i].accelerations = target.points[i].accelerations + \
source.points[i].accelerations
target.points[i].velocities = target.points[i].velocities + \
source.points[i].velocities
target.points[i].effort = target.points[i].effort + \
source.points[i].effort
source.points[:] = []
return target
def merge_trajectories(self, traj_l, traj_r, sync_option='wait'):
"""
Merge two trajectories by various synchronization options.
Args:
traj_l: Left arm trajectory.
traj_r: Right arm trajectory.
sync_option: See below.
Return:
"""
#
if len(traj_l.points) < len(traj_r.points):
long = traj_r
short = traj_l
else:
long = traj_l
short = traj_r
if sync_option == 'trim' or len(short.points) == len(long.points):
# merge to shorter trajectory by trimming the longer one.
self.merge_points(short, long) # merge long to short.
elif sync_option == 'wait':
# merge to longer trajectory by waiting for the shorter one.
size_diff = len(long.points) - len(short.points)
state = deepcopy(short.points[-1])
# zero velocities & accelerations state.
state.accelerations = (0,)*len(state.accelerations)
state.velocities = (0,)*len(state.velocities)
for i in range(size_diff):
short.points.append(deepcopy(state))
assert(len(short.points) == len(long.points))
self.merge_points(long, short) #merge short to long
elif sync_option == 'fastforward':
# Merge to shorter trajectory by fast forwarding the longer one.
pass
elif sync_option == 'slowmotion':
pass
def query_pose(self, limb_name):
"""
Query the current pose (7 parameters) of arms.
Args:
limb_name: Left or right arm.
Return:
Pose of the queried arm.
"""
if limb_name == 'left':
pose = self.left_arm.get_current_pose().pose
if limb_name == 'right':
pose = self.right_arm.get_current_pose().pose
return pose
def query_joint(self, limb_name):
"""
Query the current joint of arms.
Args:
limb_name: Left or right arm.
Return:
Joint of the queried arm.
"""
if limb_name == 'left':
pose = self.left_arm.get_current_joint_values()
if limb_name == 'right':
pose = self.right_arm.get_current_joint_values()
return pose
def current_position(self, limb_name):
"""
Query the current pose (position) of arms.
Args:
limb_name: Left or right arm.
Return:
Pose of the queried arm.
"""
if limb_name == 'left':
pose = self.left_arm.get_current_pose().pose
if limb_name == 'right':
pose = self.right_arm.get_current_pose().pose
return [pose.position.x, pose.position.y, pose.position.z]
def current_orientation(self, limb_name):
"""
Query the current pose (orientation) of arms.
Args:
limb_name: Left or right arm.
Return:
Pose of the queried arm.
"""
if limb_name == 'left':
pose = self.left_arm.get_current_pose().pose
if limb_name == 'right':
pose = self.right_arm.get_current_pose().pose
return [pose.orientation.w, pose.orientation.x, pose.orientation.y,
pose.orientation.z]
def current_gripper_pose(self, limb_name):
"""
Query the current pose (orientation) of gripper.
End effector with transform to the gripper.
Args:
limb_name: Left or right arm.
Return:
Pose of the queried gripper (7 parameters).
"""
if limb_name == 'left':
pose = self.left_arm.get_current_pose()
if limb_name == 'right':
pose = self.right_arm.get_current_pose()
pose = self.transform_wrist_to_gripper(limb_name, pose)
return ([pose.pose.position.x, pose.pose.position.y,
pose.pose.position.z],
[pose.pose.orientation.w, pose.pose.orientation.x,
pose.pose.orientation.y, pose.pose.orientation.z])
def transform_gripper_to_wrist(self, side, gripper_target_pose):
"""
Transform a pose in side_gripper_center frame to side_wrist frame.
Args:
side: Left or right arm.
gripper_target_pose: End effector position.
Return:
New pose of the end effector.
"""
self.tros.waitForTransform('/base', side + '_gripper_center',
rospy.Time(), rospy.Duration(4))
gripper_target_pose.header.stamp = \
self.tros.getLatestCommonTime('/base', side + '_gripper_center')
p = self.tros.transformPose(side + '_gripper_center',
gripper_target_pose)
p.header.frame_id = side + '_wrist'
self.tros.waitForTransform('/base', side + '_wrist',
rospy.Time(), rospy.Duration(4))
p.header.stamp = self.tros.getLatestCommonTime('/base', side + '_wrist')
p_new = self.tros.transformPose('base', p)
return p_new
def transform_wrist_to_gripper(self, side, wrist_pose):
"""
Transform between end effector and gripper. The default end effector
is the wrist part.
Args:
side: Left or right arm.
wrist_pose: Current end effector position.
Return:
3D position of the gripper.
"""
self.tros.waitForTransform(
'/base', side + '_wrist',rospy.Time(), rospy.Duration(4))
wrist_pose.header.stamp = self.tros.getLatestCommonTime(
'/base', side + '_wrist')
p = self.tros.transformPose(side + '_wrist', wrist_pose)
p.header.frame_id = side + '_gripper_center'
self.tros.waitForTransform(
'/base', side + '_gripper_center',rospy.Time(), rospy.Duration(4))
p.header.stamp = self.tros.getLatestCommonTime(
'/base', side + '_gripper_center')
p_new = self.tros.transformPose('base', p)
return p_new
if __name__ == '__main__':
sys.exit(main())
| 37.437097 | 162 | 0.563827 | #!/usr/bin/env python
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import signal
import math
import tf
from copy import deepcopy
import transformation
def signal_handler(signal, frame):
print("Disabling robot... ")
moveit_commander.roscpp_shutdown()
print("done.")
rospy.signal_shutdown("finished")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
pose_list = \
{
'left_open': [0.6451299414163874, 0.48308154941436016, 0.688241579565718, 0.11442618882303603, 0.9926119706997465, 0.028043380270852243, 0.02901192441022871],
'right_open': [0.6462863106297506, -0.48390075862008375, 0.6859732922530206, -0.15210562530968288, 0.9881075743671162, 0.0028034494110560793, 0.02234817439712085]
}
joint_list = \
{
'left_open': [-0.04525243319091797, -1.185383652484131, -0.6304661031005859, 0.9851991598937989, 0.2657621711975098, 1.7587089713012696, 0.44638840876464847],
'right_open': [-0.024543692578125, -1.1293933537902834, 0.7673738882629395, 0.9560535249572755, -0.2922233397583008, 1.8089468420471193, -0.4793689956665039]
}
def distance():
"""
Calculate the distance between two points.
return:
Distance.
"""
return lambda a, b: math.sqrt((a.x-b.x)*(a.x-b.x)+(a.y-b.y)*+(a.y-b.y)+(a.z-b.z)*(a.z-b.z))
class MoveitInterface:
"""
A wrapper class that calls functions from the MoveIt.
"""
gripper_translation_dist = 0.22
print_flag = False
def __init__(self, t=None):
moveit_commander.roscpp_initialize([])
self.robot = moveit_commander.RobotCommander()
self.left_arm = moveit_commander.MoveGroupCommander("left_arm")
# self.left_arm.set_end_effector_link('left_gripper')
self.right_arm = moveit_commander.MoveGroupCommander("right_arm")
# self.left_arm.set_end_effector_link('right_gripper')
self.both_arms = moveit_commander.MoveGroupCommander("both_arms")
self.left_arm.set_planner_id('SBLkConfigDefault')
self.right_arm.set_planner_id('SBLkConfigDefault')
self.both_arms.set_planner_id('SBLkConfigDefault')
if t == None:
self.tros = tf.TransformListener()
else:
self.tros = t
def move_predef_position(self, limb_name, pos_name, speed=0.3,
timeout = 15.0):
"""
Move to some pre-defined pose.
Args:
limb_name: Joint name.
pos_name: refer to the dict() defined at the beginning
"""
self.move_limb(limb_name, pose_list[limb_name+'_'+pos_name], False,
speed)
def move_predef_joint_position(self, limb_name, pose_name, speed = 0.3,
timeout = 15.0):
"""
Move to some pre-defined joints.
Args:
limb_name: Both arm or left or right.
pos_name: refer to the dict() defined at the beginning
"""
if limb_name == 'both':
self.move_limb_joint(limb_name, joint_list['left_'+pose_name]+
joint_list['right_'+pose_name], speed)
else:
self.move_limb_joint(limb_name, joint_list[limb_name+'_'+pose_name],
speed)
def move_limb_height(self, limb_name, target_height_displacement,
do_displacement, speed=0.1):
"""
Move the arm vertically.
Args:
limb_name: Left or right arm.
target_height_displacement: Move offset.
do_displacement: True if move from the current position.
speed: Move speed.
Return:
"""
# Both arm not implemented.
print target_height_displacement
if limb_name == 'left':
target_pos = self.left_arm.get_current_pose().pose
elif limb_name == 'right':
target_pos = self.right_arm.get_current_pose().pose
if do_displacement:
target_height_displacement += target_pos.position.z
print [target_pos.position.x, target_pos.position.y,
target_height_displacement]
self.move_limb(limb_name, [target_pos.position.x, target_pos.position.y,
target_height_displacement])
def move_limb_joint_single(self, limb_name, joint_index, joint_position):
"""
Move a joint of the arm.
Args:
limb_name: Left or right arm.
joint_index: Joint index. There are 7 joints on each arm.
joint_position: joint angle.
Return:
"""
# Both arm not implemented.
joint_val = self.query_joint(limb_name)
joint_val[joint_index] = joint_position
self.move_limb_joint(limb_name, joint_val)
def move_limb_joint(self, limb_name, joint_positions, speed=0.1):
"""
Move the arm with 7 joints in a whole.
Args:
limb_name: Left or right arm.
joint_index: Joint index of 7. There are 7 joints on each arm.
joint_position: joint angle.
Return:
"""
if limb_name == 'both':
print joint_positions
self.both_arms.set_joint_value_target(joint_positions)
self.both_arms.go()
return
joint_pos = {limb_name+'_s0': joint_positions[0],
limb_name+'_s1': joint_positions[1],
limb_name+'_e0': joint_positions[2],
limb_name+'_e1': joint_positions[3],
limb_name+'_w0': joint_positions[4],
limb_name+'_w1': joint_positions[5],
limb_name+'_w2': joint_positions[6]}
if limb_name == 'left':
self.left_arm.set_joint_value_target(joint_pos)
self.left_arm.go()
if limb_name == 'right':
self.right_arm.set_joint_value_target(joint_pos)
self.right_arm.go()
def move_limb_position(self, limb_name, position, speed=0.1):
"""
Move a single arm by position.
Args:
limb_name: Left or right arm.
position: 3D position in terms of the robot base.
speed: Moving speed.
Return:
"""
if limb_name == 'left':
self.left_arm.set_position_target(position[0], position[1],
position[2])
self.left_arm.go()
if limb_name == 'right':
self.right_arm.set_position_target(position[0], position[1],
position[2])
self.right_arm.go()
def move_limb(self, limb_name, position, do_displacement = False,
sync_option = 'wait', speed=0.3, timeout = 15.0):
"""
Move a limb to the target position & orientation
---position (float list):
- The first 3 elements are cartesian coordinate of the target position.
- THe 4 following elements are the quaternion of the end effector
(optional)
Args:
limb_name: Left or right arm.
position: 3D position in terms of the robot base.
do_displacement: True if move from the current position.
sync_option:
speed: Moving speed.
timeout: If not reachable, time to release.
Return:
"""
if limb_name == 'left':
self.left_arm.clear_pose_targets()
pose = self.generate_pose(limb_name, position, do_displacement)
self.left_arm.set_pose_target(pose)
self.left_arm.go()
if limb_name == 'right':
self.right_arm.clear_pose_targets()
pose = self.generate_pose(limb_name, position, do_displacement)
self.right_arm.set_pose_target(pose)
self.right_arm.go()
if limb_name == 'both':
self.left_arm.clear_pose_targets()
self.right_arm.clear_pose_targets()
if len(position) == 6:
# Position only, same orientation.
pose_left = self.generate_pose(
'left', position[0:3], do_displacement)
pose_right = self.generate_pose(
'right', position[3:], do_displacement)
elif len(position) == 14: # Pose.
pose_left = self.generate_pose(
'left', position[0:7], do_displacement)
pose_right = self.generate_pose(
'right', position[7:], do_displacement)
elif len(position) == 2:
# For the case that the parameter is a tuple of two lists.
pose_left = self.generate_pose(
'left', position[0], do_displacement)
pose_right = self.generate_pose(
'right', position[1], do_displacement)
self.right_arm.set_pose_target(pose_right)
self.left_arm.set_pose_target(pose_left)
pa = self.right_arm.plan()
pb = self.left_arm.plan()
pc = self.merge_plans(pa, pb, sync_option)
self.both_arms.execute(pc)
return 1
def generate_pose(self, side, position, do_displacement=False):
"""
Generate arm pose for a target moving position.
Args:
side: Left or right arm.
position: Target moving position.
do_displacement: True if move from the current position.
Return: The pose of a list of 3 or 7 elements for the target position.
"""
if side == 'left':
limb = self.left_arm
else:
limb = self.right_arm
target_pose = limb.get_current_pose()
if do_displacement:
target_pose.pose.position.x += position[0]
target_pose.pose.position.y += position[1]
target_pose.pose.position.z += position[2]
else:
target_pose.pose.position.x = position[0]
target_pose.pose.position.y = position[1]
target_pose.pose.position.z = position[2]
if len(position) == 7:
print "size of 7! Including orientation."
target_pose.pose.orientation.w = position[3]
target_pose.pose.orientation.x = position[4]
target_pose.pose.orientation.y = position[5]
target_pose.pose.orientation.z = position[6]
target_pose = self.transform_gripper_to_wrist(side, target_pose)
return target_pose.pose
def move_cartesian_path(self, limb_name, way_points,
sync_option = 'wait'):
"""
Move the arm in cartesian path defined by a set of way points.
Args:
limb_name: Left or right arm.
way_points: A set of 3D points to move the arm.
sync_option: 'wait' if move the arm one by one.
Return:
"""
if limb_name == 'both':
assert len(way_points) == 2
pa = self.generate_cartesian_path_plan('left', way_points[0])
pb = self.generate_cartesian_path_plan('right', way_points[1])
if pa and pb: #if both are valid
plan = self.merge_plans(pa, pb, sync_option)
self.both_arms.execute(plan)
else:
print "Invalid Cartesian Path"
elif limb_name == 'left':
plan = self.generate_cartesian_path_plan(limb_name, way_points)
if plan:
self.left_arm.execute(plan)
else:
print "Invalid Cartesian Path"
exit()
elif limb_name == 'right':
plan = self.generate_cartesian_path_plan(limb_name, way_points)
if plan:
self.right_arm.execute(plan)
else:
print "Invalid Cartesian Path"
exit()
def generate_cartesian_path_plan(self, side, way_points,
eef_step = 0.01, jump_threshold = 0):
"""
Generate a plan based on a sequence of cartesian way-points
Args:
group: move group
way_points: a list of cartesian way points, can be either x,y,z
(position) or x,y,z,w,x,y,z (pose)
eef_step: end effector step constraint
jump_threshold:
Return:a MoveIt plan.
"""
if side == 'left':
group = self.left_arm
else:
group = self.right_arm
# Always start with the current pose.
way_point_poses = []
for w in way_points:
way_point_poses.append(self.generate_pose(side, w))
(plan, fraction) = group.compute_cartesian_path(
way_point_poses, # Way points to follow.
eef_step, # eef_step.
jump_threshold) # jump_threshold.
if fraction == -1: # Error.
return False
return plan
def merge_plans(self, pa, pb, sync_option='wait'):
"""
Merge two MoveIt plans.
Args:
pa: Plan a.
pb: Plan b.
sync_option: wait
Return:
Merged plan.
"""
# Merge two (left & right) plans.
self.merge_trajectories(pa.joint_trajectory, pb.joint_trajectory,
sync_option)
if len(pa.joint_trajectory.points) == 0:
return pb
else:
return pa
def merge_points(self, target, source):
"""
Merge trajectory data points.
Args:
target:
source:
Return:
"""
target.joint_names.extend(source.joint_names)
for i in range(len(target.points)):
target.points[i].positions = target.points[i].positions + \
source.points[i].positions
target.points[i].accelerations = target.points[i].accelerations + \
source.points[i].accelerations
target.points[i].velocities = target.points[i].velocities + \
source.points[i].velocities
target.points[i].effort = target.points[i].effort + \
source.points[i].effort
source.points[:] = []
return target
def merge_trajectories(self, traj_l, traj_r, sync_option='wait'):
"""
Merge two trajectories by various synchronization options.
Args:
traj_l: Left arm trajectory.
traj_r: Right arm trajectory.
sync_option: See below.
Return:
"""
#
if len(traj_l.points) < len(traj_r.points):
long = traj_r
short = traj_l
else:
long = traj_l
short = traj_r
if sync_option == 'trim' or len(short.points) == len(long.points):
# merge to shorter trajectory by trimming the longer one.
self.merge_points(short, long) # merge long to short.
elif sync_option == 'wait':
# merge to longer trajectory by waiting for the shorter one.
size_diff = len(long.points) - len(short.points)
state = deepcopy(short.points[-1])
# zero velocities & accelerations state.
state.accelerations = (0,)*len(state.accelerations)
state.velocities = (0,)*len(state.velocities)
for i in range(size_diff):
short.points.append(deepcopy(state))
assert(len(short.points) == len(long.points))
self.merge_points(long, short) #merge short to long
elif sync_option == 'fastforward':
# Merge to shorter trajectory by fast forwarding the longer one.
pass
elif sync_option == 'slowmotion':
pass
    def adjust_trajectory_speed(self, traj, target_time):
        """Stub: intended to rescale `traj` timing to `target_time`.

        Not implemented; currently a no-op.
        """
        pass
def query_pose(self, limb_name):
"""
Query the current pose (7 parameters) of arms.
Args:
limb_name: Left or right arm.
Return:
Pose of the queried arm.
"""
if limb_name == 'left':
pose = self.left_arm.get_current_pose().pose
if limb_name == 'right':
pose = self.right_arm.get_current_pose().pose
return pose
def query_joint(self, limb_name):
"""
Query the current joint of arms.
Args:
limb_name: Left or right arm.
Return:
Joint of the queried arm.
"""
if limb_name == 'left':
pose = self.left_arm.get_current_joint_values()
if limb_name == 'right':
pose = self.right_arm.get_current_joint_values()
return pose
def current_position(self, limb_name):
"""
Query the current pose (position) of arms.
Args:
limb_name: Left or right arm.
Return:
Pose of the queried arm.
"""
if limb_name == 'left':
pose = self.left_arm.get_current_pose().pose
if limb_name == 'right':
pose = self.right_arm.get_current_pose().pose
return [pose.position.x, pose.position.y, pose.position.z]
def current_orientation(self, limb_name):
"""
Query the current pose (orientation) of arms.
Args:
limb_name: Left or right arm.
Return:
Pose of the queried arm.
"""
if limb_name == 'left':
pose = self.left_arm.get_current_pose().pose
if limb_name == 'right':
pose = self.right_arm.get_current_pose().pose
return [pose.orientation.w, pose.orientation.x, pose.orientation.y,
pose.orientation.z]
def current_gripper_pose(self, limb_name):
"""
Query the current pose (orientation) of gripper.
End effector with transform to the gripper.
Args:
limb_name: Left or right arm.
Return:
Pose of the queried gripper (7 parameters).
"""
if limb_name == 'left':
pose = self.left_arm.get_current_pose()
if limb_name == 'right':
pose = self.right_arm.get_current_pose()
pose = self.transform_wrist_to_gripper(limb_name, pose)
return ([pose.pose.position.x, pose.pose.position.y,
pose.pose.position.z],
[pose.pose.orientation.w, pose.pose.orientation.x,
pose.pose.orientation.y, pose.pose.orientation.z])
    def transform_gripper_to_wrist(self, side, gripper_target_pose):
        """
        Transform a pose in the side_gripper_center frame to the side_wrist
        frame, returning the result expressed in the base frame.

        Args:
            side: Left or right arm.
            gripper_target_pose: Target pose for the gripper center.
        Return:
            New (base-frame) pose for the wrist end effector.
        """
        # Wait for the gripper-center transform and stamp the pose at the
        # latest time both frames are known.
        self.tros.waitForTransform('/base', side + '_gripper_center',
                                   rospy.Time(), rospy.Duration(4))
        gripper_target_pose.header.stamp = \
            self.tros.getLatestCommonTime('/base', side + '_gripper_center')
        # Express the target in the gripper-center frame, then relabel it as
        # a wrist-frame pose (gripper and wrist share this local offset).
        p = self.tros.transformPose(side + '_gripper_center',
                                    gripper_target_pose)
        p.header.frame_id = side + '_wrist'
        # Re-stamp against the wrist frame and map back into the base frame.
        self.tros.waitForTransform('/base', side + '_wrist',
                                   rospy.Time(), rospy.Duration(4))
        p.header.stamp = self.tros.getLatestCommonTime('/base', side + '_wrist')
        p_new = self.tros.transformPose('base', p)
        return p_new
    def transform_wrist_to_gripper(self, side, wrist_pose):
        """
        Transform a wrist-frame pose to the gripper-center frame, returning
        the result expressed in the base frame. (Inverse companion of
        transform_gripper_to_wrist; the default MoveIt end effector is the
        wrist link.)

        Args:
            side: Left or right arm.
            wrist_pose: Current end effector (wrist) pose.
        Return:
            Base-frame pose of the gripper center.
        """
        # Stamp the pose at the latest time the wrist transform is known.
        self.tros.waitForTransform(
            '/base', side + '_wrist',rospy.Time(), rospy.Duration(4))
        wrist_pose.header.stamp = self.tros.getLatestCommonTime(
            '/base', side + '_wrist')
        # Express the pose in the wrist frame, then relabel it as a
        # gripper-center pose and map it back into the base frame.
        p = self.tros.transformPose(side + '_wrist', wrist_pose)
        p.header.frame_id = side + '_gripper_center'
        self.tros.waitForTransform(
            '/base', side + '_gripper_center',rospy.Time(), rospy.Duration(4))
        p.header.stamp = self.tros.getLatestCommonTime(
            '/base', side + '_gripper_center')
        p_new = self.tros.transformPose('base', p)
        return p_new
def main():
    """Interactive console for driving the arms through MoveitInterface.

    Reads the limb name from argv[1], then loops over commands:
      q  - print the arm's current pose
      v  - re-command the current pose (checks it is reachable)
      m  - move by a relative offset  'x y z' or 'x y z w x y z'
      p  - move to an absolute pose   'x y z' or 'x y z w x y z'
      pm - preset cartesian movements (1/2 set a canned offset, 3 runs a
           named predefined position)
      jm - run a named predefined joint-space movement

    Python 2 only (print statements, raw_input).
    """
    #MOVE THE ROBOT WITH INTERACTIVE UI
    rospy.init_node("moveit_interface")
    limb = MoveitInterface()
    print_flag = False
    limb_name = sys.argv[1]
    while True:
        mode = raw_input(
            'Choose Mode: Position(p)/Move(m)/Query(q)/Preset Movment(pm)/Validate(v)')
        if mode == 'q':
            print limb.query_pose(limb_name)
            continue
        if mode == 'v':
            # Validate by commanding the arm to its own current pose.
            q_pos = limb.query_pose(limb_name).position
            q_ori = limb.query_pose(limb_name).orientation
            target_pos = [q_pos.x, q_pos.y, q_pos.z, q_ori.w,
                          q_ori.x, q_ori.y, q_ori.z]
            result = limb.move_limb(limb_name, target_pos, False, 0.3)
            if result <0:
                print "Invalid position"
                continue
            else:
                print target_pos
                continue
        # Anything other than a movement command restarts the prompt.
        if mode != 'm' and mode !='p' and mode!='pm' and mode!= 'j' and mode!='jm': continue
        incre = []
        speed = 0
        if mode == 'pm':
            select = raw_input('1. right gripper to far left. 2.right gripper to far right. 3.others')
            # Options 1/2 turn into a canned relative move ('m').
            if int(select) == 1:
                speed = 0.1
                incre = [0,0,0,-3,1,0,0]
                mode = 'm'
            if int(select) == 2:
                speed = 0.1
                incre = [0,0,0,3,1,0,0]
                mode = 'm'
            if int(select) == 3:
                speed = 0.3
                limb.move_predef_position(
                    limb_name, raw_input('movement name'), speed)
        if mode == 'jm':
            limb.move_predef_joint_position(
                limb_name, raw_input('movement name'), 0.3)
        else:
            # NOTE(review): for 'pm' option 3 the mode stays 'pm', so this
            # else-branch still prompts for coordinates after the predefined
            # move runs — confirm that fall-through is intentional.
            pos_input = raw_input('Please enter: x y z (x y z w) ')
            speed = raw_input('Speed?: \n')
            pos_input = pos_input.split(' ')
            incre = [float(n) for n in pos_input]
        if len(incre) == 3 or len(incre) == 7:
            result = 1
            if mode == 'm': result = limb.move_limb(
                limb_name, incre, True, float(speed))
            elif mode == 'p': result = limb.move_limb(
                limb_name, incre, False, float(speed))
            # elif mode == 'j': result = limb.move_joint_position(limb_name, incre, float(speed))
            else:
                continue
            if result < 0:
                print "Invalid pose, try again"
                continue
if __name__ == '__main__':
    # main() loops until interrupted; its return value (None) is passed to
    # sys.exit, which exits with status 0.
    sys.exit(main())
| 3,397 | 0 | 100 |
09fb9853e9aa2a1bb1aebfc5a5e2fef2ea69bb92 | 376 | py | Python | test.py | FRC1076/2019-wapur | e872489792e9573b7205eccde55be21705af6e3c | [
"MIT"
] | 2 | 2018-11-15T23:22:31.000Z | 2019-11-28T23:58:34.000Z | test.py | FRC1076/2019-wapur | e872489792e9573b7205eccde55be21705af6e3c | [
"MIT"
] | 1 | 2018-11-06T03:50:27.000Z | 2018-11-06T03:54:35.000Z | test.py | FRC1076/2019-wapur | e872489792e9573b7205eccde55be21705af6e3c | [
"MIT"
] | null | null | null | """
Hi!
I intentionally wrote this script sort of weird so that it will
only work in Python 3.7+, to make sure that you're using
the right Python. Don't worry if you don't understand it!
If you're curious, I'm happy to explain anything in it.
-- C
"""
from __future__ import annotations
print("Hello, ", end='World!\n')
| 17.904762 | 63 | 0.68883 | """
Hi!
I intentionally wrote this script sort of weird so that it will
only work in Python 3.7+, to make sure that you're using
the right Python. Don't worry if you don't understand it!
If you're curious, I'm happy to explain anything in it.
-- C
"""
from __future__ import annotations
print("Hello, ", end='World!\n')
def foo(x: X) -> None:
    # No-op; exists to exercise the forward-referenced annotation `X`
    # (legal here thanks to `from __future__ import annotations`).
    pass
class X:
    """Empty placeholder class, referenced (before its definition) by foo's
    annotation."""
    pass
| 10 | -4 | 46 |
8fab9a0e60252075d7e70d644b34510e8b6ba206 | 102 | py | Python | gm2m/signals.py | mikewolfd/django-gm2m | a8cecc4d6d56c83e8d9c623888f5d07cb6ad8771 | [
"MIT"
] | 24 | 2019-07-24T14:31:48.000Z | 2022-02-19T23:30:37.000Z | gm2m/signals.py | mikewolfd/django-gm2m | a8cecc4d6d56c83e8d9c623888f5d07cb6ad8771 | [
"MIT"
] | 15 | 2019-07-24T11:07:34.000Z | 2022-01-05T04:16:34.000Z | gm2m/signals.py | mikewolfd/django-gm2m | a8cecc4d6d56c83e8d9c623888f5d07cb6ad8771 | [
"MIT"
] | 20 | 2019-09-18T09:45:33.000Z | 2022-02-25T14:49:04.000Z | from django.core.signals import Signal
# Custom signal fired around deletion of gm2m-related objects; receivers
# presumably get the objects being deleted (`del_objs`) and their related
# objects (`rel_objs`) — verify against the senders.
# NOTE(review): `Signal(providing_args=...)` is deprecated and removed in
# Django 4.0; confirm the supported Django version range.
deleting = Signal(providing_args=['del_objs', 'rel_objs'])
| 25.5 | 59 | 0.754902 | from django.core.signals import Signal
deleting = Signal(providing_args=['del_objs', 'rel_objs'])
| 0 | 0 | 0 |
d4b98b76fe624baafc9de081109a4e9e4e43d434 | 12,333 | py | Python | sensorFlightTest/MCv8.py | AllenRing/ProjectTabulas | 2ad4c7d17a8c5ae3bae7f984ff599b2b5a990ffa | [
"MIT"
] | 1 | 2016-02-05T14:40:20.000Z | 2016-02-05T14:40:20.000Z | sensorFlightTest/MCv8.py | AllenRing/ProjectTabulas | 2ad4c7d17a8c5ae3bae7f984ff599b2b5a990ffa | [
"MIT"
] | 1 | 2016-02-05T14:05:43.000Z | 2016-02-05T18:10:30.000Z | sensorFlightTest/MCv8.py | AllenRing/ProjectTabulas | 2ad4c7d17a8c5ae3bae7f984ff599b2b5a990ffa | [
"MIT"
] | null | null | null | #---------------------------------------------------------------------------------------------------------------|
# Organization: AllenRing |
# -- Created by Ritch |
# |
# This program is responsible for controlling the settings of RPi pins and the speed of the attached motors. |
# It is also set up to execute other scripts (more to come). |
# |
# Scripts: |
# motorSpeedLimiter |
# This program will countinuously check the frequency and dutycycle ranges of all pins assigned with motors.|
#---------------------------------------------------------------------------------------------------------------|
import logging
import sys
import time
import pigpio
from Adafruit_BNO055 import BNO055
#---------------------------------------------------------------------------------------------------------------|
# --- Initialize Orientation Sensor ---
bno = BNO055.BNO055(serial_port='/dev/ttyAMA0', rst=18)
# Enable verbose debug logging if -v is passed as a parameter.
if len(sys.argv) == 2 and sys.argv[1].lower() == '-v':
logging.basicConfig(level=logging.DEBUG)
# Initialize the BNO055 and stop if something went wrong.
if not bno.begin():
raise RuntimeError('Failed to initialize BNO055! Is the sensor connected?')
# Print system status and self test result.
status, self_test, error = bno.get_system_status()
print('System status: {0}'.format(status))
print('Self test result (0x0F is normal): 0x{0:02X}'.format(self_test))
# Print out an error if system status is in error mode.
if status == 0x01:
print('System error: {0}'.format(error))
print('See datasheet section 4.3.59 for the meaning.')
# Print BNO055 software revision and other diagnostic data.
sw, bl, accel, mag, gyro = bno.get_revision()
print('Software version: {0}'.format(sw))
print('Bootloader version: {0}'.format(bl))
print('Accelerometer ID: 0x{0:02X}'.format(accel))
print('Magnetometer ID: 0x{0:02X}'.format(mag))
print('Gyroscope ID: 0x{0:02X}\n'.format(gyro))
#---------------------------------------------------------------------------------------------------------------|
pi = pigpio.pi()
systemOn = True
motorOneSpeed = 180
motorTwoSpeed = 180
motorThreeSpeed = 180
motorFourSpeed = 185
#---------------------
#------Functions------
#---------------------
#-----------------------------
#------Movement Fucntions-----
#-----------------------------
#-----------------------------
#------Begin the program------
#-----------------------------
#----------------------------------------
# This will prompt for the pins to modify
# Each entered pin will be initialized and have global values set to default
#----------------------------------------
print('***Connect Battery & Press ENTER to start***')
res = raw_input()
print('***Enter Pins for Each Prompted Motor***')
print('Motor 1')
res = input()
motorOne = res
print('Motor 2')
res = input()
motorTwo = res
print('Motor 3')
res = input()
motorThree = res
print('Motor 4')
res = input()
motorFour = res
#----------------------------------------
# All motors will now be initialized
# The loop-to-follow will receive inputs and then change motorspeeds accordingly
#----------------------------------------
initializeMotors(motorOne, motorTwo, motorThree, motorFour)
res = raw_input()
motorOneSpeed = 195
motorTwoSpeed = 185
motorThreeSpeed = 190
motorFourSpeed = 190
res = raw_input()
print ('System initialized and running.')
print ('Follow your reference key or press 9 to shutdown')
strength = 218 #Sets base strength of motors
OldRoll = 0
OldRoll2 = 0
OldPitch = 0
OldPitch2 = 0
RMark = 0
PMark = 0
Front = 0
Back = 0
Left = 0
Right = 0
rollV = 0
Change = 20
ACR = 0 #Acceleration Roll
TestRV = 0
motorTwoSpeed = strength
motorFourSpeed = strength
motorThreeSpeed = strength
motorOneSpeed = strength
cycling = True
try:
while cycling:
pi.set_PWM_dutycycle(motorOne, motorOneSpeed)
pi.set_PWM_dutycycle(motorTwo, motorTwoSpeed)
pi.set_PWM_dutycycle(motorThree, motorThreeSpeed)
pi.set_PWM_dutycycle(motorFour, motorFourSpeed)
print ("motorOne: %s" % (motorOneSpeed))
print ("motorTwo: %s" % (motorTwoSpeed))
print ("motorThree: %s" % (motorThreeSpeed))
print ("motorFour: %s" % (motorFourSpeed))
#print(roll)
#print(OldRoll)
print("rollV: %s" % rollV)
# Read the Euler angles for heading, roll, pitch (all in degrees).
heading, roll, pitch = bno.read_euler()
# Read the calibration status, 0=uncalibrated and 3=fully calibrated.
sys, gyro, accel, mag = bno.get_calibration_status()
# Print everything out.
print('Heading={0:0.2F} roll={1:0.2F} Pitch={2:0.2F}\tSys_cal={3} Gyro_cal={4} Accel_cal={5} Mag_cal={6}'.format(heading, roll, pitch, sys, gyro, accel, mag))
# positive roll = tilt to right
# positive pitch = nose tilt up
#=====================================================
#calculates current roll and pitch velocity
OldRollV = rollV
rollV = ((roll - OldRoll) + (OldRoll - OldRoll2)) / 2
pitchV = ((pitch - OldPitch) + (OldPitch - OldPitch2)) / 2
TestRV = rollV
if(TestRV < 0):
TestRV *= -1
if(TestRV <= 0.03125):
rollV = 0
ACR = rollV - OldRollV
print("ACR %s" % ACR)
#=====================================================
#Start of roll calculations
if(roll > 0): # tilted right
if(rollV > 0): #Drone tilting right
Right += 1
#RMark = roll
elif(rollV < 0): # tilted right and tilting left
if(rollV > -1): #not tilting fast enough
Right += 1
elif(rollV < -1):
Right += -1
elif(roll < 0): #Drone tilted left
if(rollV < 0): # tilting left
Right += -1
#RMark = roll
elif(rollV > 0): #Drone tilting right
if(rollV < 1): #not tilting fast enough
Right += -1
elif(rollV > 1): #too fast
Right += 1
#Limits the change in direction
if(Right < Change * -1):
Right = Change * -1
elif(Right > Change):
Right = Change
#=====================================================
# start of pitch calculations
if(pitch > 0): # nose tilted upward
if(pitchV > 0): # nose tilting upward
Front += -1
#PMark = pitch
elif(pitchV < 0): # if moving down
if(pitchV < -1): # too fast
Front += 1
elif(pitchV > -1): #too slow
Front += -1
elif(pitch < 0): #nose tilted down
if(pitchV < 0): #nose tilting down
Front += 1
#PMark = pitch
elif(pitchV > 0): #tilting up
if(pitchV > 1):
Front += -1
elif(pitchV < 1):
Front += 1
else:
Front = 0
#limits the change in direction
if(Front < Change * -1):
Front = Change * -1
elif(Front > Change):
Front = Change
#=====================================================
# Sets values for Left and Back
Left = Right * float(-1)
Back = Front * float(-1)
#=====================================================
# updates motor speeds
motorTwoSpeed = Back + Right + strength
motorFourSpeed = Back + Left + strength
motorThreeSpeed = Front + Right + strength
motorOneSpeed = Front + Left + strength
#=====================================================
# sets old roll and pitch values
OldRoll2 = OldRoll
OldRoll = roll
OldPitch2 = OldPitch
OldPitch = pitch
time.sleep(1/20.0) # possible use in future for delay if ethan's idea doesnt work
# End of while
# End of Try
#----------------------------------------
# When the while loop has ended, the code will proceed here
# This will shutdown all motors in increments of one, until the speed value has reached '0'
#----------------------------------------
finally:
# shut down cleanly
while (systemOn):
if motorOneSpeed > 0:
motorOneSpeed = motorOneSpeed - 1
if motorTwoSpeed > 0:
motorTwoSpeed = motorTwoSpeed - 1
if motorThreeSpeed > 0:
motorThreeSpeed = motorThreeSpeed - 1
if motorFourSpeed > 0:
motorFourSpeed = motorFourSpeed - 1
pi.set_PWM_dutycycle(motorOne, motorOneSpeed)
pi.set_PWM_dutycycle(motorTwo, motorTwoSpeed)
pi.set_PWM_dutycycle(motorThree, motorThreeSpeed)
pi.set_PWM_dutycycle(motorFour, motorFourSpeed)
if (motorOneSpeed == 0) and (motorTwoSpeed == 0) and (motorThreeSpeed == 0) and (motorFourSpeed == 0):
systemOn = False
monitoring = False
print ("System Shutdown")
# Done | 34.449721 | 167 | 0.498662 | #---------------------------------------------------------------------------------------------------------------|
# Organization: AllenRing |
# -- Created by Ritch |
# |
# This program is responsible for controlling the settings of RPi pins and the speed of the attached motors. |
# It is also set up to execute other scripts (more to come). |
# |
# Scripts: |
# motorSpeedLimiter |
# This program will countinuously check the frequency and dutycycle ranges of all pins assigned with motors.|
#---------------------------------------------------------------------------------------------------------------|
import logging
import sys
import time
import pigpio
from Adafruit_BNO055 import BNO055
#---------------------------------------------------------------------------------------------------------------|
# --- Initialize Orientation Sensor ---
bno = BNO055.BNO055(serial_port='/dev/ttyAMA0', rst=18)
# Enable verbose debug logging if -v is passed as a parameter.
if len(sys.argv) == 2 and sys.argv[1].lower() == '-v':
logging.basicConfig(level=logging.DEBUG)
# Initialize the BNO055 and stop if something went wrong.
if not bno.begin():
raise RuntimeError('Failed to initialize BNO055! Is the sensor connected?')
# Print system status and self test result.
status, self_test, error = bno.get_system_status()
print('System status: {0}'.format(status))
print('Self test result (0x0F is normal): 0x{0:02X}'.format(self_test))
# Print out an error if system status is in error mode.
if status == 0x01:
print('System error: {0}'.format(error))
print('See datasheet section 4.3.59 for the meaning.')
# Print BNO055 software revision and other diagnostic data.
sw, bl, accel, mag, gyro = bno.get_revision()
print('Software version: {0}'.format(sw))
print('Bootloader version: {0}'.format(bl))
print('Accelerometer ID: 0x{0:02X}'.format(accel))
print('Magnetometer ID: 0x{0:02X}'.format(mag))
print('Gyroscope ID: 0x{0:02X}\n'.format(gyro))
#---------------------------------------------------------------------------------------------------------------|
pi = pigpio.pi()
systemOn = True
motorOneSpeed = 180
motorTwoSpeed = 180
motorThreeSpeed = 180
motorFourSpeed = 185
#---------------------
#------Functions------
#---------------------
def initializeMotors(motorOne, motorTwo, motorThree, motorFour):
    """Configure the ESC PWM settings (400 Hz, range 2500) on every motor
    pin, then apply the current module-level duty cycles."""
    for pin in (motorOne, motorTwo, motorThree, motorFour):
        pi.set_PWM_frequency(pin, 400)
        pi.set_PWM_range(pin, 2500)
    pi.set_PWM_dutycycle(motorOne, motorOneSpeed)
    pi.set_PWM_dutycycle(motorTwo, motorTwoSpeed)
    pi.set_PWM_dutycycle(motorThree, motorThreeSpeed)
    pi.set_PWM_dutycycle(motorFour, motorFourSpeed)
def increaseSpeed(motorSpeed):
    """Return the duty cycle raised by one throttle step (5 units)."""
    return motorSpeed + 5
def decreaseSpeed(motorSpeed):
    """Return the duty cycle lowered by one throttle step (5 units)."""
    return motorSpeed - 5
def shutdownMotor(motorSpeed):
    """Wind a motor's duty cycle down to zero.

    Bug fix: the original `return` sat *inside* the while loop, so the
    function returned after a single decrement for positive input and
    returned None for inputs <= 0. The loop now runs to completion.

    Args:
        motorSpeed: Current duty-cycle value.
    Return:
        0 for any positive input; the unchanged (non-positive) input
        otherwise.
    """
    while motorSpeed > 0:
        motorSpeed = motorSpeed - 1
    return motorSpeed
#-----------------------------
#------Movement Fucntions-----
#-----------------------------
def strafeLeft():
    """Nudge the craft left: briefly boost motors two and three, then
    restore their speeds.

    Bug fixes: the speed variables are module globals, but assigning to
    them without a `global` declaration made them locals and raised
    UnboundLocalError on first read; also `sleep` was never imported —
    only `time` is — so it is now `time.sleep`.
    """
    global motorTwoSpeed, motorThreeSpeed
    motorThreeSpeed = increaseSpeed(motorThreeSpeed)
    motorTwoSpeed = increaseSpeed(motorTwoSpeed)
    time.sleep(.5)
    motorThreeSpeed = decreaseSpeed(motorThreeSpeed)
    motorTwoSpeed = decreaseSpeed(motorTwoSpeed)
def strafeRight():
    """Nudge the craft right: briefly boost motors one and four, then
    restore their speeds.

    Bug fixes: added the missing `global` declaration (assignment made the
    names locals, raising UnboundLocalError) and replaced the undefined
    `sleep` with `time.sleep`.
    """
    global motorOneSpeed, motorFourSpeed
    motorOneSpeed = increaseSpeed(motorOneSpeed)
    motorFourSpeed = increaseSpeed(motorFourSpeed)
    time.sleep(.5)
    motorOneSpeed = decreaseSpeed(motorOneSpeed)
    motorFourSpeed = decreaseSpeed(motorFourSpeed)
def moveForward():
    """Nudge the craft forward: briefly boost motors two and four, then
    restore their speeds.

    Bug fixes: added the missing `global` declaration (assignment made the
    names locals, raising UnboundLocalError); replaced the undefined
    `sleep` with `time.sleep`; fixed the `motorFourpeed` typo that left
    motor four's speed never restored.
    """
    global motorTwoSpeed, motorFourSpeed
    motorFourSpeed = increaseSpeed(motorFourSpeed)
    motorTwoSpeed = increaseSpeed(motorTwoSpeed)
    time.sleep(.5)
    motorFourSpeed = decreaseSpeed(motorFourSpeed)
    motorTwoSpeed = decreaseSpeed(motorTwoSpeed)
def moveBackwards():
    """Nudge the craft backwards: briefly boost motors one and three, then
    restore their speeds.

    Bug fixes: added the missing `global` declaration (assignment made the
    names locals, raising UnboundLocalError); replaced the undefined
    `sleep` with `time.sleep`; the last line restored motor one from
    `decreaseSpeed(motorTwoSpeed)` — it now decreases motorOneSpeed.
    """
    global motorOneSpeed, motorThreeSpeed
    motorOneSpeed = increaseSpeed(motorOneSpeed)
    motorThreeSpeed = increaseSpeed(motorThreeSpeed)
    time.sleep(.5)
    motorThreeSpeed = decreaseSpeed(motorThreeSpeed)
    motorOneSpeed = decreaseSpeed(motorOneSpeed)
#-----------------------------
#------Begin the program------
#-----------------------------
#----------------------------------------
# This will prompt for the pins to modify
# Each entered pin will be initialized and have global values set to default
#----------------------------------------
print('***Connect Battery & Press ENTER to start***')
res = raw_input()
print('***Enter Pins for Each Prompted Motor***')
print('Motor 1')
res = input()
motorOne = res
print('Motor 2')
res = input()
motorTwo = res
print('Motor 3')
res = input()
motorThree = res
print('Motor 4')
res = input()
motorFour = res
#----------------------------------------
# All motors will now be initialized
# The loop-to-follow will receive inputs and then change motorspeeds accordingly
#----------------------------------------
initializeMotors(motorOne, motorTwo, motorThree, motorFour)
res = raw_input()
motorOneSpeed = 195
motorTwoSpeed = 185
motorThreeSpeed = 190
motorFourSpeed = 190
res = raw_input()
print ('System initialized and running.')
print ('Follow your reference key or press 9 to shutdown')
strength = 218 #Sets base strength of motors
OldRoll = 0
OldRoll2 = 0
OldPitch = 0
OldPitch2 = 0
RMark = 0
PMark = 0
Front = 0
Back = 0
Left = 0
Right = 0
rollV = 0
Change = 20
ACR = 0 #Acceleration Roll
TestRV = 0
motorTwoSpeed = strength
motorFourSpeed = strength
motorThreeSpeed = strength
motorOneSpeed = strength
cycling = True
try:
while cycling:
pi.set_PWM_dutycycle(motorOne, motorOneSpeed)
pi.set_PWM_dutycycle(motorTwo, motorTwoSpeed)
pi.set_PWM_dutycycle(motorThree, motorThreeSpeed)
pi.set_PWM_dutycycle(motorFour, motorFourSpeed)
print ("motorOne: %s" % (motorOneSpeed))
print ("motorTwo: %s" % (motorTwoSpeed))
print ("motorThree: %s" % (motorThreeSpeed))
print ("motorFour: %s" % (motorFourSpeed))
#print(roll)
#print(OldRoll)
print("rollV: %s" % rollV)
# Read the Euler angles for heading, roll, pitch (all in degrees).
heading, roll, pitch = bno.read_euler()
# Read the calibration status, 0=uncalibrated and 3=fully calibrated.
sys, gyro, accel, mag = bno.get_calibration_status()
# Print everything out.
print('Heading={0:0.2F} roll={1:0.2F} Pitch={2:0.2F}\tSys_cal={3} Gyro_cal={4} Accel_cal={5} Mag_cal={6}'.format(heading, roll, pitch, sys, gyro, accel, mag))
# positive roll = tilt to right
# positive pitch = nose tilt up
#=====================================================
#calculates current roll and pitch velocity
OldRollV = rollV
rollV = ((roll - OldRoll) + (OldRoll - OldRoll2)) / 2
pitchV = ((pitch - OldPitch) + (OldPitch - OldPitch2)) / 2
TestRV = rollV
if(TestRV < 0):
TestRV *= -1
if(TestRV <= 0.03125):
rollV = 0
ACR = rollV - OldRollV
print("ACR %s" % ACR)
#=====================================================
#Start of roll calculations
if(roll > 0): # tilted right
if(rollV > 0): #Drone tilting right
Right += 1
#RMark = roll
elif(rollV < 0): # tilted right and tilting left
if(rollV > -1): #not tilting fast enough
Right += 1
elif(rollV < -1):
Right += -1
elif(roll < 0): #Drone tilted left
if(rollV < 0): # tilting left
Right += -1
#RMark = roll
elif(rollV > 0): #Drone tilting right
if(rollV < 1): #not tilting fast enough
Right += -1
elif(rollV > 1): #too fast
Right += 1
#Limits the change in direction
if(Right < Change * -1):
Right = Change * -1
elif(Right > Change):
Right = Change
#=====================================================
# start of pitch calculations
if(pitch > 0): # nose tilted upward
if(pitchV > 0): # nose tilting upward
Front += -1
#PMark = pitch
elif(pitchV < 0): # if moving down
if(pitchV < -1): # too fast
Front += 1
elif(pitchV > -1): #too slow
Front += -1
elif(pitch < 0): #nose tilted down
if(pitchV < 0): #nose tilting down
Front += 1
#PMark = pitch
elif(pitchV > 0): #tilting up
if(pitchV > 1):
Front += -1
elif(pitchV < 1):
Front += 1
else:
Front = 0
#limits the change in direction
if(Front < Change * -1):
Front = Change * -1
elif(Front > Change):
Front = Change
#=====================================================
# Sets values for Left and Back
Left = Right * float(-1)
Back = Front * float(-1)
#=====================================================
# updates motor speeds
motorTwoSpeed = Back + Right + strength
motorFourSpeed = Back + Left + strength
motorThreeSpeed = Front + Right + strength
motorOneSpeed = Front + Left + strength
#=====================================================
# sets old roll and pitch values
OldRoll2 = OldRoll
OldRoll = roll
OldPitch2 = OldPitch
OldPitch = pitch
time.sleep(1/20.0) # possible use in future for delay if ethan's idea doesnt work
# End of while
# End of Try
#----------------------------------------
# When the while loop has ended, the code will proceed here
# This will shutdown all motors in increments of one, until the speed value has reached '0'
#----------------------------------------
finally:
# shut down cleanly
while (systemOn):
if motorOneSpeed > 0:
motorOneSpeed = motorOneSpeed - 1
if motorTwoSpeed > 0:
motorTwoSpeed = motorTwoSpeed - 1
if motorThreeSpeed > 0:
motorThreeSpeed = motorThreeSpeed - 1
if motorFourSpeed > 0:
motorFourSpeed = motorFourSpeed - 1
pi.set_PWM_dutycycle(motorOne, motorOneSpeed)
pi.set_PWM_dutycycle(motorTwo, motorTwoSpeed)
pi.set_PWM_dutycycle(motorThree, motorThreeSpeed)
pi.set_PWM_dutycycle(motorFour, motorFourSpeed)
if (motorOneSpeed == 0) and (motorTwoSpeed == 0) and (motorThreeSpeed == 0) and (motorFourSpeed == 0):
systemOn = False
monitoring = False
print ("System Shutdown")
# Done | 1,683 | 0 | 200 |
98f8a7366a54f5a00a6a3578b72a35407085c33f | 739 | py | Python | code/split_output_manifest.py | linuxus/ground-truth | ff849233187301b2c19040077f2802128503fc61 | [
"MIT"
] | null | null | null | code/split_output_manifest.py | linuxus/ground-truth | ff849233187301b2c19040077f2802128503fc61 | [
"MIT"
] | null | null | null | code/split_output_manifest.py | linuxus/ground-truth | ff849233187301b2c19040077f2802128503fc61 | [
"MIT"
] | null | null | null | import os
import json
import random
import time
import numpy as np
# Load every labeled record from the Ground Truth output manifest
# (one JSON object per line).
with open('../manifest/output.manifest', 'r') as manifest_file:
    output = [json.loads(record) for record in manifest_file]
# Shuffle output in place, then carve off an 80/20 train/validation split.
np.random.shuffle(output)
dataset_size = len(output)
train_test_split_index = int(round(dataset_size * 0.8))
train_data = output[:train_test_split_index]
validation_data = output[train_test_split_index:]
# Write the training split back out as a JSON-lines manifest, counting rows.
num_training_samples = 0
with open('train.manifest', 'w') as out:
    for record in train_data:
        out.write(json.dumps(record))
        out.write('\n')
        num_training_samples += 1
with open('validation.manifest', 'w') as f:
for line in validation_data:
f.write(json.dumps(line))
f.write('\n') | 25.482759 | 57 | 0.688769 | import os
import json
import random
import time
import numpy as np
with open('../manifest/output.manifest', 'r') as f:
output = [json.loads(line) for line in f.readlines()]
# Shuffle output in place.
np.random.shuffle(output)
dataset_size = len(output)
train_test_split_index = round(dataset_size*0.8)
train_data = output[:int(train_test_split_index)]
validation_data = output[int(train_test_split_index):]
num_training_samples = 0
with open('train.manifest', 'w') as f:
for line in train_data:
f.write(json.dumps(line))
f.write('\n')
num_training_samples += 1
with open('validation.manifest', 'w') as f:
for line in validation_data:
f.write(json.dumps(line))
f.write('\n') | 0 | 0 | 0 |
a001ce47dfba472c5f1d59e9b2f19b59e9e3dbad | 645 | py | Python | exercicios-python/ex037.py | DaviStalleiken/codigos-de-desafios | e9ec388022b5f4bb2a5ab170c17969154ceaece3 | [
"MIT"
] | null | null | null | exercicios-python/ex037.py | DaviStalleiken/codigos-de-desafios | e9ec388022b5f4bb2a5ab170c17969154ceaece3 | [
"MIT"
] | null | null | null | exercicios-python/ex037.py | DaviStalleiken/codigos-de-desafios | e9ec388022b5f4bb2a5ab170c17969154ceaece3 | [
"MIT"
] | null | null | null | num = int(input('Digite um número inteiro: '))
print('''Escolha uma das bases para convertê-lo:
[1] Converter para \033[34mBinário\033[m
[2] Convertar para \033[34mOctal\033[m
[3] Converter para \033[34mHexadecimal\033[m ''')
opçao = int(input('Sua opção: '))
# Dispatch table: menu option -> (base name shown to the user, converter).
bases = {1: ('Binário', bin), 2: ('Octal', oct), 3: ('Hexadecimal', hex)}
if opçao in bases:
    nome, converte = bases[opçao]
    # Slice off Python's '0b'/'0o'/'0x' prefix before printing.
    print(f'{num} convertido para \033[31m{nome}\033[m é igual a {converte(num)[2:]}')
else:
    print('A opção escolhida é \033[31minválida\033[m, selecione 1, 2 ou 3.')
| 43 | 86 | 0.672868 | num = int(input('Digite um número inteiro: '))
print('''Escolha uma das bases para convertê-lo:
[1] Converter para \033[34mBinário\033[m
[2] Convertar para \033[34mOctal\033[m
[3] Converter para \033[34mHexadecimal\033[m ''')
opçao = int(input('Sua opção: '))
if opçao == 1:
print(f'{num} convertido para \033[31mBinário\033[m é igual a {bin(num)[2:]}')
elif opçao == 2:
print(f'{num} convertido para \033[31mOctal\033[m é igual a {oct(num)[2:]}')
elif opçao == 3:
print(f'{num} convertido para \033[31mHexadecimal\033[m é igual a {hex(num)[2:]}')
else:
print('A opção escolhida é \033[31minválida\033[m, selecione 1, 2 ou 3.')
| 0 | 0 | 0 |
e0169230f61f2658c873bd2aba623b6a93fada0c | 136 | py | Python | src/zuthaka/backendapi/authentication.py | justinforbes/zuthaka | 26b4d37c6e280ff16858f37882752e9d15eac817 | [
"BSD-3-Clause"
] | 129 | 2021-08-05T21:10:35.000Z | 2022-03-08T06:38:50.000Z | src/zuthaka/backendapi/authentication.py | justinforbes/zuthaka | 26b4d37c6e280ff16858f37882752e9d15eac817 | [
"BSD-3-Clause"
] | 2 | 2021-08-20T06:11:16.000Z | 2021-09-08T03:25:09.000Z | src/zuthaka/backendapi/authentication.py | justinforbes/zuthaka | 26b4d37c6e280ff16858f37882752e9d15eac817 | [
"BSD-3-Clause"
] | 16 | 2021-08-06T01:01:20.000Z | 2022-02-02T14:19:17.000Z | from rest_framework.authentication import TokenAuthentication
| 22.666667 | 61 | 0.838235 | from rest_framework.authentication import TokenAuthentication
class BearerAuthentication(TokenAuthentication):
    """DRF ``TokenAuthentication`` variant that accepts the ``Bearer``
    keyword in the ``Authorization`` header instead of the default ``Token``."""
    keyword = "Bearer"
| 0 | 50 | 23 |
5712f5578bd0fd9af62c545e2b778e5c098c9c56 | 1,189 | py | Python | ymir/backend/src/pymir-app/tests/api/test_users.py | under-chaos/ymir | 83e98186b23429e6027b187cdade247f5f93e5de | [
"Apache-2.0"
] | 1 | 2022-01-12T03:12:47.000Z | 2022-01-12T03:12:47.000Z | ymir/backend/src/pymir-app/tests/api/test_users.py | under-chaos/ymir | 83e98186b23429e6027b187cdade247f5f93e5de | [
"Apache-2.0"
] | null | null | null | ymir/backend/src/pymir-app/tests/api/test_users.py | under-chaos/ymir | 83e98186b23429e6027b187cdade247f5f93e5de | [
"Apache-2.0"
] | null | null | null | from typing import Dict
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
from app.config import settings
from tests.utils.utils import random_email, random_lower_string
| 33.027778 | 88 | 0.718251 | from typing import Dict
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
from app.config import settings
from tests.utils.utils import random_email, random_lower_string
def test_get_users_normal_user_me(
    client: TestClient, normal_user_token_headers: Dict[str, str]
) -> None:
    """GET /users/me returns the non-admin, non-deleted test user's profile."""
    r = client.get(f"{settings.API_V1_STR}/users/me", headers=normal_user_token_headers)
    # API responses wrap the payload in a "result" envelope.
    current_user = r.json()["result"]
    assert current_user
    assert current_user["is_deleted"] is False
    assert current_user["is_admin"] is False
    assert current_user["email"] == settings.EMAIL_TEST_USER
def test_create_user_new_email(
    client: TestClient, admin_token_headers: Dict, db: Session
) -> None:
    """POST /users/ with a fresh email creates a user an admin can retrieve.

    NOTE(review): the ``db`` fixture is never referenced in the body;
    presumably it is requested for its session/cleanup side effects — confirm.
    """
    email = random_email()
    password = random_lower_string()
    data = {"email": email, "password": password}
    # Creation request is sent without auth headers; retrieval below uses admin auth.
    r = client.post(f"{settings.API_V1_STR}/users/", json=data)
    assert 200 <= r.status_code < 300
    created_user = r.json()["result"]
    user = client.get(
        f"{settings.API_V1_STR}/users/{created_user['id']}",
        headers=admin_token_headers,
    )
    assert user
    assert user.json()["result"]["email"] == created_user["email"]
| 942 | 0 | 46 |
69d827e655244788f40f241b562e81329d616cac | 2,019 | py | Python | examples/python/simple_scene/simple_scene.py | LightEngineProject/light-engine | 5a343dd0b4c58ff77e29c1c31ff8e7d7d16dce66 | [
"MIT"
] | null | null | null | examples/python/simple_scene/simple_scene.py | LightEngineProject/light-engine | 5a343dd0b4c58ff77e29c1c31ff8e7d7d16dce66 | [
"MIT"
] | 2 | 2019-02-24T16:52:34.000Z | 2019-02-28T10:41:22.000Z | examples/python/simple_scene/simple_scene.py | LightEngineProjects/light-engine | 5a343dd0b4c58ff77e29c1c31ff8e7d7d16dce66 | [
"MIT"
] | null | null | null | from random import uniform
import lepy
from PySide2Wrapper.PySide2Wrapper.window import MainWindow
from PySide2Wrapper.PySide2Wrapper.widget import OpenGLWidget
from PySide2Wrapper.PySide2Wrapper.app import Application
if __name__ == "__main__":
    # Wire the SimpleScene lifecycle/input callbacks into an OpenGL widget
    # and show it inside a fixed-size main window.
    simple_scene = SimpleScene()
    app = Application()
    gl_widget = OpenGLWidget(simple_scene.init, simple_scene.resize, simple_scene.draw)
    gl_widget.set_mouse_press_callback(simple_scene.process_mouse_press)
    gl_widget.set_mouse_move_callback(simple_scene.process_cursore_movement)
    gl_widget.set_wheel_scroll_event(simple_scene.process_wheel)
    main_window = MainWindow("Simple Scene Example")
    main_window.add_widget(gl_widget, need_stretch=False)
    main_window.resize(800, 700)
    main_window.move(100, 100)
    main_window.show()
    # Blocks until the Qt event loop exits.
    app.run()
| 32.564516 | 114 | 0.685983 | from random import uniform
import lepy
from PySide2Wrapper.PySide2Wrapper.window import MainWindow
from PySide2Wrapper.PySide2Wrapper.widget import OpenGLWidget
from PySide2Wrapper.PySide2Wrapper.app import Application
class SimpleScene:
    """Minimal lepy example scene: 1000 randomly placed, randomly colored
    points with a mouse-driven user camera."""
    def __init__(self):
        # Engine and camera are created lazily in init(), which the GL widget
        # invokes once the OpenGL context exists.
        self.engine = None
        self.user_camera = None
    def init(self):
        """Create engine/frame/scene and populate the scene with random points."""
        self.engine = lepy.Engine()
        frame = lepy.Frame()
        scene = lepy.Scene()
        frame.add_scene(scene)
        self.engine.add_frame(frame)
        self.user_camera = lepy.UserMouseCamera(scene.get_camera())
        frame.set_background_color(lepy.Vec3(0, 0, 0))
        scene.get_camera().look_at(lepy.Vec3(0, 0, 0))
        # Positions are uniform in [-0.5, 0.5]^3, colors uniform in [0, 1]^3.
        for i in range(1000):
            obj = lepy.BuiltinObjects.point(lepy.Vec3(uniform(-0.5, 0.5), uniform(-0.5, 0.5), uniform(-0.5, 0.5)),
                                            lepy.Vec3(uniform(0, 1), uniform(0, 1), uniform(0, 1)))
            scene.add_object(obj)
    def resize(self, w, h):
        """Forward viewport resize (pixels) to the engine."""
        self.engine.resize(w, h)
    def draw(self):
        """Redraw the current frame."""
        self.engine.redraw()
    def process_mouse_press(self, x, y, is_left, is_pressed):
        """Forward mouse button events to the user camera."""
        self.user_camera.process_mouse_press(x, y, is_left, is_pressed)
    def process_cursore_movement(self, x, y):
        """Forward cursor movement to the user camera (name kept as-is; callers use it)."""
        self.user_camera.process_cursore_movement(x, y)
    def process_wheel(self, scrolls_count):
        """Forward wheel scrolling (zoom) to the user camera."""
        self.user_camera.process_wheel(scrolls_count)
if __name__ == "__main__":
    # Wire the SimpleScene lifecycle/input callbacks into an OpenGL widget
    # and show it inside a fixed-size main window.
    simple_scene = SimpleScene()
    app = Application()
    gl_widget = OpenGLWidget(simple_scene.init, simple_scene.resize, simple_scene.draw)
    gl_widget.set_mouse_press_callback(simple_scene.process_mouse_press)
    gl_widget.set_mouse_move_callback(simple_scene.process_cursore_movement)
    gl_widget.set_wheel_scroll_event(simple_scene.process_wheel)
    main_window = MainWindow("Simple Scene Example")
    main_window.add_widget(gl_widget, need_stretch=False)
    main_window.resize(800, 700)
    main_window.move(100, 100)
    main_window.show()
    # Blocks until the Qt event loop exits.
    app.run()
| 985 | -3 | 211 |
767f2421702adb529b5f2bf1c70e81884cc41af1 | 140 | py | Python | manage_web.py | zakharovadaria/receipts | a1ac0936d8df4191c5500b605341e6380276595d | [
"MIT"
] | 2 | 2020-02-01T17:42:23.000Z | 2020-02-01T18:09:00.000Z | manage_web.py | zakharovadaria/receipts | a1ac0936d8df4191c5500b605341e6380276595d | [
"MIT"
] | null | null | null | manage_web.py | zakharovadaria/receipts | a1ac0936d8df4191c5500b605341e6380276595d | [
"MIT"
] | null | null | null | from dotenv import load_dotenv
load_dotenv()
from app.web import create_app
app = create_app()
if __name__ == '__main__':
app.run()
| 12.727273 | 30 | 0.721429 | from dotenv import load_dotenv
# Populate os.environ from .env BEFORE importing the app package, so any
# module-level configuration in app.web sees the variables.
load_dotenv()
from app.web import create_app
app = create_app()
if __name__ == '__main__':
    # Development entry point: run the built-in server.
    app.run()
| 0 | 0 | 0 |
8f56893a2eca8a9c660dd465aecbdf31e87e9548 | 664 | py | Python | app/src/main/python/a.py | piyushagr00786/Document-Scanner | d9ace813badbffe9af3c3e0627bf00dc0a3ebc50 | [
"MIT"
] | null | null | null | app/src/main/python/a.py | piyushagr00786/Document-Scanner | d9ace813badbffe9af3c3e0627bf00dc0a3ebc50 | [
"MIT"
] | null | null | null | app/src/main/python/a.py | piyushagr00786/Document-Scanner | d9ace813badbffe9af3c3e0627bf00dc0a3ebc50 | [
"MIT"
] | null | null | null |
import numpy as np
import base64
from PIL import Image
import cv2
#from StringIO import StringIO
import numpy as np
import io
from io import BytesIO
# if(img):
#img=cv2.imread(np1,cv2.IMREAD_UNCHANGED)
| 17.025641 | 92 | 0.698795 |
import numpy as np
import base64
from PIL import Image
import cv2
#from StringIO import StringIO
import numpy as np
import io
from io import BytesIO
def aa(bmp):
    """Binarize a base64-encoded image and return it as a base64 PNG string.

    The image is decoded, converted to grayscale, adaptively thresholded
    (Gaussian, 115x115 neighbourhood, constant 3) and re-encoded as PNG.

    Parameters
    ----------
    bmp : str or bytes
        Base64-encoded image data in any format cv2.imdecode understands.

    Returns
    -------
    str
        Base64-encoded PNG of the thresholded image.
    """
    raw = base64.b64decode(bmp)
    # np.fromstring is deprecated for binary input (emits DeprecationWarning);
    # np.frombuffer is the supported, zero-copy equivalent.
    np_data = np.frombuffer(raw, np.uint8)
    img = cv2.imdecode(np_data, cv2.IMREAD_UNCHANGED)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    binarized = cv2.adaptiveThreshold(
        gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 3)
    buffer = io.BytesIO()
    Image.fromarray(binarized).save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode('utf-8')
# if(img):
#img=cv2.imread(np1,cv2.IMREAD_UNCHANGED)
| 426 | 0 | 23 |
e62a17e95e6e39bf9f63dc1a369e8d2b82925165 | 5,298 | py | Python | st2api/st2api/controllers/exp/actionalias.py | UbuntuEvangelist/st2 | 36af04f2caa03b396fb8ab00fd6d700e827fda8d | [
"Apache-2.0"
] | 1 | 2020-11-21T10:11:25.000Z | 2020-11-21T10:11:25.000Z | st2api/st2api/controllers/exp/actionalias.py | UbuntuEvangelist/st2 | 36af04f2caa03b396fb8ab00fd6d700e827fda8d | [
"Apache-2.0"
] | 1 | 2015-06-08T15:27:11.000Z | 2015-06-08T15:27:11.000Z | st2api/st2api/controllers/exp/actionalias.py | UbuntuEvangelist/st2 | 36af04f2caa03b396fb8ab00fd6d700e827fda8d | [
"Apache-2.0"
] | 13 | 2017-01-12T11:07:20.000Z | 2019-04-19T09:55:49.000Z | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pecan
import six
from mongoengine import ValidationError
from st2api.controllers import resource
from st2common import log as logging
from st2common.exceptions.apivalidation import ValueValidationException
from st2common.exceptions.db import StackStormDBObjectConflictError
from st2common.models.api.action import ActionAliasAPI
from st2common.persistence.actionalias import ActionAlias
from st2common.models.api.base import jsexpose
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class ActionAliasController(resource.ContentPackResourceController):
    """
    Implements the RESTful interface for ActionAliases.
    """
    # Model/persistence hooks consumed by the generic resource controller.
    model = ActionAliasAPI
    access = ActionAlias
    supported_filters = {
        'name': 'name',
        'pack': 'pack'
    }
    query_options = {
        'sort': ['pack', 'name']
    }
    @jsexpose(body_cls=ActionAliasAPI, status_code=http_client.CREATED)
    def post(self, action_alias):
        """
        Create a new ActionAlias.
        Handles requests:
            POST /actionalias/
        """
        try:
            action_alias_db = ActionAliasAPI.to_model(action_alias)
            LOG.debug('/actionalias/ POST verified ActionAliasAPI and formulated ActionAliasDB=%s',
                      action_alias_db)
            action_alias_db = ActionAlias.add_or_update(action_alias_db)
        except (ValidationError, ValueError, ValueValidationException) as e:
            LOG.exception('Validation failed for action alias data=%s.', action_alias)
            pecan.abort(http_client.BAD_REQUEST, str(e))
            return
        except StackStormDBObjectConflictError as e:
            # Duplicate alias: surface the conflicting object's id to the client.
            LOG.warn('ActionAlias creation of %s failed with uniqueness conflict.', action_alias,
                     exc_info=True)
            pecan.abort(http_client.CONFLICT, str(e), body={'conflict-id': e.conflict_id})
            return
        extra = {'action_alias_db': action_alias_db}
        LOG.audit('Action alias created. ActionAlias.id=%s' % (action_alias_db.id), extra=extra)
        action_alias_api = ActionAliasAPI.from_model(action_alias_db)
        return action_alias_api
    # NOTE(review): two @jsexpose decorators are stacked on delete(); the first
    # (body_cls=ActionAliasAPI) looks like a leftover from a removed PUT
    # handler -- confirm before relying on it.
    @jsexpose(arg_types=[str], body_cls=ActionAliasAPI)
    @jsexpose(arg_types=[str], status_code=http_client.NO_CONTENT)
    def delete(self, action_alias_ref_or_id):
        """
        Delete an action alias.
        Handles requests:
            DELETE /actionalias/1
        """
        action_alias_db = self._get_by_ref_or_id(ref_or_id=action_alias_ref_or_id)
        LOG.debug('DELETE /actionalias/ lookup with id=%s found object: %s', action_alias_ref_or_id,
                  action_alias_db)
        try:
            ActionAlias.delete(action_alias_db)
        except Exception as e:
            LOG.exception('Database delete encountered exception during delete of id="%s".',
                          action_alias_ref_or_id)
            pecan.abort(http_client.INTERNAL_SERVER_ERROR, str(e))
            return
        extra = {'action_alias_db': action_alias_db}
        LOG.audit('Action alias deleted. ActionAlias.id=%s.' % (action_alias_db.id), extra=extra)
| 42.725806 | 100 | 0.687618 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pecan
import six
from mongoengine import ValidationError
from st2api.controllers import resource
from st2common import log as logging
from st2common.exceptions.apivalidation import ValueValidationException
from st2common.exceptions.db import StackStormDBObjectConflictError
from st2common.models.api.action import ActionAliasAPI
from st2common.persistence.actionalias import ActionAlias
from st2common.models.api.base import jsexpose
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class ActionAliasController(resource.ContentPackResourceController):
    """
    Implements the RESTful interface for ActionAliases.
    """
    # Model/persistence hooks consumed by the generic resource controller.
    model = ActionAliasAPI
    access = ActionAlias
    supported_filters = {
        'name': 'name',
        'pack': 'pack'
    }
    query_options = {
        'sort': ['pack', 'name']
    }

    @jsexpose(body_cls=ActionAliasAPI, status_code=http_client.CREATED)
    def post(self, action_alias):
        """
        Create a new ActionAlias.

        Handles requests:
            POST /actionalias/
        """
        try:
            action_alias_db = ActionAliasAPI.to_model(action_alias)
            LOG.debug('/actionalias/ POST verified ActionAliasAPI and formulated ActionAliasDB=%s',
                      action_alias_db)
            action_alias_db = ActionAlias.add_or_update(action_alias_db)
        except (ValidationError, ValueError, ValueValidationException) as e:
            LOG.exception('Validation failed for action alias data=%s.', action_alias)
            pecan.abort(http_client.BAD_REQUEST, str(e))
            return
        except StackStormDBObjectConflictError as e:
            # Duplicate alias: surface the conflicting object's id to the client.
            LOG.warn('ActionAlias creation of %s failed with uniqueness conflict.', action_alias,
                     exc_info=True)
            pecan.abort(http_client.CONFLICT, str(e), body={'conflict-id': e.conflict_id})
            return
        extra = {'action_alias_db': action_alias_db}
        LOG.audit('Action alias created. ActionAlias.id=%s' % (action_alias_db.id), extra=extra)
        action_alias_api = ActionAliasAPI.from_model(action_alias_db)
        return action_alias_api

    @jsexpose(arg_types=[str], body_cls=ActionAliasAPI)
    def put(self, action_alias_ref_or_id, action_alias):
        """
        Update an existing action alias. The id in the URI is authoritative;
        a mismatching id in the payload is discarded with a warning.

        Handles requests:
            PUT /actionalias/1
        """
        action_alias_db = self._get_by_ref_or_id(ref_or_id=action_alias_ref_or_id)
        LOG.debug('PUT /actionalias/ lookup with id=%s found object: %s', action_alias_ref_or_id,
                  action_alias_db)
        try:
            # Bug fix: the original used `action_alias.id is not ''` -- an
            # identity comparison against a string literal, which depends on
            # CPython string interning and raises a SyntaxWarning on modern
            # interpreters. A truthiness check expresses the intent: warn
            # only when a non-empty, mismatching id was supplied.
            if action_alias.id and action_alias.id != action_alias_ref_or_id:
                LOG.warning('Discarding mismatched id=%s found in payload and using uri_id=%s.',
                            action_alias.id, action_alias_ref_or_id)
            old_action_alias_db = action_alias_db
            action_alias_db = ActionAliasAPI.to_model(action_alias)
            action_alias_db.id = action_alias_ref_or_id
            action_alias_db = ActionAlias.add_or_update(action_alias_db)
        except (ValidationError, ValueError) as e:
            LOG.exception('Validation failed for action alias data=%s', action_alias)
            pecan.abort(http_client.BAD_REQUEST, str(e))
            return
        extra = {'old_action_alias_db': old_action_alias_db, 'new_action_alias_db': action_alias_db}
        LOG.audit('Action alias updated. ActionAlias.id=%s.' % (action_alias_db.id), extra=extra)
        action_alias_api = ActionAliasAPI.from_model(action_alias_db)
        return action_alias_api

    @jsexpose(arg_types=[str], status_code=http_client.NO_CONTENT)
    def delete(self, action_alias_ref_or_id):
        """
        Delete an action alias.

        Handles requests:
            DELETE /actionalias/1
        """
        action_alias_db = self._get_by_ref_or_id(ref_or_id=action_alias_ref_or_id)
        LOG.debug('DELETE /actionalias/ lookup with id=%s found object: %s', action_alias_ref_or_id,
                  action_alias_db)
        try:
            ActionAlias.delete(action_alias_db)
        except Exception as e:
            LOG.exception('Database delete encountered exception during delete of id="%s".',
                          action_alias_ref_or_id)
            pecan.abort(http_client.INTERNAL_SERVER_ERROR, str(e))
            return
        extra = {'action_alias_db': action_alias_db}
        LOG.audit('Action alias deleted. ActionAlias.id=%s.' % (action_alias_db.id), extra=extra)
66de7b576236c5ad0aa8c5332d138cfa2e2d1325 | 3,989 | py | Python | rogal/term/ansi.py | kosciak/ecs-rogal | d553104e0ea350d11272d274a900419620b9389e | [
"MIT"
] | 4 | 2021-01-23T13:25:46.000Z | 2021-03-19T03:08:05.000Z | rogal/term/ansi.py | kosciak/ecs-rogal | d553104e0ea350d11272d274a900419620b9389e | [
"MIT"
] | null | null | null | rogal/term/ansi.py | kosciak/ecs-rogal | d553104e0ea350d11272d274a900419620b9389e | [
"MIT"
] | null | null | null | import sys
from .escape_seq import csi
from .escape_seq import CSI, SGR, ColorsMode, Color
"""Rudimentary ANSI support.
See: https://en.wikipedia.org/wiki/ANSI_escape_code
"""
def cursor_position(n=1, m=1):
    """Move cursor to row n, column m (1-indexed from top-left)."""
    # CUP (Cursor Position): both coordinates are 1-based per the ANSI standard.
    return csi(CSI.CUP, n, m)
| 20.668394 | 67 | 0.607671 | import sys
from .escape_seq import csi
from .escape_seq import CSI, SGR, ColorsMode, Color
"""Rudimentary ANSI support.
See: https://en.wikipedia.org/wiki/ANSI_escape_code
"""
def cursor_up(n=1):
    """CUU: move the cursor up n rows."""
    return csi(CSI.CUU, n)
def cursor_down(n=1):
    """CUD: move the cursor down n rows."""
    return csi(CSI.CUD, n)
def cursor_forward(n=1):
    """CUF: move the cursor forward (right) n columns."""
    return csi(CSI.CUF, n)
def cursor_back(n=1):
    """CUB: move the cursor back (left) n columns."""
    return csi(CSI.CUB, n)
def cursor_next_line(n=1):
    """CNL: move to the start of the line n rows down."""
    return csi(CSI.CNL, n)
def cursor_prev_line(n=1):
    """CPL: move to the start of the line n rows up."""
    return csi(CSI.CPL, n)
def cursor_column(n=1):
    """CHA: move the cursor to column n (1-indexed)."""
    return csi(CSI.CHA, n)
def cursor_position(n=1, m=1):
    """Move cursor to row n, column m (1-indexed from top-left)."""
    return csi(CSI.CUP, n, m)
def erase_display(n=1):
    """ED: erase part or all of the display; n selects the region per ANSI."""
    return csi(CSI.ED, n)
def sgr(*parameters):
    """Build an SGR (Select Graphic Rendition) escape sequence from parameters."""
    return csi(CSI.SGR, *parameters)
def reset():
    """Reset all SGR attributes to terminal defaults."""
    return sgr(SGR.RESET)
def bold():
    """Enable the bold attribute."""
    return sgr(SGR.BOLD)
def dim():
    """Enable the dim/faint attribute."""
    return sgr(SGR.DIM)
def italic():
    """Enable the italic attribute."""
    return sgr(SGR.ITALIC)
def underlined():
    """Enable the underline attribute."""
    return sgr(SGR.UNDERLINED)
def slow_blink():
    """Enable slow blinking."""
    return sgr(SGR.SLOW_BLINK)
def rapid_blink():
    """Enable rapid blinking."""
    return sgr(SGR.RAPID_BLINK)
def inverted():
    """Swap foreground and background colors (reverse video)."""
    return sgr(SGR.INVERT)
def hide():
    """Hide (conceal) the text."""
    return sgr(SGR.HIDE)
def strike():
    """Enable strike-through."""
    return sgr(SGR.STRIKE)
def overlined():
    """Enable the overline attribute."""
    return sgr(SGR.OVERLINED)
def fg(color):
    """Set foreground to one of the 8 base colors (index is taken mod 8)."""
    return sgr(SGR.FG_BASE+color%8)
def bg(color):
    """Set background to one of the 8 base colors (index is taken mod 8)."""
    return sgr(SGR.BG_BASE+color%8)
def fg_bright(color):
    """Set foreground to the bright variant of a base color."""
    return sgr(SGR.FG_BRIGHT_BASE+color%8)
def bg_bright(color):
    """Set background to the bright variant of a base color."""
    return sgr(SGR.BG_BRIGHT_BASE+color%8)
def fg_bold(color):
    """Set foreground base color combined with the bold attribute."""
    return sgr(SGR.FG_BASE+color%8, SGR.BOLD)
def bg_bold(color):
    """Set background base color (index mod 8) combined with the bold attribute.

    Bug fix: previously used SGR.FG_BASE (copy-paste from fg_bold above),
    which changed the *foreground* color instead of the background; the
    bg() sibling establishes SGR.BG_BASE as the background offset.
    """
    return sgr(SGR.BG_BASE+color%8, SGR.BOLD)
def fg_256(color):
    """Set foreground from the 256-color palette (0-255)."""
    return sgr(SGR.SET_FG, ColorsMode.COLORS_256, color)
def bg_256(color):
    """Set background from the 256-color palette (0-255)."""
    return sgr(SGR.SET_BG, ColorsMode.COLORS_256, color)
def fg_rgb(r, g, b):
    """Set foreground to a 24-bit RGB color."""
    return sgr(SGR.SET_FG, ColorsMode.COLORS_RGB, r, g, b)
def bg_rgb(r, g, b):
    """Set background to a 24-bit RGB color."""
    return sgr(SGR.SET_BG, ColorsMode.COLORS_RGB, r, g, b)
def color_256(fg, bg):
    """Combine foreground/background 256-palette sequences; either may be falsy to skip.

    NOTE(review): the parameters shadow the module-level fg()/bg() functions,
    and a color index of 0 is falsy and therefore skipped -- confirm intended.
    """
    sequences = []
    if fg:
        sequences.append(fg_256(fg))
    if bg:
        sequences.append(bg_256(bg))
    return ''.join(sequences)
def color_rgb(fg, bg):
    """Combine foreground/background RGB sequences; fg/bg are (r, g, b) tuples or falsy.

    NOTE(review): parameters shadow the module-level fg()/bg() functions.
    """
    sequences = []
    if fg:
        sequences.append(fg_rgb(*fg))
    if bg:
        sequences.append(bg_rgb(*bg))
    return ''.join(sequences)
def show_colors(fn, colors_num=256):
    """Print a demo table of colors_num colors using fn(color) to colorize each cell.

    Section headers and column widths follow the xterm-256 palette layout:
    indices 0-15 system colors (8 per row), 16-231 the 6x6x6 cube (6 per row),
    232-255 grayscale (12 per row).
    """
    elements = []
    print('SYSTEM COLORS:')
    columns = 8
    for color in range(colors_num):
        element = '%s %03d %s' % (fn(color), color, reset())
        elements.append(element)
        # Flush a full row once `columns` cells are buffered.
        if len(elements) == columns:
            print(''.join(elements))
            elements = []
        if color == 15:
            print('216 COLORS:')
            columns = 6
        if color == 231:
            print('GRAYSCALE COLORS:')
            columns = 12
    # Flush any partial final row.
    if elements:
        print(''.join(elements))
def show_colors_rgb(colors, columns=8):
    """Print swatches for an iterable of color objects exposing an .rgb attribute."""
    elements = []
    for idx, color in enumerate(colors):
        element = '%s %03d %s' % (bg_rgb(*color.rgb), idx, reset())
        elements.append(element)
        if len(elements) == columns:
            print(''.join(elements))
            elements = []
    # Flush any partial final row.
    if elements:
        print(''.join(elements))
def show_color(color):
    """Print a single swatch plus the raw RGB tuple of `color` (needs .rgb)."""
    print(f'{bg_rgb(*color.rgb)} {color.rgb} {reset()}')
def show_rgb_console(console):
    """Render a console's tiles to stdout using 24-bit ANSI colors.

    Escape sequences are emitted only when the fg/bg color actually changes
    between consecutive tiles, keeping the output compact.
    """
    prev_fg = None
    prev_bg = None
    lines = []
    columns = console.width
    line = []
    column = 0
    # fg/bg appear to be array-likes (compared with `(a == b).all()`),
    # presumably numpy RGB triples -- TODO confirm against Console.tiles_gen.
    for ch, fg, bg in console.tiles_gen(encode_ch=chr):
        column += 1
        if prev_fg is None or not (fg == prev_fg).all():
            line.append(fg_rgb(*fg))
            prev_fg = fg
        if prev_bg is None or not (bg == prev_bg).all():
            line.append(bg_rgb(*bg))
            prev_bg = bg
        line.append(ch)
        # Wrap to a new console row every `columns` tiles.
        if column >= columns:
            lines.append(''.join(line))
            line = []
            column = 0
    sys.stdout.write(cursor_next_line(1).join(lines))
    sys.stdout.write(reset())
    sys.stdout.flush()
| 2,838 | 0 | 828 |
f7706ebd4ccc61b41b8d11296f2650beee97c144 | 270 | py | Python | exercicios curso em video/ex037.py | Nilton-Miguel/Prog_Python3 | 4cabcb1a30dde6ababce3cb8d1fbb7d417cb1d8b | [
"MIT"
] | null | null | null | exercicios curso em video/ex037.py | Nilton-Miguel/Prog_Python3 | 4cabcb1a30dde6ababce3cb8d1fbb7d417cb1d8b | [
"MIT"
] | null | null | null | exercicios curso em video/ex037.py | Nilton-Miguel/Prog_Python3 | 4cabcb1a30dde6ababce3cb8d1fbb7d417cb1d8b | [
"MIT"
] | null | null | null | n = int(input('digite um número inteiro: '))
op = int(input('''escolha uma opção de conversão:
[ 1 ] Binário
[ 2 ] Octal
[ 3 ] Hexadecimal'''))
if op == 1:
print(bin(n)[2:])
elif op == 2:
print(oct(n)[2:])
elif op == 3:
print(hex(n)[2:])
| 16.875 | 51 | 0.522222 | n = int(input('digite um número inteiro: '))
# `n` is the integer read from the user on the preceding line of this script.
op = int(input('''escolha uma opção de conversão:
[ 1 ] Binário
[ 2 ] Octal
[ 3 ] Hexadecimal'''))
# bin()/oct()/hex() prefix results with '0b'/'0o'/'0x'; [2:] strips the prefix.
if op == 1:
    print(bin(n)[2:])
elif op == 2:
    print(oct(n)[2:])
elif op == 3:
    print(hex(n)[2:])
# NOTE(review): there is no else branch -- invalid choices are silently ignored.
| 0 | 0 | 0 |
c78003c24de1301b0b4edcdeab9519a097f9d2ee | 106 | py | Python | loader/__init__.py | bsbateam/genoxs | 3df0c07211e1adb3f58fab9231d4aaa8a89ebbb8 | [
"BSD-3-Clause"
] | 3 | 2020-05-04T00:33:57.000Z | 2021-01-10T04:42:07.000Z | loader/__init__.py | genemators/genoxs | 3df0c07211e1adb3f58fab9231d4aaa8a89ebbb8 | [
"BSD-3-Clause"
] | null | null | null | loader/__init__.py | genemators/genoxs | 3df0c07211e1adb3f58fab9231d4aaa8a89ebbb8 | [
"BSD-3-Clause"
] | 1 | 2020-07-22T00:35:01.000Z | 2020-07-22T00:35:01.000Z | try:
from loader.loader import loader
except ImportError as err:
print(f"Import failed, {err}") | 26.5 | 37 | 0.698113 | try:
from loader.loader import loader
except ImportError as err:
print(f"Import failed, {err}") | 0 | 0 | 0 |
943bd3612c05eea98e46831925b8384fd666a02a | 1,444 | py | Python | msibi/tutorials/propane/opt.py | jennyfothergill/msibi | 0e309eff836dc13016d87889fe8d8f6960a13599 | [
"MIT"
] | 7 | 2021-03-24T17:01:09.000Z | 2022-01-03T21:53:26.000Z | msibi/tutorials/propane/opt.py | jennyfothergill/msibi | 0e309eff836dc13016d87889fe8d8f6960a13599 | [
"MIT"
] | 22 | 2021-03-24T18:09:57.000Z | 2022-02-24T18:36:49.000Z | msibi/tutorials/propane/opt.py | jennyfothergill/msibi | 0e309eff836dc13016d87889fe8d8f6960a13599 | [
"MIT"
] | 2 | 2021-03-24T16:19:56.000Z | 2021-03-24T17:04:48.000Z | import itertools
import os
import string
import numpy as np
from msibi import MSIBI, Pair, State, mie
# Remove outputs from any previous optimization run.
# NOTE(review): os.system('rm ...') is shell/Unix specific; subprocess or
# pathlib would be more portable -- left unchanged here.
os.system("rm rdfs/pair_C3*_state*-step*.txt f_fits.log")
os.system("rm state_*/*.txt state*/run.py state*/*query.dcd")
# Set up global parameters for the multi-state iterative Boltzmann inversion.
rdf_cutoff = 5.0
opt = MSIBI(
    rdf_cutoff=rdf_cutoff, n_rdf_points=201, pot_cutoff=3.0, smooth_rdfs=True
)
# Specify states: three temperatures sharing the same starting topology.
stateA = State(
    kT=0.5,
    state_dir="./state_A",
    top_file="start.hoomdxml",
    name="stateA",
    backup_trajectory=True,
)
stateB = State(
    kT=1.5,
    state_dir="./state_B",
    top_file="start.hoomdxml",
    name="stateB",
    backup_trajectory=True,
)
stateC = State(
    kT=2.0,
    state_dir="./state_C",
    top_file="start.hoomdxml",
    name="stateC",
    backup_trajectory=True,
)
states = [stateA, stateB, stateC]
# Specify pairs.
indices = list(itertools.combinations(range(1024), 2))  # all-all for 1024 atoms
initial_guess = mie(opt.pot_r, 1.0, 1.0)  # 1-D array of potential values.
alphabet = ["A", "B", "C"]
# Target RDFs, one per state, loaded from plain-text files.
rdf_targets = [
    np.loadtxt("rdfs/C3-C3-state_{0}.txt".format(i)) for i in alphabet
]
pair0 = Pair("C3", "C3", initial_guess)
alphas = [1.0, 1.0, 1.0]
# Attach each state's target RDF (with weight alpha) to the pair.
for state, target, alpha in zip(states, rdf_targets, alphas):
    pair0.add_state(state, target, alpha, indices)
pairs = [pair0]  # optimize() expects a list of pairs
# Run the MS-IBI optimization with the HOOMD engine.
opt.optimize(states, pairs, n_iterations=5, engine="hoomd")
| 23.290323 | 80 | 0.677285 | import itertools
import os
import string
import numpy as np
from msibi import MSIBI, Pair, State, mie
os.system("rm rdfs/pair_C3*_state*-step*.txt f_fits.log")
os.system("rm state_*/*.txt state*/run.py state*/*query.dcd")
# Set up global parameters.
rdf_cutoff = 5.0
opt = MSIBI(
rdf_cutoff=rdf_cutoff, n_rdf_points=201, pot_cutoff=3.0, smooth_rdfs=True
)
# Specify states.
stateA = State(
kT=0.5,
state_dir="./state_A",
top_file="start.hoomdxml",
name="stateA",
backup_trajectory=True,
)
stateB = State(
kT=1.5,
state_dir="./state_B",
top_file="start.hoomdxml",
name="stateB",
backup_trajectory=True,
)
stateC = State(
kT=2.0,
state_dir="./state_C",
top_file="start.hoomdxml",
name="stateC",
backup_trajectory=True,
)
states = [stateA, stateB, stateC]
# Specify pairs.
indices = list(itertools.combinations(range(1024), 2)) # all-all for 1024 atoms
initial_guess = mie(opt.pot_r, 1.0, 1.0) # 1-D array of potential values.
alphabet = ["A", "B", "C"]
rdf_targets = [
np.loadtxt("rdfs/C3-C3-state_{0}.txt".format(i)) for i in alphabet
]
pair0 = Pair("C3", "C3", initial_guess)
alphas = [1.0, 1.0, 1.0]
# Add targets to pair.
for state, target, alpha in zip(states, rdf_targets, alphas):
pair0.add_state(state, target, alpha, indices)
pairs = [pair0] # optimize() expects a list of pairs
# Do magic.
opt.optimize(states, pairs, n_iterations=5, engine="hoomd")
| 0 | 0 | 0 |
39d3099d8f0fde6079c4fd1c75d9c16f70c2a453 | 1,742 | py | Python | Lib/site-packages/twisted/web2/dav/method/delete.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | Lib/site-packages/twisted/web2/dav/method/delete.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | Lib/site-packages/twisted/web2/dav/method/delete.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | # -*- test-case-name: twisted.web2.dav.test.test_delete -*-
##
# Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, wsanchez@apple.com
##
"""
WebDAV DELETE method
"""
__all__ = ["http_DELETE"]
from twisted.python import log
from twisted.web2 import responsecode
from twisted.web2.dav.fileop import delete
def http_DELETE(self, request):
    """
    Respond to a DELETE request. (RFC 2518, section 8.6)

    Returns NOT_FOUND when the backing file does not exist; otherwise
    delegates to fileop.delete(), honouring the WebDAV Depth header.
    """
    if not self.fp.exists():
        log.err("File not found: %s" % (self.fp.path,))
        return responsecode.NOT_FOUND
    # Depth controls how collections are deleted; "infinity" is the default.
    depth = request.headers.getHeader("depth", "infinity")
    return delete(request.uri, self.fp, depth)
| 36.291667 | 80 | 0.740528 | # -*- test-case-name: twisted.web2.dav.test.test_delete -*-
##
# Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, wsanchez@apple.com
##
"""
WebDAV DELETE method
"""
__all__ = ["http_DELETE"]
from twisted.python import log
from twisted.web2 import responsecode
from twisted.web2.dav.fileop import delete
def http_DELETE(self, request):
    """
    Respond to a DELETE request per RFC 2518, section 8.6.

    Delegates to fileop.delete() when the resource exists, honouring the
    WebDAV Depth header; otherwise answers NOT_FOUND.
    """
    if self.fp.exists():
        # Depth controls how collections are deleted; "infinity" is the default.
        requested_depth = request.headers.getHeader("depth", "infinity")
        return delete(request.uri, self.fp, requested_depth)
    log.err("File not found: %s" % (self.fp.path,))
    return responsecode.NOT_FOUND
| 0 | 0 | 0 |
21af1df38dea7e7f28c08382f2e782d58bc43cd9 | 201 | py | Python | medvision/visualization/__init__.py | kyle0x54/medvision | a634208fe9b2dff13e54b28fa024a31328a78431 | [
"Apache-2.0"
] | 6 | 2019-02-06T15:27:53.000Z | 2021-08-20T03:19:13.000Z | medvision/visualization/__init__.py | kyle0x54/medvision | a634208fe9b2dff13e54b28fa024a31328a78431 | [
"Apache-2.0"
] | 2 | 2019-08-05T11:08:02.000Z | 2019-09-23T09:15:33.000Z | medvision/visualization/__init__.py | kyle0x54/medvision | a634208fe9b2dff13e54b28fa024a31328a78431 | [
"Apache-2.0"
] | 2 | 2019-02-19T06:48:14.000Z | 2019-08-05T08:40:45.000Z | # flake8: noqa
from .draw_curve import draw_froc_curve, draw_roc_curve, draw_pr_curve
from .image import Color, imshow, imshow_bboxes
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| 28.714286 | 70 | 0.761194 | # flake8: noqa
from .draw_curve import draw_froc_curve, draw_roc_curve, draw_pr_curve
from .image import Color, imshow, imshow_bboxes
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| 0 | 0 | 0 |
08930887731f54d32baa46295d141d583dcbce59 | 1,644 | py | Python | libs/plotutils.py | SebaDro/ST-DeepHydro | 98cf8c1fabd9098c34f486655cc608383db36eaa | [
"Apache-2.0"
] | null | null | null | libs/plotutils.py | SebaDro/ST-DeepHydro | 98cf8c1fabd9098c34f486655cc608383db36eaa | [
"Apache-2.0"
] | null | null | null | libs/plotutils.py | SebaDro/ST-DeepHydro | 98cf8c1fabd9098c34f486655cc608383db36eaa | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import tensorflow as tf
import xarray as xr
def plot_loss(history: tf.keras.callbacks.History):
    """
    Visualizes the progress of a trained model by plotting the loss per epoch

    Parameters
    ----------
    history: tf.keras.callbacks.History
        A Tensorflow history object that holds information about training progress
    """
    # Training and validation loss curves share the same axes.
    plt.plot(history.history['loss'], label='Loss')
    plt.plot(history.history['val_loss'], label='Validation Loss')
    plt.xlabel('Epoche')
    plt.ylabel('RMSE')
    plt.legend()
    plt.grid(True)
| 34.25 | 90 | 0.677007 | import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import tensorflow as tf
import xarray as xr
def plot_loss(history: tf.keras.callbacks.History):
    """Plot per-epoch training and validation loss of a fitted model.

    Parameters
    ----------
    history: tf.keras.callbacks.History
        History object produced by model.fit with per-epoch metric lists
    """
    # Draw both curves on shared axes with their legend labels.
    for metric_key, curve_label in (('loss', 'Loss'), ('val_loss', 'Validation Loss')):
        plt.plot(history.history[metric_key], label=curve_label)
    plt.xlabel('Epoche')
    plt.ylabel('RMSE')
    plt.legend()
    plt.grid(True)
def plot_predictions(ds: xr.Dataset, variable: str, basins: list = None):
    """Plot predicted vs. observed series of `variable` for one or more basins.

    Parameters
    ----------
    ds: xr.Dataset
        Dataset with a `basin` coordinate and `{variable}_pred`/`{variable}_obs`
        variables (consumed by plot_prediction_for_single_basin)
    variable: str
        Base name of the variable to plot
    basins: list
        Basin identifiers to plot; defaults to all basins in `ds`

    Raises
    ------
    ValueError
        If the (possibly defaulted) basin list is empty
    """
    if basins is None:
        basins = ds.basin.values
    nr_basins = len(basins)
    if nr_basins == 1:
        plot_prediction_for_single_basin(ds, basins[0], variable)
    elif nr_basins > 1:
        # One subplot per basin, side by side on a single row.
        fig, axis = plt.subplots(1, nr_basins, figsize=(16, 10))
        for ax, basin in zip(axis, basins):
            plot_prediction_for_single_basin(ds, basin, variable, ax)
    else:
        raise ValueError("There must be one basin for plotting, at least!")
def plot_prediction_for_single_basin(ds: xr.Dataset, basin: str, variable: str, ax=None):
    """Plot predicted vs. observed values of ``variable`` for a single basin.

    Parameters
    ----------
    ds: xr.Dataset
        Dataset holding ``<variable>_pred`` and ``<variable>_obs`` variables,
        selectable by ``basin`` and plotted over time.
    basin: str
        Identifier of the basin to select.
    variable: str
        Base name of the variable to plot.
    ax
        Matplotlib axes to draw on; if None, a new 16x10 figure is created.
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=(16, 10))
    # Prediction on top (zorder=1), observation underneath (zorder=0).
    ds.sel(basin=basin)[f"{variable}_pred"].plot(ax=ax, label="prediction", zorder=1)
    ds.sel(basin=basin)[f"{variable}_obs"].plot(ax=ax, label="observation", zorder=0)
    ax.set_xlabel("time")
    ax.set_ylabel(variable)
    # Format time ticks as e.g. "2020-Jan".
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y-%b"))
    ax.set_title(basin)
    ax.legend()
92bc4aa01ffafb6b30b47395867329fa36246ca5 | 1,822 | py | Python | python/spi/token.py | montreal91/jolly-jay | 7a2e145658177abc109b9f8f3787b8197f4ce7b5 | [
"BSD-3-Clause"
] | null | null | null | python/spi/token.py | montreal91/jolly-jay | 7a2e145658177abc109b9f8f3787b8197f4ce7b5 | [
"BSD-3-Clause"
] | null | null | null | python/spi/token.py | montreal91/jolly-jay | 7a2e145658177abc109b9f8f3787b8197f4ce7b5 | [
"BSD-3-Clause"
] | null | null | null | from enum import Enum
# Maps token symbol to the token type
ONE_SYMBOL_TOKENS = {
"+": TokenType.PLUS,
"-": TokenType.MINUS,
"*": TokenType.MULTIPLY,
"/": TokenType.REAL_DIV,
"(": TokenType.LPAR,
")": TokenType.RPAR,
";": TokenType.SEMI,
":": TokenType.COLON,
",": TokenType.COMMA,
".": TokenType.DOT,
}
# Maps token word to the token type
RESERVED_KEYWORDS = {
"program": TokenType.PROGRAM,
"procedure": TokenType.PROCEDURE,
"var": TokenType.VAR,
"integer": TokenType.INTEGER,
"real": TokenType.REAL,
"begin": TokenType.BEGIN,
"end": TokenType.END,
"div": TokenType.INTEGER_DIV,
} | 21.951807 | 100 | 0.597146 | from enum import Enum
class TokenType(Enum):
    """Every token kind produced by the lexer.

    The string values are the human-readable names embedded in a token's
    printed form (see ``Token.__str__``); note INTEGER_DIV prints as
    "INT_DIV".
    """
    ASSIGN = "ASSIGN"
    BEGIN = "BEGIN"
    COLON = "COLON"
    COMMA = "COMMA"
    DOT = "DOT"
    END = "END"
    ID = "ID"
    INTEGER = "INTEGER"
    INTEGER_DIV = "INT_DIV"
    INTEGER_LITERAL = "INTEGER_LITERAL"
    LPAR = "LPAR"
    MINUS = "MINUS"
    MULTIPLY = "MULTIPLY"
    PLUS = "PLUS"
    PROCEDURE = "PROCEDURE"
    PROGRAM = "PROGRAM"
    REAL = "REAL"
    REAL_DIV = "REAL_DIV"
    REAL_LITERAL = "REAL_LITERAL"
    RPAR = "RPAR"
    SEMI = "SEMI"
    VAR = "VAR"
    EOF = "EOF"
class Token:
    """A single lexical token: a type tag, a raw value, and an optional source position."""

    def __init__(self, type_, value, line_number=None, column=None):
        # Position defaults to None for tokens without a source location.
        self._type = type_
        self._value = value
        self._line_number = line_number
        self._column = column

    def get_type(self):
        """Return the token's type tag (a TokenType member)."""
        return self._type

    def get_value(self):
        """Return the raw value carried by this token."""
        return self._value

    def get_line_number(self):
        """Return the source line, or None if unknown."""
        return self._line_number

    def get_column(self):
        """Return the source column, or None if unknown."""
        return self._column

    def __str__(self):
        position = f"{self._line_number}:{self._column}"
        return f"Token({self._type.value}, {self._value}, position={position})"

    # repr is identical to str for tokens.
    __repr__ = __str__
# Maps token symbol to the token type
ONE_SYMBOL_TOKENS = {
"+": TokenType.PLUS,
"-": TokenType.MINUS,
"*": TokenType.MULTIPLY,
"/": TokenType.REAL_DIV,
"(": TokenType.LPAR,
")": TokenType.RPAR,
";": TokenType.SEMI,
":": TokenType.COLON,
",": TokenType.COMMA,
".": TokenType.DOT,
}
# Maps token word to the token type
RESERVED_KEYWORDS = {
"program": TokenType.PROGRAM,
"procedure": TokenType.PROCEDURE,
"var": TokenType.VAR,
"integer": TokenType.INTEGER,
"real": TokenType.REAL,
"begin": TokenType.BEGIN,
"end": TokenType.END,
"div": TokenType.INTEGER_DIV,
} | 437 | 493 | 234 |
b67874391faf56c3e272be2959e71d5c592d9b0c | 2,051 | py | Python | app/library/providers/storage/s3.py | wasilak/notes-manager | 8d320d73c78d3c0690cc2dd4248830c81474c3b4 | [
"MIT"
] | null | null | null | app/library/providers/storage/s3.py | wasilak/notes-manager | 8d320d73c78d3c0690cc2dd4248830c81474c3b4 | [
"MIT"
] | 18 | 2019-09-15T15:22:25.000Z | 2022-02-21T09:01:29.000Z | app/library/providers/storage/s3.py | wasilak/notes-manager | 8d320d73c78d3c0690cc2dd4248830c81474c3b4 | [
"MIT"
] | null | null | null | import os
import logging
import boto3
from botocore.exceptions import ClientError
from .common import get_file, create_path
| 32.046875 | 111 | 0.58508 | import os
import logging
import boto3
from botocore.exceptions import ClientError
from .common import get_file, create_path
class Storage():
    """S3-backed storage provider: mirrors note images into an S3 bucket."""

    # Configuration comes from the environment; storage_root is a local
    # staging directory under the process working directory.
    bucket_name = os.getenv("S3_BUCKET", "notes")
    app_root = os.getcwd()
    storage_root = "%s/storage" % (app_root)

    def __init__(self):
        if not os.path.exists(self.storage_root):
            os.makedirs(self.storage_root, exist_ok=True)
        # Client and resource share one set of environment credentials.
        credentials = {
            "aws_access_key_id": os.getenv("S3_ID", ""),
            "aws_secret_access_key": os.getenv("S3_SECRET", ""),
            "region_name": os.getenv("S3_REGION", ""),
        }
        self.s3_client = boto3.client('s3', **credentials)
        self.s3_resource = boto3.resource('s3', **credentials)
        self.bucket = self.s3_resource.Bucket(self.bucket_name)
        self.logger = logging.getLogger("api")

    def get_files(self, doc_uuid, image_urls):
        """Download each original image locally and upload it under the document's prefix.

        On success, each entry gets a "replacement" key pointing at the
        stored copy; failed downloads are skipped, failed uploads logged.
        """
        for entry in image_urls:
            create_path(self.storage_root, doc_uuid)
            local_path, file_hash, error = get_file(self.logger, self.storage_root, doc_uuid, entry["original"])
            if error:
                continue
            filename = "%s/storage/images/%s.%s" % (doc_uuid, file_hash, entry["original"]["extension"])
            try:
                self.s3_client.upload_file(local_path, self.bucket_name, filename)
                entry["replacement"] = "/storage/%s" % (filename)
            except ClientError as e:
                self.logger.exception(e)

    def cleanup(self, doc_uuid):
        """Delete every object stored under this document's prefix."""
        self.bucket.objects.filter(Prefix="%s/" % (doc_uuid)).delete()

    def get_object(self, filename, expiration=20):
        """Return a presigned GET URL for *filename*, valid for *expiration* seconds."""
        return self.s3_client.generate_presigned_url(
            'get_object',
            Params={
                'Bucket': self.bucket_name,
                'Key': filename
            },
            ExpiresIn=expiration
        )
| 1,677 | 226 | 23 |
99e3887f57aecf09193e7c848f0d73ac30999a95 | 10,712 | py | Python | compiler.py | reidrac/jtc | bd9f90bf767cc1c05050ec0d4ddf2b6c22a54b31 | [
"MIT"
] | 2 | 2016-05-12T19:29:21.000Z | 2020-03-19T16:14:32.000Z | compiler.py | reidrac/jtc | bd9f90bf767cc1c05050ec0d4ddf2b6c22a54b31 | [
"MIT"
] | null | null | null | compiler.py | reidrac/jtc | bd9f90bf767cc1c05050ec0d4ddf2b6c22a54b31 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from sys import exit, stdin
from os import environ, path, unlink
from tempfile import NamedTemporaryFile
from subprocess import Popen, PIPE
from argparse import ArgumentParser
import parser
import lexer
__author__ = "Juan J. Martinez <jjm@usebox.net>"
__version__ = "0.6.3"
app_name = "JTC"
project_url = "http://www.usebox.net/jjm/jtc/"
operators = ('+', '-', '*', '/', '=', '<>', '>', '<', '>=', '<=', 'and', 'or', 'mod', 'not')
enum = ('ADD', 'SUB', 'MUL', 'DIV', 'EQ', 'NE', 'GT', 'LT', 'GE', 'LE', 'AND', 'OR', 'MOD', 'NOT')
op_trans = dict(zip(operators, enum))
if __name__ == "__main__":
ap = ArgumentParser(description="%s (Juan's Toy Compiler)" % app_name,
epilog=project_url,
)
ap.add_argument("source", help="source file to compile (use - for stdin)")
ap.add_argument("--lexer", action="store_true", help="dump lexer output and exit")
ap.add_argument("--parser", action="store_true", help="dump parser output and exit")
ap.add_argument("-c", action="store_true", help="dump C output and exit")
ap.add_argument("--debug", action="store_true", help="enable debug")
ap.add_argument("--no-gc", action="store_true", help="disable the garbage collector")
ap.add_argument("--verbose", action="store_true", help="enable verbose output")
ap.add_argument("--version", action="version", version="%(prog)s " + __version__)
args = ap.parse_args()
if args.verbose:
print("starting: %s ver %s" % (app_name, __version__))
if args.verbose:
print("reading source from:", args.source)
if args.source == "-":
source = "<stdin>"
data = stdin.read()
else:
source = args.source
try:
fd = open(args.source, "rt")
except IOError:
ap.error("failed to open %r" % args.source)
try:
data = fd.read()
except IOError as ex:
ap.error("failed to read %r: %s" % (args.source, ex))
finally:
fd.close()
if args.lexer:
l = lexer.Lexer()
l.build()
print(l.test(data))
exit(0)
ast = parser.parse(data, debug=args.debug)
if not ast:
exit(1)
ast.source = source
if args.parser:
print(ast)
exit(0)
if args.verbose:
print("generating code: %d function(s)" % len(ast.funcs))
c = generate(ast)
if args.c:
print(c)
exit(1)
cc = environ.get("CC", "gcc")
cflags = environ.get("CFLAGS", None)
home = environ.get("JTCHOME", path.abspath(path.dirname(__file__)))
fd = NamedTemporaryFile(mode="wt", suffix=".c", delete=False)
try:
fd.write(c)
fd.close()
cmd = [cc,]
if cflags:
cmd += cflags.split(" ")
cmd += ["-std=c99", "-Wall", "-I%s" % path.join(home, "include"), fd.name, "-o", source + ".out", "-lm"]
if not args.no_gc:
cmd.append("-lgc")
else:
cmd.append("-DDISABLE_GC")
if args.debug:
cmd.append("-ggdb")
else:
cmd.extend(["-s", "-O2"])
if args.verbose:
print("compiling:", ' '.join(cmd))
try:
p = Popen(cmd, stderr=PIPE, close_fds=True)
if p.wait() != 0:
errors = p.stderr.read()
if any(e in errors for e in (b"-lgc", b"gc.h:")):
print("*** gc not found, use --no-gc to disable the garbage collector")
print("%s output: %r" % (cc, errors))
exit(1)
except BaseException as ex:
print("error running the C compiler: %s" % ex)
exit(1)
finally:
unlink(fd.name)
if args.verbose:
print("done")
exit(0)
| 30.605714 | 122 | 0.537901 | #!/usr/bin/env python
from sys import exit, stdin
from os import environ, path, unlink
from tempfile import NamedTemporaryFile
from subprocess import Popen, PIPE
from argparse import ArgumentParser
import parser
import lexer
__author__ = "Juan J. Martinez <jjm@usebox.net>"
__version__ = "0.6.3"
app_name = "JTC"
project_url = "http://www.usebox.net/jjm/jtc/"
operators = ('+', '-', '*', '/', '=', '<>', '>', '<', '>=', '<=', 'and', 'or', 'mod', 'not')
enum = ('ADD', 'SUB', 'MUL', 'DIV', 'EQ', 'NE', 'GT', 'LT', 'GE', 'LE', 'AND', 'OR', 'MOD', 'NOT')
op_trans = dict(zip(operators, enum))
class Id(object):
    """Symbol table for the compiler, kept as class-level (global) state.

    ``ids`` maps a source-level name to its :class:`Id` record; ``index`` is
    the next ordinal to assign. ``enter``/``leave`` push and pop entire
    scopes so each compiled function gets a fresh table.
    """
    index = 1
    ids = dict()
    stack = []
    #INTEGER = 1
    #FLOAT = 2
    #STRING = 3
    # Identifier kinds: plain variables vs. functions.
    ID = 10
    FUNC = 11
    @staticmethod
    def add(lineno, type, id, uvalue=None, params=None):
        # NOTE(review): silently replaces an existing entry for ``id``;
        # callers appear to check ``exists`` first.
        Id.ids[id] = Id(lineno, type, uvalue, params)
        Id.index += 1
        # The record captured the pre-increment index in __init__.
        return Id.ids[id].index
    @staticmethod
    def get(id):
        # Raises KeyError for unknown names; use ``exists`` to probe safely.
        return Id.ids[id]
    @staticmethod
    def enter():
        # Save the current scope and start an empty one (indices restart at 1).
        Id.stack.append([Id.ids, Id.index])
        Id.ids = dict()
        Id.index = 1
    @staticmethod
    def leave():
        # Restore the most recently saved scope.
        Id.ids, Id.index = Id.stack[-1]
        Id.stack = Id.stack[:-1]
    @staticmethod
    def exists(id):
        # Return the record for ``id``, or None if it is not in scope.
        try:
            return Id.ids[id]
        except KeyError:
            return None
    def __init__(self, lineno, type, uvalue=None, params=None):
        # One symbol record: ordinal, declaration line, kind (ID/FUNC),
        # unique emitted name (functions) and parameter count (functions).
        self.index = Id.index
        self.lineno = lineno
        self.type = type
        self.uvalue = uvalue
        self.params = params
    def __repr__(self):
        return "%r (%r, %r)" % (self.index, self.lineno, self.type)
def func_sign(node):
    """Render the C signature for a function node.

    Every parameter becomes an ``obj *`` argument and the function itself
    is named ``_<uvalue>`` (its unique emitted name), returning ``obj *``.
    """
    param_names = node.sub[0].sub
    arg_list = ', '.join("obj *%s" % name for name in param_names)
    return "obj *_%s(%s)" % (node.uvalue, arg_list)
def do_func(node):
    """Emit the complete C definition (signature + body) for one function node."""
    Id.enter()
    # make the function available inside itself to support
    # recursive calls
    nparams = len(node.sub[0].sub)
    Id.add(node.lineno, Id.FUNC, node.value, node.uvalue, nparams)
    # _ctx is the per-call variable context used by store()/retrieve().
    output = "\n" + func_sign(node) + " { st *_ctx = NULL; "
    # Register each parameter in the fresh scope and emit its store().
    for value in node.sub[0].sub:
        index = Id.add(node.sub[0].lineno, Id.ID, value)
        output += "store(&_ctx, %d, %d, %s); " % (node.sub[0].lineno, index, value)
    output += " %s" % do_block(node.sub[1])
    # all functions return 0 by default (unless the user provided a return!)
    output += "\nreturn o_return(&_ctx, o_int(0, 0)); }\n"
    Id.leave()
    return output
def do_if(node):
    """Emit a C if-statement; sub[0] is the condition, sub[1] the body."""
    expr, block = node.sub
    return """
    if (o_lval(%d, %s)) { %s}
    """ % (node.lineno, do_expr(expr), do_block(block))
def do_if_else(node):
    """Emit a C if/else; sub is (condition, then-block, else-block)."""
    expr, block, elseb = node.sub
    return """
    if (o_lval(%d, %s)) { %s} else { %s}
    """ % (node.lineno, do_expr(expr), do_block(block), do_block(elseb))
def do_loop(node):
    """Emit a C while-loop; sub[0] is the condition, sub[1] the body."""
    expr, block = node.sub
    return """
    while (o_lval(%d, %s)) { %s}
    """ % (node.lineno, do_expr(expr), do_block(block))
def do_retrieve(node):
    """Emit a retrieve() call for a variable reference.

    Exits the compiler with an error when the name is not in scope.
    """
    if not Id.exists(node.value):
        print("line %d: undefined identifier %r" % (node.lineno, node.value))
        exit(1)
    index = Id.get(node.value).index
    return "retrieve(&_ctx, %d, %d)" % (node.lineno, index)
def do_dict_index(node):
    """Emit the key expression for a dict access (node.sub[0]).

    String literals are used directly; any other expression is wrapped in
    o_dict_index() so the runtime converts it to a key.
    """
    if node.sub[0].type == "string":
        output = do_expr(node.sub[0])
    else:
        output = "o_dict_index(%d, %s)" % (node.lineno, do_expr(node.sub[0]))
    return output
def do_expr(node):
    """Translate one expression AST node into a C expression string.

    Dispatches on ``node.type``; unknown node types fall through and
    return the empty string.
    """
    output = ""
    if node.type == "retrieve":
        output += do_retrieve(node)
    elif node.type == "numeric":
        # Integer and float literals use different runtime constructors.
        if isinstance(node.value, int):
            output += "o_int(%d, %d)" % (node.lineno, node.value)
        else:
            output += "o_float(%d, %f)" % (node.lineno, node.value)
    elif node.type == "string":
        output += "o_string(%d, %s)" % (node.lineno, node.value)
    elif node.type == "binop":
        # op_trans maps the source operator to its runtime enum name.
        output += "o_op(%d, %s, %s, %s)" % (node.lineno, op_trans[node.value], do_expr(node.sub[0]), do_expr(node.sub[1]))
    elif node.type == "unaop":
        output += "o_op(%d, %s, %s, NULL)" % (node.lineno, op_trans[node.value], do_expr(node.sub[0]))
    elif node.type == "call":
        # Calls are validated against the symbol table: the callee must be
        # a known function with a matching parameter count.
        exists = Id.exists(node.value)
        if exists and exists.type == Id.FUNC:
            if exists.params != len(node.sub[0].sub):
                print("line %d: %r expects %d parameters" % (node.lineno, node.value, exists.params))
                exit(1)
            params = ', '.join([do_expr(p) for p in node.sub[0].sub])
            output += "_%s(%s)" % (exists.uvalue, params)
        else:
            print("line %d: undefined function %r" % (node.lineno, node.value))
            exit(1)
    elif node.type == "typeof":
        output += "o_typeof(%d, %s)" % (node.lineno, do_expr(node.sub[0]))
    elif node.type == "clone":
        output += "o_clone(%d, %s)" % (node.lineno, do_retrieve(node.sub[0]))
    elif node.type == "dict":
        output += "o_dict(%d)" % node.lineno
    elif node.type in ("dict-get", "dict-test"):
        dict_index = do_dict_index(node)
        if node.type == "dict-get":
            func = "o_dict_get"
        else:
            func = "o_dict_test"
        output += "%s(%d, %s, %s)" % (func, node.lineno, do_retrieve(node), dict_index)
    return output
def do_block(node):
    """Translate a statement block into concatenated C statements.

    Function definitions inside the block only register the name here
    (their bodies are emitted separately by do_func); any node type not
    handled explicitly is emitted as a bare expression statement.
    """
    output = ""
    for c in node.sub:
        if c.type == "func":
            exists = Id.exists(c.value)
            if exists:
                print("line %d: %r already defined in line %d in this context" % (c.lineno, c.value, exists.lineno))
                exit(1)
            # make the function available to this scope
            nparams = len(c.sub[0].sub)
            Id.add(c.lineno, Id.FUNC, c.value, c.uvalue, nparams)
        elif c.type == "store":
            exists = Id.exists(c.value)
            # NOTE(review): shadowing a function name only warns and then
            # falls through to reuse the existing index -- confirm intended.
            if exists and exists.type == Id.FUNC:
                print("line %d: %r already defined as function in line %d" % (c.lineno, c.value, exists.lineno))
            if not exists:
                index = Id.add(c.lineno, Id.ID, c.value)
            else:
                index = Id.get(c.value).index
            output += "store(&_ctx, %d, %d, %s);\n" % (c.lineno, index, do_expr(c.sub[0]))
        elif c.type == "if":
            output += do_if(c) + "\n"
        elif c.type == "if-else":
            output += do_if_else(c) + "\n"
        elif c.type == "loop":
            output += do_loop(c) + "\n"
        elif c.type == "return":
            output += "return o_return(&_ctx, %s);\n" % do_expr(c.sub[0])
            # we need the context!
            # NOTE(review): sets a flag on the Id class; its consumer is not
            # visible in this file chunk -- verify it is still used.
            Id.no_func = True
        elif c.type == "println":
            params = ', '.join([do_expr(p) for p in c.sub[0].sub])
            output += "println(%d, %s);\n" % (len(c.sub[0].sub), params)
        elif c.type == "dict-set":
            dict_index = do_dict_index(c)
            output += "o_dict_set(%d, %s, %s, %s);\n" % (c.lineno, do_retrieve(c), dict_index, do_expr(c.sub[1]))
        else:
            output += do_expr(c) + "; "
    return output
def generate(ast):
    """Generate the complete C translation unit for the parsed program.

    Emits a header comment and runtime include, forward declarations for
    every function, each function body, and the _ep() entry point that
    calls the program's main function. Exits with an error if the main
    function declares parameters.
    """
    output = """\
/*
 * jtc ver %s
 * source: %s
 */
#include "rt.h"
""" % (__version__, ast.source)
    if ast.sub[0].sub:
        print("line %d: syntax error: main function parameters" % ast.sub[0].lineno)
        exit(1)
    # Forward-declare everything first so definition order doesn't matter.
    for f in ast.funcs:
        output += func_sign(f) + ";\n"
    for f in ast.funcs:
        output += do_func(f)
    output += """
int _ep() { obj *o = _%s(); return o_lval(0, o); }
/* EOF */
""" % ast.uvalue
    return output
if __name__ == "__main__":
ap = ArgumentParser(description="%s (Juan's Toy Compiler)" % app_name,
epilog=project_url,
)
ap.add_argument("source", help="source file to compile (use - for stdin)")
ap.add_argument("--lexer", action="store_true", help="dump lexer output and exit")
ap.add_argument("--parser", action="store_true", help="dump parser output and exit")
ap.add_argument("-c", action="store_true", help="dump C output and exit")
ap.add_argument("--debug", action="store_true", help="enable debug")
ap.add_argument("--no-gc", action="store_true", help="disable the garbage collector")
ap.add_argument("--verbose", action="store_true", help="enable verbose output")
ap.add_argument("--version", action="version", version="%(prog)s " + __version__)
args = ap.parse_args()
if args.verbose:
print("starting: %s ver %s" % (app_name, __version__))
if args.verbose:
print("reading source from:", args.source)
if args.source == "-":
source = "<stdin>"
data = stdin.read()
else:
source = args.source
try:
fd = open(args.source, "rt")
except IOError:
ap.error("failed to open %r" % args.source)
try:
data = fd.read()
except IOError as ex:
ap.error("failed to read %r: %s" % (args.source, ex))
finally:
fd.close()
if args.lexer:
l = lexer.Lexer()
l.build()
print(l.test(data))
exit(0)
ast = parser.parse(data, debug=args.debug)
if not ast:
exit(1)
ast.source = source
if args.parser:
print(ast)
exit(0)
if args.verbose:
print("generating code: %d function(s)" % len(ast.funcs))
c = generate(ast)
if args.c:
print(c)
exit(1)
cc = environ.get("CC", "gcc")
cflags = environ.get("CFLAGS", None)
home = environ.get("JTCHOME", path.abspath(path.dirname(__file__)))
fd = NamedTemporaryFile(mode="wt", suffix=".c", delete=False)
try:
fd.write(c)
fd.close()
cmd = [cc,]
if cflags:
cmd += cflags.split(" ")
cmd += ["-std=c99", "-Wall", "-I%s" % path.join(home, "include"), fd.name, "-o", source + ".out", "-lm"]
if not args.no_gc:
cmd.append("-lgc")
else:
cmd.append("-DDISABLE_GC")
if args.debug:
cmd.append("-ggdb")
else:
cmd.extend(["-s", "-O2"])
if args.verbose:
print("compiling:", ' '.join(cmd))
try:
p = Popen(cmd, stderr=PIPE, close_fds=True)
if p.wait() != 0:
errors = p.stderr.read()
if any(e in errors for e in (b"-lgc", b"gc.h:")):
print("*** gc not found, use --no-gc to disable the garbage collector")
print("%s output: %r" % (cc, errors))
exit(1)
except BaseException as ex:
print("error running the C compiler: %s" % ex)
exit(1)
finally:
unlink(fd.name)
if args.verbose:
print("done")
exit(0)
| 6,238 | 396 | 253 |
1bbb3e688bbd4f3f08603ebe45c56d6a8f3a3d1a | 1,547 | py | Python | setup.py | jasonkatz/py-graphql-client | 9f938f3d379a8f4d8810961c87baf25dbe35889d | [
"BSD-3-Clause"
] | 38 | 2019-03-22T16:27:08.000Z | 2022-03-30T11:07:55.000Z | setup.py | anthonyhiga/py-graphql-client | 9c59b32bae5c5c6a12634b2bd6353f76328aa31a | [
"BSD-3-Clause"
] | 31 | 2019-03-25T20:28:40.000Z | 2022-01-26T21:22:47.000Z | setup.py | anthonyhiga/py-graphql-client | 9c59b32bae5c5c6a12634b2bd6353f76328aa31a | [
"BSD-3-Clause"
] | 11 | 2019-03-25T18:54:32.000Z | 2021-09-11T17:00:27.000Z | # -*- coding: utf-8 -*-
from setuptools import find_packages, setup
__version__ = "0.1.1"
__desc__ = "A dead-simple GraphQL client that supports subscriptions over websockets"
with open('README.md') as readme_file:
readme = readme_file.read()
requirements = [
'websocket-client==0.54.0'
]
test_requirements = []
setup(
name='py-graphql-client',
version=__version__,
description=__desc__,
long_description=readme,
long_description_content_type='text/markdown',
author="Anon Ray",
author_email='rayanon004@gmail.com',
url='https://github.com/ecthiender/py-graphql-client',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={'': ['LICENSE']},
package_dir={'graphql_client': 'graphql_client'},
python_requires=">=3.4",
include_package_data=True,
install_requires=requirements,
license="BSD3",
zip_safe=False,
keywords=['graphql', 'websocket', 'subscriptions', 'graphql-client'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Environment :: Console',
'Environment :: Web Environment',
'Environment :: Other Environment',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries',
],
test_suite='tests',
tests_require=test_requirements
)
| 30.333333 | 85 | 0.652877 | # -*- coding: utf-8 -*-
from setuptools import find_packages, setup
__version__ = "0.1.1"
__desc__ = "A dead-simple GraphQL client that supports subscriptions over websockets"
with open('README.md') as readme_file:
readme = readme_file.read()
requirements = [
'websocket-client==0.54.0'
]
test_requirements = []
setup(
name='py-graphql-client',
version=__version__,
description=__desc__,
long_description=readme,
long_description_content_type='text/markdown',
author="Anon Ray",
author_email='rayanon004@gmail.com',
url='https://github.com/ecthiender/py-graphql-client',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={'': ['LICENSE']},
package_dir={'graphql_client': 'graphql_client'},
python_requires=">=3.4",
include_package_data=True,
install_requires=requirements,
license="BSD3",
zip_safe=False,
keywords=['graphql', 'websocket', 'subscriptions', 'graphql-client'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Environment :: Console',
'Environment :: Web Environment',
'Environment :: Other Environment',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries',
],
test_suite='tests',
tests_require=test_requirements
)
| 0 | 0 | 0 |
9bbff3893e5f96c2c392c4af0b4ffa7ea128b58e | 1,686 | py | Python | src/error_correction/dataset_utils/describe_masks.py | gruentee/acl2021-factual-error-correction | b500f589cc3e73ffa6958c7dab8c07f2535a448f | [
"Apache-2.0"
] | 5 | 2021-06-12T14:24:22.000Z | 2021-06-15T02:03:44.000Z | src/error_correction/dataset_utils/describe_masks.py | gruentee/acl2021-factual-error-correction | b500f589cc3e73ffa6958c7dab8c07f2535a448f | [
"Apache-2.0"
] | 5 | 2021-06-14T14:52:17.000Z | 2021-07-04T13:18:03.000Z | src/error_correction/dataset_utils/describe_masks.py | gruentee/acl2021-factual-error-correction | b500f589cc3e73ffa6958c7dab8c07f2535a448f | [
"Apache-2.0"
] | 4 | 2021-07-10T13:40:37.000Z | 2022-01-20T08:50:05.000Z | #
# Copyright (c) 2019-2021 James Thorne.
#
# This file is part of factual error correction.
# See https://jamesthorne.co.uk for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from argparse import ArgumentParser
if __name__ == "__main__":
    # Summarize mask statistics over a JSON-lines file: each line is an
    # object with "original_claim" (text) and "master_explanation" (mask).
    parser = ArgumentParser()
    parser.add_argument("in_file")
    args = parser.parse_args()

    def _mean(values):
        # Guard against empty lists: a file with no lines (or no masked
        # instances) would otherwise raise ZeroDivisionError.
        return sum(values) / len(values) if values else 0.0

    any_masks = 0
    total_read = 0
    mask_lens = []
    tok_lens = []
    mask_prop = []
    with open(args.in_file) as f:
        for line in f:
            instance = json.loads(line)
            total_read += 1
            tok_lens.append(len(instance["original_claim"].split()))
            if len(instance["master_explanation"]):
                any_masks += 1
                mask_lens.append(len(instance["master_explanation"]))
                # Proportion of this claim's tokens that are masked.
                mask_prop.append(mask_lens[-1] / tok_lens[-1])
    print(f"Read {total_read} instances, of which {any_masks} have masks")
    print("Average mask length is: ", _mean(mask_lens))
    print("Average mask prop is: ", _mean(mask_prop))
    print("Average tok length is: ", _mean(tok_lens))
    print()
| 33.72 | 78 | 0.662515 | #
# Copyright (c) 2019-2021 James Thorne.
#
# This file is part of factual error correction.
# See https://jamesthorne.co.uk for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("in_file")
args = parser.parse_args()
any_masks = 0
total_read = 0
mask_lens = []
tok_lens = []
mask_prop = []
with open(args.in_file) as f:
for line in f:
instance = json.loads(line)
total_read += 1
tok_lens.append(len(instance["original_claim"].split()))
if len(instance["master_explanation"]):
any_masks += 1
mask_lens.append(len(instance["master_explanation"]))
mask_prop.append(mask_lens[-1] / tok_lens[-1])
print(f"Read {total_read} instances, of which {any_masks} have masks")
print("Average mask length is: ", sum(mask_lens) / len(mask_lens))
print("Average mask prop is: ", sum(mask_prop) / len(mask_prop))
print("Average tok length is: ", sum(tok_lens) / len(tok_lens))
print()
| 0 | 0 | 0 |
177d200a81abac306eaa75a289ef7f0959cc6176 | 12,021 | py | Python | BiGI_src/utils/loader.py | caojiangxia/BiGI | ed54c20523a5b3f295b90a9c08f7c54e8258d04a | [
"MIT"
] | 57 | 2020-10-19T08:54:57.000Z | 2022-03-19T12:20:43.000Z | BiGI_src/utils/loader.py | caojiangxia/BiGI | ed54c20523a5b3f295b90a9c08f7c54e8258d04a | [
"MIT"
] | 6 | 2020-12-01T02:31:56.000Z | 2021-10-10T06:15:13.000Z | BiGI_src/utils/loader.py | caojiangxia/BiGI | ed54c20523a5b3f295b90a9c08f7c54e8258d04a | [
"MIT"
] | 9 | 2021-05-15T03:29:31.000Z | 2022-03-14T20:28:44.000Z | """
Data loader for TACRED json files.
"""
import json
import random
import torch
import numpy as np
class DataLoader(object):
    """
    Load data from json files, preprocess and prepare batches.
    """
    # NOTE(review): this snippet does not show the constructor; attributes
    # such as self.opt, self.data, self.eval and the four neighbor dicts
    # are expected to be initialized there.
    def preprocess(self, data, opt):
        """ Preprocess the data and convert to ids. """
        # Builds (user, item) tuples and additionally collects pairs whose
        # real/fake neighborhoods all exceed min_neighbor into
        # self.user_item_pair (used for subgraph sampling in __getitem__).
        # NOTE(review): the ``opt`` parameter is unused; self.opt is read
        # instead -- confirm this is intentional.
        processed = []
        self.user_item_pair = []
        for mytuple in data:
            processed.append((mytuple[0],mytuple[1]))
            if len(self.user_real_dict[mytuple[0]]) > self.opt["min_neighbor"] and len(self.user_fake_dict[mytuple[0]]) > self.opt[
                "min_neighbor"] and len(self.item_real_dict[mytuple[1]]) > self.opt["min_neighbor"] and len(
                self.item_fake_dict[mytuple[1]]) > self.opt["min_neighbor"]:
                self.user_item_pair.append((mytuple[0],mytuple[1]))
        return processed
    def __getitem__(self, key):
        """ Get a batch with index. """
        if not isinstance(key, int):
            raise TypeError
        if key < 0 or key >= len(self.data):
            raise IndexError
        batch = self.data[key]
        batch_size = len(batch)
        if self.eval :
            # Evaluation: return user ids and their ground-truth item lists.
            batch = list(zip(*batch))
            return torch.LongTensor(batch[0]), batch[1]
        else :
            # Training: rejection-sample ``negative`` unseen items per
            # positive (user, item) pair.
            negative_tmp = []
            for i in range(batch_size):
                for j in range(self.opt["negative"]):
                    while 1:
                        rand = random.randint(0,self.opt["number_item"]-1)
                        if rand not in self.user_real_dict[batch[i][0]]:
                            negative_tmp.append((batch[i][0],rand))
                            break
            batch = list(zip(*batch))
            negative_tmp = list(zip(*negative_tmp))
            # For large graphs, also sample 10 qualifying (user, item)
            # pairs and truncated neighbor lists for subgraph encoding.
            if self.opt["number_user"] * self.opt["number_item"] > 10000000:
                user_index = []
                item_index = []
                real_user_index_id = []
                fake_user_index_id = []
                real_item_index_id = []
                fake_item_index_id = []
                random.shuffle(self.user_item_pair)
                for id in range(10):
                    user = self.user_item_pair[id][0]
                    item = self.user_item_pair[id][1]
                    real_item_id = list(self.user_real_dict[user])
                    real_user_id = list(self.item_real_dict[item])
                    fake_item_id = list(self.user_fake_dict[user])
                    fake_user_id = list(self.item_fake_dict[item])
                    random.shuffle(real_item_id)
                    random.shuffle(fake_item_id)
                    random.shuffle(real_user_id)
                    random.shuffle(fake_user_id)
                    real_item_id = real_item_id[:self.opt["min_neighbor"]]
                    fake_item_id = fake_item_id[:self.opt["min_neighbor"]]
                    real_user_id = real_user_id[:self.opt["min_neighbor"]]
                    fake_user_id = fake_user_id[:self.opt["min_neighbor"]]
                    user_index.append(user)
                    item_index.append(item)
                    # NOTE(review): the sampled fake neighbors are discarded
                    # here and replaced with the real ones just before being
                    # appended -- looks like a deliberate override (or a
                    # leftover experiment); confirm against the paper/repo.
                    fake_user_id = real_user_id
                    fake_item_id = real_item_id
                    real_item_index_id.append(real_item_id)
                    real_user_index_id.append(real_user_id)
                    fake_item_index_id.append(fake_item_id)
                    fake_user_index_id.append(fake_user_id)
                return torch.LongTensor(batch[0]), torch.LongTensor(batch[1]) , torch.LongTensor(negative_tmp[1]) , torch.LongTensor(user_index), torch.LongTensor(item_index), torch.LongTensor(real_user_index_id), torch.LongTensor(fake_user_index_id), torch.LongTensor(real_item_index_id), torch.LongTensor(fake_item_index_id)
            return torch.LongTensor(batch[0]), torch.LongTensor(batch[1]),torch.LongTensor(negative_tmp[1])
class wikiDataLoader(object):
"""
Load data from json files, preprocess and prepare batches.
"""
def preprocess(self, data, opt):
""" Preprocess the data and convert to ids. """
processed = []
self.user_item_pair = []
for mytuple in data:
processed.append((mytuple[0],mytuple[1],mytuple[2]))
if len(self.user_real_dict[mytuple[0]]) > self.opt["min_neighbor"] and len(
self.user_fake_dict[mytuple[0]]) > self.opt[
"min_neighbor"] and len(self.item_real_dict[mytuple[1]]) > self.opt["min_neighbor"] and len(
self.item_fake_dict[mytuple[1]]) > self.opt["min_neighbor"]:
self.user_item_pair.append((mytuple[0], mytuple[1]))
return processed
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
if self.eval :
batch = list(zip(*batch))
return torch.LongTensor(batch[0]), torch.LongTensor(batch[1])+torch.tensor(self.opt["number_user"]), np.array(batch[2])
else :
negative_tmp = []
for i in range(batch_size):
for j in range(self.opt["negative"]):
while 1:
rand = random.randint(0,self.opt["number_item"]-1)
if rand not in self.user_real_dict[batch[i][0]]:
negative_tmp.append((batch[i][0],rand))
break
batch = list(zip(*batch))
negative_tmp = list(zip(*negative_tmp))
if self.opt["number_user"] * self.opt["number_item"] > 10000000:
user_index = []
item_index = []
real_user_index_id = []
fake_user_index_id = []
real_item_index_id = []
fake_item_index_id = []
random.shuffle(self.user_item_pair)
for id in range(10):
user = self.user_item_pair[id][0]
item = self.user_item_pair[id][1]
real_item_id = list(self.user_real_dict[user])
real_user_id = list(self.item_real_dict[item])
fake_item_id = list(self.user_fake_dict[user])
fake_user_id = list(self.item_fake_dict[item])
random.shuffle(real_item_id)
random.shuffle(fake_item_id)
random.shuffle(real_user_id)
random.shuffle(fake_user_id)
real_item_id = real_item_id[:self.opt["min_neighbor"]]
fake_item_id = fake_item_id[:self.opt["min_neighbor"]]
real_user_id = real_user_id[:self.opt["min_neighbor"]]
fake_user_id = fake_user_id[:self.opt["min_neighbor"]]
user_index.append(user)
item_index.append(item)
fake_user_id = real_user_id
fake_item_id = real_item_id
real_item_index_id.append(real_item_id)
real_user_index_id.append(real_user_id)
fake_item_index_id.append(fake_item_id)
fake_user_index_id.append(fake_user_id)
return torch.LongTensor(batch[0]), torch.LongTensor(batch[1]) , torch.LongTensor(negative_tmp[1]) , torch.LongTensor(user_index), torch.LongTensor(item_index), torch.LongTensor(real_user_index_id), torch.LongTensor(fake_user_index_id), torch.LongTensor(real_item_index_id), torch.LongTensor(fake_item_index_id) # User , item, label -> batch | batch | batch
return torch.LongTensor(batch[0]), torch.LongTensor(batch[1]),torch.LongTensor(negative_tmp[1]) # User , item, neg_item -> batch | batch | batch
| 44.032967 | 373 | 0.556776 | """
Data loader for TACRED json files.
"""
import json
import random
import torch
import numpy as np
class DataLoader(object):
"""
Load data from json files, preprocess and prepare batches.
"""
def __init__(self, filename, batch_size, opt, user_real_dict, user_fake_dict, item_real_dict, item_fake_dict, evaluation):
self.batch_size = batch_size
self.opt = opt
self.eval = evaluation
self.ma = {}
with open(filename) as infile:
data=[]
for line in infile:
line=line.strip().split("\t")
data.append([int(line[0]),int(line[1])])
if int(line[0]) not in self.ma.keys():
self.ma[int(line[0])] = set()
self.ma[int(line[0])].add(int(line[1]))
self.raw_data = data
self.user_real_dict = user_real_dict
self.user_fake_dict = user_fake_dict
self.item_real_dict = item_real_dict
self.item_fake_dict = item_fake_dict
if not evaluation:
data = self.preprocess(data, opt) # [[user,item] ... ]
else :
data = self.preprocess_for_predict() # [ [user, [gound_truth]] ]
# shuffle for training
if not evaluation:
indices = list(range(len(data)))
random.shuffle(indices)
data = [data[i] for i in indices]
if batch_size > len(data):
batch_size = len(data)
self.batch_size = batch_size
if len(data)%batch_size != 0:
data += data[:batch_size]
data = data[: (len(data)//batch_size) * batch_size]
self.num_examples = len(data)
# chunk into batches
data = [data[i:i+batch_size] for i in range(0, len(data), batch_size)]
self.data = data
print("{} batches created for {}".format(len(data), filename))
def preprocess_for_predict(self):
processed=[]
for user in range(self.opt["number_user"]):
ground_truth=[]
if user not in self.ma.keys():
continue
for item in self.ma[user]:
if item >= self.opt["number_item"]:
continue
ground_truth.append(item)
if len(ground_truth) == 0:
continue
ground_truth=sorted(ground_truth)
processed.append([user,ground_truth])
return processed
def preprocess(self, data, opt):
""" Preprocess the data and convert to ids. """
processed = []
self.user_item_pair = []
for mytuple in data:
processed.append((mytuple[0],mytuple[1]))
if len(self.user_real_dict[mytuple[0]]) > self.opt["min_neighbor"] and len(self.user_fake_dict[mytuple[0]]) > self.opt[
"min_neighbor"] and len(self.item_real_dict[mytuple[1]]) > self.opt["min_neighbor"] and len(
self.item_fake_dict[mytuple[1]]) > self.opt["min_neighbor"]:
self.user_item_pair.append((mytuple[0],mytuple[1]))
return processed
def __len__(self):
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
if self.eval :
batch = list(zip(*batch))
return torch.LongTensor(batch[0]), batch[1]
else :
negative_tmp = []
for i in range(batch_size):
for j in range(self.opt["negative"]):
while 1:
rand = random.randint(0,self.opt["number_item"]-1)
if rand not in self.user_real_dict[batch[i][0]]:
negative_tmp.append((batch[i][0],rand))
break
batch = list(zip(*batch))
negative_tmp = list(zip(*negative_tmp))
if self.opt["number_user"] * self.opt["number_item"] > 10000000:
user_index = []
item_index = []
real_user_index_id = []
fake_user_index_id = []
real_item_index_id = []
fake_item_index_id = []
random.shuffle(self.user_item_pair)
for id in range(10):
user = self.user_item_pair[id][0]
item = self.user_item_pair[id][1]
real_item_id = list(self.user_real_dict[user])
real_user_id = list(self.item_real_dict[item])
fake_item_id = list(self.user_fake_dict[user])
fake_user_id = list(self.item_fake_dict[item])
random.shuffle(real_item_id)
random.shuffle(fake_item_id)
random.shuffle(real_user_id)
random.shuffle(fake_user_id)
real_item_id = real_item_id[:self.opt["min_neighbor"]]
fake_item_id = fake_item_id[:self.opt["min_neighbor"]]
real_user_id = real_user_id[:self.opt["min_neighbor"]]
fake_user_id = fake_user_id[:self.opt["min_neighbor"]]
user_index.append(user)
item_index.append(item)
fake_user_id = real_user_id
fake_item_id = real_item_id
real_item_index_id.append(real_item_id)
real_user_index_id.append(real_user_id)
fake_item_index_id.append(fake_item_id)
fake_user_index_id.append(fake_user_id)
return torch.LongTensor(batch[0]), torch.LongTensor(batch[1]) , torch.LongTensor(negative_tmp[1]) , torch.LongTensor(user_index), torch.LongTensor(item_index), torch.LongTensor(real_user_index_id), torch.LongTensor(fake_user_index_id), torch.LongTensor(real_item_index_id), torch.LongTensor(fake_item_index_id)
return torch.LongTensor(batch[0]), torch.LongTensor(batch[1]),torch.LongTensor(negative_tmp[1])
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
class wikiDataLoader(object):
"""
Load data from json files, preprocess and prepare batches.
"""
def __init__(self, filename, batch_size, opt, user_real_dict, user_fake_dict, item_real_dict, item_fake_dict, evaluation):
self.batch_size = batch_size
self.opt = opt
self.eval = evaluation
self.ma = {}
with open(filename) as infile:
data=[]
for line in infile:
line=line.strip().split("\t")
data.append([int(line[0]),int(line[1]),int(line[2])])
if int(line[0]) not in self.ma.keys():
self.ma[int(line[0])] = set()
self.ma[int(line[0])].add(int(line[1]))
self.raw_data = data
self.user_real_dict = user_real_dict
self.user_fake_dict = user_fake_dict
self.item_real_dict = item_real_dict
self.item_fake_dict = item_fake_dict
data = self.preprocess(data, opt)
# shuffle for training
if not evaluation:
indices = list(range(len(data)))
random.shuffle(indices)
data = [data[i] for i in indices]
if batch_size > len(data):
batch_size = len(data)
self.batch_size = batch_size
if len(data)%batch_size != 0:
data += data[:batch_size]
data = data[: (len(data)//batch_size) * batch_size]
self.num_examples = len(data)
if not evaluation:
data = [data[i:i+batch_size] for i in range(0, len(data), batch_size)]
else :
data = [data]
self.data = data
print("{} batches created for {}".format(len(data), filename))
def preprocess(self, data, opt):
""" Preprocess the data and convert to ids. """
processed = []
self.user_item_pair = []
for mytuple in data:
processed.append((mytuple[0],mytuple[1],mytuple[2]))
if len(self.user_real_dict[mytuple[0]]) > self.opt["min_neighbor"] and len(
self.user_fake_dict[mytuple[0]]) > self.opt[
"min_neighbor"] and len(self.item_real_dict[mytuple[1]]) > self.opt["min_neighbor"] and len(
self.item_fake_dict[mytuple[1]]) > self.opt["min_neighbor"]:
self.user_item_pair.append((mytuple[0], mytuple[1]))
return processed
def __len__(self):
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
if self.eval :
batch = list(zip(*batch))
return torch.LongTensor(batch[0]), torch.LongTensor(batch[1])+torch.tensor(self.opt["number_user"]), np.array(batch[2])
else :
negative_tmp = []
for i in range(batch_size):
for j in range(self.opt["negative"]):
while 1:
rand = random.randint(0,self.opt["number_item"]-1)
if rand not in self.user_real_dict[batch[i][0]]:
negative_tmp.append((batch[i][0],rand))
break
batch = list(zip(*batch))
negative_tmp = list(zip(*negative_tmp))
if self.opt["number_user"] * self.opt["number_item"] > 10000000:
user_index = []
item_index = []
real_user_index_id = []
fake_user_index_id = []
real_item_index_id = []
fake_item_index_id = []
random.shuffle(self.user_item_pair)
for id in range(10):
user = self.user_item_pair[id][0]
item = self.user_item_pair[id][1]
real_item_id = list(self.user_real_dict[user])
real_user_id = list(self.item_real_dict[item])
fake_item_id = list(self.user_fake_dict[user])
fake_user_id = list(self.item_fake_dict[item])
random.shuffle(real_item_id)
random.shuffle(fake_item_id)
random.shuffle(real_user_id)
random.shuffle(fake_user_id)
real_item_id = real_item_id[:self.opt["min_neighbor"]]
fake_item_id = fake_item_id[:self.opt["min_neighbor"]]
real_user_id = real_user_id[:self.opt["min_neighbor"]]
fake_user_id = fake_user_id[:self.opt["min_neighbor"]]
user_index.append(user)
item_index.append(item)
fake_user_id = real_user_id
fake_item_id = real_item_id
real_item_index_id.append(real_item_id)
real_user_index_id.append(real_user_id)
fake_item_index_id.append(fake_item_id)
fake_user_index_id.append(fake_user_id)
return torch.LongTensor(batch[0]), torch.LongTensor(batch[1]) , torch.LongTensor(negative_tmp[1]) , torch.LongTensor(user_index), torch.LongTensor(item_index), torch.LongTensor(real_user_index_id), torch.LongTensor(fake_user_index_id), torch.LongTensor(real_item_index_id), torch.LongTensor(fake_item_index_id) # User , item, label -> batch | batch | batch
return torch.LongTensor(batch[0]), torch.LongTensor(batch[1]),torch.LongTensor(negative_tmp[1]) # User , item, neg_item -> batch | batch | batch
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
| 3,974 | 0 | 184 |
eca444c0f5a1e8f5a0116943df0c1cf53f8f4460 | 1,001 | py | Python | import/jsonify.py | 18F/NRM-Grants-Agreements | 7b9016e034b75a2237f7c70ba539b542108c335e | [
"CC0-1.0"
] | 5 | 2020-11-18T20:00:02.000Z | 2021-04-16T23:50:07.000Z | import/jsonify.py | USDAForestService/NRM-Grants-Agreements | 7b9016e034b75a2237f7c70ba539b542108c335e | [
"CC0-1.0"
] | 210 | 2021-04-28T16:26:34.000Z | 2022-03-14T16:31:21.000Z | import/jsonify.py | USDAForestService/NRM-Grants-Agreements | 7b9016e034b75a2237f7c70ba539b542108c335e | [
"CC0-1.0"
] | 2 | 2021-07-06T20:57:27.000Z | 2021-07-07T13:06:46.000Z | import csv
import json
import sys
def make_json(args):
"""
Pretty simple and somewhat naive script that =takes a `.xsv` file as an argument,
converts it to JSON, and outputs it as a json file of the same name as the original csv.
It can handle .csv or .tsv files.
It does, however, make a lot of assumptions about the nature of the file and PKs,
so... we'll see how well it works.
"""
jsondata = []
# convert from args list
cvspath = args[0]
with open(cvspath, encoding='utf-8', errors='replace') as csvf:
if cvspath.endswith('.tsv'):
read_file = csv.DictReader(csvf, delimiter="\t")
else:
read_file = csv.DictReader(csvf)
for row in read_file:
jsondata.append(row)
jsonpath = cvspath.rsplit('.')[0] + '.json'
with open(jsonpath, 'w', encoding='utf-8') as jsonf:
jsonf.write(json.dumps(jsondata, indent=4))
if __name__ == "__main__":
make_json(sys.argv[1:]) | 31.28125 | 92 | 0.62038 | import csv
import json
import sys
def make_json(args):
"""
Pretty simple and somewhat naive script that =takes a `.xsv` file as an argument,
converts it to JSON, and outputs it as a json file of the same name as the original csv.
It can handle .csv or .tsv files.
It does, however, make a lot of assumptions about the nature of the file and PKs,
so... we'll see how well it works.
"""
jsondata = []
# convert from args list
cvspath = args[0]
with open(cvspath, encoding='utf-8', errors='replace') as csvf:
if cvspath.endswith('.tsv'):
read_file = csv.DictReader(csvf, delimiter="\t")
else:
read_file = csv.DictReader(csvf)
for row in read_file:
jsondata.append(row)
jsonpath = cvspath.rsplit('.')[0] + '.json'
with open(jsonpath, 'w', encoding='utf-8') as jsonf:
jsonf.write(json.dumps(jsondata, indent=4))
if __name__ == "__main__":
make_json(sys.argv[1:]) | 0 | 0 | 0 |
46877eeab023b042935d4a1583c595ecfe184fe7 | 3,012 | py | Python | tests/test_ddem.py | erikmannerfelt/xdem | 725a216f576642f2af4ac3228c9290cd85e47e17 | [
"MIT"
] | 12 | 2021-03-05T22:40:49.000Z | 2022-03-25T14:01:54.000Z | tests/test_ddem.py | erikmannerfelt/xdem | 725a216f576642f2af4ac3228c9290cd85e47e17 | [
"MIT"
] | 205 | 2021-03-04T13:07:15.000Z | 2022-02-02T18:02:14.000Z | tests/test_ddem.py | erikmannerfelt/xdem | 725a216f576642f2af4ac3228c9290cd85e47e17 | [
"MIT"
] | 12 | 2021-03-10T09:27:51.000Z | 2022-03-28T12:29:53.000Z | """Functions to test the difference of DEMs tools."""
import warnings
import geoutils as gu
import numpy as np
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import xdem
| 32.042553 | 93 | 0.648406 | """Functions to test the difference of DEMs tools."""
import warnings
import geoutils as gu
import numpy as np
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import xdem
class TestdDEM:
dem_2009 = xdem.DEM(xdem.examples.get_path("longyearbyen_ref_dem"))
dem_1990 = xdem.DEM(xdem.examples.get_path("longyearbyen_tba_dem"))
outlines_1990 = gu.Vector(xdem.examples.get_path("longyearbyen_glacier_outlines"))
ddem = xdem.dDEM(
dem_2009 - dem_1990,
start_time=np.datetime64("1990-08-01"),
end_time=np.datetime64("2009-08-01")
)
def test_init(self):
"""Test that the dDEM object was instantiated correctly."""
assert isinstance(self.ddem, xdem.dDEM)
assert isinstance(self.ddem.data, np.ma.masked_array)
def test_copy(self):
"""Test that copying works as it should."""
ddem2 = self.ddem.copy()
assert isinstance(ddem2, xdem.dDEM)
ddem2.data += 1
assert self.ddem != ddem2
def test_filled_data(self):
"""Test that the filled_data property points to the right data."""
ddem2 = self.ddem.copy()
assert not np.any(np.isnan(ddem2.data)) or np.all(~ddem2.data.mask)
assert ddem2.filled_data is not None
assert np.count_nonzero(np.isnan(ddem2.data)) == 0
ddem2.data.ravel()[0] = np.nan
assert np.count_nonzero(np.isnan(ddem2.data)) == 1
assert ddem2.filled_data is None
ddem2.interpolate(method="linear")
assert ddem2.fill_method is not None
def test_regional_hypso(self):
"""Test the regional hypsometric approach."""
ddem = self.ddem.copy()
ddem.data.mask = np.zeros_like(ddem.data, dtype=bool)
ddem.data.mask.ravel()[np.random.choice(ddem.data.size, 50000, replace=False)] = True
assert np.count_nonzero(ddem.data.mask) > 0
assert ddem.filled_data is None
ddem.interpolate(
method="regional_hypsometric",
reference_elevation=self.dem_2009,
mask=self.outlines_1990
)
assert ddem._filled_data is not None
assert type(ddem.filled_data) == np.ndarray
assert ddem.filled_data.shape == ddem.data.shape
assert np.abs(np.nanmean(self.ddem.data - ddem.filled_data)) < 1
def test_local_hypso(self):
"""Test the local hypsometric approach."""
ddem = self.ddem.copy()
scott_1990 = self.outlines_1990.query("NAME == 'Scott Turnerbreen'")
ddem.data.mask = np.zeros_like(ddem.data, dtype=bool)
ddem.data.mask.ravel()[np.random.choice(ddem.data.size, 50000, replace=False)] = True
assert np.count_nonzero(ddem.data.mask) > 0
assert ddem.filled_data is None
ddem.interpolate(
method="local_hypsometric",
reference_elevation=self.dem_2009.data,
mask=self.outlines_1990
)
assert np.abs(np.mean(self.ddem.data - ddem.filled_data)) < 1
| 0 | 2,791 | 23 |
a41283ee12db1520d638c091b84a0d30196b74db | 719 | py | Python | docs/command_interface/src/resources/source/sample.py | CoderSong2015/Apache-Trafodion | 889631aae9cdcd38fca92418d633f2dedc0be619 | [
"Apache-2.0"
] | 148 | 2015-06-18T21:26:04.000Z | 2017-12-25T01:47:01.000Z | docs/command_interface/src/resources/source/sample.py | CoderSong2015/Apache-Trafodion | 889631aae9cdcd38fca92418d633f2dedc0be619 | [
"Apache-2.0"
] | 1,352 | 2015-06-20T03:05:01.000Z | 2017-12-25T14:13:18.000Z | docs/command_interface/src/resources/source/sample.py | CoderSong2015/Apache-Trafodion | 889631aae9cdcd38fca92418d633f2dedc0be619 | [
"Apache-2.0"
] | 166 | 2015-06-19T18:52:10.000Z | 2017-12-27T06:19:32.000Z | import os import sys
## Modify this path
sys.path.append("C:\\Program Files (x86)\\Apache Software Foundation\\Trafodion Command Interface\\lib\\python")
import Session
# create a new session
sess = Session.Session()
# Connect to the database
x=sess. connect ("user1","password","16.123.456.78","23400")
# Execute sample queries
# execute takes the query string as argument
setSchema = "set schema TRAFODION.CI_SAMPLE"
selectTable = "select * from employee"
getStats = "get statistics"
#Contruct a list of SQL statements to be executed
queryList = [setSchema, selectTable, getStats] print "\n";
for query in queryList:
print sess.execute (query)
# disconnect the session
sess.disconnect()
del sess
sess=None
| 23.966667 | 112 | 0.751043 | import os import sys
## Modify this path
sys.path.append("C:\\Program Files (x86)\\Apache Software Foundation\\Trafodion Command Interface\\lib\\python")
import Session
# create a new session
sess = Session.Session()
# Connect to the database
x=sess. connect ("user1","password","16.123.456.78","23400")
# Execute sample queries
# execute takes the query string as argument
setSchema = "set schema TRAFODION.CI_SAMPLE"
selectTable = "select * from employee"
getStats = "get statistics"
#Contruct a list of SQL statements to be executed
queryList = [setSchema, selectTable, getStats] print "\n";
for query in queryList:
print sess.execute (query)
# disconnect the session
sess.disconnect()
del sess
sess=None
| 0 | 0 | 0 |
168adf574b0816d2f1731870f4b134622f8eeccc | 9,276 | py | Python | tests/test_nasbench.py | Harald-R/aw_nas | 8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783 | [
"MIT"
] | 195 | 2020-08-15T17:39:23.000Z | 2022-02-28T07:48:03.000Z | tests/test_nasbench.py | Harald-R/aw_nas | 8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783 | [
"MIT"
] | 22 | 2020-08-16T01:44:48.000Z | 2022-03-12T00:46:54.000Z | tests/test_nasbench.py | Harald-R/aw_nas | 8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783 | [
"MIT"
] | 27 | 2020-08-16T06:34:56.000Z | 2022-03-06T18:16:52.000Z | import os
import pytest
import numpy as np
AWNAS_TEST_NASBENCH = os.environ.get("AWNAS_TEST_NASBENCH", None)
@pytest.mark.skipif(
not AWNAS_TEST_NASBENCH, reason="do not test the nasbench BTC by default.")
@pytest.mark.parametrize("case", [
{"cls": "nasbench-101", "load_nasbench": False, "validate_spec": False}])
@pytest.mark.skipif(
not AWNAS_TEST_NASBENCH, reason="do not test the nasbench BTC by default.")
@pytest.mark.skipif(
not AWNAS_TEST_NASBENCH, reason="do not test the nasbench BTC by default.")
@pytest.mark.parametrize(
"case", [
{"embedder_type": "nb101-lstm"},
{"embedder_type": "nb101-lstm",
"embedder_cfg": {
"use_hid": True,
"num_layers": 3
}},
{"embedder_type": "nb101-seq"},
{"embedder_type": "nb101-flow"},
{"embedder_type": "nb101-flow",
"embedder_cfg": {
"use_final_only": True,
"use_global_node": True
}},
{"embedder_type": "nb101-gcn"},
{"embedder_type": "nb101-gcn",
"embedder_cfg": {
"use_final_only": True,
"use_global_node": True
}},
])
@pytest.mark.skipif(
not AWNAS_TEST_NASBENCH, reason="do not test the nasbench BTC by default.")
@pytest.mark.skipif(
not AWNAS_TEST_NASBENCH, reason="do not test the nasbench BTC by default.")
| 43.549296 | 109 | 0.579452 | import os
import pytest
import numpy as np
AWNAS_TEST_NASBENCH = os.environ.get("AWNAS_TEST_NASBENCH", None)
@pytest.mark.skipif(
not AWNAS_TEST_NASBENCH, reason="do not test the nasbench BTC by default.")
@pytest.mark.parametrize("case", [
{"cls": "nasbench-101", "load_nasbench": False, "validate_spec": False}])
def test_rollout_from_genotype_str(case):
from aw_nas.common import get_search_space, rollout_from_genotype_str
genotype_str = case.pop("genotype_str", None)
ss = get_search_space(**case)
if genotype_str:
rec_rollout = rollout_from_genotype_str(genotype_str, ss)
else:
rollout = ss.random_sample()
rec_rollout = rollout_from_genotype_str(str(rollout.genotype), ss)
assert all(np.all(rec_rollout.arch[i] == rollout.arch[i])
for i in range(len(rec_rollout.arch)))
@pytest.mark.skipif(
not AWNAS_TEST_NASBENCH, reason="do not test the nasbench BTC by default.")
def test_plot_arch(tmp_path):
from aw_nas.common import get_search_space
from aw_nas.btcs.nasbench_101 import NasBench101Rollout
nasbench_ss = get_search_space("nasbench-101", load_nasbench=False)
prefix = os.path.join(str(tmp_path), "nb101-cell")
arch_1 = (np.array([[0, 1, 0, 0, 1, 1, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0]], dtype=np.int8), [1, 2, 1, 1, 0])
rollout = NasBench101Rollout(*arch_1, search_space=nasbench_ss)
print("genotype: ", rollout.genotype, "save to: ", prefix)
rollout.plot_arch(prefix, label="test plot")
@pytest.mark.skipif(
not AWNAS_TEST_NASBENCH, reason="do not test the nasbench BTC by default.")
@pytest.mark.parametrize(
"case", [
{"embedder_type": "nb101-lstm"},
{"embedder_type": "nb101-lstm",
"embedder_cfg": {
"use_hid": True,
"num_layers": 3
}},
{"embedder_type": "nb101-seq"},
{"embedder_type": "nb101-flow"},
{"embedder_type": "nb101-flow",
"embedder_cfg": {
"use_final_only": True,
"use_global_node": True
}},
{"embedder_type": "nb101-gcn"},
{"embedder_type": "nb101-gcn",
"embedder_cfg": {
"use_final_only": True,
"use_global_node": True
}},
])
def test_embedder(case):
from aw_nas.evaluator.arch_network import ArchEmbedder
from aw_nas.common import get_search_space
nasbench_search_space = get_search_space("nasbench-101", load_nasbench=False)
device = "cuda"
embedder = ArchEmbedder.get_class_(case["embedder_type"])(
nasbench_search_space,
**case.get("embedder_cfg", {}))
embedder.to(device)
arch_1 = (np.array([[0, 1, 0, 0, 1, 1, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0]], dtype=np.int8), [1, 2, 1, 1, 0])
arch_2 = (np.array([[0, 1, 0, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=np.int8),
nasbench_search_space.op_to_idx(
['input', 'conv3x3-bn-relu', 'conv3x3-bn-relu', 'maxpool3x3',
'conv3x3-bn-relu', 'none', 'output']))
print(arch_1)
print(arch_2)
print(embedder.forward([arch_1, arch_2]))
# embedder.embed_and_transform_arch(arch_2)
@pytest.mark.skipif(
not AWNAS_TEST_NASBENCH, reason="do not test the nasbench BTC by default.")
def test_nasbench(nasbench_search_space):
import numpy as np
from scipy.stats import stats
from aw_nas.btcs import nasbench_101
from aw_nas.evaluator.arch_network import PointwiseComparator
from aw_nas.rollout.compare import CompareRollout
ss = nasbench_search_space
# construct controller
controller = nasbench_101.NasBench101Controller(ss, device="cuda")
compare_controller = nasbench_101.NasBench101CompareController(ss, device="cuda", rollout_type="compare")
# construct evaluator
evaluator = nasbench_101.NasBench101Evaluator(None, None, None)
# test random sample
_ = ss.random_sample()
# test controller.sample
rollouts = controller.sample(n=20)
# test genotype
print(rollouts[0].genotype)
# test evaluator.evaluate_rollout
rollouts = evaluator.evaluate_rollouts(rollouts, False)
print(rollouts)
evaluator.rollout_type = "compare"
c_rollouts = compare_controller.sample(n=4)
print(c_rollouts[0].genotype)
# test evaluator.evaluate_rollout for compare rollouts
c_rollouts = evaluator.evaluate_rollouts(c_rollouts, False)
print(c_rollouts)
# test nb101-gcn embedder
comparator = PointwiseComparator(ss, arch_embedder_type="nb101-gcn",
arch_embedder_cfg={"hid_dim": 96})
comparator_2 = PointwiseComparator(ss, arch_embedder_type="nb101-gcn",
arch_embedder_cfg={"hid_dim": 96})
pred_scores = comparator.predict([r.arch for r in rollouts])
pred_scores_2 = comparator_2.predict([r.arch for r in rollouts])
label_scores = [r.perf["reward"] for r in rollouts]
corr_init_1 = stats.kendalltau(label_scores, pred_scores.cpu().data.numpy()).correlation
corr_init_2 = stats.kendalltau(label_scores, pred_scores_2.cpu().data.numpy()).correlation
# compare_scores = comparator.compare([r.rollout_1.arch for r in c_rollouts],
# [r.rollout_2.arch for r in c_rollouts])
# try training for several epochs using update_predict
true_scores = np.random.rand(len(rollouts))
for i_step in range(5):
loss = comparator.update_predict([r.arch for r in rollouts],
true_scores)
print("update predict {}: {:.4f}".format(i_step, loss))
# try training for several epochs using update_compare
# construct compare rollouts between every pair in rollouts
c_rollouts_2 = [CompareRollout(rollout_1=rollouts[i], rollout_2=rollouts[j])
for i in range(len(rollouts)) for j in range(i)]
better_lst = [label_scores[j] > label_scores[i] for i in range(len(rollouts)) for j in range(i)]
for i_step in range(5):
loss = comparator_2.update_compare_rollouts(c_rollouts_2, better_lst)
print("update compare {}: {:.4f}".format(i_step, loss))
# test after training
pred_scores_after = comparator.predict([r.arch for r in rollouts])
pred_scores_2_after = comparator_2.predict([r.arch for r in rollouts])
corr_after_1 = stats.kendalltau(label_scores, pred_scores_after.cpu().data.numpy()).correlation
corr_after_2 = stats.kendalltau(
label_scores, pred_scores_2_after.cpu().data.numpy()).correlation
print("True accs: ", label_scores)
print("PREDICT: before training: {} (corr {:.3f}); after training: {} (corr {:.3f})".format(
pred_scores, corr_init_1, pred_scores_after, corr_after_1
))
print("COMPARE: before training: {} (corr {:.3f}); after training: {} (corr {:.3f})".format(
pred_scores_2, corr_init_2, pred_scores_2_after, corr_after_2
))
@pytest.mark.skipif(
not AWNAS_TEST_NASBENCH, reason="do not test the nasbench BTC by default.")
def test_equal(nasbench_search_space):
import numpy as np
from nasbench import api
ss = nasbench_search_space
arch_1 = (np.array([[0, 0, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0]], dtype=np.int8),
[1, 2, 1, 1, 0])
arch_2 = (np.array([[0, 0, 1, 0, 1, 1, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0]], dtype=np.int8),
[1, 2, 1, 1, 0])
spec_1 = api.ModelSpec(arch_1[0], ["input"] + [ss.ops_choices[ind] for
ind in arch_1[1]] + ["output"])
spec_2 = api.ModelSpec(arch_2[0], ["input"] + [ss.ops_choices[ind] for
ind in arch_2[1]] + ["output"])
assert not spec_1 == spec_2
r_1 = ss.rollout_from_genotype(spec_1)
r_2 = ss.rollout_from_genotype(spec_2)
assert r_1.genotype.hash_spec(ss.ops_choices) == r_2.genotype.hash_spec(ss.ops_choices)
assert r_1 == r_2
ss.compare_reduced = False
assert r_1 != r_2
| 7,778 | 0 | 110 |
2806076df6167d2584f2050dbeef65e5995f6784 | 536 | py | Python | tests/r/test_forbes2000.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | tests/r/test_forbes2000.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | tests/r/test_forbes2000.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.forbes2000 import forbes2000
def test_forbes2000():
"""Test module forbes2000.py by downloading
forbes2000.csv and testing shape of
extracted data has 2000 rows and 8 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = forbes2000(test_path)
try:
assert x_train.shape == (2000, 8)
except:
shutil.rmtree(test_path)
raise()
| 22.333333 | 48 | 0.76306 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.forbes2000 import forbes2000
def test_forbes2000():
"""Test module forbes2000.py by downloading
forbes2000.csv and testing shape of
extracted data has 2000 rows and 8 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = forbes2000(test_path)
try:
assert x_train.shape == (2000, 8)
except:
shutil.rmtree(test_path)
raise()
| 0 | 0 | 0 |
5f61a50e6cd2506d053464655ef7fe7163ee3ba2 | 9,604 | py | Python | pytorch_lightning/metrics/metric.py | GimmickNG/pytorch-lightning | b36c5e86d014671b0fa922d750b27420bc73b6f9 | [
"Apache-2.0"
] | 2 | 2020-08-28T12:11:59.000Z | 2020-09-27T09:44:04.000Z | pytorch_lightning/metrics/metric.py | GimmickNG/pytorch-lightning | b36c5e86d014671b0fa922d750b27420bc73b6f9 | [
"Apache-2.0"
] | 8 | 2021-01-15T13:15:09.000Z | 2021-04-04T19:52:25.000Z | pytorch_lightning/metrics/metric.py | GimmickNG/pytorch-lightning | b36c5e86d014671b0fa922d750b27420bc73b6f9 | [
"Apache-2.0"
] | 1 | 2020-10-19T03:52:27.000Z | 2020-10-19T03:52:27.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Optional
import numbers
import torch
from torch import nn
import numpy as np
from pytorch_lightning.metrics.converters import (
sync_ddp_if_available, gather_all_tensors_if_available,
convert_to_tensor, convert_to_numpy)
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
class Metric(DeviceDtypeModuleMixin, nn.Module, ABC):
"""
Abstract base class for metric implementation.
Should be used to implement metrics that
1. Return multiple Outputs
2. Handle their own DDP sync
Metric hooks that can be implemented are
* input_convert: pre-forward hook that takes care of input conversion
* output_convert: post-forward hook that takes care of output convertion
* ddp_sync: implementation of ddp sync, default is gather all
* aggregate: implement how values should be aggregated
* compute: post-ddp sync for additional metric computations
Call order
input_convert -> forward -> output_convert -> ddp_sync -> aggregate -> compute
"""
def __init__(self, name: str):
"""
Args:
name: the metric's name
"""
super().__init__()
self.name = name
self._dtype = torch.get_default_dtype()
self._device = torch.device('cpu')
# Register hooks
self.register_forward_pre_hook(self.input_convert)
self.register_forward_hook(self.output_convert)
self.register_forward_hook(self.ddp_sync)
self.register_forward_hook(self.aggregate)
self.register_forward_hook(self.compute)
@staticmethod
def input_convert(self, data: Any):
"""
Implement how the inputs should be casted before calling forward
Args:
data: input to forward method
Returns:
casted data
"""
return data
@abstractmethod
def forward(self, *args, **kwargs):
"""
Implements the actual metric computation.
Returns:
metric value or metric state
"""
raise NotImplementedError
@staticmethod
def output_convert(self, data: Any, output: Any):
"""
Implement how outputs from forward should be casted
Args:
data: input to forward method
output: output from forward method
Returns:
casted outputs
"""
return output
@staticmethod
def ddp_sync(self, data: Any, output: Any):
"""
Implement how the outputs from forward should be synced
Args:
data: input to forward method
output: output from the `output_convert` hook
Returns:
synced output
"""
return output
@staticmethod
def aggregate(self, data: Any, output: Any):
"""
Implement aggregation of values on the same device
Args:
data: input to forward method
output: output from the `ddp_sync` hook
Returns:
aggregated values
"""
return output
@staticmethod
def compute(self, data: Any, output: Any):
"""
Implement additionally metric computations to be done after the ddp sync
Args:
data: input to forward method
output: output from the `aggregate` hook
Returns:
final metric value
"""
return output
class TensorMetric(Metric):
"""
Base class for metric implementation operating directly on tensors.
All inputs and outputs will be casted to tensors if necessary.
Already handles DDP sync and input/output conversions.
"""
def __init__(self, name: str,
reduce_group: Optional[Any] = None,
reduce_op: Optional[Any] = None):
"""
Args:
name: the metric's name
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__(name)
self.reduce_group = reduce_group
self.reduce_op = reduce_op
@staticmethod
@staticmethod
@staticmethod
class TensorCollectionMetric(Metric):
    """
    Base class for metric implementation operating directly on tensors.
    All inputs will be casted to tensors if necessary. Outputs won't be casted.
    Already handles DDP sync and input conversions.
    This class differs from :class:`TensorMetric`, as it assumes all outputs to
    be collections of tensors and does not explicitly convert them. This is
    necessary, since some collections (like for ROC, Precision-Recall Curve etc.)
    cannot be converted to tensors at the highest level.
    All numpy arrays and numbers occuring in these outputs will still be converted.
    Use this class as a baseclass, whenever you want to ensure inputs are
    tensors and outputs cannot be converted to tensors automatically
    """
    def __init__(self, name: str,
                 reduce_group: Optional[Any] = None,
                 reduce_op: Optional[Any] = None):
        """
        Args:
            name: the metric's name
            reduce_group: the process group for DDP reduces (only needed for DDP training).
                Defaults to all processes (world)
            reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
                Defaults to sum.
        """
        super().__init__(name)
        self.reduce_group = reduce_group
        self.reduce_op = reduce_op
    @staticmethod
    def input_convert(self, data: Any):
        """Cast tensors, numpy arrays and numbers in the forward input to
        tensors on this metric's dtype/device (pre-forward hook)."""
        return apply_to_collection(data,
                                   (torch.Tensor, np.ndarray, numbers.Number),
                                   convert_to_tensor,
                                   self.dtype, self.device)
    @staticmethod
    def output_convert(self, data: Any, output: Any):
        """Convert tensors, numpy arrays and numbers occurring in the output
        collection to tensors; the collection structure itself is preserved."""
        return apply_to_collection(output,
                                   (torch.Tensor, np.ndarray, numbers.Number),
                                   convert_to_tensor,
                                   self.dtype, self.device)
    @staticmethod
    def ddp_sync(self, data: Any, output: Any):
        """Sync tensor outputs across DDP processes using the configured
        reduce group and reduce op (no-op outside DDP)."""
        return apply_to_collection(output, torch.Tensor, sync_ddp_if_available,
                                   self.reduce_group, self.reduce_op)
class NumpyMetric(Metric):
    """
    Base class for metric implementation operating on numpy arrays.
    All inputs will be casted to numpy if necessary and all outputs will
    be casted to tensors if necessary.
    Already handles DDP sync and input/output conversions.
    """
    def __init__(self, name: str,
                 reduce_group: Optional[Any] = None,
                 reduce_op: Optional[Any] = None):
        """
        Args:
            name: the metric's name
            reduce_group: the process group for DDP reduces (only needed for DDP training).
                Defaults to all processes (world)
            reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
                Defaults to sum.
        """
        super().__init__(name)
        self.reduce_group = reduce_group
        self.reduce_op = reduce_op
    @staticmethod
    def input_convert(self, data: Any):
        """Cast tensors, numpy arrays and numbers in the forward input to
        numpy arrays (pre-forward hook)."""
        return apply_to_collection(data,
                                   (torch.Tensor, np.ndarray, numbers.Number),
                                   convert_to_numpy)
    @staticmethod
    def output_convert(self, data: Any, output: Any):
        """Cast numpy outputs of forward back to tensors on this metric's
        dtype/device (post-forward hook)."""
        return apply_to_collection(output,
                                   (torch.Tensor, np.ndarray, numbers.Number),
                                   convert_to_tensor,
                                   self.dtype, self.device)
    @staticmethod
    def ddp_sync(self, data: Any, output: Any):
        """Sync tensor outputs across DDP processes using the configured
        reduce group and reduce op (no-op outside DDP)."""
        return apply_to_collection(output, torch.Tensor, sync_ddp_if_available,
                                   self.reduce_group, self.reduce_op)
| 33.117241 | 107 | 0.619429 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Optional
import numbers
import torch
from torch import nn
import numpy as np
from pytorch_lightning.metrics.converters import (
sync_ddp_if_available, gather_all_tensors_if_available,
convert_to_tensor, convert_to_numpy)
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
class Metric(DeviceDtypeModuleMixin, nn.Module, ABC):
"""
Abstract base class for metric implementation.
Should be used to implement metrics that
1. Return multiple Outputs
2. Handle their own DDP sync
Metric hooks that can be implemented are
* input_convert: pre-forward hook that takes care of input conversion
* output_convert: post-forward hook that takes care of output convertion
* ddp_sync: implementation of ddp sync, default is gather all
* aggregate: implement how values should be aggregated
* compute: post-ddp sync for additional metric computations
Call order
input_convert -> forward -> output_convert -> ddp_sync -> aggregate -> compute
"""
def __init__(self, name: str):
"""
Args:
name: the metric's name
"""
super().__init__()
self.name = name
self._dtype = torch.get_default_dtype()
self._device = torch.device('cpu')
# Register hooks
self.register_forward_pre_hook(self.input_convert)
self.register_forward_hook(self.output_convert)
self.register_forward_hook(self.ddp_sync)
self.register_forward_hook(self.aggregate)
self.register_forward_hook(self.compute)
@staticmethod
def input_convert(self, data: Any):
"""
Implement how the inputs should be casted before calling forward
Args:
data: input to forward method
Returns:
casted data
"""
return data
@abstractmethod
def forward(self, *args, **kwargs):
"""
Implements the actual metric computation.
Returns:
metric value or metric state
"""
raise NotImplementedError
@staticmethod
def output_convert(self, data: Any, output: Any):
"""
Implement how outputs from forward should be casted
Args:
data: input to forward method
output: output from forward method
Returns:
casted outputs
"""
return output
@staticmethod
def ddp_sync(self, data: Any, output: Any):
"""
Implement how the outputs from forward should be synced
Args:
data: input to forward method
output: output from the `output_convert` hook
Returns:
synced output
"""
return output
@staticmethod
def aggregate(self, data: Any, output: Any):
"""
Implement aggregation of values on the same device
Args:
data: input to forward method
output: output from the `ddp_sync` hook
Returns:
aggregated values
"""
return output
@staticmethod
def compute(self, data: Any, output: Any):
"""
Implement additionally metric computations to be done after the ddp sync
Args:
data: input to forward method
output: output from the `aggregate` hook
Returns:
final metric value
"""
return output
class TensorMetric(Metric):
"""
Base class for metric implementation operating directly on tensors.
All inputs and outputs will be casted to tensors if necessary.
Already handles DDP sync and input/output conversions.
"""
def __init__(self, name: str,
reduce_group: Optional[Any] = None,
reduce_op: Optional[Any] = None):
"""
Args:
name: the metric's name
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__(name)
self.reduce_group = reduce_group
self.reduce_op = reduce_op
@staticmethod
def input_convert(self, data: Any):
return apply_to_collection(data,
(torch.Tensor, np.ndarray, numbers.Number),
convert_to_tensor,
self.dtype, self.device)
@staticmethod
def output_convert(self, data: Any, output: Any):
return apply_to_collection(output, torch.Tensor, convert_to_tensor,
self.dtype, self.device)
@staticmethod
def ddp_sync(self, data: Any, output: Any):
return apply_to_collection(output, torch.Tensor, sync_ddp_if_available,
self.reduce_group, self.reduce_op)
class TensorCollectionMetric(Metric):
"""
Base class for metric implementation operating directly on tensors.
All inputs will be casted to tensors if necessary. Outputs won't be casted.
Already handles DDP sync and input conversions.
This class differs from :class:`TensorMetric`, as it assumes all outputs to
be collections of tensors and does not explicitly convert them. This is
necessary, since some collections (like for ROC, Precision-Recall Curve etc.)
cannot be converted to tensors at the highest level.
All numpy arrays and numbers occuring in these outputs will still be converted.
Use this class as a baseclass, whenever you want to ensure inputs are
tensors and outputs cannot be converted to tensors automatically
"""
def __init__(self, name: str,
reduce_group: Optional[Any] = None,
reduce_op: Optional[Any] = None):
"""
Args:
name: the metric's name
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__(name)
self.reduce_group = reduce_group
self.reduce_op = reduce_op
@staticmethod
def input_convert(self, data: Any):
return apply_to_collection(data,
(torch.Tensor, np.ndarray, numbers.Number),
convert_to_tensor,
self.dtype, self.device)
@staticmethod
def output_convert(self, data: Any, output: Any):
return apply_to_collection(output,
(torch.Tensor, np.ndarray, numbers.Number),
convert_to_tensor,
self.dtype, self.device)
@staticmethod
def ddp_sync(self, data: Any, output: Any):
return apply_to_collection(output, torch.Tensor, sync_ddp_if_available,
self.reduce_group, self.reduce_op)
class NumpyMetric(Metric):
"""
Base class for metric implementation operating on numpy arrays.
All inputs will be casted to numpy if necessary and all outputs will
be casted to tensors if necessary.
Already handles DDP sync and input/output conversions.
"""
def __init__(self, name: str,
reduce_group: Optional[Any] = None,
reduce_op: Optional[Any] = None):
"""
Args:
name: the metric's name
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__(name)
self.reduce_group = reduce_group
self.reduce_op = reduce_op
@staticmethod
def input_convert(self, data: Any):
return apply_to_collection(data,
(torch.Tensor, np.ndarray, numbers.Number),
convert_to_numpy)
@staticmethod
def output_convert(self, data: Any, output: Any):
return apply_to_collection(output,
(torch.Tensor, np.ndarray, numbers.Number),
convert_to_tensor,
self.dtype, self.device)
@staticmethod
def ddp_sync(self, data: Any, output: Any):
return apply_to_collection(output, torch.Tensor, sync_ddp_if_available,
self.reduce_group, self.reduce_op)
| 1,891 | 0 | 234 |
8e9143d76b7e83ec39ca81333f61c41cbb0b5da7 | 3,637 | py | Python | test/test_basic_style.py | jarret/bitcoin_helpers | 4b6155ea3b004ad58a717b36cd58138d058281b1 | [
"MIT"
] | null | null | null | test/test_basic_style.py | jarret/bitcoin_helpers | 4b6155ea3b004ad58a717b36cd58138d058281b1 | [
"MIT"
] | null | null | null | test/test_basic_style.py | jarret/bitcoin_helpers | 4b6155ea3b004ad58a717b36cd58138d058281b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import argparse
from framework.argparse.option import add_tmp_directory_option
from framework.bitcoin.setup import bitcoin_setup_repo
from framework.test.exec import exec_cmd_no_error
from framework.test.exec import exec_cmd_error
from framework.test.exec import exec_cmd_json_no_error
from framework.test.exec import exec_cmd_json_error
from framework.test.exec import exec_modify_fixes_check
from framework.test.exec import exec_modify_doesnt_fix_check
from framework.test.cmd import ScriptTestCmd
###############################################################################
# test
###############################################################################
###############################################################################
# UI
###############################################################################
if __name__ == "__main__":
description = ("Tests basic_style.py through its range of subcommands and "
"options.")
parser = argparse.ArgumentParser(description=description)
add_tmp_directory_option(parser)
settings = parser.parse_args()
settings.repository = bitcoin_setup_repo(settings.tmp_directory,
branch="v0.14.0")
TestBasicStyleCmd(settings).run()
| 35.31068 | 79 | 0.648337 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import argparse
from framework.argparse.option import add_tmp_directory_option
from framework.bitcoin.setup import bitcoin_setup_repo
from framework.test.exec import exec_cmd_no_error
from framework.test.exec import exec_cmd_error
from framework.test.exec import exec_cmd_json_no_error
from framework.test.exec import exec_cmd_json_error
from framework.test.exec import exec_modify_fixes_check
from framework.test.exec import exec_modify_doesnt_fix_check
from framework.test.cmd import ScriptTestCmd
###############################################################################
# test
###############################################################################
def test_help(repository):
cmd = 'bin/basic_style.py -h'
print(exec_cmd_no_error(cmd))
def test_report(repository):
cmd = 'bin/basic_style.py report -h'
print(exec_cmd_no_error(cmd))
cmd = 'bin/basic_style.py report %s' % repository
print(exec_cmd_no_error(cmd))
cmd = 'bin/basic_style.py report -j3 %s' % repository
print(exec_cmd_no_error(cmd))
cmd = ('bin/basic_style.py report -j3 %s/src/init.cpp %s/src/qt/' %
(repository, repository))
print(exec_cmd_no_error(cmd))
cmd = 'bin/basic_style.py report --json %s' % repository
print(exec_cmd_json_no_error(cmd))
cmd = ('bin/basic_style.py report --json %s/src/init.cpp %s/src/qt/' %
(repository, repository))
print(exec_cmd_json_no_error(cmd))
# no specified targets runs it on the path/repository it is invoked from:
cmd = 'bin/basic_style.py report'
original = os.getcwd()
os.chdir(str(repository))
print(exec_cmd_no_error(cmd))
os.chdir(original)
def test_check(repository):
cmd = 'bin/basic_style.py check -h'
print(exec_cmd_no_error(cmd))
cmd = 'bin/basic_style.py check -j3 %s' % repository
e, out = exec_cmd_error(cmd)
print("%d\n%s" % (e, out))
cmd = 'bin/basic_style.py check --json %s' % repository
e, out = exec_cmd_json_error(cmd)
print("%d\n%s" % (e, out))
cmd = 'bin/basic_style.py check %s/src/init.cpp' % repository
print(exec_cmd_no_error(cmd))
def test_fix(repository):
cmd = 'bin/basic_style.py fix -h'
print(exec_cmd_no_error(cmd))
check_cmd = "bin/basic_style.py check %s" % repository
modify_cmd = "bin/basic_style.py fix %s" % repository
exec_modify_fixes_check(repository, check_cmd, modify_cmd)
repository.reset_hard_head()
def tests(settings):
test_help(settings.repository)
test_report(settings.repository)
test_check(settings.repository)
test_fix(settings.repository)
class TestBasicStyleCmd(ScriptTestCmd):
def __init__(self, settings):
super().__init__(settings)
self.title = __file__
def _exec(self):
return super()._exec(tests)
###############################################################################
# UI
###############################################################################
if __name__ == "__main__":
description = ("Tests basic_style.py through its range of subcommands and "
"options.")
parser = argparse.ArgumentParser(description=description)
add_tmp_directory_option(parser)
settings = parser.parse_args()
settings.repository = bitcoin_setup_repo(settings.tmp_directory,
branch="v0.14.0")
TestBasicStyleCmd(settings).run()
| 1,923 | 18 | 191 |
79b61d44ff4d004fe8be95d305b19b4d67cb1470 | 2,068 | py | Python | conftest.py | zodman/gotrue-py | 2f94bfdbc2cf20ef50ec777bbda03face1da3e85 | [
"MIT"
] | 13 | 2021-10-06T08:50:55.000Z | 2022-03-29T18:21:12.000Z | conftest.py | zodman/gotrue-py | 2f94bfdbc2cf20ef50ec777bbda03face1da3e85 | [
"MIT"
] | 82 | 2021-09-29T11:50:29.000Z | 2022-03-24T07:27:33.000Z | conftest.py | zodman/gotrue-py | 2f94bfdbc2cf20ef50ec777bbda03face1da3e85 | [
"MIT"
] | 4 | 2021-09-15T07:33:22.000Z | 2022-01-13T22:53:01.000Z | from typing import Dict, Tuple
import pytest
# store history of failures per test class name and per index
# in parametrize (if parametrize used)
_test_failed_incremental: Dict[str, Dict[Tuple[int, ...], str]] = {}
| 39.769231 | 87 | 0.60735 | from typing import Dict, Tuple
import pytest
# store history of failures per test class name and per index
# in parametrize (if parametrize used)
_test_failed_incremental: Dict[str, Dict[Tuple[int, ...], str]] = {}
def pytest_runtest_makereport(item, call):
if "incremental" in item.keywords:
# incremental marker is used
if call.excinfo is not None:
# the test has failed
# retrieve the class name of the test
cls_name = str(item.cls)
# retrieve the index of the test (if parametrize is used
# in combination with incremental)
parametrize_index = (
tuple(item.callspec.indices.values())
if hasattr(item, "callspec")
else ()
)
# retrieve the name of the test function
test_name = item.originalname or item.name
# store in _test_failed_incremental the original name of the failed test
_test_failed_incremental.setdefault(cls_name, {}).setdefault(
parametrize_index, test_name
)
def pytest_runtest_setup(item):
if "incremental" in item.keywords:
# retrieve the class name of the test
cls_name = str(item.cls)
# check if a previous test has failed for this class
if cls_name in _test_failed_incremental:
# retrieve the index of the test (if parametrize is used
# in combination with incremental)
parametrize_index = (
tuple(item.callspec.indices.values())
if hasattr(item, "callspec")
else ()
)
# retrieve the name of the first test function to
# fail for this class name and index
test_name = _test_failed_incremental[cls_name].get(parametrize_index, None)
# if name found, test has failed for the combination of
# class name & test name
if test_name is not None:
pytest.xfail(f"previous test failed ({test_name})")
| 1,803 | 0 | 46 |
ebc98c9be150485094483d351832f5f41ddeb341 | 3,760 | py | Python | Table Objects/Striker.py | rval002/Air-Hockey-Table-Project-UCR | babc11637d0db89c012205e0c2ad1a608d1c54e3 | [
"MIT"
] | 1 | 2021-06-08T05:22:55.000Z | 2021-06-08T05:22:55.000Z | Table Objects/Striker.py | rval002/Air-Hockey-Table-Project-UCR | babc11637d0db89c012205e0c2ad1a608d1c54e3 | [
"MIT"
] | null | null | null | Table Objects/Striker.py | rval002/Air-Hockey-Table-Project-UCR | babc11637d0db89c012205e0c2ad1a608d1c54e3 | [
"MIT"
] | 1 | 2021-01-11T04:48:08.000Z | 2021-01-11T04:48:08.000Z | # **************************************************************************** #
# Air Hockey Table Project
# Author: Ricardo Valverde
# -------------------------
#This Code is Created for UCR's EE175 Senior Design Project
# This Code contains The Puck Object Striker
#For Pygame Documentation please see:
#https://www.pygame.org/docs/index.html
# **************************************************************************** #
import math
from pygame.math import Vector2
import pygame
from Constants import*
from linex import*
| 30.819672 | 118 | 0.588564 | # **************************************************************************** #
# Air Hockey Table Project
# Author: Ricardo Valverde
# -------------------------
#This Code is Created for UCR's EE175 Senior Design Project
# This Code contains The Puck Object Striker
#For Pygame Documentation please see:
#https://www.pygame.org/docs/index.html
# **************************************************************************** #
import math
from pygame.math import Vector2
import pygame
from Constants import*
from linex import*
class Striker(object):
def __init__(self,x,y):
self.x = x
self.y = y
self.startpos = Vector2(x,y)
self.position = Vector2(x,y)
self.velocity = Vector2(0,0)
initl = (x-STRIKER_RADIUS,y+STRIKER_RADIUS)
wl = (2*STRIKER_RADIUS,2*STRIKER_RADIUS)
self.startRect = pygame.Rect(initl,(1,1))
self.Rect = pygame.Rect(initl,wl)
#these lines are the boundary lines
self.centerxline = [(0,0),(0,0)]
self.centeryline = [(0,0),(0,0)]
self.xTline = [(0,0),(0,0)]
self.xBlinet = [(0,0),(0,0)]
self.yLline1 = [(0,0),(0,0)]
self.yRline1 = [(0,0),(0,0)]
def updatePosition(self,position):
self.position = Vector2(position)
def updateVelocity(self,vel):
self.position += Vector2(vel)
def getposition(self):
return self.position
def StrikerEyes(self):
#get the differnce of
radius_diff= STRIKER_RADIUS+PUCK_RADIUS
self.centerxline = [(BORDER_POSITION[0],self.y),(BORDER_POSITION[0] + FACE_LENGTH,self.y)]
self.centeryline = [(self.x,BORDER_POSITION[1]),(self.x+1,BORDER_POSITION[1] + FACE_WIDTH)]
self.xTline = [(BORDER_POSITION[0],self.y-radius_diff),(BORDER_POSITION[0] + FACE_LENGTH,self.y-radius_diff)]
self.xBlinet = [(BORDER_POSITION[0],self.y+radius_diff),(BORDER_POSITION[0] + FACE_LENGTH,self.y+radius_diff)]
self.yLline1 = [(self.x-radius_diff,BORDER_POSITION[1]),(self.x-radius_diff,BORDER_POSITION[1] + FACE_WIDTH)]
self.yRline1 = [(self.x+radius_diff,BORDER_POSITION[1]),(self.x+radius_diff,BORDER_POSITION[1] + FACE_WIDTH)]
def calculateVelocity(self,dis,time):
try:
xvel = round(dis[0]/time[0])
except:
xvel = 0
try:
yvel = round(dis[1]/time[1])
except:
yvel = 0
return Vector2(xvel,yvel)
def movetovel(self,desiredpos,dis,time):
x1 = self.position[0]
x2 = desiredpos[0]
y1 = self.position[1]
y2 = desiredpos[1]
velneeded = self.calculateVelocity(dis,time)
xval = math.copysign(velneeded[0],(x2-x1))
yval = math.copysign(velneeded[1],(y2-y1))
yval = round(yval*5)
self.updateVelocity((xval,yval))
print("updatedvel")
print(self.velocity)
def checkcol(self,puck):
if(self.Rect.colliderect(puck.Rect)):
val = self.velocity * -1
valx = val[0]
valy = val[1]
self.updateVelocity(xval,yval)
def checkstart(self):
if(self.Rect.colliderect(puck.startRect)):
self.updateVelocity(0,0)
def updatedrawpos(self):
self.position += self.velocity
self.x += self.velocity[0]
self.y += self.velocity[1]
self.Rect.move(self.position[0],self.position[1])
def draw(self,win):
#CENTER LINE FOR NOW
pygame.draw.line(win,STRIKER_LINE_COLOR,self.centerxline[0],self.centerxline[1] , 1)
pygame.draw.line(win,STRIKER_LINE_COLOR,self.centeryline[0],self.centeryline[1] , 1)
#Stiker
pygame.draw.circle(win, STRIKER_COLOR, self.position, STRIKER_RADIUS)
| 2,902 | 1 | 319 |
e67cc3f77e80a72454938caa03121621ff00417a | 19,307 | py | Python | BUtils.py | joacocruz6/cc3501-tarea2 | e6560b007a377769ef27d2b128a4dce560568542 | [
"MIT"
] | null | null | null | BUtils.py | joacocruz6/cc3501-tarea2 | e6560b007a377769ef27d2b128a4dce560568542 | [
"MIT"
] | null | null | null | BUtils.py | joacocruz6/cc3501-tarea2 | e6560b007a377769ef27d2b128a4dce560568542 | [
"MIT"
] | null | null | null |
from CC3501Utils import *
from Bombs import *
from Vista import *
from Pared import *
from Player import *
from Explosion import *
from PDes import *
from Enemy import *
from Win import *
from Power import *
import pygame
import math as m
import random as rand
#################################################################
#######MODULO DE FUNCIONES AUXILIARES PARA EL BOMBERMAN
#################################################################
###################
######choques######
###################
def chocaPared(p,l_pared,l_destructibles,direccion,dx,dy):
"""
True si esta chocando una pared
:param p: Player or Enemy
:param l_pared: List (listas de paredes)
:param direccion: str
:param dx: float
:param dy: float
:return: boolean
"""
(x,y)=p.getcenter()
epsilon=20
if direccion=="arriba":
y+=dy
if direccion=="abajo":
y-=dy
if direccion=="izquierda":
x-=dx
if direccion=="derecha":
x+=dx
for j in l_pared:
(z,w)=j.getcenter()
if abs(z-x)<epsilon and abs(w-y)<epsilon:
return True
for j in l_destructibles:
(z,w)=j.getcenter()
if abs(z-x)<epsilon and abs(w-y)<epsilon:
return True
return False
def chocar(obj1,obj2):
"""
True si objeto 1 y objeto 2 chocan
:param obj1: Object
:param obj2: Object
:return: boolean
"""
(x,y)=obj1.getcenter()
(z,w)=obj2.getcenter()
epsilon=20
return abs(z-x)<epsilon and abs(w-y)<epsilon
def frenteEnemigo(p,l_enemigos,direccion,dx,dy):
"""
True si p esta en frente de un enemigo
:param p: Enemy or Player
:param l_enemigos: list(Enemy)
:param direccion: str
:param dx: float
:param dy: float
:return: boolean
"""
(x,y)=p.getcenter()
epsilon=20
if direccion=="arriba":
y+=dy
if direccion=="abajo":
y-=dy
if direccion=="izquierda":
x-=dx
if direccion=="derecha":
x+=dx
for enemigo in l_enemigos:
(z,w)=enemigo.getcenter()
if abs(z-x)<epsilon and abs(w-y)<epsilon:
return True
return False
def moverEnemigo(l_enemigos,l_pared,l_destructibles,l_bombas,dificultad,dx,dy):
"""
Mueve los enemigos a posiciones adyacentes aleatorias
:param enemigo: Enemy
:param l_pared: list
:param l_destructibles: list
:param dx: float
:param dy: float
:return: none
"""
n=100
if dificultad=="medio":
n=50
if dificultad=="dificil":
n=25
if dificultad=="extremo":
n=5
direcciones = ["arriba", "abajo", "izquierda", "derecha"]
for enemigo in l_enemigos:
aux=[]
for e in l_enemigos:
if e!=enemigo:
aux.append(e)
j=rand.randint(0,n)
if j<4:
dire=direcciones[j]
if not chocaPared(enemigo,l_pared,l_destructibles,dire,dx,dy) and not chocaBomba(enemigo,l_bombas,dire,dx,dy) and not frenteEnemigo(enemigo,aux,dire,dx,dy):
if dire=="arriba":
enemigo.movery(1)
if dire=="abajo":
enemigo.movery(-1)
if dire=="izquierda":
enemigo.moverx(-1)
if dire=="derecha":
enemigo.moverx(1)
def chocaBorde(p,l_borde,direccion,dx,dy):
"""
True si el player o enemy apunta hacia un borde del laberinto
:param p: Player or Enemy
:param l_borde: List
:param direccion: str
:param dx: num
:param dy: num
:return: boolean
"""
(x, y) = p.getcenter()
epsilon = 20
if direccion == "arriba":
y += dy
if direccion == "abajo":
y -= dy
if direccion == "izquierda":
x -= dx
if direccion == "derecha":
x += dx
for j in l_borde:
(z, w) = j.getcenter()
if abs(z - x) < epsilon and abs(w - y) < epsilon:
return True
return False
#####################
######power ups######
#####################
#mecanicas:
def dis_prox_espacio(p,l_paredes,l_borde,l_destructibles,l_bomba,direccion,dx,dy):
"""
Da cuantos pasos debe hacer el jugador para llegar al proximo sin una pared o bomba, sirve para los power ups de saltos
:param p: Player
:param l_paredes: list
:param l_bordes: list
:param l_destructibles: list
:param l_bomba: list
:param direccion: str
:param dx:num
:param dy:num
:return:int
"""
i=1
inc=1
(x,y)=p.getpos()
aux=Player(Vector(x,y))
if direccion=="abajo" or direccion=="izquierda":
i=-1
inc=-1
while True:
if chocaBorde(aux, l_borde, direccion, dx, dy):
i = 0
break
if not chocaPared(aux,l_paredes,l_destructibles,direccion,dx,dy) and not chocaBomba(aux,l_bomba,direccion,dx,dy):
break
if direccion == "derecha":
aux.moverx(1)
if direccion == "arriba":
aux.movery(1)
if direccion == "abajo":
aux.movery(-1)
if direccion == "izquierda":
aux.moverx(-1)
i+=inc
return i
#generacion:
def generate_pwup(l_pw,l_players,l_paredes,l_win,l_bombas,l_enemigos,dx,dy):
"""
Genera un power up al azar en una posicion al azar
:param l_pw: list
:param l_players: list
:param l_paredes: list
:param l_win: list
:param l_bombas: list
:param l_enemigos: list
:param dx: num
:param dy: num
:return: None
"""
tipos=["rango","pared","bomba","inmortal","cadena"]
k=rand.randint(0,4)
tipe=tipos[k]
while True:
i=rand.randint(0,14)
j=rand.randint(0,12)
pw=Power(Vector(i*dx,j*dy),tipe)
poner=True
for pared in l_paredes:
if chocar(pared,pw):
poner=False
for power in l_pw:
if chocar(pw,power):
poner=False
for jugador in l_players:
if chocar(jugador,pw):
poner=False
for win in l_win:
if chocar(win,pw):
poner=False
for bomb in l_bombas:
if chocar(bomb,pw):
poner=False
for enemigo in l_enemigos:
if chocar(enemigo,pw):
poner=False
if poner:
l_pw.append(pw)
break
#obtencion,duracion y obtencion
def obtener_pwup(p: Player,l_power_ups,l_obtenidos,l_activados,t_a,sound=None):
"""
Mecanica para obtener un power up
:param p: Player
:param l_power_ups: list
:param l_obtenidos: list
:param l_activados: list
:param t_a: num
:param sound: wav or None
:return: None
"""
prob=rand.randint(0,100)
for pw in l_power_ups:
if chocar(p,pw):
if prob <= 40 and sound != None:
pygame.mixer.Sound.play(sound)
pw.tomar(t_a)
l_obtenidos.append(pw)
i=pw.getindex()
l_activados[i]=True
def duracion_pwup(l_power_ups,l_activados,t_a):
"""
Ve si el power up debe parar o no
:param l_power_ups: list
:param l_activados: list
:param t_a: num
:return: None
"""
for pw in l_power_ups:
tomado=pw.gettomado()
if tomado:
tiempo=pw.gettime()
duracion=pw.getduracion()
dif= abs(tiempo-t_a)
if dif>=duracion:
pw.setlife(False)
i=pw.getindex()
l_activados[i]=False
else:
i=pw.getindex()
l_activados[i]=True
def pwup_color(p: Player,l_power_ups,l_activados):
"""
Cambia el color de la camisa o el vestido
:param p: Player
:param l_power_ups: list
:param l_activados: list
:return: None
"""
for pw in l_power_ups:
i=pw.getindex()
if l_activados[i]:
(r,g,b)=pw.getcoloracion()
p.setcoloracion((r,g,b))
inactivos=True
for efecto in l_activados:
if efecto:
inactivos=False
if inactivos:
p.normalizar_camisa()
###################
######bombas#######
###################
def chocaBomba(p,l_bombas,direccion,dx,dy):
"""
Ve si el jugador o enemigo va a chocar con una bomba ( o apunta hacia ella)
:param p: Player or Enemy
:param l_bombas: list
:param direccion: str
:param dx: num
:param dy: num
:return: boolean
"""
(x,y)=p.getcenter()
epsilon=20
if direccion=="arriba":
y+=dy
if direccion=="izquierda":
x-=dx
if direccion=="abajo":
y-=dy
if direccion=="derecha":
x+=dx
for bomba in l_bombas:
(z,w)=bomba.getcenter()
if abs(z-x)<epsilon and abs(w-y)<epsilon:
return True
return False
def ponerBomba(l_bombas,jugador,direccion,t):
"""
pone una bomba en la direccion en que se mira
:param l_bombas: List
:param jugador: Player
:param direccion: str
:param t: float
:return: none
"""
(px,py)=jugador.getpos()
if direccion=="derecha":
l_bombas.append(Bombs(Vector(px+53.0,py),t))
if direccion=="izquierda":
l_bombas.append(Bombs(Vector(px-53.0,py),t))
if direccion=="arriba":
l_bombas.append(Bombs(Vector(px,py+46.0),t))
if direccion=="abajo":
l_bombas.append(Bombs(Vector(px,py-46.0),t))
def explosion_bombas(l_explosiones,l_bombas,l_paredes,rango,t_a,sonido=None):
"""
Ve que bombas deben explotar y genera las explosiones correspondientes
:param l_explosiones: list
:param l_bombas: list
:param l_paredes: list
:param rango: boolean
:param t_a: num
:param sonido: wav or None
:return: None
"""
dx=53.0
dy=46.0
for bomba in l_bombas:
t0=bomba.gettime()
dt=t_a-t0
if dt>=3000.0:
if sonido!=None:
pygame.mixer.Sound.play(sonido)
(x,y)=bomba.getcenter()
xp=x-dx/2
yp=y-dy/2
e=Explosion(t_a,Vector(xp,yp))
(e_arriba,ar)=(Explosion(t_a,Vector(xp,yp+dy)),True)
(e_izq,iz)=(Explosion(t_a,Vector(xp-dx,yp)),True)
(e_abajo,aba)=(Explosion(t_a,Vector(xp,yp-dy)),True)
(e_der,der)=(Explosion(t_a,Vector(xp+dx,yp)),True)
for pared in l_paredes:
if chocar(e_arriba,pared):
ar=False
if chocar(e_izq,pared):
iz=False
if chocar(e_abajo,pared):
aba=False
if chocar(e_der,pared):
der=False
l_explosiones.append(e)
if ar:
l_explosiones.append(e_arriba)
if iz:
l_explosiones.append(e_izq)
if aba:
l_explosiones.append(e_abajo)
if der:
l_explosiones.append(e_der)
#si tengo el power up:
if rango:
if ar:
(p_arriba,ar2)=(Explosion(t_a,Vector(xp,yp+2*dy)),True)
if iz:
(p_izq,iz2)=(Explosion(t_a,Vector(xp-2*dx,yp)),True)
if aba:
(p_aba,aba2)=(Explosion(t_a,Vector(xp,yp-2*dy)),True)
if der:
(p_der,der2)=(Explosion(t_a,Vector(xp+2*dx,yp)),True)
for pared in l_paredes:
if ar:
if chocar(p_arriba,pared):
ar2 = False
if iz:
if chocar(p_izq, pared):
iz2 = False
if aba:
if chocar(p_aba, pared):
aba2 = False
if der:
if chocar(p_der, pared):
der2 = False
if ar:
if ar2:
l_explosiones.append(p_arriba)
if iz:
if iz2:
l_explosiones.append(p_izq)
if aba:
if aba2:
l_explosiones.append(p_aba)
if der:
if der2:
l_explosiones.append(p_der)
bomba.setlife(False)
if dt>=1000.0 and not bomba.getcambio():
bomba.color=(63.0/255,63.0/255,63.0/255)
bomba.Cambio_change()
def explotar(l_objetos,l_exp):
"""
Ve que objetos son alcanzados por la explosion y los mata
:param l_objetos: list
:param l_exp: list
:return: None
"""
for obj in l_objetos:
for exp in l_exp:
if chocar(obj,exp):
obj.setlife(False)
def choque_players(l_players,l_enemigo):
"""
Ve si un enemigo esta chocando con un jugador
:param l_players: list
:param l_enemigo: list
:return: None
"""
for enemigo in l_enemigo:
for p in l_players:
if chocar(p,enemigo):
p.setlife(False)
def cadena_explosiones(l_explosiones,l_bombas,sonido,l_paredes,rango,t_a):
"""
Ve si hay que hacer una cadena de explosiones con el power up
:param l_explosiones: list
:param l_bombas: list
:param sonido: wav or None
:param l_paredes: list
:param rango:boolean
:param t_a: num
:return: boolean
"""
respuesta=False
for explosion in l_explosiones:
for bomba in l_bombas:
if chocar(explosion,bomba):
bomba.settime(4000)
respuesta=True
explosion_bombas(l_explosiones,l_bombas,l_paredes,rango,t_a,sonido)
return respuesta
######################
######creacion########
######################
def creaciondestructibles(l_destructibles,l_players,dificultad,dx,dy):
"""
Crea las paredes destructibles iniciales en posiciones aleatorias
:param l_destructibles: List
:param l_players: Player
:param dx: num
:param dy: num
:return:
"""
pos_players=[]
for p in l_players:
(x,y)=p.getcenter()
pos_players.append((x,y))
pos_players.append((x+dx,y))
pos_players.append((x-dx,y))
pos_players.append((x,y+dy))
pos_players.append((x,y-dy))
pos_players.append((x + 2*dx, y))
pos_players.append((x - 2*dx, y))
pos_players.append((x, y + 2*dy))
pos_players.append((x, y - 2*dy))
numeros=[]
n_min=5
n_max=20
if dificultad=="medio":
n_min=15
n_max=25
if dificultad=="dificil":
n_min=25
n_max=28
if dificultad=="extremo":
n_min=30
n_max=30
n_destructibles=rand.randint(n_min,n_max)
i=0;
while i<n_destructibles:
y=rand.randrange(1,11)
if y%2==0:
x=rand.randrange(1,13,2)
if y%2!=0:
x=rand.randrange(1,13)
if (x,y) in numeros:
continue
else:
pared=PDes(Vector(x*dx,y*dy))
if pared.getcenter() in pos_players:
continue
else:
numeros.append((x,y))
l_destructibles.append(pared)
i+=1
def creacionenemigos(l_enemigos,l_destructibles,l_players,dificultad,dx,dy):
    """Create the initial enemies at random free positions.

    The enemy count depends on ``dificultad`` (and, in "extremo", on the
    number of players).  An enemy is never placed on top of a player (or
    within two tiles of one), a destructible wall or another enemy.

    Fixes versus the previous version:
    * the exclusion zone above a player used ``2*dx`` instead of ``2*dy``
      for the vertical offset;
    * a misplaced ``else`` meant enemies could only ever be placed on even
      rows (odd-row draws were wasted);
    * the counter now increments only on successful placement (matching
      ``creaciondestructibles``), guaranteeing the requested count;
    * ``min``/``max`` no longer shadow the builtins.

    :param l_enemigos: list that receives the new Enemy objects
    :param l_destructibles: list of destructible walls
    :param l_players: list of Player
    :param dificultad: str ("medio"/"dificil"/"extremo", anything else = easy)
    :param dx: num, tile width
    :param dy: num, tile height
    :return: None
    """
    # Phantom players mark the spawn-protection cross around each player.
    pos_players=[]
    for p in l_players:
        (x,y)=p.getpos()
        pos_players.append(p)
        pos_players.append(Player(Vector(x+dx,y)))
        pos_players.append(Player(Vector(x-dx,y)))
        pos_players.append(Player(Vector(x,y+dy)))
        pos_players.append(Player(Vector(x,y-dy)))
        pos_players.append(Player(Vector(x+2*dx,y)))
        pos_players.append(Player(Vector(x-2*dx,y)))
        pos_players.append(Player(Vector(x,y+2*dy)))  # was y+2*dx: wrong axis
        pos_players.append(Player(Vector(x,y-2*dy)))
    n_min=4
    n_max=4
    if dificultad=="medio":
        n_min=7
        n_max=10
    if dificultad=="dificil":
        n_min=10
        n_max=13
    if dificultad=="extremo":
        if len(l_players)==2:
            n_min=14
            n_max=14
        else:
            n_min=15
            n_max=15
    n_enemigos=rand.randint(n_min,n_max)
    i=0
    while i<n_enemigos:
        y=rand.randrange(1,11)
        if y%2==0:
            # Even rows only have gaps at odd columns.
            x=rand.randrange(1,13,2)
        else:
            x=rand.randrange(1,13)
        tipe=rand.randint(1,2)
        enemigo=Enemy(Vector(x*dx,y*dy),tipe)
        poner=True
        for p in pos_players:
            if chocar(enemigo,p):
                poner=False
        for destructible in l_destructibles:
            if chocar(enemigo,destructible):
                poner=False
        for e in l_enemigos:
            if chocar(enemigo,e):
                poner=False
        if poner:
            l_enemigos.append(enemigo)
            i+=1
def creacionposganar(l_destructibles: list,l_win):
    """Hide the victory position inside one destructible box chosen
    uniformly at random.

    The previous version used ``randrange(0, len(...) - 1)``, which could
    never pick the last box and raised ValueError when only one box
    existed; ``randrange(len(...))`` covers every index.

    :param l_destructibles: non-empty list of destructible walls
    :param l_win: list that receives the Win object
    :return: None
    """
    indice = rand.randrange(len(l_destructibles))
    caja = l_destructibles[indice]
    l_win.append(Win(caja.pos))
#############################
###Creacion de mas niveles###
#############################
def creacionnivel(l_win,l_powers,l_enemigos,l_paredes,l_destructibles,l_players,l_bombas,dificultad,dx,dy):
    """Build a fresh random level: destructible walls, enemies, the hidden
    victory position and two power-ups.

    :param l_win: list that receives the Win position
    :param l_powers: list that receives the power-ups
    :param l_enemigos: list that receives the enemies
    :param l_paredes: list of fixed walls (read only)
    :param l_destructibles: list that receives the destructible walls
    :param l_players: list of Player
    :param l_bombas: list of Bombs
    :param dificultad: str
    :param dx: num, tile width
    :param dy: num, tile height
    :return: None
    """
    creaciondestructibles(l_destructibles, l_players, dificultad, dx, dy)
    creacionenemigos(l_enemigos, l_destructibles, l_players, dificultad, dx, dy)
    creacionposganar(l_destructibles, l_win)
    # Every new level starts with two power-ups on the board.
    for _ in range(2):
        generate_pwup(l_powers, l_players, l_paredes, l_win, l_bombas, l_enemigos, dx, dy)
####################
######limpieza######
####################
def ganaste(l_win,l_player):
    """Return True when some player reached the victory position.

    :param l_win: list whose first element is the Win position
    :param l_player: list of Player
    :return: boolean
    """
    meta = l_win[0]
    return any(chocar(jugador, meta) for jugador in l_player)
def lexp(l_explosiones,t_a):
    """Expire explosions: any explosion older than 500 ms is marked dead.

    :param l_explosiones: list of Explosion
    :param t_a: num, current time in ms
    :return: None
    """
    DURACION = 500.0  # lifetime of an explosion sprite, in ms
    for explosion in l_explosiones:
        if t_a - explosion.gettime() >= DURACION:
            explosion.setlife(False)
def limpiar(arr):
    """Remove, in place, every element whose ``getlife()`` is falsy.

    Survivors keep their relative order and the list object itself is
    reused, so external references to ``arr`` stay valid.

    :param arr: list of objects exposing ``getlife()``
    :return: None
    """
    vivos = [elemento for elemento in arr if elemento.getlife()]
    arr[:] = vivos
from CC3501Utils import *
from Bombs import *
from Vista import *
from Pared import *
from Player import *
from Explosion import *
from PDes import *
from Enemy import *
from Win import *
from Power import *
import pygame
import math as m
import random as rand
#################################################################
#######MODULO DE FUNCIONES AUXILIARES PARA EL BOMBERMAN
#################################################################
###################
######choques######
###################
def chocaPared(p,l_pared,l_destructibles,direccion,dx,dy):
    """Return True if the tile the entity is facing holds a wall.

    The entity's center is projected one step in ``direccion`` and tested
    against every solid and destructible wall with a 20 px per-axis
    tolerance.  An unknown direction leaves the center unmoved.

    :param p: Player or Enemy
    :param l_pared: list of solid walls
    :param l_destructibles: list of destructible walls
    :param direccion: str, "arriba"/"abajo"/"izquierda"/"derecha"
    :param dx: float, horizontal tile size
    :param dy: float, vertical tile size
    :return: boolean
    """
    tol = 20
    x, y = p.getcenter()
    pasos = {"arriba": (0, dy), "abajo": (0, -dy),
             "izquierda": (-dx, 0), "derecha": (dx, 0)}
    ox, oy = pasos.get(direccion, (0, 0))
    x, y = x + ox, y + oy
    for muro in list(l_pared) + list(l_destructibles):
        mx, my = muro.getcenter()
        if abs(mx - x) < tol and abs(my - y) < tol:
            return True
    return False
def chocar(obj1,obj2):
    """Return True when the two objects' centers lie within the collision
    tolerance (20 px) of each other on both axes.

    :param obj1: object exposing ``getcenter()``
    :param obj2: object exposing ``getcenter()``
    :return: boolean
    """
    tol = 20
    x1, y1 = obj1.getcenter()
    x2, y2 = obj2.getcenter()
    return abs(x2 - x1) < tol and abs(y2 - y1) < tol
def frenteEnemigo(p,l_enemigos,direccion,dx,dy):
    """Return True when the tile ``p`` is facing is occupied by an enemy.

    :param p: Enemy or Player
    :param l_enemigos: list of Enemy
    :param direccion: str, "arriba"/"abajo"/"izquierda"/"derecha"
    :param dx: float, horizontal tile size
    :param dy: float, vertical tile size
    :return: boolean
    """
    tol = 20
    x, y = p.getcenter()
    pasos = {"arriba": (0, dy), "abajo": (0, -dy),
             "izquierda": (-dx, 0), "derecha": (dx, 0)}
    ox, oy = pasos.get(direccion, (0, 0))
    x, y = x + ox, y + oy
    for enemigo in l_enemigos:
        ex, ey = enemigo.getcenter()
        if abs(ex - x) < tol and abs(ey - y) < tol:
            return True
    return False
def moverEnemigo(l_enemigos,l_pared,l_destructibles,l_bombas,dificultad,dx,dy):
    """Randomly move each enemy one tile, when the target tile is free.

    For every enemy a number in [0, n] is drawn and only the values 0-3
    trigger a move (one value per direction), so each enemy moves with
    probability 4/(n+1) per call — a smaller n (higher difficulty) means
    faster-moving enemies.

    :param l_enemigos: list of Enemy
    :param l_pared: list of solid walls
    :param l_destructibles: list of destructible walls
    :param l_bombas: list of Bombs
    :param dificultad: str ("medio"/"dificil"/"extremo", anything else = easy)
    :param dx: float, horizontal tile size
    :param dy: float, vertical tile size
    :return: None
    """
    # n is the inverse movement rate: a move happens only when the draw < 4.
    n=100
    if dificultad=="medio":
        n=50
    if dificultad=="dificil":
        n=25
    if dificultad=="extremo":
        n=5
    direcciones = ["arriba", "abajo", "izquierda", "derecha"]
    for enemigo in l_enemigos:
        # aux: every *other* enemy, so an enemy never walks into a peer.
        aux=[]
        for e in l_enemigos:
            if e!=enemigo:
                aux.append(e)
        j=rand.randint(0,n)
        if j<4:
            dire=direcciones[j]
            # Move only if the chosen tile has no wall, bomb or enemy.
            if not chocaPared(enemigo,l_pared,l_destructibles,dire,dx,dy) and not chocaBomba(enemigo,l_bombas,dire,dx,dy) and not frenteEnemigo(enemigo,aux,dire,dx,dy):
                if dire=="arriba":
                    enemigo.movery(1)
                if dire=="abajo":
                    enemigo.movery(-1)
                if dire=="izquierda":
                    enemigo.moverx(-1)
                if dire=="derecha":
                    enemigo.moverx(1)
def chocaBorde(p,l_borde,direccion,dx,dy):
    """Return True when ``p`` is facing a border tile of the maze.

    :param p: Player or Enemy
    :param l_borde: list of border walls
    :param direccion: str, "arriba"/"abajo"/"izquierda"/"derecha"
    :param dx: num, horizontal tile size
    :param dy: num, vertical tile size
    :return: boolean
    """
    tol = 20
    x, y = p.getcenter()
    pasos = {"arriba": (0, dy), "abajo": (0, -dy),
             "izquierda": (-dx, 0), "derecha": (dx, 0)}
    ox, oy = pasos.get(direccion, (0, 0))
    x, y = x + ox, y + oy
    for borde in l_borde:
        bx, by = borde.getcenter()
        if abs(bx - x) < tol and abs(by - y) < tol:
            return True
    return False
#####################
######power ups######
#####################
#mecanicas:
def dis_prox_espacio(p,l_paredes,l_borde,l_destructibles,l_bomba,direccion,dx,dy):
    """Return the signed number of tiles to the next free cell in
    ``direccion`` — used by the jump power-ups.

    The count is positive for "arriba"/"derecha" and negative for
    "abajo"/"izquierda"; 0 means the jump is blocked by the maze border.

    :param p: Player
    :param l_paredes: list of solid walls
    :param l_borde: list of border walls
    :param l_destructibles: list of destructible walls
    :param l_bomba: list of Bombs
    :param direccion: str, "arriba"/"abajo"/"izquierda"/"derecha"
    :param dx: num, horizontal tile size
    :param dy: num, vertical tile size
    :return: int, signed tile count (0 when blocked by the border)
    """
    i=1
    inc=1
    (x,y)=p.getpos()
    # Probe with a throw-away Player so the real one is never moved.
    aux=Player(Vector(x,y))
    if direccion=="abajo" or direccion=="izquierda":
        i=-1
        inc=-1
    while True:
        # Reaching the border means there is nowhere to land.
        if chocaBorde(aux, l_borde, direccion, dx, dy):
            i = 0
            break
        # Stop at the first tile holding neither a wall nor a bomb.
        if not chocaPared(aux,l_paredes,l_destructibles,direccion,dx,dy) and not chocaBomba(aux,l_bomba,direccion,dx,dy):
            break
        if direccion == "derecha":
            aux.moverx(1)
        if direccion == "arriba":
            aux.movery(1)
        if direccion == "abajo":
            aux.movery(-1)
        if direccion == "izquierda":
            aux.moverx(-1)
        i+=inc
    return i
#generacion:
def generate_pwup(l_pw,l_players,l_paredes,l_win,l_bombas,l_enemigos,dx,dy):
    """Spawn one random power-up on a random free board cell.

    The power-up type is drawn uniformly from the five kinds; the position
    is found by rejection sampling until it overlaps no wall, power-up,
    player, victory position, bomb or enemy.  NOTE(review): this loops
    forever if no free cell exists — callers must guarantee one.

    :param l_pw: list that receives the new Power
    :param l_players: list of Player
    :param l_paredes: list of walls
    :param l_win: list holding the victory position
    :param l_bombas: list of Bombs
    :param l_enemigos: list of Enemy
    :param dx: num, tile width
    :param dy: num, tile height
    :return: None
    """
    tipos=["rango","pared","bomba","inmortal","cadena"]
    k=rand.randint(0,4)
    tipe=tipos[k]
    while True:
        i=rand.randint(0,14)
        j=rand.randint(0,12)
        pw=Power(Vector(i*dx,j*dy),tipe)
        poner=True
        # Reject the cell if the power-up would overlap anything on it.
        for pared in l_paredes:
            if chocar(pared,pw):
                poner=False
        for power in l_pw:
            if chocar(pw,power):
                poner=False
        for jugador in l_players:
            if chocar(jugador,pw):
                poner=False
        for win in l_win:
            if chocar(win,pw):
                poner=False
        for bomb in l_bombas:
            if chocar(bomb,pw):
                poner=False
        for enemigo in l_enemigos:
            if chocar(enemigo,pw):
                poner=False
        if poner:
            l_pw.append(pw)
            break
#obtencion,duracion y obtencion
def obtener_pwup(p: Player,l_power_ups,l_obtenidos,l_activados,t_a,sound=None):
    """Let player ``p`` pick up any power-up it is touching.

    A touched power-up is marked as taken, recorded in ``l_obtenidos`` and
    its effect flag in ``l_activados`` is switched on.  When a sound is
    supplied, it plays with a 40 % chance (rolled once per call).

    :param p: Player
    :param l_power_ups: list of Power currently on the board
    :param l_obtenidos: list that collects taken power-ups
    :param l_activados: list of booleans indexed by power-up effect
    :param t_a: num, current time in ms (stored as the pickup time)
    :param sound: pygame Sound or None
    :return: None
    """
    prob = rand.randint(0, 100)
    for pw in l_power_ups:
        if chocar(p, pw):
            # `is not None` (identity test) instead of `!= None` (PEP 8).
            if prob <= 40 and sound is not None:
                pygame.mixer.Sound.play(sound)
            pw.tomar(t_a)
            l_obtenidos.append(pw)
            l_activados[pw.getindex()] = True
def duracion_pwup(l_power_ups,l_activados,t_a):
    """Refresh each taken power-up: expire it once its duration elapsed.

    An expired power-up is killed and its effect flag cleared; a still
    running one keeps its flag set.  Untaken power-ups are ignored.

    :param l_power_ups: list of Power
    :param l_activados: list of booleans indexed by power-up effect
    :param t_a: num, current time in ms
    :return: None
    """
    for pw in l_power_ups:
        if not pw.gettomado():
            continue
        idx = pw.getindex()
        expirado = abs(pw.gettime() - t_a) >= pw.getduracion()
        if expirado:
            pw.setlife(False)
            l_activados[idx] = False
        else:
            l_activados[idx] = True
def pwup_color(p: Player,l_power_ups,l_activados):
    """Tint the player's shirt/dress with the color of each active
    power-up; restore the normal color when no effect is active.

    :param p: Player
    :param l_power_ups: list of Power
    :param l_activados: list of booleans indexed by power-up effect
    :return: None
    """
    for pw in l_power_ups:
        if l_activados[pw.getindex()]:
            p.setcoloracion(pw.getcoloracion())
    if not any(l_activados):
        p.normalizar_camisa()
###################
######bombas#######
###################
def chocaBomba(p,l_bombas,direccion,dx,dy):
    """Return True when the tile ``p`` is facing holds a bomb.

    :param p: Player or Enemy
    :param l_bombas: list of Bombs
    :param direccion: str, "arriba"/"abajo"/"izquierda"/"derecha"
    :param dx: num, horizontal tile size
    :param dy: num, vertical tile size
    :return: boolean
    """
    tol = 20
    x, y = p.getcenter()
    pasos = {"arriba": (0, dy), "abajo": (0, -dy),
             "izquierda": (-dx, 0), "derecha": (dx, 0)}
    ox, oy = pasos.get(direccion, (0, 0))
    x, y = x + ox, y + oy
    for bomba in l_bombas:
        bx, by = bomba.getcenter()
        if abs(bx - x) < tol and abs(by - y) < tol:
            return True
    return False
def ponerBomba(l_bombas,jugador,direccion,t):
    """Drop a bomb on the tile the player is facing.

    The offsets (53 x 46 px) are the tile dimensions; an unknown
    direction drops nothing.

    :param l_bombas: list that receives the new Bombs object
    :param jugador: Player
    :param direccion: str, "arriba"/"abajo"/"izquierda"/"derecha"
    :param t: float, placement timestamp in ms (fuse reference)
    :return: None
    """
    px, py = jugador.getpos()
    desplazamientos = {
        "derecha": (53.0, 0.0),
        "izquierda": (-53.0, 0.0),
        "arriba": (0.0, 46.0),
        "abajo": (0.0, -46.0),
    }
    if direccion in desplazamientos:
        ox, oy = desplazamientos[direccion]
        l_bombas.append(Bombs(Vector(px + ox, py + oy), t))
def explosion_bombas(l_explosiones,l_bombas,l_paredes,rango,t_a,sonido=None):
    """Detonate every bomb whose 3-second fuse has elapsed.

    A detonation spawns a center Explosion plus one arm per direction; an
    arm is suppressed when a wall occupies its tile.  With the range
    power-up (``rango``) every surviving arm is extended one extra tile,
    again subject to walls.  One second into the fuse the bomb darkens as
    a visual warning.

    :param l_explosiones: list that receives the new Explosion objects
    :param l_bombas: list of Bombs
    :param l_paredes: list of walls that block the blast
    :param rango: boolean, range power-up active
    :param t_a: num, current time in ms
    :param sonido: pygame Sound or None, explosion sound
    :return: None
    """
    # Tile size; matches the 53 x 46 px offsets used by ponerBomba.
    dx=53.0
    dy=46.0
    for bomba in l_bombas:
        t0=bomba.gettime()
        dt=t_a-t0
        if dt>=3000.0:
            if sonido!=None:
                pygame.mixer.Sound.play(sonido)
            # Anchor the explosion sprites at the bomb's top-left corner.
            (x,y)=bomba.getcenter()
            xp=x-dx/2
            yp=y-dy/2
            e=Explosion(t_a,Vector(xp,yp))
            # One candidate arm per direction; the boolean records whether
            # the arm survives the wall check below.
            (e_arriba,ar)=(Explosion(t_a,Vector(xp,yp+dy)),True)
            (e_izq,iz)=(Explosion(t_a,Vector(xp-dx,yp)),True)
            (e_abajo,aba)=(Explosion(t_a,Vector(xp,yp-dy)),True)
            (e_der,der)=(Explosion(t_a,Vector(xp+dx,yp)),True)
            for pared in l_paredes:
                if chocar(e_arriba,pared):
                    ar=False
                if chocar(e_izq,pared):
                    iz=False
                if chocar(e_abajo,pared):
                    aba=False
                if chocar(e_der,pared):
                    der=False
            l_explosiones.append(e)
            if ar:
                l_explosiones.append(e_arriba)
            if iz:
                l_explosiones.append(e_izq)
            if aba:
                l_explosiones.append(e_abajo)
            if der:
                l_explosiones.append(e_der)
            # Range power-up: extend each surviving arm one extra tile.
            if rango:
                if ar:
                    (p_arriba,ar2)=(Explosion(t_a,Vector(xp,yp+2*dy)),True)
                if iz:
                    (p_izq,iz2)=(Explosion(t_a,Vector(xp-2*dx,yp)),True)
                if aba:
                    (p_aba,aba2)=(Explosion(t_a,Vector(xp,yp-2*dy)),True)
                if der:
                    (p_der,der2)=(Explosion(t_a,Vector(xp+2*dx,yp)),True)
                # The extension is only wall-checked when its inner arm
                # survived (otherwise p_*/••2 were never created).
                for pared in l_paredes:
                    if ar:
                        if chocar(p_arriba,pared):
                            ar2 = False
                    if iz:
                        if chocar(p_izq, pared):
                            iz2 = False
                    if aba:
                        if chocar(p_aba, pared):
                            aba2 = False
                    if der:
                        if chocar(p_der, pared):
                            der2 = False
                if ar:
                    if ar2:
                        l_explosiones.append(p_arriba)
                if iz:
                    if iz2:
                        l_explosiones.append(p_izq)
                if aba:
                    if aba2:
                        l_explosiones.append(p_aba)
                if der:
                    if der2:
                        l_explosiones.append(p_der)
            bomba.setlife(False)
        # Warning state: darken the bomb after one second of fuse.
        if dt>=1000.0 and not bomba.getcambio():
            bomba.color=(63.0/255,63.0/255,63.0/255)
            bomba.Cambio_change()
def explotar(l_objetos,l_exp):
    """Kill every object that is touched by at least one explosion.

    :param l_objetos: list of objects exposing ``setlife``/``getcenter``
    :param l_exp: list of Explosion
    :return: None
    """
    for objeto in l_objetos:
        if any(chocar(objeto, explosion) for explosion in l_exp):
            objeto.setlife(False)
def choque_players(l_players,l_enemigo):
    """Mark as dead every player that is currently touching an enemy.

    :param l_players: list of Player
    :param l_enemigo: list of Enemy
    :return: None
    """
    for jugador in l_players:
        if any(chocar(jugador, enemigo) for enemigo in l_enemigo):
            jugador.setlife(False)
def cadena_explosiones(l_explosiones,l_bombas,sonido,l_paredes,rango,t_a):
    """Chain-reaction power-up: force any bomb touched by an explosion to
    detonate right away.

    A touched bomb gets its timer re-stamped (settime 4000) so the
    immediate ``explosion_bombas`` pass treats its fuse as elapsed
    (presumably ``t_a`` is well past 7000 ms in practice — confirm).

    :param l_explosiones: list of Explosion
    :param l_bombas: list of Bombs
    :param sonido: pygame Sound or None
    :param l_paredes: list of walls that block the blast
    :param rango: boolean, range power-up active
    :param t_a: num, current time in ms
    :return: boolean, True when at least one bomb was chained
    """
    hubo_cadena = False
    for explosion in l_explosiones:
        for bomba in l_bombas:
            if not chocar(explosion, bomba):
                continue
            bomba.settime(4000)
            hubo_cadena = True
            explosion_bombas(l_explosiones, l_bombas, l_paredes, rango, t_a, sonido)
    return hubo_cadena
######################
######creacion########
######################
def creaciondestructibles(l_destructibles,l_players,dificultad,dx,dy):
    """Populate ``l_destructibles`` with randomly placed destructible walls.

    The wall count depends on ``dificultad``.  Cells within two tiles of a
    player spawn (in a cross shape) are never used, and no two walls share
    a cell.

    :param l_destructibles: list that receives the new PDes walls
    :param l_players: list of Player
    :param dificultad: str ("medio"/"dificil"/"extremo", anything else = easy)
    :param dx: num, tile width
    :param dy: num, tile height
    :return: None
    """
    # Cells reserved around every player spawn (cross, two tiles each way).
    reservadas = []
    for jugador in l_players:
        cx, cy = jugador.getcenter()
        reservadas.append((cx, cy))
        for paso in (1, 2):
            reservadas.append((cx + paso * dx, cy))
            reservadas.append((cx - paso * dx, cy))
            reservadas.append((cx, cy + paso * dy))
            reservadas.append((cx, cy - paso * dy))
    rangos = {"medio": (15, 25), "dificil": (25, 28), "extremo": (30, 30)}
    n_min, n_max = rangos.get(dificultad, (5, 20))
    total = rand.randint(n_min, n_max)
    ocupadas = []
    colocadas = 0
    while colocadas < total:
        fila = rand.randrange(1, 11)
        if fila % 2 == 0:
            # Even rows only have gaps at odd columns.
            col = rand.randrange(1, 13, 2)
        else:
            col = rand.randrange(1, 13)
        if (col, fila) in ocupadas:
            continue
        pared = PDes(Vector(col * dx, fila * dy))
        if pared.getcenter() in reservadas:
            continue
        ocupadas.append((col, fila))
        l_destructibles.append(pared)
        colocadas += 1
def creacionenemigos(l_enemigos,l_destructibles,l_players,dificultad,dx,dy):
    """Create the initial enemies at random free positions.

    The enemy count depends on ``dificultad`` (and, in "extremo", on the
    number of players).  An enemy is never placed on top of a player (or
    within two tiles of one), a destructible wall or another enemy.

    Fixes versus the previous version:
    * the exclusion zone above a player used ``2*dx`` instead of ``2*dy``
      for the vertical offset;
    * a misplaced ``else`` meant enemies could only ever be placed on even
      rows (odd-row draws were wasted);
    * the counter now increments only on successful placement (matching
      ``creaciondestructibles``), guaranteeing the requested count;
    * ``min``/``max`` no longer shadow the builtins.

    :param l_enemigos: list that receives the new Enemy objects
    :param l_destructibles: list of destructible walls
    :param l_players: list of Player
    :param dificultad: str ("medio"/"dificil"/"extremo", anything else = easy)
    :param dx: num, tile width
    :param dy: num, tile height
    :return: None
    """
    # Phantom players mark the spawn-protection cross around each player.
    pos_players=[]
    for p in l_players:
        (x,y)=p.getpos()
        pos_players.append(p)
        pos_players.append(Player(Vector(x+dx,y)))
        pos_players.append(Player(Vector(x-dx,y)))
        pos_players.append(Player(Vector(x,y+dy)))
        pos_players.append(Player(Vector(x,y-dy)))
        pos_players.append(Player(Vector(x+2*dx,y)))
        pos_players.append(Player(Vector(x-2*dx,y)))
        pos_players.append(Player(Vector(x,y+2*dy)))  # was y+2*dx: wrong axis
        pos_players.append(Player(Vector(x,y-2*dy)))
    n_min=4
    n_max=4
    if dificultad=="medio":
        n_min=7
        n_max=10
    if dificultad=="dificil":
        n_min=10
        n_max=13
    if dificultad=="extremo":
        if len(l_players)==2:
            n_min=14
            n_max=14
        else:
            n_min=15
            n_max=15
    n_enemigos=rand.randint(n_min,n_max)
    i=0
    while i<n_enemigos:
        y=rand.randrange(1,11)
        if y%2==0:
            # Even rows only have gaps at odd columns.
            x=rand.randrange(1,13,2)
        else:
            x=rand.randrange(1,13)
        tipe=rand.randint(1,2)
        enemigo=Enemy(Vector(x*dx,y*dy),tipe)
        poner=True
        for p in pos_players:
            if chocar(enemigo,p):
                poner=False
        for destructible in l_destructibles:
            if chocar(enemigo,destructible):
                poner=False
        for e in l_enemigos:
            if chocar(enemigo,e):
                poner=False
        if poner:
            l_enemigos.append(enemigo)
            i+=1
def creacionposganar(l_destructibles: list,l_win):
    """Hide the victory position inside one destructible box chosen
    uniformly at random.

    The previous version used ``randrange(0, len(...) - 1)``, which could
    never pick the last box and raised ValueError when only one box
    existed; ``randrange(len(...))`` covers every index.

    :param l_destructibles: non-empty list of destructible walls
    :param l_win: list that receives the Win object
    :return: None
    """
    indice = rand.randrange(len(l_destructibles))
    caja = l_destructibles[indice]
    l_win.append(Win(caja.pos))
#############################
###Creacion de mas niveles###
#############################
def creacionnivel(l_win,l_powers,l_enemigos,l_paredes,l_destructibles,l_players,l_bombas,dificultad,dx,dy):
    """Build a fresh random level: destructible walls, enemies, the hidden
    victory position and two power-ups.

    :param l_win: list that receives the Win position
    :param l_powers: list that receives the power-ups
    :param l_enemigos: list that receives the enemies
    :param l_paredes: list of fixed walls (read only)
    :param l_destructibles: list that receives the destructible walls
    :param l_players: list of Player
    :param l_bombas: list of Bombs
    :param dificultad: str
    :param dx: num, tile width
    :param dy: num, tile height
    :return: None
    """
    creaciondestructibles(l_destructibles, l_players, dificultad, dx, dy)
    creacionenemigos(l_enemigos, l_destructibles, l_players, dificultad, dx, dy)
    creacionposganar(l_destructibles, l_win)
    # Every new level starts with two power-ups on the board.
    for _ in range(2):
        generate_pwup(l_powers, l_players, l_paredes, l_win, l_bombas, l_enemigos, dx, dy)
####################
######limpieza######
####################
def ganaste(l_win,l_player):
    """Return True when some player reached the victory position.

    :param l_win: list whose first element is the Win position
    :param l_player: list of Player
    :return: boolean
    """
    meta = l_win[0]
    return any(chocar(jugador, meta) for jugador in l_player)
def lexp(l_explosiones,t_a):
    """Expire explosions: any explosion older than 500 ms is marked dead.

    :param l_explosiones: list of Explosion
    :param t_a: num, current time in ms
    :return: None
    """
    DURACION = 500.0  # lifetime of an explosion sprite, in ms
    for explosion in l_explosiones:
        if t_a - explosion.gettime() >= DURACION:
            explosion.setlife(False)
def limpiar(arr):
    """Remove, in place, every element whose ``getlife()`` is falsy.

    Survivors keep their relative order and the list object itself is
    reused, so external references to ``arr`` stay valid.

    :param arr: list of objects exposing ``getlife()``
    :return: None
    """
    vivos = [elemento for elemento in arr if elemento.getlife()]
    arr[:] = vivos
48f7268c6405c1a34b2e0c7b8c22e043eb5b6d4b | 102 | py | Python | myint.py | codes-by-kiwi/defining-an-int-py | 793a781f650b2ef38d429a1dc25c0e4a15997889 | [
"MIT"
] | null | null | null | myint.py | codes-by-kiwi/defining-an-int-py | 793a781f650b2ef38d429a1dc25c0e4a15997889 | [
"MIT"
] | null | null | null | myint.py | codes-by-kiwi/defining-an-int-py | 793a781f650b2ef38d429a1dc25c0e4a15997889 | [
"MIT"
] | null | null | null | myint = 7
print(myint)
# the below way is another way to do the above
print(7)
# This will print 7.
| 12.75 | 46 | 0.696078 | myint = 7
print(myint)
# the below way is another way to do the above
print(7)
# This will print 7.
| 0 | 0 | 0 |