hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
668ecae4a8a5f8ccec1d3d967fce97cd7de1ef5e | 1,738 | py | Python | curation/parsers/score.py | HDRUK/PGS_Catalog | d59067fc61961770d1e0f8bb6081d10d8bbea3e9 | [
"Apache-2.0"
] | 5 | 2020-01-29T18:04:08.000Z | 2022-01-04T18:04:05.000Z | curation/parsers/score.py | PGScatalog/PGS_Catalog | d59067fc61961770d1e0f8bb6081d10d8bbea3e9 | [
"Apache-2.0"
] | 37 | 2020-02-25T08:50:04.000Z | 2022-02-15T10:11:34.000Z | curation/parsers/score.py | HDRUK/PGS_Catalog | d59067fc61961770d1e0f8bb6081d10d8bbea3e9 | [
"Apache-2.0"
] | 3 | 2020-01-14T10:19:14.000Z | 2020-09-08T20:11:34.000Z | from django.db import IntegrityError, transaction
from curation.parsers.generic import GenericData
from curation.parsers.trait import TraitData
from catalog.models import Score
| 36.978723 | 80 | 0.54603 | from django.db import IntegrityError, transaction
from curation.parsers.generic import GenericData
from curation.parsers.trait import TraitData
from catalog.models import Score
class ScoreData(GenericData):
def __init__(self,score_name):
GenericData.__init__(self)
self.name = score_name
self.data = {'name': score_name}
@transaction.atomic
def create_score_model(self,publication):
'''
Create an instance of the Score model.
It also create instance(s) of the EFOTrait model if needed.
- publication: instance of the Publication model
Return type: Score model
'''
try:
with transaction.atomic():
self.model = Score()
self.model.set_score_ids(self.next_id_number(Score))
for field, val in self.data.items():
if field == 'trait_efo':
efo_traits = []
for trait_id in val:
trait_id = trait_id.replace(':','_').strip()
trait = TraitData(trait_id)
efo = trait.efotrait_model()
efo_traits.append(efo)
else:
setattr(self.model, field, val)
# Associate a Publication
self.model.publication = publication
self.model.save()
for efo in efo_traits:
self.model.trait_efo.add(efo)
self.model.save()
except IntegrityError as e:
self.model = None
print('Error with the creation of the Score(s) and/or the Trait(s)')
return self.model | 116 | 1,422 | 23 |
eb705a1f9ff0b084e59f7ccc5c02dfcb49cf32cd | 2,853 | py | Python | tests/variable_tests.py | bwhitesell/SpyNN | 52ade7c9f54fa81abc6f6d9133ecccafed69e5dc | [
"BSD-3-Clause"
] | 12 | 2019-08-16T15:20:47.000Z | 2021-12-08T03:18:20.000Z | tests/variable_tests.py | aiden27/SpyNE | 52ade7c9f54fa81abc6f6d9133ecccafed69e5dc | [
"BSD-3-Clause"
] | null | null | null | tests/variable_tests.py | aiden27/SpyNE | 52ade7c9f54fa81abc6f6d9133ecccafed69e5dc | [
"BSD-3-Clause"
] | 1 | 2019-08-28T14:30:07.000Z | 2019-08-28T14:30:07.000Z | import unittest
import numpy as np
from spyne import Tensor, Constant
class TestTensor(unittest.TestCase):
""" A set of tests to validate tensor definitions, attributes, functionality etc..."""
def test_tensor_attributes(self):
""" Test tensor attributes """
# value attr should return the wrapped data
self.assertTrue(np.array_equal(self.tens.value, self.data))
# shape should return the shape of the wrapped data
self.assertEqual(self.tens.shape, (2, 2, 3))
# node_uid should be a random string generated uniquely for each tensor
self.assertTrue(type(self.tens.node_uid) == str)
self.assertTrue(len(self.tens.node_uid) > 0)
new_tens = Tensor(self.tens.value)
self.assertNotEqual(self.tens.node_uid, new_tens.node_uid)
def test_tensor_instantiation(self):
""" Test tensor instantiation approaches """
new_tensor = Tensor(self.data.tolist())
# tensors can accept lists on instantiation and will internally convert the data to an ndarray
self.assertTrue(np.array_equal(new_tensor.value, self.tens.value))
# for now tensors and constants share identical functionality. In the future they may not.
class TestConstant(unittest.TestCase):
""" A set of tests to validate Constant's definitions, attributes, functionality etc..."""
def test_constants_attributes(self):
""" Test tensor attributes """
# value attr should return the wrapped data
self.assertTrue(np.array_equal(self.const.value, self.data))
# shape should return the shape of the wrapped data
self.assertEqual(self.const.shape, (2, 2, 3))
# node_uid should be a random string generated uniquely for each tensor
self.assertTrue(type(self.const.node_uid) == str)
self.assertTrue(len(self.const.node_uid) > 0)
new_const = Tensor(self.const.value)
self.assertNotEqual(self.const.node_uid, new_const.node_uid)
def test_constants_instantiation(self):
""" Test tensor instantiation approaches """
new_const = Tensor(self.data.tolist())
# tensors can accept lists on instantiation and will internally convert the data to an ndarray
self.assertTrue(np.array_equal(new_const.value, self.const.value))
if __name__ == '__main__':
unittest.main() | 34.792683 | 102 | 0.623204 | import unittest
import numpy as np
from spyne import Tensor, Constant
class TestTensor(unittest.TestCase):
""" A set of tests to validate tensor definitions, attributes, functionality etc..."""
def setUp(self):
self.data = np.array(
[
[[1, 2, 3],
[4, 5, 6]],
[[7, 8, 9],
[10, 11, 12]],
]
)
self.tens = Tensor(self.data)
def test_tensor_attributes(self):
""" Test tensor attributes """
# value attr should return the wrapped data
self.assertTrue(np.array_equal(self.tens.value, self.data))
# shape should return the shape of the wrapped data
self.assertEqual(self.tens.shape, (2, 2, 3))
# node_uid should be a random string generated uniquely for each tensor
self.assertTrue(type(self.tens.node_uid) == str)
self.assertTrue(len(self.tens.node_uid) > 0)
new_tens = Tensor(self.tens.value)
self.assertNotEqual(self.tens.node_uid, new_tens.node_uid)
def test_tensor_instantiation(self):
""" Test tensor instantiation approaches """
new_tensor = Tensor(self.data.tolist())
# tensors can accept lists on instantiation and will internally convert the data to an ndarray
self.assertTrue(np.array_equal(new_tensor.value, self.tens.value))
# for now tensors and constants share identical functionality. In the future they may not.
class TestConstant(unittest.TestCase):
""" A set of tests to validate Constant's definitions, attributes, functionality etc..."""
def setUp(self):
self.data = np.array(
[
[[1, 2, 3],
[4, 5, 6]],
[[7, 8, 9],
[10, 11, 12]],
]
)
self.const = Constant(self.data)
def test_constants_attributes(self):
""" Test tensor attributes """
# value attr should return the wrapped data
self.assertTrue(np.array_equal(self.const.value, self.data))
# shape should return the shape of the wrapped data
self.assertEqual(self.const.shape, (2, 2, 3))
# node_uid should be a random string generated uniquely for each tensor
self.assertTrue(type(self.const.node_uid) == str)
self.assertTrue(len(self.const.node_uid) > 0)
new_const = Tensor(self.const.value)
self.assertNotEqual(self.const.node_uid, new_const.node_uid)
def test_constants_instantiation(self):
""" Test tensor instantiation approaches """
new_const = Tensor(self.data.tolist())
# tensors can accept lists on instantiation and will internally convert the data to an ndarray
self.assertTrue(np.array_equal(new_const.value, self.const.value))
if __name__ == '__main__':
unittest.main() | 443 | 0 | 54 |
f1faefdac8d6e5a2820166b970bb5c7d84b3021d | 823 | py | Python | option_keyboard/scripts/test_env.py | aditimavalankar/option-keyboard | 7bace01f1aa67e0aa40bb516d8f5d96268c2aa02 | [
"MIT"
] | 13 | 2020-09-05T00:20:31.000Z | 2022-02-27T21:19:57.000Z | option_keyboard/scripts/test_env.py | aditimavalankar/option-keyboard | 7bace01f1aa67e0aa40bb516d8f5d96268c2aa02 | [
"MIT"
] | 1 | 2021-08-12T12:19:06.000Z | 2021-09-02T20:12:31.000Z | option_keyboard/scripts/test_env.py | aditimavalankar/option-keyboard | 7bace01f1aa67e0aa40bb516d8f5d96268c2aa02 | [
"MIT"
] | 1 | 2020-10-17T11:25:32.000Z | 2020-10-17T11:25:32.000Z | import gym
import option_keyboard.envs
import time
import numpy as np
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
KEY_MAP = ['UP', 'RIGHT', 'DOWN', 'LEFT']
if __name__ == '__main__':
main()
| 18.704545 | 60 | 0.547995 | import gym
import option_keyboard.envs
import time
import numpy as np
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
KEY_MAP = ['UP', 'RIGHT', 'DOWN', 'LEFT']
def main():
env = gym.make('ForagingWorld-v0', scenario=1)
env.set_learning_options()
s = env.reset()
env.render()
ep_reward = 0
rewards = []
for _ in range(300000):
a = np.random.randint(0, 4)
s, r, done, _ = env.step(a)
ep_reward += r
if done:
print('Episode terminated!')
print('Total reward:', ep_reward)
rewards.append(ep_reward)
s = env.reset()
ep_reward = 0
time.sleep(1)
env.render()
time.sleep(0.1)
print(np.array(rewards).mean(), np.array(rewards).std())
return
if __name__ == '__main__':
main()
| 610 | 0 | 23 |
34d42d3325c9297fb391f4744a194a7a6453cd05 | 3,210 | py | Python | history-data.py | toanalien/binance-toolkit | 1dbd8f9e5f7ad68569ed21e02f2326313fdb6dbe | [
"MIT"
] | null | null | null | history-data.py | toanalien/binance-toolkit | 1dbd8f9e5f7ad68569ed21e02f2326313fdb6dbe | [
"MIT"
] | null | null | null | history-data.py | toanalien/binance-toolkit | 1dbd8f9e5f7ad68569ed21e02f2326313fdb6dbe | [
"MIT"
] | null | null | null | # https://github.com/codeninja/CCXT-Historical-Data/blob/master/Binance%20Historical%20Data%20.ipynb
import ccxt
from datetime import datetime, timedelta, timezone
import math
import argparse
import pandas as pd
import csv
from pathlib import Path
import sys
scrape_candles_to_csv('btc_usdt_1m.csv', 'binance', 3, 'BTC/USDT', '1m',
'2020-09-0100:00:00Z', 1000)
| 39.146341 | 133 | 0.642679 | # https://github.com/codeninja/CCXT-Historical-Data/blob/master/Binance%20Historical%20Data%20.ipynb
import ccxt
from datetime import datetime, timedelta, timezone
import math
import argparse
import pandas as pd
import csv
from pathlib import Path
import sys
def retry_fetch_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
num_retries = 0
try:
num_retries += 1
ohlcv = exchange.fetch_ohlcv(symbol, timeframe, since, limit)
# print('Fetched', len(ohlcv), symbol, 'candles from', exchange.iso8601 (ohlcv[0][0]), 'to', exchange.iso8601 (ohlcv[-1][0]))
return ohlcv
except Exception:
if num_retries > max_retries:
raise # Exception('Failed to fetch', timeframe, symbol, 'OHLCV in', max_retries, 'attempts')
def scrape_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
earliest_timestamp = exchange.milliseconds()
timeframe_duration_in_seconds = exchange.parse_timeframe(timeframe)
timeframe_duration_in_ms = timeframe_duration_in_seconds * 1000
timedelta = limit * timeframe_duration_in_ms
all_ohlcv = []
while True:
fetch_since = earliest_timestamp - timedelta
ohlcv = retry_fetch_ohlcv(exchange, max_retries, symbol, timeframe,
fetch_since, limit)
# if we have reached the beginning of history
if ohlcv[0][0] >= earliest_timestamp:
break
earliest_timestamp = ohlcv[0][0]
all_ohlcv = ohlcv + all_ohlcv
print(len(all_ohlcv), symbol, 'candles in total from',
exchange.iso8601(all_ohlcv[0][0]), 'to',
exchange.iso8601(all_ohlcv[-1][0]))
# if we have reached the checkpoint
if fetch_since < since:
break
return all_ohlcv
def write_to_csv(filename, exchange, data):
p = Path("./data/raw/", str(exchange))
p.mkdir(parents=True, exist_ok=True)
full_path = p / str(filename)
with Path(full_path).open('w+', newline='') as output_file:
csv_writer = csv.writer(output_file,
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
csv_writer.writerows(data)
def scrape_candles_to_csv(filename, exchange_id, max_retries, symbol,
timeframe, since, limit):
# instantiate the exchange by id
exchange = getattr(ccxt, exchange_id)({
'enableRateLimit': True, # required by the Manual
})
# convert since from string to milliseconds integer if needed
if isinstance(since, str):
since = exchange.parse8601(since)
# preload all markets from the exchange
exchange.load_markets()
# fetch all candles
ohlcv = scrape_ohlcv(exchange, max_retries, symbol, timeframe, since,
limit)
# save them to csv file
write_to_csv(filename, exchange, ohlcv)
print('Saved', len(ohlcv), 'candles from', exchange.iso8601(ohlcv[0][0]),
'to', exchange.iso8601(ohlcv[-1][0]), 'to', filename)
scrape_candles_to_csv('btc_usdt_1m.csv', 'binance', 3, 'BTC/USDT', '1m',
'2020-09-0100:00:00Z', 1000)
| 2,729 | 0 | 92 |
f388fccb51ce37133123c656d0d26c5392831720 | 2,570 | py | Python | scnn/graph_proportion_experiment.py | jcatw/scnn | b89d9ccb548a560d5c993faa023598c573a86914 | [
"MIT"
] | 57 | 2016-04-08T04:57:55.000Z | 2021-08-12T18:45:32.000Z | scnn/graph_proportion_experiment.py | jcatw/scnn | b89d9ccb548a560d5c993faa023598c573a86914 | [
"MIT"
] | 2 | 2016-02-11T21:14:48.000Z | 2016-08-30T14:19:18.000Z | scnn/graph_proportion_experiment.py | jcatw/scnn | b89d9ccb548a560d5c993faa023598c573a86914 | [
"MIT"
] | 9 | 2016-02-11T21:11:34.000Z | 2018-05-04T08:29:13.000Z | __author__ = 'jatwood'
import sys
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
from graph_scnn import GraphSCNN
import data
import util
if __name__ == '__main__':
np.random.seed()
args = sys.argv[1:]
name_to_data = {
'nci1': lambda: data.parse_nci(graph_name='nci1.graph'),
'nci109': lambda: data.parse_nci(graph_name='nci109.graph'),
'mutag': lambda : data.parse_nci(graph_name='mutag.graph'),
'ptc': lambda : data.parse_nci(graph_name='ptc.graph'),
'enzymes': lambda : data.parse_nci(graph_name='enzymes.graph'),
'nci1struct': lambda: data.parse_nci(graph_name='nci1.graph', with_structural_features=True),
'nci109struct': lambda: data.parse_nci(graph_name='nci109.graph', with_structural_features=True),
}
transform_lookup = {
'id': None,
'rwl': util.rw_laplacian,
'l': util.laplacian,
}
name = args[0]
data_fn = name_to_data[name]
n_hops = int(args[1])
transform_name = args[2]
transform_fn = transform_lookup[transform_name]
scnn_graph_proportion_experiment(data_fn, name, n_hops, 0.1, 0.1, transform_fn=transform_fn, transform_name=transform_name)
| 33.376623 | 137 | 0.676265 | __author__ = 'jatwood'
import sys
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
from graph_scnn import GraphSCNN
import data
import util
def scnn_graph_proportion_experiment(data_fn, name, n_hops, prop_valid, prop_test, transform_fn=util.rw_laplacian, transform_name='rwl'):
print 'Running node experiment (%s)...' % (name,)
A, X, Y = data_fn()
n_graphs = len(A)
indices = np.arange(n_graphs)
np.random.seed(4)
np.random.shuffle(indices)
print indices
valid_start = int(n_graphs * (1 - (prop_valid + prop_test)))
test_start = int(n_graphs * (1 - prop_test))
valid_indices = indices[valid_start:test_start]
test_indices = indices[test_start:]
for train_prop in [x / 10.0 for x in range(1, 11)]:
train_end = int(valid_start * train_prop)
train_indices = indices[:train_end]
scnn = GraphSCNN(n_hops=n_hops, transform_fn=transform_fn)
scnn.fit(A, X, Y, train_indices=train_indices, valid_indices=valid_indices)
preds = scnn.predict(A, X, test_indices)
actuals = np.argmax(Y[test_indices,:], axis=1)
f1_micro = f1_score(actuals, preds, average='micro')
f1_macro = f1_score(actuals, preds, average='macro')
accuracy = accuracy_score(actuals, preds)
print 'form: name,n_hops,transform_name,micro_f,macro_f,accuracy'
print '###RESULTS###: %s,scnn%d%s,%.6f,%.8f,%.8f,%.8f' % (name, n_hops, transform_name, train_prop, f1_micro, f1_macro, accuracy)
if __name__ == '__main__':
np.random.seed()
args = sys.argv[1:]
name_to_data = {
'nci1': lambda: data.parse_nci(graph_name='nci1.graph'),
'nci109': lambda: data.parse_nci(graph_name='nci109.graph'),
'mutag': lambda : data.parse_nci(graph_name='mutag.graph'),
'ptc': lambda : data.parse_nci(graph_name='ptc.graph'),
'enzymes': lambda : data.parse_nci(graph_name='enzymes.graph'),
'nci1struct': lambda: data.parse_nci(graph_name='nci1.graph', with_structural_features=True),
'nci109struct': lambda: data.parse_nci(graph_name='nci109.graph', with_structural_features=True),
}
transform_lookup = {
'id': None,
'rwl': util.rw_laplacian,
'l': util.laplacian,
}
name = args[0]
data_fn = name_to_data[name]
n_hops = int(args[1])
transform_name = args[2]
transform_fn = transform_lookup[transform_name]
scnn_graph_proportion_experiment(data_fn, name, n_hops, 0.1, 0.1, transform_fn=transform_fn, transform_name=transform_name)
| 1,327 | 0 | 23 |
04065f101ad6b48b1aed03a8c4cee4bec1b002bd | 583 | py | Python | evennia_extensions/character_extensions/constants.py | dvoraen/arxcode | a89f2004fca10e0b1f1995c2420dd3ffbf08774b | [
"MIT"
] | 42 | 2018-08-12T00:55:24.000Z | 2021-12-24T15:16:08.000Z | evennia_extensions/character_extensions/constants.py | dvoraen/arxcode | a89f2004fca10e0b1f1995c2420dd3ffbf08774b | [
"MIT"
] | 312 | 2018-10-22T23:03:27.000Z | 2022-02-06T13:02:58.000Z | evennia_extensions/character_extensions/constants.py | dvoraen/arxcode | a89f2004fca10e0b1f1995c2420dd3ffbf08774b | [
"MIT"
] | 42 | 2018-08-12T00:41:48.000Z | 2022-01-27T14:03:16.000Z | PC_RACE, NPC_RACE, SMALL_ANIMAL, LARGE_ANIMAL, MONSTER = range(5)
RACE_TYPE_CHOICES = (
(PC_RACE, "Allowed Player Character Race"),
(NPC_RACE, "NPC Only Race"),
(SMALL_ANIMAL, "Small Animal"),
(LARGE_ANIMAL, "Large Animal"),
(MONSTER, "Monster"),
)
CHEST_KEY, ROOM_KEY = range(2)
KEY_CHOICES = ((CHEST_KEY, "chest key"), (ROOM_KEY, "room key"))
SINGLE, MARRIED, WIDOWED, DIVORCED = "single", "married", "widowed", "divorced"
MARITAL_STATUS_CHOICES = (
(SINGLE, "Single"),
(MARRIED, "Married"),
(WIDOWED, "Widowed"),
(DIVORCED, "Divorced"),
)
| 25.347826 | 79 | 0.653516 | PC_RACE, NPC_RACE, SMALL_ANIMAL, LARGE_ANIMAL, MONSTER = range(5)
RACE_TYPE_CHOICES = (
(PC_RACE, "Allowed Player Character Race"),
(NPC_RACE, "NPC Only Race"),
(SMALL_ANIMAL, "Small Animal"),
(LARGE_ANIMAL, "Large Animal"),
(MONSTER, "Monster"),
)
CHEST_KEY, ROOM_KEY = range(2)
KEY_CHOICES = ((CHEST_KEY, "chest key"), (ROOM_KEY, "room key"))
SINGLE, MARRIED, WIDOWED, DIVORCED = "single", "married", "widowed", "divorced"
MARITAL_STATUS_CHOICES = (
(SINGLE, "Single"),
(MARRIED, "Married"),
(WIDOWED, "Widowed"),
(DIVORCED, "Divorced"),
)
| 0 | 0 | 0 |
23fa47253d40d7cf2cda586bf23290dc31ed2951 | 1,411 | py | Python | ecc/performance.py | lhq1/ECDSA-SPDZ | 49db0468e3fde2a2d7d391a2a4067fc1ddeb2cb4 | [
"Unlicense",
"MIT"
] | 13 | 2015-03-18T09:51:37.000Z | 2020-07-21T09:22:31.000Z | ecc/performance.py | lhq1/ECDSA-SPDZ | 49db0468e3fde2a2d7d391a2a4067fc1ddeb2cb4 | [
"Unlicense",
"MIT"
] | 7 | 2015-07-07T02:54:10.000Z | 2019-03-12T10:17:21.000Z | ecc/performance.py | lhq1/ECDSA-SPDZ | 49db0468e3fde2a2d7d391a2a4067fc1ddeb2cb4 | [
"Unlicense",
"MIT"
] | 17 | 2015-02-28T11:44:17.000Z | 2021-07-01T19:19:57.000Z | #!/usr/bin/python
# coding=utf-8
import time
from collections import OrderedDict
from Key import Key
if __name__ == '__main__':
n = 100
print_dict('Key generation', test_generation_perf(n), n)
print_dict('Signing', test_signing_perf(n), n)
print_dict('Verifying', test_verification_perf(n), n)
| 23.516667 | 76 | 0.569809 | #!/usr/bin/python
# coding=utf-8
import time
from collections import OrderedDict
from Key import Key
def test_generation_perf(n=100):
results = OrderedDict()
for bits in (192, 224, 256, 384, 521):
t = time.time()
for i in xrange(n):
k = Key.generate(bits)
t = time.time() - t
results[bits] = t
return results
def test_signing_perf(n=100):
results = OrderedDict()
for bits in (192, 224, 256, 384, 521):
k = Key.generate(bits)
t = time.time()
for i in xrange(n):
k.sign('random string')
t = time.time() - t
results[bits] = t
return results
def test_verification_perf(n=100):
results = OrderedDict()
for bits in (192, 224, 256, 384, 521):
k = Key.generate(bits)
s = k.sign('random string')
t = time.time()
for i in xrange(n):
k.verify('random string', s)
t = time.time() - t
results[bits] = t
return results
def print_dict(title, d, n):
print(title)
print('-' * len(title))
for k, v in d.items():
print('{} bits {:10.5f} seconds {:10.5f}/sec'.format(k, v, n / v))
print('')
if __name__ == '__main__':
n = 100
print_dict('Key generation', test_generation_perf(n), n)
print_dict('Signing', test_signing_perf(n), n)
print_dict('Verifying', test_verification_perf(n), n)
| 1,000 | 0 | 92 |
9c6884c210080bb289f8281a9be3a47c5d6bf2de | 1,253 | py | Python | client_apitext/client.py | ricardoperezf/Python.APIProcesamientoDeTexto | d1a97dc6cd64dc9b8e3c19e02f7bad61c7112d9a | [
"MIT"
] | null | null | null | client_apitext/client.py | ricardoperezf/Python.APIProcesamientoDeTexto | d1a97dc6cd64dc9b8e3c19e02f7bad61c7112d9a | [
"MIT"
] | null | null | null | client_apitext/client.py | ricardoperezf/Python.APIProcesamientoDeTexto | d1a97dc6cd64dc9b8e3c19e02f7bad61c7112d9a | [
"MIT"
] | null | null | null | from flask import Flask, render_template
import requests
import json
########################################################################################################################
app = Flask(__name__)
########################################################################################################################
# MÉTODO QUE PERMITE MEDIANTE LA RUTA DE /START HACER EL POST AL SERVIDOR
@app.route('/start')
# MÉTODO QUE PERMITE MEDIANTE LA RUTA DE /RESULTADO VER EL PROCESAMIENTO DEL TXT FILE.
@app.route('/resultado')
########################################################################################################################
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000)
| 40.419355 | 120 | 0.506784 | from flask import Flask, render_template
import requests
import json
########################################################################################################################
app = Flask(__name__)
########################################################################################################################
# MÉTODO QUE PERMITE MEDIANTE LA RUTA DE /START HACER EL POST AL SERVIDOR
@app.route('/start')
def post_server():
libro = open('Ortega Y Gasset, José - El Sentido Histórico De La Teoría De Einstein.txt', 'r')
word = libro.read()
post_request = requests.post('http://192.168.1.16:5000/api/v1', data={"ejemplo": str(word)})
return render_template('./index.html', resultado=json.dumps(post_request.text))
# MÉTODO QUE PERMITE MEDIANTE LA RUTA DE /RESULTADO VER EL PROCESAMIENTO DEL TXT FILE.
@app.route('/resultado')
def get_final_result():
theRequest = requests.get('http://192.168.1.16:5000/api/v1')
return render_template('./index_result.html', resultado=json.dumps(theRequest.text))
########################################################################################################################
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000)
| 460 | 0 | 44 |
341c7c0c69febb2922c266e624a7076fdc99ed70 | 1,360 | py | Python | atlas/todoview/views.py | kiae-grid/panda-bigmon-atlas | babb883430de489e68c774bafe6c5f44f99d9a2d | [
"Apache-2.0"
] | null | null | null | atlas/todoview/views.py | kiae-grid/panda-bigmon-atlas | babb883430de489e68c774bafe6c5f44f99d9a2d | [
"Apache-2.0"
] | null | null | null | atlas/todoview/views.py | kiae-grid/panda-bigmon-atlas | babb883430de489e68c774bafe6c5f44f99d9a2d | [
"Apache-2.0"
] | null | null | null | """
views
"""
import logging
import sys, traceback
from django.shortcuts import render_to_response
from django.template import RequestContext
from core.common.utils import getPrefix, getContextVariables
from django.core.urlresolvers import reverse
#_logger = logging.getLogger(__name__)
_logger = logging.getLogger('todoview')
# Create your views here.
def todoTaskDescription(request, taskid="1"):
"""
placeholder for implementation of view with ID "TODO-task-description(taskid)":
"""
# _logger.debug('reverse:' + str(reverse('todoview:todoTaskDescription')))
_logger.debug('taskid:' + str(taskid))
try:
_logger.debug('reverse(ExtraTodoTaskDescription):' + str(reverse('ExtraTodoTaskDescription', args=(taskid,))))
except:
_logger.debug('reverse(ExtraTodoTaskDescription) failed:' + str(traceback.format_exc()))
try:
_logger.debug('reverse(todoview:todoTaskDescription):' + str(reverse('todoview:todoTaskDescription', args=(taskid,))))
except:
_logger.debug('reverse(todoview:todoTaskDescription) failed:' + str(traceback.format_exc()))
data = {
'prefix': getPrefix(request),
'taskid': taskid,
}
data.update(getContextVariables(request))
return render_to_response('todoview/todo-task-description.html', data, RequestContext(request))
| 34.871795 | 126 | 0.713971 | """
views
"""
import logging
import sys, traceback
from django.shortcuts import render_to_response
from django.template import RequestContext
from core.common.utils import getPrefix, getContextVariables
from django.core.urlresolvers import reverse
#_logger = logging.getLogger(__name__)
_logger = logging.getLogger('todoview')
# Create your views here.
def todoTaskDescription(request, taskid="1"):
"""
placeholder for implementation of view with ID "TODO-task-description(taskid)":
"""
# _logger.debug('reverse:' + str(reverse('todoview:todoTaskDescription')))
_logger.debug('taskid:' + str(taskid))
try:
_logger.debug('reverse(ExtraTodoTaskDescription):' + str(reverse('ExtraTodoTaskDescription', args=(taskid,))))
except:
_logger.debug('reverse(ExtraTodoTaskDescription) failed:' + str(traceback.format_exc()))
try:
_logger.debug('reverse(todoview:todoTaskDescription):' + str(reverse('todoview:todoTaskDescription', args=(taskid,))))
except:
_logger.debug('reverse(todoview:todoTaskDescription) failed:' + str(traceback.format_exc()))
data = {
'prefix': getPrefix(request),
'taskid': taskid,
}
data.update(getContextVariables(request))
return render_to_response('todoview/todo-task-description.html', data, RequestContext(request))
| 0 | 0 | 0 |
ac222f311bba929433f37a6842cd57fd645371d7 | 4,926 | py | Python | stream/splunk/src/splunk.py | tiiibs/connectors | 65349790813a1d49f4536662725d99d8bfa08481 | [
"Apache-2.0"
] | 132 | 2019-06-28T23:23:18.000Z | 2022-03-30T07:47:55.000Z | stream/splunk/src/splunk.py | tiiibs/connectors | 65349790813a1d49f4536662725d99d8bfa08481 | [
"Apache-2.0"
] | 472 | 2019-06-26T12:14:54.000Z | 2022-03-31T13:49:53.000Z | stream/splunk/src/splunk.py | tiiibs/connectors | 65349790813a1d49f4536662725d99d8bfa08481 | [
"Apache-2.0"
] | 185 | 2019-07-01T09:32:14.000Z | 2022-03-28T05:29:12.000Z | ################################
# Splunk Connector for OpenCTI #
################################
import os
import yaml
import json
import requests
from pycti import OpenCTIConnectorHelper, get_config_variable
if __name__ == "__main__":
SplunkInstance = SplunkConnector()
SplunkInstance.start()
| 33.510204 | 88 | 0.512789 | ################################
# Splunk Connector for OpenCTI #
################################
import os
import yaml
import json
import requests
from pycti import OpenCTIConnectorHelper, get_config_variable
class SplunkConnector:
def __init__(self):
# Initialize parameters and OpenCTI helper
config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/config.yml"
config = (
yaml.load(open(config_file_path), Loader=yaml.FullLoader)
if os.path.isfile(config_file_path)
else {}
)
self.helper = OpenCTIConnectorHelper(config)
self.splunk_url = get_config_variable("SPLUNK_URL", ["splunk", "url"], config)
self.splunk_ssl_verify = get_config_variable(
"SPLUNK_SSL_VERIFY", ["splunk", "ssl_verify"], config, False, True
)
self.splunk_login = get_config_variable(
"SPLUNK_LOGIN", ["splunk", "login"], config
)
self.splunk_password = get_config_variable(
"SPLUNK_PASSWORD", ["splunk", "password"], config
)
self.splunk_owner = get_config_variable(
"SPLUNK_OWNER", ["splunk", "owner"], config
)
self.splunk_app = get_config_variable("SPLUNK_APP", ["splunk", "app"], config)
self.splunk_kv_store_name = get_config_variable(
"SPLUNK_KV_STORE_NAME", ["splunk", "kv_store_name"], config
)
if (
self.helper.connect_live_stream_id is None
or self.helper.connect_live_stream_id == "ChangeMe"
):
raise ValueError("Missing Live Stream ID")
# Initialize the KV Store
self._query("post", "/config", {"name": self.splunk_kv_store_name})
def _query(self, method, uri, payload=None, is_json=False):
self.helper.log_info("Query " + method + " on " + uri)
url = (
self.splunk_url
+ "/servicesNS/"
+ self.splunk_owner
+ "/"
+ self.splunk_app
+ "/storage/collections"
+ uri
)
if method == "get":
r = requests.get(
url,
auth=(self.splunk_login, self.splunk_password),
params=payload,
verify=self.splunk_ssl_verify,
)
elif method == "post":
if is_json:
headers = {"content-type": "application/json"}
r = requests.post(
url,
auth=(self.splunk_login, self.splunk_password),
headers=headers,
json=payload,
verify=self.splunk_ssl_verify,
)
else:
r = requests.post(
url,
auth=(self.splunk_login, self.splunk_password),
data=payload,
verify=self.splunk_ssl_verify,
)
elif method == "delete":
r = requests.delete(
url,
auth=(self.splunk_login, self.splunk_password),
verify=self.splunk_ssl_verify,
)
else:
raise ValueError("Unsupported method")
if r.status_code < 500:
print(r.text)
try:
return r.json()
except:
return r.text
else:
self.helper.log_info(r.text)
def _process_message(self, msg):
try:
data = json.loads(msg.data)["data"]
except:
raise ValueError("Cannot process the message: " + msg)
# Handle creation
if msg.event == "create":
self.helper.log_info(
"[CREATE] Processing data {" + data["x_opencti_id"] + "}"
)
data["_key"] = data["x_opencti_id"]
return self._query("post", "/data/" + self.splunk_kv_store_name, data, True)
# Handle update
if msg.event == "update":
self.helper.log_info(
"[UPDATE] Processing data {" + data["x_opencti_id"] + "}"
)
data["_key"] = data["x_opencti_id"]
return self._query(
"post",
"/data/" + self.splunk_kv_store_name + "/" + data["x_opencti_id"],
data,
True,
)
# Handle delete
elif msg.event == "delete":
self.helper.log_info(
"[DELETE] Processing data {" + data["x_opencti_id"] + "}"
)
return self._query(
"delete",
"/data/" + self.splunk_kv_store_name + "/" + data["x_opencti_id"],
data,
)
return None
def start(self):
self.helper.listen_stream(self._process_message)
if __name__ == "__main__":
SplunkInstance = SplunkConnector()
SplunkInstance.start()
| 4,486 | 1 | 130 |
780d001b5832616ea114538d674bb733eb4fe038 | 3,356 | py | Python | gym_minigrid/envelopes_light.py | pierg/wiseml-patterns | 2decf2954001296bd04261b00ae144f53359a2b8 | [
"BSD-3-Clause"
] | null | null | null | gym_minigrid/envelopes_light.py | pierg/wiseml-patterns | 2decf2954001296bd04261b00ae144f53359a2b8 | [
"BSD-3-Clause"
] | 6 | 2021-03-18T21:24:56.000Z | 2022-03-11T23:34:25.000Z | gym_minigrid/envelopes_light.py | pierg/wiseml-patterns | 2decf2954001296bd04261b00ae144f53359a2b8 | [
"BSD-3-Clause"
] | null | null | null |
from configurations import config_grabber as cg
from extendedminigrid import *
from perception import Perception
import gym
class SafetyEnvelope(gym.core.Wrapper):
"""
Safety envelope for safe exploration.
Uses monitors for avoiding unsafe actions and shaping rewards
""" | 33.56 | 107 | 0.650179 |
from configurations import config_grabber as cg
from extendedminigrid import *
from perception import Perception
import gym
class SafetyEnvelope(gym.core.Wrapper):
    """
    Safety envelope for safe exploration.

    Wraps a gym environment, checks every proposed action against a set of
    runtime monitors, substitutes a safe action when a monitor fires, and
    shapes the reward with a small penalty per violation.
    """

    def __init__(self, env):
        super().__init__(env)
        # Grab the global configuration
        self.config = cg.Configuration.grab()
        # Most recent agent-proposed / monitor-shaped actions
        self.propsed_action = None
        self.shaped_action = None
        # Monitor bookkeeping, populated at runtime
        self.meta_monitor = []
        self.monitor_states = {}
        # Agent perception, refreshed on every step from the observations
        self.perception = Perception(env.gen_obs_decoded())
        # Reward constants taken from the configuration
        self.step_reward = self.config.rewards.standard.step
        self.goal_reward = self.config.rewards.standard.goal
        self.death_reward = self.config.rewards.standard.death

    def step(self, proposed_action):
        if self.config.debug_mode:
            print("proposed_action = " + self.env.action_to_string(proposed_action))
        self.perception.update(self.env.gen_obs_decoded())
        # Rendering (only meaningful with a single worker process)
        if self.config.a2c.num_processes == 1 and self.config.rendering:
            self.env.render('human')

        # Each entry: (monitor fired?, action to substitute when it fires).
        # Conditions are evaluated eagerly and in a fixed order; when several
        # fire, the last one wins -- matching the original check sequence.
        checks = (
            # waterAbsence
            (self.perception.is_condition_satisfied("stepping-on-water", proposed_action),
             self.env.actions.done),
            # lightUniversally
            (not self.perception.is_condition_satisfied("light-on-current-room"),
             self.env.actions.done),
            # lightPrecedence
            (self.perception.is_condition_satisfied("entering-a-room", proposed_action)
             and not self.perception.is_condition_satisfied("light-switch-turned-on"),
             self.env.actions.right),
            # openDoorResponse
            (self.perception.is_condition_satisfied("door-closed-in-front")
             and proposed_action != self.env.actions.toggle,
             self.env.actions.toggle),
            # switchOffResponse
            (self.perception.is_condition_satisfied("light-switch-in-front-off")
             and proposed_action != self.env.actions.toggle,
             self.env.actions.toggle),
        )

        violations = 0
        shaped_reward = 0
        safe_action = proposed_action
        for fired, fallback in checks:
            if fired:
                violations += 1
                shaped_reward -= 0.1
                safe_action = fallback

        # Forward a suitable action to the wrapped environment
        obs, reward, done, info = self.env.step(safe_action)
        # Shape the reward with the accumulated monitor penalties
        reward += shaped_reward
        for _ in range(violations):
            info["event"].append("violation")
        return obs, reward, done, info
ff9d4ac491b229ed70f9691ce99edc52322e332e | 13,230 | py | Python | tests/test_validate_network_config.py | Cray-HPE/canu | 3a92ce1e9b63f35aa30b9135afaa734e61909407 | [
"MIT"
] | 6 | 2021-09-16T22:02:48.000Z | 2022-02-04T18:08:57.000Z | tests/test_validate_network_config.py | Cray-HPE/canu | 3a92ce1e9b63f35aa30b9135afaa734e61909407 | [
"MIT"
] | 57 | 2021-09-17T17:15:59.000Z | 2022-03-31T20:56:21.000Z | tests/test_validate_network_config.py | Cray-HPE/canu | 3a92ce1e9b63f35aa30b9135afaa734e61909407 | [
"MIT"
] | 4 | 2022-01-06T17:09:02.000Z | 2022-02-04T18:09:33.000Z | # MIT License
#
# (C) Copyright [2022] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Test CANU validate network config commands."""
from os import mkdir, urandom
from unittest.mock import patch
from click import testing
from netmiko import ssh_exception
from canu.cli import cli
from .test_validate_switch_config import switch_config
# Shared fixtures for the CLI invocations below.
username = "admin"
password = "admin"
ips = "192.168.1.1"
# NOTE(review): `credentials` and `running_config_file` are not referenced
# anywhere in this module -- confirm they are unused before removing.
credentials = {"username": username, "password": password}
cache_minutes = 0  # disable result caching so every invocation hits the mocks
running_config_file = "running_switch.cfg"
csm = "1.0"  # CSM release the generated configs are validated against
runner = testing.CliRunner()
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config(netmiko_command, switch_vendor):
    """Test that the `canu validate network config` command runs."""
    # Generated config differs from the running config by one extra line.
    switch_config_edit = switch_config[:-15] + "router add\n"
    with runner.isolated_filesystem():
        switch_vendor.return_value = "aruba"
        # Dead store removed: a previous assignment of "sw-spine-001" to
        # netmiko_command.return_value was immediately overwritten here.
        netmiko_command.return_value = switch_config
        mkdir("generated")
        with open("generated/sw-spine-001.cfg", "w") as f:
            f.writelines(switch_config_edit)
        result = runner.invoke(
            cli,
            [
                "--cache",
                cache_minutes,
                "validate",
                "network",
                "config",
                "--ips",
                ips,
                "--username",
                username,
                "--password",
                password,
                "--generated",
                "generated/",
                "--csm",
                csm,
            ],
        )
        assert result.exit_code == 0
        assert (
            "Switch: sw-spine-001 (192.168.1.1)\n"
            + "Differences\n"
            + "-------------------------------------------------------------------------\n"
            + "In Generated Not In Running (+)     |  In Running Not In Generated (-)   \n"
            + "-------------------------------------------------------------------------\n"
            + "Total Additions:                 1  |  Total Deletions:                 1\n"
            + "                                    |  Script:                          1\n"
            + "Router:                          1  |                                    \n"
        ) in str(result.output)
def test_validate_network_config_running_file():
    """Test `canu validate network config` against configs on disk (no SSH mocks)."""
    # Generated config differs from the running config by one extra line.
    switch_config_edit = switch_config[:-15] + "router add\n"
    with runner.isolated_filesystem():
        mkdir("running")
        mkdir("generated")
        # Running config comes from a file, so no netmiko/vendor patching needed.
        with open("running/running_switch.cfg", "w") as f:
            f.writelines(switch_config)
        with open("generated/sw-spine-001.cfg", "w") as f:
            f.writelines(switch_config_edit)
        result = runner.invoke(
            cli,
            [
                "--cache",
                cache_minutes,
                "validate",
                "network",
                "config",
                "--running",
                "running/",
                "--generated",
                "generated/",
                "--csm",
                csm,
            ],
        )
        assert result.exit_code == 0
        assert (
            "Switch: sw-spine-001\n"
            + "Differences\n"
            + "-------------------------------------------------------------------------\n"
            + "In Generated Not In Running (+)     |  In Running Not In Generated (-)   \n"
            + "-------------------------------------------------------------------------\n"
            + "Total Additions:                 1  |  Total Deletions:                 1\n"
            + "                                    |  Script:                          1\n"
            + "Router:                          1  |                                    \n"
        ) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_file(netmiko_command, switch_vendor):
    """Test that the `canu validate network config` command runs from a file."""
    # Generated config differs from the running config by one extra line.
    switch_config_edit = switch_config[:-15] + "router add\n"
    with runner.isolated_filesystem():
        switch_vendor.return_value = "aruba"
        # Dead store removed: a previous assignment of "sw-spine-001" to
        # netmiko_command.return_value was immediately overwritten here.
        netmiko_command.return_value = switch_config
        mkdir("generated")
        # IPs are read from a file instead of the --ips option.
        with open("test.txt", "w") as f:
            f.write("192.168.1.1")
        with open("generated/sw-spine-001.cfg", "w") as f:
            f.writelines(switch_config_edit)
        result = runner.invoke(
            cli,
            [
                "--cache",
                cache_minutes,
                "validate",
                "network",
                "config",
                "--ips-file",
                "test.txt",
                "--username",
                username,
                "--password",
                password,
                "--generated",
                "generated/",
                "--csm",
                csm,
            ],
        )
        assert result.exit_code == 0
        assert (
            "Switch: sw-spine-001 (192.168.1.1)\n"
            + "Differences\n"
            + "-------------------------------------------------------------------------\n"
            + "In Generated Not In Running (+)     |  In Running Not In Generated (-)   \n"
            + "-------------------------------------------------------------------------\n"
            + "Total Additions:                 1  |  Total Deletions:                 1\n"
            + "                                    |  Script:                          1\n"
            + "Router:                          1  |                                    \n"
        ) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_password_prompt(netmiko_command, switch_vendor):
    """Test that the `canu validate network config` command runs and prompts for password."""
    # Generated config differs from the running config by one extra line.
    switch_config_edit = switch_config[:-15] + "router add\n"
    with runner.isolated_filesystem():
        switch_vendor.return_value = "aruba"
        # Dead store removed: a previous assignment of "sw-spine-001" to
        # netmiko_command.return_value was immediately overwritten here.
        netmiko_command.return_value = switch_config
        mkdir("generated")
        with open("generated/sw-spine-001.cfg", "w") as f:
            f.writelines(switch_config_edit)
        # --password is deliberately omitted; it is supplied via the prompt
        # through `input=` instead.
        result = runner.invoke(
            cli,
            [
                "--cache",
                cache_minutes,
                "validate",
                "network",
                "config",
                "--ips",
                ips,
                "--username",
                username,
                "--generated",
                "generated/",
                "--csm",
                csm,
            ],
            input=password,
        )
        assert result.exit_code == 0
        assert (
            "Switch: sw-spine-001 (192.168.1.1)\n"
            + "Differences\n"
            + "-------------------------------------------------------------------------\n"
            + "In Generated Not In Running (+)     |  In Running Not In Generated (-)   \n"
            + "-------------------------------------------------------------------------\n"
            + "Total Additions:                 1  |  Total Deletions:                 1\n"
            + "                                    |  Script:                          1\n"
            + "Router:                          1  |                                    \n"
        ) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_timeout(netmiko_command, switch_vendor):
    """Test that the `canu validate network config` command errors on timeout."""
    switch_config_edit = switch_config[:-15] + "router add\n"
    with runner.isolated_filesystem():
        switch_vendor.return_value = "aruba"
        # Simulate an unreachable switch: every netmiko call raises a timeout.
        netmiko_command.side_effect = ssh_exception.NetmikoTimeoutException
        mkdir("generated")
        with open("generated/sw-spine-001.cfg", "w") as f:
            f.writelines(switch_config_edit)
        result = runner.invoke(
            cli,
            [
                "--cache",
                cache_minutes,
                "validate",
                "network",
                "config",
                "--ips",
                ips,
                "--username",
                username,
                "--password",
                password,
                "--generated",
                "generated/",
                "--csm",
                csm,
            ],
        )
        # The CLI reports the failure in its output but still exits cleanly.
        assert result.exit_code == 0
        assert (
            "Errors\n"
            + "----------------------------------------------------------------------------------------------------\n"
            + "192.168.1.1     - Timeout error. Check the IP address and try again.\n"
        ) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_authentication(netmiko_command, switch_vendor):
    """Test that the `canu validate network config` command errors on authentication."""
    switch_config_edit = switch_config[:-15] + "router add\n"
    with runner.isolated_filesystem():
        switch_vendor.return_value = "aruba"
        # Simulate bad credentials: every netmiko call raises an auth error.
        netmiko_command.side_effect = ssh_exception.NetmikoAuthenticationException
        mkdir("generated")
        with open("generated/sw-spine-001.cfg", "w") as f:
            f.writelines(switch_config_edit)
        result = runner.invoke(
            cli,
            [
                "--cache",
                cache_minutes,
                "validate",
                "network",
                "config",
                "--ips",
                ips,
                "--username",
                username,
                "--password",
                password,
                "--generated",
                "generated/",
                "--csm",
                csm,
            ],
        )
        # The CLI reports the failure in its output but still exits cleanly.
        assert result.exit_code == 0
        assert (
            "Errors\n"
            + "----------------------------------------------------------------------------------------------------\n"
            + "192.168.1.1     - Authentication error. Check the credentials or IP address and try again"
        ) in str(result.output)
def test_validate_network_config_bad_config_file():
    """Test that the `canu validate network config` command fails on bad file."""
    switch_config_edit = switch_config[:-15] + "router add\n"
    with runner.isolated_filesystem():
        mkdir("running")
        mkdir("generated")
        # Generate random binary file -- must be rejected as not-a-config.
        with open("running/bad.file", "wb") as f:
            f.write(urandom(128))
        # A text file that is not valid switch configuration.
        with open("running/bad_config.cfg", "w") as f:
            f.write("bad")
        # A valid running config whose generated counterpart is missing.
        with open("running/switch.cfg", "w") as f:
            f.writelines(switch_config_edit)
        result = runner.invoke(
            cli,
            [
                "--cache",
                cache_minutes,
                "validate",
                "network",
                "config",
                "--running",
                "running/",
                "--generated",
                "generated/",
                "--csm",
                csm,
            ],
        )
        # All three problems are reported in the output; exit code stays 0.
        assert result.exit_code == 0
        assert (
            "running/bad_config.cfg - The file running/bad_config.cfg is not a valid config file."
        ) in str(result.output)
        assert (
            "sw-spine-001 - Could not find generated config file generated/sw-spine-001.cfg"
        ) in str(result.output)
        assert (
            "running/bad.file - The file running/bad.file is not a valid config file."
        ) in str(result.output)
| 38.459302 | 118 | 0.480877 | # MIT License
#
# (C) Copyright [2022] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Test CANU validate network config commands."""
from os import mkdir, urandom
from unittest.mock import patch
from click import testing
from netmiko import ssh_exception
from canu.cli import cli
from .test_validate_switch_config import switch_config
username = "admin"
password = "admin"
ips = "192.168.1.1"
credentials = {"username": username, "password": password}
cache_minutes = 0
running_config_file = "running_switch.cfg"
csm = "1.0"
runner = testing.CliRunner()
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config(netmiko_command, switch_vendor):
"""Test that the `canu validate network config` command runs."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
netmiko_command.return_value = "sw-spine-001"
netmiko_command.return_value = switch_config
mkdir("generated")
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--ips",
ips,
"--username",
username,
"--password",
password,
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"Switch: sw-spine-001 (192.168.1.1)\n"
+ "Differences\n"
+ "-------------------------------------------------------------------------\n"
+ "In Generated Not In Running (+) | In Running Not In Generated (-) \n"
+ "-------------------------------------------------------------------------\n"
+ "Total Additions: 1 | Total Deletions: 1\n"
+ " | Script: 1\n"
+ "Router: 1 | \n"
) in str(result.output)
def test_validate_network_config_running_file():
"""Test that the `canu validate network config` command runs."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
mkdir("running")
mkdir("generated")
with open("running/running_switch.cfg", "w") as f:
f.writelines(switch_config)
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--running",
"running/",
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"Switch: sw-spine-001\n"
+ "Differences\n"
+ "-------------------------------------------------------------------------\n"
+ "In Generated Not In Running (+) | In Running Not In Generated (-) \n"
+ "-------------------------------------------------------------------------\n"
+ "Total Additions: 1 | Total Deletions: 1\n"
+ " | Script: 1\n"
+ "Router: 1 | \n"
) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_file(netmiko_command, switch_vendor):
"""Test that the `canu validate network config` command runs from a file."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
netmiko_command.return_value = "sw-spine-001"
netmiko_command.return_value = switch_config
mkdir("generated")
with open("test.txt", "w") as f:
f.write("192.168.1.1")
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--ips-file",
"test.txt",
"--username",
username,
"--password",
password,
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"Switch: sw-spine-001 (192.168.1.1)\n"
+ "Differences\n"
+ "-------------------------------------------------------------------------\n"
+ "In Generated Not In Running (+) | In Running Not In Generated (-) \n"
+ "-------------------------------------------------------------------------\n"
+ "Total Additions: 1 | Total Deletions: 1\n"
+ " | Script: 1\n"
+ "Router: 1 | \n"
) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_password_prompt(netmiko_command, switch_vendor):
"""Test that the `canu validate network config` command runs and prompts for password."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
netmiko_command.return_value = "sw-spine-001"
netmiko_command.return_value = switch_config
mkdir("generated")
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--ips",
ips,
"--username",
username,
"--generated",
"generated/",
"--csm",
csm,
],
input=password,
)
assert result.exit_code == 0
assert (
"Switch: sw-spine-001 (192.168.1.1)\n"
+ "Differences\n"
+ "-------------------------------------------------------------------------\n"
+ "In Generated Not In Running (+) | In Running Not In Generated (-) \n"
+ "-------------------------------------------------------------------------\n"
+ "Total Additions: 1 | Total Deletions: 1\n"
+ " | Script: 1\n"
+ "Router: 1 | \n"
) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_timeout(netmiko_command, switch_vendor):
"""Test that the `canu validate network config` command errors on timeout."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
netmiko_command.side_effect = ssh_exception.NetmikoTimeoutException
mkdir("generated")
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--ips",
ips,
"--username",
username,
"--password",
password,
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"Errors\n"
+ "----------------------------------------------------------------------------------------------------\n"
+ "192.168.1.1 - Timeout error. Check the IP address and try again.\n"
) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_authentication(netmiko_command, switch_vendor):
"""Test that the `canu validate network config` command errors on authentication."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
netmiko_command.side_effect = ssh_exception.NetmikoAuthenticationException
mkdir("generated")
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--ips",
ips,
"--username",
username,
"--password",
password,
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"Errors\n"
+ "----------------------------------------------------------------------------------------------------\n"
+ "192.168.1.1 - Authentication error. Check the credentials or IP address and try again"
) in str(result.output)
def test_validate_network_config_bad_config_file():
"""Test that the `canu validate network config` command fails on bad file."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
mkdir("running")
mkdir("generated")
# Generate random binary file
with open("running/bad.file", "wb") as f:
f.write(urandom(128))
with open("running/bad_config.cfg", "w") as f:
f.write("bad")
with open("running/switch.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--running",
"running/",
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"running/bad_config.cfg - The file running/bad_config.cfg is not a valid config file."
) in str(result.output)
assert (
"sw-spine-001 - Could not find generated config file generated/sw-spine-001.cfg"
) in str(result.output)
assert (
"running/bad.file - The file running/bad.file is not a valid config file."
) in str(result.output)
| 0 | 0 | 0 |
655eb8bd1fd6b16d3d8e01035eeb305c77d22aa3 | 2,311 | py | Python | sunless_web/management/commands/reset_entries_by_entities.py | bluedisk/SunlessSeaKo | 1e6d498ff7e735b8d272dd0bca6c17741a2faedb | [
"MIT"
] | 2 | 2019-02-19T11:53:29.000Z | 2021-02-18T23:57:20.000Z | sunless_web/management/commands/reset_entries_by_entities.py | bluedisk/SunlessSeaKo | 1e6d498ff7e735b8d272dd0bca6c17741a2faedb | [
"MIT"
] | 4 | 2018-05-26T13:18:27.000Z | 2018-05-26T13:19:50.000Z | sunless_web/management/commands/reset_entries_by_entities.py | bluedisk/SunlessSeaKo | 1e6d498ff7e735b8d272dd0bca6c17741a2faedb | [
"MIT"
] | null | null | null | """
rest_entiries_by_entities 커맨드용 클래스 파일
"""
import tqdm
from django.core.management.base import BaseCommand
from django.db import transaction
from sunless_web.models import Entry, Entity, Translation, Discussion
class Command(BaseCommand):
""" V1 > V2 데이터 마이그레이션용 임시 커맨드 """
help = 'Delete all translations of entries and get from entities'
@transaction.atomic
def insert_as_checker(user, entry, translate, final):
""" insert old tranlations to entry as user """
if translate:
trans = Translation(entry=entry, text=translate, user=user)
trans.save()
Discussion(msg='기존 유저 번역 등록', translate=trans).save()
if final:
trans = Translation(entry=entry, text=final, user=user)
trans.save()
Discussion(msg='기존 유저 번역 등록', translate=trans).save()
| 33.985294 | 84 | 0.553873 | """
rest_entiries_by_entities 커맨드용 클래스 파일
"""
import tqdm
from django.core.management.base import BaseCommand
from django.db import transaction
from sunless_web.models import Entry, Entity, Translation, Discussion
class Command(BaseCommand):
    """Temporary command for the V1 > V2 translation-data migration."""
    help = 'Delete all translations of entries and get from entities'

    @transaction.atomic
    def handle(self, *args, **options):
        """Wipe all Translation/Discussion rows and rebuild them from Entity data.

        Runs inside a single transaction, so any failure rolls the whole
        migration back and leaves the tables untouched.
        """
        Translation.objects.all().delete()
        Discussion.objects.all().delete()
        for entity in tqdm.tqdm(Entity.objects.all()):
            for field in ['Name', 'Teaser', 'Description']:
                original = entity.original.get(field, '')
                reference = entity.reference.get(field, '')
                papago = entity.papago.get(field, '')
                translate = entity.translate.get(field, '')
                final = entity.final.get(field, '')
                # Only migrate fields that have source text and at least one
                # kind of translation data.
                if original and (reference or papago or translate or final):
                    # Narrow try: only Entry.objects.get can raise DoesNotExist;
                    # the follow-up writes stay outside the handler.
                    try:
                        entry = Entry.objects.get(hash_v1=entity.hash, object=field)
                    except Entry.DoesNotExist:
                        print(entity.hash, field, entity.path())
                        print("|".join([reference, papago, translate, final]))
                        # Carry context in the exception instead of a bare
                        # ValueError(), so the failing entity shows up in the
                        # traceback as well as on stdout.
                        raise ValueError(
                            "No Entry for hash %s field %s" % (entity.hash, field)
                        )
                    if not entry.checker.exists():
                        insert_as_checker(None, entry, translate, final)
                    # NOTE(review): translations are inserted once per checker;
                    # confirm duplicating them per checker is intended.
                    for checker in entry.checker.all():
                        insert_as_checker(checker, entry, translate, final)
                    if reference:
                        entry.reference = reference
                    if papago:
                        entry.text_pp = papago
                    entry.update_status()
                    entry.save()
def insert_as_checker(user, entry, translate, final):
    """Attach legacy user translations to *entry*, credited to *user*.

    Both the in-progress text and the finalized text (when present) become
    Translation rows, each paired with a Discussion note marking the
    migration.
    """
    for legacy_text in (translate, final):
        if not legacy_text:
            continue
        migrated = Translation(entry=entry, text=legacy_text, user=user)
        migrated.save()
        Discussion(msg='기존 유저 번역 등록', translate=migrated).save()
| 1,464 | 0 | 26 |
691ec26445dae7f029cb8e8c0f0288ca9fe7b55f | 4,101 | py | Python | dpd/mapping/link.py | davidbailey/dpd | 29bce937e34afa2161788a5c4a911e590a388229 | [
"MIT"
] | 6 | 2020-08-13T22:21:25.000Z | 2021-09-15T19:12:51.000Z | dpd/mapping/link.py | davidbailey/dpd | 29bce937e34afa2161788a5c4a911e590a388229 | [
"MIT"
] | 3 | 2018-01-25T09:11:01.000Z | 2020-12-22T17:31:24.000Z | dpd/mapping/link.py | davidbailey/dpd | 29bce937e34afa2161788a5c4a911e590a388229 | [
"MIT"
] | null | null | null | """
A link is based on an GMNS link (https://github.com/zephyr-data-specs/GMNS/blob/master/Specification/link.schema.json). However, our links are only one way: all two way links are broken into two one-way links. This means there is only one direction to consider.
Links are made up of one, two, three, or four of the following Segments:
1. Lanes. Lanes are wide enough for a motor vehicle. Bicycles and pedestrians may also use segments. Lanes have direction from their parent link.
2. Cycleways. Cycleways may be found between segments and sidewalks. They are wide enough for a bicycle. Pedestrians may also use segments. Motor vehicles may unfortunately end up in cycleways.
3. Sidewalks. Sidewalks are on the side of a link. Bicycles and pedestrians may use sidewalks. Motor vehicles may also end up on sidewalks.
4. Parking. A link must have at least one lane to have parking. Parking goes in between the segments and the cycleway in the case of a protected cycleway and in between the cycleway and the sidewalk in the case of an unprotected cycleway.
Things to fix: What about a (right-hand drive) cycleway on the left side of a one-way street?
"""
import requests
from .lane import Lane
from .cycleway import Cycleway
from .sidewalk import Sidewalk
from .parking import Parking
class Link:
"""
Note: the output_intersection of a link means that link is an input_link of that intersection. And the input_intersection of a link means that link is an output_link of that intersection
"""
| 43.168421 | 261 | 0.64911 | """
A link is based on an GMNS link (https://github.com/zephyr-data-specs/GMNS/blob/master/Specification/link.schema.json). However, our links are only one way: all two way links are broken into two one-way links. This means there is only one direction to consider.
Links are made up of one, two, three, or four of the following Segments:
1. Lanes. Lanes are wide enough for a motor vehicle. Bicycles and pedestrians may also use segments. Lanes have direction from their parent link.
2. Cycleways. Cycleways may be found between segments and sidewalks. They are wide enough for a bicycle. Pedestrians may also use segments. Motor vehicles may unfortunately end up in cycleways.
3. Sidewalks. Sidewalks are on the side of a link. Bicycles and pedestrians may use sidewalks. Motor vehicles may also end up on sidewalks.
4. Parking. A link must have at least one lane to have parking. Parking goes in between the segments and the cycleway in the case of a protected cycleway and in between the cycleway and the sidewalk in the case of an unprotected cycleway.
Things to fix: What about a (right-hand drive) cycleway on the left side of a one-way street?
"""
import requests
from .lane import Lane
from .cycleway import Cycleway
from .sidewalk import Sidewalk
from .parking import Parking
class Link:
    """
    Note: the output_intersection of a link means that link is an input_link of that intersection. And the input_intersection of a link means that link is an output_link of that intersection
    """

    def __init__(
        self,
        name,
        geometry,
        segments,
        input_intersection=None,
        output_intersection=None,
        opposite_direction_link=None,
        **kwargs
    ):
        """Build a one-way link and register it with its intersections.

        Registering: this link becomes an output_link of input_intersection
        and an input_link of output_intersection (see class note).
        Extra keyword arguments become attributes verbatim (e.g. max_speed).
        """
        self.name = name
        self.geometry = geometry
        # NOTE(review): the update_segments_* methods below rebuild this as a
        # list bracketed by None sentinels ([None, seg..., None]) -- confirm
        # callers passing `segments` here use the same convention.
        self.segments = segments
        self.input_intersection = input_intersection
        if input_intersection:
            input_intersection.add_output_link(self)
        self.output_intersection = output_intersection
        if output_intersection:
            output_intersection.add_input_link(self)
        self.opposite_direction_link = opposite_direction_link
        # kwargs are useful for setting things like max_speed
        for attribute, value in kwargs.items():
            setattr(self, attribute, value)

    def update_segments_from_osm(
        self,
        number_of_lanes=0,
        parking=None,
        cycleway=None,
        sidewalk=None,
    ):
        """Rebuild self.segments from OSM-style attributes.

        Each insert(-1, ...) keeps the trailing None sentinel, so the result
        is [None, lanes..., (cycleway/parking/sidewalk)..., None].
        Ordering matches the module docstring: a "lane" cycleway (unprotected)
        goes between the lanes and parking; a "track" cycleway (protected)
        goes between parking and the sidewalk.
        """
        self.segments = [None, None]
        segment_number = 0
        for _ in range(number_of_lanes):
            lane = Lane(self, segment_number)
            self.segments.insert(-1, lane)
            segment_number += 1
        if cycleway == "lane":
            self.segments.insert(-1, Cycleway(self, segment_number))
            segment_number += 1
        if parking:
            self.segments.insert(-1, Parking(self, segment_number, parking))
            segment_number += 1
        if cycleway == "track":
            self.segments.insert(-1, Cycleway(self, segment_number))
            segment_number += 1
        if sidewalk:
            self.segments.insert(-1, Sidewalk(self, segment_number))
            segment_number += 1

    def update_segments_from_streetmix(self, url):
        """Rebuild self.segments from a Streetmix street JSON fetched at *url*.

        Segment order follows the Streetmix document; unrecognized segment
        types are silently skipped.
        """
        r = requests.get(url)
        street = r.json()
        self.segments = [None, None]
        segment_number = 0
        for segment in street["data"]["street"]["segments"]:
            if segment["type"] == "drive-lane":
                lane = Lane(self, segment_number)
                self.segments.insert(-1, lane)
                segment_number += 1
            elif segment["type"] == "parking":
                # NOTE(review): unlike update_segments_from_osm, Parking is
                # built without a parking-type argument here -- confirm
                # Parking's signature allows that.
                self.segments.insert(-1, Parking(self, segment_number))
                segment_number += 1
            elif segment["type"] == "bike-lane":
                self.segments.insert(-1, Cycleway(self, segment_number))
                segment_number += 1
            elif segment["type"] == "sidewalk":
                self.segments.insert(-1, Sidewalk(self, segment_number))
                segment_number += 1
| 2,493 | 0 | 81 |
29db9a561e24afa6d811aeff05ee249defa2f6e1 | 4,292 | py | Python | fetch_temps.py | acaird/nest-graph | 99d0d404aa99a5e9b50fc0acc26c1facd9839620 | [
"Apache-2.0"
] | null | null | null | fetch_temps.py | acaird/nest-graph | 99d0d404aa99a5e9b50fc0acc26c1facd9839620 | [
"Apache-2.0"
] | null | null | null | fetch_temps.py | acaird/nest-graph | 99d0d404aa99a5e9b50fc0acc26c1facd9839620 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# formatted with black
#
import json
import requests
import sys
from os import getenv
if __name__ == "__main__":
    # OAuth bootstrap path, kept for reference: exchanges a client id/secret
    # and PIN for an access token instead of reading one from the environment.
    # nest_access_token = get_nest_access_token(
    #     getenv("NEST_CLIENT_ID"),
    #     getenv("NEST_CLIENT_SECRET"),
    #     getenv("NEST_AUTHORIZATION_CODE"),
    # )
    nest_access_token = {}
    nest_access_token["access_token"] = getenv("NEST_ACCESS_TOKEN")
    if nest_access_token["access_token"] is None:
        print("Please set the NEST_ACCESS_TOKEN environment variable")
        sys.exit(1)
    results = get_nest_temperatures(nest_access_token["access_token"])
    # get_nest_temperatures only includes "status_code" in its return value
    # on failure, so its presence distinguishes an error from a data payload.
    if "status_code" in results:
        print(json.dumps(results, indent=4))
    else:
        print_results_stdout(results)
| 33.015385 | 87 | 0.618826 | #!/usr/bin/env python3
#
# formatted with black
#
import json
import requests
import sys
from os import getenv
def get_nest_access_token(
    client_id,
    client_secret,
    authorization_code,
    grant_type="authorization_code",
    url="https://api.home.nest.com/oauth2/access_token",
):
    """Exchange an OAuth authorization code (PIN) for a Nest access token.

    POSTs the client credentials and code to the Nest OAuth endpoint and
    returns the decoded JSON response (which contains "access_token").
    Exits the process with status 1 on any non-OK HTTP response.

    NOTE(review): this prints the client secret to stdout for debugging —
    consider removing before sharing logs.
    """
    print(
        "Client ID: {}\nClient Secret: {}\nAuthorization Code (PIN): {}".format(
            client_id, client_secret, authorization_code
        )
    )
    results = requests.post(
        url,
        data={
            "client_id": client_id,
            "client_secret": client_secret,
            "grant_type": grant_type,
            "code": authorization_code,
        },
    )
    if results.status_code != requests.codes.ok:
        print(
            """===> Error getting access code from Nest.
        Do you have the correct client_id, client_secret,
        and authorization code (pin)? See the Nest Developer
        Guide at https://developers.nest.com/ for information
        on getting them; they are required.
        """,
            file=sys.stderr,
        )
        print("(Status code: {})".format(results.status_code))
        print("(Error data: {})".format(json.dumps(results.json())))
        sys.exit(1)
    return results.json()
def get_nest_temperatures(access_token, url="https://developer-api.nest.com/"):
    """Fetch the full Nest API data tree for the given access token.

    Returns the decoded JSON on success, or a dict with "status_code" and
    "reason" keys on any HTTP failure.
    """
    headers = {
        "content-type": "application/json",
        "authorization": "Bearer {}".format(access_token),
    }
    response = requests.get(url, headers=headers, allow_redirects=False)
    # Nest always redirects, but it is good to check, just in case
    if response.status_code == requests.codes.temporary_redirect:
        redirect_url = response.headers["Location"]
        response = requests.get(redirect_url, headers=headers, allow_redirects=False)
    if response.status_code != requests.codes.ok:
        return {"status_code": response.status_code, "reason": response.reason}
    return response.json()
def print_results_stdout(results):
    """Print a human-readable summary of a Nest API data tree.

    Expects the dict returned by `get_nest_temperatures`. Exits with status 1
    if the expected "devices"/"structures"/"thermostats" keys are missing.
    """
    if "devices" not in results:
        print("The Nest API returned no devices for your account", file=sys.stderr)
        sys.exit(1)
    if "structures" not in results:
        print("The Nest API returned no structures for your account", file=sys.stderr)
        sys.exit(1)
    if "thermostats" not in results["devices"]:
        print(
            "There don't seem to be any thermostats associated with your Nest account",
            file=sys.stderr,
        )
        sys.exit(1)
    # One summary line per structure (home), then one per thermostat.
    for structure in results["structures"]:
        print(
            "{}: {} Smoke detectors: {} Thermostats: {}".format(
                results["structures"][structure]["name"],
                results["structures"][structure]["away"],
                len(results["structures"][structure]["smoke_co_alarms"]),
                len(results["structures"][structure]["thermostats"]),
            )
        )
    for tstat in results["devices"]["thermostats"]:
        # Key names embed the unit, e.g. "ambient_temperature_f" when the
        # thermostat's temperature_scale is "F".
        temp_scale = results["devices"]["thermostats"][tstat]["temperature_scale"]
        ambient_temp_key = "ambient_temperature_{}".format(temp_scale.lower())
        target_temp_key = "target_temperature_{}".format(temp_scale.lower())
        print(
            "{:>25}: Currently: {}{} Set to: {}{} {:>7}".format(
                results["devices"]["thermostats"][tstat]["name_long"],
                results["devices"]["thermostats"][tstat][ambient_temp_key],
                temp_scale,
                results["devices"]["thermostats"][tstat][target_temp_key],
                temp_scale,
                results["devices"]["thermostats"][tstat]["hvac_state"].capitalize(),
            )
        )
if __name__ == "__main__":
    # Script entry point: read a pre-obtained Nest access token from the
    # environment, fetch current device data, and print a summary.
    #
    # The OAuth exchange below is kept for reference; it is only needed once
    # to obtain the long-lived access token.
    # nest_access_token = get_nest_access_token(
    #     getenv("NEST_CLIENT_ID"),
    #     getenv("NEST_CLIENT_SECRET"),
    #     getenv("NEST_AUTHORIZATION_CODE"),
    # )
    nest_access_token = {}
    nest_access_token["access_token"] = getenv("NEST_ACCESS_TOKEN")
    if nest_access_token["access_token"] is None:
        print("Please set the NEST_ACCESS_TOKEN environment variable")
        sys.exit(1)
    results = get_nest_temperatures(nest_access_token["access_token"])
    # get_nest_temperatures only includes "status_code" on failure, so its
    # presence distinguishes an error payload from real device data.
    if "status_code" in results:
        print(json.dumps(results, indent=4))
    else:
        print_results_stdout(results)
685059f5435ac24c87869c9acb3bf2f0374fd3e4 | 1,209 | py | Python | apps/07_wizard_battle/you_try/program.py | pmealus/python-jumpstart-course-demos2 | 7b6faae3108c69afff2d735090d30d3691a51ff9 | [
"MIT"
] | null | null | null | apps/07_wizard_battle/you_try/program.py | pmealus/python-jumpstart-course-demos2 | 7b6faae3108c69afff2d735090d30d3691a51ff9 | [
"MIT"
] | null | null | null | apps/07_wizard_battle/you_try/program.py | pmealus/python-jumpstart-course-demos2 | 7b6faae3108c69afff2d735090d30d3691a51ff9 | [
"MIT"
] | null | null | null | # imports
import time
from actors import Wizard, Creature
import random
if __name__ == '__main__':
main()
| 19.819672 | 79 | 0.504549 | # imports
import time
from actors import Wizard, Creature
import random
def main():
    """Program entry point: print the banner, then run the game loop."""
    print_header()
    game_loop()
def print_header():
    """Print the application banner to stdout."""
    print('*' * 30)
    print('     WIZARD APP')
    print('*' * 30)
    print()
def game_loop():
    """Run the interactive fight loop until the player chooses to exit.

    Each turn a random creature appears and the player may attack, look
    around, or run away; any other input exits the loop.
    """
    creatures = [
        Creature('bat', 1),
        Creature('toad', 5),
        Creature('tiger', 12),
        Creature('Dragon', 50),
        Creature('Evil Wizard', 1000)
    ]
    hero = Wizard('Krondor', 75)
    #print(creatures)
    while True:
        # NOTE(review): if every creature is defeated, random.choice([])
        # raises IndexError — consider ending the game when the list empties.
        baddy = random.choice(creatures)
        print("A level {} {} has appeared".format(baddy.level, baddy.name))
        cmd = input('Would you like to [a]ttack, [l]ook around, or [r]unaway?')
        cmd = cmd.lower()
        if cmd == 'a':
            # Wizard.attack returns truthy on a win; defeated creatures
            # are removed so they cannot reappear.
            if hero.attack(baddy):
                creatures.remove(baddy)
            else:
                print("The wizard must meditate.")
                time.sleep(3)
                print("The wizard recovers.")
        elif cmd == 'l':
            print('looking around')
        elif cmd == 'r':
            print('running away')
        else:
            print('Exiting')
            break
        print()
# Run the game only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| 1,025 | 0 | 69 |
8752296ce1a99fe613989ec385ef4b325218ce5f | 14,309 | py | Python | examples/text8_benchmark.py | johntrimble/adaptive-softmax-keras | f115d97f063c57056ef3134b3918e57d42750360 | [
"MIT"
] | 10 | 2018-06-13T06:33:33.000Z | 2020-06-17T18:56:49.000Z | examples/text8_benchmark.py | johntrimble/adaptive-softmax-keras | f115d97f063c57056ef3134b3918e57d42750360 | [
"MIT"
] | 1 | 2018-08-06T07:52:34.000Z | 2018-10-30T22:37:02.000Z | examples/text8_benchmark.py | johntrimble/adaptive-softmax-keras | f115d97f063c57056ef3134b3918e57d42750360 | [
"MIT"
] | 3 | 2018-11-23T10:32:24.000Z | 2020-07-06T06:07:29.000Z | """
Trains a simple language model based on the one found at
https://github.com/facebookresearch/adaptive-softmax and generates comparative
results for full softmax, differentiated softmax, and adaptive softmax. This
benchmark uses the text8 (http://mattmahoney.net/dc/textdata.html) dataset. This
dataset isn't the best demonstration of adaptive softmax's strengths, but it is
of a convenient size for downloading and training in a reasonable amount of
time.
You can run the benchmark by executing the following at the project root:
PYTHONPATH="$PYTHONPATH:." python examples/text8_benchmark.py --graph
You can see all of the other options by using the `--help` option:
usage: text8_benchmark.py [-h] [-b {adaptive,full,differentiated}]
[--no-resume] [--output-directory OUTPUT_DIRECTORY]
[--graph]
optional arguments:
-h, --help show this help message and exit
-b {adaptive,full,differentiated}, --benchmarks {adaptive,full,differentiated}
run benchmark for different variations of softmax
--no-resume prevents resuming a previously interrupted benchmark
--output-directory OUTPUT_DIRECTORY
where to store output of benchmark
--graph dump a graph of perplexity over time for bencmarks
By default, the benchmark runs for every variation of softmax. This can take a
long time to train on the CPU (over a day) so use of a GPU is recommended.
"""
from keras.utils.data_utils import get_file
from keras.preprocessing import text
from keras.preprocessing import sequence
from keras import initializers
from keras.models import Model
from keras.layers import (Dense,
Dropout,
Input,
LSTM,
Embedding,
Activation)
from keras.optimizers import Adagrad
from trimble.keras.adaptive import (DifferentiatedSoftmaxProduceLogits,
AdaptiveSoftmaxProduceLogits,
AdaptiveLogProb)
from zipfile import ZipFile
import numpy as np
import tensorflow as tf
import math
import io
import time
import os
import json
TEXT8_DATA_URL='http://mattmahoney.net/dc/text8.zip'
def load_data(vocab_size=45000, batch_size=128, sequence_length=20, output_directory='./benchmark_out'):
    """
    Loads Text8 dataset. (http://mattmahoney.net/dc/textdata.html)

    # Arguments
        vocab_size: maximum number of words to use.
        batch_size: the batch size that will be used when this data is passed to
            `Model.fit(..)` or similar function.
        sequence_length: the number of time steps for each batch.

    # Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """
    raw_data = _load_raw_text8_data(output_directory=output_directory)
    train_text, dev_text = _split_text8(raw_data)
    # The vocabulary is fit on the training split only, so dev-only words
    # map to the '<unk>' token.
    tokenizer = _build_tokenizer(train_text, vocab_size=vocab_size)
    raw_data = None # allow gc
    eos_idx = tokenizer.word_index['</s>']
    results = []
    # Append '</s>' so the final word of each split has a target token.
    data_sequence = tokenizer.texts_to_sequences([train_text])[0] + [eos_idx]
    results.append(_segment_sequence_into_batches(data_sequence, eos_idx, batch_size, sequence_length))
    data_sequence = tokenizer.texts_to_sequences([dev_text])[0] + [eos_idx]
    results.append(_segment_sequence_into_batches(data_sequence, eos_idx, batch_size, sequence_length))
    return tuple(results)
if __name__ == '__main__':
    # Command-line entry point: parse options, run the requested benchmarks,
    # print a summary, and optionally write a comparison graph.
    import argparse
    import sys
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', '--benchmarks',
                        choices=['adaptive', 'full', 'differentiated'],
                        action='append',
                        help="run benchmark for different variations of softmax")
    parser.add_argument('--iterations',
                        type=int,
                        default=10,
                        help="number of training iterations")
    parser.add_argument('--no-resume',
                        dest='resume',
                        action='store_false',
                        help="prevents resuming a previously interrupted benchmark")
    parser.add_argument('--output-directory',
                        dest="output_directory",
                        default='benchmark_out',
                        help="where to store output of benchmark")
    parser.add_argument('--graph',
                        action='store_true',
                        help="dump a graph of perplexity over time for bencmarks")
    options = parser.parse_args()
    # With no explicit -b flags, run every benchmark variant.
    options.benchmarks = options.benchmarks or ['adaptive', 'full', 'differentiated']
    if not os.path.exists(options.output_directory):
        os.mkdir(options.output_directory)
    result = run_benchmarks(options.iterations, benchmarks=options.benchmarks, output_directory=options.output_directory, resume=options.resume)
    print_summary(result)
    if options.graph:
        dump_graph(result, os.path.join(options.output_directory, 'text8_performance_comparison.png'))
Trains a simple language model based on the one found at
https://github.com/facebookresearch/adaptive-softmax and generates comparative
results for full softmax, differentiated softmax, and adaptive softmax. This
benchmark uses the text8 (http://mattmahoney.net/dc/textdata.html) dataset. This
dataset isn't the best demonstration of adaptive softmax's strengths, but it is
of a convenient size for downloading and training in a reasonable amount of
time.
You can run the benchmark by executing the following at the project root:
PYTHONPATH="$PYTHONPATH:." python examples/text8_benchmark.py --graph
You can see all of the other options by using the `--help` option:
usage: text8_benchmark.py [-h] [-b {adaptive,full,differentiated}]
[--no-resume] [--output-directory OUTPUT_DIRECTORY]
[--graph]
optional arguments:
-h, --help show this help message and exit
-b {adaptive,full,differentiated}, --benchmarks {adaptive,full,differentiated}
run benchmark for different variations of softmax
--no-resume prevents resuming a previously interrupted benchmark
--output-directory OUTPUT_DIRECTORY
where to store output of benchmark
--graph dump a graph of perplexity over time for bencmarks
By default, the benchmark runs for every variation of softmax. This can take a
long time to train on the CPU (over a day) so use of a GPU is recommended.
"""
from keras.utils.data_utils import get_file
from keras.preprocessing import text
from keras.preprocessing import sequence
from keras import initializers
from keras.models import Model
from keras.layers import (Dense,
Dropout,
Input,
LSTM,
Embedding,
Activation)
from keras.optimizers import Adagrad
from trimble.keras.adaptive import (DifferentiatedSoftmaxProduceLogits,
AdaptiveSoftmaxProduceLogits,
AdaptiveLogProb)
from zipfile import ZipFile
import numpy as np
import tensorflow as tf
import math
import io
import time
import os
import json
TEXT8_DATA_URL='http://mattmahoney.net/dc/text8.zip'
def _load_raw_text8_data(output_directory='./benchmark_out'):
    """Download (and cache) the text8 corpus, returning it as one string.

    Uses Keras's `get_file` so repeated calls reuse the cached archive in
    `output_directory`; integrity is checked against a known MD5.
    """
    dirname = 'text8.zip'
    path = get_file(
        dirname,
        origin=TEXT8_DATA_URL,
        md5_hash='f26f94c5209bc6159618bad4a559ff81',
        archive_format='zip',
        cache_dir=output_directory)
    # The archive contains a single member named 'text8'; decode it as UTF-8.
    with ZipFile(path) as text8zip:
        with io.TextIOWrapper(text8zip.open('text8'), encoding='utf-8') as text8file:
            return text8file.read()
def _build_tokenizer(data, vocab_size=45000):
    """Fit a Keras Tokenizer on `data` with a vocabulary capped at
    `vocab_size`, reserving indices for the '</s>' and '<unk>' tokens.
    """
    num_words = vocab_size-1
    # First pass: fit on the corpus just to learn word frequencies.
    tokenizer = text.Tokenizer()
    tokenizer.fit_on_texts([data])
    # Put the special tokens first, then corpus words ordered by their
    # fitted index (i.e. by descending frequency), truncated to num_words.
    words = ['</s>', '<unk>']
    words.extend([w for (w,_) in sorted(list(tokenizer.word_index.items()), key=lambda x: x[1])])
    words = words[:num_words]
    # Word indices start at 1; Keras reserves 0 implicitly.
    word_index = dict(zip(words, range(1, vocab_size)))
    # Second tokenizer with the curated word_index installed directly.
    tokenizer = text.Tokenizer(num_words=min(num_words, len(word_index)), oov_token='<unk>')
    tokenizer.word_index = word_index
    return tokenizer
def _split_text8(text8_text):
return text8_text[:99000000], text8_text[99000000:]
def _segment_sequence_into_batches(data_sequence, eos_idx, batch_size, sequence_length):
number_batches = int(np.ceil(len(data_sequence) / (batch_size*sequence_length)))
data = np.full(number_batches*sequence_length*batch_size, eos_idx, dtype='int32')
data[-len(data_sequence):] = data_sequence
data = np.reshape(data, (batch_size, -1))
x = np.roll(data, 1, axis=1)
x[:, 0] = eos_idx
x = np.vstack(np.hsplit(x, x.shape[1] // 20))
labels = np.vstack(np.hsplit(data, data.shape[1] // 20))
labels = np.expand_dims(labels, axis=-1)
return x, labels
def load_data(vocab_size=45000, batch_size=128, sequence_length=20, output_directory='./benchmark_out'):
    """
    Loads Text8 dataset. (http://mattmahoney.net/dc/textdata.html)

    # Arguments
        vocab_size: maximum number of words to use.
        batch_size: the batch size that will be used when this data is passed to
            `Model.fit(..)` or similar function.
        sequence_length: the number of time steps for each batch.

    # Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """
    raw_data = _load_raw_text8_data(output_directory=output_directory)
    train_text, dev_text = _split_text8(raw_data)
    # The vocabulary is fit on the training split only, so dev-only words
    # map to the '<unk>' token.
    tokenizer = _build_tokenizer(train_text, vocab_size=vocab_size)
    raw_data = None # allow gc
    eos_idx = tokenizer.word_index['</s>']
    results = []
    # Append '</s>' so the final word of each split has a target token.
    data_sequence = tokenizer.texts_to_sequences([train_text])[0] + [eos_idx]
    results.append(_segment_sequence_into_batches(data_sequence, eos_idx, batch_size, sequence_length))
    data_sequence = tokenizer.texts_to_sequences([dev_text])[0] + [eos_idx]
    results.append(_segment_sequence_into_batches(data_sequence, eos_idx, batch_size, sequence_length))
    return tuple(results)
def get_word_index(vocab_size=45000, output_directory="./benchmark_out"):
    """Return the word -> index mapping built from the text8 training split."""
    corpus = _load_raw_text8_data(output_directory=output_directory)
    training_text = _split_text8(corpus)[0]
    return _build_tokenizer(training_text, vocab_size=vocab_size).word_index
def sparse_cross_entropy(y_true, y_pred):
    """Sparse softmax cross-entropy loss over integer targets.

    `y_true` carries integer class ids with a trailing axis of size 1 (as
    produced by `_segment_sequence_into_batches`); `y_pred` carries logits.
    """
    # Drop the trailing singleton axis and make the dtype explicit for TF.
    y_true = tf.cast(tf.squeeze(y_true, axis=-1), tf.int32)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true,
                                                          logits=y_pred)
    return loss
def _build_base_model(
        batch_size=128,
        sequence_length=20,
        vocab_size=45000,
        input_word_vectors_dim=256,
        hidden_dim=2048,
        dropout=0.0):
    """Build the shared embedding + LSTM trunk used by every softmax variant.

    Returns `(inputs, x)` — the Keras input tensor and the trunk's output
    tensor — so callers can attach their own output layer.
    """
    inputs = Input(name='data_input', batch_shape=(batch_size, sequence_length), dtype='int32')
    embedding = Embedding(input_dim=vocab_size, output_dim=input_word_vectors_dim, mask_zero=False)
    dropout_pre_rnn = Dropout(dropout)
    # stateful=True so hidden state carries across consecutive batches;
    # callers must reset_states() between epochs/evaluations.
    rnn = LSTM(hidden_dim, return_sequences=True, stateful=True, unroll=True)
    dropout_post_rnn = Dropout(dropout)
    x = inputs
    x = embedding(x)
    x = dropout_pre_rnn(x)
    x = rnn(x)
    x = dropout_post_rnn(x)
    return (inputs, x)
def build_adaptive_softmax_model(cutoffs, lr=0.1, batch_size=128, sequence_length=20, vocab_size=45000, **kwargs):
    """Build the language model with an adaptive-softmax output layer.

    Unlike the other variants, the labels are a second model *input*
    (the adaptive softmax layer needs them to compute its internal loss),
    so `compile` is called without a `loss` argument.
    """
    labels = Input(name='labels_input', batch_shape=(batch_size, sequence_length, 1), dtype='int32')
    (inputs, x) = _build_base_model(batch_size=batch_size, sequence_length=sequence_length, vocab_size=vocab_size, **kwargs)
    adaptive_softmax_layer = AdaptiveSoftmaxProduceLogits(vocab_size, cutoffs=cutoffs)
    x = adaptive_softmax_layer([x, labels])
    model = Model(inputs=[inputs, labels], outputs=x)
    optimizer = Adagrad(lr=lr)
    model.compile(optimizer=optimizer)
    return model
def build_full_softmax_model(lr=0.1, vocab_size=45000, **kwargs):
    """Build the language model with a plain full-softmax output layer.

    A linear Dense layer produces logits over the whole vocabulary and
    `sparse_cross_entropy` applies the softmax inside the loss.
    """
    (inputs, x) = _build_base_model(**kwargs)
    logits = Dense(vocab_size, activation='linear')(x)
    model = Model(inputs=inputs, outputs=logits)
    model.compile(optimizer=Adagrad(lr=lr), loss=sparse_cross_entropy)
    return model
def build_differentiated_softmax_model(cutoffs, lr=0.1, vocab_size=45000, **kwargs):
    """Build the language model with a differentiated-softmax output layer.

    `cutoffs` partitions the vocabulary into frequency bands that receive
    progressively smaller projection dimensions.
    """
    (inputs, x) = _build_base_model(vocab_size=vocab_size, **kwargs)
    x = DifferentiatedSoftmaxProduceLogits(vocab_size, cutoffs)(x)
    model = Model(inputs=inputs, outputs=x)
    optimizer = Adagrad(lr=lr)
    model.compile(optimizer=optimizer, loss=sparse_cross_entropy)
    return model
def train_model(model, epochs, train_data, validation_data, batch_size=128, labels_as_input=False):
    """Train `model` for `epochs` epochs, measuring time and perplexity.

    # Arguments
        labels_as_input: True for the adaptive-softmax model, whose labels
            are fed as a second input rather than as fit/evaluate targets.

    # Returns
        `(times, ppls)`: per-epoch wall-clock training times (with a leading
        0 for the pre-training measurement) and validation perplexities
        (one more entry than epochs, including the pre-training value).
    """
    (x_train, y_train) = train_data
    (x_valid, y_valid) = validation_data
    model.reset_states()
    times = [0]
    ppls = []
    # get a measurement before any training so that we have something
    # for time 0
    if labels_as_input:
        valid_loss = model.evaluate(x=[x_valid, y_valid], batch_size=128)
    else:
        valid_loss = model.evaluate(x=x_valid, y=y_valid, batch_size=128)
    ppls.append(math.e ** valid_loss)
    for epoch in range(epochs):
        start_time = time.time()
        # The LSTM is stateful, so state is reset before each pass and
        # shuffle=False keeps the streams in order.
        model.reset_states()
        if labels_as_input:
            model.fit(x=[x_train, y_train], batch_size=128, epochs=epoch+1, initial_epoch=epoch, shuffle=False)
        else:
            model.fit(x=x_train, y=y_train, batch_size=128, epochs=epoch+1, initial_epoch=epoch, shuffle=False)
        end_time = time.time()
        times.append(end_time - start_time)
        model.reset_states()
        valid_loss = None
        try:
            if labels_as_input:
                valid_loss = model.evaluate(x=[x_valid, y_valid], batch_size=128)
            else:
                valid_loss = model.evaluate(x=x_valid, y=y_valid, batch_size=128)
        finally:
            # Always clear state so a failed evaluation can't poison the
            # next training pass.
            model.reset_states()
        # Perplexity is e^loss since the loss is natural-log cross-entropy.
        ppl = math.e ** valid_loss
        print("ppl: %s" % ppl)
        ppls.append(ppl)
    return times, ppls
def format_duration(seconds):
    """Render a duration in seconds as an HH:MM:SS.mmm string."""
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%02d:%02d:%06.3f" % (hours, minutes, seconds)
def print_summary(benchmark_results):
    """Print epochs, total training time and final perplexity per benchmark.

    `benchmark_results` is a list of `(label, (times, ppls))` pairs as
    produced by `run_benchmarks`.
    """
    for label, (times, ppls) in benchmark_results:
        print()
        print(label)
        # times[0] is the pre-training placeholder, so epochs = len - 1.
        print("Epochs: %s" % (len(times) - 1))
        print("Training time: %s" % format_duration(sum(times)))
        print("Perplexity: %.2f" % ppls[-1])
        print()
def dump_graph(results, destination_path):
    """Plot validation perplexity vs. cumulative training time per benchmark.

    Saves a PNG to `destination_path`. matplotlib is imported lazily (with
    the headless 'Agg' backend) so the rest of the script works without it.
    """
    # Fix: removed an unused `import pprint` left over from debugging.
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    data = []
    for (label, [times, ppls]) in results:
        # Convert per-epoch durations to cumulative time at each measurement.
        cumulative_times = [sum(times[:i]) for i in range(1,len(times)+1)]
        data.append((label, [cumulative_times, ppls]))
    shapes = ['o', '^', 's']
    colors = ['b', 'g', 'r', 'y']
    for i in range(len(data)):
        (label, [times, ppls]) = data[i]
        times = [t / 60 for t in times]
        color = colors[i % len(colors)]
        shape = shapes[i % len(shapes)]
        # Skip index 0: it is the pre-training measurement at time zero.
        plt.plot(times[1:], ppls[1:], "%s-" % color, label=label)
        plt.plot(times[1:], ppls[1:], "%s%s" % (color, shape))
    plt.ylim([80, 600])
    plt.ylabel('Perplexity')
    plt.xlim(xmin=0)
    plt.xlabel('Time (minutes)')
    plt.legend()
    plt.savefig(destination_path)
def run_benchmarks(epochs, benchmarks=None, output_directory='benchmark_out', resume=False):
    """Run the requested softmax benchmarks and return their results.

    # Arguments
        epochs: training epochs per benchmark.
        benchmarks: subset of {'full', 'adaptive', 'differentiated'} to run;
            None runs all of them.
        resume: when True, reuse previously saved stats/weights if present
            instead of retraining.

    # Returns
        List of `(label, (times, ppls))` pairs, one per benchmark run.
    """
    (x_train, y_train), (x_valid, y_valid) = load_data(output_directory=output_directory)
    model_options = {'lr': 0.1, 'dropout': 0.25, 'hidden_dim': 512, 'input_word_vectors_dim': 512}
    def full_softmax_benchmark():
        # Labels go through fit/evaluate's y argument for this variant.
        model = build_full_softmax_model(**model_options)
        return model, train_model(model, epochs, (x_train, y_train), (x_valid, y_valid), labels_as_input=False)
    def adaptive_softmax_benchmark():
        # Adaptive softmax consumes labels as a second model input.
        model = build_adaptive_softmax_model([2000,10000], **model_options)
        return model, train_model(model, epochs, (x_train, y_train), (x_valid, y_valid), labels_as_input=True)
    def differentiated_softmax_benchmark():
        model = build_differentiated_softmax_model([2000,10000], **model_options)
        return model, train_model(model, epochs, (x_train, y_train), (x_valid, y_valid), labels_as_input=False)
    benchmark_descriptors = [
        ('full', 'Full Softmax', full_softmax_benchmark),
        ('adaptive', 'Adaptive Softmax', adaptive_softmax_benchmark),
        ('differentiated', 'Differentiated Softmax', differentiated_softmax_benchmark)
    ]
    if benchmarks is None:
        benchmarks = [benchmark_id for (benchmark_id, *_) in benchmark_descriptors]
    results = []
    for benchmark_id, label, benchmark_fn in benchmark_descriptors:
        if not benchmark_id in benchmarks:
            continue
        filename = os.path.join(output_directory, benchmark_id)
        stats_filename = filename + '.json'
        weights_filename = filename + '.h5'
        # On resume, a completed benchmark is identified by both its stats
        # JSON and its weights file being present on disk.
        if resume and os.path.exists(stats_filename) and os.path.exists(weights_filename):
            with open(stats_filename) as f:
                results.append((label, json.load(f)))
        else:
            model, stats = benchmark_fn()
            model.save_weights(weights_filename)
            with open(stats_filename, 'w') as f:
                json.dump(stats, f)
            results.append((label, stats))
    return results
if __name__ == '__main__':
    # Command-line entry point: parse options, run the requested benchmarks,
    # print a summary, and optionally write a comparison graph.
    import argparse
    import sys
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', '--benchmarks',
                        choices=['adaptive', 'full', 'differentiated'],
                        action='append',
                        help="run benchmark for different variations of softmax")
    parser.add_argument('--iterations',
                        type=int,
                        default=10,
                        help="number of training iterations")
    parser.add_argument('--no-resume',
                        dest='resume',
                        action='store_false',
                        help="prevents resuming a previously interrupted benchmark")
    parser.add_argument('--output-directory',
                        dest="output_directory",
                        default='benchmark_out',
                        help="where to store output of benchmark")
    parser.add_argument('--graph',
                        action='store_true',
                        # Fix: corrected the typo "bencmarks" in the help text.
                        help="dump a graph of perplexity over time for benchmarks")
    options = parser.parse_args()
    # With no explicit -b flags, run every benchmark variant.
    options.benchmarks = options.benchmarks or ['adaptive', 'full', 'differentiated']
    if not os.path.exists(options.output_directory):
        os.mkdir(options.output_directory)
    result = run_benchmarks(options.iterations, benchmarks=options.benchmarks, output_directory=options.output_directory, resume=options.resume)
    print_summary(result)
    if options.graph:
        dump_graph(result, os.path.join(options.output_directory, 'text8_performance_comparison.png'))
596c73013171bc2a3c68c3589937cbb87bd5a568 | 2,961 | py | Python | src/searchclient_mas/master_plan.py | mistermoutan/Ai-Final-Project | 655bd67734941c2f0f1f23f4f651ee96379dcd2f | [
"MIT"
] | null | null | null | src/searchclient_mas/master_plan.py | mistermoutan/Ai-Final-Project | 655bd67734941c2f0f1f23f4f651ee96379dcd2f | [
"MIT"
] | null | null | null | src/searchclient_mas/master_plan.py | mistermoutan/Ai-Final-Project | 655bd67734941c2f0f1f23f4f651ee96379dcd2f | [
"MIT"
] | null | null | null | from action import ActionType
from action import Action
from action import Dir
import copy
from merger import merge
from state import StateMA
from typing import List
import sys
| 40.013514 | 122 | 0.680176 | from action import ActionType
from action import Action
from action import Dir
import copy
from merger import merge
from state import StateMA
from typing import List
import sys
class MasterPlan(object):
    """A joint plan for all agents: a list of per-timestep action vectors
    plus the world state expected before each timestep.

    `plan[t][agent_id]` is the action agent `agent_id` takes at step `t`;
    `states[t]` is the state before `plan[t]` is executed.
    """
    def noop_action_vector(self):
        """Return an action vector in which every agent waits."""
        return [ActionType.Wait] * self.number_of_agents
    def __init__(self, number_of_agents: int, initial_state: StateMA):
        self.number_of_agents = number_of_agents
        #Initialize the empty plan
        self.current_plan_length = 1
        self.plan = [self.noop_action_vector() for _ in range(self.current_plan_length)]
        self.states = [None]*self.current_plan_length
        self.states[0] = initial_state
        #The index after the last action that each agent committed to
        self.index_after_last_action = [0]*number_of_agents
    def copy(self):
        """Return an independent deep copy of this master plan."""
        return copy.deepcopy(self)
    def pretty_print_master_plan(self):
        """Print each agent's action sequence to stderr for debugging."""
        agents = range(self.number_of_agents)
        plans = [[] for i in agents]
        # Transpose the per-timestep action vectors into per-agent plans.
        for action_vector in self.plan:
            for agent,action in enumerate(action_vector):
                plans[agent].append(action)
        print("MASTER PLAN", file=sys.stderr)
        for plan in plans:
            print(plan, file=sys.stderr)
    def merge_plan_into_master(self, agent_id: int, agent_plan: List[Action]):
        """Merge `agent_plan` into the master plan starting right after the
        agent's last committed action.

        Returns True on success; False if `merge` could not reconcile the
        plan with the other agents' committed actions.
        """
        first_index_in_plan = self.index_after_last_action[agent_id]
        revised_agent_plan = merge(agent_id, agent_plan, self.plan, first_index_in_plan, self.states[first_index_in_plan])
        if not revised_agent_plan:
            return False
        #Increase the length of the master plan if necessary
        difference_in_length = (len(revised_agent_plan) + first_index_in_plan) - len(self.plan)
        if difference_in_length > 0:
            self.plan.extend([self.noop_action_vector() for _ in range(difference_in_length)])
            self.states.extend([None]*difference_in_length)
        #Update the action vectors in the master plan to reflect the change to the plan
        for i,action in enumerate(revised_agent_plan):
            index_of_action_vector = first_index_in_plan + i
            self.plan[index_of_action_vector][agent_id] = action
        #Update the states to reflect the changes to the master plan
        #Note that all states after a changed action vector must be updated, not just
        #those at the same index as changed action vector
        for i in range(first_index_in_plan, len(self.plan)-1):
            self.states[i+1] = self.states[i].get_child(self.plan[i])
        #Update the index of where to start the next plan for this agent
        self.index_after_last_action[agent_id] += len(revised_agent_plan)
        return True
    def get_plan_of_agent(self, agent_id):
        """Return the committed action sequence of a single agent."""
        end_of_agents_plan = self.index_after_last_action[agent_id]
        return [self.plan[i][agent_id] for i in range(end_of_agents_plan)]
8541e0db17a1993a3c9d4203e444bf6a1246ebbe | 444 | py | Python | final/question3.py | user1689/INFO6205Spring2022 | 8257e436b6ae5b6f512c1aaabcd7b331c7f82da7 | [
"MIT"
] | null | null | null | final/question3.py | user1689/INFO6205Spring2022 | 8257e436b6ae5b6f512c1aaabcd7b331c7f82da7 | [
"MIT"
] | null | null | null | final/question3.py | user1689/INFO6205Spring2022 | 8257e436b6ae5b6f512c1aaabcd7b331c7f82da7 | [
"MIT"
] | null | null | null | from collections import defaultdict
obj = solution()
s = ["eat","tea","tan","ate","nat","bat"]
s2 = [""]
s3 = ["a"]
res = obj.groupAnagrams(s)
print(res)
| 23.368421 | 46 | 0.533784 | from collections import defaultdict
class solution:
    def groupAnagrams(self, strs):
        """Group the words in `strs` so each group contains mutual anagrams.

        Groups appear in order of each anagram class's first occurrence and
        words keep their input order within a group. Assumes lowercase
        ASCII words.
        """
        buckets = defaultdict(list)
        for word in strs:
            # A 26-slot letter-count vector uniquely identifies an anagram
            # class; the tuple form is hashable and serves as the key.
            signature = [0] * 26
            for ch in word:
                signature[ord(ch) - ord('a')] += 1
            buckets[tuple(signature)].append(word)
        return list(buckets.values())
# Ad-hoc driver: run groupAnagrams on a sample input and print the groups.
obj = solution()
s = ["eat","tea","tan","ate","nat","bat"]
s2 = [""]
s3 = ["a"]
res = obj.groupAnagrams(s)
print(res)
4c45216194e009047a5f5dd1ee3ee263ec6f15a1 | 5,390 | py | Python | src/cozmo/tkview.py | wmh123456789/cozmo-python-wmh | ce66a1b22d75c086e413d61cea9b65ade6aefa24 | [
"Apache-2.0"
] | null | null | null | src/cozmo/tkview.py | wmh123456789/cozmo-python-wmh | ce66a1b22d75c086e413d61cea9b65ade6aefa24 | [
"Apache-2.0"
] | null | null | null | src/cozmo/tkview.py | wmh123456789/cozmo-python-wmh | ce66a1b22d75c086e413d61cea9b65ade6aefa24 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016-2017 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides a simple GUI viewer for Cozmo's camera.
It uses Tkinter, the standard Python GUI toolkit which is optionally available
on most platforms, and also depends on the Pillow and numpy libraries for
image processing.
See the online SDK documentation for details on how to install these extra
packages on your platform.
The easiest way to make use of this viewer is to call
:func:`cozmo.run.connect_with_tkviewer`.
Warning:
This package requires Python to have Tkinter installed to display the GUI.
'''
# __all__ should order by constants, event classes, other classes, functions.
__all__ = ['TkImageViewer']
import cozmo
import collections
import functools
import queue
import platform
import time
from PIL import Image, ImageDraw, ImageTk
import tkinter
from . import world
class TkThreadable:
'''A mixin for adding threadsafe calls to tkinter methods.'''
#pylint: disable=no-member
# no-member errors are raised in pylint regarding members/methods called but not defined in our mixin.
class TkImageViewer(tkinter.Frame, TkThreadable):
'''Simple Tkinter camera viewer.'''
# TODO: rewrite this whole thing. Make a generic camera widget
# that can be used in other Tk applications. Also handle resizing
# the window properly.
# The base class configure doesn't take an event
#pylint: disable=arguments-differ
| 32.46988 | 106 | 0.666605 | # Copyright (c) 2016-2017 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides a simple GUI viewer for Cozmo's camera.
It uses Tkinter, the standard Python GUI toolkit which is optionally available
on most platforms, and also depends on the Pillow and numpy libraries for
image processing.
See the online SDK documentation for details on how to install these extra
packages on your platform.
The easiest way to make use of this viewer is to call
:func:`cozmo.run.connect_with_tkviewer`.
Warning:
This package requires Python to have Tkinter installed to display the GUI.
'''
# __all__ should order by constants, event classes, other classes, functions.
__all__ = ['TkImageViewer']
import cozmo
import collections
import functools
import queue
import platform
import time
from PIL import Image, ImageDraw, ImageTk
import tkinter
from . import world
class TkThreadable:
    '''A mixin for adding threadsafe calls to tkinter methods.'''
    #pylint: disable=no-member
    # no-member errors are raised in pylint regarding members/methods called but not defined in our mixin.
    def __init__(self, *a, **kw):
        # Queue of (method, args, kwargs) tuples submitted from other
        # threads, drained on the Tk main loop.
        self._thread_queue = queue.Queue()
        self.after(50, self._thread_call_dispatch)
    def call_threadsafe(self, method, *a, **kw):
        """Schedule `method(*a, **kw)` to run on the Tk main thread."""
        self._thread_queue.put((method, a, kw))
    def _thread_call_dispatch(self):
        """Drain the queue on the Tk thread, then reschedule itself (50ms)."""
        while True:
            try:
                method, a, kw = self._thread_queue.get(block=False)
                self.after_idle(method, *a, **kw)
            except queue.Empty:
                break
        self.after(50, self._thread_call_dispatch)
class TkImageViewer(tkinter.Frame, TkThreadable):
    '''Simple Tkinter camera viewer.'''
    # TODO: rewrite this whole thing. Make a generic camera widget
    # that can be used in other Tk applications. Also handle resizing
    # the window properly.
    def __init__(self,
            tk_root=None, refresh_interval=10, image_scale = 2,
            window_name = "CozmoView", force_on_top=True):
        # refresh_interval is in milliseconds (passed to Tk's `after`);
        # image_scale is used only until the first window resize is seen.
        if tk_root is None:
            tk_root = tkinter.Tk()
        tkinter.Frame.__init__(self, tk_root)
        TkThreadable.__init__(self)
        # maxlen=1 means a new camera frame always replaces any undrawn one.
        self._img_queue = collections.deque(maxlen=1)
        self._refresh_interval = refresh_interval
        self.scale = image_scale
        self.width = None
        self.height = None
        self.tk_root = tk_root
        tk_root.wm_title(window_name)
        self.label = tkinter.Label(self.tk_root,image=None)
        self.tk_root.protocol("WM_DELETE_WINDOW", self._delete_window)
        self._isRunning = True
        self.robot = None
        self.handler = None
        self._first_image = True
        # Constrain the window to a 4:3 aspect ratio, matching the camera.
        tk_root.aspect(4,3,4,3)
        if force_on_top:
            # force window on top of all others, regardless of focus
            tk_root.wm_attributes("-topmost", 1)
        self.last_configure = time.time()
        self.tk_root.bind("<Configure>", self.configure)
        self._repeat_draw_frame()
    async def connect(self, coz_conn):
        """Wait for a robot, enable its camera, and subscribe to new frames."""
        self.robot = await coz_conn.wait_for_robot()
        self.robot.camera.image_stream_enabled = True
        self.handler = self.robot.world.add_event_handler(
            world.EvtNewCameraImage, self.image_event)
    def disconnect(self):
        """Stop receiving camera events and shut the viewer down."""
        if self.handler:
            self.handler.disable()
        self.call_threadsafe(self.quit)
    # The base class configure doesn't take an event
    #pylint: disable=arguments-differ
    def configure(self, event):
        """Track window resizes so frames can be fit to the new size."""
        # hack to interrupt feedback loop between image resizing
        # and frame resize detection; there has to be a better solution to this.
        if time.time() - self.last_configure < 0.1:
            return
        # Ignore degenerate sizes reported during window setup/teardown.
        if event.width < 50 or event.height < 50:
            return
        self.last_configure = time.time()
        self.height = event.height
        self.width = event.width
    def image_event(self, evt, *, image, **kw):
        """Annotate an incoming camera frame and queue it for drawing."""
        if self._first_image or self.width is None:
            img = image.annotate_image(scale=self.scale)
        else:
            img = image.annotate_image(fit_size=(self.width, self.height))
        self._img_queue.append(img)
    def _delete_window(self):
        """Handle the window-manager close button."""
        self.tk_root.destroy()
        self.quit()
        self._isRunning = False
    def _draw_frame(self):
        """Display the most recently queued frame, if any."""
        if ImageTk is None:
            return
        try:
            image = self._img_queue.popleft()
        except IndexError:
            # no new image
            return
        self._first_image = False
        # Keep a reference on the label: Tk does not hold one itself and the
        # PhotoImage would otherwise be garbage-collected.
        photoImage = ImageTk.PhotoImage(image)
        self.label.configure(image=photoImage)
        self.label.image = photoImage
        self.label.pack()
    def _repeat_draw_frame(self, event=None):
        """Draw a frame, then reschedule at the configured refresh interval."""
        self._draw_frame()
        self.after(self._refresh_interval, self._repeat_draw_frame)
| 3,100 | 0 | 294 |
97da8e02b7df5e1a5b81e6e712e3a5f804a9ac43 | 998 | py | Python | waketor/single_wake/noj.py | rethore/waketor | 81a6688f27b5c718b98cf61e264ba9f127345ca6 | [
"Apache-2.0"
] | null | null | null | waketor/single_wake/noj.py | rethore/waketor | 81a6688f27b5c718b98cf61e264ba9f127345ca6 | [
"Apache-2.0"
] | 3 | 2015-12-10T08:35:19.000Z | 2015-12-10T08:37:36.000Z | waketor/single_wake/noj.py | rethore/waketor | 81a6688f27b5c718b98cf61e264ba9f127345ca6 | [
"Apache-2.0"
] | null | null | null | import numpy as np
def noj(rel_pos, c_t, D, k):
    """N.O. Jensen single wake deficit model.

    Positions upstream of the rotor or radially outside the (linearly
    expanding) wake cone get a deficit of exactly zero.

    Parameters
    ----------
    rel_pos: ndarray [n,3]
        x,y,z position relative to the upstream turbine
    c_t: float | ndarray [n]
        thrust coefficient of the upstream turbine
    D: float | ndarray [n]
        rotor diameter of the upstream turbine
    k: float | ndarray [n]
        wake expansion parameter

    Returns
    -------
    du: float | ndarray [n]
        wind speed deficit (negative values) at the requested positions
    """
    downstream = rel_pos[:, 0]
    radial = np.sqrt(rel_pos[:, 1] ** 2.0 + rel_pos[:, 2] ** 2.0)
    rotor_radius = D / 2.0
    # Jensen model: wake radius grows linearly with downstream distance.
    wake_radius = rotor_radius + k * downstream
    deficit = -(1.0 - np.sqrt(1.0 - c_t)) / (1.0 + (k * downstream) / rotor_radius) ** 2.0
    # Zero out upstream positions and points outside the wake cone.
    deficit[downstream < 0.0] = 0.0
    deficit[abs(radial) > wake_radius] = 0.0
    return deficit
| 29.352941 | 70 | 0.529058 | import numpy as np
def noj(rel_pos, c_t, D, k):
    """N.O. Jensen single wake deficit model
    This function checks if r is greater than the wake radius!
    Parameters
    -----------
    rel_pos: ndarray [n,3]
        x,y,z relative position compared to the upstream turbine
    c_t: float | ndarray [n]
        upstream wind turbine thrust coefficient
    D: float | ndarray [n]
        upstream wind turbine rotor diameter
    k: float | ndarray [n]
        wake expansion parameter
    Returns
    -------
    du: float | ndarray [n]
        The wind speed deficit at the specified positions
    """
    x = rel_pos[:, 0]  # downstream distance from the rotor
    r = np.sqrt(rel_pos[:, 1] ** 2.0 + rel_pos[:, 2] ** 2.0)  # radial distance from the wake axis
    # Radius (rotor radius)
    R = D / 2.0
    # NOJ Specific: the wake radius expands linearly with downstream distance
    Rw = R + k * x # upstream turbine wake radius
    DU = - (1.0 - np.sqrt(1.0 - c_t)) / (1.0 + (k * x) / R) ** 2.0
    # Upstream cases get no deficit
    DU[x < 0.0] = 0.0
    # Points radially outside the wake cone get no deficit
    DU[abs(r) > Rw] = 0.0
    return DU
| 0 | 0 | 0 |
56f407cd0e0c23463670fa0b78c499723162bd1b | 461 | py | Python | URL_Modules/errors.py | MrSSHH/URL-Shortener | 8d472d1143dab5cf05a0f176d9b3a14da93d8df5 | [
"MIT"
] | 2 | 2019-08-26T10:55:03.000Z | 2019-12-29T19:13:54.000Z | URL_Modules/errors.py | MrSSHH/URL-Shortener | 8d472d1143dab5cf05a0f176d9b3a14da93d8df5 | [
"MIT"
] | null | null | null | URL_Modules/errors.py | MrSSHH/URL-Shortener | 8d472d1143dab5cf05a0f176d9b3a14da93d8df5 | [
"MIT"
] | null | null | null | class WebsiteNotFound(Exception):
"""
*WebsiteNotFound raise*
When requests can't reach a website,
this error should pop up.
(This should make it more clear)
"""
pass
class KillSwitch(Exception):
    """Raised to shut the request handler down.

    Once a client requests the kill-switch endpoint
    (your-ip/closeconnection), the handler raises this exception
    automatically to stop serving.
    """
| 23.05 | 71 | 0.590022 | class WebsiteNotFound(Exception):
"""
*WebsiteNotFound raise*
When requests can't reach a website,
this error should pop up.
(This should make it more clear)
"""
pass
class KillSwitch(Exception):
    """
    *KillSwitch raise*
    Raised once a client requests the kill-switch endpoint
    (your-ip/closeconnection); the request handler raises this
    exception automatically to stop serving.
    """
    pass
| 0 | 0 | 0 |
f13ac559a736154b7792ac2fcbec90c0abcc3a74 | 5,911 | py | Python | data_loader_DALI.py | Joshua-Ren/IL_for_SSL | f488ba41ab6c79cef9064a788f6ec1bd701214b1 | [
"MIT"
] | null | null | null | data_loader_DALI.py | Joshua-Ren/IL_for_SSL | f488ba41ab6c79cef9064a788f6ec1bd701214b1 | [
"MIT"
] | null | null | null | data_loader_DALI.py | Joshua-Ren/IL_for_SSL | f488ba41ab6c79cef9064a788f6ec1bd701214b1 | [
"MIT"
] | null | null | null | import os
import time
import math
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, LastBatchPolicy
from nvidia.dali.pipeline import pipeline_def
import nvidia.dali.types as types
import nvidia.dali.fn as fn
import warnings
warnings.filterwarnings('ignore')
#LOCAL_PATH = 'E:\DATASET\tiny-imagenet-200'
@pipeline_def
if __name__ == '__main__':
# iteration of PyTorch dataloader
transform_train = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.08, 1.25)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
train_dst = datasets.ImageFolder(IMG_DIR, transform_train)
train_loader = torch.utils.data.DataLoader(train_dst, batch_size=2048, shuffle=True, pin_memory=True, num_workers=8)
transform_val = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
val_dst = datasets.ImageFolder(IMG_DIR, transform_val)
val_loader = torch.utils.data.DataLoader(val_dst, batch_size=2000, shuffle=False, pin_memory=True, num_workers=8)
print('[PyTorch] start iterate test dataloader')
start = time.time()
for i, (x,y) in enumerate(train_loader):
if i%5==0:
print(i,end='-')
images = x.cuda(non_blocking=True)
labels = y.cuda(non_blocking=True)
end = time.time()
test_time = end-start
print('[PyTorch] end test dataloader iteration')
# print('[PyTorch] iteration time: %fs [train], %fs [test]' % (train_time, test_time))
print('[PyTorch] iteration time: %fs [test]' % (test_time))
pipe = create_dali_pipeline(batch_size=2048, num_threads=8, device_id=0, seed=12, data_dir=IMG_DIR,
crop=224, size=256, dali_cpu=False, shard_id=0, num_shards=1, is_training=True)
pipe.build()
train_loader = DALIClassificationIterator(pipe, reader_name="Reader", last_batch_policy=LastBatchPolicy.PARTIAL)
pipe = create_dali_pipeline(batch_size=2000, num_threads=8, device_id=0, seed=12, data_dir=IMG_DIR,
crop=256, size=256, dali_cpu=True, shard_id=0, num_shards=1, is_training=False)
pipe.build()
val_loader = DALIClassificationIterator(pipe, reader_name="Reader", last_batch_policy=LastBatchPolicy.PARTIAL)
print('[DALI-GPU] start iterate train dataloader')
start = time.time()
for i, data in enumerate(train_loader):
if i%5==0:
print(i,end='-')
images = data[0]['data'].cuda()
labels = data[0]['label'].cuda()
end = time.time()
test_time = end-start
print('[DALI-GPU] iteration time: %fs [test]' % (test_time))
print('[DALI-cpu] start iterate val dataloader')
start = time.time()
for i, data in enumerate(val_loader):
if i%5==0:
print(i,end='-')
images = data[0]['data'].cuda()
labels = data[0]['label'].cuda()
end = time.time()
test_time = end-start
print('[DALI-cpu] iteration time: %fs [test]' % (test_time))
| 44.443609 | 141 | 0.663847 | import os
import time
import math
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, LastBatchPolicy
from nvidia.dali.pipeline import pipeline_def
import nvidia.dali.types as types
import nvidia.dali.fn as fn
import warnings
warnings.filterwarnings('ignore')
#LOCAL_PATH = 'E:\DATASET\tiny-imagenet-200'
@pipeline_def
def create_dali_pipeline(dataset, crop, size, shard_id, num_shards, dali_cpu=False, is_training=True):
    """Build a DALI image pipeline for ImageNet or Tiny-ImageNet.

    dataset: 'imagenet' (caffe/LMDB reader) or 'tiny' (plain file reader).
        NOTE(review): any other value leaves `images`/`labels` unbound and
        this function fails - confirm callers only pass these two.
    crop: square output side used by resize and crop_mirror_normalize.
    size: NOTE(review): only referenced in a commented-out fn.resize call;
        currently unused.
    shard_id/num_shards: data sharding for multi-process training.
    dali_cpu: run decode/resize on CPU operators instead of mixed/GPU.
    is_training: training preprocessing (random mirror) vs. eval preprocessing.
    Returns (images, labels), both moved to GPU.
    """
    if dataset.lower()=='imagenet':
        # Hard-coded cluster path to the full ImageNet copy (caffe format).
        DATA_PATH = '/home/sg955/rds/rds-nlp-cdt-VR7brx3H4V8/datasets/ImageNet/'
        if is_training:
            data_dir = os.path.join(DATA_PATH, 'train_caffe')
        else:
            data_dir = os.path.join(DATA_PATH, 'val_caffe')
        images, labels = fn.readers.caffe(path=data_dir, shard_id=shard_id, num_shards=num_shards,
                                          pad_last_batch=True, name="Reader")
    elif dataset.lower()=='tiny':
        # Hard-coded cluster path to Tiny-ImageNet (plain image folders).
        DATA_PATH = '/home/sg955/rds/hpc-work/tiny-imagenet-200/'
        if is_training:
            data_dir = os.path.join(DATA_PATH, 'train')
        else:
            data_dir = os.path.join(DATA_PATH, 'val')
        images, labels = fn.readers.file(file_root=data_dir, shard_id=shard_id, num_shards=num_shards,
                                         random_shuffle=is_training, pad_last_batch=True, name="Reader")
    # 'mixed' decodes on CPU and outputs to GPU; 'cpu' keeps everything on host.
    dali_device = 'cpu' if dali_cpu else 'gpu'
    decoder_device = 'cpu' if dali_cpu else 'mixed'
    if is_training:
        #images = fn.decoders.image_random_crop(images, device=decoder_device, output_type=types.RGB, random_aspect_ratio=[0.8, 1.25],
        #                                       random_area=[0.1, 1.0], num_attempts=100)
        images = fn.decoders.image(images, device=decoder_device, output_type=types.RGB)
        images = fn.resize(images, device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
        # Random horizontal flip with p=0.5 (applied in crop_mirror_normalize below).
        mirror = fn.random.coin_flip(probability=0.5)
    else:
        images = fn.decoders.image(images, device=decoder_device, output_type=types.RGB)
        images = fn.resize(images, device=dali_device, resize_x=crop, resize_y=crop, mode="not_smaller", interp_type=types.INTERP_TRIANGULAR)
        #images = fn.resize(images, device=dali_device, size=size, mode="not_smaller", interp_type=types.INTERP_TRIANGULAR)
        mirror = False
    # ImageNet mean/std normalization, CHW layout, optional mirror.
    images = fn.crop_mirror_normalize(images.gpu(), dtype=types.FLOAT, output_layout="CHW",
                                      crop=(crop, crop),mean=[0.485 * 255,0.456 * 255,0.406 * 255],
                                      std=[0.229 * 255,0.224 * 255,0.225 * 255], mirror=mirror)
    labels = labels.gpu()
    return images, labels
if __name__ == '__main__':
    # Benchmark script: time one full epoch of iteration with a plain PyTorch
    # DataLoader, then with DALI pipelines (GPU for train, CPU for val).
    # NOTE(review): IMG_DIR is not defined anywhere in this file (the
    # commented-out LOCAL_PATH above suggests it was removed) - this block
    # fails with a NameError as written; confirm the intended path.
    # NOTE(review): the create_dali_pipeline calls below pass data_dir=...
    # but the definition above takes `dataset` - presumably batch_size /
    # num_threads / device_id / seed go to the @pipeline_def wrapper, yet
    # data_dir does not match the signature; verify before running.
    # iteration of PyTorch dataloader
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(0.08, 1.25)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    train_dst = datasets.ImageFolder(IMG_DIR, transform_train)
    train_loader = torch.utils.data.DataLoader(train_dst, batch_size=2048, shuffle=True, pin_memory=True, num_workers=8)
    transform_val = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    val_dst = datasets.ImageFolder(IMG_DIR, transform_val)
    val_loader = torch.utils.data.DataLoader(val_dst, batch_size=2000, shuffle=False, pin_memory=True, num_workers=8)
    print('[PyTorch] start iterate test dataloader')
    start = time.time()
    for i, (x,y) in enumerate(train_loader):
        if i%5==0:
            print(i,end='-')
        # Move each batch to GPU so the comparison includes host-to-device copies.
        images = x.cuda(non_blocking=True)
        labels = y.cuda(non_blocking=True)
    end = time.time()
    test_time = end-start
    print('[PyTorch] end test dataloader iteration')
#    print('[PyTorch] iteration time: %fs [train], %fs [test]' % (train_time, test_time))
    print('[PyTorch] iteration time: %fs [test]' % (test_time))
    # DALI GPU pipeline over the training set.
    pipe = create_dali_pipeline(batch_size=2048, num_threads=8, device_id=0, seed=12, data_dir=IMG_DIR,
                                crop=224, size=256, dali_cpu=False, shard_id=0, num_shards=1, is_training=True)
    pipe.build()
    train_loader = DALIClassificationIterator(pipe, reader_name="Reader", last_batch_policy=LastBatchPolicy.PARTIAL)
    # DALI CPU pipeline over the validation set.
    pipe = create_dali_pipeline(batch_size=2000, num_threads=8, device_id=0, seed=12, data_dir=IMG_DIR,
                                crop=256, size=256, dali_cpu=True, shard_id=0, num_shards=1, is_training=False)
    pipe.build()
    val_loader = DALIClassificationIterator(pipe, reader_name="Reader", last_batch_policy=LastBatchPolicy.PARTIAL)
    print('[DALI-GPU] start iterate train dataloader')
    start = time.time()
    for i, data in enumerate(train_loader):
        if i%5==0:
            print(i,end='-')
        images = data[0]['data'].cuda()
        labels = data[0]['label'].cuda()
    end = time.time()
    test_time = end-start
    print('[DALI-GPU] iteration time: %fs [test]' % (test_time))
    print('[DALI-cpu] start iterate val dataloader')
    start = time.time()
    for i, data in enumerate(val_loader):
        if i%5==0:
            print(i,end='-')
        images = data[0]['data'].cuda()
        labels = data[0]['label'].cuda()
    end = time.time()
    test_time = end-start
    print('[DALI-cpu] iteration time: %fs [test]' % (test_time))
| 2,326 | 0 | 22 |
b7c8ca2655f5c5c29e908c97ff90e2e3d417bcdb | 1,763 | py | Python | setup.py | optidash-ai/optidash-python | 5cc594c904addb588b8843d728f2941b9642704d | [
"MIT"
] | 3 | 2020-06-27T22:58:20.000Z | 2021-07-14T06:59:58.000Z | setup.py | optidash-ai/optidash-python | 5cc594c904addb588b8843d728f2941b9642704d | [
"MIT"
] | null | null | null | setup.py | optidash-ai/optidash-python | 5cc594c904addb588b8843d728f2941b9642704d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup (
name = 'optidash',
version = '1.0.1',
description = 'Official Python integration for Optidash API',
long_description = 'Optidash: AI-powered image optimization and processing API. We will drastically speed-up your websites and save you money on bandwidth and storage.',
url = 'https://github.com/optidash-ai/optidash-python',
download_url = 'https://github.com/optidash-ai/optidash-python/archive/1.0.0.tar.gz',
author = 'Optidash UG',
author_email = 'support@optidash.ai',
license = 'MIT',
keywords = 'optidash image optimization processing resizing resizer cropping scaling masking watermarking filtering thumbnails pic picture photo face face detection visual watermark filter crop mask resize resizer thumbs thumbnail thumbnails jpg jpeg png gif svg bmp psd tiff heic',
packages = [
'optidash'
],
install_requires = [
'requests'
],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
]
) | 37.510638 | 286 | 0.654566 | # -*- coding: utf-8 -*-
import os
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup (
name = 'optidash',
version = '1.0.1',
description = 'Official Python integration for Optidash API',
long_description = 'Optidash: AI-powered image optimization and processing API. We will drastically speed-up your websites and save you money on bandwidth and storage.',
url = 'https://github.com/optidash-ai/optidash-python',
download_url = 'https://github.com/optidash-ai/optidash-python/archive/1.0.0.tar.gz',
author = 'Optidash UG',
author_email = 'support@optidash.ai',
license = 'MIT',
keywords = 'optidash image optimization processing resizing resizer cropping scaling masking watermarking filtering thumbnails pic picture photo face face detection visual watermark filter crop mask resize resizer thumbs thumbnail thumbnails jpg jpeg png gif svg bmp psd tiff heic',
packages = [
'optidash'
],
install_requires = [
'requests'
],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
]
) | 0 | 0 | 0 |
7f38e574cbba9660adf87c84b321f6523753db04 | 880 | py | Python | svca_limix/demos/demo_gp2kronSum.py | DenisSch/svca | bd029c120ca8310f43311253e4d7ce19bc08350c | [
"Apache-2.0"
] | 65 | 2015-01-20T20:46:26.000Z | 2021-06-27T14:40:35.000Z | svca_limix/demos/demo_gp2kronSum.py | DenisSch/svca | bd029c120ca8310f43311253e4d7ce19bc08350c | [
"Apache-2.0"
] | 29 | 2015-02-01T22:35:17.000Z | 2017-08-07T08:18:23.000Z | svca_limix/demos/demo_gp2kronSum.py | DenisSch/svca | bd029c120ca8310f43311253e4d7ce19bc08350c | [
"Apache-2.0"
] | 35 | 2015-02-01T17:26:50.000Z | 2019-09-13T07:06:16.000Z | import scipy as sp
import scipy.linalg as la
import pdb
from limix.core.covar import FreeFormCov
from limix.core.mean import MeanKronSum
from limix.core.gp import GP2KronSum
from limix.core.gp import GP
from limix.utils.preprocess import covar_rescale
import time
import copy
if __name__=='__main__':
    # Demo: build a random phenotype/covariance setup and fit a GP2KronSum model.
    # define phenotype
    N = 1000  # number of samples
    P = 4  # number of traits
    Y = sp.randn(N,P)
    # define fixed effects
    F = []; A = []
    F.append(1.*(sp.rand(N,2)<0.5))  # random binary design matrix (N x 2)
    A.append(sp.eye(P))
    # define row covariance
    f = 10
    X = 1.*(sp.rand(N, f)<0.2)  # sparse binary features used to build R
    R = covar_rescale(sp.dot(X,X.T))
    # small diagonal jitter - presumably to keep R well-conditioned for eigh
    R+= 1e-4 * sp.eye(N)
    S_R, U_R = la.eigh(R)  # eigenvalues / eigenvectors of the row covariance
    # define col covariances
    Cg = FreeFormCov(P)
    Cn = FreeFormCov(P)
    Cg.setRandomParams()
    Cn.setRandomParams()
    # define gp and optimize
    gp = GP2KronSum(Y=Y, F=F, A=A, Cg=Cg, Cn=Cn, S_R=S_R, U_R=U_R)
    gp.optimize()
| 21.463415 | 66 | 0.635227 | import scipy as sp
import scipy.linalg as la
import pdb
from limix.core.covar import FreeFormCov
from limix.core.mean import MeanKronSum
from limix.core.gp import GP2KronSum
from limix.core.gp import GP
from limix.utils.preprocess import covar_rescale
import time
import copy
if __name__=='__main__':
# define phenotype
N = 1000
P = 4
Y = sp.randn(N,P)
# define fixed effects
F = []; A = []
F.append(1.*(sp.rand(N,2)<0.5))
A.append(sp.eye(P))
# define row caoriance
f = 10
X = 1.*(sp.rand(N, f)<0.2)
R = covar_rescale(sp.dot(X,X.T))
R+= 1e-4 * sp.eye(N)
S_R, U_R = la.eigh(R)
# define col covariances
Cg = FreeFormCov(P)
Cn = FreeFormCov(P)
Cg.setRandomParams()
Cn.setRandomParams()
# define gp and optimize
gp = GP2KronSum(Y=Y, F=F, A=A, Cg=Cg, Cn=Cn, S_R=S_R, U_R=U_R)
gp.optimize()
| 0 | 0 | 0 |
62131fc31a94f1dd52c146dfc865bbc77f8997c5 | 9,023 | py | Python | day15_beverage_bandits_UNCOMPLETED.py | errir503/advent2018 | aae1e36417d203e8b40b43f45094f04e81799923 | [
"Unlicense"
] | 16 | 2018-11-30T23:42:44.000Z | 2021-07-07T08:44:04.000Z | day15_beverage_bandits_UNCOMPLETED.py | joelgrus/advent2018 | aae1e36417d203e8b40b43f45094f04e81799923 | [
"Unlicense"
] | null | null | null | day15_beverage_bandits_UNCOMPLETED.py | joelgrus/advent2018 | aae1e36417d203e8b40b43f45094f04e81799923 | [
"Unlicense"
] | 6 | 2018-12-05T21:19:28.000Z | 2021-07-07T08:44:09.000Z | from typing import Set, List, Tuple, NamedTuple, Iterator, Optional
from collections import deque
import heapq
RAW = """#######
#.G...#
#...EG#
#.#.#G#
#..G#E#
#.....#
#######"""
backpointers = {fighter.pos: None}
reached = set()
frontier = [(min(fighter.pos.manhattan(target),
0,
for target in targets), fighter.pos)]
best_score = float('inf')
while frontier:
score, length, pos = heapq.heappop(frontier)
if score > best_score:
# The best remaining candidate is worse than
# what we've already found, so break
break
if pos in targets:
reached.add(pos)
best_score = length
for next_pos in pos.neighbors():
if next_pos in off_limits:
continue
if next_pos in backpointers:
if pos in path:
continue
new_path = path + [pos]
new_score = len(new_path) + min(pos.manhattan(target) for target in targets)
heapq.heappush(frontier, (new_score, new_path))
# at this point, shortest_paths has all the shortest paths
# need to sort by (1) reading order of destination (2) reading order of first step
successful_paths.sort(key=lambda path: (path[-1].i, path[-1].j, path[1].i, path[1].j))
if successful_paths:
return successful_paths[0]
else:
#print("nowhere good to go")
return None
def round(self) -> bool:
"""Return true if the game is not over"""
occupied = {f.pos: f.elf for f in self.fighters if not f.dead}
movement_last_round = occupied != self.last_occupied[0]
self.fighters.sort(key=lambda f: (f.pos.i, f.pos.j))
game_over = False
for fighter in self.fighters:
if fighter.dead:
continue
found_enemies = fighter.take_turn(self, movement_last_round)
if not found_enemies:
game_over = True
self.last_occupied[0] = occupied
return game_over
def total_hit_points(self) -> int:
return sum(f.hp for f in self.fighters if not f.dead)
def __repr__(self) -> str:
outputs = {**{pos: '#' for pos in self.walls},
**{f.pos: 'E' if f.elf else 'G' for f in self.fighters if not f.dead}}
max_i = max(pos.i for pos in outputs)
max_j = max(pos.j for pos in outputs)
return "\n".join("".join(outputs.get(Pos(i, j), ".") for j in range(max_j + 1))
for i in range(max_i + 1))
def parse(raw: str) -> Cave:
walls = set()
fighters = []
for i, row in enumerate(raw.split("\n")):
for j, c in enumerate(row.strip()):
if c == '#':
walls.add(Pos(i, j))
elif c == 'E':
fighters.append(Fighter(elf=True, pos=Pos(i, j)))
elif c == 'G':
fighters.append(Fighter(elf=False, pos=Pos(i, j)))
return Cave(walls, fighters)
def run_game(cave: Cave) -> int:
num_rounds = 0
while True:
print("round", num_rounds)
print(cave)
game_over = cave.round()
if game_over:
break
num_rounds += 1
return num_rounds * cave.total_hit_points()
CAVE = parse(RAW)
assert run_game(CAVE) == 27730
CAVE2 = parse("""#######
#G..#E#
#E#E.E#
#G.##.#
#...#E#
#...E.#
#######""")
#assert run_game(CAVE2) == 36334
CAVE3 = parse("""#########
#G......#
#.E.#...#
#..##..G#
#...##..#
#...#...#
#.G...G.#
#.....G.#
#########""")
assert run_game(CAVE3) == 18740
with open('data/day15.txt') as f:
raw = f.read()
cave = parse(raw)
print(run_game(cave))
| 29.486928 | 94 | 0.543389 | from typing import Set, List, Tuple, NamedTuple, Iterator, Optional
from collections import deque
import heapq
RAW = """#######
#.G...#
#...EG#
#.#.#G#
#..G#E#
#.....#
#######"""
class Pos(NamedTuple):
    """A grid position in (row, column) == (i, j) reading order."""
    i: int
    j: int

    def adjacent(self, other: 'Pos') -> bool:
        """Return True if *other* is orthogonally adjacent to this position."""
        # Fix: the original annotated the return type as int, but the
        # boolean expression below always yields a bool.
        return ((abs(self.i - other.i) == 1 and self.j == other.j) or
                (abs(self.j - other.j) == 1 and self.i == other.i))

    def neighbors(self) -> Iterator['Pos']:
        """Yield the four orthogonal neighbors (up, down, left, right)."""
        i, j = self.i, self.j
        yield from [Pos(i - 1, j), Pos(i + 1, j), Pos(i, j - 1), Pos(i, j + 1)]

    def manhattan(self, other: 'Pos') -> int:
        """Return the Manhattan (L1) distance to *other*."""
        return abs(self.i - other.i) + abs(self.j - other.j)
class Fighter:
    '''One combat unit (elf or goblin) in the cave.'''
    def __init__(self, elf: bool, pos: Pos, attack: int = 3, hp: int = 200) -> None:
        self.elf = elf  # True for an elf, False for a goblin
        self.attack = attack  # damage dealt per hit
        self.hp = hp  # remaining hit points
        self.pos = pos  # current grid position
        self.dead = False
        self.moved_last_time = False  # whether the previous turn managed to act
        self.path_to_follow = None  # tail of the last computed path (appears unused elsewhere in view)
    def __repr__(self) -> str:
        return f"Fighter(elf={self.elf}, pos={self.pos}, attack={self.attack}, hp={self.hp})"
    def take_turn(self, cave: 'Cave', movement_last_round: bool) -> bool:
        '''Take one turn: attack an adjacent enemy, or move and then try to attack.

        Returns False only when no living enemies remain (combat over);
        True otherwise.  NOTE(review): if the unit moves and then has no
        adjacent enemy, the final `return True` (inside the `if`) is
        skipped and the method falls through returning None, which is
        falsy and will be misread by Cave.round as "no enemies" - likely
        a bug to confirm/fix.
        '''
        print(self)
        "returns whether it found enemies"
        # If no enemies, return False
        enemies = [fighter for fighter in cave.fighters
                   if fighter.elf != self.elf and not fighter.dead]
        if not enemies:
            print("no enemies")
            return False
        # Elif enemy in range, attack
        # Target selection: lowest hp first, ties broken in reading order.
        adjacent_enemies = [enemy for enemy in enemies
                            if self.pos.adjacent(enemy.pos)]
        adjacent_enemies.sort(key=lambda enemy: (enemy.hp, enemy.pos.i, enemy.pos.j))
        if adjacent_enemies:
            self.moved_last_time = True
            attackee = adjacent_enemies[0]
            print("attacking", attackee)
            attackee.hp -= self.attack
            if attackee.hp <= 0:
                attackee.dead = True
            return True # able to do somethign
        # Else move and (maybe) attack
        # Shortcut: if this unit failed to move last turn and nothing on the
        # board moved last round, pathing cannot have changed - skip the search.
        if not self.moved_last_time and not movement_last_round:
            # just return, if I couldn't move last time, I can't move now
            print("unable to move")
            return True
        path_to_follow = cave.optimal_path(self)
        if path_to_follow is None:
            print("no path")
            self.moved_last_time = False
            return True
        else:
            # move
            # path_to_follow[1] is treated as the first step (index 0 is the start).
            print(path_to_follow)
            self.pos = path_to_follow[1]
            self.path_to_follow = path_to_follow[1:]
            self.moved_last_time = True
        # maybe attack
        # Elif enemy in range, attack
        adjacent_enemies = [enemy for enemy in enemies
                            if self.pos.adjacent(enemy.pos)]
        adjacent_enemies.sort(key=lambda enemy: (enemy.hp, enemy.pos.i, enemy.pos.j))
        if adjacent_enemies:
            attackee = adjacent_enemies[0]
            attackee.hp -= self.attack
            if attackee.hp <= 0:
                attackee.dead = True
            return True # able to do somethign
class Cave(NamedTuple):
walls: Set[Pos]
fighters: List[Fighter]
last_occupied = [None]
def optimal_path(self, fighter: Fighter) -> Optional[List[Pos]]:
"""
return the optimal path that will put me next to an enemy
"""
#print(f"{fighter} considering where to move to")
enemies = [f for f in self.fighters
if fighter.elf != f.elf and not f.dead]
off_limits = self.walls | {f.pos for f in self.fighters if not f.dead}
# These are the places I would like to get to
targets = {pos
for enemy in enemies
for pos in enemy.pos.neighbors()
if pos not in off_limits}
if not targets:
# no targets
return None
visited = set()
not_visited = {fighter.pos}
came_from = {}
# For each node, the cost of getting from the start node to that node.
g_score = {} # deafult iinfinity
g_score[pos] = 0
# or each node, the total cost of getting from the start node to the goal
f_score = {} # deault infinity
f_score[pos] = min(fighter.pos.manhattan(target) for target in targets)
while not_visited:
current = min(not_visited, key=lambda pos: f_score.get(pos, float('inf')))
if current in targets:
pass
visited.add(pos)
not_visited.remove(pos)
for each neighbor of current
if neighbor in closedSet
continue // Ignore the neighbor which is already evaluated.
// The distance from start to a neighbor
tentative_gScore := gScore[current] + dist_between(current, neighbor)
if neighbor not in openSet // Discover a new node
openSet.Add(neighbor)
else if tentative_gScore >= gScore[neighbor]
continue;
// This path is the best until now. Record it!
cameFrom[neighbor] := current
gScore[neighbor] := tentative_gScore
fScore[neighbor] := gScore[neighbor] + heuristic_cost_estimate(neighbor, goal)
backpointers = {fighter.pos: None}
reached = set()
frontier = [(min(fighter.pos.manhattan(target),
0,
for target in targets), fighter.pos)]
best_score = float('inf')
while frontier:
score, length, pos = heapq.heappop(frontier)
if score > best_score:
# The best remaining candidate is worse than
# what we've already found, so break
break
if pos in targets:
reached.add(pos)
best_score = length
for next_pos in pos.neighbors():
if next_pos in off_limits:
continue
if next_pos in backpointers:
if pos in path:
continue
new_path = path + [pos]
new_score = len(new_path) + min(pos.manhattan(target) for target in targets)
heapq.heappush(frontier, (new_score, new_path))
# at this point, shortest_paths has all the shortest paths
# need to sort by (1) reading order of destination (2) reading order of first step
successful_paths.sort(key=lambda path: (path[-1].i, path[-1].j, path[1].i, path[1].j))
if successful_paths:
return successful_paths[0]
else:
#print("nowhere good to go")
return None
def round(self) -> bool:
"""Return true if the game is not over"""
occupied = {f.pos: f.elf for f in self.fighters if not f.dead}
movement_last_round = occupied != self.last_occupied[0]
self.fighters.sort(key=lambda f: (f.pos.i, f.pos.j))
game_over = False
for fighter in self.fighters:
if fighter.dead:
continue
found_enemies = fighter.take_turn(self, movement_last_round)
if not found_enemies:
game_over = True
self.last_occupied[0] = occupied
return game_over
def total_hit_points(self) -> int:
return sum(f.hp for f in self.fighters if not f.dead)
def __repr__(self) -> str:
outputs = {**{pos: '#' for pos in self.walls},
**{f.pos: 'E' if f.elf else 'G' for f in self.fighters if not f.dead}}
max_i = max(pos.i for pos in outputs)
max_j = max(pos.j for pos in outputs)
return "\n".join("".join(outputs.get(Pos(i, j), ".") for j in range(max_j + 1))
for i in range(max_i + 1))
def parse(raw: str) -> Cave:
    """Parse an ASCII cave map into a Cave of walls and fighters.

    '#' marks a wall, 'E' an elf, 'G' a goblin; any other character is
    open floor.  Coordinates are (row i, column j).
    """
    walls = set()
    fighters = []
    for i, line in enumerate(raw.split("\n")):
        for j, ch in enumerate(line.strip()):
            here = Pos(i, j)
            if ch == '#':
                walls.add(here)
            elif ch in ('E', 'G'):
                fighters.append(Fighter(elf=(ch == 'E'), pos=here))
    return Cave(walls, fighters)
def run_game(cave: Cave) -> int:
    """Play rounds until one side is wiped out and return the outcome.

    The outcome is the number of fully completed rounds multiplied by
    the total hit points of the surviving units.
    """
    completed_rounds = 0
    game_over = False
    while not game_over:
        print("round", completed_rounds)
        print(cave)
        game_over = cave.round()
        if not game_over:
            completed_rounds += 1
    return completed_rounds * cave.total_hit_points()
# Worked examples: run_game returns completed rounds * remaining hit points,
# checked against the expected outcomes for these sample caves.
CAVE = parse(RAW)
assert run_game(CAVE) == 27730
CAVE2 = parse("""#######
#G..#E#
#E#E.E#
#G.##.#
#...#E#
#...E.#
#######""")
#assert run_game(CAVE2) == 36334
CAVE3 = parse("""#########
#G......#
#.E.#...#
#..##..G#
#...##..#
#...#...#
#.G...G.#
#.....G.#
#########""")
assert run_game(CAVE3) == 18740
# Solve the real puzzle input.
with open('data/day15.txt') as f:
    raw = f.read()
cave = parse(raw)
print(run_game(cave))
| 4,497 | 527 | 149 |
b4600c3d8ae2796ee9346a4900e1d027d283396a | 2,491 | py | Python | infer/lib/python/inferlib/capture/util.py | ievans/infer | c483fe101a3bdb3e86c4444a7b8d6197eada67c0 | [
"BSD-3-Clause"
] | null | null | null | infer/lib/python/inferlib/capture/util.py | ievans/infer | c483fe101a3bdb3e86c4444a7b8d6197eada67c0 | [
"BSD-3-Clause"
] | 2 | 2020-11-13T19:42:27.000Z | 2020-11-13T19:49:19.000Z | infer/lib/python/inferlib/capture/util.py | ievans/infer | c483fe101a3bdb3e86c4444a7b8d6197eada67c0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import logging
import subprocess
import traceback
def run_compilation_commands(cmds, clean_cmd):
"""runs compilation commands, and suggests a project cleaning command
in case there is nothing to compile.
"""
from inferlib import utils
# TODO call it in parallel
if cmds is None or len(cmds) == 0:
utils.stderr('Nothing to compile. Try running `{}` first.'
.format(clean_cmd))
return os.EX_NOINPUT
for cmd in cmds:
if cmd.start() != os.EX_OK:
return os.EX_SOFTWARE
return os.EX_OK
| 32.350649 | 78 | 0.689683 | #!/usr/bin/env python
# Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import logging
import subprocess
import traceback
def get_build_output(build_cmd):
    """Run *build_cmd* and capture its standard output.

    Returns a ``(exit_code, lines)`` tuple: ``lines`` is the decoded
    stdout split on newlines on success, or ``None`` when the command
    exits with a non-zero status.
    """
    from inferlib import utils
    # TODO make it return generator to be able to handle large builds
    process = subprocess.Popen(build_cmd, stdout=subprocess.PIPE)
    raw_output, _ = process.communicate()
    if process.returncode != 0:
        error = 'ERROR: couldn\'t run compilation command `{}`'.format(build_cmd)
        utils.stderr(error)
        return (process.returncode, None)
    return (os.EX_OK, utils.decode(raw_output).split('\n'))
def run_compilation_commands(cmds, clean_cmd):
"""runs compilation commands, and suggests a project cleaning command
in case there is nothing to compile.
"""
from inferlib import utils
# TODO call it in parallel
if cmds is None or len(cmds) == 0:
utils.stderr('Nothing to compile. Try running `{}` first.'
.format(clean_cmd))
return os.EX_NOINPUT
for cmd in cmds:
if cmd.start() != os.EX_OK:
return os.EX_SOFTWARE
return os.EX_OK
def run_cmd_ignore_fail(cmd):
    """Run *cmd* and return its combined stdout/stderr as bytes.

    Never raises for command failures: when the command cannot be found
    or exits non-zero, a diagnostic string including the traceback is
    returned instead.
    """
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # only genuine errors should be turned into a diagnostic string.
        return 'calling {cmd} failed\n{trace}'.format(
            cmd=' '.join(cmd),
            trace=traceback.format_exc())
def log_java_version():
    """Log the versions of the `java` and `javac` binaries found on PATH."""
    versions = [run_cmd_ignore_fail([tool, '-version'])
                for tool in ('java', 'javac')]
    logging.info('java versions:\n%s%s', versions[0], versions[1])
def base_argparser(description, module_name):
    """Return a factory that builds a bare parser for *module_name*.

    The produced parser carries only a description/usage group and
    defines no arguments of its own (help is disabled as well).
    """
    def make_parser(group_name=module_name):
        """This creates an empty argparser for the module, which provides only
        description/usage information and no arguments."""
        module_parser = argparse.ArgumentParser(add_help=False)
        group_title = '{grp} module'.format(grp=group_name)
        module_parser.add_argument_group(group_title, description=description)
        return module_parser
    return make_parser
| 1,327 | 0 | 92 |
2aad7ef0a2d21bef1c4f2e68f9714c9647487d7d | 3,659 | py | Python | siberianpine/mixedmodel.py | ekaterinailin/SiberianPine | 4172b1997a1242bdd130c910179063239ed69bfc | [
"MIT"
] | null | null | null | siberianpine/mixedmodel.py | ekaterinailin/SiberianPine | 4172b1997a1242bdd130c910179063239ed69bfc | [
"MIT"
] | 2 | 2019-10-16T13:31:51.000Z | 2019-10-16T13:33:06.000Z | siberianpine/mixedmodel.py | ekaterinailin/siberianpine | 4172b1997a1242bdd130c910179063239ed69bfc | [
"MIT"
] | 1 | 2019-10-20T09:18:28.000Z | 2019-10-20T09:18:28.000Z | import corner
import emcee
import numpy as np
class MixedModel(object):
    """Combine multiple FFDs and fit
    their parameters simultaneously with
    shared alpha.
    """
    def __init__(self, BFA=None, loglikelihood=None, alpha_prior=None):
        '''Constructor for a Mixed Model Bayesian analysis suite.
        Attributes:
        -----------
        BFA : list of BayesianFlaringAnalysis objects
        loglikelihood : func
            loglikelihood function
        alpha_prior : float
            shared prior for alpha
        '''
        # A mutable default ([]) would be shared between all instances;
        # fall back to a fresh list instead.  Passing a list explicitly
        # behaves exactly as before.
        self.BFA = [] if BFA is None else BFA
        self.loglikelihood = loglikelihood
        self.alpha_prior = alpha_prior
    def sample_posterior_with_mcmc(self, nwalkers=300, cutoff=100, steps=500):
        '''Sample from the posterior using MCMC.
        Parameters:
        -------------
        nwalkers : int
            number of walkers to run around the parameter space
        cutoff : int
            You do not want to use values in the beginning
            of the chain, so cut them off.
        steps : int
            How long to run the walk.
        Return:
        --------
        Sets self.samples: ndarray with dimensions
        ((steps - cutoff) * nwalkers) x n_parameters
        '''
        # Build one argument tuple per data set plus one epsilon prior each;
        # the shared alpha prior is appended last.
        args, inits = [], []
        for bfa in self.BFA:
            args.append([bfa.mined, bfa.Tprime, bfa.Mprime,
                         bfa.deltaT, bfa.threshed, bfa.M,
                         bfa.events])
            inits.append(bfa.eps_prior)
        inits.append(self.alpha_prior)
        # Drop unset entries (NOTE(review): `if i` also drops a prior of
        # exactly 0 — kept as in the original; confirm this is intended).
        args = [i for i in args if i is not None]
        inits = [i for i in inits if i]
        ndim = len(inits)
        # Start each walker at the priors plus a tiny random perturbation.
        pos = [inits + 1e-4*np.random.randn(ndim) for _ in range(nwalkers)]
        sampler = emcee.EnsembleSampler(nwalkers, ndim, self.loglikelihood, args=args)
        sampler.run_mcmc(pos, steps)
        # Discard the burn-in phase and flatten walkers into one sample list.
        self.samples = sampler.chain[:, cutoff:, :].reshape((-1, ndim))
    def show_corner_plot(self, save=False, path=''):
        '''Show (and save) a corner plot. NOT TESTED.
        '''
        truths = [bfa.eps_prior for bfa in self.BFA]
        truths.append(2.)
        ndim = len(self.BFA)
        labels = [r'$\epsilon_{}$'.format(i) for i in range(ndim)] + [r'$\alpha$']
        fig = corner.corner(self.samples,
                            labels=labels,
                            quantiles=[0.16, 0.5, 0.84],
                            show_titles=True,
                            title_kwargs={"fontsize": 12},
                            truths=truths,)
        if save:  # idiomatic truthiness instead of `== True`
            fig.savefig(path, dpi=300)
    def calculate_percentiles(self, percentiles=(16, 50, 84)):
        '''Calculate best fit value and its uncertainties.
        Parameters:
        -----------
        percentiles : n-list
            percentiles to compute
        Return:
        --------
        a list of n-tuples with
        (median, upper_uncert, lower_uncert)
        each.
        '''
        # For each parameter column: (middle, upper-middle, middle-lower).
        map_of_results = map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                             zip(*np.percentile(self.samples, percentiles, axis=0)))
        p = list(map_of_results)
        self.percentiles = p
        return p
| 32.096491 | 86 | 0.52528 | import corner
import emcee
import numpy as np
class MixedModel(object):
    """Combine multiple FFDs and fit
    their parameters simultaneously with
    shared alpha.
    """
    def __init__(self, BFA=None, loglikelihood=None, alpha_prior=None):
        '''Constructor for a Mixed Model Bayesian analysis suite.
        Attributes:
        -----------
        BFA : list of BayesianFlaringAnalysis objects
        loglikelihood : func
            loglikelihood function
        alpha_prior : float
            shared prior for alpha
        '''
        # A mutable default ([]) would be shared between all instances;
        # fall back to a fresh list instead.  Passing a list explicitly
        # behaves exactly as before.
        self.BFA = [] if BFA is None else BFA
        self.loglikelihood = loglikelihood
        self.alpha_prior = alpha_prior
    def __repr__(self):
        return f"MixedModel: {len(self.BFA)} data sets."
    def sample_posterior_with_mcmc(self, nwalkers=300, cutoff=100, steps=500):
        '''Sample from the posterior using MCMC.
        Parameters:
        -------------
        nwalkers : int
            number of walkers to run around the parameter space
        cutoff : int
            You do not want to use values in the beginning
            of the chain, so cut them off.
        steps : int
            How long to run the walk.
        Return:
        --------
        Sets self.samples: ndarray with dimensions
        ((steps - cutoff) * nwalkers) x n_parameters
        '''
        # Build one argument tuple per data set plus one epsilon prior each;
        # the shared alpha prior is appended last.
        args, inits = [], []
        for bfa in self.BFA:
            args.append([bfa.mined, bfa.Tprime, bfa.Mprime,
                         bfa.deltaT, bfa.threshed, bfa.M,
                         bfa.events])
            inits.append(bfa.eps_prior)
        inits.append(self.alpha_prior)
        # Drop unset entries (NOTE(review): `if i` also drops a prior of
        # exactly 0 — kept as in the original; confirm this is intended).
        args = [i for i in args if i is not None]
        inits = [i for i in inits if i]
        ndim = len(inits)
        # Start each walker at the priors plus a tiny random perturbation.
        pos = [inits + 1e-4*np.random.randn(ndim) for _ in range(nwalkers)]
        sampler = emcee.EnsembleSampler(nwalkers, ndim, self.loglikelihood, args=args)
        sampler.run_mcmc(pos, steps)
        # Discard the burn-in phase and flatten walkers into one sample list.
        self.samples = sampler.chain[:, cutoff:, :].reshape((-1, ndim))
    def show_corner_plot(self, save=False, path=''):
        '''Show (and save) a corner plot. NOT TESTED.
        '''
        truths = [bfa.eps_prior for bfa in self.BFA]
        truths.append(2.)
        ndim = len(self.BFA)
        labels = [r'$\epsilon_{}$'.format(i) for i in range(ndim)] + [r'$\alpha$']
        fig = corner.corner(self.samples,
                            labels=labels,
                            quantiles=[0.16, 0.5, 0.84],
                            show_titles=True,
                            title_kwargs={"fontsize": 12},
                            truths=truths,)
        if save:  # idiomatic truthiness instead of `== True`
            fig.savefig(path, dpi=300)
    def calculate_percentiles(self, percentiles=(16, 50, 84)):
        '''Calculate best fit value and its uncertainties.
        Parameters:
        -----------
        percentiles : n-list
            percentiles to compute
        Return:
        --------
        a list of n-tuples with
        (median, upper_uncert, lower_uncert)
        each.
        '''
        # For each parameter column: (middle, upper-middle, middle-lower).
        map_of_results = map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                             zip(*np.percentile(self.samples, percentiles, axis=0)))
        p = list(map_of_results)
        self.percentiles = p
        return p
| 56 | 0 | 32 |
5f5c36a532e06621d2a59be7a791db4e60593dd0 | 143 | py | Python | 2021/Day5_argparse_makefile_docker/funcs.py | afarnudi/ScientificSoftwareDevelopment | c70f8b1c80d24dbcca12dbcca3722053954f7eaa | [
"BSD-3-Clause"
] | null | null | null | 2021/Day5_argparse_makefile_docker/funcs.py | afarnudi/ScientificSoftwareDevelopment | c70f8b1c80d24dbcca12dbcca3722053954f7eaa | [
"BSD-3-Clause"
] | null | null | null | 2021/Day5_argparse_makefile_docker/funcs.py | afarnudi/ScientificSoftwareDevelopment | c70f8b1c80d24dbcca12dbcca3722053954f7eaa | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
| 14.3 | 23 | 0.468531 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def fibonacci(n):
    """Return the n-th Fibonacci number (fibonacci(0) == 0, fibonacci(1) == 1).

    For negative *n* the loop body never runs and 0 is returned,
    matching the original behaviour.
    """
    a, b = 0, 1
    for _ in range(n):  # the loop index itself is unused
        a, b = b, a + b
    return a
| 72 | 0 | 23 |
136eea535e6412e077f5bf39645b0a54461c222c | 1,032 | py | Python | cmd/parser.py | bfu4/mdis | fac5ec078ffeaa9339df4b31b9b71140563f4f14 | [
"MIT"
] | 13 | 2021-05-17T06:38:50.000Z | 2022-03-27T15:39:57.000Z | cmd/parser.py | bfu4/mdis | fac5ec078ffeaa9339df4b31b9b71140563f4f14 | [
"MIT"
] | null | null | null | cmd/parser.py | bfu4/mdis | fac5ec078ffeaa9339df4b31b9b71140563f4f14 | [
"MIT"
] | null | null | null | import argparse
# borrowed usage from @netspooky/inhale
from cmd.bytecode_format_command import format_to_bytecode
from cmd.instruction_command import get_instr
from cmd.opcode_command import get_op
parser = argparse.ArgumentParser(description="mdis.py")
# CLI option table: (flag, dest, help text, handler callable, nargs).
# set_up_arguments() registers indices 0, 1, 2 and 4 on the parser;
# the handler at index 3 is not consumed there.
args = [
    ('-b', "INT_TO_BC", "shift into bytecode format", format_to_bytecode, 1),
    ('-f', "FILE", "get instructions of a given file", get_instr, 1),
    ('-op', "INT_TO_OP", "get opcode of a given integer", get_op, 1),
    ('-fr', "FROM", "from address", None, 1),
    ('-t', "TO", "to address", None, 1)
]
def set_up_arguments():
    """
    Register every entry of the module-level ``args`` table on the parser.
    The handler callable stored at index 3 is intentionally skipped here.
    :return:
    """
    for flag, dest, help_text, _handler, nargs in args:
        add_argument(flag, dest, help_text, nargs)
def add_argument(flag: str, dest, _help: str, nargs: int):
    """
    Register a single option on the module-level ``parser``.
    :param flag: argument flag
    :param dest: destination attribute name
    :param _help: help message shown in usage
    :param nargs: number of values the flag consumes
    :return:
    """
    parser.add_argument(flag, help=_help, dest=dest, nargs=nargs)
| 27.157895 | 77 | 0.657946 | import argparse
# borrowed usage from @netspooky/inhale
from cmd.bytecode_format_command import format_to_bytecode
from cmd.instruction_command import get_instr
from cmd.opcode_command import get_op
parser = argparse.ArgumentParser(description="mdis.py")
# CLI option table: (flag, dest, help text, handler callable, nargs).
# set_up_arguments() registers indices 0, 1, 2 and 4 on the parser;
# the handler at index 3 is not consumed there.
args = [
    ('-b', "INT_TO_BC", "shift into bytecode format", format_to_bytecode, 1),
    ('-f', "FILE", "get instructions of a given file", get_instr, 1),
    ('-op', "INT_TO_OP", "get opcode of a given integer", get_op, 1),
    ('-fr', "FROM", "from address", None, 1),
    ('-t', "TO", "to address", None, 1)
]
def set_up_arguments():
    """
    Register every entry of the module-level ``args`` table on the parser.
    The handler callable stored at index 3 is intentionally skipped here.
    :return:
    """
    for flag, dest, help_text, _handler, nargs in args:
        add_argument(flag, dest, help_text, nargs)
def add_argument(flag: str, dest, _help: str, nargs: int):
    """
    Register a single option on the module-level ``parser``.
    :param flag: argument flag
    :param dest: destination attribute name
    :param _help: help message shown in usage
    :param nargs: number of values the flag consumes
    :return:
    """
    parser.add_argument(flag, help=_help, dest=dest, nargs=nargs)
| 0 | 0 | 0 |
5f6ad36fbdca6e78e071b1869344a9dadc84f0a6 | 1,013 | py | Python | utils/salad_earnings_update.py | kllmagn/SaladCLIPlus-Linux | 840d77da503d85d59fff6c7984e0271462b0a475 | [
"MIT"
] | 2 | 2022-02-02T07:11:53.000Z | 2022-02-13T05:20:51.000Z | utils/salad_earnings_update.py | kllmagn/SaladCLIPlus-Linux | 840d77da503d85d59fff6c7984e0271462b0a475 | [
"MIT"
] | 1 | 2022-03-31T21:57:50.000Z | 2022-03-31T21:57:50.000Z | utils/salad_earnings_update.py | kllmagn/SaladCLIPlus-Linux | 840d77da503d85d59fff6c7984e0271462b0a475 | [
"MIT"
] | 9 | 2021-05-19T11:35:13.000Z | 2022-02-15T21:20:37.000Z | import requests
import json
import time
import sys
import os
| 29.794118 | 106 | 0.600197 | import requests
import json
import time
import sys
import os
def Salad_Earnings():
    """Download the 1-day Salad earning history and hand it to the viewer.

    Side effects only: reads auth tokens from ``config.json``, writes the
    raw API response to ``data.json`` and shells out to
    ``utils/History_show.py`` to render it.
    """
    # xterm escape sequence: set the terminal window title.
    sys.stdout.write("\x1b]2;Downloading History\x07")
    # User-Agent mimicking the Salad desktop (Electron) client.
    headers = {
        "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
                      ' Salad/0.4.0 Chrome/78.0.3904.130 Electron/7.1.9 Safari/537.36'
    }
    with open('config.json') as f:
        js = json.load(f)
    salad_auth = js['salad_key']
    salad_refresh_token = js['salad_refresh_token']
    # Session cookies expected by app-api.salad.io.
    cookie = {
        "sAccessToken": salad_auth,
        "sIdRefreshToken": salad_refresh_token
    }
    r = requests.get(url='https://app-api.salad.io/api/v2/reports/1-day-earning-history', cookies=cookie,
                     headers=headers)
    # Parsed JSON payload (variable name "jason" kept as-is).
    jason = r.json()
    with open('data.json', 'w+') as f:
        f.write(json.dumps(jason))
    print('Downloading data...')
    time.sleep(2)
    # NOTE(review): shells out with a fixed command string; consider
    # subprocess.run([...], shell=False) instead of os.system.
    os.system('python3 utils/History_show.py --asd -f data.json --smh -min -rev')
| 920 | 0 | 25 |
e1ad284486084686508cbe765e4e85868a259f90 | 5,468 | py | Python | forecast/extract.py | fab13n/balloon | 5c45c21c9012178edc4409685493bf4873fae107 | [
"MIT"
] | 1 | 2020-05-14T10:40:35.000Z | 2020-05-14T10:40:35.000Z | forecast/extract.py | fab13n/balloon | 5c45c21c9012178edc4409685493bf4873fae107 | [
"MIT"
] | null | null | null | forecast/extract.py | fab13n/balloon | 5c45c21c9012178edc4409685493bf4873fae107 | [
"MIT"
] | null | null | null | import numpy as np
import json
from datetime import datetime
from dateutil.parser import parse
from balloon.settings import GRIB_PATH
from core.models import Column, Cell
from forecast.models import GribModel, grib_models
from forecast.preprocess import SHORT_NAMES
EPSILON = 1e-5 # EPSILON° < 1m
| 39.338129 | 107 | 0.597842 | import numpy as np
import json
from datetime import datetime
from dateutil.parser import parse
from balloon.settings import GRIB_PATH
from core.models import Column, Cell
from forecast.models import GribModel, grib_models
from forecast.preprocess import SHORT_NAMES
EPSILON = 1e-5 # EPSILON° < 1m
class ColumnExtractor(object):
    """Extract atmospheric columns and terrain data from preprocessed GRIB dumps.

    `model` may be either a `GribModel` instance or a model name that is
    resolved through the `grib_models` registry.
    """
    def __init__(self, model, extrapolated_pressures=()):
        if isinstance(model, GribModel):
            self.model = model
        else:
            # `model` is a model name: resolve it to a GribModel instance.
            # BUGFIX: the original re-assigned `self.model = model` after
            # this branch, clobbering the lookup and leaving a plain
            # string in self.model, which breaks every later
            # `self.model.<attr>` access for name-constructed extractors.
            self.model = grib_models[model]
        self.extrapolated_pressures = extrapolated_pressures
        # Those will be filled by `_update_array_and_shape` lazily.
        self.date = None
        self.array = None
        self.shape = None
    def _update_array_and_shape(self, date):
        """
        Ensures that self.array and self.shape contain the atmosphere's description for that date.
        Won't reload if the previous extraction request was for the same date.
        """
        date = self.model.round_time(date)
        if self.array is None or self.date != date:  # TODO perform rounding here?
            basename = date.strftime("%Y%m%d%H%M")
            model_name = f"{self.model.name}_{self.model.grid_pitch}"
            try:
                with (GRIB_PATH / model_name / (basename + ".json")).open('r') as f:
                    self.shape = json.load(f)
                with (GRIB_PATH / model_name / (basename + ".np")).open('rb') as f:
                    self.array = np.load(f)
            except IOError:
                raise ValueError("No preprocessed data for this date")
            self.date = date
    def extract_ground_altitude(self, position):
        """
        Extracts ground altitude at given position
        :param position: (lon, lat)
        :return: altitude above MSL in meters
        """
        model_name = f"{self.model.name}_{self.model.grid_pitch}"
        (lon, lat) = self.model.round_position(position)
        try:
            with (GRIB_PATH / model_name / "terrain.json").open('r') as f:
                shape = json.load(f)
            with (GRIB_PATH / model_name / "terrain.np").open('rb') as f:
                array = np.load(f)
        except IOError:
            raise ValueError("No preprocessed terrain for this date")
        try:
            # TODO Round both coords to grid instead of testing up to epsilon?
            lon_idx = next(idx for (idx, lon2) in enumerate(shape['lons']) if abs(lon-lon2) < EPSILON)
            lat_idx = next(idx for (idx, lat2) in enumerate(shape['lats']) if abs(lat-lat2) < EPSILON)
        except StopIteration:
            raise ValueError("No preprocessed data for this position")
        return int(array[lon_idx][lat_idx])
    def extract(self, date, position):
        """
        Retrieve an atmospheric column for the given date and position.
        :param date: UTC valid datetime
        :param position: (lon, lat)
        :return: a `Column` object
        """
        (lon, lat) = self.model.round_position(position)
        self._update_array_and_shape(date)
        try:
            lon_idx = next(idx for (idx, lon2) in enumerate(self.shape['lons']) if abs(lon-lon2) < EPSILON)
            lat_idx = next(idx for (idx, lat2) in enumerate(self.shape['lats']) if abs(lat-lat2) < EPSILON)
        except StopIteration:
            raise ValueError("No preprocessed weather data for this position")
        np_column = self.array[lon_idx][lat_idx][:]
        # Build one Cell per altitude level; renamed locals so the Cell
        # list no longer shadows the loop variable or the final Column.
        cells = []
        for p, raw_cell in zip(self.shape['alts'], np_column):
            kwargs = {'p': p}
            for name, val in zip(SHORT_NAMES, raw_cell):
                kwargs[name] = float(val)
            cells.append(Cell(**kwargs))
        return Column(
            grib_model=self.model,
            position=position,
            valid_date=self.model.round_time(date),
            analysis_date=parse(self.shape['analysis_date']),
            ground_altitude=self.extract_ground_altitude(position),
            cells=cells,
            extrapolated_pressures=self.extrapolated_pressures)
    def list_files(self, date_from=None, n=None):
        """
        Returns a dict `valid_date -> analysis_date` of weather files available for
        this model, optionally filtered by date (only those more recent in `valid_date`
        than `date_from`).
        :param date_from: optional starting datetime. `valid_date`s older than that are discarded.
        :param n: optional count; keep only the `n` most recent valid dates.
        :return: `valid_date -> analysis_date` dict.
        """
        model_name = f"{self.model.name}_{self.model.grid_pitch}"
        results = {}
        for shape_file in (GRIB_PATH / model_name).glob("*.json"):
            try:
                valid_date = datetime.strptime(shape_file.stem, '%Y%m%d%H%M')
            except ValueError:
                continue  # Not a forecast file
            if n is None and date_from is not None and valid_date < date_from:
                continue
            try:
                with shape_file.open() as f:
                    analysis_date = parse(json.load(f)['analysis_date'])
            except Exception:
                continue
            results[valid_date] = analysis_date
        if n is not None:
            # Only keep `n` most recent valid dates
            results = {k: v for k, v in sorted(results.items(), reverse=True)[:n]}
        return results
| 431 | 4,712 | 23 |
626f831a6f459bb24efab770561e80689c01d978 | 2,976 | py | Python | DockerBuildManagement/BuildManager.py | hansehe/DockerBuildManagement | 774ddfb5184dc9c9ae0c307c7d5963a4ccb104f8 | [
"MIT"
] | 8 | 2019-04-03T13:40:30.000Z | 2020-11-29T09:20:13.000Z | DockerBuildManagement/BuildManager.py | hansehe/DockerBuildManagement | 774ddfb5184dc9c9ae0c307c7d5963a4ccb104f8 | [
"MIT"
] | 1 | 2019-02-06T16:05:06.000Z | 2019-02-24T22:59:43.000Z | DockerBuildManagement/BuildManager.py | hansehe/DockerBuildManagement | 774ddfb5184dc9c9ae0c307c7d5963a4ccb104f8 | [
"MIT"
] | 5 | 2018-12-15T19:03:25.000Z | 2021-09-22T23:42:33.000Z | import sys
import os
from DockerBuildManagement import ChangelogSelections, BuildSelections, PublishSelections, RunSelections, SwarmSelections, TestSelections, BuildTools, PromoteSelections
from SwarmManagement import SwarmTools
if __name__ == "__main__":
    # Script entry point: forward the CLI arguments (sans program name).
    arguments = sys.argv[1:]
    HandleManagement(arguments)
| 36.292683 | 168 | 0.689852 | import sys
import os
from DockerBuildManagement import ChangelogSelections, BuildSelections, PublishSelections, RunSelections, SwarmSelections, TestSelections, BuildTools, PromoteSelections
from SwarmManagement import SwarmTools
def GetInfoMsg():
    """Assemble the full CLI help text from every selection module's info."""
    sections = [
        ("Run:", RunSelections.GetInfoMsg()),
        ("Build:", BuildSelections.GetInfoMsg()),
        ("Publish:", PublishSelections.GetInfoMsg()),
        ("Promote:", PromoteSelections.GetInfoMsg()),
        ("Test:", TestSelections.GetInfoMsg()),
        ("Swarm Deployment of Domain Services:", SwarmSelections.GetInfoMsg()),
        ("Export Version From Changelog:", ChangelogSelections.GetInfoMsg()),
        ("Additional Info:", BuildTools.GetInfoMsg()),
    ]
    parts = ["Docker Build Management\r\n\r\n"]
    for title, body in sections:
        parts.append(title + "\r\n")
        parts.append(body + "\r\n\r\n")
    parts.append("Add '-help' to arguments to print this info again.\r\n\r\n")
    return ''.join(parts)
def GetPositionalActionArguments(arguments, index):
    """Collect the action flag at *index* plus its trailing values.

    Returns ``(actionArgs, newIndex)`` where ``newIndex`` is the next
    unconsumed position in *arguments*.
    """
    actionArgs = []
    newIndex = index + 1
    # Only tokens that look like flags ('-...') start an action.
    if arguments[index].startswith('-'):
        actionArgs.append(arguments[index])
    # A swarm action may carry an extra sub-command token right after it,
    # which is consumed as part of this action.
    if SwarmSelections.CheckSwarmInArguments(actionArgs) \
        and index + 1 < len(arguments) \
        and SwarmSelections.CheckSwarmCommandInArguments([arguments[index + 1]]):
        actionArgs.append(arguments[index + 1])
        newIndex += 1
    if len(actionArgs) > 0:
        # Pull the values following the (last) recognised flag.
        selections = SwarmTools.GetArgumentValues(arguments[index:], actionArgs[-1])
        actionArgs += selections
    return actionArgs, newIndex
def SetDefaultCommonEnvVariables():
    """Ensure PWD is defined, defaulting to the current working directory
    (with forward slashes, for Windows compatibility)."""
    os.environ.setdefault('PWD', os.getcwd().replace('\\', '/'))
def HandleManagement(arguments):
    """Top-level dispatcher for the build-management CLI.

    Prints the help text when called without arguments (or with only
    '-help'); otherwise loads environment variables, ensures common
    defaults, handles changelog selections, then walks the argument list
    and offers each positional action to every selection handler.
    """
    if len(arguments) == 0:
        print(GetInfoMsg())
        return
    if '-help' in arguments and len(arguments) == 1:
        print(GetInfoMsg())
        return
    SwarmTools.LoadEnvironmentVariables(
        arguments, BuildTools.DEFAULT_BUILD_MANAGEMENT_YAML_FILES)
    SetDefaultCommonEnvVariables()
    SwarmTools.HandleDumpYamlData(
        arguments, BuildTools.DEFAULT_BUILD_MANAGEMENT_YAML_FILES)
    ChangelogSelections.HandleChangelogSelections(arguments)
    index = 0
    # Each iteration consumes one action (flag plus its values) and
    # dispatches it to every selection module in turn.
    while index < len(arguments):
        actionArgs, index = GetPositionalActionArguments(arguments, index)
        SwarmSelections.HandleSwarmSelections(actionArgs)
        BuildSelections.HandleBuildSelections(actionArgs)
        TestSelections.HandleTestSelections(actionArgs)
        RunSelections.HandleRunSelections(actionArgs)
        PublishSelections.HandlePublishSelections(actionArgs)
        PromoteSelections.HandlePromoteSelections(actionArgs)
if __name__ == "__main__":
    # Script entry point: forward the CLI arguments (sans program name).
    arguments = sys.argv[1:]
    HandleManagement(arguments)
| 2,554 | 0 | 96 |
cc75624a7048225759483caaec344e5fd1f6c5f8 | 3,267 | py | Python | multilayernn/mnist.py | subratpp/neuralnetworks | cf9719d59101b256f0f8737e44b6cf067be5d913 | [
"MIT"
] | null | null | null | multilayernn/mnist.py | subratpp/neuralnetworks | cf9719d59101b256f0f8737e44b6cf067be5d913 | [
"MIT"
] | null | null | null | multilayernn/mnist.py | subratpp/neuralnetworks | cf9719d59101b256f0f8737e44b6cf067be5d913 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# MNIST digit classification with the project's multi-layer NN.
# Notebook export: "In[...]" markers are the original cell boundaries.
# In[1]:
from multilayernn import * #Import from own library
# # MNIST
# ## 1. Data Processing and One Hot Encoding
# In[2]:
train = pd.read_csv("datasets/mnist_train.csv") #read data from file
#separating labels and pixels
train_labels=np.array(train.loc[:,'label'])
train_data=np.array(train.loc[:,train.columns!='label'])
#The characteristics of MNIST data pixels = 784 samples = 42000 classes = 10
# In[3]:
#Convert labels to one-hot encoding: one row per class, one column per sample
pixels = 784
samples = len(train_labels)
classes = 10
train_data = train_data.T #Transpose the matrix: where each column is a sample
train_label=np.zeros((classes, samples))
for col in range (samples):
    train_label[train_labels[col],col]=1
#Scale pixel values from 0..255 down to 0..1
train_data = train_data/255
# ##====================== 2. Training of Model
# Hyperparameters:
# 1. Tune the right weights as improper weights will cause exploding outputs
# 2. Tune the learning rate and gamma
# 3. Tune the number of epochs to be trained
# In[4]:
#Create Multi Layer Network (784 inputs -> 10 output classes)
nodes_per_layer = [784, 500, 200, 80, 10] #nodes in each layer of neural network
mnist_nn = deepNN(nodes_per_layer, learning_rate = 0.3, gamma = 0.7, epoch=2000)
# In[5]:
#Train the network; accuracy/error curves are written under the given filename
mnist_nn.train_model(train_data, train_label, train_labels, verbose = True, filename="accuracy/mnist/mnistdata")
# ##====================== 3. Testing of Model
# In[6]:
#data preprocessing (same pipeline as for the training set)
test = pd.read_csv("datasets/mnist_test.csv") #read data from file
#separating labels and pixels
test_labels=np.array(test.loc[:,'label'])
test_data=np.array(test.loc[:,test.columns!='label'])
#The characteristics of MNIST data pixels = 784 samples = 42000 classes = 10
# In[7]:
#Convert labels to one-hot encoding
pixels = 784
samples = len(test_labels)
classes = 10
test_data = test_data.T #Transpose the matrix: where each column is a sample
test_label=np.zeros((classes, samples))
for col in range (samples):
    test_label[test_labels[col],col]=1
#Scale pixel values from 0..255 down to 0..1
test_data = test_data/255
# In[8]:
test_error, test_accuracy = mnist_nn.test_model( test_data, test_label, test_labels, filename="accuracy/mnist/mnistdata")
# ## Conclusion:
# Check accuracy folder for all the error and accuracy data.
# <hr>
# #============================== Kaggle: Test and Compute Accuracy for Submission
# For submission to the Kaggle the kaggle test data needs to be passed through the model.
# The following code will generate the "sample_submission.csv" for the Kaggle MNIST.
#
# **Uncomment the Following for Kaggle**
# In[9]:
# test_data= pd.read_csv("datasets/kaggle/mnist_test.csv") #This generates the csv file which can be submitted to the Kaggle
# test_data=np.array(test_data) #separating labels and pixels
# #Preprocess data for the model
# test_data = test_data.T #Transpose the matrix: where each column is a sample
# test_data = test_data/255 #scale the data to range 1
# #Test the data for the model
# Y_hat, cache = mnist_nn.forward_propagation(test_data)
# Y_predicted = np.argmax(Y_hat, axis=0)
# #Create submission ready data
# df = pd.DataFrame(Y_predicted, columns = ["Label"])
# df.index.name = 'ImageId'
# df.index += 1
# df.to_csv('kaggle_submission/sample_submission.csv', index = True)
| 24.75 | 121 | 0.720233 | #!/usr/bin/env python
# coding: utf-8
# MNIST digit classification with the project's multi-layer NN.
# Notebook export: "In[...]" markers are the original cell boundaries.
# In[1]:
from multilayernn import * #Import from own library
# # MNIST
# ## 1. Data Processing and One Hot Encoding
# In[2]:
train = pd.read_csv("datasets/mnist_train.csv") #read data from file
#separating labels and pixels
train_labels=np.array(train.loc[:,'label'])
train_data=np.array(train.loc[:,train.columns!='label'])
#The characteristics of MNIST data pixels = 784 samples = 42000 classes = 10
# In[3]:
#Convert labels to one-hot encoding: one row per class, one column per sample
pixels = 784
samples = len(train_labels)
classes = 10
train_data = train_data.T #Transpose the matrix: where each column is a sample
train_label=np.zeros((classes, samples))
for col in range (samples):
    train_label[train_labels[col],col]=1
#Scale pixel values from 0..255 down to 0..1
train_data = train_data/255
# ##====================== 2. Training of Model
# Hyperparameters:
# 1. Tune the right weights as improper weights will cause exploding outputs
# 2. Tune the learning rate and gamma
# 3. Tune the number of epochs to be trained
# In[4]:
#Create Multi Layer Network (784 inputs -> 10 output classes)
nodes_per_layer = [784, 500, 200, 80, 10] #nodes in each layer of neural network
mnist_nn = deepNN(nodes_per_layer, learning_rate = 0.3, gamma = 0.7, epoch=2000)
# In[5]:
#Train the network; accuracy/error curves are written under the given filename
mnist_nn.train_model(train_data, train_label, train_labels, verbose = True, filename="accuracy/mnist/mnistdata")
# ##====================== 3. Testing of Model
# In[6]:
#data preprocessing (same pipeline as for the training set)
test = pd.read_csv("datasets/mnist_test.csv") #read data from file
#separating labels and pixels
test_labels=np.array(test.loc[:,'label'])
test_data=np.array(test.loc[:,test.columns!='label'])
#The characteristics of MNIST data pixels = 784 samples = 42000 classes = 10
# In[7]:
#Convert labels to one-hot encoding
pixels = 784
samples = len(test_labels)
classes = 10
test_data = test_data.T #Transpose the matrix: where each column is a sample
test_label=np.zeros((classes, samples))
for col in range (samples):
    test_label[test_labels[col],col]=1
#Scale pixel values from 0..255 down to 0..1
test_data = test_data/255
# In[8]:
test_error, test_accuracy = mnist_nn.test_model( test_data, test_label, test_labels, filename="accuracy/mnist/mnistdata")
# ## Conclusion:
# Check accuracy folder for all the error and accuracy data.
# <hr>
# #============================== Kaggle: Test and Compute Accuracy for Submission
# For submission to the Kaggle the kaggle test data needs to be passed through the model.
# The following code will generate the "sample_submission.csv" for the Kaggle MNIST.
#
# **Uncomment the Following for Kaggle**
# In[9]:
# test_data= pd.read_csv("datasets/kaggle/mnist_test.csv") #This generates the csv file which can be submitted to the Kaggle
# test_data=np.array(test_data) #separating labels and pixels
# #Preprocess data for the model
# test_data = test_data.T #Transpose the matrix: where each column is a sample
# test_data = test_data/255 #scale the data to range 1
# #Test the data for the model
# Y_hat, cache = mnist_nn.forward_propagation(test_data)
# Y_predicted = np.argmax(Y_hat, axis=0)
# #Create submission ready data
# df = pd.DataFrame(Y_predicted, columns = ["Label"])
# df.index.name = 'ImageId'
# df.index += 1
# df.to_csv('kaggle_submission/sample_submission.csv', index = True)
| 0 | 0 | 0 |
673364c085a4dc88d6e276d4c1ab42e2a5db1bc4 | 5,833 | py | Python | tests/test_workflows_builder_init.py | broeder-j/aiida_fleur_plugin | cca54b194f4b217abb69aaa1fca0db52c6c830c3 | [
"MIT"
] | 1 | 2017-02-07T12:31:38.000Z | 2017-02-07T12:31:38.000Z | tests/test_workflows_builder_init.py | broeder-j/aiida_fleur_plugin | cca54b194f4b217abb69aaa1fca0db52c6c830c3 | [
"MIT"
] | 16 | 2017-04-03T11:42:50.000Z | 2017-05-18T16:25:39.000Z | tests/test_workflows_builder_init.py | broeder-j/aiida_fleur_plugin | cca54b194f4b217abb69aaa1fca0db52c6c830c3 | [
"MIT"
] | null | null | null | ###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the AiiDA-FLEUR package. #
# #
# The code is hosted on GitHub at https://github.com/JuDFTteam/aiida-fleur #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.flapw.de or #
# http://aiida-fleur.readthedocs.io/en/develop/ #
###############################################################################
'''
Contains smoke tests for all workchains of aiida-fleur,
checks if builderis from aiida-core gets the correct class.
'''
import pytest
@pytest.mark.usefixtures('aiida_profile', 'clear_database')
class TestFleurWorkchainInterfaces:
"""
Test all aiida-fleur workflow interfaces
"""
# TODO
# prepare some nodes:
# structure, option, fleurinp, wfparameters
# add to builder and see if he takes it
# ggf if possible run initial step only, that the input is checked...
# In general the interfaces should be fixed and not changed. this is what
# these tests are for, to test be aware of interface breaks
def test_fleur_scf_wc_init(self):
"""
Test the interface of the scf workchain
"""
from aiida_fleur.workflows.scf import FleurScfWorkChain
builder = FleurScfWorkChain.get_builder()
def test_fleur_eos_wc_init(self):
"""
Test the interface of the eos workchain
"""
from aiida_fleur.workflows.eos import FleurEosWorkChain
builder = FleurEosWorkChain.get_builder()
def test_fleur_dos_wc_init(self):
"""
Test the interface of the dos workchain
"""
from aiida_fleur.workflows.dos import fleur_dos_wc
builder = fleur_dos_wc.get_builder()
def test_fleur_corehole_wc_init(self):
"""
Test the interface of the corehole workchain
"""
from aiida_fleur.workflows.corehole import FleurCoreholeWorkChain
builder = FleurCoreholeWorkChain.get_builder()
def test_fleur_initial_cls_wc_init(self):
"""
Test the interface of the scf workchain
"""
from aiida_fleur.workflows.initial_cls import FleurInitialCLSWorkChain
builder = FleurInitialCLSWorkChain.get_builder()
def test_fleur_relax_wc_init(self):
"""
Test the interface of the relax workchain
"""
from aiida_fleur.workflows.relax import FleurRelaxWorkChain
builder = FleurRelaxWorkChain.get_builder()
def test_fleur_optimize_para_wc_init(self):
"""
Test the interface of the optimize_para_ workchain
"""
from aiida_fleur.workflows.optimize_para import fleur_optimize_parameters_wc
builder = fleur_optimize_parameters_wc.get_builder()
def test_fleur_mae_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.mae import FleurMaeWorkChain
builder = FleurMaeWorkChain.get_builder()
def test_fleur_mae_conv_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.mae_conv import FleurMaeConvWorkChain
builder = FleurMaeConvWorkChain.get_builder()
def test_fleur_ssdisp_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.ssdisp import FleurSSDispWorkChain
builder = FleurSSDispWorkChain.get_builder()
def test_fleur_ssdisp_conv_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.ssdisp_conv import FleurSSDispConvWorkChain
builder = FleurSSDispConvWorkChain.get_builder()
def test_fleur_dmi_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.dmi import FleurDMIWorkChain
builder = FleurDMIWorkChain.get_builder()
def test_fleur_base_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.base_fleur import FleurBaseWorkChain
builder = FleurBaseWorkChain.get_builder()
def test_fleur_base_relax_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.base_relax import FleurBaseRelaxWorkChain
builder = FleurBaseRelaxWorkChain.get_builder()
def test_fleur_create_magnetic_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.create_magnetic_film import FleurCreateMagneticWorkChain
builder = FleurCreateMagneticWorkChain.get_builder()
def test_fleur_strain_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.strain import FleurStrainWorkChain
builder = FleurStrainWorkChain.get_builder()
def test_fleur_orbcontrol_wc_init(self):
"""
Test the interface of the orbcontrol workchain
"""
from aiida_fleur.workflows.orbcontrol import FleurOrbControlWorkChain
builder = FleurOrbControlWorkChain.get_builder()
def test_fleur_cfcoeff_wc_init(self):
"""
Test the interface of the cfcoeff workchain
"""
from aiida_fleur.workflows.cfcoeff import FleurCFCoeffWorkChain
builder = FleurCFCoeffWorkChain.get_builder()
| 33.331429 | 91 | 0.639637 | ###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the AiiDA-FLEUR package. #
# #
# The code is hosted on GitHub at https://github.com/JuDFTteam/aiida-fleur #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.flapw.de or #
# http://aiida-fleur.readthedocs.io/en/develop/ #
###############################################################################
'''
Contains smoke tests for all workchains of aiida-fleur,
checks if builderis from aiida-core gets the correct class.
'''
import pytest
@pytest.mark.usefixtures('aiida_profile', 'clear_database')
class TestFleurWorkchainInterfaces:
"""
Test all aiida-fleur workflow interfaces
"""
# TODO
# prepare some nodes:
# structure, option, fleurinp, wfparameters
# add to builder and see if he takes it
# ggf if possible run initial step only, that the input is checked...
# In general the interfaces should be fixed and not changed. this is what
# these tests are for, to test be aware of interface breaks
def test_fleur_scf_wc_init(self):
"""
Test the interface of the scf workchain
"""
from aiida_fleur.workflows.scf import FleurScfWorkChain
builder = FleurScfWorkChain.get_builder()
def test_fleur_eos_wc_init(self):
"""
Test the interface of the eos workchain
"""
from aiida_fleur.workflows.eos import FleurEosWorkChain
builder = FleurEosWorkChain.get_builder()
def test_fleur_dos_wc_init(self):
"""
Test the interface of the dos workchain
"""
from aiida_fleur.workflows.dos import fleur_dos_wc
builder = fleur_dos_wc.get_builder()
def test_fleur_corehole_wc_init(self):
"""
Test the interface of the corehole workchain
"""
from aiida_fleur.workflows.corehole import FleurCoreholeWorkChain
builder = FleurCoreholeWorkChain.get_builder()
def test_fleur_initial_cls_wc_init(self):
"""
Test the interface of the scf workchain
"""
from aiida_fleur.workflows.initial_cls import FleurInitialCLSWorkChain
builder = FleurInitialCLSWorkChain.get_builder()
def test_fleur_relax_wc_init(self):
"""
Test the interface of the relax workchain
"""
from aiida_fleur.workflows.relax import FleurRelaxWorkChain
builder = FleurRelaxWorkChain.get_builder()
def test_fleur_optimize_para_wc_init(self):
"""
Test the interface of the optimize_para_ workchain
"""
from aiida_fleur.workflows.optimize_para import fleur_optimize_parameters_wc
builder = fleur_optimize_parameters_wc.get_builder()
def test_fleur_mae_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.mae import FleurMaeWorkChain
builder = FleurMaeWorkChain.get_builder()
def test_fleur_mae_conv_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.mae_conv import FleurMaeConvWorkChain
builder = FleurMaeConvWorkChain.get_builder()
def test_fleur_ssdisp_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.ssdisp import FleurSSDispWorkChain
builder = FleurSSDispWorkChain.get_builder()
def test_fleur_ssdisp_conv_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.ssdisp_conv import FleurSSDispConvWorkChain
builder = FleurSSDispConvWorkChain.get_builder()
def test_fleur_dmi_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.dmi import FleurDMIWorkChain
builder = FleurDMIWorkChain.get_builder()
def test_fleur_base_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.base_fleur import FleurBaseWorkChain
builder = FleurBaseWorkChain.get_builder()
def test_fleur_base_relax_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.base_relax import FleurBaseRelaxWorkChain
builder = FleurBaseRelaxWorkChain.get_builder()
def test_fleur_create_magnetic_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.create_magnetic_film import FleurCreateMagneticWorkChain
builder = FleurCreateMagneticWorkChain.get_builder()
def test_fleur_strain_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.strain import FleurStrainWorkChain
builder = FleurStrainWorkChain.get_builder()
def test_fleur_orbcontrol_wc_init(self):
"""
Test the interface of the orbcontrol workchain
"""
from aiida_fleur.workflows.orbcontrol import FleurOrbControlWorkChain
builder = FleurOrbControlWorkChain.get_builder()
def test_fleur_cfcoeff_wc_init(self):
"""
Test the interface of the cfcoeff workchain
"""
from aiida_fleur.workflows.cfcoeff import FleurCFCoeffWorkChain
builder = FleurCFCoeffWorkChain.get_builder()
| 0 | 0 | 0 |
2ac7127200f109cd777482f96ca25797a6b58e10 | 487 | py | Python | kudu/twisted/orbreapthread.py | UCSD-ANF/kudu | 5a5828d32996509674eb735b348ed0f90c57cf35 | [
"BSD-2-Clause"
] | null | null | null | kudu/twisted/orbreapthread.py | UCSD-ANF/kudu | 5a5828d32996509674eb735b348ed0f90c57cf35 | [
"BSD-2-Clause"
] | null | null | null | kudu/twisted/orbreapthread.py | UCSD-ANF/kudu | 5a5828d32996509674eb735b348ed0f90c57cf35 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Twisted-Friendly Orb Reap Threads
---------------------------------
"""
from twisted.internet.threads import deferToThread
import antelope.brttpkt
class OrbreapThr(antelope.brttpkt.OrbreapThr):
"""Twisted-compatible subclass of ``antelope.brttpkt.OrbreapThr``."""
def get(self):
"""Defer ``get`` to a thread.
:rtype: ``Deferred``
"""
d = deferToThread(
super(OrbreapThr, self).get)
return d
| 21.173913 | 73 | 0.585216 | #!/usr/bin/env python
"""
Twisted-Friendly Orb Reap Threads
---------------------------------
"""
from twisted.internet.threads import deferToThread
import antelope.brttpkt
class OrbreapThr(antelope.brttpkt.OrbreapThr):
"""Twisted-compatible subclass of ``antelope.brttpkt.OrbreapThr``."""
def get(self):
"""Defer ``get`` to a thread.
:rtype: ``Deferred``
"""
d = deferToThread(
super(OrbreapThr, self).get)
return d
| 0 | 0 | 0 |
81590da2889cd702084df85afd4b517902881aaa | 677 | py | Python | labs/lab8/client-UDP.py | ioanabirsan/python | 5cff2acf36092e450bb269b37a0571ee62ccec31 | [
"Apache-2.0"
] | null | null | null | labs/lab8/client-UDP.py | ioanabirsan/python | 5cff2acf36092e450bb269b37a0571ee62ccec31 | [
"Apache-2.0"
] | null | null | null | labs/lab8/client-UDP.py | ioanabirsan/python | 5cff2acf36092e450bb269b37a0571ee62ccec31 | [
"Apache-2.0"
] | null | null | null | # 2b. Implement a client for the deployed server at 2a: a script that receives
# from the command line an addr string, a port integer, and a string msg,
# and sends a UDP packet to the addr address, the port port, and the msg content.
import socket
import sys
if len(sys.argv) < 4:
print('Please enter <address> <port> <message')
else:
ADDRESS = sys.argv[1]
PORT = int(sys.argv[2])
MESSAGE = sys.argv[3:]
content = ''
for word in MESSAGE:
content += ' ' + word
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.connect((ADDRESS, PORT))
client_socket.send(content.encode())
client_socket.close()
| 28.208333 | 81 | 0.680945 | # 2b. Implement a client for the deployed server at 2a: a script that receives
# from the command line an addr string, a port integer, and a string msg,
# and sends a UDP packet to the addr address, the port port, and the msg content.
import socket
import sys
if len(sys.argv) < 4:
print('Please enter <address> <port> <message')
else:
ADDRESS = sys.argv[1]
PORT = int(sys.argv[2])
MESSAGE = sys.argv[3:]
content = ''
for word in MESSAGE:
content += ' ' + word
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.connect((ADDRESS, PORT))
client_socket.send(content.encode())
client_socket.close()
| 0 | 0 | 0 |
fc123140441076bc66b2fbf3bd27a5ad65b7bbc0 | 2,376 | py | Python | Atoi.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | 14 | 2020-10-15T21:47:18.000Z | 2021-12-01T06:06:51.000Z | Atoi.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | null | null | null | Atoi.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | 4 | 2020-06-15T14:40:45.000Z | 2021-06-15T06:22:03.000Z | # GHC_Codepath SE101
# Sandbox - 3
# 1. SE101:String to Integer (ATOI)
# Implement atoi which converts a string to an integer.
# The function first discards as many whitespace characters as necessary until the first non-whitespace character is found. Then, starting from this character,
# takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.
# The string can contain additional characters after those that form the integral number, which are ignored and have no effect on the behavior of this function.
# If the first sequence of non-whitespace characters in str is not a valid integral number, or if no such sequence exists because either str is empty or it contains
# only whitespace characters, no conversion is performed. If no valid conversion could be performed, a zero value is returned.
# Note:
# • Only the space character' is considered as whitespace character.
# • Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [-231. 231 - 1]. If the numerical value is out of the range of representable values, INT_MAX (231 - 1) or INT_MIN (-231) is returned.
#!/bin/python3
import math
import os
import random
import re
import sys
# The function is expected to return an INTEGER.
# The function accepts STRING a as parameter.
# The function will convert the string parameter
# into an integer, and return the result.
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
a = input()
result = atoi(a)
fptr.write(str(result) + '\n')
fptr.close()
| 32.108108 | 252 | 0.683923 | # GHC_Codepath SE101
# Sandbox - 3
# 1. SE101:String to Integer (ATOI)
# Implement atoi which converts a string to an integer.
# The function first discards as many whitespace characters as necessary until the first non-whitespace character is found. Then, starting from this character,
# takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.
# The string can contain additional characters after those that form the integral number, which are ignored and have no effect on the behavior of this function.
# If the first sequence of non-whitespace characters in str is not a valid integral number, or if no such sequence exists because either str is empty or it contains
# only whitespace characters, no conversion is performed. If no valid conversion could be performed, a zero value is returned.
# Note:
# • Only the space character' is considered as whitespace character.
# • Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [-231. 231 - 1]. If the numerical value is out of the range of representable values, INT_MAX (231 - 1) or INT_MIN (-231) is returned.
#!/bin/python3
import math
import os
import random
import re
import sys
# The function is expected to return an INTEGER.
# The function accepts STRING a as parameter.
# The function will convert the string parameter
# into an integer, and return the result.
def atoi(a):
# strip whitespaces from input
a = a.strip()
# if starting character is a letter
# return zero, as mentioned
if a[0].isalpha(): return 0
# to store the sign of number
sign = 0
if a.startswith("-"): sign = 1
# to index upto a digit
i = 0
while i < len(a) and not a[i].isdigit():
i += 1
# to take all digits until a space/alpha is found
ans = 0
while i < len(a) and a[i].isdigit():
ans = ans*10 + int(a[i])
i += 1
# replacing back the sign
if sign == 1:
ans = -ans
# following the boundary checks
if ans < -(2**31): return -2**31
if ans > 2**31 - 1: return 2**31 - 1
return ans
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
a = input()
result = atoi(a)
fptr.write(str(result) + '\n')
fptr.close()
| 691 | 0 | 23 |
69f72cc5a9141e1c539085b3a3d6f76d8141a85e | 261 | py | Python | fun/funuser/admin.py | larryw3i/fun | e753ce6d448f7f6ec3169a4d1fa7e1c7bff70a27 | [
"MIT"
] | null | null | null | fun/funuser/admin.py | larryw3i/fun | e753ce6d448f7f6ec3169a4d1fa7e1c7bff70a27 | [
"MIT"
] | 4 | 2021-06-12T06:05:44.000Z | 2021-06-13T06:20:00.000Z | fun/funuser/admin.py | larryw3i/fun | e753ce6d448f7f6ec3169a4d1fa7e1c7bff70a27 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
# Register your models here.
from django.utils.translation import gettext_lazy as _
from .models import Funuser
@admin.register(Funuser)
| 21.75 | 54 | 0.808429 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
# Register your models here.
from django.utils.translation import gettext_lazy as _
from .models import Funuser
@admin.register(Funuser)
class FunuserAdmin(UserAdmin):
pass
| 0 | 18 | 22 |
778a279ec5868e6910c58c9541a1dca1ef69ab62 | 1,686 | py | Python | hw4.py | jschmidtnj/CS115 | fa2374f1ae9c9b63e572850a97af6086112d7a36 | [
"MIT"
] | null | null | null | hw4.py | jschmidtnj/CS115 | fa2374f1ae9c9b63e572850a97af6086112d7a36 | [
"MIT"
] | null | null | null | hw4.py | jschmidtnj/CS115 | fa2374f1ae9c9b63e572850a97af6086112d7a36 | [
"MIT"
] | 1 | 2022-01-03T01:44:39.000Z | 2022-01-03T01:44:39.000Z | '''
Created on Oct 4, 2017
@author: jschmid3@stevens.edu
Pledge: I pledge my honor that I have abided by the Stevens Honor System -Joshua Schmidt
CS115 - hw4
'''
def pascal_row(n):
"""returns the pascal triangle row of the given integer n"""
return triangle(n + 1, [])[n]
def pascal_triangle(n):
"""returns the pascal triangle from 0 to n"""
return triangle(n + 1, [])
#TESTING
#print(pascal_row(0))
#print(pascal_triangle(3)) | 31.222222 | 91 | 0.463227 | '''
Created on Oct 4, 2017
@author: jschmid3@stevens.edu
Pledge: I pledge my honor that I have abided by the Stevens Honor System -Joshua Schmidt
CS115 - hw4
'''
def pascal_row(n):
"""returns the pascal triangle row of the given integer n"""
def triangle(n, lst):
if lst ==[]:
lst = [[1]]
if n == 1:
return lst
else:
oldRow = lst[-1]
def helpRows(lst1, lst2):
if lst1 == [] or lst2 == []:
return []
return [(lst1[0], lst2[0])] + helpRows(lst1[1:], lst2[1:])
def summation(lst):
if lst == []:
return []
return [sum(lst[0])] + summation(lst[1:])
newRow = [1] + summation(helpRows(oldRow, oldRow[1:])) + [1]
return triangle(n - 1, lst + [newRow])
return triangle(n + 1, [])[n]
def pascal_triangle(n):
"""returns the pascal triangle from 0 to n"""
def triangle(n, lst):
if lst ==[]:
lst = [[1]]
if n == 1:
return lst
else:
oldRow = lst[-1]
def helpRows(lst1, lst2):
if lst1 == [] or lst2 == []:
return []
return [(lst1[0], lst2[0])] + helpRows(lst1[1:], lst2[1:])
def summation(lst):
if lst == []:
return []
return [sum(lst[0])] + summation(lst[1:])
newRow = [1] + summation(helpRows(oldRow, oldRow[1:])) + [1]
return triangle(n - 1, lst + [newRow])
return triangle(n + 1, [])
#TESTING
#print(pascal_row(0))
#print(pascal_triangle(3)) | 1,184 | 0 | 52 |
abc38679bf270e05890f70052f6b07215ad5a045 | 409 | py | Python | setup.py | AnthonyDugarte/osl_api | 32f2b25582ac72a0cca6ca65c0d473db9806d5ba | [
"MIT"
] | null | null | null | setup.py | AnthonyDugarte/osl_api | 32f2b25582ac72a0cca6ca65c0d473db9806d5ba | [
"MIT"
] | null | null | null | setup.py | AnthonyDugarte/osl_api | 32f2b25582ac72a0cca6ca65c0d473db9806d5ba | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name='osl_api',
version='0.0.1',
packages=find_packages(),
author="Anthony Dugarte",
author_email="toonny1998@gmai.com",
description="Simple Python OSL Exchange API client which handles authorization for you and exposes a requests-like interface",
url="https://github.com/AnthonyDugarte/osl_api",
python_requires='>=3.5',
)
| 29.214286 | 130 | 0.721271 | from setuptools import find_packages, setup
setup(
name='osl_api',
version='0.0.1',
packages=find_packages(),
author="Anthony Dugarte",
author_email="toonny1998@gmai.com",
description="Simple Python OSL Exchange API client which handles authorization for you and exposes a requests-like interface",
url="https://github.com/AnthonyDugarte/osl_api",
python_requires='>=3.5',
)
| 0 | 0 | 0 |
15b90c640bd0c848b564f57706d32738fc51d1b6 | 31 | py | Python | spam/math/__init__.py | Lucky-Mano/Poetry_C_Extension_Example | 3d95d253063eb46e94cb9b1666e2463a9b7d0401 | [
"CC0-1.0"
] | 3 | 2020-07-03T11:31:31.000Z | 2021-01-12T01:04:38.000Z | spam/math/__init__.py | Lucky-Mano/Poetry_C_Extension_Example | 3d95d253063eb46e94cb9b1666e2463a9b7d0401 | [
"CC0-1.0"
] | null | null | null | spam/math/__init__.py | Lucky-Mano/Poetry_C_Extension_Example | 3d95d253063eb46e94cb9b1666e2463a9b7d0401 | [
"CC0-1.0"
] | null | null | null | from .cmath import add # noqa
| 15.5 | 30 | 0.709677 | from .cmath import add # noqa
| 0 | 0 | 0 |
b3da1798b53498fd2e2d8ce45778e7202631f7b4 | 4,312 | py | Python | dataflow/dags/enquiry_mgmt_pipelines.py | akumuthan-dev/data-flow | efb29a05136adff6c8d103f228568353e3eebee0 | [
"MIT"
] | null | null | null | dataflow/dags/enquiry_mgmt_pipelines.py | akumuthan-dev/data-flow | efb29a05136adff6c8d103f228568353e3eebee0 | [
"MIT"
] | null | null | null | dataflow/dags/enquiry_mgmt_pipelines.py | akumuthan-dev/data-flow | efb29a05136adff6c8d103f228568353e3eebee0 | [
"MIT"
] | null | null | null | import sqlalchemy as sa
from airflow.operators.python_operator import PythonOperator
from dataflow import config
from dataflow.dags import _PipelineDAG
from dataflow.operators.common import fetch_from_hawk_api
from dataflow.utils import TableConfig
| 40.679245 | 93 | 0.574212 | import sqlalchemy as sa
from airflow.operators.python_operator import PythonOperator
from dataflow import config
from dataflow.dags import _PipelineDAG
from dataflow.operators.common import fetch_from_hawk_api
from dataflow.utils import TableConfig
def field_transformation(record, table_config, contexts):
owner = record.pop("owner", None) or {}
enquirer = record.pop("enquirer", None) or {}
return {
**record,
"owner_id": owner.get("id"),
"enquirer_id": enquirer.get("id"),
}
class EnquiryMgmtEnquiriesPipeline(_PipelineDAG):
schedule_interval = "@daily"
use_utc_now_as_source_modified = True
table_config = TableConfig(
schema="enquiry_mgmt",
table_name="enquiries",
transforms=[field_transformation],
field_mapping=[
("id", sa.Column("id", sa.BigInteger, primary_key=True)),
("owner_id", sa.Column("owner_id", sa.BigInteger, index=True)),
("enquirer_id", sa.Column("enquirer_id", sa.BigInteger, index=True)),
("created", sa.Column("created", sa.DateTime, nullable=False, index=True)),
(
"modified",
sa.Column("modified", sa.DateTime, nullable=False, index=True),
),
("enquiry_stage", sa.Column("enquiry_stage", sa.String, nullable=False)),
(
"investment_readiness",
sa.Column("investment_readiness", sa.String, nullable=False),
),
("quality", sa.Column("quality", sa.String, nullable=False)),
(
"marketing_channel",
sa.Column("marketing_channel", sa.String, nullable=False),
),
(
"how_they_heard_dit",
sa.Column("how_they_heard_dit", sa.String, nullable=False),
),
("primary_sector", sa.Column("primary_sector", sa.String, nullable=False)),
("ist_sector", sa.Column("ist_sector", sa.String, nullable=False)),
("country", sa.Column("country", sa.String, nullable=False)),
("region", sa.Column("region", sa.String, nullable=False)),
(
"first_response_channel",
sa.Column("first_response_channel", sa.String, nullable=False),
),
("first_hpo_selection", sa.Column("first_hpo_selection", sa.String)),
(
"second_hpo_selection",
sa.Column("second_hpo_selection", sa.String, nullable=False),
),
(
"third_hpo_selection",
sa.Column("third_hpo_selection", sa.String, nullable=False),
),
(
"organisation_type",
sa.Column("organisation_type", sa.String, nullable=False),
),
(
"investment_type",
sa.Column("investment_type", sa.String, nullable=False),
),
("estimated_land_date", sa.Column("estimated_land_date", sa.DateTime)),
(
"new_existing_investor",
sa.Column("new_existing_investor", sa.String, nullable=False),
),
(
"investor_involvement_level",
sa.Column("investor_involvement_level", sa.String, nullable=False),
),
(
"specific_investment_programme",
sa.Column("specific_investment_programme", sa.String, nullable=False),
),
("date_added_to_datahub", sa.Column("date_added_to_datahub", sa.DateTime)),
("project_success_date", sa.Column("project_success_date", sa.DateTime)),
("date_received", sa.Column("date_received", sa.DateTime)),
],
)
def get_fetch_operator(self) -> PythonOperator:
return PythonOperator(
task_id="fetch-enquiries",
python_callable=fetch_from_hawk_api,
provide_context=True,
op_kwargs=dict(
table_name=self.table_config.table_name,
source_url=f"{config.ENQUIRY_MGMT_BASE_URL}/enquiries?page_size=1000&page=1",
hawk_credentials=config.ENQUIRY_MGMT_HAWK_CREDENTIALS,
),
retries=self.fetch_retries,
)
| 741 | 3,273 | 46 |
dd4c22d628438d194663f747596bc56c68677c5c | 10,028 | py | Python | research/GenKGC/data/data_module.py | zjunlp/PromptKG | 791bf82390eeadc30876d9f95e8dd26cd05de3dc | [
"MIT"
] | 11 | 2022-02-04T12:32:37.000Z | 2022-03-25T11:49:48.000Z | research/GenKGC/data/data_module.py | zjunlp/PromptKG | 791bf82390eeadc30876d9f95e8dd26cd05de3dc | [
"MIT"
] | null | null | null | research/GenKGC/data/data_module.py | zjunlp/PromptKG | 791bf82390eeadc30876d9f95e8dd26cd05de3dc | [
"MIT"
] | 4 | 2022-02-04T05:08:23.000Z | 2022-03-16T02:07:52.000Z | from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
from enum import Enum
import torch
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, BertTokenizer
# from transformers.configuration_bert import BertTokenizer, BertTokenizerFast
from transformers.tokenization_utils_base import (BatchEncoding,
PreTrainedTokenizerBase)
from .base_data_module import BaseDataModule
from .processor import KGProcessor, get_dataset
class ExplicitEnum(Enum):
"""
Enum with more explicit error message for missing values.
"""
@classmethod
class PaddingStrategy(ExplicitEnum):
"""
Possible values for the ``padding`` argument in :meth:`PreTrainedTokenizerBase.__call__`. Useful for tab-completion
in an IDE.
"""
LONGEST = "longest"
MAX_LENGTH = "max_length"
DO_NOT_PAD = "do_not_pad"
@dataclass
class DataCollatorForSeq2Seq:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
model (:class:`~transformers.PreTrainedModel`):
The model that is being trained. If set and has the `prepare_decoder_input_ids_from_labels`, use it to
prepare the `decoder_input_ids`
This is useful when using `label_smoothing` to avoid calculating loss twice.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
class Pipeline():
""" Pre-process Pipeline Class : callable """
| 45.375566 | 222 | 0.662545 | from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
from enum import Enum
import torch
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, BertTokenizer
# from transformers.configuration_bert import BertTokenizer, BertTokenizerFast
from transformers.tokenization_utils_base import (BatchEncoding,
PreTrainedTokenizerBase)
from .base_data_module import BaseDataModule
from .processor import KGProcessor, get_dataset
class ExplicitEnum(Enum):
"""
Enum with more explicit error message for missing values.
"""
@classmethod
def _missing_(cls, value):
raise ValueError(
f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
)
class PaddingStrategy(ExplicitEnum):
"""
Possible values for the ``padding`` argument in :meth:`PreTrainedTokenizerBase.__call__`. Useful for tab-completion
in an IDE.
"""
LONGEST = "longest"
MAX_LENGTH = "max_length"
DO_NOT_PAD = "do_not_pad"
@dataclass
class DataCollatorForSeq2Seq:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
model (:class:`~transformers.PreTrainedModel`):
The model that is being trained. If set and has the `prepare_decoder_input_ids_from_labels`, use it to
prepare the `decoder_input_ids`
This is useful when using `label_smoothing` to avoid calculating loss twice.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def __call__(self, features, return_tensors=None):
import numpy as np
if return_tensors is None:
return_tensors = self.return_tensors
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
ent = [_.pop("filter_ent_ids") for _ in features]
input_sentences = None
input_sentences = [_.pop("input_sentences") for _ in features]
relation_ids = [_.pop("relation_ids") for _ in features]
if labels is not None:
max_label_length = max(len(l) for l in labels)
padding_side = self.tokenizer.padding_side
for feature in features:
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
if isinstance(feature["labels"], list):
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
elif padding_side == "right":
feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
else:
feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
features = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=return_tensors,
)
features["filter_ent_ids"] = ent
if input_sentences[0]:
features["input_sentences"] = input_sentences
# prepare decoder_input_ids
if self.model is not None and hasattr(self.model, "prepare_decoder_input_ids_from_labels"):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
features["decoder_input_ids"] = decoder_input_ids
return features
class KGC(BaseDataModule):
    """Data module for knowledge-graph completion (KGC).

    Builds the tokenizer for the configured pretrained model, registers the
    "(reverse)" marker token plus one special token per relation, and exposes
    train/dev/test dataloaders that share one seq2seq collator.
    """

    def __init__(self, args, model) -> None:
        super().__init__(args)
        if "mbart" in args.model_name_or_path:
            self.tokenizer = AutoTokenizer.from_pretrained(self.args.model_name_or_path, add_prefix_space=True, src_lang=args.src_lang, tgt_lang=args.tgt_lang)
        else:
            if "Ali" in args.data_dir:
                # BertTokenizer ships no bos/eos of its own; reuse [CLS]/[SEP].
                self.tokenizer = BertTokenizer.from_pretrained(self.args.model_name_or_path, add_prefix_space=True)
                self.tokenizer.bos_token = self.tokenizer.cls_token
                self.tokenizer.eos_token = self.tokenizer.sep_token
            else:
                self.tokenizer = AutoTokenizer.from_pretrained(self.args.model_name_or_path, add_prefix_space=True)
        self.processor = KGProcessor()
        self.label_list = self.processor.get_labels(args.data_dir)
        spo_list = ["(reverse)"]
        if spo_list[0] not in self.tokenizer.additional_special_tokens:
            self.tokenizer.add_special_tokens({'additional_special_tokens': spo_list})
        # BUG FIX: the relation ids were built without the f-string prefix, so
        # every relation shared the single literal token "[RELATION_{i}]"; and
        # the guard re-tested spo_list[0] (registered just above) instead of
        # the relation tokens, so they were never added at all.
        relations_ids = [f"[RELATION_{i}]" for i in range(len(self.label_list))]
        if relations_ids and relations_ids[0] not in self.tokenizer.additional_special_tokens:
            self.tokenizer.add_special_tokens({'additional_special_tokens': relations_ids})
        self.sampler = DataCollatorForSeq2Seq(self.tokenizer,
                                              model=model,
                                              label_pad_token_id=self.tokenizer.pad_token_id,
                                              pad_to_multiple_of=8 if self.args.precision == 16 else None,
                                              padding="longest",
                                              max_length=self.args.max_seq_length
                                              )

    def setup(self, stage=None):
        """Materialize the train/dev/test datasets from args.data_dir."""
        self.data_train = get_dataset(self.args, self.processor, self.label_list, self.tokenizer, "train")
        self.data_val = get_dataset(self.args, self.processor, self.label_list, self.tokenizer, "dev")
        self.data_test = get_dataset(self.args, self.processor, self.label_list, self.tokenizer, "test")

    def prepare_data(self):
        # Nothing to download; data is read locally in setup().
        pass

    @staticmethod
    def add_to_argparse(parser):
        """Register KGC-specific CLI options on top of the base module's."""
        BaseDataModule.add_to_argparse(parser)
        parser.add_argument("--model_name_or_path", type=str, default="roberta-base", help="the name or the path to the pretrained model")
        parser.add_argument("--data_dir", type=str, default="roberta-base", help="the name or the path to the pretrained model")
        parser.add_argument("--max_seq_length", type=int, default=256, help="Number of examples to operate on per forward step.")
        parser.add_argument("--warm_up_radio", type=float, default=0.1, help="Number of examples to operate on per forward step.")
        parser.add_argument("--eval_batch_size", type=int, default=8)
        parser.add_argument("--overwrite_cache", action="store_true", default=False)
        parser.add_argument("--use_demos", type=int, default=0)
        return parser

    def get_tokenizer(self):
        return self.tokenizer

    def train_dataloader(self):
        # Shuffling is disabled for YAGO/Ali data — presumably order-sensitive; confirm.
        return DataLoader(self.data_train, num_workers=self.num_workers, pin_memory=True, collate_fn=self.sampler, batch_size=self.args.batch_size, shuffle=not ("YAGO" in self.args.data_dir or "Ali" in self.args.data_dir))

    def val_dataloader(self):
        return DataLoader(self.data_val, num_workers=self.num_workers, pin_memory=True, collate_fn=self.sampler, batch_size=self.args.eval_batch_size)

    def test_dataloader(self):
        return DataLoader(self.data_test, num_workers=self.num_workers, pin_memory=True, collate_fn=self.sampler, batch_size=self.args.eval_batch_size)
class Pipeline():
    """ Pre-process Pipeline Class : callable """

    def __init__(self):
        super().__init__()
        # Masking configuration; populated by subclasses or calling code.
        self.skipgram_prb = None
        self.skipgram_size = None
        self.pre_whole_word = None
        self.mask_whole_word = None
        self.vocab_words = None
        self.call_count = 0
        self.offline_mode = False
        self.skipgram_size_geo_list = None
        self.span_same_mask = False

    def init_skipgram_size_geo_list(self, p):
        """Precompute a truncated, renormalized geometric distribution over span sizes."""
        if p <= 0:
            return
        weights = []
        term = p
        for _ in range(self.skipgram_size):
            weights.append(term)
            term *= (1 - p)
        total = sum(weights)
        self.skipgram_size_geo_list = [w / total for w in weights]

    def __call__(self, instance):
        raise NotImplementedError
| 6,318 | 239 | 157 |
a4a08a12f64e0848af1f86b7b67e4c220433ce8f | 3,272 | py | Python | ircbot.py | Technik-Tueftler/TeTueTwitchBot | e67abc1493d02ec890a4d31dd57a5bc7715f85c5 | [
"BSD-3-Clause"
] | 2 | 2021-04-03T23:19:55.000Z | 2021-04-12T19:50:39.000Z | ircbot.py | Technik-Tueftler/TeTueTwitchBot | e67abc1493d02ec890a4d31dd57a5bc7715f85c5 | [
"BSD-3-Clause"
] | 4 | 2020-12-27T11:35:46.000Z | 2021-02-28T00:35:04.000Z | ircbot.py | Technik-Tueftler/TeTueTwitchBot | e67abc1493d02ec890a4d31dd57a5bc7715f85c5 | [
"BSD-3-Clause"
] | null | null | null | import logging
import sys
import tetueSrc
from irc.bot import SingleServerIRCBot
# config — connection settings plus credentials from the "bot" config section.
HOST = 'irc.twitch.tv'  # Twitch IRC gateway host
PORT = 6667  # plain (non-TLS) IRC port
# NOTE(review): read_successful is never checked here — confirm it is handled.
read_successful, cfg = tetueSrc.get_configuration("bot")
CLIENT_ID = cfg["client_id"]
owner = cfg["owner"]
USERNAME = cfg["name"].lower()  # lowercased login name — presumably required by Twitch; confirm
TOKEN = cfg["token"]
PASSWORD = f"oauth:{TOKEN}"  # "oauth:"-prefixed token for IRC login — confirm format
CHANNEL = f"#{owner}"  # IRC channel of the broadcaster
# NOTE(review): _get_logger is not defined in this excerpt; it is expected
# to exist elsewhere in the module.
logger = _get_logger()
if __name__ == '__main__':
main() | 33.387755 | 100 | 0.667176 | import logging
import sys
import tetueSrc
from irc.bot import SingleServerIRCBot
# config — connection settings plus credentials from the "bot" config section.
HOST = 'irc.twitch.tv'  # Twitch IRC gateway host
PORT = 6667  # plain (non-TLS) IRC port
# NOTE(review): read_successful is never checked — confirm failures are handled.
read_successful, cfg = tetueSrc.get_configuration("bot")
CLIENT_ID = cfg["client_id"]
owner = cfg["owner"]
USERNAME = cfg["name"].lower()
TOKEN = cfg["token"]
PASSWORD = f"oauth:{TOKEN}"  # "oauth:"-prefixed token for IRC login — confirm format
CHANNEL = f"#{owner}"
def _get_logger():
logger_name = 'vbot'
logger_level = logging.DEBUG
log_line_format = '%(asctime)s | %(name)s - %(levelname)s : %(message)s'
log_line_date_format = '%Y-%m-%dT%H:%M:%SZ'
logger_ = logging.getLogger(logger_name)
logger_.setLevel(logger_level)
logging_handler = logging.StreamHandler(stream=sys.stdout)
logging_handler.setLevel(logger_level)
logging_formatter = logging.Formatter(log_line_format, datefmt=log_line_date_format)
logging_handler.setFormatter(logging_formatter)
logger_.addHandler(logging_handler)
return logger_
# Module-wide logger, configured once at import time.
logger = _get_logger()
class VBot(SingleServerIRCBot):
    """Minimal Twitch IRC bot: greets the channel and tracks its viewers."""

    VERSION = '1.0.0'

    def __init__(self, host, port, nickname, password, channel):
        logger.debug('VBot.__init__ (VERSION = %r)', self.VERSION)
        SingleServerIRCBot.__init__(self, [(host, port, password)], nickname, nickname)
        self.channel = channel
        self.viewers = []  # nicknames currently present in the channel

    def on_welcome(self, connection, event):
        """Server accepted us: join the configured channel and greet it."""
        logger.debug('VBot.on_welcome')
        connection.join(self.channel)
        connection.privmsg(event.target, 'Hello world!')

    def on_join(self, connection, event):
        """Track the joining nickname; greet again when it is the bot itself."""
        logger.debug('VBot.on_join')
        nickname = self._parse_nickname_from_twitch_user_id(event.source)
        self.viewers.append(nickname)
        if nickname.lower() == connection.get_nickname().lower():
            connection.privmsg(event.target, 'Hello world!')

    def on_part(self, connection, event):
        """Remove a leaving nickname from the viewer list."""
        logger.debug('VBot.on_part')
        nickname = self._parse_nickname_from_twitch_user_id(event.source)
        self.viewers.remove(nickname)

    def on_pubmsg(self, connection, event):
        """React to '<botname>: <command>' style public messages."""
        logger.debug('VBot.on_pubmsg')
        message = event.arguments[0]
        logger.debug('message = %r', message)
        message_parts = message.split(":", 1)
        if len(message_parts) > 1 and message_parts[0].lower() == connection.get_nickname().lower():
            self.do_command(event, message_parts[1].strip())

    def do_command(self, event, command):
        """Dispatch one chat command (version / count_viewers / exit).

        BUG FIX: "version" used to sit in a separate `if` chain, so a valid
        "version" command also fell through to the trailing `else` and was
        logged as unrecognized. A single if/elif chain fixes that.
        """
        logger.debug('VBot.do_command (command = %r)', command)
        if command == "version":
            version_message = 'Version: %s' % self.VERSION
            self.connection.privmsg(event.target, version_message)
        elif command == "count_viewers":
            num_viewers = len(self.viewers)
            num_viewers_message = 'Viewer count: %d' % num_viewers
            self.connection.privmsg(event.target, num_viewers_message)
        elif command == 'exit':
            self.die(msg="")
        else:
            logger.error('Unrecognized command: %r', command)

    @staticmethod
    def _parse_nickname_from_twitch_user_id(user_id):
        # nickname!username@nickname.tmi.twitch.tv
        return user_id.split('!', 1)[0]
def main():
    """Build the bot from module-level configuration and run it forever."""
    bot = VBot(HOST, PORT, USERNAME, PASSWORD, CHANNEL)
    bot.start()
if __name__ == '__main__':
main() | 2,562 | 240 | 69 |
00c0bebe861360df0a011b3bca7648880c4db0b9 | 143 | py | Python | dvc/dependency/ssh.py | alustenberg/dvc | 2e9fac5fb3f5882904f8005209feabe0dd3d5003 | [
"Apache-2.0"
] | 1 | 2020-08-01T08:31:18.000Z | 2020-08-01T08:31:18.000Z | dvc/dependency/ssh.py | alustenberg/dvc | 2e9fac5fb3f5882904f8005209feabe0dd3d5003 | [
"Apache-2.0"
] | 82 | 2021-05-04T02:40:05.000Z | 2022-03-31T03:14:04.000Z | dvc/dependency/ssh.py | alustenberg/dvc | 2e9fac5fb3f5882904f8005209feabe0dd3d5003 | [
"Apache-2.0"
] | 2 | 2021-06-14T19:12:25.000Z | 2021-06-14T19:12:29.000Z | from dvc.dependency.base import BaseDependency
from dvc.output.ssh import SSHOutput
| 20.428571 | 47 | 0.818182 | from dvc.dependency.base import BaseDependency
from dvc.output.ssh import SSHOutput
class SSHDependency(BaseDependency, SSHOutput):
    """A stage dependency that lives on a remote SSH host.

    Pure marker class: dependency semantics come from ``BaseDependency``
    and SSH path handling from ``SSHOutput``; no behaviour is added here.
    """
    pass
| 0 | 35 | 23 |
30b2d20aabe754df59839a350547c33dc74259f2 | 867 | py | Python | prime.py | konglx90/algorithm | e79a0753d01a39f33482e5d33663ea81ecd435f8 | [
"MIT"
] | null | null | null | prime.py | konglx90/algorithm | e79a0753d01a39f33482e5d33663ea81ecd435f8 | [
"MIT"
] | null | null | null | prime.py | konglx90/algorithm | e79a0753d01a39f33482e5d33663ea81ecd435f8 | [
"MIT"
] | null | null | null | import math
# Python 2 script loop: read pairs (PM, PN) until EOF and print the PM-th
# through PN-th primes, ten per line.
# NOTE(review): relies on an isPrime() helper that is not part of this excerpt.
while True:
    try:
        (PM, PN) = (int(i) for i in raw_input().split())
        find_num_of_prime = 0
        i = 0
        # Advance i until the PM-th prime has been seen.
        while True:
            if isPrime(i):
                find_num_of_prime += 1
            if find_num_of_prime == PM:
                break
            i += 1
        x = []
        # Step back one so the PM-th prime itself is collected below.
        find_num_of_prime -= 1
        # Collect primes (starting with the current one) up to the PN-th.
        while True:
            if isPrime(i):
                x.append(i)
                find_num_of_prime += 1
            if find_num_of_prime == PN:
                break
            i += 1
        # Emit ten primes per output line (Python 2 print syntax).
        for j in range(len(x)):
            print x[j],
            if (j+1) % 10 == 0:
                print
        print
    except EOFError:
        break
| 21.675 | 56 | 0.417532 | import math
def isPrime(n):
    """Trial division: n is prime iff it has no divisor in [2, sqrt(n)]."""
    if n <= 1:
        return False
    limit = int(math.sqrt(n))
    for candidate in range(2, limit + 1):
        if n % candidate == 0:
            return False
    return True
# Python 2 script loop: read pairs (PM, PN) until EOF and print the PM-th
# through PN-th primes, ten per line.
while True:
    try:
        (PM, PN) = (int(i) for i in raw_input().split())
        find_num_of_prime = 0
        i = 0
        # Advance i until the PM-th prime has been seen.
        while True:
            if isPrime(i):
                find_num_of_prime += 1
            if find_num_of_prime == PM:
                break
            i += 1
        x = []
        # Step back one so the PM-th prime itself is collected below.
        find_num_of_prime -= 1
        # Collect primes (starting with the current one) up to the PN-th.
        while True:
            if isPrime(i):
                x.append(i)
                find_num_of_prime += 1
            if find_num_of_prime == PN:
                break
            i += 1
        # Emit ten primes per output line (Python 2 print syntax).
        for j in range(len(x)):
            print x[j],
            if (j+1) % 10 == 0:
                print
        print
    except EOFError:
        break
| 140 | 0 | 23 |
9ecdcedd4d35efe841bb994ee688395a329afa5b | 1,709 | py | Python | tests/core/test_material.py | pauliacomi/pyGAPS | c4d45b710e171c937471686437e382e05aec4ed5 | [
"MIT"
] | 35 | 2018-01-24T14:59:08.000Z | 2022-03-10T02:47:58.000Z | tests/core/test_material.py | pauliacomi/pyGAPS | c4d45b710e171c937471686437e382e05aec4ed5 | [
"MIT"
] | 29 | 2018-01-06T12:08:08.000Z | 2022-03-11T20:26:53.000Z | tests/core/test_material.py | pauliacomi/pyGAPS | c4d45b710e171c937471686437e382e05aec4ed5 | [
"MIT"
] | 20 | 2019-06-12T19:20:29.000Z | 2022-03-02T09:57:02.000Z | """Tests relating to the Material class."""
import pytest
import pygaps
import pygaps.utilities.exceptions as pgEx
@pytest.mark.core
class TestMaterial():
    """Test the material class."""

    def test_material_basic(self):
        """Basic creation tests."""
        first = pygaps.Material('material1')
        assert first == 'material1'
        assert first != 'Material1'
        second = pygaps.Material('material1')
        assert first == second

    def test_material_create(self, material_data, basic_material):
        """Check material can be created from test data."""
        assert basic_material.to_dict() == material_data

    def test_material_retrieved_list(self, material_data, basic_material):
        """Check material can be retrieved from master list."""
        pygaps.MATERIAL_LIST.append(basic_material)
        found = pygaps.Material.find(material_data.get('name'))
        assert found.to_dict() == material_data
        with pytest.raises(pgEx.ParameterError):
            pygaps.Material.find('noname')
        pygaps.MATERIAL_LIST.remove(basic_material)

    def test_material_get_properties(self, material_data, basic_material):
        """Check if properties of a material can be located."""
        assert basic_material.get_prop('density') == material_data.get('density')
        density = basic_material.properties.pop('density')
        with pytest.raises(pgEx.ParameterError):
            basic_material.get_prop('density')
        basic_material.properties['density'] = density

    def test_material_print(self, basic_material):
        """Checks the printing can be done."""
        print(basic_material)
| 34.877551 | 75 | 0.669397 | """Tests relating to the Material class."""
import pytest
import pygaps
import pygaps.utilities.exceptions as pgEx
@pytest.mark.core
class TestMaterial():
    """Test the material class."""

    def test_material_basic(self):
        """Basic creation tests."""
        first = pygaps.Material('material1')
        assert first == 'material1'
        assert first != 'Material1'
        second = pygaps.Material('material1')
        assert first == second

    def test_material_create(self, material_data, basic_material):
        """Check material can be created from test data."""
        assert basic_material.to_dict() == material_data

    def test_material_retrieved_list(self, material_data, basic_material):
        """Check material can be retrieved from master list."""
        pygaps.MATERIAL_LIST.append(basic_material)
        found = pygaps.Material.find(material_data.get('name'))
        assert found.to_dict() == material_data
        with pytest.raises(pgEx.ParameterError):
            pygaps.Material.find('noname')
        pygaps.MATERIAL_LIST.remove(basic_material)

    def test_material_get_properties(self, material_data, basic_material):
        """Check if properties of a material can be located."""
        assert basic_material.get_prop('density') == material_data.get('density')
        density = basic_material.properties.pop('density')
        with pytest.raises(pgEx.ParameterError):
            basic_material.get_prop('density')
        basic_material.properties['density'] = density

    def test_material_print(self, basic_material):
        """Checks the printing can be done."""
        print(basic_material)
| 0 | 0 | 0 |
3c702d158004e84a8def211d6901cce9989f6692 | 140 | py | Python | Code/Miscellaneous/PypeRExample.py | tchakravarty/PythonExamples | a20a866f0f1dcf6ca429e5114baac1e40cf1da42 | [
"Apache-2.0"
] | null | null | null | Code/Miscellaneous/PypeRExample.py | tchakravarty/PythonExamples | a20a866f0f1dcf6ca429e5114baac1e40cf1da42 | [
"Apache-2.0"
] | null | null | null | Code/Miscellaneous/PypeRExample.py | tchakravarty/PythonExamples | a20a866f0f1dcf6ca429e5114baac1e40cf1da42 | [
"Apache-2.0"
] | 1 | 2018-11-23T17:21:05.000Z | 2018-11-23T17:21:05.000Z | from pyper import R
for i in range(20):
r("a <- rbind(a, seq(1000000) * 1.0 * %d)" % i)
print r("sum(a)") | 23.333333 | 51 | 0.528571 | from pyper import R
def foo(r):
    """PypeR demo: build a 20-row matrix in R by repeated rbind of scaled
    1..1e6 sequences; the sum is printed by the (truncated) final statement."""
    r("a <- NULL")
    for i in range(20):
        r("a <- rbind(a, seq(1000000) * 1.0 * %d)" % i)
print r("sum(a)") | 9 | 0 | 22 |
7f4d2ebeae3cc67e39cbdfc48742c908fa328c2c | 7,557 | py | Python | mayday/features/mainpanel.py | codacy-badger/mayday-ticketing-bot | 7cbb1d201ececd2eb879c047e2cf7588862eb89f | [
"MIT"
] | null | null | null | mayday/features/mainpanel.py | codacy-badger/mayday-ticketing-bot | 7cbb1d201ececd2eb879c047e2cf7588862eb89f | [
"MIT"
] | null | null | null | mayday/features/mainpanel.py | codacy-badger/mayday-ticketing-bot | 7cbb1d201ececd2eb879c047e2cf7588862eb89f | [
"MIT"
] | null | null | null | import time
import traceback
import telegram
from telegram.ext.dispatcher import run_async
from mayday import LogConfig
from mayday.constants import conversations, stages
from mayday.constants.replykeyboards import ReplyKeyboards
from mayday.controllers.redis import RedisHelper
from mayday.features import (platform_stats, post_ticket, quick_search, search,
support, update_ticket)
from mayday.utils import log_util
from mayday.validators import authenticator
# Module-level singletons shared by every handler in this feature module.
flogger = LogConfig.flogger
KEYBOARDS = ReplyKeyboards()
REDIS = RedisHelper()
@run_async
@run_async
@run_async
@run_async
@run_async
@run_async
| 32.294872 | 109 | 0.615059 | import time
import traceback
import telegram
from telegram.ext.dispatcher import run_async
from mayday import LogConfig
from mayday.constants import conversations, stages
from mayday.constants.replykeyboards import ReplyKeyboards
from mayday.controllers.redis import RedisHelper
from mayday.features import (platform_stats, post_ticket, quick_search, search,
support, update_ticket)
from mayday.utils import log_util
from mayday.validators import authenticator
# Module-level singletons shared by every handler in this feature module.
flogger = LogConfig.flogger
KEYBOARDS = ReplyKeyboards()
REDIS = RedisHelper()
@run_async
def start(bot, update, user_data, chat_data):
    """Entry point: authenticate the Telegram user and show the main panel.

    Returns stages.END for rejected users, stages.MAIN_PANEL on success.
    """
    try:
        telegram_info = update._effective_user
        auth = authenticator.auth(telegram_info)
        flogger.info('user: {}, username:{}, auth:{}'.format(telegram_info.id, telegram_info.username, auth))
        # Reject users without a visible Telegram username.
        if auth.is_username_valid is False:
            msg = log_util.get_ub_log(
                user_id=telegram_info.id,
                username=telegram_info.username,
                funcname=__name__,
                callback_data='auth',
                error='username is missing'
            )
            flogger.warning(msg)
            update.message.reply_text(conversations.MAIN_PANEL_USERNAME_MISSING)
            return stages.END
        if auth.is_admin:
            # TODO: Add Admin Panel
            pass
        # Banned users are turned away before any menu is shown.
        if auth.is_banned:
            msg = log_util.get_ub_log(
                user_id=telegram_info.id,
                username=telegram_info.username,
                funcname=__name__,
                callback_data='auth',
                error='banned'
            )
            flogger.warning(msg)
            update.message.reply_text(conversations.MAIN_PANEL_YELLOWCOW)
            return stages.END
        # Authenticated: show the reminder, then the action keyboard.
        if auth.status:
            msg = log_util.get_ub_log(
                user_id=telegram_info.id,
                username=telegram_info.username,
                funcname=__name__,
                callback_data='auth',
            )
            flogger.info(msg)
            update.message.reply_text(conversations.MAIN_PANEL_REMINDER)
            time.sleep(0.5)
            bot.sendMessage(
                chat_id=update.message.chat.id,
                text=conversations.MAIN_PANEL_START.format_map({'username': telegram_info.username}),
                reply_markup=KEYBOARDS.actions_keyboard_markup
            )
            return stages.MAIN_PANEL
    except Exception:
        # NOTE(review): if update._effective_user itself raised, telegram_info
        # is unbound here and this handler would raise NameError.
        msg = log_util.get_ub_log(
            user_id=telegram_info.id,
            username=telegram_info.username,
            funcname=__name__,
            callback_data='start',
            extra=str(update),
            trace_back=str(traceback.format_exc())
        )
        flogger.error(msg)
@run_async
def route(bot, update, user_data, chat_data):
    """Dispatch a main-panel button press to its feature module.

    Returns the next conversation stage for the selected feature.
    """
    try:
        telegram_info = update._effective_user
        callback_data = update.callback_query.data
        flogger.info("user_data: {}, update: {}".format(user_data, update))
        msg = log_util.get_ub_log(
            user_id=telegram_info.id,
            username=telegram_info.username,
            funcname=__name__,
            callback_data=callback_data,
        )
        flogger.info(msg)
        if callback_data == 'info':
            # Static info page rendered in place of the current panel.
            telegram_info = update._effective_user  # redundant: already fetched above
            bot.edit_message_text(
                text=conversations.INFO,
                chat_id=telegram_info.id,
                message_id=update.callback_query.message.message_id,
                reply_markup=KEYBOARDS.actions_keyboard_markup
            )
            return stages.MAIN_PANEL
        if callback_data == 'post':
            post_ticket.start(bot, update, user_data)
            return stages.POST_SELECT_FIELD
        if callback_data == 'search':
            search.start(bot, update, user_data)
            return stages.SEARCH_SELECT_FIELD
        if callback_data == 'stats':
            platform_stats.stats(bot, update, user_data)
            return stages.TICKET_STAT_LIST
        if callback_data == 'my_ticket':
            update_ticket.start(bot, update, user_data)
            return stages.UPDATE_SELECT_TICKET
        if callback_data == 'events':
            support.start(bot, update, user_data)
            return stages.SUPPORT_EVENT_LIST
        if callback_data == 'quick_search':
            quick_search.start(bot, update, user_data)
            return stages.QUICK_SEARCH_MODE_SELECTION
        if callback_data == 'bye':
            done(bot, update, user_data, chat_data)
            return stages.END
    except Exception:
        # NOTE(review): callback_data='done' looks copy-pasted from done() —
        # presumably it should identify this route() handler instead.
        msg = log_util.get_ub_log(
            user_id=telegram_info.id,
            username=telegram_info.username,
            funcname=__name__,
            callback_data='done',
            extra=str(update),
            trace_back=str(traceback.format_exc())
        )
        flogger.error(msg)
@run_async
def done(bot, update, user_data, chat_data):
    """Send the goodbye photo and end the conversation."""
    telegram_info = update._effective_user
    # Callback-button presses and plain messages store the chat id in
    # different places; try the callback location first.
    try:
        chat_id = update.callback_query.message.chat.id
    except Exception:
        chat_id = update.message.chat.id
    try:
        flogger.info(log_util.get_ub_log(
            user_id=telegram_info.id,
            username=telegram_info.username,
            funcname=__name__,
            callback_data='done'))
        bot.sendPhoto(
            chat_id=chat_id,
            photo=REDIS.direct_read('MAYDAY-BOT-CONFIG-GOODBYE_PHOTO_ID'),
            caption=conversations.MAIN_PANEL_DONE)
        return stages.END
    except Exception:
        # Fallback: send the photo by URL when the cached file id fails,
        # and record what went wrong.
        bot.sendPhoto(
            chat_id=chat_id,
            photo=REDIS.direct_read('MAYDAY-BOT-CONFIG-GOODBYE_PHOTO_URL'),
            caption=conversations.MAIN_PANEL_DONE)
        msg = log_util.get_ub_log(
            user_id=telegram_info.id,
            username=telegram_info.username,
            funcname=__name__,
            callback_data='done_catch_miss',
            extra=str(update),
            trace_back=str(traceback.format_exc()))
        flogger.warning(msg)
        return stages.END
@run_async
def error(bot, update, error):
    """Dispatcher-level error callback: log the failing update.

    NOTE(review): the ``error`` parameter shadows the function's own name.
    """
    # TODO: Amend the log format
    flogger.error('Update "%s" caused error "%s"' % (update, error))
@run_async
def help(bot, update):
    """Replace the current panel with the timeout text and end the conversation.

    NOTE(review): shadows the builtin ``help`` and reuses MAIN_PANEL_TIMEOUT
    as its message — confirm both are intentional.
    """
    bot.edit_message_text(
        chat_id=update.callback_query.message.chat.id,
        message_id=update.callback_query.message.message_id,
        text=conversations.MAIN_PANEL_TIMEOUT)
    return stages.END
@run_async
def timeout(bot, update, chat_data):
    """Handle conversation timeout: inform the user and end the conversation."""
    # Chat id location differs between callback updates and plain messages.
    try:
        chat_id = update.callback_query.message.chat.id
    except Exception:
        chat_id = update.message.chat.id
    try:
        telegram_info = update._effective_user
        msg = log_util.get_ub_log(
            user_id=telegram_info.id,
            username=telegram_info.username,
            funcname=__name__,
            callback_data='timeout'
        )
        flogger.info(msg)
        bot.edit_message_text(
            chat_id=chat_id,
            message_id=update.callback_query.message.message_id,
            text=conversations.MAIN_PANEL_TIMEOUT)
        return stages.END
    except Exception:
        # No editable callback message: fall back to a fresh message.
        # NOTE(review): telegram_info is unbound here if update._effective_user
        # raised above.
        msg = log_util.get_ub_log(
            user_id=telegram_info.id,
            username=telegram_info.username,
            funcname=__name__,
            callback_data='timeout',
            extra=str(update)
        )
        flogger.warning(msg)
        bot.sendMessage(
            chat_id=chat_id,
            text=conversations.MAIN_PANEL_TIMEOUT
        )
        return stages.END
| 6,775 | 0 | 132 |
a3966c97a421cef76170dd71988515b787fbb048 | 79 | py | Python | program.py | nielsds/gitbetter | 405d906ef585e9cb28315f7fd8401246c37c40d0 | [
"Apache-2.0"
] | 1 | 2020-06-01T19:18:27.000Z | 2020-06-01T19:18:27.000Z | program.py | nielsds/gitbetter | 405d906ef585e9cb28315f7fd8401246c37c40d0 | [
"Apache-2.0"
] | null | null | null | program.py | nielsds/gitbetter | 405d906ef585e9cb28315f7fd8401246c37c40d0 | [
"Apache-2.0"
] | null | null | null | '''hello world'''
def hello():
    '''Hello world'''
    greeting = 'hello world'
    print(greeting)
| 13.166667 | 24 | 0.544304 | '''hello world'''
def hello():
    '''Hello world'''
    greeting = 'hello world'
    print(greeting)
| 0 | 0 | 0 |
24026c71573a4758d6a859f52c4145159637db25 | 1,909 | py | Python | master/spark_unit.py | Ica-Riluci/simSpark | 820c3e3d46319e4c6e80efc3ed7bfdb6ceffb6fb | [
"Apache-2.0"
] | null | null | null | master/spark_unit.py | Ica-Riluci/simSpark | 820c3e3d46319e4c6e80efc3ed7bfdb6ceffb6fb | [
"Apache-2.0"
] | 15 | 2018-12-19T14:51:19.000Z | 2019-01-13T16:07:31.000Z | master/spark_unit.py | Ica-Riluci/simSpark | 820c3e3d46319e4c6e80efc3ed7bfdb6ceffb6fb | [
"Apache-2.0"
] | 1 | 2018-12-19T14:46:18.000Z | 2018-12-19T14:46:18.000Z | from datetime import datetime, timedelta | 31.295082 | 83 | 0.644316 | from datetime import datetime, timedelta
class SparkUnit:
    """Network endpoint shared by every simulated Spark component."""

    def __init__(self, address, port):
        # Stored under the attribute names the subclasses rely on.
        self.host, self.port = address, port
class ApplicationUnit(SparkUnit):
    # Class-level counter doubling as the application id generator.
    app_count = 0
    def __init__(self, address, port, name, did):
        ApplicationUnit.app_count += 1
        super(ApplicationUnit, self).__init__(address, port)
        self.app_name = name
        # Id of the driver that submitted this application.
        self.driver_id = did
        # Presumably -1 marks "no executor request recorded yet" — confirm.
        self.executors_req = -1
        self.executor_list = []
        self.state = 'WAIT'
        self.app_id = ApplicationUnit.app_count
class DriverUnit(SparkUnit):
    # Class-level counter doubling as the driver id generator.
    driver_count = 0
    def __init__(self, address, port):
        DriverUnit.driver_count += 1
        super(DriverUnit, self).__init__(address, port)
        self.driver_id = DriverUnit.driver_count
        self.app_id = None
    def set_app_id(self, aid=None):
        # Bind (or, with None, unbind) this driver to an application.
        self.app_id = aid
class ExecutorUnit(SparkUnit):
    # Class-level counter doubling as the executor id generator.
    executor_count = 0
    def __init__(self, address, port, wid, aid):
        ExecutorUnit.executor_count += 1
        super(ExecutorUnit, self).__init__(address, port)
        self.executor_id = ExecutorUnit.executor_count
        # Hosting worker and owning application.
        self.worker_id = wid
        self.app_id = aid
        self.state = 'WAIT'
class WorkerUnit(SparkUnit):
    # Class-level counter doubling as the worker id generator.
    worker_count = 0
    def __init__(self, address, port):
        WorkerUnit.worker_count += 1
        super(WorkerUnit, self).__init__(address, port)
        self.worker_id = WorkerUnit.worker_count
        self.alive = True
        self.last_heartbeat = datetime.now()
        self.executor_list = []
    def heartbeat_expired(self, lim):
        # True once more than `lim` seconds passed since the last heartbeat.
        return self.last_heartbeat + timedelta(seconds=lim) < datetime.now()
    def dead(self, lim, it):
        # True after `it` consecutive heartbeat windows have elapsed unanswered.
        return self.last_heartbeat + timedelta(seconds=(lim * it)) < datetime.now()
    def awake(self):
        self.alive = True
def update_heartbeat(self, hb):
self.last_heartbeat = hb | 1,364 | 356 | 149 |
92d6fc3cc6332e2f45105aac103d46c01feea1d5 | 6,627 | py | Python | apps/life_sci/examples/property_prediction/utils.py | arangoml/dgl | d135058f9986fadcbdf6aa1011a00c3ad45a8ce3 | [
"Apache-2.0"
] | null | null | null | apps/life_sci/examples/property_prediction/utils.py | arangoml/dgl | d135058f9986fadcbdf6aa1011a00c3ad45a8ce3 | [
"Apache-2.0"
] | null | null | null | apps/life_sci/examples/property_prediction/utils.py | arangoml/dgl | d135058f9986fadcbdf6aa1011a00c3ad45a8ce3 | [
"Apache-2.0"
] | null | null | null | import dgl
import numpy as np
import random
import torch
from dgllife.utils.featurizers import one_hot_encoding
from dgllife.utils.mol_to_graph import smiles_to_bigraph
from dgllife.utils.splitters import RandomSplitter
def set_random_seed(seed=0):
    """Set random seed.
    Parameters
    ----------
    seed : int
        Random seed to use
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Only the current CUDA device is seeded (manual_seed, not manual_seed_all).
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
def load_dataset_for_classification(args):
    """Load dataset for classification tasks.
    Parameters
    ----------
    args : dict
        Configurations.
    Returns
    -------
    dataset
        The whole dataset.
    train_set
        Subset for training.
    val_set
        Subset for validation.
    test_set
        Subset for test.
    """
    # Only Tox21 is wired up so far; fail fast on anything else.
    assert args['dataset'] in ['Tox21']
    if args['dataset'] == 'Tox21':
        from dgllife.data import Tox21
        dataset = Tox21(smiles_to_bigraph, args['atom_featurizer'])
        train_set, val_set, test_set = RandomSplitter.train_val_test_split(
            dataset, frac_train=args['frac_train'], frac_val=args['frac_val'],
            frac_test=args['frac_test'], random_state=args['random_seed'])
    return dataset, train_set, val_set, test_set
def load_dataset_for_regression(args):
    """Load dataset for regression tasks.
    Parameters
    ----------
    args : dict
        Configurations.
    Returns
    -------
    train_set
        Subset for training.
    val_set
        Subset for validation.
    test_set
        Subset for test.
    """
    assert args['dataset'] in ['Alchemy', 'Aromaticity']
    if args['dataset'] == 'Alchemy':
        from dgllife.data import TencentAlchemyDataset
        # Alchemy ships fixed dev/valid splits; no test split is returned here.
        train_set = TencentAlchemyDataset(mode='dev')
        val_set = TencentAlchemyDataset(mode='valid')
        test_set = None
    if args['dataset'] == 'Aromaticity':
        from dgllife.data import PubChemBioAssayAromaticity
        dataset = PubChemBioAssayAromaticity(smiles_to_bigraph,
                                             args['atom_featurizer'],
                                             args['bond_featurizer'])
        train_set, val_set, test_set = RandomSplitter.train_val_test_split(
            dataset, frac_train=args['frac_train'], frac_val=args['frac_val'],
            frac_test=args['frac_test'], random_state=args['random_seed'])
    return train_set, val_set, test_set
def collate_molgraphs(data):
    """Batching a list of datapoints for dataloader.
    Parameters
    ----------
    data : list of 3-tuples or 4-tuples.
        Each tuple is for a single datapoint, consisting of
        a SMILES, a DGLGraph, all-task labels and optionally
        a binary mask indicating the existence of labels.
    Returns
    -------
    smiles : list
        List of smiles
    bg : DGLGraph
        The batched DGLGraph.
    labels : Tensor of dtype float32 and shape (B, T)
        Batched datapoint labels. B is len(data) and
        T is the number of total tasks.
    masks : Tensor of dtype float32 and shape (B, T)
        Batched datapoint binary mask, indicating the
        existence of labels. If binary masks are not
        provided, return a tensor with ones.
    """
    # Samples may come with or without a label-existence mask.
    assert len(data[0]) in [3, 4], \
        'Expect the tuple to be of length 3 or 4, got {:d}'.format(len(data[0]))
    if len(data[0]) == 3:
        smiles, graphs, labels = map(list, zip(*data))
        masks = None
    else:
        smiles, graphs, labels, masks = map(list, zip(*data))
    # Merge the molecular graphs into one batched DGLGraph.
    bg = dgl.batch(graphs)
    bg.set_n_initializer(dgl.init.zero_initializer)
    bg.set_e_initializer(dgl.init.zero_initializer)
    labels = torch.stack(labels, dim=0)
    if masks is None:
        # No masks supplied: treat every label as observed.
        masks = torch.ones(labels.shape)
    else:
        masks = torch.stack(masks, dim=0)
    return smiles, bg, labels, masks
| 36.61326 | 88 | 0.586842 | import dgl
import numpy as np
import random
import torch
from dgllife.utils.featurizers import one_hot_encoding
from dgllife.utils.mol_to_graph import smiles_to_bigraph
from dgllife.utils.splitters import RandomSplitter
def set_random_seed(seed=0):
    """Seed Python's, NumPy's and PyTorch's RNGs for reproducibility.

    Parameters
    ----------
    seed : int
        Random seed to use
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
def load_dataset_for_classification(args):
    """Load dataset for classification tasks.
    Parameters
    ----------
    args : dict
        Configurations.
    Returns
    -------
    dataset
        The whole dataset.
    train_set
        Subset for training.
    val_set
        Subset for validation.
    test_set
        Subset for test.
    """
    # Only Tox21 is wired up so far; fail fast on anything else.
    assert args['dataset'] in ['Tox21']
    if args['dataset'] == 'Tox21':
        from dgllife.data import Tox21
        dataset = Tox21(smiles_to_bigraph, args['atom_featurizer'])
        train_set, val_set, test_set = RandomSplitter.train_val_test_split(
            dataset, frac_train=args['frac_train'], frac_val=args['frac_val'],
            frac_test=args['frac_test'], random_state=args['random_seed'])
    return dataset, train_set, val_set, test_set
def load_dataset_for_regression(args):
    """Load dataset for regression tasks.
    Parameters
    ----------
    args : dict
        Configurations.
    Returns
    -------
    train_set
        Subset for training.
    val_set
        Subset for validation.
    test_set
        Subset for test.
    """
    assert args['dataset'] in ['Alchemy', 'Aromaticity']
    if args['dataset'] == 'Alchemy':
        from dgllife.data import TencentAlchemyDataset
        # Alchemy ships fixed dev/valid splits; no test split is returned here.
        train_set = TencentAlchemyDataset(mode='dev')
        val_set = TencentAlchemyDataset(mode='valid')
        test_set = None
    if args['dataset'] == 'Aromaticity':
        from dgllife.data import PubChemBioAssayAromaticity
        dataset = PubChemBioAssayAromaticity(smiles_to_bigraph,
                                             args['atom_featurizer'],
                                             args['bond_featurizer'])
        train_set, val_set, test_set = RandomSplitter.train_val_test_split(
            dataset, frac_train=args['frac_train'], frac_val=args['frac_val'],
            frac_test=args['frac_test'], random_state=args['random_seed'])
    return train_set, val_set, test_set
def collate_molgraphs(data):
    """Batching a list of datapoints for dataloader.
    Parameters
    ----------
    data : list of 3-tuples or 4-tuples.
        Each tuple is for a single datapoint, consisting of
        a SMILES, a DGLGraph, all-task labels and optionally
        a binary mask indicating the existence of labels.
    Returns
    -------
    smiles : list
        List of smiles
    bg : DGLGraph
        The batched DGLGraph.
    labels : Tensor of dtype float32 and shape (B, T)
        Batched datapoint labels. B is len(data) and
        T is the number of total tasks.
    masks : Tensor of dtype float32 and shape (B, T)
        Batched datapoint binary mask, indicating the
        existence of labels. If binary masks are not
        provided, return a tensor with ones.
    """
    # Samples may come with or without a label-existence mask.
    assert len(data[0]) in [3, 4], \
        'Expect the tuple to be of length 3 or 4, got {:d}'.format(len(data[0]))
    if len(data[0]) == 3:
        smiles, graphs, labels = map(list, zip(*data))
        masks = None
    else:
        smiles, graphs, labels, masks = map(list, zip(*data))
    # Merge the molecular graphs into one batched DGLGraph.
    bg = dgl.batch(graphs)
    bg.set_n_initializer(dgl.init.zero_initializer)
    bg.set_e_initializer(dgl.init.zero_initializer)
    labels = torch.stack(labels, dim=0)
    if masks is None:
        # No masks supplied: treat every label as observed.
        masks = torch.ones(labels.shape)
    else:
        masks = torch.stack(masks, dim=0)
    return smiles, bg, labels, masks
def load_model(args):
    """Instantiate the predictor named by ``args['model']``.

    Parameters
    ----------
    args : dict
        Configuration; ``args['model']`` selects the architecture and the
        remaining keys supply its hyperparameters.

    Returns
    -------
    The freshly constructed dgllife predictor.

    Raises
    ------
    ValueError
        If ``args['model']`` is unknown. (The original fell through every
        branch and crashed with UnboundLocalError on ``return model``.)
    """
    if args['model'] == 'GCN':
        from dgllife.model import GCNPredictor
        model = GCNPredictor(in_feats=args['in_feats'],
                             hidden_feats=args['gcn_hidden_feats'],
                             classifier_hidden_feats=args['classifier_hidden_feats'],
                             n_tasks=args['n_tasks'])
    elif args['model'] == 'GAT':
        from dgllife.model import GATPredictor
        model = GATPredictor(in_feats=args['in_feats'],
                             hidden_feats=args['gat_hidden_feats'],
                             num_heads=args['num_heads'],
                             classifier_hidden_feats=args['classifier_hidden_feats'],
                             n_tasks=args['n_tasks'])
    elif args['model'] == 'AttentiveFP':
        from dgllife.model import AttentiveFPPredictor
        model = AttentiveFPPredictor(node_feat_size=args['node_feat_size'],
                                     edge_feat_size=args['edge_feat_size'],
                                     num_layers=args['num_layers'],
                                     num_timesteps=args['num_timesteps'],
                                     graph_feat_size=args['graph_feat_size'],
                                     n_tasks=args['n_tasks'],
                                     dropout=args['dropout'])
    elif args['model'] == 'SchNet':
        from dgllife.model import SchNetPredictor
        model = SchNetPredictor(node_feats=args['node_feats'],
                                hidden_feats=args['hidden_feats'],
                                classifier_hidden_feats=args['classifier_hidden_feats'],
                                n_tasks=args['n_tasks'])
    elif args['model'] == 'MGCN':
        from dgllife.model import MGCNPredictor
        model = MGCNPredictor(feats=args['feats'],
                              n_layers=args['n_layers'],
                              classifier_hidden_feats=args['classifier_hidden_feats'],
                              n_tasks=args['n_tasks'])
    elif args['model'] == 'MPNN':
        from dgllife.model import MPNNPredictor
        model = MPNNPredictor(node_in_feats=args['node_in_feats'],
                              edge_in_feats=args['edge_in_feats'],
                              node_out_feats=args['node_out_feats'],
                              edge_hidden_feats=args['edge_hidden_feats'],
                              n_tasks=args['n_tasks'])
    else:
        raise ValueError('Unexpected model: {}'.format(args['model']))
    return model
def chirality(atom):
    """Featurize an RDKit atom's chirality.

    Returns a 3-element list: a one-hot of the CIP code over ('R', 'S') —
    all False when the atom carries no '_CIPCode' property — followed by
    the '_ChiralityPossible' flag.
    """
    try:
        return one_hot_encoding(atom.GetProp('_CIPCode'), ['R', 'S']) + \
               [atom.HasProp('_ChiralityPossible')]
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. GetProp raises when '_CIPCode' is absent; fall
        # back to the "no CIP code" encoding in that case.
        return [False, False] + [atom.HasProp('_ChiralityPossible')]
| 2,673 | 0 | 46 |
d26077b2ffb49f48ab0288639e011d891c76c8af | 941 | py | Python | ejemplos hpc/powercouples/base/powercouples.py | NicoCaro/cepal_estudiantes | 91d886263ff91a93ad37f4db25e8bac888e65d3e | [
"MIT"
] | null | null | null | ejemplos hpc/powercouples/base/powercouples.py | NicoCaro/cepal_estudiantes | 91d886263ff91a93ad37f4db25e8bac888e65d3e | [
"MIT"
] | null | null | null | ejemplos hpc/powercouples/base/powercouples.py | NicoCaro/cepal_estudiantes | 91d886263ff91a93ad37f4db25e8bac888e65d3e | [
"MIT"
] | null | null | null | import os
import sys
import argparse
import csv
import itertools
import pew.pew as p
# Serial driver: for every CSV row ("name, n1, n2, ..."), find the pair of
# numbers with the highest pew score and write "name, a, b" to the output CSV.
# NOTE(review): find_powerCouple is not defined in this excerpt — it must
# exist alongside this entry point.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='PowerCouples Serial native version')
    parser.add_argument('-i','--input', dest="input_csv", help="input file in csv format", required=True, type=argparse.FileType('r'))
    parser.add_argument('-o','--output', dest="output_csv", help="output file in csv format", default=sys.stdout, type=argparse.FileType('w'))
    args = parser.parse_args()
    out = csv.writer(args.output_csv)
    for row in csv.reader(args.input_csv):
        name = row[0]
        numbers = [int(i) for i in row[1:] ]
        pc = find_powerCouple(numbers)
        out.writerow( (name, pc[0], pc[1]) )
| 26.885714 | 142 | 0.66525 | import os
import sys
import argparse
import csv
import itertools
import pew.pew as p
def pow(x):
    """Return ``x[0] ** x[1]`` for a 2-sequence ``x``.

    NOTE(review): shadows the builtin ``pow`` inside this module; the name
    is kept because it is part of the module's public surface.
    """
    base, exponent = x
    return base ** exponent
def pewT(x):
    """Apply ``pew.pew`` to the pair ``x``; used as a ``key`` for ``max``."""
    first, second = x
    return p.pew(first, second)
def find_powerCouple(numbers):
    """Return the ordered pair drawn from *numbers* that maximises ``pewT``."""
    return max(itertools.permutations(numbers, 2), key=pewT)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PowerCouples Serial native version')
parser.add_argument('-i','--input', dest="input_csv", help="input file in csv format", required=True, type=argparse.FileType('r'))
parser.add_argument('-o','--output', dest="output_csv", help="output file in csv format", default=sys.stdout, type=argparse.FileType('w'))
args = parser.parse_args()
out = csv.writer(args.output_csv)
for row in csv.reader(args.input_csv):
name = row[0]
numbers = [int(i) for i in row[1:] ]
pc = find_powerCouple(numbers)
out.writerow( (name, pc[0], pc[1]) )
| 120 | 0 | 69 |
be431ee99df5f493cc808cf88d93040dd2d8793d | 1,010 | py | Python | twitterConn.py | Msgana/Twitter_Picture_Update | aa5e393c0ac56af5a7ade26ff5da7692fcf9e52e | [
"MIT"
] | 1 | 2022-03-04T22:46:59.000Z | 2022-03-04T22:46:59.000Z | twitterConn.py | Msgana/Twitter_Picture_Update | aa5e393c0ac56af5a7ade26ff5da7692fcf9e52e | [
"MIT"
] | null | null | null | twitterConn.py | Msgana/Twitter_Picture_Update | aa5e393c0ac56af5a7ade26ff5da7692fcf9e52e | [
"MIT"
] | null | null | null | # importing the module
import tweepy
import os
# personal details
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
# authentication of consumer key and secret
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# authentication of access token and secret
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# time stamp images before positng to twitter
# post image function
| 28.857143 | 82 | 0.729703 | # importing the module
import tweepy
import os
# personal details
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
# authentication of consumer key and secret
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# authentication of access token and secret
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# time stamp images before positng to twitter
def timeStamp(filepath):
    """Overlay the current date/time onto the image at *filepath*, in place.

    Requires ImageMagick's ``convert`` binary at ``/usr/bin/convert``.

    The original implementation referenced undefined names
    (``currentTime``, ``completeFilePath``, ``call``), ignored its
    *filepath* argument, and interpolated text into a ``shell=True``
    command string (a shell-injection risk).  This version imports what it
    needs, uses the argument, and passes an argument list with the default
    ``shell=False``.
    """
    from datetime import datetime
    from subprocess import call

    # Create our stamp text, e.g. "2024.01.31 - 13:05:59".
    timestampMessage = datetime.now().strftime("%Y.%m.%d - %H:%M:%S")
    # Annotate the image in place; list form avoids any shell parsing.
    call([
        "/usr/bin/convert", filepath,
        "-pointsize", "30",
        "-fill", "green",
        "-annotate", "+750+700", timestampMessage,
        filepath,
    ])
# post image function
def postImage(filepath):
    """Upload the image at *filepath* and post a status referencing it."""
    uploaded = api.media_upload(filepath)
    caption = ""
    api.update_status(status=caption, media_ids=[uploaded.media_id])
| 513 | 0 | 44 |
6aa35b6265cf55bdcc08534087fa8d96b3896661 | 17,523 | py | Python | umap/utils.py | cjweir/umap | 60d6b7be30e5d9c40746dcf6052bec09478942b6 | [
"BSD-3-Clause"
] | 16 | 2021-01-11T19:39:15.000Z | 2022-01-26T14:39:00.000Z | umap/utils.py | cjweir/umap | 60d6b7be30e5d9c40746dcf6052bec09478942b6 | [
"BSD-3-Clause"
] | 3 | 2021-05-11T10:30:43.000Z | 2021-05-21T07:24:47.000Z | umap/utils.py | cjweir/umap | 60d6b7be30e5d9c40746dcf6052bec09478942b6 | [
"BSD-3-Clause"
] | 2 | 2021-05-13T12:24:34.000Z | 2021-06-08T14:03:07.000Z | # Author: Leland McInnes <leland.mcinnes@gmail.com>
#
# License: BSD 3 clause
import time
import numpy as np
import numba
import scipy.sparse
@numba.njit(parallel=True)
def fast_knn_indices(X, n_neighbors):
    """A fast computation of knn indices.

    Parameters
    ----------
    X: array of shape (n_samples, n_features)
        The input data to compute the k-neighbor indices of.

    n_neighbors: int
        The number of nearest neighbors to compute for each sample in ``X``.

    Returns
    -------
    knn_indices: array of shape (n_samples, n_neighbors)
        The indices on the ``n_neighbors`` closest points in the dataset.
    """
    knn_indices = np.empty((X.shape[0], n_neighbors), dtype=np.int32)
    # Rows are independent, so prange parallelises over samples.
    for row in numba.prange(X.shape[0]):
        # v = np.argsort(X[row])  # Need to call argsort this way for numba
        v = X[row].argsort(kind="quicksort")
        # Ascending argsort: the first n_neighbors positions are the smallest
        # entries of the row (assumes rows hold distances -- TODO confirm caller).
        v = v[:n_neighbors]
        knn_indices[row] = v
    return knn_indices
@numba.njit("i4(i8[:])")
def tau_rand_int(state):
"""A fast (pseudo)-random number generator.
Parameters
----------
state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
A (pseudo)-random int32 value
"""
state[0] = (((state[0] & 4294967294) << 12) & 0xFFFFFFFF) ^ (
(((state[0] << 13) & 0xFFFFFFFF) ^ state[0]) >> 19
)
state[1] = (((state[1] & 4294967288) << 4) & 0xFFFFFFFF) ^ (
(((state[1] << 2) & 0xFFFFFFFF) ^ state[1]) >> 25
)
state[2] = (((state[2] & 4294967280) << 17) & 0xFFFFFFFF) ^ (
(((state[2] << 3) & 0xFFFFFFFF) ^ state[2]) >> 11
)
return state[0] ^ state[1] ^ state[2]
@numba.njit("f4(i8[:])")
def tau_rand(state):
"""A fast (pseudo)-random number generator for floats in the range [0,1]
Parameters
----------
state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
A (pseudo)-random float32 in the interval [0, 1]
"""
integer = tau_rand_int(state)
return abs(float(integer) / 0x7FFFFFFF)
@numba.njit()
def norm(vec):
    """Compute the (standard l2) norm of a vector.

    Parameters
    ----------
    vec: array of shape (dim,)

    Returns
    -------
    The l2 norm of vec.
    """
    # Explicit loop rather than np.linalg.norm keeps this numba-friendly.
    result = 0.0
    for i in range(vec.shape[0]):
        result += vec[i] ** 2
    return np.sqrt(result)
@numba.njit()
def rejection_sample(n_samples, pool_size, rng_state):
    """Generate n_samples many integers from 0 to pool_size such that no
    integer is selected twice. The duplication constraint is achieved via
    rejection sampling.

    Note: assumes ``n_samples <= pool_size``; otherwise the rejection loop
    cannot terminate.

    Parameters
    ----------
    n_samples: int
        The number of random samples to select from the pool

    pool_size: int
        The size of the total pool of candidates to sample from

    rng_state: array of int64, shape (3,)
        Internal state of the random number generator

    Returns
    -------
    sample: array of shape(n_samples,)
        The ``n_samples`` randomly selected elements from the pool.
    """
    result = np.empty(n_samples, dtype=np.int64)
    for i in range(n_samples):
        reject_sample = True
        j = 0
        while reject_sample:
            # Draw uniformly from the pool, then re-draw if we already
            # selected this value earlier.
            j = tau_rand_int(rng_state) % pool_size
            for k in range(i):
                if j == result[k]:
                    break
            else:
                # for/else: no duplicate found, accept the draw.
                reject_sample = False
        result[i] = j
    return result
@numba.njit()
def make_heap(n_points, size):
    """Constructor for the numba enabled heap objects. The heaps are used
    for approximate nearest neighbor search, maintaining a list of potential
    neighbors sorted by their distance. We also flag if potential neighbors
    are newly added to the list or not. Internally this is stored as
    a single ndarray; the first axis determines whether we are looking at the
    array of candidate indices, the array of distances, or the flag array for
    whether elements are new or not. Each of these arrays are of shape
    (``n_points``, ``size``)

    Parameters
    ----------
    n_points: int
        The number of data points to track in the heap.

    size: int
        The number of items to keep on the heap for each data point.

    Returns
    -------
    heap: An ndarray suitable for passing to other numba enabled heap functions.
    """
    result = np.zeros(
        (np.int64(3), np.int64(n_points), np.int64(size)), dtype=np.float64
    )
    # Plane 0: candidate indices (-1 == empty slot).
    result[0] = -1
    # Plane 1: distances (inf == empty slot, so any real distance beats it).
    result[1] = np.infty
    # Plane 2: "is new" flags, all initially false.
    result[2] = 0
    return result
@numba.njit("i8(f8[:,:,:],i8,f8,i8,i8)")
def heap_push(heap, row, weight, index, flag):
"""Push a new element onto the heap. The heap stores potential neighbors
for each data point. The ``row`` parameter determines which data point we
are addressing, the ``weight`` determines the distance (for heap sorting),
the ``index`` is the element to add, and the flag determines whether this
is to be considered a new addition.
Parameters
----------
heap: ndarray generated by ``make_heap``
The heap object to push into
row: int
Which actual heap within the heap object to push to
weight: float
The priority value of the element to push onto the heap
index: int
The actual value to be pushed
flag: int
Whether to flag the newly added element or not.
Returns
-------
success: The number of new elements successfully pushed into the heap.
"""
row = int(row)
indices = heap[0, row]
weights = heap[1, row]
is_new = heap[2, row]
if weight >= weights[0]:
return 0
# break if we already have this element.
for i in range(indices.shape[0]):
if index == indices[i]:
return 0
# insert val at position zero
weights[0] = weight
indices[0] = index
is_new[0] = flag
# descend the heap, swapping values until the max heap criterion is met
i = 0
while True:
ic1 = 2 * i + 1
ic2 = ic1 + 1
if ic1 >= heap.shape[2]:
break
elif ic2 >= heap.shape[2]:
if weights[ic1] > weight:
i_swap = ic1
else:
break
elif weights[ic1] >= weights[ic2]:
if weight < weights[ic1]:
i_swap = ic1
else:
break
else:
if weight < weights[ic2]:
i_swap = ic2
else:
break
weights[i] = weights[i_swap]
indices[i] = indices[i_swap]
is_new[i] = is_new[i_swap]
i = i_swap
weights[i] = weight
indices[i] = index
is_new[i] = flag
return 1
@numba.njit("i8(f8[:,:,:],i8,f8,i8,i8)")
def unchecked_heap_push(heap, row, weight, index, flag):
"""Push a new element onto the heap. The heap stores potential neighbors
for each data point. The ``row`` parameter determines which data point we
are addressing, the ``weight`` determines the distance (for heap sorting),
the ``index`` is the element to add, and the flag determines whether this
is to be considered a new addition.
Parameters
----------
heap: ndarray generated by ``make_heap``
The heap object to push into
row: int
Which actual heap within the heap object to push to
weight: float
The priority value of the element to push onto the heap
index: int
The actual value to be pushed
flag: int
Whether to flag the newly added element or not.
Returns
-------
success: The number of new elements successfully pushed into the heap.
"""
if weight >= heap[1, row, 0]:
return 0
indices = heap[0, row]
weights = heap[1, row]
is_new = heap[2, row]
# insert val at position zero
weights[0] = weight
indices[0] = index
is_new[0] = flag
# descend the heap, swapping values until the max heap criterion is met
i = 0
while True:
ic1 = 2 * i + 1
ic2 = ic1 + 1
if ic1 >= heap.shape[2]:
break
elif ic2 >= heap.shape[2]:
if weights[ic1] > weight:
i_swap = ic1
else:
break
elif weights[ic1] >= weights[ic2]:
if weight < weights[ic1]:
i_swap = ic1
else:
break
else:
if weight < weights[ic2]:
i_swap = ic2
else:
break
weights[i] = weights[i_swap]
indices[i] = indices[i_swap]
is_new[i] = is_new[i_swap]
i = i_swap
weights[i] = weight
indices[i] = index
is_new[i] = flag
return 1
@numba.njit()
def siftdown(heap1, heap2, elt):
    """Restore the heap property for a heap with an out of place element
    at position ``elt``. This works with a heap pair where heap1 carries
    the weights and heap2 holds the corresponding elements.

    Both arrays are mutated in place; heap2 is kept in lock-step with the
    swaps performed on heap1.
    """
    while elt * 2 + 1 < heap1.shape[0]:
        left_child = elt * 2 + 1
        right_child = left_child + 1
        swap = elt

        # Pick the largest of the element and its (up to two) children.
        if heap1[swap] < heap1[left_child]:
            swap = left_child

        if right_child < heap1.shape[0] and heap1[swap] < heap1[right_child]:
            swap = right_child

        if swap == elt:
            # Already larger than both children: max-heap property holds.
            break
        else:
            heap1[elt], heap1[swap] = (heap1[swap], heap1[elt])
            heap2[elt], heap2[swap] = (heap2[swap], heap2[elt])
            elt = swap
@numba.njit()
def deheap_sort(heap):
    """Given an array of heaps (of indices and weights), unpack the heap
    out to give and array of sorted lists of indices and weights by increasing
    weight. This is effectively just the second half of heap sort (the first
    half not being required since we already have the data in a heap).

    Sorts in place: the index/weight planes of ``heap`` are reordered and
    then returned.

    Parameters
    ----------
    heap : array of shape (3, n_samples, n_neighbors)
        The heap to turn into sorted lists.

    Returns
    -------
    indices, weights: arrays of shape (n_samples, n_neighbors)
        The indices and weights sorted by increasing weight.
    """
    indices = heap[0]
    weights = heap[1]

    for i in range(indices.shape[0]):

        ind_heap = indices[i]
        dist_heap = weights[i]

        for j in range(ind_heap.shape[0] - 1):
            # Swap the max-heap root (largest remaining weight) to the end...
            ind_heap[0], ind_heap[ind_heap.shape[0] - j - 1] = (
                ind_heap[ind_heap.shape[0] - j - 1],
                ind_heap[0],
            )
            dist_heap[0], dist_heap[dist_heap.shape[0] - j - 1] = (
                dist_heap[dist_heap.shape[0] - j - 1],
                dist_heap[0],
            )

            # ...then restore the heap property on the shrunken prefix.
            siftdown(
                dist_heap[: dist_heap.shape[0] - j - 1],
                ind_heap[: ind_heap.shape[0] - j - 1],
                0,
            )

    return indices.astype(np.int64), weights
@numba.njit("i8(f8[:, :, :],i8)")
def smallest_flagged(heap, row):
"""Search the heap for the smallest element that is
still flagged.
Parameters
----------
heap: array of shape (3, n_samples, n_neighbors)
The heaps to search
row: int
Which of the heaps to search
Returns
-------
index: int
The index of the smallest flagged element
of the ``row``th heap, or -1 if no flagged
elements remain in the heap.
"""
ind = heap[0, row]
dist = heap[1, row]
flag = heap[2, row]
min_dist = np.inf
result_index = -1
for i in range(ind.shape[0]):
if flag[i] == 1 and dist[i] < min_dist:
min_dist = dist[i]
result_index = i
if result_index >= 0:
flag[result_index] = 0.0
return int(ind[result_index])
else:
return -1
@numba.njit(parallel=True)
def build_candidates(current_graph, n_vertices, n_neighbors, max_candidates, rng_state):
    """Build a heap of candidate neighbors for nearest neighbor descent. For
    each vertex the candidate neighbors are any current neighbors, and any
    vertices that have the vertex as one of their nearest neighbors.

    Also clears the "new" flags in ``current_graph`` as a side effect.

    NOTE(review): decorated ``parallel=True`` but the outer loop uses plain
    ``range``; pushes touch both rows ``i`` and ``idx`` so a parallel loop
    would race -- the serial loop appears deliberate, confirm before changing.

    Parameters
    ----------
    current_graph: heap
        The current state of the graph for nearest neighbor descent.

    n_vertices: int
        The total number of vertices in the graph.

    n_neighbors: int
        The number of neighbor edges per node in the current graph.

    max_candidates: int
        The maximum number of new candidate neighbors.

    rng_state: array of int64, shape (3,)
        The internal state of the rng

    Returns
    -------
    candidate_neighbors: A heap with an array of (randomly sorted) candidate
    neighbors for each vertex in the graph.
    """
    candidate_neighbors = make_heap(n_vertices, max_candidates)
    for i in range(n_vertices):
        for j in range(n_neighbors):
            if current_graph[0, i, j] < 0:
                # Empty slot (index -1).
                continue
            idx = current_graph[0, i, j]
            isn = current_graph[2, i, j]
            # Random weight => the candidate heap holds a random subset.
            d = tau_rand(rng_state)
            # Record the edge in both directions.
            heap_push(candidate_neighbors, i, d, idx, isn)
            heap_push(candidate_neighbors, idx, d, i, isn)
            current_graph[2, i, j] = 0
    return candidate_neighbors
@numba.njit()
def new_build_candidates(
    current_graph, n_vertices, n_neighbors, max_candidates, rng_state, rho=0.5
):  # pragma: no cover
    """Build a heap of candidate neighbors for nearest neighbor descent. For
    each vertex the candidate neighbors are any current neighbors, and any
    vertices that have the vertex as one of their nearest neighbors.

    Edges flagged "new" go into the new-candidate heap, the rest into the
    old-candidate heap; each edge is only considered with probability ``rho``.

    Parameters
    ----------
    current_graph: heap
        The current state of the graph for nearest neighbor descent.

    n_vertices: int
        The total number of vertices in the graph.

    n_neighbors: int
        The number of neighbor edges per node in the current graph.

    max_candidates: int
        The maximum number of new candidate neighbors.

    rng_state: array of int64, shape (3,)
        The internal state of the rng

    rho: float (optional, default 0.5)
        Sampling rate: the probability that a given edge is considered.

    Returns
    -------
    candidate_neighbors: A heap with an array of (randomly sorted) candidate
    neighbors for each vertex in the graph.
    """
    new_candidate_neighbors = make_heap(n_vertices, max_candidates)
    old_candidate_neighbors = make_heap(n_vertices, max_candidates)

    for i in range(n_vertices):
        for j in range(n_neighbors):
            if current_graph[0, i, j] < 0:
                # Empty slot (index -1).
                continue
            idx = current_graph[0, i, j]
            isn = current_graph[2, i, j]
            d = tau_rand(rng_state)
            if tau_rand(rng_state) < rho:
                c = 0
                if isn:
                    # New edge: record in both directions in the new heap.
                    c += heap_push(new_candidate_neighbors, i, d, idx, isn)
                    c += heap_push(new_candidate_neighbors, idx, d, i, isn)
                else:
                    heap_push(old_candidate_neighbors, i, d, idx, isn)
                    heap_push(old_candidate_neighbors, idx, d, i, isn)

                if c > 0:
                    # Edge made it into a new-candidate heap: clear its flag.
                    current_graph[2, i, j] = 0

    return new_candidate_neighbors, old_candidate_neighbors
@numba.njit(parallel=True)
def submatrix(dmat, indices_col, n_neighbors):
    """Return a submatrix given an orginal matrix and the indices to keep.

    Parameters
    ----------
    dmat: array, shape (n_samples, n_samples)
        Original matrix.

    indices_col: array, shape (n_samples, n_neighbors)
        Indices to keep. Each row consists of the indices of the columns.

    n_neighbors: int
        Number of neighbors.

    Returns
    -------
    submat: array, shape (n_samples, n_neighbors)
        The corresponding submatrix.
    """
    n_samples_transform, n_samples_fit = dmat.shape
    submat = np.zeros((n_samples_transform, n_neighbors), dtype=dmat.dtype)
    # Per-row gather: submat[i, j] = dmat[i, indices_col[i, j]].
    for i in numba.prange(n_samples_transform):
        for j in numba.prange(n_neighbors):
            submat[i, j] = dmat[i, indices_col[i, j]]
    return submat
# Generates a timestamp for use in logging messages when verbose=True
# I'm not enough of a numba ninja to numba this successfully.
# np.arrays of lists, which are objects...
def csr_unique(matrix, return_index=True, return_inverse=True, return_counts=True):
    """Find the unique rows of a sparse csr matrix.

    The unique matrix itself is never materialised, leaving that to the
    caller, who may not want to duplicate a massive array in memory.

    matrix: a csr matrix
    return_index = bool, optional
        If true, return the row indices of 'matrix'
    return_inverse: bool, optional
        If true, return the the indices of the unique array that can be
        used to reconstruct 'matrix'.
    return_counts = bool, optional
        If true, returns the number of times each unique item appears in 'matrix'

    The unique matrix can computed via
    unique_matrix = matrix[index]
    and the original matrix reconstructed via
    unique_matrix[inverse]
    """
    as_lil = matrix.tolil()
    # Fuse each row's column indices and values into a single comparable key.
    row_keys = [cols + vals for cols, vals in zip(as_lil.rows, as_lil.data)]
    n_requested = return_index + return_inverse + return_counts
    # np.unique returns (unique, [index], [inverse], [counts]); slice off
    # the unique values themselves and hand back only what was requested.
    unique_results = np.unique(
        row_keys,
        return_index=return_index,
        return_inverse=return_inverse,
        return_counts=return_counts,
    )
    return unique_results[1 : n_requested + 1]
| 29.107973 | 88 | 0.605319 | # Author: Leland McInnes <leland.mcinnes@gmail.com>
#
# License: BSD 3 clause
import time
import numpy as np
import numba
import scipy.sparse
@numba.njit(parallel=True)
def fast_knn_indices(X, n_neighbors):
"""A fast computation of knn indices.
Parameters
----------
X: array of shape (n_samples, n_features)
The input data to compute the k-neighbor indices of.
n_neighbors: int
The number of nearest neighbors to compute for each sample in ``X``.
Returns
-------
knn_indices: array of shape (n_samples, n_neighbors)
The indices on the ``n_neighbors`` closest points in the dataset.
"""
knn_indices = np.empty((X.shape[0], n_neighbors), dtype=np.int32)
for row in numba.prange(X.shape[0]):
# v = np.argsort(X[row]) # Need to call argsort this way for numba
v = X[row].argsort(kind="quicksort")
v = v[:n_neighbors]
knn_indices[row] = v
return knn_indices
@numba.njit("i4(i8[:])")
def tau_rand_int(state):
"""A fast (pseudo)-random number generator.
Parameters
----------
state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
A (pseudo)-random int32 value
"""
state[0] = (((state[0] & 4294967294) << 12) & 0xFFFFFFFF) ^ (
(((state[0] << 13) & 0xFFFFFFFF) ^ state[0]) >> 19
)
state[1] = (((state[1] & 4294967288) << 4) & 0xFFFFFFFF) ^ (
(((state[1] << 2) & 0xFFFFFFFF) ^ state[1]) >> 25
)
state[2] = (((state[2] & 4294967280) << 17) & 0xFFFFFFFF) ^ (
(((state[2] << 3) & 0xFFFFFFFF) ^ state[2]) >> 11
)
return state[0] ^ state[1] ^ state[2]
@numba.njit("f4(i8[:])")
def tau_rand(state):
"""A fast (pseudo)-random number generator for floats in the range [0,1]
Parameters
----------
state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
A (pseudo)-random float32 in the interval [0, 1]
"""
integer = tau_rand_int(state)
return abs(float(integer) / 0x7FFFFFFF)
@numba.njit()
def norm(vec):
"""Compute the (standard l2) norm of a vector.
Parameters
----------
vec: array of shape (dim,)
Returns
-------
The l2 norm of vec.
"""
result = 0.0
for i in range(vec.shape[0]):
result += vec[i] ** 2
return np.sqrt(result)
@numba.njit()
def rejection_sample(n_samples, pool_size, rng_state):
"""Generate n_samples many integers from 0 to pool_size such that no
integer is selected twice. The duplication constraint is achieved via
rejection sampling.
Parameters
----------
n_samples: int
The number of random samples to select from the pool
pool_size: int
The size of the total pool of candidates to sample from
rng_state: array of int64, shape (3,)
Internal state of the random number generator
Returns
-------
sample: array of shape(n_samples,)
The ``n_samples`` randomly selected elements from the pool.
"""
result = np.empty(n_samples, dtype=np.int64)
for i in range(n_samples):
reject_sample = True
j = 0
while reject_sample:
j = tau_rand_int(rng_state) % pool_size
for k in range(i):
if j == result[k]:
break
else:
reject_sample = False
result[i] = j
return result
@numba.njit()
def make_heap(n_points, size):
"""Constructor for the numba enabled heap objects. The heaps are used
for approximate nearest neighbor search, maintaining a list of potential
neighbors sorted by their distance. We also flag if potential neighbors
are newly added to the list or not. Internally this is stored as
a single ndarray; the first axis determines whether we are looking at the
array of candidate indices, the array of distances, or the flag array for
whether elements are new or not. Each of these arrays are of shape
(``n_points``, ``size``)
Parameters
----------
n_points: int
The number of data points to track in the heap.
size: int
The number of items to keep on the heap for each data point.
Returns
-------
heap: An ndarray suitable for passing to other numba enabled heap functions.
"""
result = np.zeros(
(np.int64(3), np.int64(n_points), np.int64(size)), dtype=np.float64
)
result[0] = -1
result[1] = np.infty
result[2] = 0
return result
@numba.njit("i8(f8[:,:,:],i8,f8,i8,i8)")
def heap_push(heap, row, weight, index, flag):
"""Push a new element onto the heap. The heap stores potential neighbors
for each data point. The ``row`` parameter determines which data point we
are addressing, the ``weight`` determines the distance (for heap sorting),
the ``index`` is the element to add, and the flag determines whether this
is to be considered a new addition.
Parameters
----------
heap: ndarray generated by ``make_heap``
The heap object to push into
row: int
Which actual heap within the heap object to push to
weight: float
The priority value of the element to push onto the heap
index: int
The actual value to be pushed
flag: int
Whether to flag the newly added element or not.
Returns
-------
success: The number of new elements successfully pushed into the heap.
"""
row = int(row)
indices = heap[0, row]
weights = heap[1, row]
is_new = heap[2, row]
if weight >= weights[0]:
return 0
# break if we already have this element.
for i in range(indices.shape[0]):
if index == indices[i]:
return 0
# insert val at position zero
weights[0] = weight
indices[0] = index
is_new[0] = flag
# descend the heap, swapping values until the max heap criterion is met
i = 0
while True:
ic1 = 2 * i + 1
ic2 = ic1 + 1
if ic1 >= heap.shape[2]:
break
elif ic2 >= heap.shape[2]:
if weights[ic1] > weight:
i_swap = ic1
else:
break
elif weights[ic1] >= weights[ic2]:
if weight < weights[ic1]:
i_swap = ic1
else:
break
else:
if weight < weights[ic2]:
i_swap = ic2
else:
break
weights[i] = weights[i_swap]
indices[i] = indices[i_swap]
is_new[i] = is_new[i_swap]
i = i_swap
weights[i] = weight
indices[i] = index
is_new[i] = flag
return 1
@numba.njit("i8(f8[:,:,:],i8,f8,i8,i8)")
def unchecked_heap_push(heap, row, weight, index, flag):
"""Push a new element onto the heap. The heap stores potential neighbors
for each data point. The ``row`` parameter determines which data point we
are addressing, the ``weight`` determines the distance (for heap sorting),
the ``index`` is the element to add, and the flag determines whether this
is to be considered a new addition.
Parameters
----------
heap: ndarray generated by ``make_heap``
The heap object to push into
row: int
Which actual heap within the heap object to push to
weight: float
The priority value of the element to push onto the heap
index: int
The actual value to be pushed
flag: int
Whether to flag the newly added element or not.
Returns
-------
success: The number of new elements successfully pushed into the heap.
"""
if weight >= heap[1, row, 0]:
return 0
indices = heap[0, row]
weights = heap[1, row]
is_new = heap[2, row]
# insert val at position zero
weights[0] = weight
indices[0] = index
is_new[0] = flag
# descend the heap, swapping values until the max heap criterion is met
i = 0
while True:
ic1 = 2 * i + 1
ic2 = ic1 + 1
if ic1 >= heap.shape[2]:
break
elif ic2 >= heap.shape[2]:
if weights[ic1] > weight:
i_swap = ic1
else:
break
elif weights[ic1] >= weights[ic2]:
if weight < weights[ic1]:
i_swap = ic1
else:
break
else:
if weight < weights[ic2]:
i_swap = ic2
else:
break
weights[i] = weights[i_swap]
indices[i] = indices[i_swap]
is_new[i] = is_new[i_swap]
i = i_swap
weights[i] = weight
indices[i] = index
is_new[i] = flag
return 1
@numba.njit()
def siftdown(heap1, heap2, elt):
"""Restore the heap property for a heap with an out of place element
at position ``elt``. This works with a heap pair where heap1 carries
the weights and heap2 holds the corresponding elements."""
while elt * 2 + 1 < heap1.shape[0]:
left_child = elt * 2 + 1
right_child = left_child + 1
swap = elt
if heap1[swap] < heap1[left_child]:
swap = left_child
if right_child < heap1.shape[0] and heap1[swap] < heap1[right_child]:
swap = right_child
if swap == elt:
break
else:
heap1[elt], heap1[swap] = (heap1[swap], heap1[elt])
heap2[elt], heap2[swap] = (heap2[swap], heap2[elt])
elt = swap
@numba.njit()
def deheap_sort(heap):
"""Given an array of heaps (of indices and weights), unpack the heap
out to give and array of sorted lists of indices and weights by increasing
weight. This is effectively just the second half of heap sort (the first
half not being required since we already have the data in a heap).
Parameters
----------
heap : array of shape (3, n_samples, n_neighbors)
The heap to turn into sorted lists.
Returns
-------
indices, weights: arrays of shape (n_samples, n_neighbors)
The indices and weights sorted by increasing weight.
"""
indices = heap[0]
weights = heap[1]
for i in range(indices.shape[0]):
ind_heap = indices[i]
dist_heap = weights[i]
for j in range(ind_heap.shape[0] - 1):
ind_heap[0], ind_heap[ind_heap.shape[0] - j - 1] = (
ind_heap[ind_heap.shape[0] - j - 1],
ind_heap[0],
)
dist_heap[0], dist_heap[dist_heap.shape[0] - j - 1] = (
dist_heap[dist_heap.shape[0] - j - 1],
dist_heap[0],
)
siftdown(
dist_heap[: dist_heap.shape[0] - j - 1],
ind_heap[: ind_heap.shape[0] - j - 1],
0,
)
return indices.astype(np.int64), weights
@numba.njit("i8(f8[:, :, :],i8)")
def smallest_flagged(heap, row):
"""Search the heap for the smallest element that is
still flagged.
Parameters
----------
heap: array of shape (3, n_samples, n_neighbors)
The heaps to search
row: int
Which of the heaps to search
Returns
-------
index: int
The index of the smallest flagged element
of the ``row``th heap, or -1 if no flagged
elements remain in the heap.
"""
ind = heap[0, row]
dist = heap[1, row]
flag = heap[2, row]
min_dist = np.inf
result_index = -1
for i in range(ind.shape[0]):
if flag[i] == 1 and dist[i] < min_dist:
min_dist = dist[i]
result_index = i
if result_index >= 0:
flag[result_index] = 0.0
return int(ind[result_index])
else:
return -1
@numba.njit(parallel=True)
def build_candidates(current_graph, n_vertices, n_neighbors, max_candidates, rng_state):
"""Build a heap of candidate neighbors for nearest neighbor descent. For
each vertex the candidate neighbors are any current neighbors, and any
vertices that have the vertex as one of their nearest neighbors.
Parameters
----------
current_graph: heap
The current state of the graph for nearest neighbor descent.
n_vertices: int
The total number of vertices in the graph.
n_neighbors: int
The number of neighbor edges per node in the current graph.
max_candidates: int
The maximum number of new candidate neighbors.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
candidate_neighbors: A heap with an array of (randomly sorted) candidate
neighbors for each vertex in the graph.
"""
candidate_neighbors = make_heap(n_vertices, max_candidates)
for i in range(n_vertices):
for j in range(n_neighbors):
if current_graph[0, i, j] < 0:
continue
idx = current_graph[0, i, j]
isn = current_graph[2, i, j]
d = tau_rand(rng_state)
heap_push(candidate_neighbors, i, d, idx, isn)
heap_push(candidate_neighbors, idx, d, i, isn)
current_graph[2, i, j] = 0
return candidate_neighbors
@numba.njit()
def new_build_candidates(
current_graph, n_vertices, n_neighbors, max_candidates, rng_state, rho=0.5
): # pragma: no cover
"""Build a heap of candidate neighbors for nearest neighbor descent. For
each vertex the candidate neighbors are any current neighbors, and any
vertices that have the vertex as one of their nearest neighbors.
Parameters
----------
current_graph: heap
The current state of the graph for nearest neighbor descent.
n_vertices: int
The total number of vertices in the graph.
n_neighbors: int
The number of neighbor edges per node in the current graph.
max_candidates: int
The maximum number of new candidate neighbors.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
candidate_neighbors: A heap with an array of (randomly sorted) candidate
neighbors for each vertex in the graph.
"""
new_candidate_neighbors = make_heap(n_vertices, max_candidates)
old_candidate_neighbors = make_heap(n_vertices, max_candidates)
for i in range(n_vertices):
for j in range(n_neighbors):
if current_graph[0, i, j] < 0:
continue
idx = current_graph[0, i, j]
isn = current_graph[2, i, j]
d = tau_rand(rng_state)
if tau_rand(rng_state) < rho:
c = 0
if isn:
c += heap_push(new_candidate_neighbors, i, d, idx, isn)
c += heap_push(new_candidate_neighbors, idx, d, i, isn)
else:
heap_push(old_candidate_neighbors, i, d, idx, isn)
heap_push(old_candidate_neighbors, idx, d, i, isn)
if c > 0:
current_graph[2, i, j] = 0
return new_candidate_neighbors, old_candidate_neighbors
@numba.njit(parallel=True)
def submatrix(dmat, indices_col, n_neighbors):
"""Return a submatrix given an orginal matrix and the indices to keep.
Parameters
----------
dmat: array, shape (n_samples, n_samples)
Original matrix.
indices_col: array, shape (n_samples, n_neighbors)
Indices to keep. Each row consists of the indices of the columns.
n_neighbors: int
Number of neighbors.
Returns
-------
submat: array, shape (n_samples, n_neighbors)
The corresponding submatrix.
"""
n_samples_transform, n_samples_fit = dmat.shape
submat = np.zeros((n_samples_transform, n_neighbors), dtype=dmat.dtype)
for i in numba.prange(n_samples_transform):
for j in numba.prange(n_neighbors):
submat[i, j] = dmat[i, indices_col[i, j]]
return submat
# Generates a timestamp for use in logging messages when verbose=True
def ts():
    """Return the current local time as a human-readable string."""
    now = time.time()
    return time.ctime(now)
# I'm not enough of a numba ninja to numba this successfully.
# np.arrays of lists, which are objects...
def csr_unique(matrix, return_index=True, return_inverse=True, return_counts=True):
"""Find the unique elements of a sparse csr matrix.
We don't explicitly construct the unique matrix leaving that to the user
who may not want to duplicate a massive array in memory.
Returns the indices of the input array that give the unique values.
Returns the indices of the unique array that reconstructs the input array.
Returns the number of times each unique row appears in the input matrix.
matrix: a csr matrix
return_index = bool, optional
If true, return the row indices of 'matrix'
return_inverse: bool, optional
If true, return the the indices of the unique array that can be
used to reconstruct 'matrix'.
return_counts = bool, optional
If true, returns the number of times each unique item appears in 'matrix'
The unique matrix can computed via
unique_matrix = matrix[index]
and the original matrix reconstructed via
unique_matrix[inverse]
"""
lil_matrix = matrix.tolil()
rows = [x + y for x, y in zip(lil_matrix.rows, lil_matrix.data)]
return_values = return_counts + return_inverse + return_index
return np.unique(
rows,
return_index=return_index,
return_inverse=return_inverse,
return_counts=return_counts,
)[1 : (return_values + 1)]
| 23 | 0 | 22 |
94107815294d742b861a995326c662f166d635ed | 876 | py | Python | malaya_speech/supervised/lm.py | huseinzol05/malaya-speech | 4343c409340c608a426cc6f0926fbe2c1661783e | [
"MIT"
] | 111 | 2020-08-31T04:58:54.000Z | 2022-03-29T15:44:18.000Z | malaya_speech/supervised/lm.py | huseinzol05/malaya-speech | 4343c409340c608a426cc6f0926fbe2c1661783e | [
"MIT"
] | 14 | 2020-12-16T07:27:22.000Z | 2022-03-15T17:39:01.000Z | malaya_speech/supervised/lm.py | huseinzol05/malaya-speech | 4343c409340c608a426cc6f0926fbe2c1661783e | [
"MIT"
] | 29 | 2021-02-09T08:57:15.000Z | 2022-03-12T14:09:19.000Z | from malaya_speech.utils import check_file
from malaya_speech.path import CTC_VOCABS
import json
| 25.028571 | 105 | 0.611872 | from malaya_speech.utils import check_file
from malaya_speech.path import CTC_VOCABS
import json
def get_vocab_ctc(language):
    """Return the CTC vocab entry for ``language``; unknown languages fall back to Malay."""
    fallback = CTC_VOCABS['malay']
    return CTC_VOCABS.get(language, fallback)
def load(model, module, alpha, beta, **kwargs):
    """Download (if needed) a KenLM model plus its CTC vocab and build a
    ``ctc_decoders.Scorer`` from them.

    :param model: model identifier; the language code is its last '-' segment.
    :param module: storage module name passed through to ``check_file``.
    :param alpha: language-model weight for the scorer.
    :param beta: word-insertion weight for the scorer.
    :raises ModuleNotFoundError: when the optional ``ctc_decoders`` package
        is not installed.
    :return: a configured ``Scorer`` instance.
    """
    language = model.split('-')[-1]
    files = check_file(
        file=model,
        module=module,
        keys={
            'model': 'model.trie.klm',
            'vocab': get_vocab_ctc(language),
        },
        quantized=False,
        **kwargs,
    )
    try:
        from ctc_decoders import Scorer
    except BaseException:
        raise ModuleNotFoundError(
            'ctc_decoders not installed. Please install it by `pip3 install ctc-decoders` and try again.'
        )
    # The decoder needs a few extra symbols beyond the stored vocab.
    with open(files['vocab']) as fopen:
        vocab_list = json.load(fopen) + ['{', '}', '[']
    return Scorer(alpha, beta, files['model'], vocab_list)
| 731 | 0 | 46 |
3325305004c8b489ac5964acd458de188c086fad | 8,543 | py | Python | server/KevlarServer.py | DhirajWishal/Kevlar | f010fac7778d65ffef63d9ae3ac25c5cb4144815 | [
"Apache-2.0"
] | null | null | null | server/KevlarServer.py | DhirajWishal/Kevlar | f010fac7778d65ffef63d9ae3ac25c5cb4144815 | [
"Apache-2.0"
] | null | null | null | server/KevlarServer.py | DhirajWishal/Kevlar | f010fac7778d65ffef63d9ae3ac25c5cb4144815 | [
"Apache-2.0"
] | 3 | 2021-12-20T01:48:32.000Z | 2022-03-14T12:14:12.000Z | from http.server import BaseHTTPRequestHandler
import CryptoService
import Database
import Packager
import XMLParser
| 38.138393 | 118 | 0.584806 | from http.server import BaseHTTPRequestHandler
import CryptoService
import Database
import Packager
import XMLParser
class Server(BaseHTTPRequestHandler):
    """
    HTTP request handler for the Kevlar server.
    Clients POST small XML documents; the <mode> element of the request
    selects the operation (check / login / account / update) and every reply
    is an XML document produced by the shared Packager.
    """
    # Class-level singletons: BaseHTTPRequestHandler creates a fresh handler
    # instance per request, so these are built once and shared by all requests.
    database = Database.Database()
    packager = Packager.Packager()
    def write_data(self, data: str):
        """
        Write data to be passed to the response.
        :param data: The data to write as a string.
        :return: None.
        """
        # Encode first: Content-Length must be the number of BYTES on the
        # wire, and len(data) under-counts whenever data contains non-ASCII
        # characters (previously this sent the character count).
        payload = bytes(data, "utf-8")
        self.send_header("Content-Length", str(len(payload)))
        self.end_headers()
        self.wfile.write(payload)
    def do_GET(self):
        """
        Handle GET requests.
        :return: None
        """
        self.send_response(200)
        self.send_header("Content-type", "text/xml")
        self.end_headers()
        self.wfile.write(bytes("Boo Hoo no one uses GET to send sensitive data!", "utf-8"))
    def do_POST(self):
        """
        Handle POST requests.
        :return: None
        """
        self.send_response(200)
        self.send_header("Content-type", "application/xml")
        # Body length comes from the Content-Length request header; the
        # remaining headers/terminator are sent by write_data() downstream.
        self.handle_request(self.rfile.read(int(self.headers['Content-Length'])))
    def handle_request(self, data: bytes):
        """
        Get the data and handle the request.
        :param data: The request as bytes.
        :return: None
        """
        decoded_data = data.decode("utf-8")
        # Set up the xml parser to walk through the xml tree.
        xml_parser = XMLParser.XMLParser(decoded_data)
        # Handle the check request.
        if xml_parser.mode == "check":
            self.handle_check(xml_parser)
        # Handle the login request.
        elif xml_parser.mode == "login":
            self.handle_login(xml_parser)
        # Handle the user create account request.
        elif xml_parser.mode == "account":
            self.handle_account(xml_parser)
        # Handle the user update request.
        elif xml_parser.mode == "update":
            self.handle_update(xml_parser)
        # If none of the modes match, say that the mode is invalid.
        else:
            self.write_data(self.packager.generate_status("Invalid mode"))
    def handle_check(self, xml_parser):
        """
        Handle the client's check if account exist request.
        Replies with a numeric status: "2" = user exists and password matches,
        "1" = user exists but password mismatches, "0" = user unknown.
        :param xml_parser: The xml parser used to parse the user request.
        :return: None
        """
        username = ""
        password = ""
        # Walk through the xml tree and gather the required data.
        for element in xml_parser.tree.getroot():
            if element.tag == "username":
                username = element.text
            elif element.tag == "password":
                password = element.text
        # Check if the username exists.
        if self.database.user_exist(username):
            # If true, check if the passwords match.
            if self.database.get_password(username) == password:
                # If yes, we just say that everything is okay.
                self.write_data(self.packager.generate_status("2"))
            else:
                # If the passwords don't match, we say that the status is 1 (the passwords mismatch)
                self.write_data(self.packager.generate_status("1"))
        else:
            # If the username isn't there in the database, we just send the status code 0.
            self.write_data(self.packager.generate_status("0"))
    def handle_login(self, xml_parser):
        """
        Handle the client's login request.
        On success the reply carries the stored database blob plus an HMAC of
        it (keyed with the user's validation key) so the client can verify
        integrity; on failure a partially/fully empty account form is sent.
        :param xml_parser: The xml parser used to parse the user request.
        :return: None
        """
        username = ""
        password = ""
        # Walk through the xml tree and gather the required data.
        for element in xml_parser.tree.getroot():
            if element.tag == "username":
                username = element.text
            elif element.tag == "password":
                password = element.text
        # If the username exists within the database, we can then proceed to check if the password is valid.
        if self.database.user_exist(username):
            database_password = self.database.get_password(username)
            # Here we check if the password is valid or not.
            if database_password == password:
                database_database = self.database.get_database(username)
                validation_key = self.database.get_validation_key(username)
                initialization_vector = self.database.get_initialization_vector(username)
                # If the password is valid, we can send back the correct account information back to the client.
                self.write_data(self.packager.generate_account(username, database_password, database_database,
                                                               CryptoService.perform_hmac(database_database,
                                                                                          validation_key),
                                                               initialization_vector))
            # If the password is invalid, we just send a form with just the username.
            else:
                self.write_data(self.packager.generate_account(username, "", "", "", ""))
        # If the username does not exist, we send an empty response document.
        else:
            self.write_data(self.packager.generate_account("", "", "", "", ""))
    def handle_account(self, xml_parser):
        """
        Handle the client's create account request.
        Inserts a brand-new user row; refuses if the username is taken or the
        initialization vector is missing.
        :param xml_parser: The xml parser used to parse the request.
        :return: None
        """
        username = ""
        password = ""
        database = ""
        user_validation_key = ""
        initialization_vector = ""
        # Walk through the xml tree and get the required information.
        for element in xml_parser.tree.getroot():
            if element.tag == "username":
                username = element.text
            elif element.tag == "password":
                password = element.text
            elif element.tag == "database":
                database = element.text
            elif element.tag == "validation":
                user_validation_key = element.text
            elif element.tag == "iv":
                initialization_vector = element.text
        # Only create the account if the username is still free.
        if not self.database.user_exist(username):
            if initialization_vector == "":
                self.write_data(self.packager.generate_status("Invalid Initialization Vector"))
            else:
                if not self.database.insert(username, password, user_validation_key, database, initialization_vector):
                    self.write_data(self.packager.generate_status("Failed to insert data"))
                else:
                    self.write_data(self.packager.generate_status("Successful"))
        else:
            self.write_data(self.packager.generate_status("Account already exists"))
    def handle_update(self, xml_parser):
        """
        Handle the client's update request.
        The posted database blob is accepted only when its HMAC (keyed with
        the stored validation key) matches the HMAC supplied by the client.
        :param xml_parser: The xml parser used to parse the request.
        :return: None
        """
        username = ""
        password = ""
        database = ""
        hmac = ""
        # Walk through the xml tree and get the required information.
        for element in xml_parser.tree.getroot():
            if element.tag == "username":
                username = element.text
            elif element.tag == "password":
                password = element.text
            elif element.tag == "database":
                database = element.text
            elif element.tag == "hmac":
                hmac = element.text
        # If the user exists, we can proceed to update the account.
        if self.database.user_exist(username):
            validation_key = self.database.get_validation_key(username)
            internal_hmac = CryptoService.perform_hmac(database, validation_key)
            # First we validate the incoming database data.
            if hmac == internal_hmac:
                # If successful, we can update the table.
                self.database.update(username, password, validation_key, database)
                self.write_data(self.packager.generate_status("Successful"))
            # If not, we send an error status.
            else:
                self.write_data(self.packager.generate_status("HMAC Error"))
        else:
            self.write_data(self.packager.generate_status("User does not exist"))
| 0 | 8,400 | 23 |
a40b9a19d395eb71763cf2620d9c821602f8af5d | 4,400 | py | Python | util.py | jotunskij/flaskan | 6d6aed6a8a647ba9a4751f80a40e81397047eb6f | [
"MIT"
] | null | null | null | util.py | jotunskij/flaskan | 6d6aed6a8a647ba9a4751f80a40e81397047eb6f | [
"MIT"
] | null | null | null | util.py | jotunskij/flaskan | 6d6aed6a8a647ba9a4751f80a40e81397047eb6f | [
"MIT"
] | null | null | null | from functools import wraps
import os
import json
import requests
from flask import redirect, url_for, jsonify, request
from flask_jwt_extended import (
get_jwt_identity, verify_jwt_in_request,
unset_jwt_cookies
)
TOKEN_BLACKLIST = set()
# Decorators
| 31.428571 | 88 | 0.623864 | from functools import wraps
import os
import json
import requests
from flask import redirect, url_for, jsonify, request
from flask_jwt_extended import (
get_jwt_identity, verify_jwt_in_request,
unset_jwt_cookies
)
TOKEN_BLACKLIST = set()
# Decorators
def group_required_api(group):
    """Decorator factory for JSON endpoints: reject the call with HTTP 400
    unless the JWT identity lists ``group`` under its 'groups' key."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            groups = get_jwt_identity()['groups']
            if group in groups:
                return fn(*args, **kwargs)
            return jsonify({'error': 'Token doesnt have the correct group'}), 400
        return wrapper
    return decorator
def group_optional_web(group):
    """Decorator factory for web views: the view always runs, but receives
    ``passed=True`` when the caller holds a valid JWT containing ``group``.
    An invalid/expired token triggers a redirect to login with cookies
    cleared."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            try:
                verify_jwt_in_request()
            except Exception:
                response = redirect_to_login()
                unset_jwt_cookies(response)
                return response
            identity = get_jwt_identity()
            # Flag membership instead of blocking; the view decides what to do.
            if identity and group in identity['groups']:
                kwargs['passed'] = True
            return fn(*args, **kwargs)
        return wrapper
    return decorator
def group_required_web(group, next_url):
    """Decorator factory for web views: require a valid JWT whose identity
    contains ``group``; otherwise redirect to the login page (clearing
    cookies when the token itself is invalid/expired).

    :param group: group name that must appear in the identity's 'groups'.
    :param next_url: endpoint to return to after a successful login.
    """
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            try:
                verify_jwt_in_request()
            except Exception:
                response = redirect_to_login(next_url)
                unset_jwt_cookies(response)
                return response
            identity = get_jwt_identity()
            # (removed stray debug print of the identity — it leaked the
            # identity payload to stdout on every request)
            if not identity or group not in identity['groups']:
                return redirect_to_login(next_url)
            return fn(*args, **kwargs)
        return wrapper
    return decorator
def redirect_to_login(next_url=None, error=None):
    """Build a redirect response to the login view.

    Falls back to the index endpoint and a generic (Swedish) "no rights /
    logged out due to inactivity" message when no target/error is given.
    """
    target = next_url or '.index'
    message = error or (
        'Du saknar rättigheter eller har '
        'blivit utloggad pga inaktivitet'
    )
    return redirect(url_for(
        'public_routes.login',
        next_url=target,
        error=message
    ))
def log_in_user(username, password):
    """Authenticate ``username``/``password``.

    Placeholder implementation: always "succeeds" and returns a static
    identity carrying only the 'user' group.
    """
    # TODO: replace with a real credential check against the user store.
    identity = {'user_id': 1, 'groups': ['user']}
    return identity
def validate_recaptcha(token):
    """Verify a reCAPTCHA response token against Google's siteverify
    endpoint and return its boolean 'success' field."""
    payload = {
        'response': token,
        'secret': os.environ.get('RECAPTCHA_SECRET'),
    }
    reply = requests.post("https://www.google.com/recaptcha/api/siteverify", payload)
    return json.loads(reply.text)['success']
def api_call(method, path, payload=None):
    """Forward an API call (method + path + optional JSON payload) to the
    token server, passing along the incoming request's headers. Any non-200
    reply is turned into a redirect to the login page."""
    base_url = os.environ.get('TOKEN_SERVER')
    response = requests.request(
        method=method,
        url=f'{base_url}{path}',
        headers=request.headers,
        json=payload,
    )
    if response.status_code == 200:
        return response
    return redirect_to_login(next_url=None, error='API-fel')
def api_logout(csrf_token):
    """Ask the token server to revoke the current access token.
    Fire-and-forget: the response is deliberately ignored."""
    base_url = os.environ.get('TOKEN_SERVER')
    requests.delete(
        base_url + '/api/logout',
        cookies=request.cookies,
        headers={'X-CSRF-TOKEN': csrf_token},
    )
def api_logout_refresh(csrf_token):
    """Ask the token server to revoke the current refresh token.
    Fire-and-forget: the response is deliberately ignored."""
    base_url = os.environ.get('TOKEN_SERVER')
    requests.delete(
        base_url + '/api/logout-refresh',
        cookies=request.cookies,
        headers={'X-CSRF-TOKEN': csrf_token},
    )
def api_refresh(csrf_token):
    """POST to the token server's refresh endpoint with the incoming
    request's cookies; returns the raw response for the caller to inspect."""
    base_url = os.environ.get('TOKEN_SERVER')
    return requests.post(
        base_url + '/api/token/refresh',
        cookies=request.cookies,
        headers={'X-CSRF-TOKEN': csrf_token},
    )
def api_login():
    """Validate the posted credentials and exchange them for tokens at the
    token server.

    Returns the token-server response on success; otherwise a redirect to
    the login page carrying a (Swedish) error message.
    """
    username = request.form.get('username', None)
    password = request.form.get('password', None)
    if not username:
        return redirect_to_login(next_url=None, error='Användarnamn saknas')
    if not password:
        # BUG FIX: this branch previously re-tested `username`, so a missing
        # password was silently forwarded to the token server.
        return redirect_to_login(next_url=None, error='Lösenord saknas')
    token_server = os.environ.get('TOKEN_SERVER')
    response = requests.post(
        f'{token_server}/api/login',
        json={'username': username, 'password': password}
    )
    if response.status_code != 200:
        return redirect_to_login(next_url=None, error='Felaktig inloggning')
    return response
| 3,890 | 0 | 252 |
3d080c0fdc2341e4fe547be1e651ed85bcc41187 | 19,078 | py | Python | env/lib/python3.9/site-packages/wx/lib/pubsub/core/topicmgr.py | aidswidjaja/fryingpan | 147b308ddbf4b0afa740717dad94d6b87db55d00 | [
"MIT"
] | 1 | 2020-02-14T02:46:31.000Z | 2020-02-14T02:46:31.000Z | env/lib/python3.9/site-packages/wx/lib/pubsub/core/topicmgr.py | aidswidjaja/fryingpan | 147b308ddbf4b0afa740717dad94d6b87db55d00 | [
"MIT"
] | null | null | null | env/lib/python3.9/site-packages/wx/lib/pubsub/core/topicmgr.py | aidswidjaja/fryingpan | 147b308ddbf4b0afa740717dad94d6b87db55d00 | [
"MIT"
] | 1 | 2018-05-12T16:01:58.000Z | 2018-05-12T16:01:58.000Z | """
Code related to the concept of topic tree and its management: creating
and removing topics, getting info about a particular topic, etc.
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
__all__ = [
'TopicManager',
'TopicNameError',
'TopicDefnError',
]
from .callables import getID
from .topicutils import (
ALL_TOPICS,
tupleize,
stringize,
)
from .topicexc import (
TopicNameError,
TopicDefnError,
)
from .topicargspec import (
ArgSpecGiven,
ArgsInfo,
topicArgsFromCallable,
)
from .topicobj import (
Topic,
)
from .treeconfig import TreeConfig
from .topicdefnprovider import ITopicDefnProvider
from .topicmgrimpl import getRootTopicSpec
from .. import py2and3
# ---------------------------------------------------------
ARGS_SPEC_ALL = ArgSpecGiven.SPEC_GIVEN_ALL
ARGS_SPEC_NONE = ArgSpecGiven.SPEC_GIVEN_NONE
# ---------------------------------------------------------
class TopicManager:
    """
    Manages the registry of all topics and creation/deletion
    of topics.
    Note that any method that accepts a topic name can accept it in the
    'dotted' format such as ``'a.b.c.'`` or in tuple format such as
    ``('a', 'b', 'c')``. Any such method will raise a ValueError
    if name not valid (empty, invalid characters, etc).
    """
    # Allowed return values for isTopicSpecified()
    TOPIC_SPEC_NOT_SPECIFIED = 0 # false
    TOPIC_SPEC_ALREADY_CREATED = 1 # all other values equate to "true" but different reason
    TOPIC_SPEC_ALREADY_DEFINED = 2
    def __init__(self, treeConfig=None):
        """The optional treeConfig is an instance of TreeConfig, used to
        configure the topic tree such as notification settings, etc. A
        default config is created if not given. This method should only be
        called by an instance of Publisher (see Publisher.getTopicManager())."""
        self.__allTopics = None # root of topic tree
        self._topicsMap = {} # registry of all topics, keyed by dotted name
        self.__treeConfig = treeConfig or TreeConfig()
        self.__defnProvider = _MasterTopicDefnProvider(self.__treeConfig)
        # define root of all topics
        assert self.__allTopics is None
        argsDocs, reqdArgs = getRootTopicSpec()
        desc = 'Root of all topics'
        specGiven = ArgSpecGiven(argsDocs, reqdArgs)
        self.__allTopics = self.__createTopic((ALL_TOPICS,), desc, specGiven=specGiven)
    def getRootAllTopics(self):
        """Get the topic that is parent of all root (ie top-level) topics,
        for default TopicManager instance created when this module is imported.
        Some notes:
        - "root of all topics" topic satisfies isAll()==True, isRoot()==False,
          getParent() is None;
        - all root-level topics satisfy isAll()==False, isRoot()==True, and
          getParent() is getDefaultTopicTreeRoot();
        - all other topics satisfy neither. """
        return self.__allTopics
    def addDefnProvider(self, providerOrSource, format=None):
        """Register a topic definition provider. After this method is called, whenever a topic must be created,
        the first definition provider that has a definition
        for the required topic is used to instantiate the topic.
        If providerOrSource is an instance of ITopicDefnProvider, register
        it as a provider of topic definitions. Otherwise, register a new
        instance of TopicDefnProvider(providerOrSource, format). In that case,
        if format is not given, it defaults to TOPIC_TREE_FROM_MODULE. Either
        way, returns the instance of ITopicDefnProvider registered.
        """
        if isinstance(providerOrSource, ITopicDefnProvider):
            provider = providerOrSource
        else:
            # local import: avoids a circular import at module load time
            from .topicdefnprovider import (TopicDefnProvider, TOPIC_TREE_FROM_MODULE)
            source = providerOrSource
            provider = TopicDefnProvider(source, format or TOPIC_TREE_FROM_MODULE)
        self.__defnProvider.addProvider(provider)
        return provider
    def clearDefnProviders(self):
        """Remove all registered topic definition providers"""
        self.__defnProvider.clear()
    def getNumDefnProviders(self):
        """Get how many topic definitions providers are registered."""
        return self.__defnProvider.getNumProviders()
    def getTopic(self, name, okIfNone=False):
        """Get the Topic instance for the given topic name. By default, raises
        an TopicNameError exception if a topic with given name doesn't exist. If
        okIfNone=True, returns None instead of raising an exception."""
        topicNameDotted = stringize(name)
        #if not name:
        #    raise TopicNameError(name, 'Empty topic name not allowed')
        obj = self._topicsMap.get(topicNameDotted, None)
        if obj is not None:
            return obj
        if okIfNone:
            return None
        # NOT FOUND! Determine what problem is and raise accordingly:
        # find the closest parent up chain that does exists:
        parentObj, subtopicNames = self.__getClosestParent(topicNameDotted)
        assert subtopicNames
        subtopicName = subtopicNames[0]
        if parentObj is self.__allTopics:
            raise TopicNameError(name, 'Root topic "%s" doesn\'t exist' % subtopicName)
        msg = 'Topic "%s" doesn\'t have "%s" as subtopic' % (parentObj.getName(), subtopicName)
        raise TopicNameError(name, msg)
    def newTopic(self, _name, _desc, _required=(), **_argDocs):
        """Deprecated legacy method.
        If topic _name already exists, just returns it and does nothing else.
        Otherwise, uses getOrCreateTopic() to create it, then sets its
        description (_desc) and its message data specification (_argDocs
        and _required). Replaced by getOrCreateTopic()."""
        topic = self.getTopic(_name, True)
        if topic is None:
            topic = self.getOrCreateTopic(_name)
            topic.setDescription(_desc)
            topic.setMsgArgSpec(_argDocs, _required)
        return topic
    def getOrCreateTopic(self, name, protoListener=None):
        """Get the Topic instance for topic of given name, creating it
        (and any of its missing parent topics) as necessary. Pubsub
        functions such as subscribe() use this to obtain the Topic object
        corresponding to a topic name.
        The name can be in dotted or string format (``'a.b.'`` or ``('a','b')``).
        This method always attempts to return a "complete" topic, i.e. one
        with a Message Data Specification (MDS). So if the topic does not have
        an MDS, it attempts to add it. It first tries to find an MDS
        from a TopicDefnProvider (see addDefnProvider()). If none is available,
        it attempts to set it from protoListener, if it has been given. If not,
        the topic has no MDS.
        Once a topic's MDS has been set, it is never again changed or accessed
        by this method.
        Examples::
            # assume no topics exist
            # but a topic definition provider has been added via
            # pub.addTopicDefnProvider() and has definition for topics 'a' and 'a.b'
            # creates topic a and a.b; both will have MDS from the defn provider:
            t1 = topicMgr.getOrCreateTopic('a.b')
            t2 = topicMgr.getOrCreateTopic('a.b')
            assert(t1 is t2)
            assert(t1.getParent().getName() == 'a')
            def proto(req1, optarg1=None): pass
            # creates topic c.d with MDS based on proto; creates c without an MDS
            # since no proto for it, nor defn provider:
            t1 = topicMgr.getOrCreateTopic('c.d', proto)
        The MDS can also be defined via a call to subscribe(listener, topicName),
        which indirectly calls getOrCreateTopic(topicName, listener).
        """
        obj = self.getTopic(name, okIfNone=True)
        if obj:
            # if object is not sendable but a proto listener was given,
            # update its specification so that it is sendable
            if (protoListener is not None) and not obj.hasMDS():
                allArgsDocs, required = topicArgsFromCallable(protoListener)
                obj.setMsgArgSpec(allArgsDocs, required)
            return obj
        # create missing parents
        nameTuple = tupleize(name)
        parentObj = self.__createParentTopics(nameTuple)
        # now the final topic object, args from listener if provided
        desc, specGiven = self.__defnProvider.getDefn(nameTuple)
        # POLICY: protoListener is used only if no definition available
        if specGiven is None:
            if protoListener is None:
                desc = 'UNDOCUMENTED: created without spec'
            else:
                allArgsDocs, required = topicArgsFromCallable(protoListener)
                specGiven = ArgSpecGiven(allArgsDocs, required)
                desc = 'UNDOCUMENTED: created from protoListener "%s" in module %s' % getID(protoListener)
        return self.__createTopic(nameTuple, desc, parent = parentObj, specGiven = specGiven)
    def isTopicInUse(self, name):
        """Determine if topic 'name' is in use. True if a Topic object exists
        for topic name (i.e. message has already been sent for that topic, or a
        least one listener subscribed), false otherwise. Note: a topic may be in use
        but not have a definition (MDS and docstring); or a topic may have a
        definition, but not be in use."""
        return self.getTopic(name, okIfNone=True) is not None
    def hasTopicDefinition(self, name):
        """Determine if there is a definition avaiable for topic 'name'. Return
        true if there is, false otherwise. Note: a topic may have a
        definition without being in use, and vice versa."""
        # in already existing Topic object:
        alreadyCreated = self.getTopic(name, okIfNone=True)
        if alreadyCreated is not None and alreadyCreated.hasMDS():
            return True
        # from provider?
        nameTuple = tupleize(name)
        if self.__defnProvider.isDefined(nameTuple):
            return True
        return False
    def checkAllTopicsHaveMDS(self):
        """Check that all topics that have been created for their MDS.
        Raise a TopicDefnError if one is found that does not have one."""
        for topic in py2and3.itervalues(self._topicsMap):
            if not topic.hasMDS():
                raise TopicDefnError(topic.getNameTuple())
    def delTopic(self, name):
        """Delete the named topic, including all sub-topics. Returns False
        if topic does not exist; True otherwise. Also unsubscribe any listeners
        of topic and all subtopics. """
        # find from which parent the topic object should be removed
        dottedName = stringize(name)
        try:
            #obj = weakref( self._topicsMap[dottedName] )
            obj = self._topicsMap[dottedName]
        except KeyError:
            return False
        #assert obj().getName() == dottedName
        assert obj.getName() == dottedName
        # notification must be before deletion, so listeners can still
        # inspect the topic while being notified of its removal
        self.__treeConfig.notificationMgr.notifyDelTopic(dottedName)
        #obj()._undefineSelf_(self._topicsMap)
        obj._undefineSelf_(self._topicsMap)
        #assert obj() is None
        return True
    def getTopicsSubscribed(self, listener):
        """Get the list of Topic objects that have given listener
        subscribed. Note: the listener can also get messages from any
        sub-topic of returned list."""
        assocTopics = []
        for topicObj in py2and3.itervalues(self._topicsMap):
            if topicObj.hasListener(listener):
                assocTopics.append(topicObj)
        return assocTopics
    def __getClosestParent(self, topicNameDotted):
        """Returns a pair, (closest parent, tuple path from parent). The
        first item is the closest parent Topic that exists.
        The second one is the list of topic name elements that have to be
        created to create the given topic.
        So if topicNameDotted = A.B.C.D, but only A.B exists (A.B.C and
        A.B.C.D not created yet), then return is (A.B, ['C','D']).
        Note that if none of the branch exists (not even A), then return
        will be [root topic, ['A',B','C','D']). Note also that if A.B.C
        exists, the return will be (A.B.C, ['D']) regardless of whether
        A.B.C.D exists. """
        subtopicNames = []
        # walk up the dotted name one level at a time (rsplit strips the last
        # segment) until a registered ancestor is found
        headTail = topicNameDotted.rsplit('.', 1)
        while len(headTail) > 1:
            parentName = headTail[0]
            subtopicNames.insert( 0, headTail[1] )
            obj = self._topicsMap.get( parentName, None )
            if obj is not None:
                return obj, subtopicNames
            headTail = parentName.rsplit('.', 1)
        subtopicNames.insert( 0, headTail[0] )
        return self.__allTopics, subtopicNames
    def __createParentTopics(self, topicName):
        """This will find which parents need to be created such that
        topicName can be created (but doesn't create given topic),
        and creates them. Returns the parent object."""
        assert self.getTopic(topicName, okIfNone=True) is None
        parentObj, subtopicNames = self.__getClosestParent(stringize(topicName))
        # will create subtopics of parentObj one by one from subtopicNames
        if parentObj is self.__allTopics:
            nextTopicNameList = []
        else:
            nextTopicNameList = list(parentObj.getNameTuple())
        # note: the last element of subtopicNames is the topic itself, which
        # the caller creates, hence the [:-1] slice
        for name in subtopicNames[:-1]:
            nextTopicNameList.append(name)
            desc, specGiven = self.__defnProvider.getDefn( tuple(nextTopicNameList) )
            if desc is None:
                desc = 'UNDOCUMENTED: created as parent without specification'
            parentObj = self.__createTopic( tuple(nextTopicNameList),
                desc, specGiven = specGiven, parent = parentObj)
        return parentObj
    def __createTopic(self, nameTuple, desc, specGiven, parent=None):
        """Actual topic creation step. Adds new Topic instance to topic map,
        and sends notification message (see ``Publisher.addNotificationMgr()``)
        regarding topic creation."""
        if specGiven is None:
            specGiven = ArgSpecGiven()
        parentAI = None
        if parent:
            parentAI = parent._getListenerSpec()
        argsInfo = ArgsInfo(nameTuple, specGiven, parentAI)
        if (self.__treeConfig.raiseOnTopicUnspecified
            and not argsInfo.isComplete()):
            raise TopicDefnError(nameTuple)
        newTopicObj = Topic(self.__treeConfig, nameTuple, desc,
                            argsInfo, parent = parent)
        # sanity checks:
        assert newTopicObj.getName() not in self._topicsMap
        if parent is self.__allTopics:
            assert len( newTopicObj.getNameTuple() ) == 1
        else:
            assert parent.getNameTuple() == newTopicObj.getNameTuple()[:-1]
        assert nameTuple == newTopicObj.getNameTuple()
        # store new object and notify of creation
        self._topicsMap[ newTopicObj.getName() ] = newTopicObj
        self.__treeConfig.notificationMgr.notifyNewTopic(
            newTopicObj, desc, specGiven.reqdArgs, specGiven.argsDocs)
        return newTopicObj
def validateNameHierarchy(topicTuple):
    """Check that names in topicTuple are valid: no spaces, not empty.
    Raise TopicNameError (a ValueError) if the check fails. E.g. ('',) and
    ('a',' ') would both fail, but ('a','b') would be ok. """
    if not topicTuple:
        raise TopicNameError(stringize(topicTuple), 'empty topic name')
    for indx, topic in enumerate(topicTuple):
        if topic is None:
            # show the offending level explicitly in the reported name
            shownName = list(topicTuple)
            shownName[indx] = 'None'
            raise TopicNameError(shownName, 'None at level #%s' % indx)
        if not topic:
            raise TopicNameError(stringize(topicTuple), 'empty element at level #%s' % indx)
        if topic.isspace():
            raise TopicNameError(stringize(topicTuple), 'blank element at level #%s' % indx)
class _MasterTopicDefnProvider:
"""
Stores a list of topic definition providers. When queried for a topic
definition, queries each provider (registered via addProvider()) and
returns the first complete definition provided, or (None,None).
The providers must follow the ITopicDefnProvider protocol.
"""
def addProvider(self, provider):
"""Add given provider IF not already added. """
assert(isinstance(provider, ITopicDefnProvider))
if provider not in self.__providers:
self.__providers.append(provider)
def clear(self):
"""Remove all providers added."""
self.__providers = []
def getNumProviders(self):
"""Return how many providers added."""
return len(self.__providers)
def getDefn(self, topicNameTuple):
"""Returns a pair (docstring, MDS) for the topic. The first item is
a string containing the topic's "docstring", i.e. a description string
for the topic, or None if no docstring available for the topic. The
second item is None or an instance of ArgSpecGiven specifying the
required and optional message data for listeners of this topic. """
desc, defn = None, None
for provider in self.__providers:
tmpDesc, tmpDefn = provider.getDefn(topicNameTuple)
if (tmpDesc is not None) and (tmpDefn is not None):
assert tmpDefn.isComplete()
desc, defn = tmpDesc, tmpDefn
break
return desc, defn
def isDefined(self, topicNameTuple):
"""Returns True only if a complete definition exists, ie topic
has a description and a complete message data specification (MDS)."""
desc, defn = self.getDefn(topicNameTuple)
if desc is None or defn is None:
return False
if defn.isComplete():
return True
return False
| 41.746171 | 112 | 0.622392 | """
Code related to the concept of topic tree and its management: creating
and removing topics, getting info about a particular topic, etc.
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
__all__ = [
'TopicManager',
'TopicNameError',
'TopicDefnError',
]
from .callables import getID
from .topicutils import (
ALL_TOPICS,
tupleize,
stringize,
)
from .topicexc import (
TopicNameError,
TopicDefnError,
)
from .topicargspec import (
ArgSpecGiven,
ArgsInfo,
topicArgsFromCallable,
)
from .topicobj import (
Topic,
)
from .treeconfig import TreeConfig
from .topicdefnprovider import ITopicDefnProvider
from .topicmgrimpl import getRootTopicSpec
from .. import py2and3
# ---------------------------------------------------------
ARGS_SPEC_ALL = ArgSpecGiven.SPEC_GIVEN_ALL
ARGS_SPEC_NONE = ArgSpecGiven.SPEC_GIVEN_NONE
# ---------------------------------------------------------
class TopicManager:
"""
Manages the registry of all topics and creation/deletion
of topics.
Note that any method that accepts a topic name can accept it in the
'dotted' format such as ``'a.b.c.'`` or in tuple format such as
``('a', 'b', 'c')``. Any such method will raise a ValueError
if name not valid (empty, invalid characters, etc).
"""
# Allowed return values for isTopicSpecified()
TOPIC_SPEC_NOT_SPECIFIED = 0 # false
TOPIC_SPEC_ALREADY_CREATED = 1 # all other values equate to "true" but different reason
TOPIC_SPEC_ALREADY_DEFINED = 2
def __init__(self, treeConfig=None):
"""The optional treeConfig is an instance of TreeConfig, used to
configure the topic tree such as notification settings, etc. A
default config is created if not given. This method should only be
called by an instance of Publisher (see Publisher.getTopicManager())."""
self.__allTopics = None # root of topic tree
self._topicsMap = {} # registry of all topics
self.__treeConfig = treeConfig or TreeConfig()
self.__defnProvider = _MasterTopicDefnProvider(self.__treeConfig)
# define root of all topics
assert self.__allTopics is None
argsDocs, reqdArgs = getRootTopicSpec()
desc = 'Root of all topics'
specGiven = ArgSpecGiven(argsDocs, reqdArgs)
self.__allTopics = self.__createTopic((ALL_TOPICS,), desc, specGiven=specGiven)
def getRootAllTopics(self):
"""Get the topic that is parent of all root (ie top-level) topics,
for default TopicManager instance created when this module is imported.
Some notes:
- "root of all topics" topic satisfies isAll()==True, isRoot()==False,
getParent() is None;
- all root-level topics satisfy isAll()==False, isRoot()==True, and
getParent() is getDefaultTopicTreeRoot();
- all other topics satisfy neither. """
return self.__allTopics
def addDefnProvider(self, providerOrSource, format=None):
"""Register a topic definition provider. After this method is called, whenever a topic must be created,
the first definition provider that has a definition
for the required topic is used to instantiate the topic.
If providerOrSource is an instance of ITopicDefnProvider, register
it as a provider of topic definitions. Otherwise, register a new
instance of TopicDefnProvider(providerOrSource, format). In that case,
if format is not given, it defaults to TOPIC_TREE_FROM_MODULE. Either
way, returns the instance of ITopicDefnProvider registered.
"""
if isinstance(providerOrSource, ITopicDefnProvider):
provider = providerOrSource
else:
from .topicdefnprovider import (TopicDefnProvider, TOPIC_TREE_FROM_MODULE)
source = providerOrSource
provider = TopicDefnProvider(source, format or TOPIC_TREE_FROM_MODULE)
self.__defnProvider.addProvider(provider)
return provider
def clearDefnProviders(self):
"""Remove all registered topic definition providers"""
self.__defnProvider.clear()
def getNumDefnProviders(self):
"""Get how many topic definitions providers are registered."""
return self.__defnProvider.getNumProviders()
def getTopic(self, name, okIfNone=False):
    """Get the Topic instance for the given topic name. By default a
    TopicNameError is raised when no topic with that name exists; with
    okIfNone=True, None is returned instead of raising."""
    dottedName = stringize(name)
    found = self._topicsMap.get(dottedName)
    if found is not None:
        return found
    if okIfNone:
        return None

    # Not found: figure out which element of the name chain is missing,
    # so the raised error can point at it precisely.
    parentObj, missingNames = self.__getClosestParent(dottedName)
    assert missingNames
    missing = missingNames[0]
    if parentObj is self.__allTopics:
        raise TopicNameError(name, 'Root topic "%s" doesn\'t exist' % missing)
    raise TopicNameError(
        name,
        'Topic "%s" doesn\'t have "%s" as subtopic' % (parentObj.getName(), missing))
def newTopic(self, _name, _desc, _required=(), **_argDocs):
    """Deprecated legacy method; replaced by getOrCreateTopic().

    If topic _name already exists, it is returned unchanged. Otherwise it
    is created via getOrCreateTopic(), its description set to _desc, and
    its message data specification set from _argDocs and _required."""
    existing = self.getTopic(_name, True)
    if existing is not None:
        return existing
    created = self.getOrCreateTopic(_name)
    created.setDescription(_desc)
    created.setMsgArgSpec(_argDocs, _required)
    return created
def getOrCreateTopic(self, name, protoListener=None):
    """Get the Topic instance for topic of given name, creating it
    (and any of its missing parent topics) as necessary. Pubsub
    functions such as subscribe() use this to obtain the Topic object
    corresponding to a topic name.

    The name can be in dotted or string format (``'a.b.'`` or ``('a','b')``).

    This method always attempts to return a "complete" topic, i.e. one
    with a Message Data Specification (MDS). So if the topic does not have
    an MDS, it attempts to add it. It first tries to find an MDS
    from a TopicDefnProvider (see addDefnProvider()). If none is available,
    it attempts to set it from protoListener, if it has been given. If not,
    the topic has no MDS.

    Once a topic's MDS has been set, it is never again changed or accessed
    by this method.

    Examples::

        # assume no topics exist
        # but a topic definition provider has been added via
        # pub.addTopicDefnProvider() and has definition for topics 'a' and 'a.b'

        # creates topic a and a.b; both will have MDS from the defn provider:
        t1 = topicMgr.getOrCreateTopic('a.b')
        t2 = topicMgr.getOrCreateTopic('a.b')
        assert(t1 is t2)
        assert(t1.getParent().getName() == 'a')

        def proto(req1, optarg1=None): pass
        # creates topic c.d with MDS based on proto; creates c without an MDS
        # since no proto for it, nor defn provider:
        t1 = topicMgr.getOrCreateTopic('c.d', proto)

    The MDS can also be defined via a call to subscribe(listener, topicName),
    which indirectly calls getOrCreateTopic(topicName, listener).
    """
    obj = self.getTopic(name, okIfNone=True)
    if obj:
        # if object is not sendable but a proto listener was given,
        # update its specification so that it is sendable
        if (protoListener is not None) and not obj.hasMDS():
            allArgsDocs, required = topicArgsFromCallable(protoListener)
            obj.setMsgArgSpec(allArgsDocs, required)
        return obj

    # create missing parents
    nameTuple = tupleize(name)
    parentObj = self.__createParentTopics(nameTuple)

    # now the final topic object, args from listener if provided
    desc, specGiven = self.__defnProvider.getDefn(nameTuple)
    # POLICY: protoListener is used only if no definition available
    if specGiven is None:
        if protoListener is None:
            desc = 'UNDOCUMENTED: created without spec'
        else:
            allArgsDocs, required = topicArgsFromCallable(protoListener)
            specGiven = ArgSpecGiven(allArgsDocs, required)
            desc = 'UNDOCUMENTED: created from protoListener "%s" in module %s' % getID(protoListener)

    return self.__createTopic(nameTuple, desc, parent = parentObj, specGiven = specGiven)
def isTopicInUse(self, name):
    """Determine if topic 'name' is in use: True when a Topic object
    exists for the name (a message has been sent for it, or at least one
    listener subscribed), False otherwise. Note that a topic can be in
    use without having a definition (MDS and docstring), and a topic can
    have a definition without being in use."""
    topicObj = self.getTopic(name, okIfNone=True)
    return topicObj is not None
def hasTopicDefinition(self, name):
    """Determine if a definition is available for topic 'name'. Returns
    True if one is, False otherwise. Note: a topic may have a definition
    without being in use, and vice versa."""
    # definition carried by an already-created Topic object?
    existing = self.getTopic(name, okIfNone=True)
    if existing is not None and existing.hasMDS():
        return True
    # otherwise, maybe one of the registered providers defines it
    return self.__defnProvider.isDefined(tupleize(name))
def checkAllTopicsHaveMDS(self):
    """Verify that every topic created so far has an MDS; raise a
    TopicDefnError naming the first topic found without one."""
    for topicObj in py2and3.itervalues(self._topicsMap):
        if not topicObj.hasMDS():
            raise TopicDefnError(topicObj.getNameTuple())
def delTopic(self, name):
    """Delete the named topic, including all sub-topics. Returns False
    if topic does not exist; True otherwise. Also unsubscribe any listeners
    of topic and all subtopics. """
    # find from which parent the topic object should be removed
    dottedName = stringize(name)
    try:
        # NOTE: the commented-out lines below are leftovers from an earlier
        # weakref-based implementation; kept for reference.
        #obj = weakref( self._topicsMap[dottedName] )
        obj = self._topicsMap[dottedName]
    except KeyError:
        return False

    #assert obj().getName() == dottedName
    assert obj.getName() == dottedName
    # notification must be before deletion in case listeners need the topic
    self.__treeConfig.notificationMgr.notifyDelTopic(dottedName)

    # topic removes itself (and its subtree) from the registry map
    #obj()._undefineSelf_(self._topicsMap)
    obj._undefineSelf_(self._topicsMap)
    #assert obj() is None

    return True
def getTopicsSubscribed(self, listener):
    """Get the list of Topic objects that have the given listener
    subscribed. Note: the listener can also receive messages from any
    sub-topic of the returned topics."""
    return [topicObj
            for topicObj in py2and3.itervalues(self._topicsMap)
            if topicObj.hasListener(listener)]
def __getClosestParent(self, topicNameDotted):
    """Returns a pair, (closest parent, tuple path from parent). The
    first item is the closest parent Topic that exists.
    The second one is the list of topic name elements that have to be
    created to create the given topic.

    So if topicNameDotted = A.B.C.D, but only A.B exists (A.B.C and
    A.B.C.D not created yet), then return is (A.B, ['C','D']).
    Note that if none of the branch exists (not even A), then return
    will be [root topic, ['A',B','C','D']). Note also that if A.B.C
    exists, the return will be (A.B.C, ['D']) regardless of whether
    A.B.C.D exists. """
    subtopicNames = []
    # walk up the dotted name one level at a time, collecting the tail
    # elements until an existing ancestor is found in the registry
    headTail = topicNameDotted.rsplit('.', 1)
    while len(headTail) > 1:
        parentName = headTail[0]
        subtopicNames.insert( 0, headTail[1] )
        obj = self._topicsMap.get( parentName, None )
        if obj is not None:
            return obj, subtopicNames

        headTail = parentName.rsplit('.', 1)

    # no ancestor exists: everything must be created under the root
    subtopicNames.insert( 0, headTail[0] )
    return self.__allTopics, subtopicNames
def __createParentTopics(self, topicName):
    """This will find which parents need to be created such that
    topicName can be created (but doesn't create given topic),
    and creates them. Returns the parent object."""
    assert self.getTopic(topicName, okIfNone=True) is None
    parentObj, subtopicNames = self.__getClosestParent(stringize(topicName))

    # will create subtopics of parentObj one by one from subtopicNames
    if parentObj is self.__allTopics:
        nextTopicNameList = []
    else:
        nextTopicNameList = list(parentObj.getNameTuple())
    # note: [:-1] because the last element is the topic itself, which the
    # caller creates separately
    for name in subtopicNames[:-1]:
        nextTopicNameList.append(name)
        # a registered definition provider may supply description/spec
        desc, specGiven = self.__defnProvider.getDefn( tuple(nextTopicNameList) )
        if desc is None:
            desc = 'UNDOCUMENTED: created as parent without specification'
        parentObj = self.__createTopic( tuple(nextTopicNameList),
            desc, specGiven = specGiven, parent = parentObj)

    return parentObj
def __createTopic(self, nameTuple, desc, specGiven, parent=None):
    """Actual topic creation step. Adds new Topic instance to topic map,
    and sends notification message (see ``Publisher.addNotificationMgr()``)
    regarding topic creation."""
    if specGiven is None:
        specGiven = ArgSpecGiven()
    # inherit the listener arg spec from the parent topic, if any
    parentAI = None
    if parent:
        parentAI = parent._getListenerSpec()
    argsInfo = ArgsInfo(nameTuple, specGiven, parentAI)
    if (self.__treeConfig.raiseOnTopicUnspecified
        and not argsInfo.isComplete()):
        raise TopicDefnError(nameTuple)

    newTopicObj = Topic(self.__treeConfig, nameTuple, desc,
        argsInfo, parent = parent)
    # sanity checks:
    assert newTopicObj.getName() not in self._topicsMap
    if parent is self.__allTopics:
        # a root-level topic: its name tuple has exactly one element
        assert len( newTopicObj.getNameTuple() ) == 1
    else:
        assert parent.getNameTuple() == newTopicObj.getNameTuple()[:-1]
    assert nameTuple == newTopicObj.getNameTuple()

    # store new object and notify of creation
    self._topicsMap[ newTopicObj.getName() ] = newTopicObj
    self.__treeConfig.notificationMgr.notifyNewTopic(
        newTopicObj, desc, specGiven.reqdArgs, specGiven.argsDocs)
    return newTopicObj
def validateNameHierarchy(topicTuple):
    """Check that names in topicTuple are valid: no spaces, not empty.
    Raise TopicNameError if the check fails. E.g. ('',) and ('a',' ')
    would both fail, but ('a','b') would be ok."""
    if not topicTuple:
        raise TopicNameError(stringize(topicTuple), 'empty topic name')

    for indx, topic in enumerate(topicTuple):
        if topic is None:
            # show the offending element as the literal string 'None'
            badName = list(topicTuple)
            badName[indx] = 'None'
            raise TopicNameError(badName, 'None at level #%s' % indx)
        if not topic:
            raise TopicNameError(stringize(topicTuple),
                'empty element at level #%s' % indx)
        if topic.isspace():
            raise TopicNameError(stringize(topicTuple),
                'blank element at level #%s' % indx)
class _MasterTopicDefnProvider:
    """
    Stores a list of topic definition providers. When queried for a topic
    definition, queries each provider (registered via addProvider()) and
    returns the first complete definition provided, or (None,None).

    The providers must follow the ITopicDefnProvider protocol.
    """

    def __init__(self, treeConfig):
        self.__providers = []
        self.__treeConfig = treeConfig

    def addProvider(self, provider):
        """Add given provider IF not already added. """
        assert(isinstance(provider, ITopicDefnProvider))
        if provider not in self.__providers:
            self.__providers.append(provider)

    def clear(self):
        """Remove all providers added."""
        self.__providers = []

    def getNumProviders(self):
        """Return how many providers added."""
        return len(self.__providers)

    def getDefn(self, topicNameTuple):
        """Returns a pair (docstring, MDS) for the topic. The first item is
        a string containing the topic's "docstring" (a description string
        for the topic) or None if none is available. The second item is
        None or an instance of ArgSpecGiven specifying the required and
        optional message data for listeners of this topic. The first
        provider with a complete definition wins."""
        for provider in self.__providers:
            desc, defn = provider.getDefn(topicNameTuple)
            if desc is not None and defn is not None:
                assert defn.isComplete()
                return desc, defn
        return None, None

    def isDefined(self, topicNameTuple):
        """Returns True only if a complete definition exists, ie topic
        has a description and a complete message data specification (MDS)."""
        desc, defn = self.getDefn(topicNameTuple)
        return desc is not None and defn is not None and defn.isComplete()
| 81 | 0 | 29 |
dd76a16f14eb92e05fb195495c917c12243546f9 | 3,309 | py | Python | module4-acid-and-database-scalability-tradeoffs/mongo_queries.py | jrslagle/DS-Unit-3-Sprint-2-SQL-and-Databases | 8a6b3fd14b6a6833ee3a14b2d8a7db3bee494a14 | [
"MIT"
] | null | null | null | module4-acid-and-database-scalability-tradeoffs/mongo_queries.py | jrslagle/DS-Unit-3-Sprint-2-SQL-and-Databases | 8a6b3fd14b6a6833ee3a14b2d8a7db3bee494a14 | [
"MIT"
] | null | null | null | module4-acid-and-database-scalability-tradeoffs/mongo_queries.py | jrslagle/DS-Unit-3-Sprint-2-SQL-and-Databases | 8a6b3fd14b6a6833ee3a14b2d8a7db3bee494a14 | [
"MIT"
] | null | null | null | import sqlite3
import pymongo
import os
from dotenv import load_dotenv
from pprintpp import pprint
if __name__ == "__main__":
# Open a connection
mongo_client = create_mongodb_connection()
db = mongo_client.rgb_characters
# How many total documents are there?
doc_count = db.rgb_characters.count_documents({})
print(f"Counted {doc_count} documents on your MongoDB cluster")
# How many total Characters are there?
character_count = db.rgb_characters.count_documents({ 'name': { "$exists": True } })
print(f"There are {character_count} characters")
# How many total Items?
characters_with_items = list(db.rgb_characters.find({ 'items': { "$exists": True } }))
nested_list_of_items = [character['items'] for character in characters_with_items]
list_of_items = [item for character_items in nested_list_of_items for item in character_items]
print(f"Characters have many items: {list_of_items[:3]}")
item_count = len(list_of_items)
print(f"All characters together have a total of {item_count} items.")
# How many of the Items are weapons? How many are not?
characters_with_weapons = list(db.rgb_characters.find({ 'weapons': { "$exists": True } }))
nested_list_of_weapons = [character['weapons'] for character in characters_with_weapons]
list_of_weapons = [item for character_weapons in nested_list_of_weapons for item in character_weapons]
print(f"Characters have many weapons too: {list_of_weapons[:3]}")
weapon_count = len(list_of_weapons)
print(f"All characters together have a total of {weapon_count} weapons.")
weapon_portion = weapon_count/item_count
print(f"This means that {100*weapon_portion:.2f}% of items are weapons (and {100*(1-weapon_portion):.2f}% are not).")
# How many Items does each character have? (Return first 20 rows)
characters_with_items = list(db.rgb_characters.find({ 'items': { "$exists": True } }))
for character in characters_with_items[:20]:
print(f"{character['name']} has {len(character['items'])} items")
# How many Weapons does each character have? (Return first 20 rows)
characters_with_weapons = list(db.rgb_characters.find({ 'weapons': { "$exists": True } }))
for character in characters_with_weapons[:20]:
print(f"{character['name']} has {len(character['weapons'])} weapons")
# On average, how many Items does each Character have?
print(f"On average, each character has {item_count/character_count:.2f} items.")
# On average, how many Weapons does each character have?
print(f"On average, each character has {weapon_count/character_count:.2f} weapons.") | 51.703125 | 194 | 0.725295 | import sqlite3
import pymongo
import os
from dotenv import load_dotenv
from pprintpp import pprint
def create_mongodb_connection():
    """Build and return a pymongo MongoClient from MONGO_* environment
    variables.

    Reads MONGO_USER, MONGO_PASSWORD, MONGO_CLUSTER_NAME and MONGO_DB
    (presumably loaded beforehand via dotenv); each falls back to the
    placeholder "OOPS" when unset, which would produce an invalid URI.
    """
    DB_USER = os.getenv("MONGO_USER", default="OOPS")
    DB_PASSWORD = os.getenv("MONGO_PASSWORD", default="OOPS")
    CLUSTER_NAME = os.getenv("MONGO_CLUSTER_NAME", default="OOPS")
    DB_NAME = os.getenv("MONGO_DB", default="OOPS")
    # client = pymongo.MongoClient("mongodb+srv://jrslagle:<password>@cluster0.ticlc.mongodb.net/<dbname>?retryWrites=true&w=majority") # example that Mongo provides
    mongo_client = pymongo.MongoClient(f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}.ticlc.mongodb.net/{DB_NAME}?retryWrites=true&w=majority") # &ssl=true") # &ssl_cert_reqs=CERT_NONE")
    return mongo_client
if __name__ == "__main__":
# Open a connection
mongo_client = create_mongodb_connection()
db = mongo_client.rgb_characters
# How many total documents are there?
doc_count = db.rgb_characters.count_documents({})
print(f"Counted {doc_count} documents on your MongoDB cluster")
# How many total Characters are there?
character_count = db.rgb_characters.count_documents({ 'name': { "$exists": True } })
print(f"There are {character_count} characters")
# How many total Items?
characters_with_items = list(db.rgb_characters.find({ 'items': { "$exists": True } }))
nested_list_of_items = [character['items'] for character in characters_with_items]
list_of_items = [item for character_items in nested_list_of_items for item in character_items]
print(f"Characters have many items: {list_of_items[:3]}")
item_count = len(list_of_items)
print(f"All characters together have a total of {item_count} items.")
# How many of the Items are weapons? How many are not?
characters_with_weapons = list(db.rgb_characters.find({ 'weapons': { "$exists": True } }))
nested_list_of_weapons = [character['weapons'] for character in characters_with_weapons]
list_of_weapons = [item for character_weapons in nested_list_of_weapons for item in character_weapons]
print(f"Characters have many weapons too: {list_of_weapons[:3]}")
weapon_count = len(list_of_weapons)
print(f"All characters together have a total of {weapon_count} weapons.")
weapon_portion = weapon_count/item_count
print(f"This means that {100*weapon_portion:.2f}% of items are weapons (and {100*(1-weapon_portion):.2f}% are not).")
# How many Items does each character have? (Return first 20 rows)
characters_with_items = list(db.rgb_characters.find({ 'items': { "$exists": True } }))
for character in characters_with_items[:20]:
print(f"{character['name']} has {len(character['items'])} items")
# How many Weapons does each character have? (Return first 20 rows)
characters_with_weapons = list(db.rgb_characters.find({ 'weapons': { "$exists": True } }))
for character in characters_with_weapons[:20]:
print(f"{character['name']} has {len(character['weapons'])} weapons")
# On average, how many Items does each Character have?
print(f"On average, each character has {item_count/character_count:.2f} items.")
# On average, how many Weapons does each character have?
print(f"On average, each character has {weapon_count/character_count:.2f} weapons.") | 633 | 0 | 23 |
891991e7edafcfc7bc9209443ed1b6dd7be12732 | 224 | py | Python | tests/lcomp.py | raff/gopyr | 924c5f7f687c9396524b709a74d342253ba85411 | [
"MIT"
] | 9 | 2018-10-20T07:34:28.000Z | 2020-04-21T03:03:54.000Z | tests/lcomp.py | raff/gopyr | 924c5f7f687c9396524b709a74d342253ba85411 | [
"MIT"
] | 2 | 2018-10-20T17:47:25.000Z | 2019-11-20T23:30:21.000Z | tests/lcomp.py | raff/gopyr | 924c5f7f687c9396524b709a74d342253ba85411 | [
"MIT"
] | null | null | null | # test list comprehension
print([x.upper() for x in ["one", "two", "three", "four", "five", "six"]])
print([x.upper() for x in ["one", "two", "three", "four", "five", "six"] if len(x) <= 4])
print([x for x in range(10)])
| 28 | 89 | 0.558036 | # test list comprehension
print([x.upper() for x in ["one", "two", "three", "four", "five", "six"]])
print([x.upper() for x in ["one", "two", "three", "four", "five", "six"] if len(x) <= 4])
print([x for x in range(10)])
| 0 | 0 | 0 |
6220f4b2ae6c6bab03a1292e62d9d64704614bc6 | 3,479 | py | Python | DipTrace/Pattern.py | kolod/DipTrace-Library-Generator | 8ab2117b3803a3284d0ba664bd0609c497701631 | [
"MIT"
] | null | null | null | DipTrace/Pattern.py | kolod/DipTrace-Library-Generator | 8ab2117b3803a3284d0ba664bd0609c497701631 | [
"MIT"
] | null | null | null | DipTrace/Pattern.py | kolod/DipTrace-Library-Generator | 8ab2117b3803a3284d0ba664bd0609c497701631 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright 2021-... Oleksandr Kolodkin <alexandr.kolodkin@gmail.com>.
# This program is distributed under the MIT license.
# Glory to Ukraine!
from typing import Tuple, Optional
import DipTrace
| 27.179688 | 72 | 0.713423 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright 2021-... Oleksandr Kolodkin <alexandr.kolodkin@gmail.com>.
# This program is distributed under the MIT license.
# Glory to Ukraine!
from typing import Tuple, Optional
import DipTrace
class Pattern(
    DipTrace.ReferenceMixin,
    DipTrace.WidthHeightMixin,
    DipTrace.ShapesMixin,
    DipTrace.PadsMixin,
    DipTrace.TypeLockedMixin,
    DipTrace.DimensionsMixin,
    DipTrace.OrientationMixin,
    DipTrace.OriginMixin,
    DipTrace.CategoryMixin
):
    """A DipTrace footprint pattern: wraps a 'Pattern' XML element and
    exposes typed accessors for its attributes and child elements."""

    tag = 'Pattern'

    # Default attribute/child values, merged with the mixins' defaults.
    defaults = {
        **DipTrace.ReferenceMixin.defaults,
        'mounting': 'None',
        **DipTrace.WidthHeightMixin.defaults,
        **DipTrace.OrientationMixin.defaults,
        'name': '',
        'value': '',
        **DipTrace.CategoryMixin.defaults,
        'origin': None,
        'default_pad': None,
        **DipTrace.TypeLockedMixin.defaults,
        'type': DipTrace.PatternType.Free,
        'parameters': (0.0, 0.0, 0.0, 0, 0),
        **DipTrace.PadsMixin.defaults,
        **DipTrace.ShapesMixin.defaults,
        **DipTrace.DimensionsMixin.defaults,
        'model': None,
    }

    @property
    def type(self) -> DipTrace.PatternType:
        """Pattern type, stored in the 'Type' attribute."""
        return DipTrace.PatternType.from_str(self.root.get('Type'))

    @type.setter
    def type(self, t: DipTrace.PatternType):
        self.root.attrib['Type'] = t.value

    @property
    def mounting(self) -> str:
        """Mounting kind, stored in the 'Mounting' attribute."""
        return self.root.get('Mounting')

    @mounting.setter
    def mounting(self, value: str):
        self.root.attrib['Mounting'] = value

    @property
    def name(self) -> str:
        """Pattern name, stored in the 'Name' child element text."""
        return self._get_first_text_or_default('Name')

    @name.setter
    def name(self, value: str):
        self._set_first_text('Name', value)

    @property
    def value(self) -> str:
        """Pattern value, stored in the 'Value' child element text."""
        return self._get_first_text_or_default('Value')

    @value.setter
    def value(self, value: str):
        self._set_first_text('Value', value)

    @property
    def default_pad_type(self) -> str:
        """Default pad type name, stored in the 'DefPad' child's
        'PadType' attribute."""
        return self._get_first_attribute_or_default('DefPad', 'PadType')

    @default_pad_type.setter
    def default_pad_type(self, name: str):
        self._get_first_or_new('DefPad').attrib['PadType'] = name

    @property
    def parameters(self) -> Tuple[float, float, float, int, int]:
        """The five type-dependent parameters (Float1..3, Int1..2)."""
        return (
            DipTrace.to_float(self.root.get('Float1')),
            DipTrace.to_float(self.root.get('Float2')),
            DipTrace.to_float(self.root.get('Float3')),
            DipTrace.to_int(self.root.get('Int1')),
            DipTrace.to_int(self.root.get('Int2')),
        )

    @parameters.setter
    def parameters(self, parameters: Tuple[float, float, float, int, int]):
        self.root.attrib['Float1'] = DipTrace.from_float(parameters[0])
        self.root.attrib['Float2'] = DipTrace.from_float(parameters[1])
        self.root.attrib['Float3'] = DipTrace.from_float(parameters[2])
        self.root.attrib['Int1'] = DipTrace.from_int(parameters[3])
        self.root.attrib['Int2'] = DipTrace.from_int(parameters[4])

    @property
    def default_pad(self) -> Optional[str]:
        """Pad type of the 'DefPad' child, or None when absent."""
        if (x := self.root.find('DefPad')) is not None:
            # BUGFIX: was ``x.attribget('PadType')``, which raises
            # AttributeError (Element has ``get``/``attrib``, not
            # ``attribget``); ``Element.get`` is the attribute lookup.
            return x.get('PadType')
        else:
            return None

    @default_pad.setter
    def default_pad(self, pad: Optional[str]):
        if pad is not None:
            self._get_first_or_new('DefPad').attrib['PadType'] = pad
        elif (x := self.root.find('DefPad')) is not None:
            # setting None removes the child entirely
            self.root.remove(x)

    @property
    def model(self) -> Optional[DipTrace.Model3D]:
        """The attached 3D model, or None when absent."""
        if (x := self.root.find('Model3D')) is not None:
            return DipTrace.Model3D(x)
        else:
            return None

    @model.setter
    def model(self, model: Optional[DipTrace.Model3D]):
        if model is not None:
            self.root.replace(self._get_first_or_new('Model3D'), model.root)
        elif (x := self.root.find('Model3D')) is not None:
            # setting None removes the child entirely
            self.root.remove(x)
| 1,857 | 1,358 | 23 |
ffa027f705c36f7faaa86f4bbebad662c39f0a67 | 943 | py | Python | modules/niktoscan.py | TheDarkAssassins/WebMap | 2fe9afd90092c9f7061a807e2335dfdecf6720dd | [
"MIT"
] | null | null | null | modules/niktoscan.py | TheDarkAssassins/WebMap | 2fe9afd90092c9f7061a807e2335dfdecf6720dd | [
"MIT"
] | null | null | null | modules/niktoscan.py | TheDarkAssassins/WebMap | 2fe9afd90092c9f7061a807e2335dfdecf6720dd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2021 Iliass Alami Qammouri
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
import conf.conf as conf
| 34.925926 | 134 | 0.634146 | #!/usr/bin/env python3
#
# Copyright (c) 2021 Iliass Alami Qammouri
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
import conf.conf as conf
def niktoScan() :
    """Interactively prompt for a target host and run a Nikto web-server
    scan, writing the report under the chosen (or default) output folder."""
    print("==============================================")
    print( conf.colored(conf.text2art ("Nikto Scan", "small"),'cyan'))
    print("==============================================")
    niktoTarget = input( conf.colored("\nEnter target: ", "green", attrs=['bold']))
    niktoOutput = input( conf.colored(f"Enter the output folder - [default: reports/Nikto/{niktoTarget}/]: ","green", attrs=['bold']))
    # NOTE(review): conf.notValid presumably re-prompts (re-enters niktoScan)
    # when the target is empty/invalid — confirm against conf.py.
    conf.notValid(niktoScan, niktoTarget)
    # resolve the output directory (falling back to reports/Nikto/<target>/)
    niktoOutput = conf.dirOutput(niktoOutput, "reports/Nikto", niktoTarget)
    conf.createDir(niktoOutput)
    # SECURITY NOTE(review): user-supplied input is interpolated directly into
    # a shell command; a crafted target/output string can inject arbitrary
    # commands. Consider subprocess.run([...], shell=False) instead.
    conf.os.system(f"nikto +h {niktoTarget} -output {niktoOutput}/nikto.txt")
    print("______________________________________________________________________")
1638bb38a57d54f0be83404e7218cc974d64fd44 | 5,896 | py | Python | figpptx/image_misc.py | Sillte/figpptx | bf5539b09eeef4e6a17bb4483f62f29d286138b2 | [
"MIT"
] | null | null | null | figpptx/image_misc.py | Sillte/figpptx | bf5539b09eeef4e6a17bb4483f62f29d286138b2 | [
"MIT"
] | null | null | null | figpptx/image_misc.py | Sillte/figpptx | bf5539b09eeef4e6a17bb4483f62f29d286138b2 | [
"MIT"
] | null | null | null | import math
import matplotlib
import numpy as np
from typing import Sequence
from PIL import Image
from io import BytesIO
from contextlib import contextmanager
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from figpptx.slide_editor import SlideTransformer, Box
def fig_to_image(fig, **kwargs):
"""Convert ``matplotlib.Figure`` to ``PIL.Image``.
Args:
kwargs (str):
Keyword parameters for ``Figure.savefig`` except ``fname``.
"""
# Ref: https://stackoverflow.com/questions/8598673/how-to-save-a-pylab-figure-into-in-memory-file-which-can-be-read-into-pil-image/8598881 # NOQA
kwargs["format"] = kwargs.get("format", "png")
kwargs["transparent"] = kwargs.get("transparent", True)
buf = BytesIO()
fig.savefig(buf, **kwargs)
buf.seek(0)
image = Image.open(buf).copy()
buf.close()
return image
def ax_to_image(ax, is_tight=True, **kwargs):
"""Convert ``matplotlib.Axes`` to ``PIL.Image``."""
kwargs["transparent"] = kwargs.get("transparent", True)
fig = ax.figure
artists = fig.get_children() # [TODO] Check ``get_axes`` is more apt?
with _store_visibility(artists):
for artist in artists:
if artist is not ax:
artist.set_visible(False)
image = fig_to_image(fig, **kwargs)
if is_tight:
image = _crop_image(image, ax)
bbox = ax.get_tightbbox(fig.canvas.get_renderer())
xmin, xmax = math.floor(bbox.xmin), math.ceil(bbox.xmax)
ymin, ymax = math.floor(bbox.ymin), math.ceil(bbox.ymax)
image = image.crop([xmin, ymin, xmax, ymax])
return image
def _get_bbox(image):
"""
(2020-01-12)
``Image.getbbox()`` does not seem to work intendedly. (Really?)
So, substitution is implemented.
"""
assert image.mode == "RGBA"
width, height = image.size
array = np.array(image)
alpha = array[:, :, -1]
ys, xs = np.where(alpha != 0)
xmin, xmax = np.min(xs) - 1, np.max(xs) + 1
ymin, ymax = np.min(ys) - 1, np.max(ys) + 1
xmin = np.clip(xmin, 0, width)
xmax = np.clip(xmax, 0, width)
ymin = np.clip(ymin, 0, height)
ymax = np.clip(ymax, 0, height)
return xmin, ymin, xmax, ymax
def _crop_image(fig_image, artist):
"""Crop the ``fig_image`` so that only ROI of ``target`` remains."""
width, height = fig_image.size
from figpptx import artist_misc
transformer = SlideTransformer(0, 0, size=(width, height), offset=(0, 0))
if isinstance(artist, Axes):
fig = artist_misc.to_figure(artist)
renderer = fig.canvas.get_renderer()
bbox = artist.get_tightbbox(renderer)
vertices = transformer.transform(bbox)
box = Box.from_vertices(vertices)
elif isinstance(artist, Artist):
box = transformer.get_box(artist)
elif isinstance(artist, Sequence):
boxes = [transformer.get_box(elem) for elem in artist]
box = Box.union(boxes)
else:
raise ValueError("Argument Error.", artist)
xmin, xmax = math.floor(box.left), math.ceil(box.left + box.width)
ymin, ymax = math.floor(box.top), math.ceil(box.top + box.height)
xmin, xmax = max(0, xmin), min(xmax, width - 1)
ymin, ymax = max(0, ymin), min(ymax, height - 1)
image = fig_image.crop([xmin, ymin, xmax + 1, ymax + 1])
return image
@contextmanager
if __name__ == "__main__":
pass
| 30.391753 | 150 | 0.637042 | import math
import matplotlib
import numpy as np
from typing import Sequence
from PIL import Image
from io import BytesIO
from contextlib import contextmanager
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from figpptx.slide_editor import SlideTransformer, Box
def to_image(arg, **kwargs):
    """Convert ``arg`` to a ``PIL.Image``.

    Accepts a ``matplotlib`` Figure, an Axes, a single Artist, a sequence
    of Artists, or an existing ``PIL.Image`` (which is copied). Keyword
    arguments are forwarded to the matching converter.

    Raises:
        ValueError: if ``arg`` (or an element of a sequence) is not
            convertible.
    """
    if isinstance(arg, matplotlib.figure.Figure):
        return fig_to_image(arg, **kwargs)
    elif isinstance(arg, Axes):
        is_tight = kwargs.pop("is_tight", True)
        return ax_to_image(arg, is_tight, **kwargs)
    elif isinstance(arg, Artist):
        # BUGFIX: kwargs (including is_tight) were previously dropped here.
        is_tight = kwargs.pop("is_tight", True)
        return artists_to_image(arg, is_tight, **kwargs)
    elif isinstance(arg, Image.Image):
        return arg.copy()
    if isinstance(arg, Sequence):
        if all(isinstance(elem, Artist) for elem in arg):
            # BUGFIX: forward kwargs for sequences of artists as well.
            is_tight = kwargs.pop("is_tight", True)
            return artists_to_image(arg, is_tight, **kwargs)
        else:
            raise ValueError("All elements must be ``Artist``.")
    raise ValueError(f"``{arg}`` cannot be converted to image.")
def fig_to_image(fig, **kwargs):
    """Render a ``matplotlib.Figure`` into a ``PIL.Image``.

    Args:
        kwargs (str):
            Keyword parameters for ``Figure.savefig`` except ``fname``;
            ``format`` defaults to "png" and ``transparent`` to True.
    """
    # Ref: https://stackoverflow.com/questions/8598673/how-to-save-a-pylab-figure-into-in-memory-file-which-can-be-read-into-pil-image/8598881 # NOQA
    kwargs.setdefault("format", "png")
    kwargs.setdefault("transparent", True)
    buffer = BytesIO()
    fig.savefig(buffer, **kwargs)
    buffer.seek(0)
    rendered = Image.open(buffer).copy()
    buffer.close()
    return rendered
def ax_to_image(ax, is_tight=True, **kwargs):
    """Convert ``matplotlib.Axes`` to ``PIL.Image``."""
    kwargs["transparent"] = kwargs.get("transparent", True)
    fig = ax.figure
    artists = fig.get_children()  # [TODO] Check ``get_axes`` is more apt?
    with _store_visibility(artists):
        # hide every other top-level artist so only ``ax`` is rendered
        for artist in artists:
            if artist is not ax:
                artist.set_visible(False)
        image = fig_to_image(fig, **kwargs)
    if is_tight:
        image = _crop_image(image, ax)
        # NOTE(review): the second crop below (tight bbox of ``ax``) looks
        # redundant after _crop_image — confirm whether both are intended.
        bbox = ax.get_tightbbox(fig.canvas.get_renderer())
        xmin, xmax = math.floor(bbox.xmin), math.ceil(bbox.xmax)
        ymin, ymax = math.floor(bbox.ymin), math.ceil(bbox.ymax)
        image = image.crop([xmin, ymin, xmax, ymax])
    return image
def artists_to_image(artists, is_tight=True, **kwargs):
    """Render one or more ``Artist`` objects to a ``PIL.Image``.

    All artists must belong to the same Figure; every other leaf artist of
    that figure is hidden while rendering.

    Raises:
        ValueError: if ``artists`` is empty, has no Figure, or the artists
            belong to different Figures.
    """
    if isinstance(artists, Artist):
        artists = [artists]
    if not artists:
        raise ValueError("``Empty Collection of Artists.``")
    # Check whether the all belongs to the same figure.
    figures = [artist.get_figure() for artist in artists]
    figures = [figure for figure in figures if figure]
    figures = set(figures)
    if not figures:
        raise ValueError("Figure does not exist.")
    elif 1 < len(figures):
        # BUGFIX: the exception was constructed but never raised.
        raise ValueError("All the ``Artists`` must belong to the same Figure.")
    figure = list(figures)[0]

    # leaf artists in the requested subtrees stay visible; all others hide
    target_pairs = sum([_get_artist_pairs(artist) for artist in artists], [])
    target_ids = {id(pair[0]) for pair in target_pairs}
    pairs = _get_artist_pairs(figure)
    leaf_artists = [artist for artist, has_child in pairs if not has_child]

    with _store_visibility(leaf_artists):
        for artist in leaf_artists:
            if id(artist) not in target_ids:
                artist.set_visible(False)
        image = fig_to_image(figure, **kwargs)
    if is_tight:
        image = _crop_image(image, artists)
    return image
def _get_artist_pairs(root):
    """Return ``(artist, has_children)`` pairs for the subtree rooted at
    ``root``, children listed before their parent (post-order)."""
    pairs = []

    def visit(node):
        kids = node.get_children()
        for kid in kids:
            visit(kid)
        pairs.append((node, bool(kids)))

    visit(root)
    return pairs
def _get_bbox(image):
    """
    (2020-01-12)
    ``Image.getbbox()`` does not seem to work intendedly. (Really?)
    So, substitution is implemented: return (xmin, ymin, xmax, ymax) of
    the non-transparent region, padded by one pixel and clipped to the
    image bounds.
    """
    assert image.mode == "RGBA"
    width, height = image.size
    alpha = np.array(image)[:, :, -1]
    ys, xs = np.nonzero(alpha != 0)
    xmin = np.clip(np.min(xs) - 1, 0, width)
    xmax = np.clip(np.max(xs) + 1, 0, width)
    ymin = np.clip(np.min(ys) - 1, 0, height)
    ymax = np.clip(np.max(ys) + 1, 0, height)
    return xmin, ymin, xmax, ymax
def _crop_image(fig_image, artist):
    """Crop the ``fig_image`` so that only ROI of ``artist`` remains.

    ``artist`` may be an Axes, a single Artist, or a sequence of Artists;
    its bounding box is computed in image coordinates via SlideTransformer.
    """
    width, height = fig_image.size
    from figpptx import artist_misc
    transformer = SlideTransformer(0, 0, size=(width, height), offset=(0, 0))
    if isinstance(artist, Axes):
        fig = artist_misc.to_figure(artist)
        renderer = fig.canvas.get_renderer()
        bbox = artist.get_tightbbox(renderer)
        vertices = transformer.transform(bbox)
        box = Box.from_vertices(vertices)
    elif isinstance(artist, Artist):
        box = transformer.get_box(artist)
    elif isinstance(artist, Sequence):
        # union of the individual boxes covers all artists
        boxes = [transformer.get_box(elem) for elem in artist]
        box = Box.union(boxes)
    else:
        raise ValueError("Argument Error.", artist)
    # round outward to whole pixels, then clamp to the image bounds
    xmin, xmax = math.floor(box.left), math.ceil(box.left + box.width)
    ymin, ymax = math.floor(box.top), math.ceil(box.top + box.height)
    xmin, xmax = max(0, xmin), min(xmax, width - 1)
    ymin, ymax = max(0, ymin), min(ymax, height - 1)
    image = fig_image.crop([xmin, ymin, xmax + 1, ymax + 1])
    return image
@contextmanager
def _store_visibility(artists):
stored = dict()
for artist in artists:
stored[id(artist)] = artist.get_visible()
def _restore():
for artist in artists:
artist.set_visible(stored[id(artist)])
try:
yield
except Exception as e:
_restore()
raise e
else:
_restore()
# Import-only module: no demo or CLI behavior is provided.
if __name__ == "__main__":
    pass
c2ce827b0788af8553678c06ac399c030666ebb9 | 20,135 | py | Python | compare.py | matt-noonan/retypd-data | 6563207da5704b2171b1e9eca875c6f95268c569 | [
"MIT"
] | 11 | 2016-06-23T01:39:08.000Z | 2021-06-21T17:07:13.000Z | compare.py | matt-noonan/retypd-data | 6563207da5704b2171b1e9eca875c6f95268c569 | [
"MIT"
] | null | null | null | compare.py | matt-noonan/retypd-data | 6563207da5704b2171b1e9eca875c6f95268c569 | [
"MIT"
] | 3 | 2016-06-14T07:59:28.000Z | 2021-08-30T07:51:38.000Z | import sys
baseline = sys.argv[1]
benchmark_decls = sys.argv[2]
benchmark_map = sys.argv[3]
baseline_types = {}
with open(baseline) as f:
for line in f:
if line.strip() == "":
continue
if line.strip()[0] == '#':
continue
if line.startswith("type "):
info = line[len("type "):].strip()
(typeid, defn) = scan_number(info)
(t,rest) = scan_type(defn[1:])
assert rest == ""
baseline_types[typeid] = t
benchmark_types = {}
with open(benchmark_decls) as f:
for line in f:
if line.strip() == "":
continue
if line.strip()[0] == '#':
continue
(typeid, rest) = scan_number(line)
(t, rest) = scan_type(rest[1:])
benchmark_types[typeid] = t
## Compare globals
baseline_globals = {}
with open(baseline) as f:
for line in f:
if "<global>" in line:
parts = line.strip().split(" ")
baseline_globals[parts[0]] = int(parts[2])
benchmark_globals = {}
with open(benchmark_map) as f:
for line in f:
if line.startswith(" @"):
parts = line[2:].strip().split(" ")
benchmark_globals[parts[0]] = int(parts[1])
gdists = []
gsizes = []
gconsv = []
gptacc = []
for addr in baseline_globals:
base_ty = baseline_types[baseline_globals[addr]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types[benchmark_globals[addr]]
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
base_ty = to_structural(base_ty)
bench_ty = to_structural(bench_ty)
gdists.append(dist(base_ty, bench_ty))
gsizes.append(interval_size(bench_ty))
gconsv.append(conservativeness(base_ty, bench_ty))
gptacc.append(ptr_acc(base_ty, bench_ty))
dists = []
sizes = []
consv = []
ptacc = []
if gdists != []:
print "GLOBALS:"
print "average dist =", float(sum(gdists)) / len(gdists)
print "average size =", float(sum(gsizes)) / len(gsizes)
print "conservative =", 100. * float(sum(gconsv)) / len(gconsv)
print "ptr accuracy =", 100. * float(sum(gptacc)) / len(gptacc)
print
dists += gdists
sizes += gsizes
consv += gconsv
ptacc += gptacc
baseline_rets = {}
with open(baseline) as f:
for line in f:
if line[0] == "@":
funaddr = line.strip()
continue
if "<final>" in line:
parts = line.strip().split(" ")
baseline_rets[funaddr] = int(parts[2])
rty = None
benchmark_rets = {}
with open(benchmark_map) as f:
for line in f:
if not line.startswith(" "):
parts = line.strip().split(" ")
if len(parts) != 2 or parts[1] in "VXS":
rty = None
continue
rty = int(parts[1])
continue
if line.startswith(" ") and line.strip().endswith(":"):
addr = "@" + hex(int(line.strip()[:-1],16))[2:]
if rty != None:
benchmark_rets[addr] = rty
rty = None
rdists = []
rsizes = []
rconsv = []
rptacc = []
for addr in baseline_rets:
base_ty = baseline_types[baseline_rets[addr]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types[benchmark_rets[addr]]
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
continue
rdists.append(dist(base_ty, bench_ty))
rsizes.append(interval_size(bench_ty))
rconsv.append(conservativeness(base_ty, bench_ty))
rptacc.append(ptr_acc(base_ty, bench_ty))
if rdists != []:
print "RETURNS:"
print "average dist =", float(sum(rdists)) / len(rdists)
print "average size =", float(sum(rsizes)) / len(rsizes)
print "conservative =", 100. * float(sum(rconsv)) / len(rconsv)
print "ptr accuracy =", 100. * float(sum(rptacc)) / len(rptacc)
print
dists += rdists
sizes += rsizes
consv += rconsv
ptacc += rptacc
baseline_params = {}
with open(baseline) as f:
for line in f:
if line[0] == "@":
funaddr = line.strip()
baseline_params[funaddr] = {}
continue
if "<initial>" in line:
parts = line.strip().split(" ")
baseline_params[funaddr][parts[0]] = int(parts[2])
benchmark_params = {}
get_next = False
with open(benchmark_map) as f:
for line in f:
if not line.startswith(" "):
addr = None
parts = line.strip().split(" ")
if len(parts) == 2:
get_next = True
continue
continue
if line.startswith(" ") and line.strip().endswith(":"):
addr = None
if get_next:
addr = "@" + hex(int(line.strip()[:-1],16))[2:]
benchmark_params[addr] = {}
get_next = False
continue
if line.startswith(" ") and addr != None:
parts = line.strip().split(" ")
benchmark_params[addr][parts[0]] = int(parts[1])
pdists = []
psizes = []
pconsv = []
pptacc = []
bps = sorted(baseline_params.keys())
for addr in bps:
for loc in baseline_params[addr]:
base_ty = baseline_types[baseline_params[addr][loc]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types[benchmark_params[addr][loc]]
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
continue
pdists.append(dist(base_ty, bench_ty))
psizes.append(interval_size(bench_ty))
pconsv.append(conservativeness(base_ty, bench_ty))
pptacc.append(ptr_acc(base_ty, bench_ty))
if pdists != []:
print "PARAMETERS:"
print "average dist =", float(sum(pdists)) / len(pdists)
print "average size =", float(sum(psizes)) / len(psizes)
print "conservative =", 100. * float(sum(pconsv)) / len(pconsv)
print "ptr accuracy =", 100. * float(sum(pptacc)) / len(pptacc)
print
dists += pdists
sizes += psizes
consv += pconsv
ptacc += pptacc
baseline_locals = {}
with open(baseline) as f:
for line in f:
if line[0] == "@":
funaddr = line.strip()
baseline_locals[funaddr] = {}
continue
if "<any>" in line:
parts = line.strip().split(" ")
baseline_locals[funaddr][parts[0]] = int(parts[2])
benchmark_locals = {}
get_next = False
with open(benchmark_map) as f:
for line in f:
if not line.startswith(" "):
addr = None
parts = line.strip().split(" ")
if len(parts) == 2:
get_next = True
continue
continue
if line.startswith(" ") and line.strip().endswith(":"):
if get_next:
addr = "@" + hex(int(line.strip()[:-1],16))[2:]
benchmark_locals[addr] = {}
get_next = False
continue
if line.startswith(" ") and addr != None:
parts = line.strip().split(" ")
types = map(lambda x: int(x), parts[1:])
try:
benchmark_locals[addr][parts[0]] += types
except KeyError:
benchmark_locals[addr][parts[0]] = types
ldists = []
lsizes = []
lconsv = []
lptacc = []
for addr in baseline_locals:
for loc in baseline_locals[addr]:
base_ty = baseline_types[baseline_locals[addr][loc]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types_to_union(benchmark_locals[addr][loc])
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
continue
ldists.append(dist(base_ty, bench_ty))
lsizes.append(interval_size(bench_ty))
lconsv.append(conservativeness(base_ty, bench_ty))
lptacc.append(ptr_acc(base_ty, bench_ty))
if ldists != []:
print "LOCALS:"
print "average dist =", float(sum(ldists)) / len(ldists)
print "average size =", float(sum(lsizes)) / len(lsizes)
print "conservative =", 100. * float(sum(lconsv)) / len(lconsv)
print "ptr accuracy =", 100. * float(sum(lptacc)) / len(lptacc)
print
dists += ldists
sizes += lsizes
consv += lconsv
ptacc += lptacc
if dists != []:
print "TOTAL:"
print " matched entities:", len(dists)
print " average TIE distance:", float(sum(dists)) / len(dists)
print " average TIE interval:", float(sum(sizes)) / len(sizes)
print " average conservative:", float(sum(consv)) / len(consv)
print " average ptr accuracy:", float(sum(ptacc)) / len(ptacc)
print csv(benchmark_map.split(".map")[0].split("/")[-1], [(gdists, gsizes, gconsv, gptacc), (ldists, lsizes, lconsv, lptacc), (rdists, rsizes, rconsv, rptacc), (pdists, psizes, pconsv, pptacc)])
| 29.785503 | 195 | 0.535833 | import sys
baseline = sys.argv[1]
benchmark_decls = sys.argv[2]
benchmark_map = sys.argv[3]
def level(x):
    """Return the lattice level of type *x* as a float.

    0.0 = TOP, 4.0 = BOT; in between, reg < num/ptr/float < int/uint/code_t.
    A union (list) takes the minimum level of its members; any struct
    (dict) sits at level 1.
    """
    if isinstance(x,list):
        return min(map(level,x))
    if isinstance(x,dict):
        return 1.
    if x == "code":
        return 1.
    if x == "TOP":
        return 0.
    if x.startswith("reg"):
        return 1.
    if x.startswith("num") or x.startswith("ptr") or x in ["float32_t", "float64_t"]:
        return 2.
    if x.startswith("uint") or x.startswith("int") or x == "code_t":
        # relative to TIE, code_t is moved to a subtype of ptr(*)
        return 3.
    if x == "BOT":
        return 4.
    # NOTE(review): an unrecognized type name falls through here -- the
    # name is printed and None is (implicitly) returned, which would break
    # float arithmetic in callers.  Presumably unreachable; confirm.
    print ">>> " + x
def nbits(x):
    """Return the size of type *x* in bits.

    Structs take the largest ``offset*8 + field size``; unions the
    largest member.  Scalar widths are parsed out of names such as
    ``reg32_t`` / ``num16_t`` / ``int8_t`` / ``uint64_t``.
    """
    if isinstance(x,dict):
        maxs = []
        for offset in x:
            maxs.append(offset * 8 + nbits(x[offset]))
        return max(maxs)
    if isinstance(x,list):
        return max(map(nbits, x))
    if x.startswith("ptr") or x == "code_t" or x == "float32_t":
        return 32
    if x == "float64_t":
        return 64
    if x in ["TOP", "BOT", "code"]:
        return 0
    # Strip the 3-char kind prefix ("reg"/"num"/"int") before "_t"; the
    # extra leading "t" strip handles the 4-char "uint" prefix.
    part = x.split("_t")[0][3:]
    if part[0] == "t":
        part = part[1:]
    try:
        return int(part)
    except ValueError:
        print part, x
        raise ValueError
def dist(x, y):
    """Return the TIE-style distance between two types, in [0, 4].

    Unions compare by the closest member pair; a struct vs. scalar is
    the scalar's level plus its distance to a trivial one-field struct;
    incompatible scalars (different sizes, ptr vs. non-ptr, float vs.
    integer, signed vs. unsigned) are maximally distant (4.0).
    """
    if x == y:
        return 0.
    if isinstance(y,list):
        if isinstance(x,list):
            return min([dist(xm,ym) for xm in x for ym in y])
        else:
            return min(map(lambda t: dist(x,t), y))
    if isinstance(x,list):
        return min(map(lambda t: dist(y,t), x))
    if isinstance(x,dict) and not isinstance(y,dict):
        top = {}
        top[0] = "TOP"
        return level(y) + dist(top, x)
    if isinstance(y,dict) and not isinstance(x,dict):
        top = {}
        top[0] = "TOP"
        return level(x) + dist(top, y)
    if isinstance(x,dict) and isinstance(y,dict):
        return struct_dist(x,y)
    if x in ["TOP", "BOT"] or y in ["TOP", "BOT"]:
        return abs(level(x) - level(y))
    if nbits(x) != nbits(y):
        # different sizes -> max distance
        return 4.
    if x.startswith("reg") or y.startswith("reg"):
        return abs(level(x) - level(y))
    if x.startswith("ptr") and y.startswith("ptr"):
        return 0.
    if x.startswith("ptr") or y.startswith("ptr"):
        # code_t counts as a near-pointer (see level()); anything else
        # compared against a pointer is maximally distant.
        if x == "code_t" or y == "code_t":
            return 1.
        return 4.
    if x == "code_t" or y == "code_t":
        return 4.
    if x.startswith("float") or y.startswith("float"):
        return 4.
    # x and y are both numeric types
    if x.startswith("uint") and y.startswith("int"):
        return 4.
    if y.startswith("uint") and x.startswith("int"):
        return 4.
    return abs(level(x) - level(y))
def ptr_depth(t):
    """Return the pointer-nesting depth of *t* (unions: deepest member)."""
    if isinstance(t,dict):
        return 0.
    if isinstance(t,list):
        return max(map(ptr_depth,t))
    if t.startswith("ptr"):
        return 1. + ptr_depth(t[len("ptr("):-1])
    return 0.
def is_ptr(t):
    """True if *t* is a pointer-like scalar (``ptr(...)`` or ``code_t``)."""
    return isinstance(t, str) and (t.startswith("ptr") or t == "code_t")
def ptrs(x):
    """Return the pointer-like members of *x* (scalar or union) as a list."""
    if not isinstance(x, list):
        ts = [x]
    else:
        ts = x
    return filter(is_ptr, ts)
def nonptrs(x):
    """Return the non-pointer members of *x* (scalar or union) as a list."""
    if not isinstance(x, list):
        ts = [x]
    else:
        ts = x
    return filter(lambda t: not is_ptr(t), ts)
def get_ptt(t):
    """Return the pointed-to type of pointer *t* (``code_t`` -> ``code``)."""
    if t == "code_t":
        return "code"
    return t[len("ptr("):-1]
def ptr_acc(true_type, computed_type):
    """Return the pointer accuracy of *computed_type* against *true_type*.

    Walks the pointer levels of the true type; each level at which the
    computed type also offers a pointer counts as matched.  The result is
    matched/total in [0, 1].
    """
    if true_type == computed_type:
        return 1.
    if isinstance(true_type, dict):
        # For struct truths, any non-pointer component counts as correct.
        if nonptrs(computed_type) == []:
            return 0.
        return 1.
    total_ptr_levels = 0.
    matched_ptr_levels = 0.
    if isinstance(computed_type, list):
        cts = list(computed_type)
    else:
        cts = [computed_type]
    true = true_type
    while is_ptr(true):
        true = get_ptt(true)
        total_ptr_levels += 1.
        cptrs = ptrs(cts)
        if cptrs != []:
            matched_ptr_levels += 1.
            cts = map(get_ptt, cptrs)
    if nonptrs(cts) != [] and matched_ptr_levels == total_ptr_levels:
        # computed_type matches true_type ptr depth exactly
        matched_ptr_levels += 1.
        total_ptr_levels += 1.
    # NOTE(review): if true_type is a non-pointer scalar while every
    # computed member is a pointer, both counters stay 0 and this divides
    # by zero (ZeroDivisionError) -- confirm whether that case can occur.
    return matched_ptr_levels / total_ptr_levels
def interval_size(x):
    """Return the imprecision ("interval size") of *x*: 0 for exact types,
    up to 4 for TOP; reg/num scalars sit in between."""
    if isinstance(x, dict):
        if 0 in x.keys() and x[0] == "reg8_t":
            # equivalent of regNN_t for weird sizes of NN
            return 2.0
        return 0.
    if isinstance(x, list):
        return max(map(interval_size, x))
    if x == "TOP":
        return 4.
    if x.startswith("reg"):
        return 3.
    if x.startswith("num"):
        return 2.
    return 0.
def conservativeness(true_type, computed_type):
    """Return 1.0 if *computed_type* is a conservative (over-)approximation
    of *true_type*, else 0.0.  Unions are conservative if any member is;
    structs if every common field is.
    """
    if true_type == computed_type:
        return 1.
    if computed_type == "TOP" or true_type == "BOT":
        return 1.
    if true_type == "TOP" or computed_type == "BOT":
        return 0.
    if isinstance(computed_type, list):
        return max(map(lambda t: conservativeness(true_type, t), computed_type))
    if isinstance(computed_type, dict) and isinstance(true_type, dict):
        if 0 in computed_type.keys() and computed_type[0] == "reg8_t":
            # equivalent of regNN_t for weird sizes of NN
            return 1.0
        cons = [1.0]
        for offset in computed_type:
            try:
                cons.append(conservativeness(true_type[offset], computed_type[offset]))
            except KeyError:
                # structural subtype
                continue
        return min(cons)
    if isinstance(computed_type, dict) or isinstance(true_type, dict):
        return 0.
    if dist(true_type, computed_type) < 4:
        # if the two scalar types are comparable...
        if dist(true_type, computed_type) == 0:
            return 1.
        if computed_type.startswith("reg"):
            return 1.
        if computed_type.startswith("num") and (true_type.startswith("int") or true_type.startswith("uint")):
            return 1.
    else:
        # if the two scalar types are not comparable, check size compatibility
        if nbits(true_type) < nbits(computed_type):
            if computed_type.startswith("reg"):
                return 1.
            if computed_type.startswith("num") and (true_type.startswith("num") or true_type.startswith("int") or true_type.startswith("uint")):
                return 1.
        return 0.
    return 0.
def struct_dist(x, y):
    """Return the distance between two structs: a field-count term plus the
    normalized per-offset field distances (missing fields count as 4)."""
    d = abs( (1. - 1. / len(x)) - (1. - 1. / len(y)) )
    fds = []
    # Compare every offset present in either struct (py2: keys() are lists).
    for offset in set(x.keys() + y.keys()):
        try:
            xf = x[offset]
        except KeyError:
            xf = None
        try:
            yf = y[offset]
        except KeyError:
            yf = None
        if xf == None or yf == None:
            fds.append(4.)
        else:
            fds.append(dist(xf,yf))
    return d + sum(fds) / (4. * len(fds))
def scan_until_matched_paren(x):
    """Split *x* (text following an already-consumed '(') at the matching
    ')' and return ``(inside, after)``.  Asserts if unbalanced."""
    depth = 1
    offset = 0
    for c in x:
        if c == '(':
            depth += 1
        if c == ')':
            depth -= 1
        if depth == 0:
            return (x[0:offset], x[offset+1:])
        offset += 1
    assert False
def scan_number(x):
    """Consume a leading decimal integer from *x*; return ``(int, rest)``.
    Raises ValueError (via int("")) if *x* does not start with a digit."""
    num = ""
    rest = x
    while rest != "" and rest[0] in "0123456789":
        num += rest[0]
        rest = rest[1:]
    return (int(num), rest)
def scan_type(x):
    """Parse one type expression from the front of *x*.

    Returns ``(type, rest)`` where *type* is a string for scalars, a dict
    (offset -> field type) for structs/arrays, or a list for unions.
    """
    if x.startswith("TOP") or x.startswith("BOT"):
        return (x[0:3], x[3:])
    if x.startswith("code_t"):
        return (x[0:6], x[6:])
    if x.startswith("code"):
        return (x[0:4], x[4:])
    if x.startswith("float"):
        return (x[0:len("floatNN_t")], x[len("floatNN_t"):])
    if x.startswith("ptr("):
        (ptts, rest) = scan_until_matched_paren(x[4:])
        (ptt, _) = scan_type(ptts)
        # Only the pointer structure is kept; any non-pointer pointee is
        # collapsed to "*" (structs/dicts have no startswith, hence the
        # AttributeError fallback).
        try:
            if not ptt.startswith("ptr"):
                ptt = "*"
        except AttributeError:
            ptt = "*"
        return ("ptr(" + ptt + ")", rest)
        #return ("ptr", rest)
    if x.startswith("reg") or x.startswith("num") or x.startswith("int") or x.startswith("uint"):
        sz = 3
        if x.startswith("uint"):
            sz = 4
        kind = x[0:sz]
        (bits, rest) = scan_number(x[sz:])
        assert rest.startswith("_t")
        return (kind + str(bits) + "_t", rest[2:])
    if x.startswith("struct("):
        # struct(off:type,off:type,...) -> {off: type, ...}
        rest = x[len("struct("):]
        (rest, tail) = scan_until_matched_paren(rest)
        fields = {}
        while rest != "":
            (offset, rest) = scan_number(rest)
            assert rest[0] == ':'
            (t, rest) = scan_type(rest[1:])
            assert rest == "" or rest[0] == ','
            fields[offset] = t
            if rest != "":
                rest = rest[1:]
        if (len(fields) == 0):
            return ("TOP", tail)
        return (fields, tail)
    if x.startswith("array("):
        # array(type,count) -> struct with one field per element.
        rest = x[len("array("):]
        (rest, tail) = scan_until_matched_paren(rest)
        (t, rest) = scan_type(rest)
        (qty, _) = scan_number(rest[1:])
        arr = {}
        arr[0] = t
        # NOTE(review): max([qty, 0x1000]) *inflates* small arrays to at
        # least 0x1000 elements; min() may have been intended -- confirm.
        arrsz = max([qty,0x1000])
        if t == "reg8_t":
            arrsz = 1
        step = nbits(t) / 8
        for i in range(0,arrsz):
            arr[i * step] = t
        return (arr, tail)
    if x.startswith("union("):
        # union(type type ...) -> list of member types.
        rest = x[len("union("):]
        (rest, tail) = scan_until_matched_paren(rest)
        types = []
        while rest != "":
            (t, rest) = scan_type(rest)
            types.append(t)
            if rest != "":
                rest = rest[1:]
        return (types, tail) # :-|
def to_structural(x):
    """Wrap scalar *x* as a single-field struct ``{0: x}``; TOP/BOT,
    structs, and (recursively) unions are passed through."""
    if x in ["TOP", "BOT"]:
        return x
    if isinstance(x, dict):
        return x
    if isinstance(x, list):
        return map(to_structural, x)
    return {0:x}
baseline_types = {}
with open(baseline) as f:
for line in f:
if line.strip() == "":
continue
if line.strip()[0] == '#':
continue
if line.startswith("type "):
info = line[len("type "):].strip()
(typeid, defn) = scan_number(info)
(t,rest) = scan_type(defn[1:])
assert rest == ""
baseline_types[typeid] = t
benchmark_types = {}
with open(benchmark_decls) as f:
for line in f:
if line.strip() == "":
continue
if line.strip()[0] == '#':
continue
(typeid, rest) = scan_number(line)
(t, rest) = scan_type(rest[1:])
benchmark_types[typeid] = t
## Compare globals
baseline_globals = {}
with open(baseline) as f:
for line in f:
if "<global>" in line:
parts = line.strip().split(" ")
baseline_globals[parts[0]] = int(parts[2])
benchmark_globals = {}
with open(benchmark_map) as f:
for line in f:
if line.startswith(" @"):
parts = line[2:].strip().split(" ")
benchmark_globals[parts[0]] = int(parts[1])
def comparable_size(x, y):
    """True if *x* and *y* can be meaningfully compared by size: equal bit
    widths, either side TOP/BOT, both structs, or any union member matches."""
    if nbits(x) == nbits(y):
        return True
    if x in ["BOT", "TOP"]:
        return True
    if y in ["BOT", "TOP"]:
        return True
    if isinstance(x, dict) and isinstance(y, dict):
        return True
    if isinstance(x, list):
        return any(map(lambda t: comparable_size(t, y), x))
    if isinstance(y, list):
        return any(map(lambda t: comparable_size(t, x), y))
    return False
gdists = []
gsizes = []
gconsv = []
gptacc = []
for addr in baseline_globals:
base_ty = baseline_types[baseline_globals[addr]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types[benchmark_globals[addr]]
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
base_ty = to_structural(base_ty)
bench_ty = to_structural(bench_ty)
gdists.append(dist(base_ty, bench_ty))
gsizes.append(interval_size(bench_ty))
gconsv.append(conservativeness(base_ty, bench_ty))
gptacc.append(ptr_acc(base_ty, bench_ty))
dists = []
sizes = []
consv = []
ptacc = []
if gdists != []:
print "GLOBALS:"
print "average dist =", float(sum(gdists)) / len(gdists)
print "average size =", float(sum(gsizes)) / len(gsizes)
print "conservative =", 100. * float(sum(gconsv)) / len(gconsv)
print "ptr accuracy =", 100. * float(sum(gptacc)) / len(gptacc)
print
dists += gdists
sizes += gsizes
consv += gconsv
ptacc += gptacc
baseline_rets = {}
with open(baseline) as f:
for line in f:
if line[0] == "@":
funaddr = line.strip()
continue
if "<final>" in line:
parts = line.strip().split(" ")
baseline_rets[funaddr] = int(parts[2])
rty = None
benchmark_rets = {}
with open(benchmark_map) as f:
for line in f:
if not line.startswith(" "):
parts = line.strip().split(" ")
if len(parts) != 2 or parts[1] in "VXS":
rty = None
continue
rty = int(parts[1])
continue
if line.startswith(" ") and line.strip().endswith(":"):
addr = "@" + hex(int(line.strip()[:-1],16))[2:]
if rty != None:
benchmark_rets[addr] = rty
rty = None
rdists = []
rsizes = []
rconsv = []
rptacc = []
for addr in baseline_rets:
base_ty = baseline_types[baseline_rets[addr]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types[benchmark_rets[addr]]
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
continue
rdists.append(dist(base_ty, bench_ty))
rsizes.append(interval_size(bench_ty))
rconsv.append(conservativeness(base_ty, bench_ty))
rptacc.append(ptr_acc(base_ty, bench_ty))
if rdists != []:
print "RETURNS:"
print "average dist =", float(sum(rdists)) / len(rdists)
print "average size =", float(sum(rsizes)) / len(rsizes)
print "conservative =", 100. * float(sum(rconsv)) / len(rconsv)
print "ptr accuracy =", 100. * float(sum(rptacc)) / len(rptacc)
print
dists += rdists
sizes += rsizes
consv += rconsv
ptacc += rptacc
baseline_params = {}
with open(baseline) as f:
for line in f:
if line[0] == "@":
funaddr = line.strip()
baseline_params[funaddr] = {}
continue
if "<initial>" in line:
parts = line.strip().split(" ")
baseline_params[funaddr][parts[0]] = int(parts[2])
benchmark_params = {}
get_next = False
with open(benchmark_map) as f:
for line in f:
if not line.startswith(" "):
addr = None
parts = line.strip().split(" ")
if len(parts) == 2:
get_next = True
continue
continue
if line.startswith(" ") and line.strip().endswith(":"):
addr = None
if get_next:
addr = "@" + hex(int(line.strip()[:-1],16))[2:]
benchmark_params[addr] = {}
get_next = False
continue
if line.startswith(" ") and addr != None:
parts = line.strip().split(" ")
benchmark_params[addr][parts[0]] = int(parts[1])
pdists = []
psizes = []
pconsv = []
pptacc = []
bps = sorted(baseline_params.keys())
for addr in bps:
for loc in baseline_params[addr]:
base_ty = baseline_types[baseline_params[addr][loc]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types[benchmark_params[addr][loc]]
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
continue
pdists.append(dist(base_ty, bench_ty))
psizes.append(interval_size(bench_ty))
pconsv.append(conservativeness(base_ty, bench_ty))
pptacc.append(ptr_acc(base_ty, bench_ty))
if pdists != []:
print "PARAMETERS:"
print "average dist =", float(sum(pdists)) / len(pdists)
print "average size =", float(sum(psizes)) / len(psizes)
print "conservative =", 100. * float(sum(pconsv)) / len(pconsv)
print "ptr accuracy =", 100. * float(sum(pptacc)) / len(pptacc)
print
dists += pdists
sizes += psizes
consv += pconsv
ptacc += pptacc
baseline_locals = {}
with open(baseline) as f:
for line in f:
if line[0] == "@":
funaddr = line.strip()
baseline_locals[funaddr] = {}
continue
if "<any>" in line:
parts = line.strip().split(" ")
baseline_locals[funaddr][parts[0]] = int(parts[2])
benchmark_locals = {}
get_next = False
with open(benchmark_map) as f:
for line in f:
if not line.startswith(" "):
addr = None
parts = line.strip().split(" ")
if len(parts) == 2:
get_next = True
continue
continue
if line.startswith(" ") and line.strip().endswith(":"):
if get_next:
addr = "@" + hex(int(line.strip()[:-1],16))[2:]
benchmark_locals[addr] = {}
get_next = False
continue
if line.startswith(" ") and addr != None:
parts = line.strip().split(" ")
types = map(lambda x: int(x), parts[1:])
try:
benchmark_locals[addr][parts[0]] += types
except KeyError:
benchmark_locals[addr][parts[0]] = types
def benchmark_types_to_union(types):
    """Resolve a list of benchmark type ids to one type: BOT for none, the
    single type for one, otherwise a flattened union (list) of members."""
    global benchmark_types
    if len(types) == 0:
        return "BOT"
    if len(types) == 1:
        return benchmark_types[types[0]]
    tys = []
    for t in types:
        ty = benchmark_types[t]
        if isinstance(ty, list):
            # Flatten nested unions into a single-level member list.
            tys += ty
        else:
            tys.append(ty)
    return tys
ldists = []
lsizes = []
lconsv = []
lptacc = []
for addr in baseline_locals:
for loc in baseline_locals[addr]:
base_ty = baseline_types[baseline_locals[addr][loc]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types_to_union(benchmark_locals[addr][loc])
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
continue
ldists.append(dist(base_ty, bench_ty))
lsizes.append(interval_size(bench_ty))
lconsv.append(conservativeness(base_ty, bench_ty))
lptacc.append(ptr_acc(base_ty, bench_ty))
if ldists != []:
print "LOCALS:"
print "average dist =", float(sum(ldists)) / len(ldists)
print "average size =", float(sum(lsizes)) / len(lsizes)
print "conservative =", 100. * float(sum(lconsv)) / len(lconsv)
print "ptr accuracy =", 100. * float(sum(lptacc)) / len(lptacc)
print
dists += ldists
sizes += lsizes
consv += lconsv
ptacc += lptacc
if dists != []:
print "TOTAL:"
print " matched entities:", len(dists)
print " average TIE distance:", float(sum(dists)) / len(dists)
print " average TIE interval:", float(sum(sizes)) / len(sizes)
print " average conservative:", float(sum(consv)) / len(consv)
print " average ptr accuracy:", float(sum(ptacc)) / len(ptacc)
def csv(x, ys):
    """Format one CSV row: label *x*, then per-category (count, distance,
    size, conservativeness, ptr-accuracy sums), then totals and averages."""
    s = x
    tot = 0
    tot_ds = 0
    tot_ss = 0
    tot_cs = 0
    tot_ps = 0
    for (ds, ss, cs, ps) in ys:
        tot += len(ds)
        tot_ds += sum(ds)
        tot_ss += sum(ss)
        tot_cs += sum(cs)
        tot_ps += sum(ps)
        s += "," + str(len(ds))
        s += "," + str(sum(ds))
        s += "," + str(sum(ss))
        s += "," + str(sum(cs))
        s += "," + str(sum(ps))
    # Guard the average columns against division by zero when no entity
    # matched in any category.
    if tot == 0:
        tot = 1
    s += "," + str(tot)
    s += "," + str(tot_ds)
    s += "," + str(tot_ss)
    s += "," + str(tot_cs)
    s += "," + str(tot_ps)
    s += "," + str(float(tot_ds) / tot)
    s += "," + str(float(tot_ss) / tot)
    s += "," + str(float(tot_cs) / tot)
    s += "," + str(float(tot_ps) / tot)
    return s
print csv(benchmark_map.split(".map")[0].split("/")[-1], [(gdists, gsizes, gconsv, gptacc), (ldists, lsizes, lconsv, lptacc), (rdists, rsizes, rconsv, rptacc), (pdists, psizes, pconsv, pptacc)])
| 10,766 | 0 | 456 |
01b64da62f296cae180a03c4f36b2eaf05614e04 | 469 | py | Python | setup.py | ggoblin/trello-sync-client | 203ce33a6809b356bfdcd9486fd8be2377185c4f | [
"Apache-2.0"
] | null | null | null | setup.py | ggoblin/trello-sync-client | 203ce33a6809b356bfdcd9486fd8be2377185c4f | [
"Apache-2.0"
] | null | null | null | setup.py | ggoblin/trello-sync-client | 203ce33a6809b356bfdcd9486fd8be2377185c4f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup
setup(name='gtsc',
version='0.1',
description='Goblin Trello Sync Client. ',
author='eternnoir',
author_email='eternnoir@gmail.com',
url='https://github.com/ggoblin/trello-sync-client',
packages=['gtsc'],
install_requires=['requests', 'pythondialog'],
entry_points={
'console_scripts': [
'gtsc = gtsc:main',
],
},
) | 27.588235 | 58 | 0.567164 | #!/usr/bin/env python
from setuptools import setup
setup(name='gtsc',
version='0.1',
description='Goblin Trello Sync Client. ',
author='eternnoir',
author_email='eternnoir@gmail.com',
url='https://github.com/ggoblin/trello-sync-client',
packages=['gtsc'],
install_requires=['requests', 'pythondialog'],
entry_points={
'console_scripts': [
'gtsc = gtsc:main',
],
},
) | 0 | 0 | 0 |
8346214a8b4850dacdd88da49cdf55ed0ef06499 | 387 | py | Python | 03. Advanced (Nested) Conditional Statements/P06 Working Hours.py | KrisBestTech/Python-Basics | 10bd961bf16d15ddb94bbea53327b4fc5bfdba4c | [
"MIT"
] | null | null | null | 03. Advanced (Nested) Conditional Statements/P06 Working Hours.py | KrisBestTech/Python-Basics | 10bd961bf16d15ddb94bbea53327b4fc5bfdba4c | [
"MIT"
] | null | null | null | 03. Advanced (Nested) Conditional Statements/P06 Working Hours.py | KrisBestTech/Python-Basics | 10bd961bf16d15ddb94bbea53327b4fc5bfdba4c | [
"MIT"
] | null | null | null | hour_of_the_day = int(input())
day_of_the_week = str(input())
if 10 <= hour_of_the_day <= 18:
if day_of_the_week == 'Monday' or day_of_the_week == 'Tuesday' or day_of_the_week == 'Wednesday' or \
day_of_the_week == 'Thursday' or day_of_the_week == 'Friday':
print('open')
else:
print('closed')
else:
print('closed')
| 32.25 | 105 | 0.586563 | hour_of_the_day = int(input())
day_of_the_week = str(input())
if 10 <= hour_of_the_day <= 18:
if day_of_the_week == 'Monday' or day_of_the_week == 'Tuesday' or day_of_the_week == 'Wednesday' or \
day_of_the_week == 'Thursday' or day_of_the_week == 'Friday':
print('open')
else:
print('closed')
else:
print('closed')
| 0 | 0 | 0 |
7fd160ce43b51b6ecf3001d77b210dc2cba27423 | 3,649 | py | Python | Software/Python/Steth Edition/protocolDefinitions.py | pd3d/magneto | da619b58b3e3c0ba9d6ac149e8902d8fc4614ccb | [
"MIT"
] | 1 | 2021-05-18T16:50:11.000Z | 2021-05-18T16:50:11.000Z | Software/Python/Steth Edition/protocolDefinitions.py | dash-orlando/magneto | da619b58b3e3c0ba9d6ac149e8902d8fc4614ccb | [
"MIT"
] | null | null | null | Software/Python/Steth Edition/protocolDefinitions.py | dash-orlando/magneto | da619b58b3e3c0ba9d6ac149e8902d8fc4614ccb | [
"MIT"
] | null | null | null | """
protocolDefinitions.py
The following module consists of a list of commands or definitions to be used in the communication between devices and the control system
Michael Xynidis
Fluvio L Lobo Fenoglietto
09/26/2016
"""
# Definition Name Value Class
# ---------- ---- ----- -----
SOH = chr(0x01) # Start of Heading 0x01 STD
ENQ = chr(0x05) # Enquiry 0x05 STD
EOT = chr(0x04) # End of Transmission 0x04 STD
ACK = chr(0x06) # Positive Acknowledgement 0x06 STD
NAK = chr(0x15) # Negative Acknowledgement 0x15 STD
CAN = chr(0x18) # Cancel Current Command 0x18 STD
# Device Control Commands
# We have extended the four (4) standard "device control" commands by means of a two-byte communication protocol
DC1 = chr(0x11) # Device Control 1: Diagnostic Functions 0x11 STD
DC1_DEVICEID = chr(0x00) # Device Identification
DC1_SDCHECK = chr(0x01) # SD Card Check 0x00 ORG
# 0xFF ORG
DC2 = chr(0x12) # Device Control 2: Operational Functions 0x12 STD
DC2_SENDWAV = chr(0x00) # Send .WAV File 0x00 ORG
DC2_DELVOLATILE = chr(0x01) # Delete Volatile Files 0x01 ORG
# 0xFF ORG
DC3 = chr(0x13) # Device Control 3: Device-Specific Functions 0x13 STD
DC3_STARTREC = chr(0x00) # Start Recording 0x00 ORG
DC3_STOPREC = chr(0x01) # Stop Recording 0x01 ORG
DC3_STARTPLAY = chr(0x02) # Start Playback 0x02 ORG
DC3_STOPPLAY = chr(0x03) # Stop Playback 0x03 ORG
DC3_STARTSTREAM = chr(0x04) # Start Microphone Stream 0x04 ORG
DC3_STARTTRACKING = chr(0x05) # Start Tracking Microphone Stream for Peaks 0x05 ORG
DC3_STOPTRACKING = chr(0x06) # Stop Tracking Microphone Stream for Peaks 0x06 ORG
# 0xFF ORG
DC4 = chr(0x14) # Device Control 4: Simulation Functions 0x14 STD
DC4_NORMALHB = chr(0x00) # Playback of Normal Heart Beat 0x00 ORG
DC4_ESHMURMUR = chr(0x01) # Playback of Early Systolic Heart Beat 0x01 ORG
# 0xFF ORG
# Legend
# STD - Standard terminology / Standard reference for command
# ORG - Original or custom-made command and reference
| 72.98 | 137 | 0.399836 | """
protocolDefinitions.py
The following module consists of a list of commands or definitions to be used in the communication between devices and the control system
Michael Xynidis
Fluvio L Lobo Fenoglietto
09/26/2016
"""
# Definition Name Value Class
# ---------- ---- ----- -----
SOH = chr(0x01) # Start of Heading 0x01 STD
ENQ = chr(0x05) # Enquiry 0x05 STD
EOT = chr(0x04) # End of Transmission 0x04 STD
ACK = chr(0x06) # Positive Acknowledgement 0x06 STD
NAK = chr(0x15) # Negative Acknowledgement 0x15 STD
CAN = chr(0x18) # Cancel Current Command 0x18 STD
# Device Control Commands
# We have extended the four (4) standard "device control" commands by means of a two-byte communication protocol
DC1 = chr(0x11) # Device Control 1: Diagnostic Functions 0x11 STD
DC1_DEVICEID = chr(0x00) # Device Identification
DC1_SDCHECK = chr(0x01) # SD Card Check 0x00 ORG
# 0xFF ORG
DC2 = chr(0x12) # Device Control 2: Operational Functions 0x12 STD
DC2_SENDWAV = chr(0x00) # Send .WAV File 0x00 ORG
DC2_DELVOLATILE = chr(0x01) # Delete Volatile Files 0x01 ORG
# 0xFF ORG
DC3 = chr(0x13) # Device Control 3: Device-Specific Functions 0x13 STD
DC3_STARTREC = chr(0x00) # Start Recording 0x00 ORG
DC3_STOPREC = chr(0x01) # Stop Recording 0x01 ORG
DC3_STARTPLAY = chr(0x02) # Start Playback 0x02 ORG
DC3_STOPPLAY = chr(0x03) # Stop Playback 0x03 ORG
DC3_STARTSTREAM = chr(0x04) # Start Microphone Stream 0x04 ORG
DC3_STARTTRACKING = chr(0x05) # Start Tracking Microphone Stream for Peaks 0x05 ORG
DC3_STOPTRACKING = chr(0x06) # Stop Tracking Microphone Stream for Peaks 0x06 ORG
# 0xFF ORG
DC4 = chr(0x14) # Device Control 4: Simulation Functions 0x14 STD
DC4_NORMALHB = chr(0x00) # Playback of Normal Heart Beat 0x00 ORG
DC4_ESHMURMUR = chr(0x01) # Playback of Early Systolic Heart Beat 0x01 ORG
# 0xFF ORG
# Legend
# STD - Standard terminology / Standard reference for command
# ORG - Original or custom-made command and reference
| 0 | 0 | 0 |
6e33880e55a6ead19ca1419599c06037fe903b43 | 330 | py | Python | ifp/Server/server.py | KevinThelly/Mask_RCNN | 3eded0fca0d85a5cfa17949e739922447752fd8f | [
"MIT"
] | null | null | null | ifp/Server/server.py | KevinThelly/Mask_RCNN | 3eded0fca0d85a5cfa17949e739922447752fd8f | [
"MIT"
] | null | null | null | ifp/Server/server.py | KevinThelly/Mask_RCNN | 3eded0fca0d85a5cfa17949e739922447752fd8f | [
"MIT"
] | null | null | null | from flask import Flask, render_template, jsonify, request, redirect, url_for, flash
from app import people
app = Flask(__name__)
app.debug=True
@app.route("/",methods=['GET','POST'])
@app.route('/run',methods=['GET','POST'])
app.run() | 20.625 | 84 | 0.684848 | from flask import Flask, render_template, jsonify, request, redirect, url_for, flash
from app import people

app = Flask(__name__)
# NOTE(review): debug mode enables the interactive debugger and reloader;
# it must not be left on in production.
app.debug=True


@app.route("/", methods=['GET', 'POST'])
def app1():
    """Landing route: tells the caller how to trigger the model."""
    return("run to deploy model")


@app.route('/run', methods=['GET', 'POST'])
def run_model():
    """Run the `people` pipeline once and report completion."""
    people()
    return("done")


# Fix: guard the server start so that importing this module (e.g. from a
# WSGI container or a test) no longer launches the development server.
if __name__ == "__main__":
    app.run()
a7d3171a27df3b32eb74f798a03ccd4185ea9086 | 2,874 | py | Python | corai_plot/src/aplot/dict_ax_for_aplot.py | Code-Cornelius/python_libraries | 71c388da60e2aeb94369c3813faca93bf6a18ebf | [
"MIT"
] | 1 | 2022-01-01T22:10:04.000Z | 2022-01-01T22:10:04.000Z | corai_plot/src/aplot/dict_ax_for_aplot.py | Code-Cornelius/python_libraries | 71c388da60e2aeb94369c3813faca93bf6a18ebf | [
"MIT"
] | null | null | null | corai_plot/src/aplot/dict_ax_for_aplot.py | Code-Cornelius/python_libraries | 71c388da60e2aeb94369c3813faca93bf6a18ebf | [
"MIT"
] | null | null | null | # todo add a ylabel bis !
# write down that, if label on bis axis, need to pass the label to the other dict. Be careful bc two axison same figure and so can be crowded.
class Dict_ax_for_APlot(object):
"""
dict_ax_for_APlot is an object that stores the properties of each axs of a APlot.
DEFAULT_DICT is then showing the default properties for each axs before personalisation.
The parameters are:
title: message on top of the image.
xlabel: legend of x-axis. string.
ylabel: legend of y-axis. string.
xscale: scale of the x-axis. string.
yscale: scale of the y-axis. string.
basex: base for log scale on x-axis. float.
basey: base for log scale on y-axis. float.
parameters: values of the parameters we want to print under the figure. list of floats. Should not be longer than 20.
name_parameters: name of the parameters shown next to the value. list of strings. Should not be longer than 20.
xlim: range of the x-axis. 2 elements list or tuple of floats.
ylim: range of the y-axis. 2 elements list or tuple of floats.
"""
# default parameters
DEFAULT_STR = "Non-Defined."
DEFAULT_DICT = {'title': DEFAULT_STR,
'xlabel': DEFAULT_STR, 'ylabel': DEFAULT_STR,
'xscale': 'linear', 'yscale': 'linear',
'basex': 10, 'basey': 10,
'xint': False, 'yint': False,
'parameters': None, 'name_parameters': None,
'xlim': None, 'ylim': None}
DEFAULT_DICT_BIS = {'title': '',
'xlabel': '', 'ylabel': 'bis_axis',
'xscale': 'linear', 'yscale': 'linear',
'basex': 10, 'basey': 10,
'xint': False, 'yint': False,
'parameters': None, 'name_parameters': None,
'xlim': None, 'ylim': None}
# TODO it would be a good idea to design the setter with certain conditions:
# if another parameter than authorised is given, warning!
# parameters and name_parameters same length.
# check that scale and xint not set at the same time?
@classmethod
def help_dict_ax(cls):
"""
Semantics:
print possibilities for dict_ax and the default behavior.
"""
text = cls.DEFAULT_DICT
print(text)
| 46.354839 | 142 | 0.591162 | # todo add a ylabel bis !
# write down that, if label on bis axis, need to pass the label to the other dict. Be careful bc two axison same figure and so can be crowded.
class Dict_ax_for_APlot(object):
    """Per-axis plotting properties for an APlot figure.

    One settings dict is kept for every axis, plus one for its twin
    ("bis") axis.  Supported keys:

        title            : text shown above the plot (str)
        xlabel / ylabel  : axis legends (str)
        xscale / yscale  : axis scales (str)
        basex / basey    : log bases for log-scaled axes (float)
        xint / yint      : force integer ticks on the axis (bool)
        parameters       : parameter values printed under the figure
                           (list of floats, at most 20)
        name_parameters  : names shown next to those values
                           (list of str, at most 20)
        xlim / ylim      : axis ranges (2-element list/tuple of floats)

    TODO (kept from the original author): a second y-label for the twin
    axis is still missing; labels for the twin axis must be passed through
    the bis dict, and two axes on one figure can get crowded.  The setter
    should also warn on unknown keys, check that `parameters` and
    `name_parameters` have equal length, and decide whether a scale and an
    `xint`/`yint` flag may be set at the same time.
    """

    # Placeholder shown for fields the user never customised.
    DEFAULT_STR = "Non-Defined."

    DEFAULT_DICT = {
        'title': DEFAULT_STR,
        'xlabel': DEFAULT_STR,
        'ylabel': DEFAULT_STR,
        'xscale': 'linear',
        'yscale': 'linear',
        'basex': 10,
        'basey': 10,
        'xint': False,
        'yint': False,
        'parameters': None,
        'name_parameters': None,
        'xlim': None,
        'ylim': None,
    }

    # Defaults for the twin ("bis") axis: no title/xlabel, marked ylabel.
    DEFAULT_DICT_BIS = {
        'title': '',
        'xlabel': '',
        'ylabel': 'bis_axis',
        'xscale': 'linear',
        'yscale': 'linear',
        'basex': 10,
        'basey': 10,
        'xint': False,
        'yint': False,
        'parameters': None,
        'name_parameters': None,
        'xlim': None,
        'ylim': None,
    }

    def __init__(self, nb_of_axs):
        """Create one independent settings dict per axis (and per bis axis)."""
        self.list_dicts_parameters_for_each_axs = [
            dict(Dict_ax_for_APlot.DEFAULT_DICT) for _ in range(nb_of_axs)
        ]
        self.list_dicts_parameters_for_each_axs_bis = [
            dict(Dict_ax_for_APlot.DEFAULT_DICT_BIS) for _ in range(nb_of_axs)
        ]

    @classmethod
    def help_dict_ax(cls):
        """Print the supported dict_ax keys together with their defaults."""
        print(cls.DEFAULT_DICT)
| 428 | 0 | 27 |
1c8a4b3668c04c35802751586f6fccca80b7c194 | 73 | py | Python | game/gameobject.py | willwybrow/starlines-web | c6c0ef4c02362bd666b750980a1e005394fd423d | [
"MIT"
] | null | null | null | game/gameobject.py | willwybrow/starlines-web | c6c0ef4c02362bd666b750980a1e005394fd423d | [
"MIT"
] | null | null | null | game/gameobject.py | willwybrow/starlines-web | c6c0ef4c02362bd666b750980a1e005394fd423d | [
"MIT"
] | null | null | null | from dataclasses import dataclass
@dataclass | 12.166667 | 33 | 0.794521 | from dataclasses import dataclass
@dataclass
class GameObject:
pass | 0 | 5 | 22 |
0061ebcf6c22a44030bbf2c7e1fa8e1989c71a42 | 652 | py | Python | src/data/418.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/418.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/418.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | n, q = map(int, input().split())
# --- Tree 2-coloring -------------------------------------------------------
# `n` (number of towns) and `q` (number of queries) are read on the line
# just above this block.  Towns are colored by depth parity, so two towns
# share a color exactly when the tree path between them has even length.
graph = [[] for _ in range(n)]
# Read the n-1 tree edges; input is 1-based, the adjacency list is 0-based.
for i in range(n - 1):
    a, b = map(int, input().split())
    graph[a - 1].append(b - 1)
    graph[b - 1].append(a - 1)
# group[c] collects the (1-based) towns of color c; town_color[v] is v's color.
group = [[], []]
town_color = [-1] * n
# Iterative DFS from town 0; each stack entry is [vertex, parent, color].
tmp = [[0, -1, 0]]
while tmp:
    v, past, color = tmp.pop()
    town_color[v] = color
    group[color].append(v + 1)
    for i in graph[v]:
        if i == past: continue
        tmp.append([i, v, color ^ 1])
# print(group[0])
# print(group[1])
# print(town_color)
# Answer each query: same parity -> even distance ("Town"), else "Road".
for i in range(q):
    c, d = map(int, input().split())
    if town_color[c - 1] == town_color[d - 1]:
        print("Town")
    else:
        print("Road")
| 21.733333 | 46 | 0.515337 | n, q = map(int, input().split())
graph = [[] for _ in range(n)]
for i in range(n - 1):
a, b = map(int, input().split())
graph[a - 1].append(b - 1)
graph[b - 1].append(a - 1)
group = [[], []]
town_color = [-1] * n
tmp = [[0, -1, 0]]
while tmp:
v, past, color = tmp.pop()
town_color[v] = color
group[color].append(v + 1)
for i in graph[v]:
if i == past: continue
tmp.append([i, v, color ^ 1])
# print(group[0])
# print(group[1])
# print(town_color)
for i in range(q):
c, d = map(int, input().split())
if town_color[c - 1] == town_color[d - 1]:
print("Town")
else:
print("Road")
| 0 | 0 | 0 |
625fe47e65c35d9e48674c3a4a8744be7bc2ecdf | 840 | py | Python | src/boost_histogram/_internal/kwargs.py | HDembinski/boost-histogram | 6071588d8b58504938f72818d22ff3ce2a5b45dc | [
"BSD-3-Clause"
] | null | null | null | src/boost_histogram/_internal/kwargs.py | HDembinski/boost-histogram | 6071588d8b58504938f72818d22ff3ce2a5b45dc | [
"BSD-3-Clause"
] | null | null | null | src/boost_histogram/_internal/kwargs.py | HDembinski/boost-histogram | 6071588d8b58504938f72818d22ff3ce2a5b45dc | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
del absolute_import, division, print_function
| 27.096774 | 88 | 0.625 | from __future__ import absolute_import, division, print_function
del absolute_import, division, print_function
class KWArgs(object):
    """Context-managed consumer of a keyword-argument dict.

    Callers pop the arguments they expect one by one; on leaving the
    ``with`` block, any argument that nobody consumed raises ``TypeError``.
    """

    def __init__(self, kwargs):
        self.kwargs = kwargs

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Anything left unconsumed was an unexpected keyword argument.
        if self.kwargs:
            leftover = ", ".join(self.kwargs)
            raise TypeError("Keyword(s) {} not expected".format(leftover))

    def required(self, name):
        """Consume a mandatory argument; raise KeyError when it is absent."""
        if name not in self.kwargs:
            raise KeyError("{0} is required".format(name))
        self.kwargs.pop(name)

    def optional(self, name, default=None):
        """Consume an optional argument, falling back to *default*."""
        return self.kwargs.pop(name, default)

    def options(self, **options):
        """Return the set of option names whose consumed value is truthy."""
        truthy = set()
        for name, fallback in options.items():
            if self.optional(name, fallback):
                truthy.add(name)
        return truthy
| 543 | 0 | 184 |
5bc53cc07354127b9259cde07c54c8e0dab91abb | 10,988 | py | Python | bin/cell_def.py | rheiland/pc4biorobots_plotly | e1b75b17cb049aa1381ed0e70baab2cd5cd050b5 | [
"BSD-3-Clause"
] | null | null | null | bin/cell_def.py | rheiland/pc4biorobots_plotly | e1b75b17cb049aa1381ed0e70baab2cd5cd050b5 | [
"BSD-3-Clause"
] | null | null | null | bin/cell_def.py | rheiland/pc4biorobots_plotly | e1b75b17cb049aa1381ed0e70baab2cd5cd050b5 | [
"BSD-3-Clause"
] | null | null | null |
# This file is auto-generated from a Python script that parses a PhysiCell configuration (.xml) file.
#
# Edit at your own risk.
#
import os
from ipywidgets import Label,Text,Checkbox,Button,HBox,VBox,FloatText,IntText,BoundedIntText,BoundedFloatText,Layout,Box,Dropdown, Text
# Populate the GUI widgets with values from the XML
# Read values from the GUI widgets to enable editing XML
| 45.974895 | 149 | 0.672825 |
# This file is auto-generated from a Python script that parses a PhysiCell configuration (.xml) file.
#
# Edit at your own risk.
#
import os
from ipywidgets import Label,Text,Checkbox,Button,HBox,VBox,FloatText,IntText,BoundedIntText,BoundedFloatText,Layout,Box,Dropdown, Text
class CellDefTab(object):
def __init__(self):
micron_units = Label('micron') # use "option m" (Mac, for micro symbol)
constWidth = '180px'
tab_height = '500px'
stepsize = 10
#style = {'description_width': '250px'}
style = {'description_width': '25%'}
layout = {'width': '400px'}
name_button_layout={'width':'25%'}
widget_layout = {'width': '15%'}
units_button_layout ={'width':'15%'}
desc_button_layout={'width':'45%'}
# divider_button_layout={'width':'40%', 'align_items':'left'}
divider_button_layout={'width':'40%'}
self.cell_type = Dropdown(
description='Cell:',
# options=get_cell_types(),
options={'default':'default', 'worker':'worker', 'director':'director', 'cargo':'cargo'},
tooltip='Config File or Previous Run',
)
self.cell_type.style = {'description_width': '%sch' % str(len(self.cell_type.description) + 1)}
# self.cell_type.observe(cell_type_cb, names='value')
self.parent_name = Text(
value='None',
placeholder='Type something',
description='Parent:',
disabled=True
)
menv_var1 = Button(description='director_signal', disabled=True, layout=name_button_layout)
menv_var1.style.button_color = 'tan'
param_name1 = Button(description='cycle trans rate', disabled=True, layout=name_button_layout)
self.cycle_trans_rate = FloatText(value=1000,
step=100,style=style, layout=widget_layout)
param_name2 = Button(description='decay_rate', disabled=True, layout=name_button_layout)
self.director_signal_decay_rate = FloatText(value=.1,
step=0.01,style=style, layout=widget_layout)
param_name3 = Button(description='initial_condition', disabled=True, layout=name_button_layout)
self.director_signal_initial_condition = FloatText(value=0,style=style, layout=widget_layout)
param_name4 = Button(description='Dirichlet_boundary_condition', disabled=True, layout=name_button_layout)
self.director_signal_Dirichlet_boundary_condition = FloatText(value=1,style=style, layout=widget_layout)
self.director_signal_Dirichlet_boundary_condition_toggle = Checkbox(description='on/off', disabled=False,style=style, layout=widget_layout)
menv_var2 = Button(description='cargo_signal', disabled=True, layout=name_button_layout)
menv_var2.style.button_color = 'lightgreen'
param_name5 = Button(description='diffusion_coefficient', disabled=True, layout=name_button_layout)
self.cargo_signal_diffusion_coefficient = FloatText(value=1000,
step=100,style=style, layout=widget_layout)
param_name6 = Button(description='decay_rate', disabled=True, layout=name_button_layout)
self.cargo_signal_decay_rate = FloatText(value=.4,
step=0.1,style=style, layout=widget_layout)
param_name7 = Button(description='initial_condition', disabled=True, layout=name_button_layout)
self.cargo_signal_initial_condition = FloatText(value=0,style=style, layout=widget_layout)
param_name8 = Button(description='Dirichlet_boundary_condition', disabled=True, layout=name_button_layout)
self.cargo_signal_Dirichlet_boundary_condition = FloatText(value=1,style=style, layout=widget_layout)
self.cargo_signal_Dirichlet_boundary_condition_toggle = Checkbox(description='on/off', disabled=False,style=style, layout=widget_layout)
self.calculate_gradient = Checkbox(description='calculate_gradients', disabled=False, layout=desc_button_layout)
self.track_internal = Checkbox(description='track_in_agents', disabled=False, layout=desc_button_layout)
row_director_signal = [menv_var1, ]
box_layout = Layout(display='flex', flex_flow='row', align_items='stretch', width='100%')
"""
box_director_signal = Box(children=row_director_signal, layout=box_layout)
box1 = Box(children=row1, layout=box_layout)
box_cargo_signal = Box(children=row_cargo_signal, layout=box_layout)
box5 = Box(children=row5, layout=box_layout)
"""
#--------------------------
div_cycle = Button(description='Phenotype:cycle', disabled=True, layout=divider_button_layout)
param_name1 = Button(description='transition rate: 0->1', disabled=True, layout=name_button_layout)
param_name1.style.button_color = 'tan'
param_name1_units = Button(description='1/min', disabled=True, layout=units_button_layout)
param_name1_units.style.button_color = 'tan'
self.cycle_trans_rate1 = FloatText(
value=0.0001,
step=0.00001,
style=style, layout=widget_layout)
row1 = [param_name1, self.cycle_trans_rate1, param_name1_units]
box1 = Box(children=row1, layout=box_layout)
param_name2 = Button(description='transition rate: 1->2', disabled=True, layout=name_button_layout)
param_name2.style.button_color = 'lightgreen'
param_name2_units = Button(description='1/min', disabled=True, layout=units_button_layout)
param_name2_units.style.button_color = 'lightgreen'
self.cycle_trans_rate2 = FloatText(
value=0.0002,
step=0.00001,
style=style, layout=widget_layout)
row2 = [param_name2, self.cycle_trans_rate2, param_name2_units]
box2 = Box(children=row2, layout=box_layout)
#--------------------------
div_death = Button(description='Phenotype:death', disabled=True, layout=divider_button_layout)
#--------------------------
div_volume = Button(description='Phenotype:volume', disabled=True, layout=divider_button_layout)
param_name9 = Button(description='volume', disabled=True, layout=name_button_layout)
param_name9.style.button_color = 'tan'
param_name9_units = Button(description='micron^3', disabled=True, layout=units_button_layout)
param_name9_units.style.button_color = 'tan'
self.volume = FloatText(
value=2.15e3,
step=100,
style=style, layout=widget_layout)
row9 = [param_name9, self.volume, param_name9_units]
box9 = Box(children=row9, layout=box_layout)
#--------------------------
#--------------------------
div_mechanics = Button(description='Phenotype:mechanics', disabled=True, layout=divider_button_layout)
#--------------------------
div_motility = Button(description='Phenotype:motility', disabled=True, layout=divider_button_layout)
#--------------------------
div_secretion = Button(description='Phenotype:secretion', disabled=True, layout=divider_button_layout)
#--------------------------
div_intracellular = Button(description='Phenotype:intracellular', disabled=True, layout=divider_button_layout)
#--------------------------
div_custom_data = Button(description='Custom data', disabled=True, layout=divider_button_layout)
# <elastic_coefficient length=”1” units=”1/min”>1.0</elastic_coefficient>
# <attachment_point length=”3” units=”micron”>-12.8,13.9,0.0</attachment_point>
param_name31 = Button(description='elastic_coefficient', disabled=True, layout=name_button_layout)
param_name31.style.button_color = 'tan'
param_name31_units = Button(description='1/min', disabled=True, layout=units_button_layout)
param_name31_units.style.button_color = 'tan'
self.custom_elastic_coef = FloatText(
value=1.0,
step=0.1,
style=style, layout=widget_layout)
row31 = [param_name31, self.custom_elastic_coef, param_name31_units]
box31 = Box(children=row31, layout=box_layout)
param_name32 = Button(description='attachment_point', disabled=True, layout=name_button_layout)
param_name32.style.button_color = 'lightgreen'
param_name32_units = Button(description='micron', disabled=True, layout=units_button_layout)
param_name32_units.style.button_color = 'lightgreen'
self.custom_attachment_point = Text(
value="-12.8,13.9,0.0",
style=style, layout=widget_layout)
row32 = [param_name32, self.custom_attachment_point, param_name32_units]
box32 = Box(children=row32, layout=box_layout)
#--------------------------
self.tab = VBox([
HBox([self.cell_type, self.parent_name]),
div_cycle,
box1,
box2,
div_death,
div_volume,
box9,
div_mechanics,
div_motility,
div_secretion,
div_intracellular,
div_custom_data,
box31,
box32,
])
# Populate the GUI widgets with values from the XML
def fill_gui(self, xml_root):
return
# Read values from the GUI widgets to enable editing XML
def fill_xml(self, xml_root):
uep = xml_root.find('.//microenvironment_setup') # find unique entry point
vp = [] # pointers to <variable> nodes
if uep:
for var in uep.findall('variable'):
vp.append(var)
uep = xml_root.find('.//microenvironment_setup') # find unique entry point
vp[0].find('.//diffusion_coefficient').text = str(self.director_signal_diffusion_coefficient.value)
vp[0].find('.//decay_rate').text = str(self.director_signal_decay_rate.value)
vp[0].find('.//initial_condition').text = str(self.director_signal_initial_condition.value)
vp[0].find('.//Dirichlet_boundary_condition').text = str(self.director_signal_Dirichlet_boundary_condition.value)
vp[0].find('.//Dirichlet_boundary_condition').attrib['enabled'] = str(self.director_signal_Dirichlet_boundary_condition_toggle.value).lower()
vp[1].find('.//diffusion_coefficient').text = str(self.cargo_signal_diffusion_coefficient.value)
vp[1].find('.//decay_rate').text = str(self.cargo_signal_decay_rate.value)
vp[1].find('.//initial_condition').text = str(self.cargo_signal_initial_condition.value)
vp[1].find('.//Dirichlet_boundary_condition').text = str(self.cargo_signal_Dirichlet_boundary_condition.value)
vp[1].find('.//Dirichlet_boundary_condition').attrib['enabled'] = str(self.cargo_signal_Dirichlet_boundary_condition_toggle.value).lower()
uep.find('.//options//calculate_gradients').text = str(self.calculate_gradient.value)
uep.find('.//options//track_internalized_substrates_in_each_agent').text = str(self.track_internal.value)
| 10,496 | 4 | 106 |
910eeac9cd5ac038e87c6fc6fb3eea6860897851 | 11,215 | py | Python | app/aims_loader.py | niaid/AIMS | d2270cffecb90e9542c23c22bc868c9f74dba734 | [
"MIT"
] | 13 | 2020-09-13T22:17:46.000Z | 2022-03-30T16:57:33.000Z | app/aims_loader.py | niaid/AIMS | d2270cffecb90e9542c23c22bc868c9f74dba734 | [
"MIT"
] | 2 | 2021-08-24T17:46:52.000Z | 2021-09-15T20:28:59.000Z | app/aims_loader.py | niaid/AIMS | d2270cffecb90e9542c23c22bc868c9f74dba734 | [
"MIT"
] | 2 | 2021-08-09T15:14:27.000Z | 2021-12-06T21:48:22.000Z | from Bio import AlignIO
from Bio.Seq import Seq
from Bio import SeqIO
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
import numpy as np
import pandas
thing = True
# Using the nomenclature of the GUI explanation, here are some example GUI start/end values
# As a reminder, it goes S1s, S1e/H1s, H1e/S2s, S2e/H2s, H2e
# For the ji_cartFish we have: 2,49,93,152,193
# For the cd1d.fasta we have: 124,167,209,262,303
# For the hlaA.fasta we have: 170,218,260,306,348
# For cd1_ufa_genes.fasta: 22,66,105,158,199
# So in the main version of the script, we have a special loader for each data subset
# Can we make just a generalizable one? Let's give it a try...
#####################################################################################
| 42.003745 | 138 | 0.547214 | from Bio import AlignIO
from Bio.Seq import Seq
from Bio import SeqIO
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
import numpy as np
import pandas
thing = True
# Using the nomenclature of the GUI explanation, here are some example GUI start/end values
# As a reminder, it goes S1s, S1e/H1s, H1e/S2s, S2e/H2s, H2e
# For the ji_cartFish we have: 2,49,93,152,193
# For the cd1d.fasta we have: 124,167,209,262,303
# For the hlaA.fasta we have: 170,218,260,306,348
# For cd1_ufa_genes.fasta: 22,66,105,158,199
def mhc_loader(fastapath,mat_coords,label):
    """Load MHC sequences from a FASTA file and slice each into 4 segments.

    fastapath  : path to the .fasta file (assumed pre-aligned, so the same
                 column coordinates apply to every record -- alignment gap
                 characters '-' are stripped after slicing; TODO confirm).
    mat_coords : five alignment columns delimiting the segments
                 (S1 start, S1 end/H1 start, H1 end/S2 start,
                 S2 end/H2 start, H2 end -- see the example coordinates in
                 the header comments above this function).
    label      : prefix used to rename each record as ``label_i``.

    Returns (finalDF, title_key):
      finalDF   -- DataFrame with 4 rows (one per segment) and one column
                   per kept sequence.
      title_key -- 2xN array mapping the new ``label_i`` names (row 0) to
                   the original '<file> - <fasta id>' names (row 1).
    Sequences fewer than ``num_muts`` mismatches away from an earlier
    sequence are dropped as near-duplicates.
    """
    # First-iteration flag: seeds the accumulators on the first record.
    thing = True
    # Derive a human-readable dataset name from the file name
    # (the part between the last '/' and '.fasta').
    xxx1 = fastapath.rfind('/')
    xxx2 = fastapath.rfind('.fasta')
    yyy = fastapath[xxx1+1:xxx2]
    a = 0
    for seq_record in SeqIO.parse(fastapath,'fasta'):
        seqV=str(seq_record.seq)
        fasta_id = str(seq_record.id)
        ori_titleV = yyy + ' - ' + fasta_id
        # Replace the FASTA identifier with a uniform 'label_i' name.
        titleV = label + '_' + str(a)
        # Slice the aligned sequence into the four structural segments and
        # drop alignment gaps.
        seg1 = seqV[int(mat_coords[0]):int(mat_coords[1])].replace('-','')
        seg2 = seqV[int(mat_coords[1]):int(mat_coords[2])].replace('-','')
        seg3 = seqV[int(mat_coords[2]):int(mat_coords[3])].replace('-','')
        seg4 = seqV[int(mat_coords[3]):int(mat_coords[4])].replace('-','')
        segs = [seg1,seg2,seg3,seg4]
        if thing:
            final_Seg1 = segs
            final_title = [titleV]
            final_ori_title = [ori_titleV]
            thing = False
        else:
            final_Seg1 = np.vstack((final_Seg1,segs))
            final_title = final_title + [titleV]
            final_ori_title = final_ori_title + [ori_titleV]
        a = a+1
    # Transpose so rows are segments and columns are sequences.
    ff_seg1 = np.transpose(final_Seg1)
    # Obviously don't need to worry about extra sequences if there is only one...
    # What a dumb f***ing way to do this, but it works...
    # (np.shape(np.shape(x))[0] == 1 means only one record was read, so
    # ff_seg1 is a flat length-4 array rather than a 4xN matrix.)
    if np.shape(np.shape(ff_seg1))[0] != 1:
        # Extra bit here to delete duplicate sequences
        # REMOVE POINT MUTANTS AND SEQs TWO MUTATIONS OFF
        num_muts = 2
        aa,bb = np.shape(ff_seg1)
        # Compare every ordered pair of sequence columns; pairs whose total
        # per-segment mismatch count is below num_muts are recorded (both
        # (i, j) and (j, i) end up in `indices`, after a dummy [0, 0] row).
        indices = np.array([0,0])
        for i in np.arange(bb):
            for j in np.arange(bb):
                if i == j:
                    continue
                count = 0
                for k in np.arange(aa):
                    # SO THIS IS A NICE CODE I STOLE FROM ONLINE TO FIND NUMBER OF MATCHES
                    # ABSOLUTE VALUE COUNTS DIFF LENGTH AS A MISMATCH
                    count += sum(1 for a, b in zip(ff_seg1[k,i], ff_seg1[k,j]) if a != b) + abs(len(ff_seg1[k,i]) - len(ff_seg1[k,j]))
                if count < num_muts:
                    indices = np.vstack((indices,[i,j]))
        # Collect the larger index of each near-duplicate pair so only the
        # first occurrence of a sequence survives.
        # NOTE(review): because `indices` starts with a dummy [0, 0] row,
        # the `len(indices) < 3` checks mean nothing is dropped unless a
        # full symmetric pair (two rows) was recorded -- confirm intended.
        thing = True
        for i in np.arange(len(indices)):
            if len(indices) < 3:
                break
            if indices[i,0] < indices[i,1]:
                if thing:
                    index_new = [indices[i,0]]
                    thing = False
                elif len(index_new) == 1:
                    if index_new == indices[i,0]:
                        continue
                    else:
                        index_new = np.vstack((index_new,indices[i,0]))
                elif len(index_new) > 1:
                    if index_new[len(index_new)-1] == indices[i,0]:
                        continue
                    else:
                        index_new = np.vstack((index_new,indices[i,0]))
        if len(indices) < 3:
            # No near-duplicates found: keep every sequence.
            finalDF = pandas.DataFrame(ff_seg1,columns = final_title)
            title_key = np.vstack((final_title,final_ori_title))
            return(finalDF,title_key)
        else:
            # Drop the duplicate columns and the matching titles.
            seq_new = np.delete(ff_seg1,index_new,axis = 1)
            title_new = np.delete(final_title,index_new,axis = 0)
            title_ori_new = np.delete(final_ori_title,index_new,axis = 0)
            finalDF = pandas.DataFrame(seq_new,columns = title_new)
            title_key = np.vstack((title_new,title_ori_new))
            return(finalDF,title_key)
    else:
        # Single-sequence file: nothing to deduplicate.
        seq_new = ff_seg1
        title_new = final_title
        title_ori_new = final_ori_title
        finalDF = pandas.DataFrame(seq_new,columns = title_new)
        title_key = np.vstack((title_new,title_ori_new))
        return(finalDF,title_key)
# So in the main version of the script, we have a special loader for each data subset
# Can we make just a generalizable one? Let's give it a try...
def Ig_loader(fastapath,label,loops=6,drop_degens = False):
    """Load antibody/TCR CDR loop sequences from a CSV file.

    fastapath   : path to a CSV file (despite the name, not FASTA) with one
                  column per CDR loop; the column count must equal `loops`.
    label       : prefix for the generated column names ``label_i``.
    loops       : 6, 3, 2 or 1 -- which set of CDR columns the file holds.
    drop_degens : when True, drop rows whose concatenated loops exactly
                  match an earlier row (position-sensitive identity).

    Returns a DataFrame with one column per kept receptor and one row per
    loop.  Rows containing 'X' residues or with any empty loop are removed.
    """
    if loops == 6:
        total_Abs=pandas.read_csv(fastapath,sep=',',header=0,names=['cdrL1_aa','cdrL2_aa','cdrL3_aa','cdrH1_aa','cdrH2_aa','cdrH3_aa'])
    elif loops == 3:
        total_Abs=pandas.read_csv(fastapath,sep=',',header=0,names=['cdr1_aa','cdr2_aa','cdr3_aa'])
    elif loops == 2:
        total_Abs=pandas.read_csv(fastapath,sep=',',header=0,names=['cdrH3_aa','cdrL3_aa'])
    elif loops == 1:
        total_Abs=pandas.read_csv(fastapath,sep=',',header=0,names=['cdr_aa'])
    # Remove empty entries
    total_abs1 = total_Abs.where((pandas.notnull(total_Abs)), '')
    # Remove X's in sequences... Should actually get a count of these at some point...
    if loops == 6:
        total_abs2=total_abs1[~total_abs1['cdrL1_aa'].str.contains("X")]
        total_abs3=total_abs2[~total_abs2['cdrL2_aa'].str.contains("X")]
        total_abs4=total_abs3[~total_abs3['cdrL3_aa'].str.contains("X")]
        total_abs5=total_abs4[~total_abs4['cdrH1_aa'].str.contains("X")]
        total_abs6=total_abs5[~total_abs5['cdrH2_aa'].str.contains("X")]
        totalF=total_abs6[~total_abs6['cdrH3_aa'].str.contains("X")].values
    elif loops == 3:
        total_abs5=total_abs1[~total_abs1['cdr1_aa'].str.contains("X")]
        total_abs6=total_abs5[~total_abs5['cdr2_aa'].str.contains("X")]
        totalF=total_abs6[~total_abs6['cdr3_aa'].str.contains("X")].values
    elif loops == 2:
        total_abs5=total_abs1[~total_abs1['cdrH3_aa'].str.contains("X")]
        totalF=total_abs5[~total_abs5['cdrL3_aa'].str.contains("X")].values
    elif loops == 1:
        totalF=total_abs1[~total_abs1['cdr_aa'].str.contains("X")].values
    # Remove incomplete entries
    # (rows where at least one loop is the empty string; the row indices to
    # delete are accumulated in `del_these`).
    a=0
    del_these=[]
    if loops == 6:
        for i in np.arange(len(totalF[:,5])):
            if totalF[i,5] == '' or totalF[i,4] == '' or totalF[i,3] == '' or totalF[i,2] == '' or totalF[i,1] == '' or totalF[i,0] == '':
                if a == 0:
                    del_these=i
                else:
                    del_these=np.vstack((del_these,i))
                a=a+1
    elif loops == 3:
        for i in np.arange(len(totalF[:,2])):
            if totalF[i,2] == '' or totalF[i,1] == '' or totalF[i,0] == '':
                if a == 0:
                    del_these=i
                else:
                    del_these=np.vstack((del_these,i))
                a=a+1
    elif loops == 2:
        for i in np.arange(np.shape(totalF)[0]):
            if totalF[i,1] == '' or totalF[i,0] == '':
                if a == 0:
                    del_these=i
                else:
                    del_these=np.vstack((del_these,i))
                a=a+1
    elif loops == 1:
        for i in np.arange(len(totalF[:])):
            if totalF[i] == '':
                if a == 0:
                    del_these=i
                else:
                    del_these=np.vstack((del_these,i))
                a=a+1
    final_Ig=np.delete(totalF,del_these,axis=0)
    # Remove degeneracies in the dataset (optional)
    # Keep row i only if no earlier row j < i has the identical concatenated
    # loop sequence; surviving row indices are collected in `indices`.
    if drop_degens:
        aa = np.shape(final_Ig)[0]
        for i in np.arange(aa):
            degen = False
            for j in np.arange(i):
                # ignore diagonal
                if i == j:
                    continue
                # to get around changing number of loops,
                if loops == 1:
                    test1 = final_Ig[i,0]
                    test2 = final_Ig[j,0]
                elif loops == 2:
                    test1 = final_Ig[i,0] + final_Ig[i,1]
                    test2 = final_Ig[j,0] + final_Ig[j,1]
                elif loops == 3:
                    test1 = final_Ig[i,0] + final_Ig[i,1] + final_Ig[i,2]
                    test2 = final_Ig[j,0] + final_Ig[j,1] + final_Ig[j,2]
                elif loops == 6:
                    test1 = final_Ig[i,0] + final_Ig[i,1] + final_Ig[i,2] + final_Ig[i,3] + final_Ig[i,4] + final_Ig[i,5]
                    test2 = final_Ig[j,0] + final_Ig[j,1] + final_Ig[j,2] + final_Ig[j,3] + final_Ig[j,4] + final_Ig[j,5]
                # if the sequences are of a different length, clearly they aren't identical
                if len(test1) - len(test2) != 0:
                    continue
                # Sum zip here counts the number of matched residues (position senstive)
                # So by subtracting the length, identical sequences should have count = 0
                count = sum(1 for a, b in zip(test1, test2) if a == b) - len(test1)
                # as soon as you find an identical sequence, break out
                if count == 0:
                    degen = True
                    break
            if i == 0 and not degen:
                indices = np.array([0])
            elif not degen:
                indices = np.vstack((indices,i))
        # Materialize the surviving rows with an explicit 2-D shape.
        if loops == 1:
            f_Ig = final_Ig[indices,:].reshape(len(indices),1)
        elif loops == 2:
            f_Ig = final_Ig[indices,:].reshape(len(indices),2)
        elif loops == 3:
            f_Ig = final_Ig[indices,:].reshape(len(indices),3)
        elif loops == 6:
            f_Ig = final_Ig[indices,:].reshape(len(indices),6)
    else:
        f_Ig = final_Ig
    # One column per receptor, rows are the loops (hence the transpose).
    final_title = [label + '_' + str(a) for a in np.arange(len(f_Ig))]
    final_Df = pandas.DataFrame(np.transpose(f_Ig),columns = final_title)
    return(final_Df)
#####################################################################################
def pep_loader(fastapath, label, scrape=False, start_label=0):
    """Load peptide sequences from a CSV file into a single-row DataFrame.

    fastapath   : path to the CSV file.  With ``scrape=False`` the file is
                  read with ``header=1`` and must provide a ``sequence``
                  column; with ``scrape=True`` it is read with ``header=0``
                  and must provide a ``search_hit`` column.
    label       : prefix used to build the column names ``label_i``.
    scrape      : toggle the alternate "scraped" CSV layout above.
    start_label : offset added to the numeric suffix of each column name.

    Returns the peptide DataFrame (one row of strings, columns named
    ``label_start``, ``label_start+1``, ...).  When ``scrape`` is True,
    returns ``(peptides, alleles)`` where the allele frame shares the same
    column names.
    """
    if scrape:
        csv_file = pandas.read_csv(fastapath, sep=',', header=0)
        # Not every input names its MHC-class column the same way, so the
        # allele is taken positionally from the last column -- TODO confirm
        # this layout holds for every scraped file.
        headers = csv_file.columns
        data = csv_file['search_hit']
        allele = csv_file[headers[-1]]
    else:
        data = pandas.read_csv(fastapath, sep=',', header=1)['sequence']
    # Fix: build the titles with a comprehension.  The original accumulated
    # them in a flag-guarded loop and raised NameError on an empty file; an
    # empty input now simply yields a frame with no peptide columns.
    final_title = [label + '_' + str(a + start_label) for a in range(len(data))]
    finalDF = np.transpose(pandas.DataFrame(np.array(data)))
    finalDF.columns = final_title
    if scrape:
        finalAllele = np.transpose(pandas.DataFrame(np.array(allele)))
        finalAllele.columns = final_title
        return (finalDF, finalAllele)
    else:
        return finalDF
| 10,381 | 0 | 67 |
b1039017a58259fe6c620d0b18a79f81337d3065 | 60 | py | Python | src/Engine/Actions/__init__.py | MiguelReuter/Volley-ball-game | 67d830cc528f3540b236d8191f582adb1827dbde | [
"MIT"
] | 4 | 2019-04-15T20:39:29.000Z | 2022-02-04T10:51:37.000Z | src/Engine/Actions/__init__.py | MiguelReuter/Volley-ball-game | 67d830cc528f3540b236d8191f582adb1827dbde | [
"MIT"
] | null | null | null | src/Engine/Actions/__init__.py | MiguelReuter/Volley-ball-game | 67d830cc528f3540b236d8191f582adb1827dbde | [
"MIT"
] | 1 | 2019-11-30T01:05:29.000Z | 2019-11-30T01:05:29.000Z | # encoding : UTF-8
from .action_object import ActionObject
| 15 | 39 | 0.783333 | # encoding : UTF-8
from .action_object import ActionObject
| 0 | 0 | 0 |
dda9528dfddd821b3f30c5c18800d43d8f8cc5a1 | 3,730 | py | Python | st/clitests/s3fi.py | RakeshVaghasiya/cortx-s3server | 356c00f7523883300f3271b365545f4ff8b4c2be | [
"Apache-2.0"
] | null | null | null | st/clitests/s3fi.py | RakeshVaghasiya/cortx-s3server | 356c00f7523883300f3271b365545f4ff8b4c2be | [
"Apache-2.0"
] | 1 | 2021-04-27T07:25:16.000Z | 2021-04-27T07:25:16.000Z | st/clitests/s3fi.py | RakeshVaghasiya/cortx-s3server | 356c00f7523883300f3271b365545f4ff8b4c2be | [
"Apache-2.0"
] | 1 | 2022-03-29T02:39:28.000Z | 2022-03-29T02:39:28.000Z | #
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
import os
import sys
import time
from threading import Timer
import subprocess
from framework import PyCliTest
from framework import Config
from framework import logit
from s3client_config import S3ClientConfig
| 36.930693 | 125 | 0.643432 | #
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
import os
import sys
import time
from threading import Timer
import subprocess
from framework import PyCliTest
from framework import Config
from framework import logit
from s3client_config import S3ClientConfig
class S3fiTest(PyCliTest):
    """PyCliTest subclass that toggles S3 server fault-injection (FI) points.

    Each enable/disable helper assembles a ``curl -X PUT`` command carrying an
    ``x-seagate-faultinjection`` header whose value describes the FI spec, and
    hands it to the framework via ``with_cli``.
    """

    def __init__(self, description):
        # Path of the s3 client config file, resolved next to this script.
        self.s3cfg = os.path.join(os.path.dirname(os.path.realpath(__file__)), Config.config_file)
        super(S3fiTest, self).__init__(description)

    def setup(self):
        # Do initializations required to run the tests
        logit("Setting up the test [%s]" % (self.description))
        super(S3fiTest, self).setup()

    def run(self):
        super(S3fiTest, self).run()

    def teardown(self):
        super(S3fiTest, self).teardown()

    def with_cli(self, command):
        # Append the S3 endpoint; with TLS also pass the CA certificate.
        if Config.no_ssl:
            endpoint = S3ClientConfig.s3_uri_http
        else:
            endpoint = S3ClientConfig.s3_uri_https + " --cacert " + S3ClientConfig.ca_file
        super(S3fiTest, self).with_cli(command + endpoint)

    def _fi_command(self, spec_parts):
        # Build the curl invocation for a comma-separated FI spec, e.g.
        # "<opcode>,<freq>,<tag>".  The trailing pieces match the server's
        # expected header quoting exactly.
        header = "curl -sS --header \"x-seagate-faultinjection: "
        return header + ",".join(spec_parts) + "\" " + "-X PUT "

    def enable_fi(self, opcode, freq, tag):
        # Enable an FI point with an explicit frequency spec.
        self.opcode = opcode
        self.freq = freq
        self.tag = tag
        self.with_cli(self._fi_command([self.opcode, self.freq, self.tag]))
        return self

    def enable_fi_random(self, opcode, tag, prob):
        # Enable an FI point in "random" mode with probability <prob>
        # (interpretation of prob is defined by the server side).
        self.opcode = opcode
        self.tag = tag
        self.prob = prob
        self.with_cli(self._fi_command([self.opcode, "random", self.tag, self.prob]))
        return self

    def enable_fi_enablen(self, opcode, tag, ntime):
        # Enable an FI point in "enablen" mode parameterized by <ntime>.
        self.opcode = opcode
        self.tag = tag
        self.ntime = ntime
        self.with_cli(self._fi_command([self.opcode, "enablen", self.tag, self.ntime]))
        return self

    def enable_fi_offnonm(self, opcode, tag, ntime, mtime):
        # Enable an FI point in "offnonm" mode parameterized by <ntime>,<mtime>.
        # Sleep to avoid the impact on the previous request's FI cleanup.
        # TODO fault injection should be embedded into the actual request,
        # restricting its scope/lifetime to that specific request only.
        time.sleep(1)
        self.opcode = opcode
        self.tag = tag
        self.ntime = ntime
        self.mtime = mtime
        self.with_cli(self._fi_command([self.opcode, "offnonm", self.tag, self.ntime, self.mtime]))
        return self

    def disable_fi(self, tag):
        # Turn off a previously enabled FI point identified by <tag>.
        self.tag = tag
        self.with_cli(self._fi_command(["disable", "noop", self.tag]))
        return self
| 2,479 | 5 | 292 |
2118bf96e71c2f0f8f04b2c453cdef4a9730273a | 286 | py | Python | 2_mundo_exercicios/Lacos_de_repeticoes.py | GuilhermeLima182/CursoDePython | 7e72b117142794c38cbb14284d0fa6e1dbee5bf6 | [
"MIT"
] | null | null | null | 2_mundo_exercicios/Lacos_de_repeticoes.py | GuilhermeLima182/CursoDePython | 7e72b117142794c38cbb14284d0fa6e1dbee5bf6 | [
"MIT"
] | null | null | null | 2_mundo_exercicios/Lacos_de_repeticoes.py | GuilhermeLima182/CursoDePython | 7e72b117142794c38cbb14284d0fa6e1dbee5bf6 | [
"MIT"
] | null | null | null | #Nessa aula, vamos começar nossos estudos com os laços
# e vamos fazer primeiro o “for”, que é uma estrutura versátil e simples de entender.
# Por exemplo:
i = int(input('Início: '))
f = int(input('fim: '))
p = int(input('Passo: '))
for c in range(i, f+1, p):
print(c)
print('fim') | 28.6 | 85 | 0.660839 | #Nessa aula, vamos começar nossos estudos com os laços
# e vamos fazer primeiro o “for”, que é uma estrutura versátil e simples de entender.
# Por exemplo:
i = int(input('Início: '))
f = int(input('fim: '))
p = int(input('Passo: '))
for c in range(i, f+1, p):
print(c)
print('fim') | 0 | 0 | 0 |
4d7b41040feda519e478269c7c731fd79965edc3 | 6,068 | py | Python | examples/of_1.py | cwitkowitz/transcription-models | e8697d6969b074926ac55986bc02fa1aad04b471 | [
"MIT"
] | 4 | 2021-06-15T19:45:26.000Z | 2022-03-31T20:42:26.000Z | examples/of_1.py | cwitkowitz/transcription-models | e8697d6969b074926ac55986bc02fa1aad04b471 | [
"MIT"
] | null | null | null | examples/of_1.py | cwitkowitz/transcription-models | e8697d6969b074926ac55986bc02fa1aad04b471 | [
"MIT"
] | 1 | 2021-11-08T02:13:02.000Z | 2021-11-08T02:13:02.000Z | # Author: Frank Cwitkowitz <fcwitkow@ur.rochester.edu>
# My imports
from amt_tools.models import OnsetsFrames
from amt_tools.features import MelSpec
from amt_tools.datasets import MAPS
from amt_tools.train import train, validate
from amt_tools.transcribe import *
from amt_tools.evaluate import *
import amt_tools.tools as tools
# Regular imports
from sacred.observers import FileStorageObserver
from torch.utils.data import DataLoader
from sacred import Experiment
import torch
import os
EX_NAME = '_'.join([OnsetsFrames.model_name(),
MAPS.dataset_name(),
MelSpec.features_name()])
ex = Experiment('Onsets & Frames 1 w/ Mel Spectrogram on MAPS')
@ex.config
@ex.automain
| 33.899441 | 111 | 0.664964 | # Author: Frank Cwitkowitz <fcwitkow@ur.rochester.edu>
# My imports
from amt_tools.models import OnsetsFrames
from amt_tools.features import MelSpec
from amt_tools.datasets import MAPS
from amt_tools.train import train, validate
from amt_tools.transcribe import *
from amt_tools.evaluate import *
import amt_tools.tools as tools
# Regular imports
from sacred.observers import FileStorageObserver
from torch.utils.data import DataLoader
from sacred import Experiment
import torch
import os
# Experiment tag built as "<model>_<dataset>_<features>"; also used as the
# results directory name inside tools.DEFAULT_EXPERIMENTS_DIR (see config()).
EX_NAME = '_'.join([OnsetsFrames.model_name(),
                    MAPS.dataset_name(),
                    MelSpec.features_name()])
# Sacred experiment object driving config capture and the automain entry point
ex = Experiment('Onsets & Frames 1 w/ Mel Spectrogram on MAPS')
@ex.config
def config():
    """Sacred config scope: every local name assigned below is captured by
    NAME as a tunable experiment parameter (do not rename them)."""
    # Number of samples per second of audio
    sample_rate = 16000
    # Number of samples between frames
    hop_length = 512
    # Number of consecutive frames within each example fed to the model
    num_frames = 625
    # Number of training iterations to conduct
    iterations = 2000
    # How many equally spaced save/validation checkpoints - 0 to disable
    checkpoints = 40
    # Number of samples to gather for a batch
    batch_size = 8
    # The initial learning rate
    learning_rate = 6e-4
    # The id of the gpu to use, if available
    gpu_id = 0
    # Flag to re-acquire ground-truth data and re-calculate-features
    # This is useful if testing out different feature extraction parameters
    reset_data = True
    # The random seed for this experiment
    seed = 0
    # Create the root directory for the experiment to hold train/transcribe/evaluate materials
    # (side effect: runs every time the config scope is evaluated)
    root_dir = os.path.join(tools.DEFAULT_EXPERIMENTS_DIR, EX_NAME)
    os.makedirs(root_dir, exist_ok=True)
    # Add a file storage observer for the log directory
    ex.observers.append(FileStorageObserver(root_dir))
@ex.automain
def onsets_frames_run(sample_rate, hop_length, num_frames, iterations, checkpoints,
                      batch_size, learning_rate, gpu_id, reset_data, seed, root_dir):
    """Train Onsets & Frames on the MAPS synthesized-piano splits and evaluate
    on the held-out real-piano splits (ENSTDkAm/ENSTDkCl).

    All parameters are injected by Sacred from the ``config`` scope above.
    Artifacts (models, estimates, results) are written under ``root_dir``.
    """
    # Seed everything with the same seed
    tools.seed_everything(seed)
    # Initialize the default piano profile
    profile = tools.PianoProfile()
    # Processing parameters
    dim_in = 229
    model_complexity = 2
    # Create the mel spectrogram data processing module
    data_proc = MelSpec(sample_rate=sample_rate,
                        n_mels=dim_in,
                        hop_length=hop_length)
    # Initialize the estimation pipeline
    validation_estimator = ComboEstimator([NoteTranscriber(profile=profile),
                                           PitchListWrapper(profile=profile)])
    # Initialize the evaluation pipeline
    evaluators = [LossWrapper(),
                  MultipitchEvaluator(),
                  NoteEvaluator(key=tools.KEY_NOTE_ON),
                  NoteEvaluator(offset_ratio=0.2, key=tools.KEY_NOTE_OFF)]
    validation_evaluator = ComboEvaluator(evaluators, patterns=['loss', 'f1'])
    # Get a list of the MAPS splits
    splits = MAPS.available_splits()
    # Initialize the testing splits as the real piano data
    test_splits = ['ENSTDkAm', 'ENSTDkCl']
    # Remove the real piano splits to get the training partition
    train_splits = splits.copy()
    for split in test_splits:
        train_splits.remove(split)
    print('Loading training partition...')
    # Create a dataset corresponding to the training partition
    maps_train = MAPS(splits=train_splits,
                      hop_length=hop_length,
                      sample_rate=sample_rate,
                      data_proc=data_proc,
                      profile=profile,
                      num_frames=num_frames,
                      reset_data=reset_data)
    # Remove tracks in both partitions from the training partitions
    print('Removing overlapping tracks from training partition')
    maps_train.remove_overlapping(test_splits)
    # Create a PyTorch data loader for the dataset
    train_loader = DataLoader(dataset=maps_train,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0,
                              drop_last=True)
    print('Loading testing partition...')
    # Create a dataset corresponding to the testing partition
    maps_test = MAPS(splits=test_splits,
                     hop_length=hop_length,
                     sample_rate=sample_rate,
                     data_proc=data_proc,
                     profile=profile,
                     store_data=True)
    print('Initializing model...')
    # Initialize a new instance of the model
    onsetsframes = OnsetsFrames(dim_in, profile, data_proc.get_num_channels(), model_complexity, False, gpu_id)
    onsetsframes.change_device()
    onsetsframes.train()
    # Initialize a new optimizer for the model parameters
    optimizer = torch.optim.Adam(onsetsframes.parameters(), learning_rate)
    print('Training classifier...')
    # Create a log directory for the training experiment
    model_dir = os.path.join(root_dir, 'models')
    # Train the model
    onsetsframes = train(model=onsetsframes,
                         train_loader=train_loader,
                         optimizer=optimizer,
                         iterations=iterations,
                         checkpoints=checkpoints,
                         log_dir=model_dir,
                         val_set=None)
    print('Transcribing and evaluating test partition...')
    # Add save directories to the estimators
    validation_estimator.set_save_dirs(os.path.join(root_dir, 'estimated'), ['notes', 'pitch'])
    # Add a save directory to the evaluators and reset the patterns
    validation_evaluator.set_save_dir(os.path.join(root_dir, 'results'))
    validation_evaluator.set_patterns(None)
    # Get the average results for the testing partition
    results = validate(onsetsframes, maps_test, evaluator=validation_evaluator, estimator=validation_estimator)
    # Log the average results in metrics.json
    ex.log_scalar('Final Results', results, 0)
| 5,302 | 0 | 44 |
9e3c4b3f957494a170665eb8ab9cc5483493ea47 | 25,329 | py | Python | mwbase/models/participants.py | uw-ictd/mwbase | 6a46b5c5459a6bb6e1ba84ea74f689da8efe9687 | [
"Apache-2.0"
] | 1 | 2021-07-17T00:18:06.000Z | 2021-07-17T00:18:06.000Z | mwbase/models/participants.py | akettel/mwbase | 873b4fe8038f16feba5273990b0eb2109f8f05c6 | [
"Apache-2.0"
] | 4 | 2017-08-31T17:09:53.000Z | 2018-11-28T06:01:00.000Z | mwbase/models/participants.py | akettel/mwbase | 873b4fe8038f16feba5273990b0eb2109f8f05c6 | [
"Apache-2.0"
] | 2 | 2018-09-17T22:06:16.000Z | 2021-07-17T00:18:09.000Z | #!/usr/bin/python
# Python Imports
import collections
import datetime
import numbers
import swapper
from hashlib import sha256
# Django Imports
from django.conf import settings
from django.db import models
import utils
# Local Imports
from mwbase.models import PhoneCall, Practitioner, Visit, Connection
from transports import router, TransportError
from utils import enums
from utils.models import TimeStampedModel, ForUserQuerySet
| 38.967692 | 143 | 0.613565 | #!/usr/bin/python
# Python Imports
import collections
import datetime
import numbers
import swapper
from hashlib import sha256
# Django Imports
from django.conf import settings
from django.db import models
import utils
# Local Imports
from mwbase.models import PhoneCall, Practitioner, Visit, Connection
from transports import router, TransportError
from utils import enums
from utils.models import TimeStampedModel, ForUserQuerySet
class ParticipantQuerySet(ForUserQuerySet):
    """QuerySet for participants: phone-number lookup, per-participant
    message-count annotations, and batch SMS sending."""

    participant_field = None

    def get_from_phone_number(self, phone_number):
        """Return the participant owning *phone_number*.

        Raises:
            Participant.DoesNotExist: when no connection matches; the
                underlying Connection lookup is hidden from callers.
        """
        try:
            return Connection.objects.get(identity=phone_number).participant
        except Connection.DoesNotExist as e:
            # Re-raise as a participant-level error; chain the original
            # exception so the root cause stays visible in tracebacks.
            raise Participant.DoesNotExist() from e

    def annotate_messages(self):
        """Annotate each participant with message counts by direction and
        external delivery status (plus derived missed/other counts)."""
        return self.annotate(
            msg_outgoing=utils.sql_count_when(message__is_outgoing=True),
            msg_system=utils.sql_count_when(message__is_system=True),
            msg_nurse=utils.sql_count_when(message__is_system=False, message__is_outgoing=True),
            msg_incoming=utils.sql_count_when(message__is_outgoing=False),
            msg_delivered=utils.sql_count_when(message__external_status='Success'),
            msg_sent=utils.sql_count_when(message__external_status='Sent'),
            msg_failed=utils.sql_count_when(message__external_status='Failed'),
            msg_rejected=utils.sql_count_when(message__external_status='Message Rejected By Gateway'),
        ).annotate(
            msg_missed=models.F('msg_outgoing') - models.F('msg_delivered'),
            msg_other=models.F('msg_outgoing') - models.F('msg_delivered') - models.F('msg_sent'),
        )

    def send_batch(self, english, swahili=None, luo=None, auto='', send=False, control=False):
        """ Send a message to all participants in the query set
            english: required text
            swahili, luo: optional translated text
            auto: string to tag in the auto link field, will prefix with custom.
            send: boolean flag to send messages (default false)
            control: boolean flag to send messages to control group (default false)
        """
        # Fall back to the English text for missing translations
        if swahili is None:
            swahili = english
        if luo is None:
            luo = english
        text_translations = {'english': english, 'swahili': swahili, 'luo': luo}
        original_count = self.count()
        send_to = self.active_users()
        send_count = send_to.count()
        print("Sending to {} of {}".format(send_count, original_count))
        counts = collections.Counter()
        for p in send_to.all():
            # Send the correct language message to all participants
            text = text_translations.get(p.language, english)
            text = text.format(**p.message_kwargs())
            if send is True:
                msg = p.send_message(
                    text=text,
                    translation_status='cust',
                    auto='custom.{}'.format(auto) if auto != '' else 'custom',
                    # BUG FIX: compare the language CODE against 'english'
                    # (previously compared against the English message text,
                    # which is never a language code, so translated_text was
                    # filled in even for English speakers).  Matches the
                    # equivalent logic in Participant.send_automated_message.
                    translated_text=english if p.language != 'english' else '',
                    control=control,
                    is_system=False,
                )
                counts[msg.external_status] += 1
            else:
                print("({}) -- {}".format(p, text[:40]))
        if send is True:
            print("Send Status:\n", "\n\t".join("{} -> {}".format(key, count) for key, count in counts.most_common()))
        return send_count
class ParticipantManager(models.Manager):
    """Default participant manager.

    Annotates note/phone-call/message counts on every queryset and prefetches
    connections plus the pending (not-yet-arrived) visits, exposed on each
    instance as ``pending_visits``.
    """

    def get_queryset(self):
        pending_visits = models.Prefetch(
            'visit_set',
            queryset=Visit.objects.order_by('scheduled').filter(
                arrived__isnull=True, status='pending'),
            to_attr='pending_visits',
        )
        annotated = super().get_queryset().annotate(
            note_count=models.Count('note', distinct=True),
            phonecall_count=models.Count('phonecall', distinct=True),
            message_count=models.Count('message', distinct=True),
        )
        return annotated.prefetch_related('connection_set', pending_visits)
class BaseParticipant(TimeStampedModel):
    """Abstract participant model (swapped in as ``mwbase.Participant``).

    Holds study enrollment data, pregnancy/SMS state, and helpers for
    messaging, scheduled phone calls, delivery tracking and reporting.
    """

    PREG_STATUS_CHOICES = (
        ('pregnant', 'Pregnant'),
        ('over', 'Post-Date'),
        ('post', 'Post-Partum'),
        ('ccc', 'CCC'),
        ('loss', 'SAE opt-in'),
        ('sae', 'SAE opt-out'),
    )

    SMS_STATUS_CHOICES = (
        ('active', 'Active'),
        ('completed', 'Completed'),
        ('stopped', 'Withdrew'),
        ('quit', 'Left Study'),
        ('other', 'Admin Stop'),
    )

    LANGUAGE_CHOICES = (
        ('english', 'English'),
        ('luo', 'Luo'),
        ('swahili', 'Swahili'),
    )

    CONDITION_CHOICES = (
        ('art', '1 - Starting ART'),
        ('adolescent', '2 - Adolescent'),
        ('first', '3 - First Time Mother'),
        ('normal', '4 - Normal'),
        ('multiple', '5 - Twins'),
    )

    FAMILY_PLANNING_CHOICES = (
        ('none', 'None'),
        ('iud', 'IUD'),
        ('pill', 'Pills'),
        ('depot', 'Depot'),
        ('implant', 'Implant'),
    )

    RELATIONSHIP_CHOICES = (
        # NOTE: the stored key 'seperated' is kept (changing it would require
        # a data migration); only the display label spelling is corrected.
        ('single', 'Single'),
        ('partner', 'Partner'),
        ('married', 'Married'),
        ('seperated', 'Separated'),
    )

    DAY_CHOICES = (
        (0, 'Monday'),
        (1, 'Tuesday'),
        (2, 'Wednesday'),
        (3, 'Thursday'),
        (4, 'Friday'),
        (5, 'Saturday'),
        (6, 'Sunday'),
    )

    TIME_CHOICES = (
        (8, 'Morning (8 AM)'),
        (13, 'Afternoon (1 PM)'),
        (20, 'Evening (8 PM)'),
    )

    DELIVERY_SOURCE_CHOICES = (
        ('phone', 'Phone'),
        ('sms', 'SMS'),
        ('visit', 'Clinic Visit'),
        ('m2m', "Mothers to Mothers"),
        ('other', 'Other'),
    )

    # Set Custom Manager
    objects = ParticipantManager.from_queryset(ParticipantQuerySet)()
    objects_no_link = ParticipantQuerySet.as_manager()

    # Study Attributes
    study_id = models.CharField(max_length=10, unique=True, verbose_name='Study ID', help_text="* Use Barcode Scanner")
    sms_status = models.CharField(max_length=10, choices=SMS_STATUS_CHOICES, default='active', verbose_name='SMS Messaging Status')
    study_group = models.CharField(max_length=10, choices=enums.GROUP_CHOICES, verbose_name='Group')
    send_day = models.IntegerField(choices=DAY_CHOICES, default=0, verbose_name='Send Day')
    send_time = models.IntegerField(choices=TIME_CHOICES, default=8, verbose_name='Send Time')
    facility = models.CharField(max_length=15, choices=enums.FACILITY_CHOICES)

    # Participant Personal Information
    sms_name = models.CharField(max_length=12, verbose_name="SMS Name")
    display_name = models.CharField(max_length=30, blank=True)
    birthdate = models.DateField(verbose_name='DOB')
    partner_name = models.CharField(max_length=40, blank=True, verbose_name='Partner Name')
    relationship_status = models.CharField(max_length=15, choices=RELATIONSHIP_CHOICES, verbose_name='Relationship Status', blank=True)
    previous_pregnancies = models.IntegerField(blank=True, null=True, help_text='* excluding current')
    phone_shared = models.NullBooleanField(verbose_name='Phone Shared')
    language = models.CharField(max_length=10, choices=LANGUAGE_CHOICES, default='english')
    quick_notes = models.TextField(blank=True)

    # Medical Information
    preg_status = models.CharField(max_length=15, choices=PREG_STATUS_CHOICES, default='pregnant')
    condition = models.CharField(max_length=15, choices=CONDITION_CHOICES, default='normal')
    anc_num = models.CharField(max_length=15, verbose_name='ANC #')
    due_date = models.DateField(verbose_name='Estimated Delivery Date')
    delivery_date = models.DateField(verbose_name='Delivery Date', blank=True, null=True)
    delivery_source = models.CharField(max_length=10, verbose_name="Delivery Notification Source", choices=DELIVERY_SOURCE_CHOICES, blank=True)
    loss_date = models.DateField(blank=True, null=True, help_text='SAE date if applicable')
    family_planning = models.CharField(max_length=10, blank=True, choices=FAMILY_PLANNING_CHOICES, verbose_name='Family Planning')

    # State attributes to be edited by the system
    last_msg_client = models.DateField(blank=True, null=True, help_text='Date of last client message received', editable=False)
    last_msg_system = models.DateField(blank=True, null=True, help_text='Date of last system message sent', editable=False)
    is_validated = models.BooleanField(default=False, blank=True)
    validation_key = models.CharField(max_length=5, blank=True)

    class Meta:
        abstract = True

    def save(self, force_insert=False, force_update=False, *args, **kwargs):
        # Force capitalization of display_name
        self.display_name = self.display_name.capitalize()
        super().save(force_insert, force_update, *args, **kwargs)

    def __str__(self):
        return self.display_name.title()

    def __repr__(self):
        return "(#%03s) %s (%s)" % (self.study_id, self.display_name.title(), self.facility.title())

    def connection(self):
        """Return the participant's primary Connection, or None."""
        # Use connection_set.all() instead of .filter to take advantage of prefetch_related
        for connection in self.connection_set.all():
            if connection.is_primary is True:
                return connection

    def phone_number(self):
        """Return the primary connection's identity (phone number), or None."""
        connection = self.connection()
        if connection is not None:
            return connection.identity

    @property
    def is_active(self):
        """ Return active or reason not active: {active, preg, sms} """
        active = 'active'
        if self.preg_status in enums.NOT_ACTIVE_STATUS:
            active = 'preg'
        if self.sms_status in enums.NOT_ACTIVE_STATUS:
            active = 'sms'
        return active

    @property
    def no_sms(self):
        """True when either status forbids sending SMS to this participant."""
        return self.preg_status in enums.NO_SMS_STATUS or self.sms_status in enums.NO_SMS_STATUS

    def age(self):
        """Return the participant's age in whole years."""
        today = utils.today()
        delta = today - self.birthdate
        return int((delta.days - delta.seconds / 86400.0) / 365.2425)

    def next_visit(self):
        """ Return The Next Visit"""
        pending = self.visit_set.filter(scheduled__gte=datetime.date.today(), status='pending').last()
        if pending is None:
            # Check for a pending past date
            pending = self.visit_set.filter(status='pending').last()
        if pending is None:
            return None
        # Return the scheduled pending date
        return pending

    def tca_date(self):
        """ Return To Come Again Date or None """
        pending = self.next_visit()
        return pending.scheduled if pending is not None else None

    def tca_type(self):
        """ Return next visit type """
        pending = self.next_visit()
        return pending.visit_type.capitalize() if pending is not None else None

    def is_pregnant(self):
        return self.preg_status == 'pregnant' or self.preg_status == 'over'

    def was_pregnant(self, today=None):
        """
        Returns true if the participant was pregnant at date today
        """
        if self.delivery_date is not None:
            today = utils.today(today)
            return today <= self.delivery_date
        # No delivery recorded yet: treat as still pregnant
        return True

    def delta_days(self, today=None):
        """
        Return the number days until EDD or since delivery
        """
        today = utils.today(today)
        if self.was_pregnant(today):
            if self.delivery_date is None:
                return (self.due_date - today).days
            else:
                return (self.delivery_date - today).days
        else:  # post-partum
            # Return days since delivery
            return (today - self.delivery_date).days

    def description(self, **kwargs):
        """
        Description is a special formatted string that represents the state of a participant.
        It contains a series of dot-separated fields that map to the relevant attributes of the
        participant in determining an SMS message to send.
        See the equivalent section in the `AutomatedMessageQuerySet` class.
        """
        today = kwargs.get("today")
        condition = kwargs.get("condition", self.condition)
        group = kwargs.get("group", self.study_group)
        send_base = kwargs.get("send_base", 'edd' if self.was_pregnant(today=today) else 'dd')
        send_offset = kwargs.get("send_offset", self.delta_days(today=today) / 7)
        # Special Case: Visit Messages
        if send_base == 'visit':
            send_offset = 0
        # Special Case: SAE opt in messaging
        elif self.preg_status == 'loss':
            today = utils.today(today)
            loss_offset = ((today - self.loss_date).days - 1) / 7 + 1
            condition = 'nbaby'
            if loss_offset <= 4:
                send_base = 'loss'
                send_offset = loss_offset
        return "{send_base}.{group}.{condition}.{send_offset:.0f}".format(
            group=group, condition=condition,
            send_base=send_base, send_offset=send_offset
        )

    def days_str(self, today=None):
        """Return the delta_days value formatted as a human-readable string."""
        return utils.days_as_str(self.delta_days(today))

    def get_validation_key(self):
        """Derive a 5-digit validation key from immutable participant fields."""
        # todo: what is this used by/for?
        sha = sha256(
            ('%s%s%s%s' % (self.study_id, self.display_name, self.anc_num, self.birthdate)).encode('utf-8')
        ).hexdigest()[:5]
        # Map each hex digit to its decimal representation and keep 5 chars
        key = ''.join([str(int(i, 16)) for i in sha])
        return key[:5]

    def choice_label(self):
        return '{} {}'.format(self.study_id, self.display_name)

    def add_call(self, outcome='answered', comment=None, length=None, is_outgoing=True,
                 created=None, admin_user=None, scheduled=None):
        """Record a phone call for this participant and return it."""
        if created is None:
            created = utils.today()
        else:
            created = utils.angular_datepicker(created)
        new_call = PhoneCall.objects.create(outcome=outcome, participant=self, is_outgoing=is_outgoing,
                                            comment=comment, created=created, connection=self.connection(),
                                            length=length,
                                            scheduled=scheduled)
        return new_call

    def delivery(self, delivery_date, comment='', user=None, source=None):
        """Record a delivery: set post-partum status, schedule follow-up calls
        and visits, and attach a note."""
        self.delivery_date = delivery_date
        self.delivery_source = source
        # set_status calls self.save()
        self.set_status('post', comment='Post-partum set by {0}'.format(user))
        self.note_set.create(comment=comment, admin=user)
        # Schedule 1m and 1yr calls as needed
        self.schedule_month_call()
        self.schedule_year_call()
        # mark any delivery visits as attended
        self.visit_set.filter(visit_type='delivery').update(status='attended', arrived=delivery_date)
        # Add the 6-week study visit
        six_wk_date = delivery_date + datetime.timedelta(days=42)
        self.visit_set.create(scheduled=six_wk_date, visit_type='study')

    def set_status(self, new_status, comment='', note=False, user=None):
        """Change either preg_status or sms_status (whichever choice set the
        value belongs to) and record a StatusChange row."""
        ### get swapped StatusChange model
        StatusChange = swapper.load_model("mwbase", "StatusChange")
        ### validate new status against sms and preg choices and change
        if any(new_status in choice for choice in self.PREG_STATUS_CHOICES):
            old_status = self.preg_status
            self.preg_status = new_status
            self._old_status = new_status  # Disable auto status change message
            self.save()
            status = StatusChange(
                participant=self, old=old_status, new=new_status, comment=comment
            )
            status.save()
        elif any(new_status in choice for choice in self.SMS_STATUS_CHOICES):
            old_status = self.sms_status
            self.sms_status = new_status
            self._old_status = new_status  # Disable auto status change message
            self.save()
            status = StatusChange(
                participant=self, old=old_status, new=new_status, comment=comment, type='sms_status'
            )
            status.save()
        if note is True:
            self.note_set.create(comment=comment, admin=user)

    def schedule_month_call(self, created=False):
        ''' Schedule 1m call post delivery
            param: created(boolean): flag to return created,call tuple
            This function is idempotent
        '''
        if self.delivery_date is None:
            # No delivery date so schedule a post-edd call instead
            return self.schedule_edd_call(created)
        one_month_call = self.scheduledphonecall_set.filter(call_type='m').first()
        was_created = one_month_call is None
        if one_month_call is not None:
            # Already set a call 2w post edd
            if one_month_call.attended is not None:
                # Last scheduled call was made so do nothing
                # (assume it was the 14 day call where we learned about the delivery)
                pass
            else:
                # Delivery notification happens before the post-edd call.
                # Change one month call to 30 days past delivery date.
                # BUG FIX: was `self.delilvery_date` (typo), which raised
                # AttributeError whenever this branch executed.
                # NOTE(review): the modified `scheduled` value is not saved
                # here - confirm the caller persists it.
                one_month_call.scheduled = self.delivery_date + datetime.timedelta(days=30)
        else:
            # Schedule call for one month after delivery
            one_month_call = self.scheduledphonecall_set.create(
                scheduled=self.delivery_date + datetime.timedelta(days=30),
                call_type='m'
            )
        if created:
            return was_created, one_month_call
        return one_month_call

    def schedule_edd_call(self, created=False):
        """ If no delivery date is set schedule a 14 day post edd call
            param: created(boolean): flag to return created,call tuple
            This function is idempotent
        """
        if self.delivery_date is not None:
            # There is a delivery date so don't schedule an edd call
            if created:
                return False, None
            return None
        one_month_call = self.scheduledphonecall_set.filter(call_type='m').first()
        if one_month_call is not None:
            # Scheduled one month call has not been marked as attended
            if one_month_call.arrived is None:
                # BUG FIX: previously returned a (False, call) tuple even when
                # created=False, unlike every other return path.
                if created:
                    return False, one_month_call
                return one_month_call
            else:
                # Already made a 14 day pre edd call so set for 14 days from now
                scheduled = datetime.date.today() + datetime.timedelta(days=14)
        else:
            # Set for 14 days from edd
            scheduled = self.due_date + datetime.timedelta(days=14)
        one_month_call = self.scheduledphonecall_set.create(scheduled=scheduled, call_type='m')
        if created:
            return True, one_month_call
        return one_month_call

    def schedule_year_call(self, created=False):
        """ Schedule 1yr calls as needed
            param: created(boolean): flag to return created,call tuple
            This function is idempotent
        """
        one_year_call = self.scheduledphonecall_set.get_or_none(call_type='y')
        was_created = False
        if self.delivery_date is not None:
            if one_year_call is None:
                was_created = True
                one_year_call = self.scheduledphonecall_set.create(
                    scheduled=self.delivery_date + datetime.timedelta(days=365),
                    call_type='y'
                )
            else:
                # NOTE(review): the modified `scheduled` value is not saved
                # here - confirm the caller persists it.
                one_year_call.scheduled = self.delivery_date + datetime.timedelta(days=365)
        if created:
            return was_created, one_year_call
        return one_year_call

    def message_kwargs(self):
        """Return the substitution values available to message templates."""
        nurse_obj = Practitioner.objects.for_participant(self)
        return {
            'name': self.sms_name.title(),
            'nurse': nurse_obj.user.first_name.title() if nurse_obj is not None else 'Nurse',
            'clinic': self.facility.title()
        }

    def send_message(self, text, control=False, **kwargs):
        """Send *text* to this participant over the transport router and record
        the resulting Message. Control-group and no-SMS participants get a
        tagged, unsent record unless control=True."""
        # Control check - don't send messages to participants in the control
        if self.study_group == 'control' and control is False:
            text = 'CONTROL NOT SENT: ' + text
            msg_id = 'control'
            msg_success = False
            external_data = {}
        # Status check - don't send messages to participants with NO_SMS_STATUS
        elif self.preg_status in enums.NO_SMS_STATUS and control is False:
            text = 'STATUS {} NOT SENT: '.format(self.preg_status.upper()) + text
            msg_id = self.preg_status
            msg_success = False
            external_data = {}
        else:
            # Send message over system transport
            try:
                msg_id, msg_success, external_data = router.send(self.phone_number(), text)
            except TransportError as e:
                msg_id = ""
                msg_success = False
                external_data = {"error": str(e)}
        # Create new message
        new_message = self.message_set.create(
            text=text,
            connection=self.connection(),
            external_id=msg_id,
            external_success=msg_success,
            external_status="Sent" if msg_success else external_data.get("status", "Failed"),
            external_data=external_data,
            **kwargs)
        return new_message

    def send_automated_message(self, control=False, send=True, exact=False, extra_kwargs=None, **kwargs):
        """ kwargs get passed into self.description
        :param control bool - if True allow sending to control
        :param exact bool - if True only send exact match
        :param send bool - if True send message
        :kwargs
            - hiv_messaging bool - hiv_messaging or not
            - group - string for study group
            - today - date for sending to (default today)
            - send_base - string send_base
            - send_offset - int send_offset (or calculated from today)
            - condition - defaults to self.condition
        """
        description = self.description(**kwargs)
        AutomatedMessage = swapper.load_model("mwbase", "AutomatedMessage")
        message = AutomatedMessage.objects.from_description(description, exact=exact)
        if message is None:
            return None  # TODO: logging on this
        text = message.text_for(self, extra_kwargs)
        if text is None:
            return None  # TODO: logging on this
        # Set last_msg_system
        self.last_msg_system = utils.today()
        self.save()
        if send:
            translated_text = message.english if self.language != 'english' else ''
            return self.send_message(
                text=text,
                translation_status='auto',
                auto=message.description(),
                control=control,
                translated_text=translated_text
            )
        else:
            return message

    def get_recent_messages(self, n=8):
        """
        :return: most recent n messages for serialization
        """
        return self.message_set.all()[:n]

    ########################################
    # Reporting Functions
    ########################################

    def validation_delta(self):
        """ Return the number of seconds between welcome message and validation """
        if self.is_validated:
            welcome_msg = self.message_set.filter(auto__startswith='signup', auto__endswith='0').first()
            validation_msg = self.message_set.filter(topic='validation').last()
            if welcome_msg and validation_msg:
                delta = validation_msg.created - welcome_msg.created
                return delta.total_seconds()

    def delivery_delta(self):
        """ Return the number of days between the delivery and delivery notification """
        if self.delivery_date is None:
            return None
        else:
            # BUG FIX: filtered on type='status', which is never written
            # (set_status records the default 'preg_status' or 'sms_status'),
            # so this method always returned None.
            status_change = self.statuschange_set.filter(type='preg_status', new='post').last()
            if status_change is not None:
                return (status_change.created.date() - self.delivery_date).days
            return None
class Participant(BaseParticipant):
    """Concrete participant model: only the base elements plus the swappable
    Meta, so downstream projects can replace it via django-swapper."""
    class Meta:
        app_label = 'mwbase'
        swappable = swapper.swappable_setting('mwbase', 'Participant')
class BaseStatusChange(TimeStampedModel):
    """Abstract audit record of a participant status transition.

    ``type`` names the field that changed: the default 'preg_status', or
    'sms_status' when set by BaseParticipant.set_status for SMS changes.
    """
    class Meta:
        abstract = True
    # Participant whose status changed (references the swapped-in model)
    participant = models.ForeignKey(swapper.get_model_name('mwbase', 'Participant'), models.CASCADE)
    # Previous status value
    old = models.CharField(max_length=20)
    # New status value
    new = models.CharField(max_length=20)
    # Which status field changed
    type = models.CharField(max_length=10, default='preg_status')
    comment = models.TextField(blank=True)
    def __str__(self):
        return "{0.old} {0.new} ({0.type})".format(self)
class StatusChange(BaseStatusChange):
    """Concrete status-change model; replaceable via django-swapper."""
    objects = ForUserQuerySet.as_manager()
    class Meta:
        app_label = 'mwbase'
        swappable = swapper.swappable_setting('mwbase', 'StatusChange')
| 7,513 | 17,210 | 165 |
dd4b91889a0f53feb4fa8787ef9d5fec82d85047 | 1,272 | py | Python | sdno-link-monitor/mie/logtan/logtan_null.py | openov2/sdno-monitoring | 7ca338dd34db36cd5a5ec574137578bac656df2a | [
"CC-BY-4.0"
] | null | null | null | sdno-link-monitor/mie/logtan/logtan_null.py | openov2/sdno-monitoring | 7ca338dd34db36cd5a5ec574137578bac656df2a | [
"CC-BY-4.0"
] | null | null | null | sdno-link-monitor/mie/logtan/logtan_null.py | openov2/sdno-monitoring | 7ca338dd34db36cd5a5ec574137578bac656df2a | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from singleton import ClsSingleton
logtan = LogTan_Null()
| 20.190476 | 75 | 0.656447 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from singleton import ClsSingleton
class LogTan_Null():
    """Null (no-op) logtan backend: accepts every logging call and discards it.

    Drop-in stand-in used when no real log sink is configured; all level
    methods (``i``/``w``/``e``/``f``) and ``cfg`` are intentional no-ops.
    """
    # Python 2 singleton hook (this module predates Python 3); under
    # Python 3 the ``__metaclass__`` attribute is ignored.
    __metaclass__ = ClsSingleton

    @classmethod
    def cfg(cls, **kwargs):
        """Accept configuration options; nothing to configure for a null sink."""
        pass

    # The level methods below all declared ``cls`` as their first parameter
    # but were missing the @classmethod decorator that ``cfg`` has; the
    # decorator is added for consistency (bodies are ``pass``, so behavior
    # is unchanged for every call shape).
    @classmethod
    def i(cls, rec, mod=None):
        """Discard an info-level record."""
        pass

    @classmethod
    def w(cls, rec, mod=None):
        """Discard a warning-level record."""
        pass

    @classmethod
    def e(cls, rec, mod=None):
        """Discard an error-level record."""
        pass

    @classmethod
    def f(cls, rec, mod=None):
        """Discard a fatal-level record."""
        pass
logtan = LogTan_Null()
def cfg(**kwargs):
    """Forward configuration options to the shared null backend."""
    logtan.cfg(**kwargs)


def i(rec=None, mod=None, **kwargs):
    """Log an info record via the module-level backend.

    Bug fix: the body previously referenced a global ``rec`` that was never
    defined, so every call raised NameError. ``rec`` is now an explicit,
    defaulted parameter (no previously working call shape is broken).
    """
    logtan.i(rec, mod)


def w(rec=None, mod=None, **kwargs):
    """Log a warning record (discarded by the null backend)."""
    logtan.w(rec, mod)


def e(rec=None, mod=None, **kwargs):
    """Log an error record (discarded by the null backend)."""
    logtan.e(rec, mod)


def f(rec=None, mod=None, **kwargs):
    """Log a fatal record (discarded by the null backend)."""
    logtan.f(rec, mod)
| 221 | 184 | 138 |
e9530d009a76d844bb5cfce22dcfd6beb9b0f0b7 | 4,384 | py | Python | PlotlyandPython/Lessons/(01) Intro to Python/Notebooks/Python Scripts/(21) Intro to Pandas (5) - Accessing and Changing Specific Observations.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | PlotlyandPython/Lessons/(01) Intro to Python/Notebooks/Python Scripts/(21) Intro to Pandas (5) - Accessing and Changing Specific Observations.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | PlotlyandPython/Lessons/(01) Intro to Python/Notebooks/Python Scripts/(21) Intro to Pandas (5) - Accessing and Changing Specific Observations.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null |
# coding: utf-8
# # Intro to Pandas (5) - Accessing and Changing Specific Observations
# In the last lesson we saw how to rename and drop columns, and to set the index in a DataFrame.
#
# In this lesson we'll learn about positional and label-based selection and how to use this to make changes to specific observations.
# ## Module Imports
# In[1]:
#plotly.offline doesn't push your charts to the clouds
import plotly.offline as pyo
#allows us to create the Data and Figure objects
from plotly.graph_objs import *
#plotly.plotly pushes your charts to the cloud
import plotly.plotly as py
#pandas is a data analysis library
import pandas as pd
from pandas import DataFrame
# ## Positional and Label-based Selection
#
# First of all, I'm going to read some data into a DataFrame. I also need to make another DataFrame which has a label-based index rather than a positional index.
# In[2]:
baseRateData = pd.read_csv("http://www.richard-muir.com/data/public/csv/BoEBaseRate.csv")
baseRateData_r = baseRateData.rename(columns = {'VALUE' : 'Value', 'DATE' : 'Date'})
baseRateData_r.set_index(baseRateData_r['Date'], inplace=True)
baseRateData_r.drop(['Date'], axis = 1, inplace = True)
# Let's have a look at these DataFrames:
# In[3]:
baseRateData.head()
# In[4]:
baseRateData_r.head()
# #### Selecting observations in a DataFrame
#
# We can select observations from a DataFrame by using <code>df.loc</code> and <code>df.iloc</code>.
#
# - <code>df.loc</code> selects the observations by their index label
# - <code>df.iloc</code> selects the observations by their position
#
# Here, I'm using <code>df.loc</code> to select the first 10 rows from <code>baseRateData</code>. Note that <code>df.loc</code> doesn't work like a list slice in Python; rather than stopping before the specified number, we include that observation:
# In[5]:
baseRateData.loc[:9]
# If I try to use <code>df.loc</code> on <code>baseRateData_r</code>, this won't work because we have changed the index label:
# In[6]:
baseRateData_r.loc[:9]
# Instead I have to pass the row index label which I want:
# In[7]:
baseRateData_r.loc[:'15/01/1975']
# But <code>df.iloc</code> works the same on both DataFrames because in <code>baseRateData</code>, the index is equal to the position - <code>df.iloc</code> works on the ordinal position of the rows.
#
# Confusingly, <code>df.iloc</code> works in the same way as list and string slicing, stopping just before the specified position:
# In[8]:
baseRateData.iloc[:9]
# In[9]:
baseRateData_r.iloc[:9]
# For both <code>df.loc</code> and <code>df.iloc</code>, we can take a slice from the middle of the DataFrame:
# In[10]:
baseRateData_r.loc['06/01/1975':'13/01/1975']
# In[11]:
baseRateData.iloc[4:6]
# We can also combine the column names with <code>df.loc</code> and <code>df.iloc</code> to get 2D slices of a DataFrame.
#
# Remember that <code>df.loc</code> works on the labels:
# In[12]:
baseRateData.loc[5:13, 'DATE']
# But <code>df.iloc</code> operates on the index; the columns are numerically indexed (in the same way as the rows):
# In[13]:
baseRateData.iloc[5:13, 0]
# ### Changing Data in a DataFrame
#
# So now we can select individual rows and columns in a DataFrame by the index label or position. We can use this knowledge to make changes to specific observations within the DataFrame.
#
# Imagine that we were told that the first twenty rows of our data were incorrect; they should have been 1.15 instead of 11.5. Let's make some changes!
#
# First of all, I'm using <code>df.loc</code> to select the first 20 rows by label and only the 'VALUE' column. It's just a simple matter of setting the value which we want these observations to take:
# In[14]:
baseRateData.loc[:19, 'VALUE'] = 1.15
baseRateData.head(25)
# We can also do it with <code>df.iloc</code>. Remember that the slicing is slightly different...
#
# I'll change it instead to 2.15 so we can prove it works:
# In[15]:
baseRateData.iloc[:20, 0] = 2.15
baseRateData.head(25)
# ### What have we learnt this lesson?
# In this lesson we've seen how to access rows and columns by their label and position, and to use this positional selection to make changes to the data in the DataFrame.
# If you have any questions, please ask in the comments section or email <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
| 28.842105 | 248 | 0.726277 |
# coding: utf-8
# # Intro to Pandas (5) - Accessing and Changing Specific Observations
# In the last lesson we saw how to rename and drop columns, and to set the index in a DataFrame.
#
# In this lesson we'll learn about positional and label-based selection and how to use this to make changes to specific observations.
# ## Module Imports
# In[1]:
#plotly.offline doesn't push your charts to the clouds
import plotly.offline as pyo
#allows us to create the Data and Figure objects
from plotly.graph_objs import *
#plotly.plotly pushes your charts to the cloud
import plotly.plotly as py
#pandas is a data analysis library
import pandas as pd
from pandas import DataFrame
# ## Positional and Label-based Selection
#
# First of all, I'm going to read some data into a DataFrame. I also need to make another DataFrame which has a label-based index rather than a positional index.
# In[2]:
baseRateData = pd.read_csv("http://www.richard-muir.com/data/public/csv/BoEBaseRate.csv")
baseRateData_r = baseRateData.rename(columns = {'VALUE' : 'Value', 'DATE' : 'Date'})
baseRateData_r.set_index(baseRateData_r['Date'], inplace=True)
baseRateData_r.drop(['Date'], axis = 1, inplace = True)
# Let's have a look at these DataFrames:
# In[3]:
baseRateData.head()
# In[4]:
baseRateData_r.head()
# #### Selecting observations in a DataFrame
#
# We can select observations from a DataFrame by using <code>df.loc</code> and <code>df.iloc</code>.
#
# - <code>df.loc</code> selects the observations by their index label
# - <code>df.iloc</code> selects the observations by their position
#
# Here, I'm using <code>df.loc</code> to select the first 10 rows from <code>baseRateData</code>. Note that <code>df.loc</code> doesn't work like a list slice in Python; rather than stopping before the specified number, we include that observation:
# In[5]:
baseRateData.loc[:9]
# If I try to use <code>df.loc</code> on <code>baseRateData_r</code>, this won't work because we have changed the index label:
# In[6]:
baseRateData_r.loc[:9]
# Instead I have to pass the row index label which I want:
# In[7]:
baseRateData_r.loc[:'15/01/1975']
# But <code>df.iloc</code> works the same on both DataFrames because in <code>baseRateData</code>, the index is equal to the position - <code>df.iloc</code> works on the ordinal position of the rows.
#
# Confusingly, <code>df.iloc</code> works in the same way as list and string slicing, stopping just before the specified position:
# In[8]:
baseRateData.iloc[:9]
# In[9]:
baseRateData_r.iloc[:9]
# For both <code>df.loc</code> and <code>df.iloc</code>, we can take a slice from the middle of the DataFrame:
# In[10]:
baseRateData_r.loc['06/01/1975':'13/01/1975']
# In[11]:
baseRateData.iloc[4:6]
# We can also combine the column names with <code>df.loc</code> and <code>df.iloc</code> to get 2D slices of a DataFrame.
#
# Remember that <code>df.loc</code> works on the labels:
# In[12]:
baseRateData.loc[5:13, 'DATE']
# But <code>df.iloc</code> operates on the index; the columns are numerically indexed (in the same way as the rows):
# In[13]:
baseRateData.iloc[5:13, 0]
# ### Changing Data in a DataFrame
#
# So now we can select individual rows and columns in a DataFrame by the index label or position. We can use this knowledge to make changes to specific observations within the DataFrame.
#
# Imagine that we were told that the first twenty rows of our data were incorrect; they should have been 1.15 instead of 11.5. Let's make some changes!
#
# First of all, I'm using <code>df.loc</code> to select the first 20 rows by label and only the 'VALUE' column. It's just a simple matter of setting the value which we want these observations to take:
# In[14]:
baseRateData.loc[:19, 'VALUE'] = 1.15
baseRateData.head(25)
# We can also do it with <code>df.iloc</code>. Remember that the slicing is slightly different...
#
# I'll change it instead to 2.15 so we can prove it works:
# In[15]:
baseRateData.iloc[:20, 0] = 2.15
baseRateData.head(25)
# ### What have we learnt this lesson?
# In this lesson we've seen how to access rows and columns by their label and position, and to use this positional selection to make changes to the data in the DataFrame.
# If you have any questions, please ask in the comments section or email <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
| 0 | 0 | 0 |
fe8ca815b43fd13795ddf63b4f6cfbc80709e93d | 828 | py | Python | Eratostenes_method_prime_numbers.py | Pablo-RodriguezOrtiz/Small-projects | b83f70214ce98daf45112306342d7d232d13a61f | [
"CC0-1.0"
] | null | null | null | Eratostenes_method_prime_numbers.py | Pablo-RodriguezOrtiz/Small-projects | b83f70214ce98daf45112306342d7d232d13a61f | [
"CC0-1.0"
] | null | null | null | Eratostenes_method_prime_numbers.py | Pablo-RodriguezOrtiz/Small-projects | b83f70214ce98daf45112306342d7d232d13a61f | [
"CC0-1.0"
] | null | null | null | # ------------------------------------------------------------------------
#
#
# Made with python 3.8.8
#
#
# ------------------------------------------------------------------------
eratostenes(100) | 41.4 | 125 | 0.415459 | # ------------------------------------------------------------------------
#
#
# Made with python 3.8.8
#
#
# ------------------------------------------------------------------------
def eratostenes(x):
    """Print (and discard) the list of primes in [2, x] using the sieve of
    Eratosthenes; returns None, matching the original's ``return print(...)``.

    Bug fix: the original built each prime's multiples only up to
    ``i * (int(x / 3) - 1)``, so composites whose cofactor exceeds that bound
    were never marked (e.g. for x=130 it reported 128 = 2**7 as prime). It
    also used O(n) list membership per candidate. This version marks every
    multiple with a boolean sieve: correct for all x and O(n log log n).
    """
    # is_composite[n] is True once n is known to be a multiple of a smaller prime.
    is_composite = [False] * (x + 1) if x >= 0 else []
    primos = []
    for i in range(2, x + 1):
        if not is_composite[i]:
            primos.append(i)
            # Start at i*i: smaller multiples were marked by smaller primes.
            for multiple in range(i * i, x + 1, i):
                is_composite[multiple] = True
    return print(primos)
eratostenes(100) | 598 | 0 | 25 |
f1b421c3c51ea1cc5bb86904d870033bead4bba3 | 11,980 | py | Python | src/scv/reg_gbscv.py | tpinhoda/Graph-Based_Spatial_Cross_Validation | 19300a715d3d03580232926bbc1f6ea8800b23e3 | [
"MIT"
] | null | null | null | src/scv/reg_gbscv.py | tpinhoda/Graph-Based_Spatial_Cross_Validation | 19300a715d3d03580232926bbc1f6ea8800b23e3 | [
"MIT"
] | null | null | null | src/scv/reg_gbscv.py | tpinhoda/Graph-Based_Spatial_Cross_Validation | 19300a715d3d03580232926bbc1f6ea8800b23e3 | [
"MIT"
] | null | null | null | """Generate graph-based cross-validation spatial folds"""
import os
import time
from typing import Dict, List
from dataclasses import dataclass, field
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from tqdm import tqdm
from src.scv.scv import SpatialCV
X_1DIM_COL = "X_1DIM"
@dataclass
class RegGraphBasedSCV(SpatialCV):
"""Generates the Regularization Graph Based Spatial Cross-Validation folds
Attributes
----------
data: pd.Dataframe
The spatial dataset to generate the folds
fold_col: str
The fold column name
target_col: str
The targer attribute column name
adj_matrix: pd.Dataframe
The adjacency matrix regarding the spatial objects in the data
paper: bool
Whether to run experiments according to ICMLA21 paper
root_path : str
Root path
"""
kappa: float = 0.5
run_selection: bool = False
target_col: str = "TARGET"
adj_matrix: pd.DataFrame = field(default_factory=pd.DataFrame)
paper: bool = False
type_graph: str = "Sparse"
sill_target: Dict = field(default_factory=dict)
sill_reduced: Dict = field(default_factory=dict)
sill_max_reduced: Dict = field(default_factory=dict)
w_matrix: pd.DataFrame = field(default_factory=pd.DataFrame)
def _calculate_train_pca(self) -> np.array:
"""Return the PCA first component transformation on the traind data"""
pca = PCA(n_components=1)
train = self.data.drop(columns=[self.fold_col, self.target_col])
# For the IMCLA21 paper the PCA is executed only on the cennsus columns
if self.paper:
cols = [c for c in train.columns if "CENSUS" in c]
train = train[cols]
pca.fit(train)
return pca.transform(train).flatten()
def _calculate_removing_buffer_sill(self, fold_name, fold_data, global_var) -> Dict:
"""Calculate the sill for each fold to be used on the removing buffer process"""
fold_target = fold_data[self.target_col]
test_target = self.test_data[self.target_col]
target_var = fold_target.append(test_target).var()
self.sill_target[fold_name] = (target_var + global_var) / 2
def _calculate_selection_buffer_sill(
self, fold_name, fold_data, global_var
) -> Dict:
"""Calculate the sill for each fold to be used on the selection buffer process"""
reduced_var = fold_data[X_1DIM_COL].append(self.test_data[X_1DIM_COL]).var()
# self.sill_reduced[fold_name] = (reduced_var + global_var) / 2
self.sill_reduced[fold_name] = reduced_var
max_var_train = max(self.sill_reduced, key=self.sill_reduced.get)
for _ in self.sill_reduced:
self.sill_max_reduced[_] = self.sill_reduced[max_var_train]
def _initiate_buffers_sills(self) -> Dict:
"""Initialize and calculate the sills for the removing and selectiont procedures"""
global_target_var = self.data[self.target_col].var()
global_reduced_var = self.data[X_1DIM_COL].var()
self.sill_target = {}
self.sill_reduced = {}
for fold_name, fold_data in self.train_data.groupby(by=self.fold_col):
self._calculate_selection_buffer_sill(
fold_name, fold_data, global_reduced_var
)
self._calculate_removing_buffer_sill(
fold_name, fold_data, global_target_var
)
def _convert_adj_matrix_index_types(self) -> pd.DataFrame:
"""Convert adjacenty matrixy index and columns types to the same as in the data"""
self.adj_matrix.index = self.adj_matrix.index.astype(self.data.index.dtype)
self.adj_matrix.columns = self.adj_matrix.columns.astype(self.data.index.dtype)
self.w_matrix.index = self.w_matrix.index.astype(self.data.index.dtype)
self.w_matrix.columns = self.w_matrix.columns.astype(self.data.index.dtype)
@staticmethod
def _get_neighbors(indexes, adj_matrix) -> List:
"""Return the 1-degree neighborhood from a given sub-graph formed by indexes"""
area_matrix = adj_matrix.loc[indexes]
neighbors = area_matrix.sum(axis=0) > 0
neighbors = neighbors[neighbors].index
neighbors = [n for n in neighbors if n not in indexes]
return neighbors
def _calculate_longest_path(self) -> int:
"""Calculate the longest_path from a BFS tree taking the test set as root"""
path_indexes = self.test_data.index.values.tolist()
local_data_idx = (
self.test_data.index.values.tolist() + self.train_data.index.values.tolist()
)
matrix = self.adj_matrix.loc[local_data_idx, local_data_idx]
neighbors = self._get_neighbors(path_indexes, matrix)
size_tree = 0
while len(neighbors) > 0:
size_tree += 1
neighbors = self._get_neighbors(path_indexes, matrix)
path_indexes = path_indexes + neighbors
return size_tree
def _calculate_similarity_matrix(self, fold_data, attribute) -> np.ndarray:
"""Calculate the similarity matrix between test set and a given training
fold set based on a given attribute"""
test_values = self.test_data[attribute].to_numpy()
node_values = fold_data[attribute]
return (test_values - node_values) ** 2
@staticmethod
def _calculate_gamma(similarity, geo_weights, kappa) -> np.float64:
"""Calculate gamma or the semivariogram"""
gamma_dist = similarity - (kappa * (1 - geo_weights) * similarity)
sum_diff = np.sum(gamma_dist)
sum_dist = len((similarity))
return sum_diff / (2 * sum_dist)
def _get_neighbors_weights(self, index):
"""Return the matrix weights test set x neighbors"""
return self.w_matrix.loc[self.test_data.index, index]
def _calculate_gamma_by_node(self, neighbors, attribute, kappa) -> Dict:
"""Calculate the semivariogram by folds"""
nodes_gamma = {}
neighbors = [n for n in neighbors if n in self.train_data.index]
neighbors_data = self.train_data.loc[neighbors]
for index, node_data in neighbors_data.iterrows():
similarity = self._calculate_similarity_matrix(node_data, attribute)
geo_weights = self._get_neighbors_weights(index)
gamma = self._calculate_gamma(similarity, geo_weights, kappa)
nodes_gamma[index] = gamma
return nodes_gamma
def _get_n_fold_neighbohood(self) -> int:
"""Get ne number of folds neighbors from the test set"""
neighbors_idx = self._get_neighbors(self.test_data.index, self.adj_matrix)
neighbors_idx = [n for n in neighbors_idx if n in self.data.index]
return len(self.data.loc[neighbors_idx].groupby(self.fold_col))
@staticmethod
def _calculate_exponent(size_tree, count_n) -> np.float64:
"""Caclulate the decay exponent"""
return np.log(1 * size_tree - count_n) / np.log(1 * size_tree)
def _propagate_variance(self, attribute, kappa) -> List:
"""Calculate propagate variance"""
# Initialize variables
buffer = [] # containg the index of instaces buffered
nodes_gamma = {}
# Start creating the buffer
while len(buffer) < self.train_data.shape[0]:
# Get the instance indexes from te test set + the indexes buffer
growing_graph_idx = self.test_data.index.values.tolist() + buffer
# Get the neighbor
h_neighbors = self._get_neighbors(growing_graph_idx, self.adj_matrix)
# Calculate the semivariogram for each fold in the neighborhood
nodes_gamma.update(
self._calculate_gamma_by_node(h_neighbors, attribute, kappa)
)
buffer += h_neighbors
return nodes_gamma
def _calculate_selection_buffer(self, nodes_propagated, attribute):
"""Calculate buffer nodes"""
buffered_nodes = []
sill = self.data[attribute].var()
buffered_nodes = [
node for node, gamma in nodes_propagated.items() if gamma < sill
]
return buffered_nodes
def _calculate_removing_buffer(self, nodes_propagated, nodes_reduced, attribute):
"""Calculate buffer nodes"""
sill_target = self.test_data[attribute].var()
# sill_w_matrix = self.w_matrix.to_numpy().var()
sill_reduced = self.test_data[X_1DIM_COL].var()
# sill_target = self.kappa * sill_target + (1 - self.kappa) * sill_w_matrix
buffered_nodes_target = [
node for node, gamma in nodes_propagated.items() if gamma < sill_target
]
buffered_nodes_reduced = [
node for node, gamma in nodes_reduced.items() if gamma < sill_reduced
]
# return [node for node in buffered_nodes_target if node in buffered_nodes_reduced]
return buffered_nodes_target
def run(self):
"""Generate graph-based spatial folds"""
# Create folder folds
start_time = time.time()
self._init_fields()
self._make_folders(["folds", self.scv_method])
self.data[X_1DIM_COL] = self._calculate_train_pca()
for fold_name, test_data in tqdm(
self.data.groupby(by=self.fold_col), desc="Creating folds"
):
if fold_name != -1:
# Cread fold folder
self._mkdir(str(fold_name))
# Initialize x , y and reduce
self._split_data_test_train(test_data)
# Calculate local sill
self._initiate_buffers_sills()
# Ensure indexes and columns compatibility
self._convert_adj_matrix_index_types()
# Calculate selection buffer
nodes_prop_reduced = self._propagate_variance(X_1DIM_COL, self.kappa)
selection_buffer = self._calculate_selection_buffer(
nodes_prop_reduced, X_1DIM_COL
)
if self.run_selection:
self.train_data = self.train_data.loc[selection_buffer]
# The train data is used to calcualte the buffer. Thus, the size tree,
# and the gamma calculation will be influenced by the selection buffer.
# Calculate removing buffer
nodes_prop_target = self._propagate_variance(
self.target_col, self.kappa
)
removing_buffer = self._calculate_removing_buffer(
nodes_prop_target, nodes_prop_reduced, self.target_col
)
# removing_buffer = [node for node in removing_buffer if node in selection_buffer]
# removing_buffer = selection_buffer
self.train_data.drop(index=removing_buffer, inplace=True)
# Save buffered data indexes
self._save_buffered_indexes(removing_buffer)
# Save fold index relation table
self._save_fold_by_index_training()
# Clean data
self._clean_data(cols_drop=[X_1DIM_COL, self.fold_col])
# Save data
# self._save_data()
# Update cur dir
self.cur_dir = os.path.join(
self._get_root_path(), "folds", self.scv_method
)
# Save execution time
end_time = time.time()
self._save_time(end_time, start_time)
print(f"Execution time: {end_time-start_time} seconds")
| 44.206642 | 98 | 0.644324 | """Generate graph-based cross-validation spatial folds"""
import os
import time
from typing import Dict, List
from dataclasses import dataclass, field
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from tqdm import tqdm
from src.scv.scv import SpatialCV
X_1DIM_COL = "X_1DIM"
@dataclass
class RegGraphBasedSCV(SpatialCV):
    """Generates the Regularization Graph Based Spatial Cross-Validation folds

    For every test fold it grows the spatial adjacency graph outward from the
    test region, computes per-node semivariograms, and derives a "selection
    buffer" (training nodes similar enough in the PCA-reduced feature space)
    and a "removing buffer" (nodes dropped to limit target leakage).

    Attributes
    ----------
    data: pd.Dataframe
        The spatial dataset to generate the folds
    fold_col: str
        The fold column name
    target_col: str
        The target attribute column name
    adj_matrix: pd.Dataframe
        The adjacency matrix regarding the spatial objects in the data
    paper: bool
        Whether to run experiments according to ICMLA21 paper
    root_path : str
        Root path
    kappa: float
        Weight balancing geographic weights against attribute dissimilarity
        in the semivariogram (see ``_calculate_gamma``)
    run_selection: bool
        Whether to restrict the training data to the selection buffer
    type_graph: str
        Graph flavor; "Sparse" makes ``_init_fields`` build a unit w_matrix
    sill_target, sill_reduced, sill_max_reduced: Dict
        Per-fold sills, recomputed for every test fold
    w_matrix: pd.Dataframe
        Geographic weight matrix used by the semivariogram
    """

    kappa: float = 0.5
    run_selection: bool = False
    target_col: str = "TARGET"
    adj_matrix: pd.DataFrame = field(default_factory=pd.DataFrame)
    paper: bool = False
    type_graph: str = "Sparse"
    sill_target: Dict = field(default_factory=dict)
    sill_reduced: Dict = field(default_factory=dict)
    sill_max_reduced: Dict = field(default_factory=dict)
    w_matrix: pd.DataFrame = field(default_factory=pd.DataFrame)

    def _init_fields(self) -> None:
        """Reset per-run state: geographic weight matrix and sill registries."""
        if self.type_graph == "Sparse":
            # Unit weights: every pair of spatial objects weighs the same.
            self.w_matrix = pd.DataFrame(
                index=self.adj_matrix.index, columns=self.adj_matrix.columns
            )
            self.w_matrix.fillna(1, inplace=True)
        self.sill_target = {}
        self.sill_reduced = {}
        self.sill_max_reduced = {}

    def _calculate_train_pca(self) -> np.ndarray:
        """Return the PCA first component transformation on the train data"""
        pca = PCA(n_components=1)
        train = self.data.drop(columns=[self.fold_col, self.target_col])
        # For the ICMLA21 paper the PCA is executed only on the census columns
        if self.paper:
            cols = [c for c in train.columns if "CENSUS" in c]
            train = train[cols]
        pca.fit(train)
        return pca.transform(train).flatten()

    def _calculate_removing_buffer_sill(self, fold_name, fold_data, global_var) -> None:
        """Calculate the sill for each fold to be used on the removing buffer process

        Sill = mean of the (fold + test) target variance and the global variance.
        NOTE(review): ``Series.append`` is removed in pandas >= 2.0 — would need
        ``pd.concat`` there.
        """
        fold_target = fold_data[self.target_col]
        test_target = self.test_data[self.target_col]
        target_var = fold_target.append(test_target).var()
        self.sill_target[fold_name] = (target_var + global_var) / 2

    def _calculate_selection_buffer_sill(
        self, fold_name, fold_data, global_var
    ) -> None:
        """Calculate the sill for each fold to be used on the selection buffer process

        Also refreshes ``sill_max_reduced`` so every fold maps to the maximum
        reduced-space sill observed so far.
        """
        reduced_var = fold_data[X_1DIM_COL].append(self.test_data[X_1DIM_COL]).var()
        # self.sill_reduced[fold_name] = (reduced_var + global_var) / 2
        self.sill_reduced[fold_name] = reduced_var
        max_var_train = max(self.sill_reduced, key=self.sill_reduced.get)
        for _ in self.sill_reduced:
            self.sill_max_reduced[_] = self.sill_reduced[max_var_train]

    def _initiate_buffers_sills(self) -> None:
        """Initialize and calculate the sills for the removing and selection procedures"""
        global_target_var = self.data[self.target_col].var()
        global_reduced_var = self.data[X_1DIM_COL].var()
        self.sill_target = {}
        self.sill_reduced = {}
        for fold_name, fold_data in self.train_data.groupby(by=self.fold_col):
            self._calculate_selection_buffer_sill(
                fold_name, fold_data, global_reduced_var
            )
            self._calculate_removing_buffer_sill(
                fold_name, fold_data, global_target_var
            )

    def _convert_adj_matrix_index_types(self) -> None:
        """Convert adjacency matrix index and columns types to the same as in the data"""
        self.adj_matrix.index = self.adj_matrix.index.astype(self.data.index.dtype)
        self.adj_matrix.columns = self.adj_matrix.columns.astype(self.data.index.dtype)
        self.w_matrix.index = self.w_matrix.index.astype(self.data.index.dtype)
        self.w_matrix.columns = self.w_matrix.columns.astype(self.data.index.dtype)

    @staticmethod
    def _get_neighbors(indexes, adj_matrix) -> List:
        """Return the 1-degree neighborhood from a given sub-graph formed by indexes"""
        area_matrix = adj_matrix.loc[indexes]
        # A column with any positive entry is adjacent to the sub-graph.
        neighbors = area_matrix.sum(axis=0) > 0
        neighbors = neighbors[neighbors].index
        neighbors = [n for n in neighbors if n not in indexes]
        return neighbors

    def _calculate_longest_path(self) -> int:
        """Calculate the longest_path from a BFS tree taking the test set as root"""
        path_indexes = self.test_data.index.values.tolist()
        local_data_idx = (
            self.test_data.index.values.tolist() + self.train_data.index.values.tolist()
        )
        matrix = self.adj_matrix.loc[local_data_idx, local_data_idx]
        neighbors = self._get_neighbors(path_indexes, matrix)
        size_tree = 0
        # Each iteration absorbs one BFS frontier; the count is the tree depth.
        while len(neighbors) > 0:
            size_tree += 1
            neighbors = self._get_neighbors(path_indexes, matrix)
            path_indexes = path_indexes + neighbors
        return size_tree

    def _calculate_similarity_matrix(self, fold_data, attribute) -> np.ndarray:
        """Calculate the similarity matrix between test set and a given training
        fold set based on a given attribute (squared differences)."""
        test_values = self.test_data[attribute].to_numpy()
        node_values = fold_data[attribute]
        return (test_values - node_values) ** 2

    @staticmethod
    def _calculate_gamma(similarity, geo_weights, kappa) -> np.float64:
        """Calculate gamma or the semivariogram

        kappa controls how much the geographic weights attenuate the raw
        squared differences before averaging.
        """
        gamma_dist = similarity - (kappa * (1 - geo_weights) * similarity)
        sum_diff = np.sum(gamma_dist)
        sum_dist = len((similarity))
        return sum_diff / (2 * sum_dist)

    def _get_neighbors_weights(self, index):
        """Return the matrix weights test set x neighbors"""
        return self.w_matrix.loc[self.test_data.index, index]

    def _calculate_gamma_by_node(self, neighbors, attribute, kappa) -> Dict:
        """Calculate the semivariogram for each training node in ``neighbors``."""
        nodes_gamma = {}
        # Only consider neighbors that are still part of the training data.
        neighbors = [n for n in neighbors if n in self.train_data.index]
        neighbors_data = self.train_data.loc[neighbors]
        for index, node_data in neighbors_data.iterrows():
            similarity = self._calculate_similarity_matrix(node_data, attribute)
            geo_weights = self._get_neighbors_weights(index)
            gamma = self._calculate_gamma(similarity, geo_weights, kappa)
            nodes_gamma[index] = gamma
        return nodes_gamma

    def _get_n_fold_neighbohood(self) -> int:
        """Get the number of distinct folds adjacent to the test set"""
        neighbors_idx = self._get_neighbors(self.test_data.index, self.adj_matrix)
        neighbors_idx = [n for n in neighbors_idx if n in self.data.index]
        return len(self.data.loc[neighbors_idx].groupby(self.fold_col))

    @staticmethod
    def _calculate_exponent(size_tree, count_n) -> np.float64:
        """Calculate the decay exponent"""
        return np.log(1 * size_tree - count_n) / np.log(1 * size_tree)

    def _propagate_variance(self, attribute, kappa) -> Dict:
        """Calculate propagated variance (per-node gamma), growing outward
        from the test set one neighborhood ring at a time."""
        # Initialize variables
        buffer = []  # containing the index of instances buffered
        nodes_gamma = {}
        # Start creating the buffer
        while len(buffer) < self.train_data.shape[0]:
            # Get the instance indexes from the test set + the indexes buffer
            growing_graph_idx = self.test_data.index.values.tolist() + buffer
            # Get the neighbors
            h_neighbors = self._get_neighbors(growing_graph_idx, self.adj_matrix)
            # Calculate the semivariogram for each fold in the neighborhood
            nodes_gamma.update(
                self._calculate_gamma_by_node(h_neighbors, attribute, kappa)
            )
            buffer += h_neighbors
        return nodes_gamma

    def _calculate_selection_buffer(self, nodes_propagated, attribute):
        """Calculate buffer nodes: nodes whose gamma stays below the global sill."""
        buffered_nodes = []
        sill = self.data[attribute].var()
        buffered_nodes = [
            node for node, gamma in nodes_propagated.items() if gamma < sill
        ]
        return buffered_nodes

    def _calculate_removing_buffer(self, nodes_propagated, nodes_reduced, attribute):
        """Calculate buffer nodes: nodes whose target gamma stays below the
        test-set target variance (the reduced-space criterion is computed
        but currently unused — see the commented return below)."""
        sill_target = self.test_data[attribute].var()
        # sill_w_matrix = self.w_matrix.to_numpy().var()
        sill_reduced = self.test_data[X_1DIM_COL].var()
        # sill_target = self.kappa * sill_target + (1 - self.kappa) * sill_w_matrix
        buffered_nodes_target = [
            node for node, gamma in nodes_propagated.items() if gamma < sill_target
        ]
        buffered_nodes_reduced = [
            node for node, gamma in nodes_reduced.items() if gamma < sill_reduced
        ]
        # return [node for node in buffered_nodes_target if node in buffered_nodes_reduced]
        return buffered_nodes_target

    def run(self):
        """Generate graph-based spatial folds"""
        # Create folder folds
        start_time = time.time()
        self._init_fields()
        self._make_folders(["folds", self.scv_method])
        self.data[X_1DIM_COL] = self._calculate_train_pca()
        for fold_name, test_data in tqdm(
            self.data.groupby(by=self.fold_col), desc="Creating folds"
        ):
            # -1 appears to mark units with no fold assignment — skipped.
            if fold_name != -1:
                # Create fold folder
                self._mkdir(str(fold_name))
                # Initialize x, y and reduce
                self._split_data_test_train(test_data)
                # Calculate local sill
                self._initiate_buffers_sills()
                # Ensure indexes and columns compatibility
                self._convert_adj_matrix_index_types()
                # Calculate selection buffer
                nodes_prop_reduced = self._propagate_variance(X_1DIM_COL, self.kappa)
                selection_buffer = self._calculate_selection_buffer(
                    nodes_prop_reduced, X_1DIM_COL
                )
                if self.run_selection:
                    self.train_data = self.train_data.loc[selection_buffer]
                # The train data is used to calculate the buffer. Thus, the size tree,
                # and the gamma calculation will be influenced by the selection buffer.
                # Calculate removing buffer
                nodes_prop_target = self._propagate_variance(
                    self.target_col, self.kappa
                )
                removing_buffer = self._calculate_removing_buffer(
                    nodes_prop_target, nodes_prop_reduced, self.target_col
                )
                # removing_buffer = [node for node in removing_buffer if node in selection_buffer]
                # removing_buffer = selection_buffer
                self.train_data.drop(index=removing_buffer, inplace=True)
                # Save buffered data indexes
                self._save_buffered_indexes(removing_buffer)
                # Save fold index relation table
                self._save_fold_by_index_training()
                # Clean data
                self._clean_data(cols_drop=[X_1DIM_COL, self.fold_col])
                # Save data
                # self._save_data()
                # Update cur dir
                self.cur_dir = os.path.join(
                    self._get_root_path(), "folds", self.scv_method
                )
        # Save execution time
        end_time = time.time()
        self._save_time(end_time, start_time)
        print(f"Execution time: {end_time-start_time} seconds")
| 321 | 0 | 27 |
a508d4c0722ba5265fd74644461c3427fafc0893 | 2,379 | py | Python | src/son/package/tests/test_integ_Packager.py | dang03/son-cli | 3e29322d4556f3e02f7b15c43c5e66a1e7e07bd3 | [
"Apache-2.0"
] | 4 | 2017-02-08T22:50:28.000Z | 2018-05-29T07:29:47.000Z | src/son/package/tests/test_integ_Packager.py | dang03/son-cli | 3e29322d4556f3e02f7b15c43c5e66a1e7e07bd3 | [
"Apache-2.0"
] | 81 | 2016-07-19T13:55:12.000Z | 2021-05-07T15:03:05.000Z | src/son/package/tests/test_integ_Packager.py | dang03/son-cli | 3e29322d4556f3e02f7b15c43c5e66a1e7e07bd3 | [
"Apache-2.0"
] | 13 | 2016-07-19T13:33:19.000Z | 2019-04-25T08:04:15.000Z | # Copyright (c) 2015 SONATA-NFV, UBIWHERE
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, UBIWHERE
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
import unittest
from son.package.package import Packager
from son.workspace.workspace import Workspace
from son.workspace.workspace import Project
| 34.985294 | 74 | 0.686003 | # Copyright (c) 2015 SONATA-NFV, UBIWHERE
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, UBIWHERE
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
import unittest
from son.package.package import Packager
from son.workspace.workspace import Workspace
from son.workspace.workspace import Project
class IntPDTester(unittest.TestCase):
    """Integration tests for Packager's general description section (GDS)."""

    # Minimal project descriptor used as the test fixture.
    __pfd__ = {
        'version': '0.5',
        'package': {
            'version': '0.1',
            'name': 'sonata-project-sample',
            'vendor': 'com.sonata.project',
            'maintainer': 'Name, Company, Contact',
            'description': 'Project description',
        },
        'descriptor_extension': 'yml'
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Build a throwaway workspace/project pair for the packager.
        workspace = Workspace("")
        project = Project(workspace, '/')
        self.pck = Packager(workspace=workspace, project=project,
                            generate_pd=False)

    def test_correct_gds(self):
        """A complete descriptor must yield a valid GDS (not False)."""
        gsd = self.pck.package_gds(IntPDTester.__pfd__)
        self.assertNotEqual(gsd, False)

    def test_incomplete_gds(self):
        """A descriptor missing the 'package' section must yield None."""
        incomplete_pfd = IntPDTester.__pfd__
        incomplete_pfd.pop('package')
        gsd = self.pck.package_gds(incomplete_pfd)
        self.assertEqual(gsd, None)
71a62ff7ba99509eacc1a3e51ce3bf1c05259cf6 | 48,459 | py | Python | ProjectFiles/bin/Release/2.80/scripts/addons/object_boolean_tools.py | BlazesRus/Bforartists | 126bdd9e47cc984fd97ba5299bfb92ec5278e754 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2019-07-08T15:51:14.000Z | 2019-07-08T15:51:14.000Z | ProjectFiles/bin/Release/2.80/scripts/addons/object_boolean_tools.py | BlazesRus/Bforartists | 126bdd9e47cc984fd97ba5299bfb92ec5278e754 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | ProjectFiles/bin/Release/2.80/scripts/addons/object_boolean_tools.py | BlazesRus/Bforartists | 126bdd9e47cc984fd97ba5299bfb92ec5278e754 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "Bool Tool",
"author": "Vitor Balbio, Mikhail Rachinskiy, TynkaTopi, Meta-Androcto",
"version": (0, 3, 9),
"blender": (2, 79, 2),
"location": "View3D > Toolshelf",
"description": "Bool Tool Hotkey: Ctrl Shift B",
"wiki_url": "https://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Object/BoolTool",
"category": "Object",
}
import bpy
from bpy.app.handlers import persistent
from bpy.types import (
AddonPreferences,
Operator,
Panel,
Menu,
)
from bpy.props import (
BoolProperty,
StringProperty,
EnumProperty,
)
# ------------------- Bool Tool FUNCTIONS -------------------------
# Utils:
# Hide boolean objects
# Object is a Canvas
# Object is a Brush Tool Bool
# Object is a Poly Brush Tool Bool collection
"""
# EXPERIMENTAL FEATURES
def isMakeVertexGroup():
preferences = bpy.context.preferences
addon_prefs = preferences.addons[__name__].preferences
if addon_prefs.make_vertex_groups:
return True
else:
return False
def isMakeBoundary():
preferences = bpy.context.preferences
addon_prefs = preferences.addons[__name__].preferences
if addon_prefs.make_boundary:
return True
else:
return False
"""
# Do the Union, Difference and Intersection Operations with a Brush
# Remove Obejcts form the BoolTool System
# Toggle the Enable the Brush Object Property
# Find the Canvas and Enable this Brush
# Toggle the Fast Transform Property of the Active Brush
# Apply All Brushes to the Canvas
# Apply This Brush to the Canvas
# Handle the callbacks when modifying things in the scene
@persistent
# ------------------ Bool Tool OPERATORS --------------------------------------
# Fast Transform
# ------------------- Bool Tool OPERATOR CLASSES --------------------------------------------------------
# Brush Operators --------------------------------------------
# Boolean Union Operator
# Boolean Intersection Operator
# Boolean Difference Operator
# Boolean Slices Operator
# Auto Boolean operators (maintainer Mikhail Rachinskiy)
# --------------------------------------------------------------------------------------
# Utils Class ---------------------------------------------------------------
# Find the Brush Selected in Three View
# Move The Modifier in The Stack Up or Down
# Enable or Disable a Brush in the Three View
# Enable or Disable a Brush Directly
# Enable or Disable a Brush Directly
# Other Operations -------------------------------------------------------
# Remove a Brush or a Canvas
# Apply All to Canvas
# Apply This Brush to the Canvas
# TODO
# Apply This Brush To Mesh
# ------------------- MENU CLASSES ------------------------------
# 3Dview Header Menu
# ---------------- Toolshelf: Tools ---------------------
# ---------- Toolshelf: Properties --------------------------------------------------------
# ---------- Toolshelf: Brush Viewer -------------------------------------------------------
# ------------------ BOOL TOOL Help ----------------------------
# ------------------ BOOL TOOL ADD-ON PREFERENCES ----------------------------
# Add-ons Preferences Update Panel
# Define Panel classes for updating
panels = (
VIEW3D_PT_booltool_tools,
VIEW3D_PT_booltool_config,
VIEW3D_PT_booltool_bviewer,
)
# ------------------- Class List ------------------------------------------------
classes = (
PREFS_BoolTool_Props,
VIEW3D_MT_booltool_menu,
VIEW3D_PT_booltool_tools,
VIEW3D_PT_booltool_config,
VIEW3D_PT_booltool_bviewer,
OBJECT_OT_BoolTool_Auto_Union,
OBJECT_OT_BoolTool_Auto_Difference,
OBJECT_OT_BoolTool_Auto_Intersect,
OBJECT_OT_BoolTool_Auto_Slice,
OBJECT_OT_BoolTool_Auto_Subtract,
BTool_Union,
BTool_Diff,
BTool_Inters,
BTool_Slice,
BTool_DrawPolyBrush,
BTool_Remove,
BTool_AllBrushToMesh,
BTool_BrushToMesh,
BTool_FindBrush,
BTool_MoveStack,
BTool_EnableBrush,
BTool_EnableThisBrush,
BTool_EnableFTransform,
BTool_FastTransform,
WM_OT_BoolTool_Help,
)
# ------------------- REGISTER ------------------------------------------------
addon_keymaps = []
addon_keymapsFastT = []
# Fast Transform HotKeys Register
# Fast Transform HotKeys UnRegister
if __name__ == "__main__":
register()
| 32.632323 | 114 | 0.588807 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "Bool Tool",
"author": "Vitor Balbio, Mikhail Rachinskiy, TynkaTopi, Meta-Androcto",
"version": (0, 3, 9),
"blender": (2, 79, 2),
"location": "View3D > Toolshelf",
"description": "Bool Tool Hotkey: Ctrl Shift B",
"wiki_url": "https://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Object/BoolTool",
"category": "Object",
}
import bpy
from bpy.app.handlers import persistent
from bpy.types import (
AddonPreferences,
Operator,
Panel,
Menu,
)
from bpy.props import (
BoolProperty,
StringProperty,
EnumProperty,
)
# ------------------- Bool Tool FUNCTIONS -------------------------
# Utils:
# Hide boolean objects
def update_BoolHide(self, context):
    """Update callback: sync the hide state of every Boolean-modifier
    object of the active object with the scene's BoolHide flag."""
    active = context.scene.objects.active
    hide_state = context.scene.BoolHide
    for mod in active.modifiers:
        if mod.type == 'BOOLEAN':
            mod.object.hide = hide_state
# Object is a Canvas
def isCanvas(_obj):
    """Return True if *_obj* carries the "BoolToolRoot" custom property,
    i.e. it is a Bool Tool canvas.

    Uses dict-style .get() instead of the previous bare ``except``,
    which silently swallowed *any* error, not just a missing key.
    """
    return bool(_obj.get("BoolToolRoot", False))
# Object is a Brush Tool Bool
def isBrush(_obj):
    """Return True if *_obj* carries the "BoolToolBrush" custom property
    (set to the boolean operation name), i.e. it is a Bool Tool brush.

    Uses dict-style .get() instead of the previous bare ``except``,
    which silently swallowed *any* error, not just a missing key.
    """
    return bool(_obj.get("BoolToolBrush", False))
# Object is a Poly Brush Tool Bool collection
def isPolyBrush(_obj):
    """Return True if *_obj* carries the "BoolToolPolyBrush" custom
    property, i.e. it was created by the Draw Poly Brush operator.

    Uses dict-style .get() instead of the previous bare ``except``,
    which silently swallowed *any* error, not just a missing key.
    """
    return bool(_obj.get("BoolToolPolyBrush", False))
def BT_ObjectByName(obj):
    """Return the scene object named *obj*, but only if it participates
    in the Bool Tool system as a canvas or a brush; otherwise None."""
    for candidate in bpy.context.scene.objects:
        member = isCanvas(candidate) or isBrush(candidate)
        if member and candidate.name == obj:
            return candidate
def FindCanvas(obj):
    """Return the canvas whose modifier stack references *obj* as a
    brush (modifier named "BTool_<obj.name>"), or None if not found."""
    for candidate in bpy.context.scene.objects:
        if not isCanvas(candidate):
            continue
        for mod in candidate.modifiers:
            if "BTool_" in mod.name and obj.name in mod.name:
                return candidate
def isFTransf():
    """Report whether the add-on's 'fast_transform' preference is on."""
    addon_prefs = bpy.context.preferences.addons[__name__].preferences
    return bool(addon_prefs.fast_transform)
"""
# EXPERIMENTAL FEATURES
def isMakeVertexGroup():
preferences = bpy.context.preferences
addon_prefs = preferences.addons[__name__].preferences
if addon_prefs.make_vertex_groups:
return True
else:
return False
def isMakeBoundary():
preferences = bpy.context.preferences
addon_prefs = preferences.addons[__name__].preferences
if addon_prefs.make_boundary:
return True
else:
return False
"""
def ConvertToMesh(obj):
    """Convert *obj* (typically a curve) to a mesh, restoring the
    previously active object afterwards."""
    previous_active = bpy.context.scene.objects.active
    bpy.context.scene.objects.active = obj
    bpy.ops.object.convert(target="MESH")
    bpy.context.scene.objects.active = previous_active
# Do the Union, Difference and Intersection Operations with a Brush
def Operation(context, _operation):
    """Turn every selected mesh/curve object (except the active one) into a
    Boolean brush of *_operation* on the active object (the canvas).

    _operation -- "UNION", "DIFFERENCE", "INTERSECT" or "SLICE".
    "SLICE" duplicates the canvas: the copy gets a DIFFERENCE modifier
    and the original an INTERSECT one, cutting the canvas in two.
    """
    prefs = bpy.context.preferences.addons[__name__].preferences
    useWire = prefs.use_wire
    for selObj in bpy.context.selected_objects:
        if selObj != context.active_object and (selObj.type == "MESH" or selObj.type == "CURVE"):
            if selObj.type == "CURVE":
                # Boolean modifiers require mesh data.
                ConvertToMesh(selObj)
            actObj = context.active_object
            # The brush itself must never render.
            selObj.hide_render = True
            cyclesVis = selObj.cycles_visibility
            """
            for obj in bpy.context.scene.objects:
                if isCanvas(obj):
                    for mod in obj.modifiers:
                        if(mod.name == "BTool_" + selObj.name):
                            obj.modifiers.remove(mod)
            """
            # Show the brush as wireframe or bounds, per preference.
            if useWire:
                selObj.display_type = "WIRE"
            else:
                selObj.display_type = "BOUNDS"
            # Hide the brush from every Cycles ray type.
            cyclesVis.camera = False
            cyclesVis.diffuse = False
            cyclesVis.glossy = False
            cyclesVis.shadow = False
            cyclesVis.transmission = False
            if _operation == "SLICE":
                # copies instance_collection property(empty), but group property is empty (users_group = None)
                clone = context.active_object.copy()
                # clone.select = True
                context.scene.objects.link(clone)
                sliceMod = clone.modifiers.new("BTool_" + selObj.name, "BOOLEAN") # add mod to clone obj
                sliceMod.object = selObj
                sliceMod.operation = "DIFFERENCE"
                clone["BoolToolRoot"] = True
            newMod = actObj.modifiers.new("BTool_" + selObj.name, "BOOLEAN")
            newMod.object = selObj
            if _operation == "SLICE":
                newMod.operation = "INTERSECT"
            else:
                newMod.operation = _operation
            # Tag canvas and brush so the rest of Bool Tool can find them.
            actObj["BoolToolRoot"] = True
            selObj["BoolToolBrush"] = _operation
            selObj["BoolTool_FTransform"] = "False"
# Remove Obejcts form the BoolTool System
def Remove(context, thisObj_name, Prop):
    """Remove Bool Tool configuration from objects.

    thisObj_name -- brush object name (used when Prop == "THIS").
    Prop -- "THIS" restores one named brush of the active canvas;
            "BRUSH" restores the active object (a brush) and strips its
            modifier from its canvas;
            "CANVAS" restores every brush of the active canvas.
    Restoring means: textured display again, visible to Cycles rays, and
    the Bool Tool custom properties deleted.
    """
    # Find the Brush pointed in the Tree View and Restore it, active is the Canvas
    actObj = context.active_object
    # Restore the Brush
    def RemoveThis(_thisObj_name):
        for obj in bpy.context.scene.objects:
            # if it's the brush object
            if obj.name == _thisObj_name:
                cyclesVis = obj.cycles_visibility
                obj.display_type = "TEXTURED"
                del obj["BoolToolBrush"]
                del obj["BoolTool_FTransform"]
                cyclesVis.camera = True
                cyclesVis.diffuse = True
                cyclesVis.glossy = True
                cyclesVis.shadow = True
                cyclesVis.transmission = True
                # Remove it from the Canvas
                for mod in actObj.modifiers:
                    if ("BTool_" in mod.name):
                        if (_thisObj_name in mod.name):
                            actObj.modifiers.remove(mod)
    if Prop == "THIS":
        RemoveThis(thisObj_name)
    # If the remove was called from the Properties:
    else:
        # Remove the Brush Property
        if Prop == "BRUSH":
            Canvas = FindCanvas(actObj)
            if Canvas:
                for mod in Canvas.modifiers:
                    if ("BTool_" in mod.name):
                        if (actObj.name in mod.name):
                            Canvas.modifiers.remove(mod)
            cyclesVis = actObj.cycles_visibility
            actObj.display_type = "TEXTURED"
            del actObj["BoolToolBrush"]
            del actObj["BoolTool_FTransform"]
            cyclesVis.camera = True
            cyclesVis.diffuse = True
            cyclesVis.glossy = True
            cyclesVis.shadow = True
            cyclesVis.transmission = True
        if Prop == "CANVAS":
            for mod in actObj.modifiers:
                if ("BTool_" in mod.name):
                    RemoveThis(mod.object.name)
# Toggle the Enable the Brush Object Property
def EnableBrush(context, objList, canvas):
    """Toggle viewport + render visibility of the canvas' Boolean
    modifiers whose brush object name appears in *objList*."""
    for brush_name in objList:
        for mod in canvas.modifiers:
            if "BTool_" not in mod.name or mod.object.name != brush_name:
                continue
            # Flip both display states together.
            new_state = not mod.show_viewport
            mod.show_viewport = new_state
            mod.show_render = new_state
# Find the Canvas and Enable this Brush
def EnableThisBrush(context, set):
    """Find the canvas that uses the active object as a brush and set the
    visibility of its corresponding Boolean modifier.

    set -- "None" toggles viewport+render visibility, "True" shows the
    modifier in the viewport, anything else hides it.
    """
    canvas = None
    for obj in bpy.context.scene.objects:
        if obj != bpy.context.active_object:
            if isCanvas(obj):
                for mod in obj.modifiers:
                    if ("BTool_" in mod.name):
                        if mod.object == bpy.context.active_object:
                            canvas = obj
    # Fix: bail out if no canvas references the active brush; previously
    # `canvas.modifiers` raised AttributeError on None.
    if canvas is None:
        return
    for mod in canvas.modifiers:
        if ("BTool_" in mod.name):
            if mod.object == bpy.context.active_object:
                if set == "None":
                    if (mod.show_viewport):
                        mod.show_viewport = False
                        mod.show_render = False
                    else:
                        mod.show_viewport = True
                        mod.show_render = True
                else:
                    if (set == "True"):
                        mod.show_viewport = True
                    else:
                        mod.show_viewport = False
    return
# Toggle the Fast Transform Property of the Active Brush
def EnableFTransf(context):
    """Toggle the "BoolTool_FTransform" string flag ("True"/"False") on
    the active brush object."""
    actObj = bpy.context.active_object
    currently_on = actObj["BoolTool_FTransform"] == "True"
    actObj["BoolTool_FTransform"] = "False" if currently_on else "True"
    return
# Apply All Brushes to the Canvas
def ApplyAll(context, list):
    """Apply every Bool Tool modifier of the active canvas in *list*,
    then delete the brush objects that no other canvas still uses."""
    objDeleteList = []
    for selObj in list:
        if isCanvas(selObj) and selObj == context.active_object:
            for mod in selObj.modifiers:
                if ("BTool_" in mod.name):
                    objDeleteList.append(mod.object)
                    try:
                        bpy.ops.object.modifier_apply(modifier=mod.name)
                    except: # if fails the means it is multiuser data
                        context.active_object.data = context.active_object.data.copy() # so just make data unique
                        bpy.ops.object.modifier_apply(modifier=mod.name)
            del selObj['BoolToolRoot']
    # Keep brushes that are still referenced by another canvas.
    for obj in context.scene.objects:
        if isCanvas(obj):
            for mod in obj.modifiers:
                if mod.type == 'BOOLEAN':
                    if mod.object in objDeleteList: # do not delete brush that is used by another canvas
                        objDeleteList.remove(mod.object) # remove it from deletion
    # Delete the now-unused brush objects.
    bpy.ops.object.select_all(action='DESELECT')
    for obj in objDeleteList:
        obj.select = True
    bpy.ops.object.delete()
# Apply This Brush to the Canvas
def ApplyThisBrush(context, brush):
    """Apply the Boolean modifier created for *brush* on every canvas
    that references it, then select the brush (for later deletion)."""
    for obj in context.scene.objects:
        if isCanvas(obj):
            for mod in obj.modifiers:
                if ("BTool_" + brush.name in mod.name):
                    """
                    # EXPERIMENTAL
                    if isMakeVertexGroup():
                        # Turn all faces of the Brush selected
                        bpy.context.scene.objects.active = brush
                        bpy.ops.object.mode_set(mode='EDIT')
                        bpy.ops.mesh.select_all(action='SELECT')
                        bpy.ops.object.mode_set(mode='OBJECT')
                        # Turn off al faces of the Canvas selected
                        bpy.context.scene.objects.active = canvas
                        bpy.ops.object.mode_set(mode='EDIT')
                        bpy.ops.mesh.select_all(action='DESELECT')
                        bpy.ops.object.mode_set(mode='OBJECT')
                    """
                    # Apply This Brush
                    context.scene.objects.active = obj
                    try:
                        bpy.ops.object.modifier_apply(modifier=mod.name)
                    except: # if fails the means it is multiuser data
                        context.active_object.data = context.active_object.data.copy() # so just make data unique
                        bpy.ops.object.modifier_apply(modifier=mod.name)
                    bpy.ops.object.select_all(action='TOGGLE')
                    bpy.ops.object.select_all(action='DESELECT')
                    """
                    # EXPERIMENTAL
                    if isMakeVertexGroup():
                        # Make Vertex Group
                        bpy.ops.object.mode_set(mode='EDIT')
                        bpy.ops.object.vertex_group_assign_new()
                        bpy.ops.mesh.select_all(action='DESELECT')
                        bpy.ops.object.mode_set(mode='OBJECT')
                        canvas.vertex_groups.active.name = "BTool_" + brush.name
                    """
    # Garbage Collector
    brush.select = True
    # bpy.ops.object.delete()
def GCollector(_obj):
    """Garbage-collect stale Bool Tool data on a canvas object.

    Drops Boolean modifiers whose brush object was deleted, and removes
    the "BoolToolRoot" flag once no Bool Tool modifier remains.
    """
    if isCanvas(_obj):
        BTRoot = False
        # Fix: iterate over a snapshot -- removing a modifier while
        # iterating the live collection skips the following entry.
        for mod in list(_obj.modifiers):
            if ("BTool_" in mod.name):
                BTRoot = True
                if mod.object is None:
                    _obj.modifiers.remove(mod)
        if not BTRoot:
            del _obj["BoolToolRoot"]
# Handle the callbacks when modifying things in the scene
@persistent
def HandleScene(scene):
    """Scene-update handler: run the garbage collector on every object
    that changed in the last depsgraph update."""
    if not bpy.data.objects.is_updated:
        return
    for candidate in bpy.data.objects:
        if candidate.is_updated:
            GCollector(candidate)
# ------------------ Bool Tool OPERATORS --------------------------------------
class BTool_DrawPolyBrush(Operator):
    """Modal operator: draw a Grease Pencil polygon and convert it into a
    solidified mesh brush tagged "BoolToolPolyBrush".
    """
    bl_idname = "btool.draw_polybrush"
    bl_label = "Draw Poly Brush"
    bl_description = ("Draw Polygonal Mask, can be applied to Canvas > Brush or Directly\n"
                      "Note: ESC to Cancel, Enter to Apply, Right Click to erase the Lines")
    # modal-state counters (class-level defaults, set per invocation)
    count = 0
    store_cont_draw = False
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def set_cont_draw(self, context, start=False):
        """Store (start=True) or restore the GP continuous-drawing flag."""
        # store / restore GP continuous drawing (see T52321)
        scene = context.scene
        tool_settings = scene.tool_settings
        continuous = tool_settings.use_gpencil_continuous_drawing
        if start:
            self.store_cont_draw = continuous
            tool_settings.use_gpencil_continuous_drawing = True
        else:
            tool_settings.use_gpencil_continuous_drawing = self.store_cont_draw
    def modal(self, context, event):
        """Run the GP draw tool on first tick; on Enter, convert the
        stroke to a closed, solidified mesh brush; ESC cancels."""
        self.count += 1
        actObj = bpy.context.active_object
        if self.count == 1:
            actObj.select = True
            bpy.ops.gpencil.draw('INVOKE_DEFAULT', mode="DRAW_POLY")
        if event.type in {'RIGHTMOUSE'}:
            # use this to pass to the Grease Pencil eraser (see T52321)
            pass
        if event.type in {'RET', 'NUMPAD_ENTER'}:
            bpy.ops.gpencil.convert(type='POLY')
            self.set_cont_draw(context)
            for obj in context.selected_objects:
                if obj.type == "CURVE":
                    obj.name = "PolyDraw"
                    bpy.context.scene.objects.active = obj
                    bpy.ops.object.select_all(action='DESELECT')
                    obj.select = True
                    # Curve -> mesh, close it and give it thickness so it
                    # can act as a Boolean brush.
                    bpy.ops.object.convert(target="MESH")
                    bpy.ops.object.mode_set(mode='EDIT')
                    bpy.ops.mesh.select_all(action='SELECT')
                    bpy.ops.mesh.edge_face_add()
                    bpy.ops.mesh.flip_normals()
                    bpy.ops.object.mode_set(mode='OBJECT')
                    bpy.ops.object.origin_set(type='ORIGIN_CENTER_OF_MASS')
                    bpy.ops.object.modifier_add(type="SOLIDIFY")
                    for mod in obj.modifiers:
                        if mod.name == "Solidify":
                            mod.name = "BTool_PolyBrush"
                            mod.thickness = 1
                            mod.offset = 0
                    obj["BoolToolPolyBrush"] = True
                    bpy.ops.object.select_all(action='DESELECT')
                    bpy.context.scene.objects.active = actObj
                    bpy.context.scene.update()
                    actObj.select = True
                    obj.select = True
                    # Clean up the temporary Grease Pencil data.
                    bpy.context.scene.grease_pencil.clear()
                    bpy.ops.gpencil.data_unlink()
            return {'FINISHED'}
        if event.type in {'ESC'}:
            bpy.ops.ed.undo()  # remove the Grease Pencil stroke
            self.set_cont_draw(context)
            self.report({'INFO'},
                        "Draw Poly Brush: Operation Cancelled by User")
            return {'CANCELLED'}
        return {'RUNNING_MODAL'}
    def invoke(self, context, event):
        if context.object:
            self.set_cont_draw(context, start=True)
            context.window_manager.modal_handler_add(self)
            return {'RUNNING_MODAL'}
        else:
            self.report({'WARNING'}, "No active object, could not finish")
            return {'CANCELLED'}
# Fast Transform
class BTool_FastTransform(Operator):
    """Modal operator: temporarily disable the active brush's Boolean
    modifier while transforming it, and re-enable it on confirm/cancel.
    """
    bl_idname = "btool.fast_transform"
    bl_label = "Fast Transform"
    bl_description = "Enable Fast Transform"
    # which transform to run: "Translate", "Rotate" or "Scale"
    operator = StringProperty("")
    count = 0
    def modal(self, context, event):
        self.count += 1
        actObj = bpy.context.active_object
        useWire = bpy.context.preferences.addons[__name__].preferences.use_wire
        if self.count == 1:
            # First tick: switch the brush off and start the transform.
            if isBrush(actObj) and actObj["BoolTool_FTransform"] == "True":
                EnableThisBrush(bpy.context, "False")
                if useWire:
                    actObj.display_type = "WIRE"
                else:
                    actObj.display_type = "BOUNDS"
            if self.operator == "Translate":
                bpy.ops.transform.translate('INVOKE_DEFAULT')
            if self.operator == "Rotate":
                bpy.ops.transform.rotate('INVOKE_DEFAULT')
            if self.operator == "Scale":
                bpy.ops.transform.resize('INVOKE_DEFAULT')
        if event.type == 'LEFTMOUSE':
            # Confirm: restore the brush.
            if isBrush(actObj):
                EnableThisBrush(bpy.context, "True")
                actObj.display_type = "WIRE"
            return {'FINISHED'}
        if event.type in {'RIGHTMOUSE', 'ESC'}:
            # Cancel: restore the brush as well.
            if isBrush(actObj):
                EnableThisBrush(bpy.context, "True")
                actObj.display_type = "WIRE"
            return {'CANCELLED'}
        return {'RUNNING_MODAL'}
    def invoke(self, context, event):
        if context.object:
            context.window_manager.modal_handler_add(self)
            return {'RUNNING_MODAL'}
        else:
            self.report({'WARNING'}, "No active object, could not finish")
            return {'CANCELLED'}
# ------------------- Bool Tool OPERATOR CLASSES --------------------------------------------------------
# Brush Operators --------------------------------------------
# Boolean Union Operator
class BTool_Union(Operator):
    """Add the selected objects as Union brushes on the active canvas."""
    bl_idname = "btool.boolean_union"
    bl_label = "Brush Union"
    bl_description = "This operator add a union brush to a canvas"
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def execute(self, context):
        Operation(context, "UNION")
        return {'FINISHED'}
# Boolean Intersection Operator
class BTool_Inters(Operator):
    """Add the selected objects as Intersect brushes on the active canvas."""
    bl_idname = "btool.boolean_inters"
    bl_label = "Brush Intersection"
    bl_description = "This operator add a intersect brush to a canvas"
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def execute(self, context):
        Operation(context, "INTERSECT")
        return {'FINISHED'}
# Boolean Difference Operator
class BTool_Diff(Operator):
    """Add the selected objects as Difference brushes on the active canvas."""
    bl_idname = "btool.boolean_diff"
    bl_label = "Brush Difference"
    bl_description = "This operator add a difference brush to a canvas"
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def execute(self, context):
        Operation(context, "DIFFERENCE")
        return {'FINISHED'}
# Boolean Slices Operator
class BTool_Slice(Operator):
    """Add the selected objects as Slice brushes on the active canvas
    (duplicates the canvas: difference on the copy, intersect on the
    original).
    """
    bl_idname = "btool.boolean_slice"
    bl_label = "Brush Slice"
    # Fixed description: it was copy-pasted from the Intersect operator.
    bl_description = "This operator adds a slice brush to a canvas"
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def execute(self, context):
        Operation(context, "SLICE")
        return {'FINISHED'}
# Auto Boolean operators (maintainer Mikhail Rachinskiy)
# --------------------------------------------------------------------------------------
class Auto_Boolean:
    """Mixin with the shared machinery for the 'Auto Boolean' operators:
    prepare selection, apply a Boolean modifier immediately, delete the
    consumed brush object."""
    def objects_prepare(self):
        """Deselect non-meshes and make selected objects single-user meshes."""
        for ob in bpy.context.selected_objects:
            if ob.type != 'MESH':
                ob.select = False
        bpy.ops.object.make_single_user(object=True, obdata=True)
        bpy.ops.object.convert(target='MESH')
    def mesh_selection(self, ob, select_action):
        """Apply *select_action* ('SELECT'/'DESELECT') to all of *ob*'s mesh
        elements, restoring the previously active object afterwards."""
        scene = bpy.context.scene
        obj = bpy.context.active_object
        scene.objects.active = ob
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.reveal()
        bpy.ops.mesh.select_all(action=select_action)
        bpy.ops.object.mode_set(mode='OBJECT')
        scene.objects.active = obj
    def boolean_operation(self):
        """Apply self.mode between the active object and each selected one."""
        obj = bpy.context.active_object
        obj.select = False
        obs = bpy.context.selected_objects
        self.mesh_selection(obj, 'DESELECT')
        for ob in obs:
            self.mesh_selection(ob, 'SELECT')
            self.boolean_mod(obj, ob, self.mode)
        obj.select = True
    def boolean_mod(self, obj, ob, mode, ob_delete=True):
        """Add a Boolean modifier (*mode*, brush *ob*) to *obj* and apply it
        at once; delete the brush unless ob_delete is False."""
        md = obj.modifiers.new("Auto Boolean", 'BOOLEAN')
        md.show_viewport = False
        md.operation = mode
        md.object = ob
        bpy.ops.object.modifier_apply(modifier="Auto Boolean")
        if not ob_delete:
            return
        bpy.context.scene.objects.unlink(ob)
        bpy.data.objects.remove(ob)
class OBJECT_OT_BoolTool_Auto_Union(Operator, Auto_Boolean):
    """Immediately union the selected objects into the active one."""
    bl_idname = "object.booltool_auto_union"
    bl_label = "Bool Tool Union"
    bl_description = "Combine selected objects"
    bl_options = {'REGISTER', 'UNDO'}
    mode = 'UNION'
    def execute(self, context):
        self.objects_prepare()
        self.boolean_operation()
        return {'FINISHED'}
class OBJECT_OT_BoolTool_Auto_Difference(Operator, Auto_Boolean):
    """Immediately subtract the selected objects from the active one."""
    bl_idname = "object.booltool_auto_difference"
    bl_label = "Bool Tool Difference"
    bl_description = "Subtract selected objects from active object"
    bl_options = {'REGISTER', 'UNDO'}
    mode = 'DIFFERENCE'
    def execute(self, context):
        self.objects_prepare()
        self.boolean_operation()
        return {'FINISHED'}
class OBJECT_OT_BoolTool_Auto_Intersect(Operator, Auto_Boolean):
    """Immediately keep only the intersection of active + selected objects."""
    bl_idname = "object.booltool_auto_intersect"
    bl_label = "Bool Tool Intersect"
    bl_description = "Keep only intersecting geometry"
    bl_options = {'REGISTER', 'UNDO'}
    mode = 'INTERSECT'
    def execute(self, context):
        self.objects_prepare()
        self.boolean_operation()
        return {'FINISHED'}
class OBJECT_OT_BoolTool_Auto_Slice(Operator, Auto_Boolean):
    """Immediately slice the active object with the selected one: the
    original keeps the difference, a duplicate keeps the intersection."""
    bl_idname = "object.booltool_auto_slice"
    bl_label = "Bool Tool Slice"
    bl_description = "Slice active object along the selected object"
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        self.objects_prepare()
        scene = context.scene
        obj = context.active_object
        obj.select = False
        ob = context.selected_objects[0]
        self.mesh_selection(obj, 'DESELECT')
        self.mesh_selection(ob, 'SELECT')
        # Duplicate the canvas so we can keep both halves.
        obj_copy = obj.copy()
        obj_copy.data = obj.data.copy()
        scene.objects.link(obj_copy)
        # Brush is kept for the second cut (ob_delete=False) ...
        self.boolean_mod(obj, ob, 'DIFFERENCE', ob_delete=False)
        scene.objects.active = obj_copy
        # ... and consumed by this one.
        self.boolean_mod(obj_copy, ob, 'INTERSECT')
        obj_copy.select = True
        return {'FINISHED'}
class OBJECT_OT_BoolTool_Auto_Subtract(Operator, Auto_Boolean):
    """Immediately subtract the selected object from the active one,
    keeping the subtracted object in the scene."""
    bl_idname = "object.booltool_auto_subtract"
    bl_label = "Bool Tool Subtract"
    bl_description = "Subtract selected object from active object, subtracted object not removed"
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        self.objects_prepare()
        obj = context.active_object
        obj.select = False
        ob = context.selected_objects[0]
        self.mesh_selection(obj, 'DESELECT')
        self.mesh_selection(ob, 'SELECT')
        # ob_delete=False keeps the brush object alive after the cut.
        self.boolean_mod(obj, ob, 'DIFFERENCE', ob_delete=False)
        return {'FINISHED'}
# Utils Class ---------------------------------------------------------------
# Find the Brush Selected in Three View
class BTool_FindBrush(Operator):
    """Select and activate the scene object whose name is in self.obj
    (used by the brush-viewer UI to jump to a brush)."""
    bl_idname = "btool.find_brush"
    bl_label = ""
    bl_description = "Find the selected brush"
    # name of the brush object to locate
    obj = StringProperty("")
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def execute(self, context):
        for ob in bpy.context.scene.objects:
            if (ob.name == self.obj):
                bpy.ops.object.select_all(action='TOGGLE')
                bpy.ops.object.select_all(action='DESELECT')
                bpy.context.scene.objects.active = ob
                ob.select = True
        return {'FINISHED'}
# Move The Modifier in The Stack Up or Down
class BTool_MoveStack(Operator):
    """Move the modifier named self.modif up or down in the active
    object's modifier stack."""
    bl_idname = "btool.move_stack"
    bl_label = ""
    bl_description = "Move this Brush Up/Down in the Stack"
    # modifier name, and "UP"/"DOWN" direction
    modif = StringProperty("")
    direction = StringProperty("")
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def execute(self, context):
        if (self.direction == "UP"):
            bpy.ops.object.modifier_move_up(modifier=self.modif)
        if (self.direction == "DOWN"):
            bpy.ops.object.modifier_move_down(modifier=self.modif)
        return {'FINISHED'}
# Enable or Disable a Brush in the Three View
class BTool_EnableBrush(Operator):
    """Toggle the visibility of the named brush's Boolean modifier on
    the active canvas (brush-viewer UI)."""
    bl_idname = "btool.enable_brush"
    bl_label = ""
    # Fixed description: it was copy-pasted from the Remove operator;
    # this operator toggles a brush, it removes nothing.
    bl_description = "Toggle this brush on the active canvas"
    thisObj = StringProperty("")
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def execute(self, context):
        # in this case is just one object but the function accept more than one at once
        EnableBrush(context, [self.thisObj], context.active_object)
        return {'FINISHED'}
# Enable or Disable a Brush Directly
class BTool_EnableThisBrush(Operator):
    """Toggle the active brush's Boolean modifier on its canvas."""
    bl_idname = "btool.enable_this_brush"
    bl_label = ""
    bl_description = "Toggles this brush"
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def execute(self, context):
        EnableThisBrush(context, "None")
        return {'FINISHED'}
# Enable or Disable a Brush Directly
class BTool_EnableFTransform(Operator):
    """Toggle the Fast Transform flag on the active brush."""
    bl_idname = "btool.enable_ftransf"
    bl_label = ""
    bl_description = "Use Fast Transformations to improve speed"
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def execute(self, context):
        EnableFTransf(context)
        return {'FINISHED'}
# Other Operations -------------------------------------------------------
# Remove a Brush or a Canvas
class BTool_Remove(Operator):
    """Strip Bool Tool configuration from a brush or canvas; self.Prop
    selects the mode ("THIS", "BRUSH" or "CANVAS" -- see Remove())."""
    bl_idname = "btool.remove"
    bl_label = ""
    bl_description = "Removes all BoolTool config assigned to it"
    bl_options = {'UNDO'}
    # brush object name and removal mode, forwarded to Remove()
    thisObj = StringProperty("")
    Prop = StringProperty("")
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def execute(self, context):
        Remove(context, self.thisObj, self.Prop)
        return {'FINISHED'}
# Apply All to Canvas
class BTool_AllBrushToMesh(Operator):
    """Apply every brush of the selected canvases (see ApplyAll)."""
    bl_idname = "btool.to_mesh"
    bl_label = "Apply All Canvas"
    bl_description = "Apply all brushes of this canvas"
    bl_options = {'UNDO'}
    @classmethod
    def poll(cls, context):
        return context.active_object is not None
    def execute(self, context):
        lists = bpy.context.selected_objects
        ApplyAll(context, lists)
        return {'FINISHED'}
# Apply This Brush to the Canvas
class BTool_BrushToMesh(Operator):
    """Apply the active brush to the canvas that references it."""
    bl_idname = "btool.brush_to_mesh"
    bl_label = "Apply this Brush to Canvas"
    bl_description = "Apply this brush to the canvas"
    bl_options = {'UNDO'}
    @classmethod
    def poll(cls, context):
        # Only available while a Bool Tool brush is active.
        return isBrush(context.active_object)
    def execute(self, context):
        ApplyThisBrush(context, bpy.context.active_object)
        return {'FINISHED'}
# TODO
# Apply This Brush To Mesh
# ------------------- MENU CLASSES ------------------------------
# 3Dview Header Menu
class VIEW3D_MT_booltool_menu(Menu):
    """Popup menu (Ctrl-Shift-B) listing all Bool Tool operators; canvas
    and brush specific entries appear depending on the active object."""
    bl_label = "BoolTool Operators"
    bl_idname = "VIEW3D_MT_booltool_menu"
    def draw(self, context):
        layout = self.layout
        layout.label("Auto Boolean:")
        layout.operator(OBJECT_OT_BoolTool_Auto_Difference.bl_idname, text='Difference', icon='PIVOT_ACTIVE')
        layout.operator(OBJECT_OT_BoolTool_Auto_Union.bl_idname, text='Union', icon='PIVOT_INDIVIDUAL')
        layout.operator(OBJECT_OT_BoolTool_Auto_Intersect.bl_idname, text='Intersect', icon='PIVOT_MEDIAN')
        layout.operator(OBJECT_OT_BoolTool_Auto_Slice.bl_idname, text='Slice', icon='PIVOT_MEDIAN')
        layout.operator(OBJECT_OT_BoolTool_Auto_Subtract.bl_idname, text='Subtract', icon='PIVOT_ACTIVE')
        layout.separator()
        layout.label("Brush Boolean:")
        layout.operator(BTool_Diff.bl_idname, icon='PIVOT_ACTIVE')
        layout.operator(BTool_Union.bl_idname, icon='PIVOT_INDIVIDUAL')
        layout.operator(BTool_Inters.bl_idname, icon='PIVOT_MEDIAN')
        layout.operator(BTool_Slice.bl_idname, icon='PIVOT_MEDIAN')
        # Canvas-only entries: apply/remove every brush at once.
        if (isCanvas(context.active_object)):
            layout.separator()
            layout.operator(BTool_AllBrushToMesh.bl_idname, icon="MOD_LATTICE", text="Apply All")
            Rem = layout.operator(BTool_Remove.bl_idname, icon="CANCEL", text="Remove All")
            Rem.thisObj = ""
            Rem.Prop = "CANVAS"
        # Brush-only entries: apply/remove just this brush.
        if (isBrush(context.active_object)):
            layout.separator()
            layout.operator(BTool_BrushToMesh.bl_idname, icon="MOD_LATTICE", text="Apply Brush")
            Rem = layout.operator(BTool_Remove.bl_idname, icon="CANCEL", text="Remove Brush")
            Rem.thisObj = ""
            Rem.Prop = "BRUSH"
def VIEW3D_BoolTool_Menu(self, context):
    """Append function: adds the Bool Tool menu to a 3D View menu/header."""
    self.layout.menu(VIEW3D_MT_booltool_menu.bl_idname)
# ---------------- Toolshelf: Tools ---------------------
class VIEW3D_PT_booltool_tools(Panel):
    """Tool-shelf panel with the main Bool Tool operator buttons."""
    bl_category = "Tools"
    bl_label = "Bool Tool"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_context = 'objectmode'
    @classmethod
    def poll(cls, context):
        # Panel shows for any active object; individual columns are gated below.
        return context.active_object is not None
    def draw(self, context):
        """Draw auto-boolean, brush-boolean and draw-brush button groups."""
        layout = self.layout
        obj = context.active_object
        obs_len = len(context.selected_objects)
        row = layout.split(0.7)
        row.label("Help:")
        row.operator("wm.booltool_help", text="", icon="QUESTION")
        main = layout.column(align=True)
        # Everything requires a mesh active object and at least one selection.
        main.enabled = obj.type == 'MESH' and obs_len > 0
        main.separator()
        col = main.column(align=True)
        # Union/Difference/Intersect need a target plus at least one brush.
        col.enabled = obs_len > 1
        col.label("Auto Boolean:", icon="MODIFIER")
        col.separator()
        col.operator(OBJECT_OT_BoolTool_Auto_Difference.bl_idname, text='Difference', icon='PIVOT_ACTIVE')
        col.operator(OBJECT_OT_BoolTool_Auto_Union.bl_idname, text='Union', icon='PIVOT_INDIVIDUAL')
        col.operator(OBJECT_OT_BoolTool_Auto_Intersect.bl_idname, text='Intersect', icon='PIVOT_MEDIAN')
        main.separator()
        col = main.column(align=True)
        # Slice/Subtract operate on exactly one pair of objects.
        col.enabled = obs_len == 2
        col.operator(OBJECT_OT_BoolTool_Auto_Slice.bl_idname, text='Slice', icon='PIVOT_MEDIAN')
        col.operator(OBJECT_OT_BoolTool_Auto_Subtract.bl_idname, text='Subtract', icon='PIVOT_ACTIVE')
        main.separator()
        col = main.column(align=True)
        col.enabled = obs_len > 1
        col.label("Brush Boolean:", icon="MODIFIER")
        col.separator()
        col.operator(BTool_Diff.bl_idname, text="Difference", icon='PIVOT_ACTIVE')
        col.operator(BTool_Union.bl_idname, text="Union", icon='PIVOT_INDIVIDUAL')
        col.operator(BTool_Inters.bl_idname, text="Intersect", icon='PIVOT_MEDIAN')
        col.operator(BTool_Slice.bl_idname, text="Slice", icon='PIVOT_MEDIAN')
        main.separator()
        col = main.column(align=True)
        col.label("Draw:", icon="MESH_CUBE")
        col.separator()
        col.operator(BTool_DrawPolyBrush.bl_idname, icon="LINE_DATA")
# ---------- Toolshelf: Properties --------------------------------------------------------
class VIEW3D_PT_booltool_config(Panel):
    """Tool-shelf panel exposing the properties of the active canvas/brush.

    Visible only when the active object is tagged by Bool Tool as a canvas,
    a brush or a poly brush.
    """
    bl_category = "Tools"
    bl_label = "Properties"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_context = 'objectmode'
    @classmethod
    def poll(cls, context):
        result = False
        actObj = bpy.context.active_object
        if (isCanvas(actObj) or isBrush(actObj) or isPolyBrush(actObj)):
            result = True
        return result
    def draw(self, context):
        """Draw canvas, brush and poly-brush sections for the active object."""
        actObj = bpy.context.active_object
        icon = ""
        layout = self.layout
        row = layout.row(True)
        # CANVAS ---------------------------------------------------
        if isCanvas(actObj):
            row.label("CANVAS", icon="MESH_GRID")
            row = layout.row()
            row.prop(context.scene, 'BoolHide', text="Hide Bool objects")
            row = layout.row(True)
            row.operator(BTool_AllBrushToMesh.bl_idname, icon="MOD_LATTICE", text="Apply All")
            row = layout.row(True)
            Rem = row.operator(BTool_Remove.bl_idname, icon="CANCEL", text="Remove All")
            Rem.thisObj = ""
            Rem.Prop = "CANVAS"
            if isBrush(actObj):
                layout.separator()
        # BRUSH ------------------------------------------------------
        if isBrush(actObj):
            # Pick a status icon matching this brush's boolean operation.
            if (actObj["BoolToolBrush"] == "UNION"):
                icon = "ROTATECOLLECTION"
            if (actObj["BoolToolBrush"] == "DIFFERENCE"):
                icon = "ROTATECENTER"
            if (actObj["BoolToolBrush"] == "INTERSECT"):
                icon = "ROTACTIVE"
            if (actObj["BoolToolBrush"] == "SLICE"):
                icon = "ROTATECENTER"
            row = layout.row(True)
            row.label("BRUSH", icon=icon)
            # Marker icon reflects the per-object Fast Transform flag
            # (stored as the string "True"/"False", not a bool).
            icon = ""
            if actObj["BoolTool_FTransform"] == "True":
                icon = "PMARKER_ACT"
            else:
                icon = "PMARKER"
            # NOTE: a redundant `if isFTransf(): pass` preceded this branch in
            # the original; it was dead code and has been removed.
            if isFTransf():
                row = layout.row(True)
                row.operator(BTool_EnableFTransform.bl_idname, text="Fast Vis", icon=icon)
                row.operator(BTool_EnableThisBrush.bl_idname, text="Enable", icon="VISIBLE_IPO_ON")
                row = layout.row(True)
            else:
                row.operator(BTool_EnableThisBrush.bl_idname, icon="VISIBLE_IPO_ON")
                row = layout.row(True)
        if isPolyBrush(actObj):
            row = layout.row(False)
            row.label("POLY BRUSH", icon="LINE_DATA")
            mod = actObj.modifiers["BTool_PolyBrush"]
            row = layout.row(False)
            row.prop(mod, "thickness", text="Size")
            layout.separator()
        if isBrush(actObj):
            row = layout.row(True)
            row.operator(BTool_BrushToMesh.bl_idname, icon="MOD_LATTICE", text="Apply Brush")
            row = layout.row(True)
            Rem = row.operator(BTool_Remove.bl_idname, icon="CANCEL", text="Remove Brush")
            Rem.thisObj = ""
            Rem.Prop = "BRUSH"
            layout.separator()
# ---------- Toolshelf: Brush Viewer -------------------------------------------------------
class VIEW3D_PT_booltool_bviewer(Panel):
    """Tool-shelf panel listing every modifier on the active canvas object."""
    bl_category = "Tools"
    bl_label = "Brush Viewer"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_context = 'objectmode'
    @classmethod
    def poll(cls, context):
        # Only meaningful for canvas objects (those carrying brush modifiers).
        actObj = bpy.context.active_object
        if isCanvas(actObj):
            return True
        else:
            return False
    def draw(self, context):
        """Draw one boxed row per modifier, with enable/remove/reorder buttons."""
        actObj = bpy.context.active_object
        icon = ""
        if isCanvas(actObj):
            for mod in actObj.modifiers:
                container = self.layout.box()
                row = container.row(True)
                icon = ""
                # Bool Tool's own modifiers are named "BTool_<brush name>".
                if ("BTool_" in mod.name):
                    if (mod.operation == "UNION"):
                        icon = "ROTATECOLLECTION"
                    if (mod.operation == "DIFFERENCE"):
                        icon = "ROTATECENTER"
                    if (mod.operation == "INTERSECT"):
                        icon = "ROTACTIVE"
                    if (mod.operation == "SLICE"):
                        icon = "ROTATECENTER"
                    # Clicking the name selects the brush object in the scene.
                    objSelect = row.operator("btool.find_brush", text=mod.object.name, icon=icon, emboss=False)
                    objSelect.obj = mod.object.name
                    EnableIcon = "RESTRICT_VIEW_ON"
                    if (mod.show_viewport):
                        EnableIcon = "RESTRICT_VIEW_OFF"
                    Enable = row.operator(BTool_EnableBrush.bl_idname, icon=EnableIcon, emboss=False)
                    Enable.thisObj = mod.object.name
                    Remove = row.operator("btool.remove", icon="CANCEL", emboss=False)
                    Remove.thisObj = mod.object.name
                    Remove.Prop = "THIS"
                    # Stack Changer
                    Up = row.operator("btool.move_stack", icon="TRIA_UP", emboss=False)
                    Up.modif = mod.name
                    Up.direction = "UP"
                    Dw = row.operator("btool.move_stack", icon="TRIA_DOWN", emboss=False)
                    Dw.modif = mod.name
                    Dw.direction = "DOWN"
                else:
                    # Foreign modifier: show it read-only, but still allow reordering.
                    row.label(mod.name)
                    # Stack Changer
                    Up = row.operator("btool.move_stack", icon="TRIA_UP", emboss=False)
                    Up.modif = mod.name
                    Up.direction = "UP"
                    Dw = row.operator("btool.move_stack", icon="TRIA_DOWN", emboss=False)
                    Dw.modif = mod.name
                    Dw.direction = "DOWN"
# ------------------ BOOL TOOL Help ----------------------------
class WM_OT_BoolTool_Help(Operator):
    """Operator that pops up a short usage summary for the add-on."""
    bl_idname = "wm.booltool_help"
    bl_label = "Bool Tool Help"
    bl_description = "Tool Help - click to read some basic information"
    def draw(self, context):
        """Lay out the static help text shown in the popup."""
        layout = self.layout
        layout.label("To use:")
        layout.label("Select two or more objects,")
        layout.label("choose one option from the panel")
        layout.label("or from the Ctrl + Shift + B menu")
        layout.separator()
        layout.label("Auto Boolean:")
        layout.label("Apply Boolean operation directly.")
        layout.separator()
        layout.label("Brush Boolean:")
        layout.label("Create a Boolean brush setup.")
    def execute(self, context):
        # Nothing to do; the popup is informational only.
        return {'FINISHED'}
    def invoke(self, context, event):
        # Show the draw() content as a fixed-width popup.
        return context.window_manager.invoke_popup(self, width=220)
# ------------------ BOOL TOOL ADD-ON PREFERENCES ----------------------------
def UpdateBoolTool_Pref(self, context):
    """Preference update callback: (un)register Fast Transform hotkeys."""
    if not self.fast_transform:
        UnRegisterFastT()
        return
    RegisterFastT()
# Add-ons Preferences Update Panel
# Define Panel classes for updating
panels = (
    VIEW3D_PT_booltool_tools,
    VIEW3D_PT_booltool_config,
    VIEW3D_PT_booltool_bviewer,
)
def update_panels(self, context):
    """Re-register the add-on panels under the user-chosen tool-shelf tab."""
    try:
        # Unregister only panels that are currently registered
        # (bl_rna exists on the class once register_class has run).
        for panel in panels:
            if "bl_rna" in panel.__dict__:
                bpy.utils.unregister_class(panel)
        for panel in panels:
            # NOTE(review): `context.preferences` is the Blender 2.80+ API while
            # the rest of this add-on uses 2.7x-style UI calls -- confirm the
            # targeted Blender version.
            panel.bl_category = context.preferences.addons[__name__].preferences.category
            bpy.utils.register_class(panel)
    except Exception as e:
        message = "Bool Tool: Updating Panel locations has failed"
        print("\n[{}]\n{}\n\nError:\n{}".format(__name__, message, e))
class PREFS_BoolTool_Props(AddonPreferences):
    """Add-on preferences: hotkey/visualization options and panel tab name."""
    bl_idname = __name__
    # Swaps G/R/S for Bool Tool's Fast Transform operators (see RegisterFastT).
    fast_transform = BoolProperty(
        name="Fast Transformations",
        default=False,
        update=UpdateBoolTool_Pref,
        description="Replace the Transform HotKeys (G,R,S)\n"
                    "for a custom version that can optimize the visualization of Brushes",
    )
    # Experimental; referenced only by the commented-out draw() section below.
    make_vertex_groups = BoolProperty(
        name="Make Vertex Groups",
        default=False,
        description="When Applying a Brush to the Object it will create\n"
                    "a new vertex group for the new faces",
    )
    # Experimental; see note above.
    make_boundary = BoolProperty(
        name="Make Boundary",
        default=False,
        description="When Apply a Brush to the Object it will create a\n"
                    "new vertex group of the boundary boolean area",
    )
    use_wire = BoolProperty(
        name="Use Bmesh",
        default=False,
        description="Use The Wireframe Instead of Bounding Box for visualization",
    )
    # Tool-shelf tab under which the panels are registered (see update_panels).
    category = StringProperty(
        name="Tab Category",
        description="Choose a name for the category of the panel",
        default="Tools",
        update=update_panels,
    )
    # UI state only: whether the hotkey list below is expanded.
    Enable_Tab_01 = BoolProperty(
        default=False
    )
    def draw(self, context):
        """Draw the preferences UI, including the collapsible hotkey list."""
        layout = self.layout
        split_percent = 0.3
        split = layout.split(percentage=split_percent)
        col = split.column()
        col.label(text="Tab Category:")
        col = split.column()
        col.prop(self, "category", text="")
        split = layout.split(percentage=split_percent)
        col = split.column()
        col.label("Experimental Features:")
        col = split.column()
        col.prop(self, "fast_transform")
        col.prop(self, "use_wire", text="Use Wire Instead Of Bbox")
        layout.separator()
        """
        # EXPERIMENTAL
        col.prop(self, "make_vertex_groups")
        col.prop(self, "make_boundary")
        """
        layout.prop(self, "Enable_Tab_01", text="Hot Keys", icon="KEYINGSET")
        if self.Enable_Tab_01:
            row = layout.row()
            col = row.column()
            col.label("Hotkey List:")
            col.label("Menu: Ctrl Shift B")
            row = layout.row()
            col = row.column()
            col.label("Brush Operators:")
            col.label("Union: Ctrl Num +")
            col.label("Diff: Ctrl Num -")
            col.label("Intersect: Ctrl Num *")
            col.label("Slice: Ctrl Num /")
            row = layout.row()
            col = row.column()
            col.label("Auto Operators:")
            col.label("Difference: Ctrl Shift Num -")
            col.label("Union: Ctrl Shift Num +")
            col.label("Intersect: Ctrl Shift Num *")
            col.label("Slice: Ctrl Shift Num /")
            col.label("BTool Brush To Mesh: Ctrl Num Enter")
            col.label("BTool All Brush To Mesh: Ctrl Shift Num Enter")
# ------------------- Class List ------------------------------------------------
# Every class that register()/unregister() (un)registers, in dependency order.
classes = (
    PREFS_BoolTool_Props,
    VIEW3D_MT_booltool_menu,
    VIEW3D_PT_booltool_tools,
    VIEW3D_PT_booltool_config,
    VIEW3D_PT_booltool_bviewer,
    OBJECT_OT_BoolTool_Auto_Union,
    OBJECT_OT_BoolTool_Auto_Difference,
    OBJECT_OT_BoolTool_Auto_Intersect,
    OBJECT_OT_BoolTool_Auto_Slice,
    OBJECT_OT_BoolTool_Auto_Subtract,
    BTool_Union,
    BTool_Diff,
    BTool_Inters,
    BTool_Slice,
    BTool_DrawPolyBrush,
    BTool_Remove,
    BTool_AllBrushToMesh,
    BTool_BrushToMesh,
    BTool_FindBrush,
    BTool_MoveStack,
    BTool_EnableBrush,
    BTool_EnableThisBrush,
    BTool_EnableFTransform,
    BTool_FastTransform,
    WM_OT_BoolTool_Help,
)
# ------------------- REGISTER ------------------------------------------------
addon_keymaps = []
addon_keymapsFastT = []
# Registers the Fast Transform hotkey overrides (G/R/S).
def RegisterFastT():
    """Bind G, R and S to BTool_FastTransform in Object Mode."""
    wm = bpy.context.window_manager
    km = wm.keyconfigs.addon.keymaps.new(name='Object Mode', space_type='EMPTY')
    for key, op_name in (('G', "Translate"), ('R', "Rotate"), ('S', "Scale")):
        kmi = km.keymap_items.new(BTool_FastTransform.bl_idname, key, 'PRESS')
        kmi.properties.operator = op_name
        addon_keymapsFastT.append((km, kmi))
# Fast Transform HotKeys UnRegister
def UnRegisterFastT():
    """Remove every Fast Transform keymap item registered by RegisterFastT."""
    kc = bpy.context.window_manager.keyconfigs.addon
    if kc:
        for keymap, item in addon_keymapsFastT:
            keymap.keymap_items.remove(item)
        addon_keymapsFastT.clear()
def register():
    """Add-on entry point: classes, scene props, handlers, menus, hotkeys."""
    for cls in classes:
        bpy.utils.register_class(cls)
    # Place the panels under the tab chosen in the add-on preferences.
    update_panels(None, bpy.context)
    # Scene variables
    bpy.types.Scene.BoolHide = BoolProperty(
        default=False,
        description="Hide boolean objects",
        update=update_BoolHide,
    )
    # Handlers
    bpy.app.handlers.scene_update_post.append(HandleScene)
    bpy.types.VIEW3D_MT_object.append(VIEW3D_BoolTool_Menu)
    # Older Blender builds used a capitalized menu name; ignore if absent.
    try:
        bpy.types.VIEW3D_MT_Object.prepend(VIEW3D_BoolTool_Menu)
    except:
        pass
    wm = bpy.context.window_manager
    # create the boolean menu hotkey
    km = wm.keyconfigs.addon.keymaps.new(name='Object Mode')
    kmi = km.keymap_items.new('wm.call_menu', 'B', 'PRESS', ctrl=True, shift=True)
    kmi.properties.name = 'VIEW3D_MT_booltool_menu'
    addon_keymaps.append((km, kmi))
    # Brush Operators
    kmi = km.keymap_items.new(BTool_Union.bl_idname, 'NUMPAD_PLUS', 'PRESS', ctrl=True)
    addon_keymaps.append((km, kmi))
    kmi = km.keymap_items.new(BTool_Diff.bl_idname, 'NUMPAD_MINUS', 'PRESS', ctrl=True)
    addon_keymaps.append((km, kmi))
    kmi = km.keymap_items.new(BTool_Inters.bl_idname, 'NUMPAD_ASTERIX', 'PRESS', ctrl=True)
    addon_keymaps.append((km, kmi))
    kmi = km.keymap_items.new(BTool_Slice.bl_idname, 'NUMPAD_SLASH', 'PRESS', ctrl=True)
    addon_keymaps.append((km, kmi))
    kmi = km.keymap_items.new(BTool_BrushToMesh.bl_idname, 'NUMPAD_ENTER', 'PRESS', ctrl=True)
    addon_keymaps.append((km, kmi))
    kmi = km.keymap_items.new(BTool_AllBrushToMesh.bl_idname, 'NUMPAD_ENTER', 'PRESS', ctrl=True, shift=True)
    addon_keymaps.append((km, kmi))
    # Auto Operators
    kmi = km.keymap_items.new(OBJECT_OT_BoolTool_Auto_Union.bl_idname,
                              'NUMPAD_PLUS', 'PRESS', ctrl=True, shift=True)
    addon_keymaps.append((km, kmi))
    kmi = km.keymap_items.new(OBJECT_OT_BoolTool_Auto_Difference.bl_idname,
                              'NUMPAD_MINUS', 'PRESS', ctrl=True, shift=True)
    addon_keymaps.append((km, kmi))
    kmi = km.keymap_items.new(OBJECT_OT_BoolTool_Auto_Intersect.bl_idname,
                              'NUMPAD_ASTERIX', 'PRESS', ctrl=True, shift=True)
    addon_keymaps.append((km, kmi))
    kmi = km.keymap_items.new(OBJECT_OT_BoolTool_Auto_Slice.bl_idname,
                              'NUMPAD_SLASH', 'PRESS', ctrl=True, shift=True)
    addon_keymaps.append((km, kmi))
def unregister():
    """Undo everything register() did, in reverse order."""
    # Keymapping
    # remove keymaps when add-on is deactivated
    wm = bpy.context.window_manager
    kc = wm.keyconfigs.addon
    if kc:
        for km, kmi in addon_keymaps:
            km.keymap_items.remove(kmi)
    addon_keymaps.clear()
    UnRegisterFastT()
    bpy.types.VIEW3D_MT_object.remove(VIEW3D_BoolTool_Menu)
    # Mirror of the compatibility prepend in register(); ignore if absent.
    try:
        bpy.types.VIEW3D_MT_Object.remove(VIEW3D_BoolTool_Menu)
    except:
        pass
    del bpy.types.Scene.BoolHide
    for cls in classes:
        bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
| 34,711 | 7,220 | 1,230 |
c56d86c7f1ec176af977170f19fa1d34d19015d9 | 1,752 | py | Python | opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py | s27y/opentelemetry-python | 8cc92600158aba9d9de272f66a29a8422e3e5a66 | [
"Apache-2.0"
] | null | null | null | opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py | s27y/opentelemetry-python | 8cc92600158aba9d9de272f66a29a8422e3e5a66 | [
"Apache-2.0"
] | null | null | null | opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py | s27y/opentelemetry-python | 8cc92600158aba9d9de272f66a29a8422e3e5a66 | [
"Apache-2.0"
] | null | null | null | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from logging import getLogger
from pkg_resources import iter_entry_points
from opentelemetry.instrumentation.auto_instrumentation.components import (
initialize_components,
)
logger = getLogger(__file__)
def auto_instrument():
    """Instrument every library exposing an ``opentelemetry_instrumentor`` entry point."""
    for entry_point in iter_entry_points("opentelemetry_instrumentor"):
        try:
            entry_point.load()().instrument()  # type: ignore
            logger.debug("Instrumented %s", entry_point.name)
        except Exception as exc:  # pylint: disable=broad-except
            logger.exception("Instrumenting of %s failed", entry_point.name)
            raise exc


def initialize():
    """Set up the OpenTelemetry components, then auto-instrument libraries."""
    try:
        initialize_components()
        auto_instrument()
    except Exception:  # pylint: disable=broad-except
        logger.exception("Failed to auto initialize opentelemetry")


if (
    hasattr(sys, "argv")
    and sys.argv[0].split(os.path.sep)[-1] == "celery"
    and "worker" in sys.argv[1:]
):
    # Celery workers fork: each worker process must set up its own tracer,
    # so defer initialization to the worker_process_init signal.
    from celery.signals import worker_process_init  # pylint:disable=E0401

    @worker_process_init.connect(weak=False)
    def init_celery(*args, **kwargs):
        """Initialize OpenTelemetry inside each new Celery worker process."""
        initialize()

else:
    initialize()
| 29.2 | 76 | 0.714612 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from logging import getLogger
from pkg_resources import iter_entry_points
from opentelemetry.instrumentation.auto_instrumentation.components import (
initialize_components,
)
logger = getLogger(__file__)
def auto_instrument():
    """Load and run every registered ``opentelemetry_instrumentor`` entry point."""
    for entry_point in iter_entry_points("opentelemetry_instrumentor"):
        try:
            instrumentor_factory = entry_point.load()
            instrumentor_factory().instrument()  # type: ignore
            logger.debug("Instrumented %s", entry_point.name)
        except Exception as exc:  # pylint: disable=broad-except
            logger.exception("Instrumenting of %s failed", entry_point.name)
            raise exc
def initialize():
    """Initialize OpenTelemetry components and auto-instrument libraries.

    Failures are logged but swallowed so the host application still starts.
    """
    try:
        initialize_components()
        auto_instrument()
    except Exception:  # pylint: disable=broad-except
        logger.exception("Failed to auto initialize opentelemetry")
if (
    hasattr(sys, "argv")
    and sys.argv[0].split(os.path.sep)[-1] == "celery"
    and "worker" in sys.argv[1:]
):
    # Celery workers fork: defer initialization to each worker process via
    # the worker_process_init signal instead of initializing in the parent.
    from celery.signals import worker_process_init  # pylint:disable=E0401
    @worker_process_init.connect(weak=False)
    def init_celery(*args, **kwargs):
        """Initialize OpenTelemetry inside each new Celery worker process."""
        initialize()
else:
    initialize()
| 592 | 0 | 72 |
55a2292fbff06b70605f2594eb1fa26a748a8a95 | 970 | py | Python | recipes/Python/525487_Extending_socketsocketpair_work/recipe-525487.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/525487_Extending_socketsocketpair_work/recipe-525487.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/525487_Extending_socketsocketpair_work/recipe-525487.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | from socket import *
import threading
try:
pairfamily = AF_UNIX
except NameError:
pairfamily = AF_INET
def SocketPair(family=None, type_=SOCK_STREAM, proto=IPPROTO_IP):
    """Wraps socketpair() to support Windows using local ephemeral ports.

    Returns a pair of connected stream sockets.  Where socket.socketpair()
    is unavailable, the pair is emulated with a loopback listener bound to
    an ephemeral port.  ``family=None`` resolves to AF_UNIX when the
    platform provides it, otherwise AF_INET (same as the module default).
    """
    if family is None:
        # Resolve the default lazily so the function also works standalone.
        try:
            family = AF_UNIX
        except NameError:
            family = AF_INET
    try:
        sock1, sock2 = socketpair(family, type_, proto)
        return (sock1, sock2)
    except NameError:
        # Emulate socketpair(): listen on an ephemeral loopback port and
        # connect to it from a short-lived background thread.
        listensock = socket(family, type_, proto)
        listensock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        listensock.bind(('localhost', 0))
        _iface, ephport = listensock.getsockname()
        listensock.listen(1)
        sock1 = socket(family, type_, proto)
        # The original called pairConnect(), which is not defined in this
        # file; connect directly from the thread instead.
        connthread = threading.Thread(target=sock1.connect,
                                      args=[('localhost', ephport)])
        connthread.daemon = True
        connthread.start()
        sock2, _addr = listensock.accept()
        listensock.close()
        return (sock1, sock2)
| 31.290323 | 80 | 0.663918 | from socket import *
import threading
try:
pairfamily = AF_UNIX
except NameError:
pairfamily = AF_INET
def SocketPair(family=pairfamily, type_=SOCK_STREAM, proto=IPPROTO_IP):
    """Wraps socketpair() to support Windows using local ephemeral ports"""
    try:
        first, second = socketpair(family, type_, proto)
        return (first, second)
    except NameError:
        # No native socketpair(): emulate it with a loopback listener on an
        # ephemeral port plus a background connect.
        server = socket(family, type_, proto)
        server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        server.bind( ('localhost', 0) )
        _iface, port = server.getsockname()
        server.listen(1)
        client = socket(family, type_, proto)
        worker = threading.Thread(target=pairConnect, args=[client, port])
        worker.setDaemon(1)
        worker.start()
        accepted, _peer = server.accept()
        server.close()
        return (client, accepted)
def pairConnect(sock, port):
    """Thread worker for SocketPair: connect *sock* to the local *port*."""
    address = ('localhost', port)
    sock.connect(address)
| 47 | 0 | 23 |
2c874ae4a028f9b9d95509696f7ed89e4d5161f0 | 1,916 | py | Python | backend/src/notes/serializers.py | trustthedata/Lambda-Notes | 1adeaee7ac9b72c34c5a502fddcfe790042bf8a5 | [
"MIT"
] | null | null | null | backend/src/notes/serializers.py | trustthedata/Lambda-Notes | 1adeaee7ac9b72c34c5a502fddcfe790042bf8a5 | [
"MIT"
] | 4 | 2020-06-05T19:19:31.000Z | 2022-02-12T14:48:50.000Z | backend/src/notes/serializers.py | trustthedata/Lambda-Notes | 1adeaee7ac9b72c34c5a502fddcfe790042bf8a5 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from rest_framework_jwt.settings import api_settings
from notes.models import PersonalNote
from django.contrib.auth.models import User, Group
# from taggit_serializer.serializers import (TagListSerializerField, TaggitSerializer) | 33.034483 | 92 | 0.69833 | from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from rest_framework_jwt.settings import api_settings
from notes.models import PersonalNote
from django.contrib.auth.models import User, Group
# from taggit_serializer.serializers import (TagListSerializerField, TaggitSerializer)
class PersonalNoteSerializer(serializers.ModelSerializer):
    """Serializes PersonalNote instances, exposing all model fields."""
    # tags = TagListSerializerField()
    def create(self, validated_data):
        """Create and return a PersonalNote from the validated payload."""
        # user = self.context['request'].user
        note = PersonalNote.objects.create( **validated_data)
        return note
    class Meta:
        model = PersonalNote
        # Note: the parentheses make this a plain string, not a tuple;
        # DRF accepts the "__all__" shortcut in either form.
        fields = ('__all__')
class UserSerializer(serializers.ModelSerializer):
    """Minimal read serializer for User: exposes only the username."""
    class Meta:
        model = User
        fields = ( 'username', )
class UserSerializerWithToken(serializers.ModelSerializer):
    """Registration serializer: validates a new User and returns a JWT."""
    email = serializers.EmailField(
        required=True,
        validators=[UniqueValidator(queryset=User.objects.all())]
    )
    username = serializers.CharField(
        max_length=32,
        validators=[UniqueValidator(queryset=User.objects.all())]
    )
    # write_only keeps the raw password out of serialized responses.
    password = serializers.CharField(min_length=8, write_only=True)
    token = serializers.SerializerMethodField()
    def get_token(self, obj):
        """Build a JWT for *obj* using the configured rest_framework_jwt handlers."""
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(obj)
        token = jwt_encode_handler(payload)
        return token
    def create(self, validated_data):
        """Create the User via create_user() so the password gets hashed."""
        user = User.objects.create_user(validated_data['username'], validated_data['email'],
                                        validated_data['password'])
        return user
    class Meta:
        model = User
        fields = ('username', 'email', 'password', 'token')
class GroupSerializer(serializers.ModelSerializer):
    """Minimal serializer for auth Group: exposes only the group name."""
    class Meta:
        model = Group
        # The original final line had dataset-extraction residue
        # ("| 537 | 966 | 92 |") fused onto it, breaking the syntax.
        fields = ("name", )
d4b04aeac788163a22e4262524c63aff5a1d9e1b | 2,236 | py | Python | backend/setup.py | noushi/kale | 1530ccc6dda3b8a02e2f58f50cfa682adfaf4b80 | [
"Apache-2.0"
] | null | null | null | backend/setup.py | noushi/kale | 1530ccc6dda3b8a02e2f58f50cfa682adfaf4b80 | [
"Apache-2.0"
] | null | null | null | backend/setup.py | noushi/kale | 1530ccc6dda3b8a02e2f58f50cfa682adfaf4b80 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
# Package metadata and build configuration for the kubeflow-kale distribution.
setup(
    name='kubeflow-kale',
    version='0.5.1',
    description='Convert JupyterNotebooks to Kubeflow Pipelines deployments',
    url='https://github.com/kubeflow-kale/kale',
    author='Stefano Fioravanzo',
    author_email='stefano.fioravanzo@gmail.com',
    license='Apache License Version 2.0',
    packages=['kale',
              'kale.common',
              'kale.config',
              'kale.marshal',
              'kale.processors',
              'kale.rpc',
              'kale.static_analysis',
              ],
    install_requires=[
        'kfp',
        'autopep8 >=1.4, <1.5',
        'astor >= 0.8.1',
        'nbformat >=4.4, <5.0',
        'networkx >=2.3, <3.0',
        'jinja2 >=2.10, <3.0',
        'graphviz >=0.13, <1.0',
        'pyflakes >=2.1.1',
        'dill >=0.3, <0.4',
        'IPython >= 7.6.0',
        'jupyter-client >= 5.3.4',
        'jupyter-core >= 4.6.0',
        'nbconvert >= 5.6.1, < 6.0.0',
        'ipykernel >= 5.1.4',
        'notebook >= 6.0.0',
        'packaging > 20',
        'ml_metadata == 0.24.0',
        'progress >= 1.5',
    ],
    # Development-only tooling, installed via `pip install kubeflow-kale[dev]`.
    extras_require={
        'dev': [
            'pytest',
            'pytest-clarity',
            'testfixtures',
            'pytest-cov',
            'flake8',
            'flake8-docstrings'
        ]
    },
    # CLI entry points exposed after installation.
    entry_points={'console_scripts':
                      ['kale=kale.command_line:main',
                       'kale_server=kale.command_line:server',
                       'kale-volumes=kale.command_line:kale_volumes']},
    python_requires='>=3.6.0',
    include_package_data=True,
    zip_safe=False
)
| 31.055556 | 77 | 0.558587 | # Copyright 2019-2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
# Package metadata and build configuration for the kubeflow-kale distribution.
setup(
    name='kubeflow-kale',
    version='0.5.1',
    description='Convert JupyterNotebooks to Kubeflow Pipelines deployments',
    url='https://github.com/kubeflow-kale/kale',
    author='Stefano Fioravanzo',
    author_email='stefano.fioravanzo@gmail.com',
    license='Apache License Version 2.0',
    packages=['kale',
              'kale.common',
              'kale.config',
              'kale.marshal',
              'kale.processors',
              'kale.rpc',
              'kale.static_analysis',
              ],
    install_requires=[
        'kfp',
        'autopep8 >=1.4, <1.5',
        'astor >= 0.8.1',
        'nbformat >=4.4, <5.0',
        'networkx >=2.3, <3.0',
        'jinja2 >=2.10, <3.0',
        'graphviz >=0.13, <1.0',
        'pyflakes >=2.1.1',
        'dill >=0.3, <0.4',
        'IPython >= 7.6.0',
        'jupyter-client >= 5.3.4',
        'jupyter-core >= 4.6.0',
        'nbconvert >= 5.6.1, < 6.0.0',
        'ipykernel >= 5.1.4',
        'notebook >= 6.0.0',
        'packaging > 20',
        'ml_metadata == 0.24.0',
        'progress >= 1.5',
    ],
    # Development-only tooling, installed via `pip install kubeflow-kale[dev]`.
    extras_require={
        'dev': [
            'pytest',
            'pytest-clarity',
            'testfixtures',
            'pytest-cov',
            'flake8',
            'flake8-docstrings'
        ]
    },
    # CLI entry points exposed after installation.
    entry_points={'console_scripts':
                      ['kale=kale.command_line:main',
                       'kale_server=kale.command_line:server',
                       'kale-volumes=kale.command_line:kale_volumes']},
    python_requires='>=3.6.0',
    include_package_data=True,
    zip_safe=False
)
| 0 | 0 | 0 |
bf5fc6a03134191f6e1482e82dc729fe4a50cff7 | 3,177 | py | Python | protostar/commands/deploy/starkware/starknet_cli.py | software-mansion/protostar | e9f701d3b02dde78e5292a4698ca3c6c7d39b485 | [
"MIT"
] | 11 | 2022-01-31T14:27:32.000Z | 2022-03-28T18:24:45.000Z | protostar/commands/deploy/starkware/starknet_cli.py | software-mansion/protostar | e9f701d3b02dde78e5292a4698ca3c6c7d39b485 | [
"MIT"
] | 105 | 2022-01-31T15:25:29.000Z | 2022-03-31T12:28:13.000Z | protostar/commands/deploy/starkware/starknet_cli.py | software-mansion/protostar | e9f701d3b02dde78e5292a4698ca3c6c7d39b485 | [
"MIT"
] | 1 | 2022-03-28T16:18:28.000Z | 2022-03-28T16:18:28.000Z | from io import TextIOWrapper
from typing import Optional, Sequence, Union
from services.external_api.client import RetryConfig
from starkware.starknet.cli.starknet_cli import assert_tx_received, validate_arguments
from starkware.starknet.definitions import constants, fields
from starkware.starknet.public.abi_structs import identifier_manager_from_abi
from starkware.starknet.services.api.contract_class import ContractClass
from starkware.starknet.services.api.gateway.gateway_client import GatewayClient
from starkware.starknet.services.api.gateway.transaction import Deploy
from starkware.starknet.utils.api_utils import cast_to_felts
from starkware.starkware_utils.error_handling import StarkErrorCode
from protostar.commands.deploy.gateway_response import SuccessfulGatewayResponse
from protostar.protostar_exception import ProtostarException
class DeployContractException(ProtostarException):
    """Raised when the gateway does not accept the deploy transaction.

    This definition was missing from this file although deploy() below
    raises it; restored so the module is importable.
    """

    pass


async def deploy(
    gateway_url: str,
    compiled_contract_file: TextIOWrapper,
    constructor_args: Optional[Sequence[Union[str, int]]] = None,
    salt: Optional[str] = None,
    token: Optional[str] = None,
) -> SuccessfulGatewayResponse:
    """Version of deploy function from starkware.starknet.cli.starknet_cli independent of CLI logic.

    Raises:
        ValueError: on a malformed salt or constructor-args mismatch.
        DeployContractException: when the gateway rejects the transaction.
    """
    inputs = cast_to_felts(constructor_args or [])

    if salt is not None and not salt.startswith("0x"):
        raise ValueError(f"salt must start with '0x'. Got: {salt}.")

    try:
        numeric_salt: int = (
            fields.ContractAddressSalt.get_random_value()
            if salt is None
            else int(salt, 16)
        )
    except ValueError as err:
        raise ValueError("Invalid salt format.") from err

    contract_class = ContractClass.loads(data=compiled_contract_file.read())
    abi = contract_class.abi
    assert abi is not None, "Missing ABI in the given contract class."

    # Validate the constructor calldata against the ABI; contracts without a
    # constructor must receive no args at all.
    for abi_entry in abi:
        if abi_entry["type"] == "constructor":
            validate_arguments(
                inputs=inputs,
                abi_entry=abi_entry,
                identifier_manager=identifier_manager_from_abi(abi=abi),
            )
            break
    else:
        if len(inputs) != 0:
            raise ValueError(
                "Constructor args cannot be specified for contracts without a constructor."
            )

    tx = Deploy(
        contract_address_salt=numeric_salt,
        contract_definition=contract_class,
        constructor_calldata=inputs,
        version=constants.TRANSACTION_VERSION,
    )  # type: ignore

    gateway_client = GatewayClient(
        url=gateway_url, retry_config=RetryConfig(n_retries=1)
    )
    gateway_response = await gateway_client.add_transaction(tx=tx, token=token)
    if gateway_response["code"] != StarkErrorCode.TRANSACTION_RECEIVED.name:
        raise DeployContractException(
            message=f"Failed to send transaction. Response: {gateway_response}."
        )
    contract_address = int(gateway_response["address"], 16)
    return SuccessfulGatewayResponse(
        address=contract_address,
        code=gateway_response["code"],
        transaction_hash=gateway_response["transaction_hash"],
    )
| 36.517241 | 103 | 0.720806 | from io import TextIOWrapper
from typing import Optional, Sequence, Union
from services.external_api.client import RetryConfig
from starkware.starknet.cli.starknet_cli import assert_tx_received, validate_arguments
from starkware.starknet.definitions import constants, fields
from starkware.starknet.public.abi_structs import identifier_manager_from_abi
from starkware.starknet.services.api.contract_class import ContractClass
from starkware.starknet.services.api.gateway.gateway_client import GatewayClient
from starkware.starknet.services.api.gateway.transaction import Deploy
from starkware.starknet.utils.api_utils import cast_to_felts
from starkware.starkware_utils.error_handling import StarkErrorCode
from protostar.commands.deploy.gateway_response import SuccessfulGatewayResponse
from protostar.protostar_exception import ProtostarException
class DeployContractException(ProtostarException):
    """Raised when the StarkNet gateway does not accept the deploy transaction."""
    pass
async def deploy(
    gateway_url: str,
    compiled_contract_file: TextIOWrapper,
    constructor_args: Optional[Sequence[Union[str, int]]] = None,
    salt: Optional[str] = None,
    token: Optional[str] = None,
) -> SuccessfulGatewayResponse:
    """Version of deploy function from starkware.starknet.cli.starknet_cli independent of CLI logic.

    Raises ValueError on a malformed salt or constructor-args mismatch and
    DeployContractException when the gateway rejects the transaction.
    """
    inputs = cast_to_felts(constructor_args or [])
    if salt is not None and not salt.startswith("0x"):
        raise ValueError(f"salt must start with '0x'. Got: {salt}.")
    try:
        # Random salt when none was given, otherwise parse the 0x-hex value.
        numeric_salt: int = (
            fields.ContractAddressSalt.get_random_value()
            if salt is None
            else int(salt, 16)
        )
    except ValueError as err:
        raise ValueError("Invalid salt format.") from err
    contract_class = ContractClass.loads(data=compiled_contract_file.read())
    abi = contract_class.abi
    assert abi is not None, "Missing ABI in the given contract class."
    # Validate calldata against the constructor ABI entry, if any; contracts
    # without a constructor must receive no args (for-else handles that case).
    for abi_entry in abi:
        if abi_entry["type"] == "constructor":
            validate_arguments(
                inputs=inputs,
                abi_entry=abi_entry,
                identifier_manager=identifier_manager_from_abi(abi=abi),
            )
            break
    else:
        if len(inputs) != 0:
            raise ValueError(
                "Constructor args cannot be specified for contracts without a constructor."
            )
    tx = Deploy(
        contract_address_salt=numeric_salt,
        contract_definition=contract_class,
        constructor_calldata=inputs,
        version=constants.TRANSACTION_VERSION,
    )  # type: ignore
    # Single attempt against the gateway; no retries.
    gateway_client = GatewayClient(
        url=gateway_url, retry_config=RetryConfig(n_retries=1)
    )
    gateway_response = await gateway_client.add_transaction(tx=tx, token=token)
    if gateway_response["code"] != StarkErrorCode.TRANSACTION_RECEIVED.name:
        raise DeployContractException(
            message=f"Failed to send transaction. Response: {gateway_response}."
        )
    contract_address = int(gateway_response["address"], 16)
    return SuccessfulGatewayResponse(
        address=contract_address,
        code=gateway_response["code"],
        transaction_hash=gateway_response["transaction_hash"],
    )
| 0 | 38 | 23 |
762ff6c4cef26b388215bc734d668b7188ad6f35 | 775 | py | Python | api/migrations/0004_auto_20180812_0418.py | PatrickCmd/django-rest-api-yummy-recipes | 449d791ed6befc235218a0689becbfc1ecd25557 | [
"MIT"
] | 2 | 2021-05-22T12:59:22.000Z | 2021-08-17T18:48:34.000Z | api/migrations/0004_auto_20180812_0418.py | PatrickCmd/django-rest-api-yummy-recipes | 449d791ed6befc235218a0689becbfc1ecd25557 | [
"MIT"
] | 2 | 2018-09-04T11:13:57.000Z | 2018-09-24T06:04:01.000Z | api/migrations/0004_auto_20180812_0418.py | PatrickCmd/django-rest-api-yummy-recipes | 449d791ed6befc235218a0689becbfc1ecd25557 | [
"MIT"
] | 2 | 2020-04-18T17:14:03.000Z | 2021-01-04T06:15:25.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-12 04:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 28.703704 | 122 | 0.636129 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-12 04:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add ``related_name`` reverse accessors
    ('reviews' / 'upvotes') to the Recipe foreign keys."""

    dependencies = [
        ('api', '0003_auto_20180812_0410'),
    ]

    operations = [
        # Review.recipe: reverse lookups become recipe.reviews
        migrations.AlterField(
            model_name='review',
            name='recipe',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to='api.Recipe'),
        ),
        # Upvote.recipe: reverse lookups become recipe.upvotes
        migrations.AlterField(
            model_name='upvote',
            name='recipe',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='upvotes', to='api.Recipe'),
        ),
    ]
| 0 | 565 | 23 |
c267e2cfb2141ec6ba81756b528b9c64b5821b7f | 1,555 | py | Python | example/example.py | coursekevin/avlpy | 5ad81ea854b804ca333771630904a63ada25efa0 | [
"MIT"
] | 1 | 2020-11-21T22:11:02.000Z | 2020-11-21T22:11:02.000Z | example/example.py | coursekevin/avlpy | 5ad81ea854b804ca333771630904a63ada25efa0 | [
"MIT"
] | null | null | null | example/example.py | coursekevin/avlpy | 5ad81ea854b804ca333771630904a63ada25efa0 | [
"MIT"
] | null | null | null | import avlpy
# --------------------------------------------------------------------------------------------
# AVL SESSION
# --------------------------------------------------------------------------------------------
# setup avl session (executable name, geometry file, mass file)
avlSess = avlpy.avlRun('avl3.35','wingus.avl','wingus.mass')
# set cruise condition to 9 m/s
avlSess.set_flight_constraint('C1','V','9')
# set elevator to pitch such that pitch moment is 0 at cruise
avlSess.set_var_constraint('D1','PM',0)
# write flow analysis to default avl_tmp location; returns file name and process output
fname,proc_out = avlSess.get_flow_analysis('ST')
# read avl flow analysis to dictionary
avl_dict = avlpy.read_avl_flow_analysis(fname)
# print some important constants (trim angle, elevator deflection, neutral
# point and the stability derivatives Cma/Clb/Cnb)
print("Alpha: " + str(avl_dict['Alpha']))
print("Elevator Defl.: " + str(avl_dict['elevator']))
print("Neutral Point: " + str(avl_dict['Xnp']))
print("Cma: " + str(avl_dict["Cma"]))
print("Clb: " + str(avl_dict["Clb"]))
print("Cnb: " + str(avl_dict["Cnb"]))
# perform avl dynamic value analysis (presumably eigenmode analysis -- confirm against avlpy docs)
fname2,proc_out2 = avlSess.get_eig_analysis('S')
# read state matrices to python arrays
A,B = avlpy.read_avl_sys_mat(fname2)
print("\nDynamic 'A' Matrix: " + str(A))
# --------------------------------------------------------------------------------------------
# READING AVL FILES
# --------------------------------------------------------------------------------------------
surfaces = avlpy.read_avl_file('wingus.avl')
print("\nExample Surfaces File:")
print(surfaces)
avlpy.save_avl_file('surfaces.avl',surfaces) | 35.340909 | 94 | 0.552412 | import avlpy
# --------------------------------------------------------------------------------------------
# AVL SESSION
# --------------------------------------------------------------------------------------------
# setup avl session (executable name, geometry file, mass file)
avlSess = avlpy.avlRun('avl3.35','wingus.avl','wingus.mass')
# set cruise condition to 9 m/s
avlSess.set_flight_constraint('C1','V','9')
# set elevator to pitch such that pitch moment is 0 at cruise
avlSess.set_var_constraint('D1','PM',0)
# write flow analysis to default avl_tmp location; returns file name and process output
fname,proc_out = avlSess.get_flow_analysis('ST')
# read avl flow analysis to dictionary
avl_dict = avlpy.read_avl_flow_analysis(fname)
# print some important constants (trim angle, elevator deflection, neutral
# point and the stability derivatives Cma/Clb/Cnb)
print("Alpha: " + str(avl_dict['Alpha']))
print("Elevator Defl.: " + str(avl_dict['elevator']))
print("Neutral Point: " + str(avl_dict['Xnp']))
print("Cma: " + str(avl_dict["Cma"]))
print("Clb: " + str(avl_dict["Clb"]))
print("Cnb: " + str(avl_dict["Cnb"]))
# perform avl dynamic value analysis (presumably eigenmode analysis -- confirm against avlpy docs)
fname2,proc_out2 = avlSess.get_eig_analysis('S')
# read state matrices to python arrays
A,B = avlpy.read_avl_sys_mat(fname2)
print("\nDynamic 'A' Matrix: " + str(A))
# --------------------------------------------------------------------------------------------
# READING AVL FILES
# --------------------------------------------------------------------------------------------
surfaces = avlpy.read_avl_file('wingus.avl')
print("\nExample Surfaces File:")
print(surfaces)
# save surfaces to new surfaces.avl file
avlpy.save_avl_file('surfaces.avl',surfaces)
4f09063f2ba7aeb54b6bbd9ae8a47ddadf2e0dff | 2,642 | py | Python | src/main.py | liferooter/streameditbot | 102d5cb9f01c1aace12e853328a61ffe2b4a7166 | [
"Unlicense"
] | 1 | 2020-11-27T10:00:42.000Z | 2020-11-27T10:00:42.000Z | src/main.py | liferooter/streameditbot | 102d5cb9f01c1aace12e853328a61ffe2b4a7166 | [
"Unlicense"
] | null | null | null | src/main.py | liferooter/streameditbot | 102d5cb9f01c1aace12e853328a61ffe2b4a7166 | [
"Unlicense"
] | null | null | null | import logging
import os
import asyncio
from html import escape
from aiogram import Bot, Dispatcher, executor, types
BOT_TOKEN: str = os.getenv("BOT_TOKEN")
MSG_LENGTH_LIMIT = 2 ** 12
SANDBOX_USER = 'bot'
# Configure logging
logging.basicConfig(level=logging.INFO)
# Initialize bot and dispatcher3
bot = Bot(token=BOT_TOKEN, parse_mode="HTML")
dp = Dispatcher(bot)
COMMANDS = ['sed', 'grep', 'cut', 'tr', 'tail', 'head', 'uniq', 'sort', 'awk']
async def run_in_container(cmd: str, stdin: str) -> "tuple[str, str, int]":
    """
    Run a program inside the sandbox as the unprivileged SANDBOX_USER.

    :param cmd: command line forwarded to /usr/src/app/sandbox.sh
    :param stdin: text piped to the command's standard input (UTF-8 encoded)
    :return: (stdout, stderr, exit_code); undecodable output bytes are dropped
    """
    proc = await asyncio.create_subprocess_exec("su", SANDBOX_USER, "-c", f"/usr/src/app/sandbox.sh {cmd}",
                                                stdout=asyncio.subprocess.PIPE,
                                                stderr=asyncio.subprocess.PIPE,
                                                stdin=asyncio.subprocess.PIPE)
    stdout, stderr = await proc.communicate(stdin.encode("utf-8"))
    # communicate() waits for process exit, so returncode is set here.
    # (Previously this always returned 0 with a "TODO: exit code" marker.)
    return (stdout.decode('utf-8', errors='ignore'),
            stderr.decode('utf-8', errors='ignore'),
            proc.returncode)
@dp.message_handler(regexp=f'^({"|".join(COMMANDS)})')
async def cmd_handler(message: types.Message):
    """
    Handler for messages that start with a supported command name.

    Runs the message text as a sandboxed shell command; the replied-to
    message (if any) supplies stdin. Replies with stdout, with stderr
    wrapped in <pre> tags, or with a placeholder when both are empty.
    (This handler body was missing, leaving its regexp decorator stacked
    on top of send_welcome; restored here.)
    """
    stdout, stderr, _ = await run_in_container(message.text,
                                               message.reply_to_message.text if message.reply_to_message else "")
    # Clamp outputs to stay under the Telegram message length limit.
    if len(stdout) >= MSG_LENGTH_LIMIT:
        stdout = "Output is too long"
    if len(stderr) >= MSG_LENGTH_LIMIT:
        stderr = "Error is too long"
    if stderr:
        # escape() keeps command output from being parsed as HTML markup.
        await message.reply(
            f'<pre>{escape(stderr)}</pre>'
        )  # TODO: return stderr if exit code != 0
    elif stdout:
        # Prefer replying to the message that supplied stdin, if any.
        if message.reply_to_message:
            await message.reply_to_message.reply(escape(stdout))
        else:
            await message.reply(escape(stdout))
    else:
        await message.reply("<pre>Output is empty...</pre>")


@dp.message_handler(commands=['start', 'help'])
async def send_welcome(message: types.Message):
    """
    This is a handler for `/help` and `/start` commands
    :param message: the incoming Telegram message to reply to
    :return: None
    """
    await message.reply("""Hi!
I am stream editor bot. I can evaluate best Unix stream processing utilities in chat.
Just add me in your group and learn how to use Unix stream editors.
<b>Usage:</b>
<i>command args</i>, where command is one of my supported commands.
Reply on any message to use it as command input.
Now I support: """ + ', '.join(COMMANDS))
| 32.617284 | 113 | 0.630583 | import logging
import os
import asyncio
from html import escape
from aiogram import Bot, Dispatcher, executor, types
BOT_TOKEN: str = os.getenv("BOT_TOKEN")
MSG_LENGTH_LIMIT = 2 ** 12
SANDBOX_USER = 'bot'
# Configure logging
logging.basicConfig(level=logging.INFO)
# Initialize bot and dispatcher3
bot = Bot(token=BOT_TOKEN, parse_mode="HTML")
dp = Dispatcher(bot)
COMMANDS = ['sed', 'grep', 'cut', 'tr', 'tail', 'head', 'uniq', 'sort', 'awk']
async def run_in_container(cmd: str, stdin: str) -> "tuple[str, str, int]":
    """
    Run a program inside the sandbox as the unprivileged SANDBOX_USER.

    :param cmd: command line forwarded to /usr/src/app/sandbox.sh
    :param stdin: text piped to the command's standard input (UTF-8 encoded)
    :return: (stdout, stderr, exit_code); undecodable output bytes are dropped
    """
    proc = await asyncio.create_subprocess_exec("su", SANDBOX_USER, "-c", f"/usr/src/app/sandbox.sh {cmd}",
                                                stdout=asyncio.subprocess.PIPE,
                                                stderr=asyncio.subprocess.PIPE,
                                                stdin=asyncio.subprocess.PIPE)
    stdout, stderr = await proc.communicate(stdin.encode("utf-8"))
    # communicate() waits for process exit, so returncode is set here.
    # (Previously this always returned 0 with a "TODO: exit code" marker.)
    return (stdout.decode('utf-8', errors='ignore'),
            stderr.decode('utf-8', errors='ignore'),
            proc.returncode)
@dp.message_handler(regexp=f'^({"|".join(COMMANDS)})')
async def cmd_handler(message: types.Message):
    """
    Handler for messages that start with a supported command name.

    Runs the message text as a sandboxed shell command; the replied-to
    message (if any) supplies stdin. Replies with stdout, with stderr
    wrapped in <pre> tags, or with a placeholder when both are empty.
    """
    stdout, stderr, _ = await run_in_container(message.text,
                                               message.reply_to_message.text if message.reply_to_message else "")
    # Clamp outputs to stay under the Telegram message length limit.
    if len(stdout) >= MSG_LENGTH_LIMIT:
        stdout = "Output is too long"
    if len(stderr) >= MSG_LENGTH_LIMIT:
        stderr = "Error is too long"
    if stderr:
        # escape() keeps command output from being parsed as HTML markup.
        await message.reply(
            f'<pre>{escape(stderr)}</pre>'
        )  # TODO: return stderr if exit code != 0
    elif stdout:
        # Prefer replying to the message that supplied stdin, if any.
        if message.reply_to_message:
            await message.reply_to_message.reply(escape(stdout))
        else:
            await message.reply(escape(stdout))
    else:
        await message.reply("<pre>Output is empty...</pre>")
@dp.message_handler(commands=['start', 'help'])
async def send_welcome(message: types.Message):
    """
    This is a handler for `/help` and `/start` commands
    :param message: the incoming Telegram message to reply to
    :return: None
    """
    await message.reply("""Hi!
I am stream editor bot. I can evaluate best Unix stream processing utilities in chat.
Just add me in your group and learn how to use Unix stream editors.
<b>Usage:</b>
<i>command args</i>, where command is one of my supported commands.
Reply on any message to use it as command input.
Now I support: """ + ', '.join(COMMANDS))
if __name__ == '__main__':
executor.start_polling(dp, skip_updates=True)
| 784 | 0 | 22 |
baf42647d44c5e877398e19d76821641fd16eed7 | 769 | py | Python | future/wip_play_sound.py | ofgulban/minimalist_psychopy_examples | 71864ca7f829f4846d1aa002754117565b6549ba | [
"Unlicense"
] | 1 | 2019-01-29T17:04:08.000Z | 2019-01-29T17:04:08.000Z | future/wip_play_sound.py | ofgulban/minimalist_psychopy_examples | 71864ca7f829f4846d1aa002754117565b6549ba | [
"Unlicense"
] | null | null | null | future/wip_play_sound.py | ofgulban/minimalist_psychopy_examples | 71864ca7f829f4846d1aa002754117565b6549ba | [
"Unlicense"
] | null | null | null | """Simple script to play sounds.
Notes
-----
Having trouble playing sounds correctly in debian so far. It seems that
Psychopy sound recommentations has changed. I need to have a closer look:
<https://www.psychopy.org/api/sound.html>
"""
import os
import psychtoolbox as ptb
from psychopy import prefs

# The audio backend must be selected before importing psychopy.sound.
prefs.hardware['audioLib'] = ['PTB']
from psychopy import core, sound, event

path_in = "/home/faruk/Git/minimalist_psychopy_examples/future/test"

# Collect the sound files in deterministic (sorted) order.
sounds = sorted(os.listdir(path_in))

# Play sounds one by one
for i in sounds:
    sound_i = os.path.join(path_in, i)
    test_sound = sound.Sound(sound_i, volume=1, sampleRate=44100)
    now = ptb.GetSecs()  # playback-start timestamp; currently unused
    test_sound.play()
    print(i)
    core.wait(2)

# Print the farewell *before* quitting: core.quit() terminates the process,
# so the original order left this print unreachable.
print("Finished.")
core.quit()
| 23.30303 | 73 | 0.726918 | """Simple script to play sounds.
Notes
-----
Having trouble playing sounds correctly in debian so far. It seems that
Psychopy sound recommentations has changed. I need to have a closer look:
<https://www.psychopy.org/api/sound.html>
"""
import os
import psychtoolbox as ptb
from psychopy import prefs

# The audio backend must be selected before importing psychopy.sound.
prefs.hardware['audioLib'] = ['PTB']
from psychopy import core, sound, event

path_in = "/home/faruk/Git/minimalist_psychopy_examples/future/test"

# Collect the sound files in deterministic (sorted) order.
sounds = sorted(os.listdir(path_in))

# Play sounds one by one
for i in sounds:
    sound_i = os.path.join(path_in, i)
    test_sound = sound.Sound(sound_i, volume=1, sampleRate=44100)
    now = ptb.GetSecs()  # playback-start timestamp; currently unused
    test_sound.play()
    print(i)
    core.wait(2)

# Print the farewell *before* quitting: core.quit() terminates the process,
# so the original order left this print unreachable.
print("Finished.")
core.quit()
| 0 | 0 | 0 |
b6c4f5d9d30ff905a464e6c1c340146c5d342069 | 1,282 | py | Python | hard-gists/5201689/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/5201689/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/5201689/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | #!/usr/bin/env python
#http://geoinformaticstutorial.blogspot.it/2012/09/reading-raster-data-with-python-and-gdal.html
#http://www.gis.usu.edu/~chrisg/python/2009/lectures/ospy_slides4.pdf
from osgeo import gdal,ogr
from osgeo.gdalconst import *
import struct
import sys
lon = 12.502742
lat = 42.243713
lat = float(sys.argv[2])
lon = float(sys.argv[3])
ds = gdal.Open(sys.argv[1], GA_ReadOnly)
if ds is None:
print 'Failed open file'
sys.exit(1)
transf = ds.GetGeoTransform()
cols = ds.RasterXSize
rows = ds.RasterYSize
bands = ds.RasterCount #1
band = ds.GetRasterBand(1)
bandtype = gdal.GetDataTypeName(band.DataType) #Int16
driver = ds.GetDriver().LongName #'GeoTIFF'
success, transfInv = gdal.InvGeoTransform(transf)
if not success:
print "Failed InvGeoTransform()"
sys.exit(1)
px, py = gdal.ApplyGeoTransform(transfInv, lon, lat)
structval = band.ReadRaster(int(px), int(py), 1,1, buf_type = band.DataType )
fmt = pt2fmt(band.DataType)
intval = struct.unpack(fmt , structval)
print round(intval[0],2) #intval is a tuple, length=1 as we only asked for 1 pixel value
| 22.491228 | 96 | 0.719189 | #!/usr/bin/env python
#http://geoinformaticstutorial.blogspot.it/2012/09/reading-raster-data-with-python-and-gdal.html
#http://www.gis.usu.edu/~chrisg/python/2009/lectures/ospy_slides4.pdf
from osgeo import gdal,ogr
from osgeo.gdalconst import *
import struct
import sys
lon = 12.502742
lat = 42.243713
lat = float(sys.argv[2])
lon = float(sys.argv[3])
def pt2fmt(pt):
    """Map a GDAL pixel data type to the matching struct format character.

    Returns 'x' (pad byte) for unknown types.
    """
    fmttypes = {
        GDT_Byte: 'B',
        GDT_Int16: 'h',
        GDT_UInt16: 'H',
        GDT_Int32: 'i',
        GDT_UInt32: 'I',
        GDT_Float32: 'f',
        # Float64 pixels are 8 bytes, so 'd' (double); the previous 'f'
        # (4-byte float) made struct.unpack fail on the 8-byte buffer
        # returned by ReadRaster(buf_type=GDT_Float64).
        GDT_Float64: 'd'
    }
    return fmttypes.get(pt, 'x')
ds = gdal.Open(sys.argv[1], GA_ReadOnly)
if ds is None:
    print 'Failed open file'
    sys.exit(1)
# Geotransform maps pixel/line coordinates to georeferenced coordinates.
transf = ds.GetGeoTransform()
cols = ds.RasterXSize
rows = ds.RasterYSize
bands = ds.RasterCount #1
band = ds.GetRasterBand(1)
bandtype = gdal.GetDataTypeName(band.DataType) #Int16
driver = ds.GetDriver().LongName #'GeoTIFF'
# NOTE(review): the (success, transform) tuple return is the GDAL 1.x API;
# GDAL 2.x+ returns just the inverse transform -- confirm target version.
success, transfInv = gdal.InvGeoTransform(transf)
if not success:
    print "Failed InvGeoTransform()"
    sys.exit(1)
# Map lon/lat to pixel/line coordinates and read that single pixel.
px, py = gdal.ApplyGeoTransform(transfInv, lon, lat)
structval = band.ReadRaster(int(px), int(py), 1,1, buf_type = band.DataType )
fmt = pt2fmt(band.DataType)
intval = struct.unpack(fmt , structval)
print round(intval[0],2) #intval is a tuple, length=1 as we only asked for 1 pixel value
| 172 | 0 | 23 |
c297f899031c53dbe37167be86a4369cc274d74c | 4,892 | py | Python | objects/CSCG/_3d/mesh/deprecated/coordinate_transformation/structuredMeshCTBase/COMPONENTS/trace.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | 1 | 2020-10-14T12:48:35.000Z | 2020-10-14T12:48:35.000Z | objects/CSCG/_3d/mesh/deprecated/coordinate_transformation/structuredMeshCTBase/COMPONENTS/trace.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | objects/CSCG/_3d/mesh/deprecated/coordinate_transformation/structuredMeshCTBase/COMPONENTS/trace.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
INTRO
@author: Yi Zhang. Created on Thu May 23 11:07:25 2019
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
Delft, Netherlands
"""
import numpy as np
from screws.freeze.main import FrozenOnly
class CTEXTTBase(FrozenOnly):
    """
    Parent of CoordinateTransformationTrace3D.
    In Trace, the very import difference is that we take NOT mesh-grid inputs
    to evaluate the mapping and so on. This is because we have different faces
    (3D) or edges (2D) for the trace mapping. If we use the mesh-grid points
    as we did in CoordinateTransformation, then, for example, in 3D case, we
    needs 6 inputs of shape = (2,), at least, which is not a smart way.
    """
    def __init__(self, ct):
        """ """
        self._ct_ = ct
        self._freeze_self_()

    @property
    def _mesh_(self):
        # Restored accessor: ``metric_matrix`` below iterates
        # ``self._mesh_.trace.elements.position_representive``, but this
        # property body had been lost, leaving two stacked ``@property``
        # decorators on ``ndim`` and no ``_mesh_`` attribute at all.
        return self._ct_._mesh_

    @property
    def ndim(self):
        """
        this is a trace mapping is in n dimensional object; itself is a n-1 dimensional
        one.
        """
        return self._ct_.ndim

    def ___generate_trace_evaluation_points___(self):
        """
        When even we try to compute the trace mapping or trace Jacobian_matrix,
        we run this method before hands to generate proper points (in reference
        coordinates) for 4 edges(2D) or 6 sides(3D).

        This looks very bad, since if we have done trace mapping, when we
        further compute trace Jacobian, we will repeat it again, why not just
        store it? No, we do not do this, because we always search
        evaluation_points_gird from `self._ct_`, and when we reset
        evaluation_points_gird, we will have to reset the stored value as well.
        Of course, this is doable, but I think it makes the code a little bit
        more un-readable. And what is more, this process is very fast, so, who
        cares if we run it one more time?
        """
        raise Exception(" <CoordinateTransformation.Trace> : To be overwritten.")

    @property
    def mapping(self):
        """
        The mapping. To compute it we just need to employ the
        CoordinateTransformation.

        Returns
        -------
        self._mapping_ : dict
            Unlike the CoordinateTransformation.mapping which must be of
            structured data sturcture (so we put it in a ndarray), we here put
            it in a dict just like what we have in meshComponents.trace.
        """
        raise Exception(" <CoordinateTransformation.Trace> : To be overwritten.")

    @property
    def Jacobian_matrix(self):
        """
        The Jacobian matrix. To compute it we just need to employ the
        CoordinateTransformation.

        Returns
        -------
        self._Jacobian_matrix_ : dict
            As self.mapping, here we also put it in a dict whose keys represent
            the numbering of the trace element. Just like what we always have
            in meshComponents.trace.
        """
        raise Exception(" <CoordinateTransformation.Trace> : To be overwritten.")

    @property
    def metric_matrix(self):
        """ The entries of metric_matrix is normally denoted as g_{i,j}. """
        J = self.Jacobian_matrix
        G = {}
        for k in self._mesh_.trace.elements.position_representive:
            # g_{ij} = sum_l J[l][i] * J[l][j]: Gram matrix of the Jacobian
            # columns, restricted to the (ndim - 1) trace directions.
            Gk = [[None for _ in range(self.ndim-1)] for _ in range(self.ndim-1)]
            for i in range(self.ndim-1):
                for j in range(i, self.ndim-1):
                    Gk[i][j] = J[k][0][i] * J[k][0][j]
                    for l in range(1, self.ndim):
                        Gk[i][j] += J[k][l][i] * J[k][l][j]
                    # G is symmetric, so mirror the upper triangle.
                    if i != j:
                        Gk[j][i] = Gk[i][j]
            G[k] = np.array(Gk)
        return G
| 36.781955 | 86 | 0.560098 | # -*- coding: utf-8 -*-
"""
INTRO
@author: Yi Zhang. Created on Thu May 23 11:07:25 2019
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
Delft, Netherlands
"""
import numpy as np
from screws.freeze.main import FrozenOnly
class CTEXTTBase(FrozenOnly):
    """
    Parent of CoordinateTransformationTrace3D.
    In Trace, the very import difference is that we take NOT mesh-grid inputs
    to evaluate the mapping and so on. This is because we have different faces
    (3D) or edges (2D) for the trace mapping. If we use the mesh-grid points
    as we did in CoordinateTransformation, then, for example, in 3D case, we
    needs 6 inputs of shape = (2,), at least, which is not a smart way.
    """
    def __init__(self, ct):
        """Store the owning coordinate transformation and freeze the instance."""
        self._ct_ = ct
        # self._mapping_ = None
        # self._Jacobian_matrix_ = None
        # self._metric_matrix_ = None
        self._freeze_self_()

    # def _reset_(self):
    #     self._mapping_ = None
    #     self._Jacobian_matrix_ = None
    #     self._metric_matrix_ = None

    @property
    def _mesh_(self):
        # Mesh is reached through the owning coordinate transformation.
        return self._ct_._mesh_

    @property
    def ndim(self):
        """
        this is a trace mapping is in n dimensional object; itself is a n-1 dimensional
        one.
        """
        return self._ct_.ndim

    def ___generate_trace_evaluation_points___(self):
        """
        When even we try to compute the trace mapping or trace Jacobian_matrix,
        we run this method before hands to generate proper points (in reference
        coordinates) for 4 edges(2D) or 6 sides(3D).

        This looks very bad, since if we have done trace mapping, when we
        further compute trace Jacobian, we will repeat it again, why not just
        store it? No, we do not do this, because we always search
        evaluation_points_gird from `self._ct_`, and when we reset
        evaluation_points_gird, we will have to reset the stored value as well.
        Of course, this is doable, but I think it makes the code a little bit
        more un-readable. And what is more, this process is very fast, so, who
        cares if we run it one more time?
        """
        raise Exception(" <CoordinateTransformation.Trace> : To be overwritten.")

    @property
    def mapping(self):
        """
        The mapping. To compute it we just need to employ the
        CoordinateTransformation.

        Returns
        -------
        self._mapping_ : dict
            Unlike the CoordinateTransformation.mapping which must be of
            structured data sturcture (so we put it in a ndarray), we here put
            it in a dict just like what we have in meshComponents.trace.
        """
        raise Exception(" <CoordinateTransformation.Trace> : To be overwritten.")

    @property
    def Jacobian_matrix(self):
        """
        The Jacobian matrix. To compute it we just need to employ the
        CoordinateTransformation.

        Returns
        -------
        self._Jacobian_matrix_ : dict
            As self.mapping, here we also put it in a dict whose keys represent
            the numbering of the trace element. Just like what we always have
            in meshComponents.trace.
        """
        raise Exception(" <CoordinateTransformation.Trace> : To be overwritten.")

    @property
    def metric_matrix(self):
        """ The entries of metric_matrix is normally denoted as g_{i,j}. """
        # if self._metric_matrix_ is None:
        #     J = self.Jacobian_matrix
        #     G = {}
        #     for k in self._mesh_.trace.elements.position_representive:
        #         Gk = [[None for i in range(self.ndim)] for j in range(self.ndim)]
        #         for i in range(self.ndim):
        #             for j in range(i, self.ndim):
        #                 Gk[i][j] = J[k][0][i] * J[k][0][j]
        #                 for l in range(1, self._ct_.ndim):
        #                     Gk[i][j] += J[k][l][i] * J[k][l][j]
        #                 if i != j:
        #                     Gk[j][i] = Gk[i][j]
        #         G[k] = np.array(Gk)
        #     self._metric_matrix_ = G
        # return self._metric_matrix_
        # g_{ij} = sum_l J[l][i] * J[l][j]: Gram matrix of the Jacobian
        # columns, restricted to the (ndim - 1) trace directions; G symmetric.
        J = self.Jacobian_matrix
        G = {}
        for k in self._mesh_.trace.elements.position_representive:
            Gk = [[None for _ in range(self.ndim-1)] for _ in range(self.ndim-1)]
            for i in range(self.ndim-1):
                for j in range(i, self.ndim-1):
                    Gk[i][j] = J[k][0][i] * J[k][0][j]
                    for l in range(1, self.ndim):
                        Gk[i][j] += J[k][l][i] * J[k][l][j]
                    if i != j:
                        Gk[j][i] = Gk[i][j]
            G[k] = np.array(Gk)
        return G
| 28 | 0 | 26 |
7cf627e66990358a58596d2c15b9d9bf051a7195 | 6,614 | py | Python | tests/test_sklearn_decomposition.py | vumichien/hummingbird | 8981e11ce2536167c329a5d9d20e81125a792fe4 | [
"MIT"
] | 13 | 2022-01-17T16:14:26.000Z | 2022-03-30T02:06:04.000Z | tests/test_sklearn_decomposition.py | vumichien/hummingbird | 8981e11ce2536167c329a5d9d20e81125a792fe4 | [
"MIT"
] | 1 | 2022-01-28T23:17:14.000Z | 2022-01-28T23:17:14.000Z | tests/test_sklearn_decomposition.py | vumichien/hummingbird | 8981e11ce2536167c329a5d9d20e81125a792fe4 | [
"MIT"
] | 3 | 2022-01-18T02:13:53.000Z | 2022-03-06T19:28:19.000Z | """
Tests sklearn matrix decomposition converters
"""
import unittest
import warnings
import sys
from distutils.version import LooseVersion
import numpy as np
import torch
import sklearn
from sklearn.decomposition import FastICA, KernelPCA, PCA, TruncatedSVD
from sklearn.cross_decomposition import PLSRegression as PLSR
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
import hummingbird.ml
# PLS regressor n_components two
# PLS regressor n_components two no scale
if __name__ == "__main__":
unittest.main()
| 39.60479 | 144 | 0.72528 | """
Tests sklearn matrix decomposition converters
"""
import unittest
import warnings
import sys
from distutils.version import LooseVersion
import numpy as np
import torch
import sklearn
from sklearn.decomposition import FastICA, KernelPCA, PCA, TruncatedSVD
from sklearn.cross_decomposition import PLSRegression as PLSR
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
import hummingbird.ml
class TestSklearnMatrixDecomposition(unittest.TestCase):
    """Checks that hummingbird's torch conversion of sklearn decomposition
    models (PCA, KernelPCA, FastICA, TruncatedSVD) reproduces sklearn's
    ``transform`` output on the digits dataset."""

    def _fit_model_pca(self, model, precompute=False):
        """Fit ``model`` on digits, convert to torch and compare transforms.

        When ``precompute`` is True the model is fed a precomputed linear
        kernel (gram matrix) instead of the raw features.
        """
        data = load_digits()
        X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.2, random_state=42)
        X_test = X_test.astype("float32")
        if precompute:
            # For precompute we use a linear kernel
            model.fit(np.dot(X_train, X_train.T))
            X_test = np.dot(X_test, X_train.T)
        else:
            model.fit(X_train)

        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(model.transform(X_test), torch_model.transform(X_test), rtol=1e-6, atol=2 * 1e-5)

    # PCA n_components none
    def test_pca_converter_none(self):
        self._fit_model_pca(PCA(n_components=None))

    # PCA n_components two
    def test_pca_converter_two(self):
        self._fit_model_pca(PCA(n_components=2))

    # PCA n_components mle and whiten true
    @unittest.skipIf(
        LooseVersion(sklearn.__version__) < LooseVersion("0.23.2"),
        reason="With Sklearn version < 0.23.2 returns ValueError: math domain error (https://github.com/scikit-learn/scikit-learn/issues/4441)",
    )
    def test_pca_converter_mle_whiten(self):
        self._fit_model_pca(PCA(n_components="mle", whiten=True))

    # PCA n_components mle and solver full
    @unittest.skipIf(
        LooseVersion(sklearn.__version__) < LooseVersion("0.23.2"),
        reason="With Sklearn version < 0.23.2 returns ValueError: math domain error (https://github.com/scikit-learn/scikit-learn/issues/4441)",
    )
    def test_pca_converter_mle_full(self):
        self._fit_model_pca(PCA(n_components="mle", svd_solver="full"))

    # PCA n_components none and solver arpack
    def test_pca_converter_none_arpack(self):
        self._fit_model_pca(PCA(n_components=None, svd_solver="arpack"))

    # PCA n_components none and solver randomized
    def test_pca_converter_none_randomized(self):
        self._fit_model_pca(PCA(n_components=None, svd_solver="randomized"))

    # KernelPCA linear kernel
    def test_kernel_pca_converter_linear(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="linear"))

    # KernelPCA linear kernel with inverse transform
    def test_kernel_pca_converter_linear_fit_inverse_transform(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="linear", fit_inverse_transform=True))

    # KernelPCA poly kernel
    def test_kernel_pca_converter_poly(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="poly", degree=2))

    # KernelPCA poly kernel coef0
    def test_kernel_pca_converter_poly_coef0(self):
        self._fit_model_pca(KernelPCA(n_components=10, kernel="poly", degree=3, coef0=10))

    # KernelPCA poly kernel with inverse transform
    def test_kernel_pca_converter_poly_fit_inverse_transform(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="poly", degree=3, fit_inverse_transform=True))

    # KernelPCA rbf kernel
    def test_kernel_pca_converter_rbf(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="rbf"))

    # KernelPCA sigmoid kernel
    def test_kernel_pca_converter_sigmoid(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="sigmoid"))

    # KernelPCA cosine kernel
    def test_kernel_pca_converter_cosine(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="cosine"))

    # KernelPCA precomputed kernel
    def test_kernel_pca_converter_precomputed(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="precomputed"), precompute=True)

    # TODO: Fails on macos-latest Python 3.8 due to a sklearn bug.
    # FastICA converter with n_components none
    # def test_fast_ica_converter_none(self):
    #     self._fit_model_pca(FastICA(n_components=None))

    # FastICA converter with n_components 3
    def test_fast_ica_converter_3(self):
        self._fit_model_pca(FastICA(n_components=3))

    # FastICA converter with n_components 3 whiten
    def test_fast_ica_converter_3_whiten(self):
        self._fit_model_pca(FastICA(n_components=3, whiten=True))

    # FastICA converter with n_components 3 deflation algorithm
    def test_fast_ica_converter_3_deflation(self):
        self._fit_model_pca(FastICA(n_components=3, algorithm="deflation"))

    # FastICA converter with n_components 3 fun exp
    def test_fast_ica_converter_3_exp(self):
        self._fit_model_pca(FastICA(n_components=3, fun="exp"))

    # FastICA converter with n_components 3 fun cube
    def test_fast_ica_converter_3_cube(self):
        self._fit_model_pca(FastICA(n_components=3, fun="cube"))

    # FastICA converter with n_components 3 fun custom
    def test_fast_ica_converter_3_custom(self):
        def my_g(x):
            return x ** 3, (3 * x ** 2).mean(axis=-1)

        self._fit_model_pca(FastICA(n_components=3, fun=my_g))

    # TruncatedSVD converter with n_components 3
    def test_truncated_svd_converter_3(self):
        self._fit_model_pca(TruncatedSVD(n_components=3))

    # TruncatedSVD converter with n_components 3 algorithm arpack
    def test_truncated_svd_converter_3_arpack(self):
        self._fit_model_pca(TruncatedSVD(n_components=3, algorithm="arpack"))
class TestSklearnCrossDecomposition(unittest.TestCase):
    """Checks hummingbird's torch conversion of sklearn PLSRegression."""

    def _fit_model_pls_regressor(self, model):
        """Fit ``model`` on a tiny dataset, convert to torch and compare predictions."""
        X = [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [2.0, 5.0, 4.0]]
        Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
        model.fit(X, Y)
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(model.predict(X), torch_model.predict(X), rtol=1e-6, atol=2 * 1e-5)

    # NOTE(review): method names say 'pca' but these exercise PLSRegression.
    # PLS regressor n_components two
    def test_pca_converter_two(self):
        self._fit_model_pls_regressor(PLSR(n_components=2))

    # PLS regressor n_components ten, no scaling
    def test_pca_converter_two_no_scale(self):
        self._fit_model_pls_regressor(PLSR(n_components=10, scale=False))
if __name__ == "__main__":
unittest.main()
| 3,499 | 2,413 | 124 |
2bf67033e801389361452d8f15036e0ec59a7c97 | 47,025 | py | Python | OccupancyGrid/OccupancyGridTest.py | DavidLSmyth/DroneCoordinatedSearch | 99173ef63c726049596fb79eda168b4fc3a550a8 | [
"MIT"
] | 1 | 2018-12-26T04:13:06.000Z | 2018-12-26T04:13:06.000Z | OccupancyGrid/OccupancyGridTest.py | DavidLSmyth/DroneCoordinatedSearch | 99173ef63c726049596fb79eda168b4fc3a550a8 | [
"MIT"
] | null | null | null | OccupancyGrid/OccupancyGridTest.py | DavidLSmyth/DroneCoordinatedSearch | 99173ef63c726049596fb79eda168b4fc3a550a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 2 13:11:39 2018
@author: 13383861
"""
import sys
import enum
sys.path.append('.')
sys.path.append('..')
import requests
import os
import time
from collections import namedtuple
import copy
import random
import typing
import functools
import json
import threading
import pathlib
import AirSimInterface.client as airsim
from AirSimInterface.types import *
import numpy as np
#%%
#%%
class UE4Coord:
    '''A coordinate which represents an objects location in an unreal engine environment'''
    # NOTE(review): the original method bodies were missing from this chunk;
    # the members below are reconstructed from how UE4Coord is used in this
    # file (constructed as UE4Coord(x, y), compared for equality and stored
    # in sets, i.e. hashable). Confirm against the full source.
    def __init__(self, x, y, z = 0):
        self.x = x
        self.y = y
        self.z = z

    def __add__(self, other):
        return UE4Coord(self.x + other.x, self.y + other.y, self.z + other.z)

    def __sub__(self, other):
        return UE4Coord(self.x - other.x, self.y - other.y, self.z - other.z)

    def __eq__(self, other):
        return isinstance(other, UE4Coord) and (self.x, self.y, self.z) == (other.x, other.y, other.z)

    def __hash__(self):
        # Must agree with __eq__ so coords can live in sets/dict keys.
        return hash((self.x, self.y, self.z))

    def __repr__(self):
        return 'UE4Coord({}, {}, {})'.format(self.x, self.y, self.z)
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 10, 6)
assert set(test_grid.get_neighbors(UE4Coord(2,2), 1.9)) == set([UE4Coord(1,2), UE4Coord(2,1), UE4Coord(2,3), UE4Coord(3,2), UE4Coord(3,3), UE4Coord(1,3), UE4Coord(1,1), UE4Coord(3,1)])
sensor_reading = lambda image_loc: get_highest_pred(get_image_response(image_loc))
#assert get_highest_pred(get_image_response('C:/Users/13383861/Downloads/test_train.jpg'))[0] > 0.6
#test
#an agent precept consists of a grid location, a detection probability, a timestep, a timestamp and the observer name
AgentObservation = namedtuple('obs_location', ['grid_loc','probability','timestep', 'timestamp', 'observer_name'])
#A belief map component consists of a grid location and a likelihood
BeliefMapComponent = namedtuple('belief_map_component', ['grid_loc','likelihood'])
#%%
#Code and test for class which manages agent observations in a set grid
class AgentObservations():
    '''A class which records agent observations in a UE4Grid.

    NOTE(review): __init__, record_agent_observation and the position-based
    accessors were missing from this chunk; they are reconstructed from the
    module-level tests below, which exercise exactly these members.
    '''
    def __init__(self, grid):
        # Grid in which observations are made.
        self.grid = grid
        # Chronological list of AgentObservation records.
        self.observations = []

    def record_agent_observation(self, new_observation):
        '''Records a single agent observation.'''
        self.observations.append(new_observation)

    def get_most_recent_observation(self, observations = None):
        '''Returns the most recent observation in a list of observations.

        Defaults to all recorded observations. (The mutable default list was
        replaced with None; an empty/None argument behaves identically.)
        '''
        if not observations:
            observations = self.observations
        return sorted(observations, key = lambda observation: observation.timestamp, reverse = True)[0]

    def get_most_recent_observation_at_position(self, grid_loc):
        '''Returns the most recent observation made at the given grid location.'''
        return self.get_most_recent_observation(self.get_all_observations_at_position(grid_loc))

    def get_all_observations_at_position(self, grid_loc):
        '''Returns all recorded observations at the given grid location, in insertion order.'''
        return [obs for obs in self.observations if obs.grid_loc == grid_loc]
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 10, 6)
test_agent_observations = AgentObservations(test_grid)
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent1')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent1')
obs3 = AgentObservation(UE4Coord(0,1),0.9, 3, 1237, 'agent1')
test_agent_observations.record_agent_observation(obs1)
test_agent_observations.record_agent_observation(obs2)
test_agent_observations.record_agent_observation(obs3)
assert test_agent_observations.get_most_recent_observation() == obs3
assert test_agent_observations.get_most_recent_observation_at_position(UE4Coord(0,0)) == obs2
assert test_agent_observations.get_all_observations_at_position(UE4Coord(0,1)) == [obs3]
def calc_posterior(observation, prior):
    '''Single-step Bayes update of a binary-hypothesis belief.

    Replaces the original lambda assignment (PEP 8 E731) and computes the
    shared numerator once; the arithmetic is otherwise identical.
    '''
    numerator = prior * observation
    return numerator / (numerator + (1 - prior) * (1 - observation))

assert abs(calc_posterior(0.5, 0.2) - 0.2) <= 0.001
assert abs(calc_posterior(0.8, 0.2) - 0.5) <= 0.001
#%%
#Calculation of posterior given prior and observations
def get_posterior_given_obs(observations:list, prior):
    '''For a sequence of observations calculates the posterior probability given a prior.

    Each observation probability is folded into the running prior with the
    binary Bayes update (the order of observations does not affect the result).
    '''
    for obs_prob in observations:
        evidence = prior * obs_prob + (1 - prior) * (1 - obs_prob)
        prior = (prior * obs_prob) / evidence
    return prior
# Symmetric observations around 0.5 must leave the prior unchanged.
assert abs(get_posterior_given_obs([0.5,0.2,0.8], 0.5) - 0.5) <= 0.001
#%%
####################### Belief map and tests #######################
#A belief map has an agent name (beliefs belong to an agent) consists of belief map components
#Leave this as namedtuple if don't need to define methods
#maybe this should go in constructor and make regular class
def create_belief_map(grid, agent_name, prior=None):
    '''Creates an occupancy belief map for a given observer and a set of grid locations.

    Prior is a mapping of grid_points to probabilities; when omitted a uniform
    (uninformative) prior over the grid is used.

    Note: the prior default was a mutable ``{}`` (shared across calls); the
    ``None`` sentinel used here has identical falsy behaviour without the
    mutable-default pitfall.
    '''
    if not prior:
        #use uniform uninformative prior
        prior = {grid_point: 1/len(grid.get_grid_points()) for grid_point in grid.get_grid_points()}
    return BeliefMap(agent_name, grid, [BeliefMapComponent(grid_point, prior[grid_point]) for grid_point in grid.get_grid_points()], prior)
#return {grid_locs[i]: ObsLocation(grid_locs[i],prior[i], 0, time.time(), observer_name) for i in range(len(grid_locs))}
# Inline tests for BeliefMap: uniform prior, single update, and
# order-independence of repeated updates at the same cell.
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 10, 6)
test_map = create_belief_map(test_grid, "agent1")
assert test_map.get_belief_map_component(UE4Coord(0,0)) == BeliefMapComponent(UE4Coord(0,0), 1/len(test_grid.get_grid_points()))
# Index 5 follows from the boustrophedon ("snake") ordering of grid points.
assert test_map._get_observation_grid_index(UE4Coord(0,0)) == 5
test_map.update_from_prob(UE4Coord(0,0), 0.9)
assert 0.132<test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.133
#prove order in which observations come in doesn't matter
obs1 = AgentObservation(UE4Coord(0,0),0.4, 1, 1234, 'agent1')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent1')
obs3 = AgentObservation(UE4Coord(0,0),0.93, 3, 1237, 'agent1')
test_map = create_belief_map(test_grid, "agent1")
test_map.update_from_observation(obs1)
assert 0.0111 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.0112
test_map.update_from_observation(obs2)
assert 0.025688 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.0256881
test_map.update_from_observation(obs3)
assert 0.2594 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.2595
#now check observing in a different order gives same result
test_map = create_belief_map(test_grid, "agent1")
test_map.update_from_observation(obs2)
test_map.update_from_observation(obs1)
test_map.update_from_observation(obs3)
assert 0.2594 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.2595
#now check observing in a different order gives same result
test_map = create_belief_map(test_grid, "agent1")
test_map.update_from_observation(obs3)
test_map.update_from_observation(obs2)
test_map.update_from_observation(obs1)
assert 0.2594 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.2595
####################### Belief map and tests #######################
#%%
####################### Observation Set Manager and tests #######################
class ObservationSetManager:
    '''
    Manages the sensor measurements of other agents. Observations don't have to be taken at discrete locations -
    the continuous position can be recorded and the grid location inferred from this.
    Calculating a belief map from these sets of observations requires a grid so that each recorded observation can
    be assigned to a grid cell.
    '''
    # NOTE(review): __init__, update_rav_obs_set and get_all_observations are not
    # visible in this chunk; the methods below rely on them existing elsewhere.
    #really strange behaviour: using this initialises the class with observations that don't exist... self.observation_sets[rav_name] = set()

    def init_rav_observation_set(self, rav_name, observations = None):
        '''initialise a new list of observations for a RAV'''
        self.observation_sets[rav_name] = observations if observations else set()

    def get_observation_set(self, rav_name) -> typing.Set[AgentObservation]:
        '''Get list of observations from a RAV'''
        return self.observation_sets[rav_name]

    def update_from_other_obs_list_man(self, other):
        '''Might need to check that the timestamps must be different...'''
        for other_rav_name, other_observations in other.observation_sets.items():
            self.update_rav_obs_set(other_rav_name, other_observations)

    def get_discrete_belief_map_from_observations(self, grid):
        '''Given a discrete grid, returns a belief map containing the likelihood of the source
        being contained in each grid segment'''
        #ToDo:
        #Currently observations must be made at grid locations - instead compute which observations are made
        #in each grid location and then compute the belief map
        fused_map = create_belief_map(grid, self.agent_name)
        fused_map.update_from_observations(self.get_all_observations())
        return fused_map

    def get_continuous_belief_map_from_observations(self, grid_bounds):
        '''Given grid bounds, returns a function which returns the likelihood given the
        continuous position of the RAV. I.E. transform the discrete PDF as above to a
        continuous one.'''
        pass
# Inline tests for ObservationSetManager: merging per-agent observation sets
# and deduplication of identical observations.
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 6, 5)
test_ObservationSetManager = ObservationSetManager('agent1')
test_ObservationSetManager.observation_sets
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent2')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent2')
obs3 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
obs4 = AgentObservation(UE4Coord(0,1),0.9, 3, 1238, 'agent1')
test_ObservationSetManager.init_rav_observation_set('agent2', set([obs1, obs2]))
test_ObservationSetManager.observation_sets
test_ObservationSetManager.update_rav_obs_set('agent2', set([obs3]))
test_ObservationSetManager.get_all_observations()
assert test_ObservationSetManager.get_observation_set('agent2') == set([obs1, obs2, obs3])
assert test_ObservationSetManager.get_observation_set('agent1') == set([])
test_ObservationSetManager.update_rav_obs_set('agent1', set([obs4]))
assert not test_ObservationSetManager.get_all_observations().difference(set([obs1, obs2, obs3, obs4]))
###################################################
# Check that duplicate observations aren't added
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 6, 5)
test1_ObservationSetManager = ObservationSetManager('agent1')
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent2')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent2')
obs3 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
test1_ObservationSetManager.update_rav_obs_set('agent2',[obs1, obs2, obs3])
test1_ObservationSetManager.observation_sets
#test that duplicate measurements won't occur
obs4 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
test1_ObservationSetManager.update_rav_obs_set('agent2', set([obs4]))
assert test1_ObservationSetManager.get_observation_set('agent2') == set([obs1, obs2, obs3])
assert abs(test1_ObservationSetManager.get_discrete_belief_map_from_observations(test_grid).get_belief_map_component(UE4Coord(0,0)).likelihood - 0.074468) < 0.0001
assert abs(test1_ObservationSetManager.get_discrete_belief_map_from_observations(test_grid).get_belief_map_component(UE4Coord(0,1)).likelihood - 0.395833) < 0.0001
#%%
######################### Action selection strategies #########################
def get_move_from_belief_map_epsilon_greedy(belief_map: BeliefMap, current_grid_loc: UE4Coord, epsilon: float, eff_radius = None) -> UE4Coord:
    '''Epsilon greedy move selection.

    With probability ``epsilon`` a uniformly random neighbouring grid location is
    chosen (exploration); otherwise the neighbour with the highest likelihood in
    the belief map is chosen (exploitation). Returns ``current_grid_loc`` when no
    neighbour lies within ``eff_radius``.
    '''
    #assume grid is regular, get all neighbors that are within max(lat_spacing, long_spacing)
    #assuming that lat_spacing < 2* lng_spacing and visa versa
    if not eff_radius:
        eff_radius = max(belief_map.get_grid().get_lat_spacing(), belief_map.get_grid().get_lng_spacing())
    #a list of UE4Coord
    neighbors = belief_map.get_grid().get_neighbors(current_grid_loc, eff_radius)
    #don't move to new position if can't find any neighbors to move to
    if not neighbors:
        return current_grid_loc
    if random.random() < epsilon:
        #epsilon-random exploration
        return random.choice(neighbors)
    #otherwise choose the move with the highest likelihood.
    #BUGFIX: the original left return_move unbound (UnboundLocalError) whenever no
    #neighbour had likelihood > 0; max() always yields a neighbour and, on ties,
    #returns the first one - matching the original's strict '>' comparison.
    return max(neighbors, key=lambda neighbor: belief_map.get_belief_map_component(neighbor).likelihood)
# Inline test: with epsilon=0 the move must be purely greedy, i.e. the
# neighbour of (1,1) carrying the highest likelihood - here (0,1).
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 6, 5)
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent2')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent2')
obs3 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
#(grid, agent_name, prior = {})
obs_man = ObservationSetManager("agent1")
obs_man.update_rav_obs_set('agent2', [obs1, obs2, obs3])
belief_map = obs_man.get_discrete_belief_map_from_observations(test_grid)
assert get_move_from_belief_map_epsilon_greedy(belief_map, UE4Coord(1,1), 0.0, 1.8) == UE4Coord(0,1)
#%%
#everything that could be important for measuring agent performance/progress
AgentAnalysisState = namedtuple('AgentAnalysisState', ['timestep',
'timestamp',
'rav_name',
'position_intended',
'position_measured',
#maybe add distance travelled for current timestep
'total_dist_travelled',
'remaining_batt_cap',
'prop_battery_cap_used',
'sensor_reading',
#is it necessary to record the grid along with the likelihoods in case want the grid to
#dynamically change? For now assume grid is fixed and in 1-1 correspondance with likelihoods
#'occ_grid_likelihoods',
#which other agents did the agent coordinate with on this timestep
'coordinated_with_other_names'])
#metadata related to the agent - details about grid its operating in, prior that was worked with, to be updated...
AgentAnalysisMetadata= namedtuple("MissionAnalysisData", ["agents_used", "grid_origin", 'grid_lat_spacing',
'grid_lng_spacing','lng_lim', 'lat_lim',
'no_lat_points', 'no_lng_points', 'prior'])
def get_agent_state_for_analysis(agent_analysis_state: AgentAnalysisState):
    '''Returns elements of agent state that are important for analysis that can be written to csv. Position, battery cap., total_dist_travelled, battery_consumed, occ_grid'''
    #csv_headers = ['timestep', 'timestamp', 'rav_name', 'position_intended', 'position_measured', 'total_dist_travelled', 'remaining_batt_cap', 'prop_battery_cap_used', 'sensor_reading', 'occ_grid_locs', 'occ_grid_likelihoods', 'coordinated_with_other_bool', 'coordinated_with_other_names']
    #return str(agent_analysis_state._fields).replace(')','').replace('(','').replace("'", '')
    # NOTE(review): _get_agent_state_for_analysis is not defined in this chunk;
    # judging by the assert below it presumably joins the fields into a
    # comma-separated row - confirm against the full module.
    return _get_agent_state_for_analysis(**agent_analysis_state._asdict())
def get_agent_observation(agent_observation: AgentObservation):
    '''Returns elements of agent state that are important for analysis that can be written to csv. Position, battery cap., total_dist_travelled, battery_consumed, occ_grid'''
    #csv_headers = ['timestep', 'timestamp', 'rav_name', 'position_intended', 'position_measured', 'total_dist_travelled', 'remaining_batt_cap', 'prop_battery_cap_used', 'sensor_reading', 'occ_grid_locs', 'occ_grid_likelihoods', 'coordinated_with_other_bool', 'coordinated_with_other_names']
    #return str(agent_analysis_state._fields).replace(')','').replace('(','').replace("'", '')
    # NOTE(review): _get_agent_observation is not defined in this chunk -
    # presumably the serialisation counterpart for observations; confirm.
    return _get_agent_observation(**agent_observation._asdict())
#AgentObservation = namedtuple('obs_location', ['grid_loc','probability','timestep', 'timestamp', 'observer_name'])
# Inline tests for the CSV serialisation helpers above.
testAgentAnalysisState = AgentAnalysisState(2, 100, 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test')
testAgentAnalysisState._asdict()
assert get_agent_state_for_analysis(testAgentAnalysisState) == "2,100,test,test,test,test,test,test,test,test"
# NOTE(review): calc_likelihood is not defined in this chunk - from the assert it
# is presumably the product of the given probabilities.
assert calc_likelihood([0.1,0.1,0.2,0.4]) == 0.1*0.1*0.2*0.4
def create_belief_map_from_observations(grid: UE4Grid, agent_name: str, agent_belief_map_prior: typing.Dict[UE4Coord, float], agent_observations: typing.Set[AgentObservations]):
    '''Since the calculation of posterior likelihood is based only on prior and observations (independent of order), updating a belief map component from measurements can be done
    by the following update formula:
            prior * product(over all i observations) observation_i
    ----------------------------------------------------------------------------------------------------------------------
    prior * product(over all i observations) observation_i + (1-prior) * product(over all i observations) (1-observation_i)
    '''
    #BUGFIX: the original called grid.get_grid() - UE4Grid has no such method
    #(that accessor lives on BeliefMap) - and never returned the map it built.
    return_bel_map = create_belief_map(grid, agent_name, agent_belief_map_prior)
    #update belief map based on all observations...
    return_bel_map.update_from_observations(agent_observations)
    return return_bel_map
#grid, agent_name, prior = {}
#update_bel_map(update_bel_map(test_map, 0.5, 3), 0.5,3)
class BaseROCSAFEAgent:
    '''Base class for all agents related to the ROCSAFE project; contains minimal
    functionality. Designed with the main goal in mind of being able to compare
    and measure agent performance in a consistent way.'''
    pass
class BaseGridAgent:
    '''Base class for all agents that use a grid representation of the environment;
    contains minimal functionality. Designed with the main goal in mind of being
    able to compare and measure agent performance in a consistent way.'''
    pass
#create a base agent class
#create a base agent class
class OccupancyGridAgent():
    '''agent that moves around an occupancy grid in order to locate a source of radiation. Uses a rav agent'''
    # Directory where photos captured at each visited grid location are stored.
    ImageDir = 'D:/ReinforcementLearning/DetectSourceAgent/Data/SensorData'
    #stores analysis csvs. Each csv contains agent state at each timestep
    AgentStateDir = "D:/ReinforcementLearning/DetectSourceAgent/Analysis"
    #stores observation json
    ObservationDir = "D:/ReinforcementLearning/DetectSourceAgent/Observations"
    # Directory of canned images used when mocking the sensor.
    MockedImageDir = 'D:/ReinforcementLearning/DetectSource/Data/MockData'
    #break apart this into components, one which manages actuation/sensing, one which manages/represents state, etc.
    # NOTE(review): __init__ and several helpers used below (move_agent,
    # record_image, can_coord_with_other, _read_observations, update_agent_pos_measured,
    # update_state_for_analysis_file, update_observations_file, ...) are not
    # visible in this chunk - presumably defined in the full class; confirm.
    def __eq__(self, other):
        '''This agent is the same as another agent if names are the same. Refine this later'''
        return self.agent_name == other.agent_name
    def get_available_actions(self, state):
        '''Returns actions available to RAV based on its current state'''
        pass
    def get_belief_map_after_t_timesteps(self, t):
        '''Calculates what the agent's belief map would be after t timesteps'''
        pass
    def get_agent_state_for_analysis(self):
        '''AgentAnalysisState = namedtuple('AgentAnalysisState', ['timestep','timestamp','rav_name',
                                        'position_intended','position_measured',
                                        'total_dist_travelled','remaining_batt_cap',
                                        'prop_battery_cap_used',
                                        'sensor_reading',
                                        #which other agents did the agent coordinate with on this timestep
                                        'coordinated_with_other_names'])'''
        # Snapshot every telemetry field for this timestep and serialise it for
        # the analysis CSV.
        return get_agent_state_for_analysis(AgentAnalysisState(self.timestep, time.time(), self.get_agent_name(),
                                            self.current_pos_intended, self.current_pos_measured,
                                            self.total_dist_travelled, self.rav.getRemainingBatteryCap(),
                                            self.prop_battery_cap_used,
                                            self.current_reading,
                                            #'[' + ','.join(map(lambda loc: loc.likelihood, self.current_belief_map.get_belief_map_components())) + ']',
                                            #self.get_grid_locs_likelihoods_lists()[1],
                                            self.others_coordinated_this_timestep))
    #coordination strategy:
    #agent will write all measurements in its possession to a file at each timestep. When communication requested,
    #other agent will read all measurements from the file.
    def coord_with_other(self, other_rav_name):
        '''coordinate with other rav by requesting their measurement list and sending our own measurement list first write own measurement list to file'''
        if self.can_coord_with_other(other_rav_name):
            # NOTE(review): _read_observations is called with `self` as its
            # argument - looks like it should receive a file location or the
            # other agent's name; confirm against the full implementation.
            observations_from_other_agents = self._read_observations(self)
            print('read observations from other agents: {}'.format(observations_from_other_agents))
            for observations_from_other_agent in observations_from_other_agents.values():
                self.observation_manager.update_rav_obs_set(observations_from_other_agent)
            #this only updates observations not seen previously since a set is maintained of all seen observations
            self.current_belief_map.update_from_observations(self.observation_manager.get_all_observations())
            self.others_coordinated_this_timestep.append(other_rav_name)
            self.coordinated_this_timestep = True
    def _write_observations(self, file_loc):
        '''writes agent measurements to file to be read by other agent'''
        # Appends (mode 'a'); each call adds another JSON-encoded string dump of
        # the full observation_sets repr.
        with open(file_loc, 'a') as f:
            json.dump(str(self.observation_manager.observation_sets), f)
    def explore_timestep(self):
        '''Gets rav to explore next timestep'''
        #grid: UE4Grid, agent_name: str, agent_belief_map_prior: typing.Dict[UE4Coord, float], agent_observations: typing.List[AgentObservations]
        # 1. Pick the next grid location epsilon-greedily and fly there.
        next_pos = self.move_from_bel_map_callable(self.current_belief_map, self.current_pos_intended, self.epsilon)
        print("self.current_pos_intended: {}".format(self.current_pos_intended ))
        self.move_agent(next_pos)
        self.current_pos_intended = next_pos
        self.current_pos_measured = self.rav.getMultirotorState(vehicle_name = self.agent_name).kinematics_estimated.position
        self.update_agent_pos_measured()
        #record image at location
        self.record_image()
        #get sensor reading, can be done on separate thread
        # 2. Classify the captured photo to obtain a detection probability.
        print('getting sensor reading for {}'.format(OccupancyGridAgent.ImageDir + "/photo_" + str(self.timestep) + '.png'))
        self.current_reading = float(sensor_reading(OccupancyGridAgent.ImageDir + "/photo_" + str(self.timestep) + '.png')[0])
        #mocked sensor reading
        #self.current_reading = float(sensor_reading("D:/ReinforcementLearning/DetectSourceAgent/Data/MockData/test_train.jpg")[0])
        print('sensro reading: {}'.format(self.current_reading))
        # 3. Fold the reading into the belief map and the observation manager.
        print("updating belief map position {} from {}".format(self.current_pos_intended, self.current_belief_map.get_belief_map_component(self.current_pos_intended)))
        self.current_belief_map.update_from_prob(self.current_pos_intended, self.current_reading)
        print(" to {}".format(self.current_belief_map.get_belief_map_component(self.current_pos_intended)))
        #['grid_loc','probability','timestep', 'timestamp', 'observer_name'])
        newest_observation = AgentObservation(self.current_pos_intended, self.current_reading, self.timestep, time.time(), self.agent_name)
        self.observation_manager.update_rav_obs_set(self.agent_name, [AgentObservation(self.current_pos_intended, self.current_reading, self.timestep, time.time(), self.agent_name)])
        #self._write_observations(self.observations_file_loc)
        # 4. Persist per-timestep analysis state and the new observation.
        self.update_state_for_analysis_file(self.agent_state_file_loc, self.get_agent_state_for_analysis())
        print("Observation made: {}".format(newest_observation))
        self.update_observations_file(self.observations_file_loc, newest_observation)
#if agent is in range, communicate
if __name__ != '__main__':
    # NOTE(review): inverted guard - this block runs on *import*, not when the
    # file is executed as a script. It looks like ad-hoc smoke-test code
    # (MockRavForTesting is not defined in this chunk); confirm it is intentional.
    grid = UE4Grid(20, 15, UE4Coord(0,0), 120, 150)
    #grid, move_from_bel_map_callable, height, epsilon, multirotor_client, agent_name, performance_csv_path: "file path that agent can write performance to", prior = []
    occupancy_grid_agent = OccupancyGridAgent(grid, get_move_from_belief_map_epsilon_greedy, -12, 0.2, MockRavForTesting(), 'agent1')
    #write some tests for agent here
    occupancy_grid_agent.current_pos_intended = UE4Coord(0,0)
    occupancy_grid_agent.current_pos_measured = None
    occupancy_grid_agent.current_reading = 0.1
    occupancy_grid_agent.get_agent_state_for_analysis()
    occupancy_grid_agent.explore_timestep()
##################### Functions that can deal with the initialization of RAVs ####################
#%%
#%%
if __name__ == '__main__':
    # Script entry point: spin up the RAV(s) in AirSim, run the exploration
    # loop for a fixed number of timesteps, then tear the vehicle(s) down.
    grid = UE4Grid(20, 15, UE4Coord(0,0), 120, 150)
    rav_names = ["Drone1"]
    #, "Drone2"]
    client = airsim.MultirotorClient()
    # NOTE(review): create_rav/destroy_rav are not defined in this chunk -
    # presumably AirSim vehicle lifecycle helpers defined elsewhere; confirm.
    for rav_name in rav_names:
        create_rav(client, rav_name)
    #assert client.getVehiclesInRange("Drone1", ["Drone2"],1000000) == ["Drone2"]
    #print('vehicles in range: ', client.getVehiclesInRange("Drone1", ["Drone2"] ,1000000))
    #rav1.simShowPawnPath(False, 1200, 20)
    #grid shared between rav
    #grid, move_from_bel_map_callable, height, epsilon, multirotor_client, agent_name, performance_csv_path: "file path that agent can write performance to", prior = []
    #for grid_coord_index in range(1,len(grid.get_grid_points())):
    #    client.showPlannedWaypoints(grid.get_grid_points()[grid_coord_index-1].x_val,
    #                            grid.get_grid_points()[grid_coord_index-1].y_val,
    #                            grid.get_grid_points()[grid_coord_index-1].z_val,
    #                            grid.get_grid_points()[grid_coord_index].x_val,
    #                            grid.get_grid_points()[grid_coord_index].y_val,
    #                            grid.get_grid_points()[grid_coord_index].z_val,
    #                            lifetime = 200)
    # NOTE(review): this constructor call passes an extra UE4Coord(0,0) argument
    # compared with the call in the import-time block above - one of the two
    # presumably targets an older signature; confirm.
    occupancy_grid_agent1 = OccupancyGridAgent(grid, UE4Coord(0,0), get_move_from_belief_map_epsilon_greedy, -12, 0.3, client, "Drone1")
    occupancy_grid_agent1.explore_t_timesteps(20)
    #occupancy_grid_agent2 = OccupancyGridAgent(grid, UE4Coord(20,15),get_move_from_belief_map_epsilon_greedy, -12, 0.3, client, "Drone2")
    #occupancy_grid_agent1.explore_t_timesteps(10)
    #p1 = threading.Thread(target = run_t_timesteps, args = (occupancy_grid_agent1,))
    #p2 = threading.Thread(target = run_t_timesteps, args = (occupancy_grid_agent2,))
    #p1.start()
    #p2.start()
    #p1.join()
    #p2.join()
    # showPlannedWaypoints(self, x1, y1, z1, x2, y2, z2, thickness=50, lifetime=10, debug_line_color='red', vehicle_name = '')
    destroy_rav(client, "Drone1")
    #destroy_rav(client, "Drone2")
    #for grid_loc in grid_locs:
    ##rav.moveOnPathAsync(list(map(lambda x: x.to_vector3r(),grid_locs)), 8)
    #rav.moveToPositionAsync(0,0, -20, 5).join()
    #print('rav position: {}'.format(rav.getMultirotorState().kinematics_estimated.position))
    #responses = rav.simGetImages([ImageRequest("3", ImageType.Scene)])
    #response = responses[0]
    #filename = OccupancyGridAgent.ImageDir + "/photo_" + str(1)
    #airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)
    # grid, move_from_bel_map_callable, height, epsilon, multirotor_client, prior = []
    #pos, likelihood = OccupancyGridAgent(grid, get_move_from_bel_map, -12, 0.3, rav, "Drone1").explore_t_timesteps(125)
    #print('determined {} as source with likelihood {}'.format(pos, likelihood))
    #rav.moveToPositionAsync(pos.x_val, pos.y_val, -5, 3).join()
| 47.452069 | 291 | 0.677172 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 2 13:11:39 2018
@author: 13383861
"""
import sys
import enum
sys.path.append('.')
sys.path.append('..')
import requests
import os
import time
from collections import namedtuple
import copy
import random
import typing
import functools
import json
import threading
import pathlib
import AirSimInterface.client as airsim
from AirSimInterface.types import *
import numpy as np
#%%
class Vector3r:
    '''A 3-component float vector with elementwise arithmetic, dot/cross products
    and conversion to a numpy array.'''
    x_val = 0.0
    y_val = 0.0
    z_val = 0.0
    def __init__(self, x_val = 0.0, y_val = 0.0, z_val = 0.0):
        self.x_val = x_val
        self.y_val = y_val
        self.z_val = z_val
    @staticmethod
    def nanVector3r():
        '''Sentinel vector with all components NaN.'''
        return Vector3r(np.nan, np.nan, np.nan)
    def __str__(self):
        return f"Vector3r({self.x_val}, {self.y_val}, {self.z_val})"
    def __add__(self, other):
        return Vector3r(self.x_val + other.x_val, self.y_val + other.y_val, self.z_val + other.z_val)
    def __sub__(self, other):
        return Vector3r(self.x_val - other.x_val, self.y_val - other.y_val, self.z_val - other.z_val)
    def __truediv__(self, other):
        # isinstance replaces the original np.sctypes membership test:
        # np.sctypes was removed in NumPy 2.0; np.integer/np.floating cover the
        # same numpy scalar types, int/float the Python ones.
        if isinstance(other, (int, float, np.integer, np.floating)):
            return Vector3r( self.x_val / other, self.y_val / other, self.z_val / other)
        else:
            raise TypeError('unsupported operand type(s) for /: %s and %s' % ( str(type(self)), str(type(other))) )
    def __mul__(self, other):
        if isinstance(other, (int, float, np.integer, np.floating)):
            #BUGFIX: the original returned self.z_val unscaled.
            return Vector3r(self.x_val*other, self.y_val*other, self.z_val*other)
        else:
            raise TypeError('unsupported operand type(s) for *: %s and %s' % ( str(type(self)), str(type(other))) )
    def dot(self, other):
        '''Scalar (dot) product with another Vector3r.'''
        if type(self) == type(other):
            return self.x_val*other.x_val + self.y_val*other.y_val + self.z_val*other.z_val
        else:
            raise TypeError('unsupported operand type(s) for \'dot\': %s and %s' % ( str(type(self)), str(type(other))) )
    def cross(self, other):
        '''Vector (cross) product with another Vector3r.'''
        if type(self) == type(other):
            #BUGFIX: the original passed the bound method object (missing call
            #parentheses on other.to_numpy_array), which np.cross rejects.
            cross_product = np.cross(self.to_numpy_array(), other.to_numpy_array())
            return Vector3r(cross_product[0], cross_product[1], cross_product[2])
        else:
            raise TypeError('unsupported operand type(s) for \'cross\': %s and %s' % ( str(type(self)), str(type(other))) )
    def get_length(self):
        '''Euclidean norm of the vector.'''
        return ( self.x_val**2 + self.y_val**2 + self.z_val**2 )**0.5
    def distance_to(self, other):
        '''Euclidean distance to another Vector3r.'''
        return ( (self.x_val-other.x_val)**2 + (self.y_val-other.y_val)**2 + (self.z_val-other.z_val)**2 )**0.5
    def to_Quaternionr(self):
        # Quaternionr comes from the AirSim types module imported at file level.
        return Quaternionr(self.x_val, self.y_val, self.z_val, 0)
    def to_numpy_array(self):
        return np.array([self.x_val, self.y_val, self.z_val], dtype=np.float32)
#%%
class UE4Coord:
    '''A coordinate which represents an objects location in an unreal engine environment'''
    def __init__(self, x_val, y_val, z_val = 0):
        # Coerce each component to float, letting float() raise for values
        # that cannot be interpreted as numbers (same behaviour as the
        # original try/except-and-reraise blocks).
        self.x_val = x_val if isinstance(x_val, float) else float(x_val)
        self.y_val = y_val if isinstance(y_val, float) else float(y_val)
        self.z_val = z_val if isinstance(z_val, float) else float(z_val)
    def to_vector3r(self):
        '''Convert to the AirSim Vector3r representation.'''
        return Vector3r(self.x_val, self.y_val, self.z_val)
    def __add__(self, other):
        return UE4Coord(self.x_val + other.x_val, self.y_val + other.y_val, self.z_val + other.z_val)
    def __sub__(self, other):
        return UE4Coord(self.x_val - other.x_val, self.y_val - other.y_val, self.z_val - other.z_val)
    def mul(self, int):
        # Not implemented in the original either; kept as a stub.
        pass
    def get_dist_to_other(self, other):
        '''Euclidean distance to another UE4Coord.'''
        return sum((a - b)**2 for a, b in ((self.x_val, other.x_val),
                                           (self.y_val, other.y_val),
                                           (self.z_val, other.z_val)))**0.5
    def __eq__(self, other):
        return (self.x_val, self.y_val, self.z_val) == (other.x_val, other.y_val, other.z_val)
    def __str__(self):
        return 'UE4Coord({x_val}, {y_val}, {z_val})'.format(x_val = self.x_val, y_val = self.y_val, z_val = self.z_val)
    def __repr__(self):
        return 'UE4Coord({x_val}, {y_val}, {z_val})'.format(x_val = self.x_val, y_val = self.y_val, z_val = self.z_val)
    def __hash__(self):
        # Hash follows repr so that numerically equal coords hash equally.
        return hash(repr(self))
class UE4GridFactory:
    '''Builds the list of UE4Coord grid points, laid out in a boustrophedon
    ("snake") sweep: alternate columns are traversed in opposite y directions.'''
    def __init__(self, lat_spacing, lng_spacing, origin, x_lim=None, y_lim=None, no_x=None, no_y=None):
        self.lat_spacing = lat_spacing
        self.lng_spacing = lng_spacing
        self.origin = origin
        if x_lim and y_lim:
            self.x_lim, self.y_lim = x_lim, y_lim
            self.create_grid_with_limits()
        if no_x and no_y:
            self.no_x, self.no_y = no_x, no_y
            self.create_grid_with_no_points()
        if not all([x_lim, y_lim]) and not all([no_x, no_y]):
            raise Exception('Either give a limit to the grid or an x and y spacing')
    def create_grid_with_limits(self):
        '''Derive point counts from the spatial limits, then build the grid.'''
        self.no_x = int(self.x_lim / self.lng_spacing)
        self.no_y = int(self.y_lim / self.lat_spacing)
        self.create_grid_with_no_points()
    def create_grid_with_no_points(self):
        '''Populate self.grid with no_x * no_y points in snake order.'''
        self.grid = []
        ascending = False  # first column runs top-down, matching the original 'backtrack' flag
        for col in range(self.no_x):
            rows = range(self.no_y) if ascending else range(self.no_y - 1, -1, -1)
            for row in rows:
                self.grid.append(self.origin + UE4Coord(col * self.lng_spacing, row * self.lat_spacing))
            ascending = not ascending
    def get_grid_points(self):
        '''Return the generated list of grid points.'''
        return self.grid
class UE4Grid:
    '''A fixed rectangular grid of UE4Coord points with accessor methods and
    radius-based neighbour lookup.'''
    def __init__(self, lat_spacing, lng_spacing, origin, x_lim=None, y_lim=None, no_x=None, no_y=None):
        # Either spatial limits or explicit point counts must be supplied.
        if not all([x_lim, y_lim]) and not all([no_x, no_y]):
            raise Exception('Either give a limit to the grid or an x and y spacing')
        self.origin = origin
        self.grid_points = UE4GridFactory(lat_spacing, lng_spacing, origin, x_lim, y_lim, no_x, no_y).get_grid_points()
        self.lat_spacing = lat_spacing
        self.lng_spacing = lng_spacing
        self.no_x = no_x if no_x else int(x_lim/lng_spacing)
        self.no_y = no_y if no_y else int(y_lim/lat_spacing)
    def get_grid_points(self):
        return self.grid_points
    def get_lat_spacing(self):
        return self.lat_spacing
    def get_lng_spacing(self):
        return self.lng_spacing
    def get_no_points_x(self):
        return self.no_x
    def get_no_points_y(self):
        return self.no_y
    def get_neighbors(self, grid_loc, radius):
        '''Gets neighbors of grid_loc within radius.'''
        return [candidate for candidate in self.get_grid_points()
                if candidate != grid_loc and candidate.get_dist_to_other(grid_loc) <= radius]
# Sanity check: all 8 surrounding cells lie within radius 1.9 of (2,2).
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 10, 6)
assert set(test_grid.get_neighbors(UE4Coord(2,2), 1.9)) == set([UE4Coord(1,2), UE4Coord(2,1), UE4Coord(2,3), UE4Coord(3,2), UE4Coord(3,3), UE4Coord(1,3), UE4Coord(1,1), UE4Coord(3,1)])
def get_image_response(image_loc: str):
    '''POST the image at image_loc to the Azure Custom Vision prediction endpoint
    and return the decoded JSON response.

    SECURITY NOTE(review): the Prediction-Key below is a credential hard-coded in
    source; it should be moved to configuration/an environment variable rather
    than committed to the repository.
    '''
    headers = {'Prediction-Key': "fdc828690c3843fe8dc65e532d506d7e", "Content-type": "application/octet-stream", "Content-Length": "1000"}
    with open(image_loc,'rb') as f:
        # timeout added so a network stall cannot hang the agent loop forever
        response = requests.post('https://southcentralus.api.cognitive.microsoft.com/customvision/v2.0/Prediction/287a5a82-272d-45f3-be6a-98bdeba3454c/image?iterationId=3d1fd99c-0b93-432f-b275-77260edc46d3', data=f, headers=headers, timeout=30)
    return response.json()
def get_highest_pred(image_json):
    '''Return (probability, prediction) for the highest-probability entry in the
    classifier response; (0, '') when no prediction beats probability 0.'''
    best_prob, best_pred = 0, ''
    for prediction in image_json['predictions']:
        if prediction['probability'] > best_prob:
            best_prob, best_pred = prediction['probability'], prediction
    return best_prob, best_pred
# Convenience wrapper: classify an on-disk image and keep only the
# highest-probability prediction.
sensor_reading = lambda image_loc: get_highest_pred(get_image_response(image_loc))
#assert get_highest_pred(get_image_response('C:/Users/13383861/Downloads/test_train.jpg'))[0] > 0.6
#test
#an agent precept consists of a grid location, a detection probability, a timestep, a timestamp and the observer name
AgentObservation = namedtuple('obs_location', ['grid_loc','probability','timestep', 'timestamp', 'observer_name'])
#A belief map component consists of a grid location and a likelihood
BeliefMapComponent = namedtuple('belief_map_component', ['grid_loc','likelihood'])
#%%
#Code and test for class which manages agent observations in a set grid
class AgentObservations():
    '''A class which records agent observations in a UE4Grid'''
    def __init__(self, grid: UE4Grid):
        self.grid = grid
        #observations consist of agent percepts
        self.observations = []
    def record_agent_observation(self, new_observation: AgentObservation):
        '''Append a single percept to the observation log.'''
        self.observations.append(new_observation)
    def get_most_recent_observation(self, observations = None):
        '''Returns the most recent observation in a list of observations
        (defaults to all recorded observations).

        The previous default was a shared mutable list (``[]``) - the classic
        Python pitfall - replaced by a ``None`` sentinel with identical falsy
        behaviour.'''
        if not observations:
            observations = self.observations
        return sorted(observations, key = lambda observation: observation.timestamp, reverse = True)[0]
    def get_most_recent_observation_at_position(self, grid_loc: UE4Coord):
        '''Most recent observation among those recorded at grid_loc.'''
        return self.get_most_recent_observation(self.get_all_observations_at_position(grid_loc))
    def get_all_observations_at_position(self, grid_loc: UE4Coord):
        '''All recorded observations whose grid location equals grid_loc.'''
        return list(filter(lambda observation: observation.grid_loc == grid_loc, self.observations))
# Inline tests for the full AgentObservations class above.
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 10, 6)
test_agent_observations = AgentObservations(test_grid)
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent1')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent1')
obs3 = AgentObservation(UE4Coord(0,1),0.9, 3, 1237, 'agent1')
test_agent_observations.record_agent_observation(obs1)
test_agent_observations.record_agent_observation(obs2)
test_agent_observations.record_agent_observation(obs3)
assert test_agent_observations.get_most_recent_observation() == obs3
assert test_agent_observations.get_most_recent_observation_at_position(UE4Coord(0,0)) == obs2
assert test_agent_observations.get_all_observations_at_position(UE4Coord(0,1)) == [obs3]
# Single-observation Bayes update for a binary hypothesis:
# P(source|obs) = P(obs)P(source) / (P(obs)P(source) + (1-P(obs))(1-P(source)))
calc_posterior = lambda observation, prior: (prior * observation) / ((prior * observation) + (1-prior)*(1-observation))
assert abs(calc_posterior(0.5, 0.2) - 0.2) <= 0.001
assert abs(calc_posterior(0.8, 0.2) - 0.5) <= 0.001
#%%
#Calculation of posterior given prior and observations
def get_posterior_given_obs(observations:list, prior):
    '''For a sequence of observations calculates the posterior probability given a prior.

    Applies the binary Bayes update once per observation; the result is
    independent of the order of the observations.
    '''
    for obs_prob in observations:
        evidence = prior * obs_prob + (1 - prior) * (1 - obs_prob)
        prior = (prior * obs_prob) / evidence
    return prior
# Symmetric observations around 0.5 must leave the prior unchanged.
assert abs(get_posterior_given_obs([0.5,0.2,0.8], 0.5) - 0.5) <= 0.001
#%%
####################### Belief map and tests #######################
#A belief map has an agent name (beliefs belong to an agent) consists of belief map components
#Leave this as namedtuple if don't need to define methods
class BeliefMap:
    '''A single agent's belief (likelihood the source is present) over every location of a
    discrete UE4Grid. Updates use Bayes' rule via get_posterior_given_obs, so the order in
    which observations are applied does not matter.'''

    def __init__(self, agent_name: str, grid: UE4Grid, belief_map_components: typing.List[BeliefMapComponent], prior: typing.Dict[UE4Coord, float]):
        self.agent_name = agent_name
        self.grid = grid
        self.belief_map_components = belief_map_components
        self.prior = prior

    def get_prior(self)->typing.Dict[UE4Coord, float]:
        '''Returns the prior mapping used to initialise this belief map.'''
        return self.prior

    def get_grid(self)->UE4Grid:
        '''Returns the grid over which beliefs are held.'''
        return self.grid

    def get_belief_map_components(self):
        '''Returns the list of (grid location, likelihood) components.'''
        return self.belief_map_components

    def update_from_prob(self, grid_loc, obs_prob):
        '''Bayes-updates the likelihood at grid_loc with a single sensor probability.'''
        prior_val = self.get_belief_map_component(grid_loc).likelihood
        self.belief_map_components[self._get_observation_grid_index(grid_loc)] = BeliefMapComponent(grid_loc, get_posterior_given_obs([obs_prob], prior_val))

    def update_from_observation(self, agent_observation: AgentObservation):
        '''Updates the belief map from a single AgentObservation.'''
        self.update_from_prob(agent_observation.grid_loc, agent_observation.probability)

    def update_from_observations(self, agent_observations: typing.Set[AgentObservation]):
        '''Updates the belief map from an iterable of AgentObservations.'''
        for observation in agent_observations:
            self.update_from_observation(observation)

    def _get_observation_grid_index(self, grid_loc: UE4Coord):
        '''Returns the index of grid_loc's component within belief_map_components.'''
        return self.belief_map_components.index(self.get_belief_map_component(grid_loc))

    def get_belief_map_component(self, grid_loc):
        '''Returns the BeliefMapComponent at grid_loc; raises if grid_loc is not on the grid.'''
        #single scan instead of the previous membership test followed by a second filter pass
        for belief_map_component in self.belief_map_components:
            if belief_map_component.grid_loc == grid_loc:
                return belief_map_component
        raise Exception("{} is not in the belief map".format(grid_loc))

    def __eq__(self, other):
        #fix: previously a stub that always returned None (so == was always falsy).
        #Two belief maps are equal when they cover the same grid with the same components.
        if not isinstance(other, BeliefMap):
            return NotImplemented
        return self.grid == other.grid and self.belief_map_components == other.belief_map_components

    def __str__(self):
        return str({"grid": self.grid,"prior": self.prior,"agent_name": self.agent_name,"components": self.belief_map_components})
#maybe this should go in contsructor and make regular class
def create_belief_map(grid, agent_name, prior = None):
    '''Creates an occupancy belief map for a given observer and a set of grid locations.
    Prior is a mapping of grid_points to probabilities; when omitted (or empty), a
    uniform uninformative prior over all grid points is used.'''
    #fix: avoid a mutable default argument; None (like the old {}) selects the uniform prior
    if not prior:
        prior = {grid_point: 1/len(grid.get_grid_points()) for grid_point in grid.get_grid_points()}
    return BeliefMap(agent_name, grid, [BeliefMapComponent(grid_point, prior[grid_point]) for grid_point in grid.get_grid_points()], prior)
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 10, 6)
test_map = create_belief_map(test_grid, "agent1")
#uniform prior: every grid point starts at 1/no_grid_points
assert test_map.get_belief_map_component(UE4Coord(0,0)) == BeliefMapComponent(UE4Coord(0,0), 1/len(test_grid.get_grid_points()))
assert test_map._get_observation_grid_index(UE4Coord(0,0)) == 5
test_map.update_from_prob(UE4Coord(0,0), 0.9)
assert 0.132<test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.133
#prove order in which observations come in doesn't matter
obs1 = AgentObservation(UE4Coord(0,0),0.4, 1, 1234, 'agent1')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent1')
obs3 = AgentObservation(UE4Coord(0,0),0.93, 3, 1237, 'agent1')
test_map = create_belief_map(test_grid, "agent1")
test_map.update_from_observation(obs1)
assert 0.0111 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.0112
test_map.update_from_observation(obs2)
assert 0.025688 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.0256881
test_map.update_from_observation(obs3)
assert 0.2594 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.2595
#now check observing in a different order gives same result
test_map = create_belief_map(test_grid, "agent1")
test_map.update_from_observation(obs2)
test_map.update_from_observation(obs1)
test_map.update_from_observation(obs3)
assert 0.2594 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.2595
#now check observing in a different order gives same result
test_map = create_belief_map(test_grid, "agent1")
test_map.update_from_observation(obs3)
test_map.update_from_observation(obs2)
test_map.update_from_observation(obs1)
assert 0.2594 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.2595
####################### Belief map and tests #######################
#%%
####################### Observation Set Manager and tests #######################
class ObservationSetManager:
    '''
    Manages the sensor measurements of this agent and of other agents. Observations don't
    have to be taken at discrete locations - the continuous position can be recorded and the
    grid location inferred from it. Turning the recorded observations into a belief map
    requires a grid so each observation can be assigned to a grid segment.
    '''

    def __init__(self, agent_name: 'name of the agent that owns this observation list manager'):
        #maps an agent name to the set of AgentObservations made by that agent
        self.observation_sets = dict()
        self.agent_name = agent_name
        #the owning agent always starts with its own (empty) observation set
        self.init_rav_observation_set(self.agent_name)

    def init_rav_observation_set(self, rav_name, observations = None):
        '''initialise a new set of observations for the named RAV'''
        self.observation_sets[rav_name] = set() if not observations else observations

    def get_all_observations(self):
        '''Returns the union of every agent's recorded observations.'''
        all_observations = set()
        for observation_set in self.observation_sets.values():
            all_observations = all_observations.union(observation_set)
        return all_observations

    def get_observation_set(self, rav_name) -> typing.Set[AgentObservation]:
        '''Get the set of observations made by the named RAV'''
        return self.observation_sets[rav_name]

    def update_rav_obs_set(self, rav_name, observations: typing.Set[AgentObservation]):
        '''Records new observations for the named RAV, silently discarding duplicates.'''
        #lazily create an observation set for a RAV we have not seen before
        if rav_name not in self.observation_sets:
            self.init_rav_observation_set(rav_name)
        #set update ignores observations already recorded
        self.observation_sets[rav_name].update(observations)

    def update_from_other_obs_list_man(self, other):
        '''Merges in every observation known to another ObservationSetManager.
        Might need to check that the timestamps must be different...'''
        for rav_name, observation_set in other.observation_sets.items():
            self.update_rav_obs_set(rav_name, observation_set)

    def get_discrete_belief_map_from_observations(self, grid):
        '''Given a discrete grid, returns a belief map containing the likelihood of the source
        being contained in each grid segment'''
        #ToDo: observations currently must be made at grid locations - instead work out which
        #observations fall in each grid segment and then compute the belief map
        derived_belief_map = create_belief_map(grid, self.agent_name)
        derived_belief_map.update_from_observations(self.get_all_observations())
        return derived_belief_map

    def get_continuous_belief_map_from_observations(self, grid_bounds):
        '''Given grid bounds, returns a function which returns the likelihood given the
        continuous position of the RAV. I.E. transform the discrete PDF as above to a
        continuous one.'''
        pass
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 6, 5)
test_ObservationSetManager = ObservationSetManager('agent1')
test_ObservationSetManager.observation_sets
#three observations from a second agent plus one made by the owner itself
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent2')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent2')
obs3 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
obs4 = AgentObservation(UE4Coord(0,1),0.9, 3, 1238, 'agent1')
test_ObservationSetManager.init_rav_observation_set('agent2', set([obs1, obs2]))
test_ObservationSetManager.observation_sets
test_ObservationSetManager.update_rav_obs_set('agent2', set([obs3]))
test_ObservationSetManager.get_all_observations()
#agent2's observations accumulate; the owner's set starts empty
assert test_ObservationSetManager.get_observation_set('agent2') == set([obs1, obs2, obs3])
assert test_ObservationSetManager.get_observation_set('agent1') == set([])
test_ObservationSetManager.update_rav_obs_set('agent1', set([obs4]))
assert not test_ObservationSetManager.get_all_observations().difference(set([obs1, obs2, obs3, obs4]))
###################################################
# Check that duplicate observations aren't added
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 6, 5)
test1_ObservationSetManager = ObservationSetManager('agent1')
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent2')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent2')
obs3 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
test1_ObservationSetManager.update_rav_obs_set('agent2',[obs1, obs2, obs3])
test1_ObservationSetManager.observation_sets
#test that duplicate measurements won't occur
#obs4 is field-identical to obs3, so the set union discards it
obs4 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
test1_ObservationSetManager.update_rav_obs_set('agent2', set([obs4]))
assert test1_ObservationSetManager.get_observation_set('agent2') == set([obs1, obs2, obs3])
#belief maps derived from the recorded observations match the hand-computed posteriors
assert abs(test1_ObservationSetManager.get_discrete_belief_map_from_observations(test_grid).get_belief_map_component(UE4Coord(0,0)).likelihood - 0.074468) < 0.0001
assert abs(test1_ObservationSetManager.get_discrete_belief_map_from_observations(test_grid).get_belief_map_component(UE4Coord(0,1)).likelihood - 0.395833) < 0.0001
#%%
######################### Action selection strategies #########################
def get_move_from_belief_map_epsilon_greedy(belief_map: BeliefMap, current_grid_loc: UE4Coord, epsilon: float, eff_radius = None) -> UE4Coord:
    '''Epsilon greedy move selection: with probability epsilon picks a random neighbor of
    current_grid_loc, otherwise picks the neighbor with the highest likelihood.
    eff_radius defaults to the larger grid spacing so that, on a regular grid
    (lat_spacing < 2*lng_spacing and vice versa), the immediate neighbors are considered.
    Returns current_grid_loc unchanged when there are no neighbors to move to.'''
    if not eff_radius:
        eff_radius = max(belief_map.get_grid().get_lat_spacing(), belief_map.get_grid().get_lng_spacing())
    #a list of UE4Coord
    neighbors = belief_map.get_grid().get_neighbors(current_grid_loc, eff_radius)
    #don't move to new position if can't find any neighbors to move to
    if not neighbors:
        return current_grid_loc
    if random.random() < epsilon:
        #epsilon-random exploration
        return random.choice(neighbors)
    #greedy: the neighbor with the maximum likelihood. Using max fixes the previous
    #UnboundLocalError when every neighbor's likelihood was 0 (no move was ever bound).
    return max(neighbors, key = lambda neighbor: belief_map.get_belief_map_component(neighbor).likelihood)
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 6, 5)
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent2')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent2')
obs3 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
#(grid, agent_name, prior = {})
obs_man = ObservationSetManager("agent1")
obs_man.update_rav_obs_set('agent2', [obs1, obs2, obs3])
belief_map = obs_man.get_discrete_belief_map_from_observations(test_grid)
#epsilon = 0 forces the greedy branch: (0,1) has the highest likelihood within radius 1.8 of (1,1)
assert get_move_from_belief_map_epsilon_greedy(belief_map, UE4Coord(1,1), 0.0, 1.8) == UE4Coord(0,1)
#%%
#everything that could be important for measuring agent performance/progress
#snapshot of an agent at a single timestep, written as one row of the analysis csv
AgentAnalysisState = namedtuple('AgentAnalysisState', ['timestep',
                                                       'timestamp',
                                                       'rav_name',
                                                       'position_intended',
                                                       'position_measured',
                                                       #maybe add distance travelled for current timestep
                                                       'total_dist_travelled',
                                                       'remaining_batt_cap',
                                                       'prop_battery_cap_used',
                                                       'sensor_reading',
                                                       #is it necessary to record the grid along with the likelihoods in case want the grid to
                                                       #dynamically change? For now assume grid is fixed and in 1-1 correspondance with likelihoods
                                                       #'occ_grid_likelihoods',
                                                       #which other agents did the agent coordinate with on this timestep
                                                       'coordinated_with_other_names'])

#metadata related to the agent - details about grid its operating in, prior that was worked with, to be updated...
AgentAnalysisMetadata= namedtuple("MissionAnalysisData", ["agents_used", "grid_origin", 'grid_lat_spacing',
                                                          'grid_lng_spacing','lng_lim', 'lat_lim',
                                                          'no_lat_points', 'no_lng_points', 'prior'])
def get_agent_state_for_analysis(agent_analysis_state: AgentAnalysisState):
    '''Serialises an AgentAnalysisState namedtuple to a single csv row string
    (position, battery capacity, total distance travelled, sensor reading, ...).'''
    state_fields = agent_analysis_state._asdict()
    return _get_agent_state_for_analysis(**state_fields)
def _get_agent_state_for_analysis(timestep, timestamp, rav_name, position_intended, position_measured, total_dist_travelled, remaining_batt_cap, prop_battery_cap_used, sensor_reading, coordinated_with_other_names):
return f"{timestep},{timestamp},{rav_name},{position_intended},{position_measured},{total_dist_travelled},{remaining_batt_cap},{prop_battery_cap_used},{sensor_reading},{coordinated_with_other_names}"
def _get_agent_observation(grid_loc,probability,timestep,timestamp,observer_name):
return f"{grid_loc},{probability},{timestep},{timestamp},{observer_name}"
def get_agent_observation(agent_observation: AgentObservation):
    '''Serialises an AgentObservation namedtuple to a single csv row string for
    the observations file.'''
    observation_fields = agent_observation._asdict()
    return _get_agent_observation(**observation_fields)
def _init_state_for_analysis_file(file_path):
    '''Creates/truncates the agent state analysis file and writes the csv header.'''
    header = ','.join(AgentAnalysisState._fields)
    with open(file_path, 'w+') as f:
        f.write(header + '\n')
def _update_state_for_analysis_file(file_path, csv_row):
with open(file_path, 'a') as f:
f.write(csv_row + '\n')
def _init_agent_metadata_file(file_path):
    '''Creates/truncates the agent metadata file and writes the csv header.'''
    header = ','.join(AgentAnalysisMetadata._fields)
    with open(file_path, 'w+') as f:
        f.write(header + '\n')
def _write_to_agent_metadata_file(file_path, csv_row):
with open(file_path, 'a') as f:
f.write(csv_row + '\n')
#AgentObservation = namedtuple('obs_location', ['grid_loc','probability','timestep', 'timestamp', 'observer_name'])
def _init_observations_file(file_path):
    '''Creates/truncates an observations file and writes the AgentObservation csv header.'''
    header = ','.join(AgentObservation._fields)
    with open(file_path, 'w+') as f:
        f.write(header + '\n')
def _write_to_obserations_file(file_path, agent_observation):
    '''Appends a single observation (as a csv row) to the observations file.
    NOTE: the "obserations" spelling is kept because callers use this name.'''
    csv_row = get_agent_observation(agent_observation)
    with open(file_path, 'a') as observations_file:
        observations_file.write(csv_row + '\n')
#smoke test: a namedtuple of placeholder fields serialises to the expected csv row
testAgentAnalysisState = AgentAnalysisState(2, 100, 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test')
testAgentAnalysisState._asdict()
assert get_agent_state_for_analysis(testAgentAnalysisState) == "2,100,test,test,test,test,test,test,test,test"
def write_agent_performance_to_csv(csv_header, csv_rows, file_name):
    '''Writes a csv header followed by every row to the named file, overwriting it.'''
    with open(file_name, 'w') as csv_write_f:
        csv_write_f.write(csv_header + '\n')
        for csv_row in csv_rows:
            csv_write_f.write(csv_row + '\n')
def calc_likelihood(observations: typing.List[float]):
    '''Returns the product of the given observation probabilities.
    An empty sequence yields the multiplicative identity 1 (previously this
    raised TypeError from reduce with no initializer).'''
    return functools.reduce(lambda x, y: x*y, observations, 1)

assert calc_likelihood([0.1,0.1,0.2,0.4]) == 0.1*0.1*0.2*0.4
def create_belief_map_from_observations(grid: UE4Grid, agent_name: str, agent_belief_map_prior: typing.Dict[UE4Coord, float], agent_observations: typing.Set[AgentObservation]):
    '''Returns a belief map built from a prior and a set of observations. Since the calculation
    of posterior likelihood is based only on prior and observations (independent of order),
    updating a belief map component from measurements can be done by the following update formula:

                      prior * product(over all i observations) observation_i
    ----------------------------------------------------------------------------------------------------------------------
    prior * product(over all i observations) observation_i + (1-prior) * product(over all i observations) (1-observation_i)
    '''
    #fix: pass the grid itself (create_belief_map expects the UE4Grid, there is no grid.get_grid())
    return_bel_map = create_belief_map(grid, agent_name, agent_belief_map_prior)
    #update belief map based on all observations...
    return_bel_map.update_from_observations(agent_observations)
    #fix: the computed map was previously discarded (the function returned None)
    return return_bel_map
class BaseROCSAFEAgent:
    '''Base class for all agents related to the ROCSAFE project; contains minimal
    functionality. Designed mainly so agent performance can be compared and
    measured in a consistent way.'''
    pass
class BaseGridAgent:
    '''Base class for all agents that use a grid representation of the environment;
    contains minimal functionality. Designed mainly so agent performance can be
    compared and measured in a consistent way.'''

    def __init__(self):
        #no shared state yet - subclasses supply their own
        pass

    def actuate(self):
        #stub: subclasses perform their actions here
        pass

    def perceive(self):
        #stub: subclasses read their sensors here
        pass

    def get_state(self):
        #stub: subclasses report their state here
        pass
#create a base agent class
class OccupancyGridAgent():
    '''agent that moves around an occupancy grid in order to locate a source of radiation. Uses a rav agent'''
    #directory camera images are recorded into
    ImageDir = 'D:/ReinforcementLearning/DetectSourceAgent/Data/SensorData'
    #stores analysis csvs. Each csv contains agent state at each timestep
    AgentStateDir = "D:/ReinforcementLearning/DetectSourceAgent/Analysis"
    #stores observation json
    ObservationDir = "D:/ReinforcementLearning/DetectSourceAgent/Observations"
    MockedImageDir = 'D:/ReinforcementLearning/DetectSource/Data/MockData'

    #ToDo: break apart this into components, one which manages actuation/sensing, one which manages/represents state, etc.
    def __init__(self, grid, initial_pos, move_from_bel_map_callable, height, epsilon, multirotor_client, agent_name, prior = None):
        '''grid: UE4Grid the agent explores.
        initial_pos: starting UE4Coord.
        move_from_bel_map_callable: strategy (belief_map, current_loc, epsilon) -> next UE4Coord.
        height: operational flight height passed to the rav.
        epsilon: exploration probability for the move strategy.
        multirotor_client: airsim client (or mock) used for actuation and sensing.
        agent_name: unique name identifying this agent.
        prior: optional mapping UE4Coord -> probability (uniform prior when omitted).'''
        self.rav = multirotor_client
        self.grid = grid
        self.current_pos_intended = initial_pos
        self.grid_locs = grid.get_grid_points()
        self.timestep = 0
        self.rav_operational_height = height
        self.move_from_bel_map_callable = move_from_bel_map_callable
        self.epsilon = epsilon
        self.agent_name = agent_name
        self.agent_states_for_analysis = []
        self.total_dist_travelled = 0
        self.distance_covered_this_timestep = 0
        self.prop_battery_cap_used = 0
        self.current_battery_cap = 1
        self.coordinated_this_timestep = False
        self.others_coordinated_this_timestep = []
        #manages observations of this agent and other agents
        self.observation_manager = ObservationSetManager(self.agent_name)
        self.agent_state_file_loc = OccupancyGridAgent.AgentStateDir + f"/{self.agent_name}.csv"
        self.observations_file_loc = OccupancyGridAgent.ObservationDir + f"/{self.agent_name}.csv"
        #fix: prior previously defaulted to a shared mutable {}; create_belief_map treats {} as "uniform"
        self.current_belief_map = create_belief_map(self.grid, self.agent_name, prior if prior else {})
        self.init_state_for_analysis_file(self.agent_state_file_loc)
        self.init_observations_file(self.observations_file_loc)

    def __eq__(self, other):
        '''This agent is the same as another agent if names are the same. Refine this later'''
        return self.agent_name == other.agent_name

    def get_available_actions(self, state):
        '''Returns actions available to RAV based on its current state'''
        pass

    def get_belief_map_after_t_timesteps(self, t):
        '''Calculates what the agent's belief map would be after t timesteps'''
        pass

    def charge_battery(self):
        '''request from api if battery can be charged. While charging, capacity goes up.
        Charging can be cancelled at any point to explore some more.'''
        pass

    def move_agent(self, destination_intended: UE4Coord):
        '''Flies the rav to the intended destination, updating distance and battery accounting.'''
        self.rav.moveToPositionAsync(destination_intended.x_val, destination_intended.y_val, self.rav_operational_height, 3, vehicle_name = self.agent_name).join()
        self.distance_covered_this_timestep = self.current_pos_intended.get_dist_to_other(destination_intended)
        self.total_dist_travelled += self.current_pos_intended.get_dist_to_other(destination_intended)
        #battery book-keeping: capacity consumed by this move, then the new remaining capacity
        self.prop_battery_cap_used += self.current_battery_cap - self.rav.getRemainingBatteryCap()
        self.current_battery_cap = self.rav.getRemainingBatteryCap()

    def get_agent_name(self):
        '''Returns this agent's unique name.'''
        return self.agent_name

    def get_agent(self):
        '''Returns the underlying rav (multirotor client).'''
        return self.rav

    def get_agent_state_for_analysis(self):
        '''Returns this agent's current AgentAnalysisState serialised as a csv row:
        timestep, timestamp, rav_name, positions, distance, battery, sensor reading and
        the agents coordinated with this timestep.'''
        return get_agent_state_for_analysis(AgentAnalysisState(self.timestep, time.time(), self.get_agent_name(),
                                                               self.current_pos_intended, self.current_pos_measured,
                                                               self.total_dist_travelled, self.rav.getRemainingBatteryCap(),
                                                               self.prop_battery_cap_used,
                                                               self.current_reading,
                                                               self.others_coordinated_this_timestep))

    def init_state_for_analysis_file(self, file_path):
        '''Writes the csv header of the agent state analysis file.'''
        _init_state_for_analysis_file(file_path)

    def update_state_for_analysis_file(self, file_path, csv_row):
        '''Appends a csv row to the agent state analysis file.'''
        _update_state_for_analysis_file(file_path, csv_row)

    def init_observations_file(self, file_path):
        '''Writes the csv header of the observations file.'''
        _init_observations_file(file_path)

    def update_observations_file(self, file_path, agent_observation):
        '''Appends an observation csv row to the observations file.'''
        _write_to_obserations_file(file_path, agent_observation)

    #coordination strategy:
    #agent will write all measurements in its possession to a file at each timestep. When communication requested,
    #other agent will read all measurements from the file.
    def can_coord_with_other(self, other_rav_name):
        '''check if other rav is in comm radius - if so, coordination is possible'''
        return self.rav.can_coord_with_other(other_rav_name)

    def coord_with_other(self, other_rav_name):
        '''coordinate with other rav by requesting their measurement list and sending our own measurement list first write own measurement list to file'''
        if self.can_coord_with_other(other_rav_name):
            #fix: previously passed self where a file path was expected.
            #NOTE(review): assumes the other agent's observation file follows the same
            #naming scheme as this agent's own - confirm
            observations_from_other_agents = self._read_observations(OccupancyGridAgent.ObservationDir + f"/{other_rav_name}.csv")
            print('read observations from other agents: {}'.format(observations_from_other_agents))
            #fix: update_rav_obs_set requires the observing rav's name as its first argument
            for other_agent_name, observations_from_other_agent in observations_from_other_agents.items():
                self.observation_manager.update_rav_obs_set(other_agent_name, observations_from_other_agent)
            #this only updates observations not seen previously since a set is maintained of all seen observations
            self.current_belief_map.update_from_observations(self.observation_manager.get_all_observations())
            self.others_coordinated_this_timestep.append(other_rav_name)
            self.coordinated_this_timestep = True

    def _write_observations(self, file_loc):
        '''writes agent measurements to file to be read by other agent'''
        with open(file_loc, 'a') as f:
            json.dump(str(self.observation_manager.observation_sets), f)

    def _read_observations(self, file_loc):
        '''reads agents' measurements (a json mapping of agent name to observations) from file'''
        with open(file_loc, 'r') as f:
            #fix: json.loads takes a string - json.load reads from a file object
            return json.load(f)

    def record_image(self):
        '''Takes a photo with camera "3" and saves it to ImageDir named by the current timestep.'''
        responses = self.rav.simGetImages([ImageRequest("3", ImageType.Scene)], vehicle_name = self.agent_name)
        response = responses[0]
        # get numpy array
        filename = OccupancyGridAgent.ImageDir + "/photo_" + str(self.timestep)
        airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)
        print("saved image here: {}".format(os.path.normpath(filename + '.png')))

    def update_agent_pos_measured(self):
        '''Updates the measured position from the rav's estimated kinematics.'''
        self.current_pos_measured = self.rav.getMultirotorState().kinematics_estimated.position

    def explore_timestep(self):
        '''Gets rav to explore next timestep: pick a move, fly there, sense, update the
        belief map and log the observation and agent state.'''
        next_pos = self.move_from_bel_map_callable(self.current_belief_map, self.current_pos_intended, self.epsilon)
        print("self.current_pos_intended: {}".format(self.current_pos_intended ))
        self.move_agent(next_pos)
        self.current_pos_intended = next_pos
        self.current_pos_measured = self.rav.getMultirotorState(vehicle_name = self.agent_name).kinematics_estimated.position
        self.update_agent_pos_measured()
        #record image at location
        self.record_image()
        #get sensor reading, can be done on separate thread
        print('getting sensor reading for {}'.format(OccupancyGridAgent.ImageDir + "/photo_" + str(self.timestep) + '.png'))
        self.current_reading = float(sensor_reading(OccupancyGridAgent.ImageDir + "/photo_" + str(self.timestep) + '.png')[0])
        print('sensor reading: {}'.format(self.current_reading))
        print("updating belief map position {} from {}".format(self.current_pos_intended, self.current_belief_map.get_belief_map_component(self.current_pos_intended)))
        self.current_belief_map.update_from_prob(self.current_pos_intended, self.current_reading)
        print(" to {}".format(self.current_belief_map.get_belief_map_component(self.current_pos_intended)))
        newest_observation = AgentObservation(self.current_pos_intended, self.current_reading, self.timestep, time.time(), self.agent_name)
        #fix: record the very observation object that is logged below (previously a second
        #AgentObservation with a slightly later timestamp was constructed and recorded)
        self.observation_manager.update_rav_obs_set(self.agent_name, [newest_observation])
        self.update_state_for_analysis_file(self.agent_state_file_loc, self.get_agent_state_for_analysis())
        print("Observation made: {}".format(newest_observation))
        self.update_observations_file(self.observations_file_loc, newest_observation)
        #if agent is in range, communicate

    def explore_t_timesteps(self, t: int):
        '''Runs explore_timestep t times and returns the resulting belief map.'''
        for i in range(t):
            self.explore_timestep()
            self.timestep += 1
        return self.current_belief_map
if __name__ != '__main__':
    grid = UE4Grid(20, 15, UE4Coord(0,0), 120, 150)

    class KinematicsState():
        '''Mocked airsim kinematics: zeroed position/velocity/acceleration vectors.'''
        position = Vector3r()
        linear_velocity = Vector3r()
        angular_velocity = Vector3r()
        linear_acceleration = Vector3r()
        angular_acceleration = Vector3r()

    class MockRavForTesting:
        '''Stands in for an airsim MultirotorClient so OccupancyGridAgent can be exercised offline.'''
        def __init__(self):
            self.kinematics_estimated = KinematicsState()

        def getRemainingBatteryCap(self):
            return 0.9

        #fix: accept the vehicle_name keyword that OccupancyGridAgent.move_agent passes
        def moveToPositionAsync(self, destination_intendedx, destination_intendedy, rav_operational_height, velocity, vehicle_name = ''):
            import threading
            t = threading.Thread(target = lambda : None)
            t.start()
            return t

        def can_coord_with_other(self, other_rav_name):
            pass

        #fix: accept the vehicle_name keyword passed by OccupancyGridAgent.record_image
        #NOTE(review): record_image also indexes the result and reads .image_data_uint8,
        #so returning None still fails there - confirm how deep this mock should go
        def simGetImages(self, l, vehicle_name = ''):
            pass

        #fix: accept the vehicle_name keyword passed by OccupancyGridAgent.explore_timestep
        def getMultirotorState(self, vehicle_name = ''):
            return self

    def ImageRequest(*args, **kwargs):
        return

    class ImageType(enum.Enum):
        Scene = ''

    #grid, initial_pos, move_from_bel_map_callable, height, epsilon, multirotor_client, agent_name
    #fix: the constructor requires an initial position as its second argument
    occupancy_grid_agent = OccupancyGridAgent(grid, UE4Coord(0,0), get_move_from_belief_map_epsilon_greedy, -12, 0.2, MockRavForTesting(), 'agent1')
    #write some tests for agent here
    occupancy_grid_agent.current_pos_intended = UE4Coord(0,0)
    occupancy_grid_agent.current_pos_measured = None
    occupancy_grid_agent.current_reading = 0.1
    occupancy_grid_agent.get_agent_state_for_analysis()
    occupancy_grid_agent.explore_timestep()
##################### Functions that can deal with the initialization of RAVs ####################
#%%
def create_rav(client, rav_name):
    '''Connects to the airsim client and readies the named RAV for flight:
    api control enabled, armed, then airborne.'''
    client.confirmConnection()
    client.enableApiControl(True, rav_name)
    client.armDisarm(True, rav_name)
    #block until takeoff has completed
    client.takeoffAsync(vehicle_name = rav_name).join()
def destroy_rav(client, rav_name):
    '''Lands and disarms the named RAV, then releases api control (reverse of create_rav).'''
    #fix: land and disarm while api control is still enabled - commands issued after
    #enableApiControl(False) are ignored by airsim
    client.landAsync(vehicle_name = rav_name).join()
    client.armDisarm(False, rav_name)
    client.enableApiControl(False, rav_name)
def run_t_timesteps(occupancy_grid_agent):
    '''Drives the given agent forward two exploration timesteps (usable as a thread target).'''
    agent_type = type(occupancy_grid_agent)
    print('type(occupancy_grid_agent)', agent_type)
    occupancy_grid_agent.explore_t_timesteps(2)
#%%
if __name__ == '__main__':
    grid = UE4Grid(20, 15, UE4Coord(0,0), 120, 150)
    rav_names = ["Drone1"]
    #, "Drone2"]
    client = airsim.MultirotorClient()
    #connect, arm and launch every rav before exploration begins
    for rav_name in rav_names:
        create_rav(client, rav_name)
    #assert client.getVehiclesInRange("Drone1", ["Drone2"],1000000) == ["Drone2"]
    #print('vehicles in range: ', client.getVehiclesInRange("Drone1", ["Drone2"] ,1000000))
    #rav1.simShowPawnPath(False, 1200, 20)
    #grid shared between rav
    #grid, move_from_bel_map_callable, height, epsilon, multirotor_client, agent_name, performance_csv_path: "file path that agent can write performance to", prior = []
    #for grid_coord_index in range(1,len(grid.get_grid_points())):
    #    client.showPlannedWaypoints(grid.get_grid_points()[grid_coord_index-1].x_val,
    #                                grid.get_grid_points()[grid_coord_index-1].y_val,
    #                                grid.get_grid_points()[grid_coord_index-1].z_val,
    #                                grid.get_grid_points()[grid_coord_index].x_val,
    #                                grid.get_grid_points()[grid_coord_index].y_val,
    #                                grid.get_grid_points()[grid_coord_index].z_val,
    #                                lifetime = 200)
    #single-agent exploration for 20 timesteps using the epsilon-greedy move strategy
    occupancy_grid_agent1 = OccupancyGridAgent(grid, UE4Coord(0,0), get_move_from_belief_map_epsilon_greedy, -12, 0.3, client, "Drone1")
    occupancy_grid_agent1.explore_t_timesteps(20)
    #occupancy_grid_agent2 = OccupancyGridAgent(grid, UE4Coord(20,15),get_move_from_belief_map_epsilon_greedy, -12, 0.3, client, "Drone2")
    #occupancy_grid_agent1.explore_t_timesteps(10)
    #p1 = threading.Thread(target = run_t_timesteps, args = (occupancy_grid_agent1,))
    #p2 = threading.Thread(target = run_t_timesteps, args = (occupancy_grid_agent2,))
    #p1.start()
    #p2.start()
    #p1.join()
    #p2.join()
    # showPlannedWaypoints(self, x1, y1, z1, x2, y2, z2, thickness=50, lifetime=10, debug_line_color='red', vehicle_name = '')
    #land and release the rav once exploration finishes
    destroy_rav(client, "Drone1")
    #destroy_rav(client, "Drone2")
    #for grid_loc in grid_locs:
    ##rav.moveOnPathAsync(list(map(lambda x: x.to_vector3r(),grid_locs)), 8)
    #rav.moveToPositionAsync(0,0, -20, 5).join()
    #print('rav position: {}'.format(rav.getMultirotorState().kinematics_estimated.position))
    #responses = rav.simGetImages([ImageRequest("3", ImageType.Scene)])
    #response = responses[0]
    #filename = OccupancyGridAgent.ImageDir + "/photo_" + str(1)
    #airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)
    # grid, move_from_bel_map_callable, height, epsilon, multirotor_client, prior = []
    #pos, likelihood = OccupancyGridAgent(grid, get_move_from_bel_map, -12, 0.3, rav, "Drone1").explore_t_timesteps(125)
    #print('determined {} as source with likelihood {}'.format(pos, likelihood))
    #rav.moveToPositionAsync(pos.x_val, pos.y_val, -5, 3).join()
| 15,438 | 1,065 | 2,425 |