hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
75345d261ba7d82143221ae7318f3540237cbfce | 1,164 | py | Python | programs/views.py | MAPC/masshealth | 3045c453e10dde952f459d81db886c64134b1268 | [
"BSD-3-Clause"
] | null | null | null | programs/views.py | MAPC/masshealth | 3045c453e10dde952f459d81db886c64134b1268 | [
"BSD-3-Clause"
] | null | null | null | programs/views.py | MAPC/masshealth | 3045c453e10dde952f459d81db886c64134b1268 | [
"BSD-3-Clause"
] | null | null | null | from django.http import HttpResponse
from django.utils import simplejson
from django.template.defaultfilters import truncatewords
from models import Program
def all_geojson(request):
    """
    Return a GeoJSON FeatureCollection of all Programs.

    Each feature carries ``title``, a 20-word truncated ``description``,
    ``absolute_url``, a ``map_icon`` layer name and (when present) an
    ``image_url`` as its properties.
    """
    try:
        # transform(4326): reproject geometries to WGS84 lon/lat, the
        # coordinate system GeoJSON expects.
        programs = Program.objects.transform(4326).all().select_related()
    except Program.DoesNotExist:
        # Bug fix: the original caught ``Place.DoesNotExist`` but ``Place``
        # is never imported in this module, so the handler itself would have
        # raised a NameError. ``Http404`` is also missing from the module
        # imports, hence the local import here.
        from django.http import Http404
        raise Http404
    features = []
    for program in programs:
        properties = dict(
            title=program.title,
            # Truncate for compact map popups.
            description=truncatewords(program.description, 20),
            absolute_url=program.get_absolute_url(),
            map_icon='layer-0',  # default icon layer; overridden below if set
        )
        if program.image:
            properties['image_url'] = program.image.url
        if program.icon:
            properties['map_icon'] = 'layer-%s' % program.icon.id
        geometry = simplejson.loads(program.geometry.geojson)
        features.append(dict(type='Feature', geometry=geometry, properties=properties))
    response = dict(type='FeatureCollection', features=features)
    # NOTE(review): ``mimetype`` (and ``django.utils.simplejson``) are
    # legacy Django APIs; modern Django uses ``content_type`` and ``json``.
    return HttpResponse(simplejson.dumps(response), mimetype='application/json')
| 36.375 | 158 | 0.713058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.183849 |
75346a22ba08754ec10918d7f2f8e9f83a4fe87b | 3,197 | py | Python | tests/test_utils.py | Kirembu/qpanel | 6853dd7b7776d4a2ee84fbcdaa735d6eed0d1aea | [
"MIT"
] | null | null | null | tests/test_utils.py | Kirembu/qpanel | 6853dd7b7776d4a2ee84fbcdaa735d6eed0d1aea | [
"MIT"
] | null | null | null | tests/test_utils.py | Kirembu/qpanel | 6853dd7b7776d4a2ee84fbcdaa735d6eed0d1aea | [
"MIT"
] | 1 | 2019-10-06T19:31:38.000Z | 2019-10-06T19:31:38.000Z | import unittest
from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, \
timedelta_from_field_dict
from qpanel.convert import convert_time_when_param
import time
import datetime
class UtilsTestClass(unittest.TestCase):
    """Unit tests for qpanel.utils helpers and qpanel.convert.convert_time_when_param."""
    def test_clean_str_to_div(self):
        """clean_str_to_div_id maps '/' to '-' and '.'/'@' to '_' (per the expected output)."""
        div = 'ro/.d. i _@l_k/d_@'
        self.assertEqual(clean_str_to_div_id(div), 'ro-_d_ i __l_k-d__')
    def test_underscore_to_camelcase(self):
        """underscore_to_camelcase capitalizes each underscore-separated segment."""
        a = 'rodrigoRamirez'
        self.assertEqual(underscore_to_camelcase(a), 'Rodrigoramirez')
        a = 'rodrigo_Ramirez'
        self.assertEqual(underscore_to_camelcase(a), 'RodrigoRamirez')
        a = 'rodrigo_ramelcase' if False else 'rodrigo_ramirez'
        self.assertEqual(underscore_to_camelcase(a), 'RodrigoRamirez')
        a = '_rodrigo_ramirez'
        self.assertEqual(underscore_to_camelcase(a), '_RodrigoRamirez')
    def test_timedelta_from_field_dict(self):
        """
        timedelta_from_field_dict computes the delta between a dict field and a
        reference time, returns a zero delta for missing keys, and — when the
        last flag is True — treats the field value as a plain number of seconds.
        """
        now = time.time()
        d = {'time': now, 'time2': 'hola'}
        self.assertEqual(timedelta_from_field_dict('time', d, now + 1),
                         datetime.timedelta(0, 1))
        self.assertNotEqual(
            timedelta_from_field_dict(
                'time',
                d,
                now + 1),
            datetime.timedelta(
                0,
                10))
        self.assertEqual(
            timedelta_from_field_dict(
                'time',
                d,
                now + 100),
            datetime.timedelta(
                0,
                100))
        # Missing key ('timeno') yields a zero timedelta.
        self.assertEqual(
            timedelta_from_field_dict(
                'timeno',
                d,
                now + 100),
            datetime.timedelta(
                0,
                0))
        self.assertEqual(
            str(timedelta_from_field_dict('time', d, now + 1)), '0:00:01')
        self.assertEqual(
            timedelta_from_field_dict(
                'time', d, now), datetime.timedelta(
                0, 0))
        self.assertEqual(
            str(timedelta_from_field_dict('time', d, now)), '0:00:00')
        # With the final flag True, values are interpreted as seconds.
        d2 = {'time': 60, 'time2': 6001}
        self.assertEqual(str(timedelta_from_field_dict(
            'time', d2, None, True)), '0:01:00')
        self.assertEqual(str(timedelta_from_field_dict(
            'time2', d2, None, True)), '1:40:01')
    def test_convert_time_when_param(self):
        """
        convert_time_when_param splits a 'when<splitter>hour' string and falls
        back to '00:00:00' when the hour part is absent or not a valid time.
        """
        value = 'test1,00:00:00'
        self.assertEqual(convert_time_when_param(value),
                         {'when': 'test1', 'hour': '00:00:00'})
        value = 'test1'
        self.assertEqual(convert_time_when_param(value),
                         {'when': 'test1', 'hour': '00:00:00'})
        value = 'test1, 00:00:01'
        self.assertEqual(convert_time_when_param(value),
                         {'when': 'test1', 'hour': '00:00:01'})
        value = 'test1, string_wrong'
        self.assertEqual(convert_time_when_param(value),
                         {'when': 'test1', 'hour': '00:00:00'})
        value = 'test1; 00:00:01'
        self.assertEqual(convert_time_when_param(value, splitter=';'),
                         {'when': 'test1', 'hour': '00:00:01'})
# Run the unit tests when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 34.010638 | 74 | 0.550829 | 2,925 | 0.91492 | 0 | 0 | 0 | 0 | 0 | 0 | 555 | 0.1736 |
75359098ea0c39566da775b5f14fdb222f3c9684 | 4,580 | py | Python | overtime/tests/algorithms/centrality/test_closeness.py | overtime3/overtime | ed3ae6877894f4d2c9f8473a885698e1622be3bd | [
"MIT"
] | 9 | 2020-10-15T13:53:36.000Z | 2022-03-08T12:08:09.000Z | overtime/tests/algorithms/centrality/test_closeness.py | overtime3/overtime | ed3ae6877894f4d2c9f8473a885698e1622be3bd | [
"MIT"
] | 6 | 2021-02-07T15:43:12.000Z | 2021-04-24T04:03:39.000Z | overtime/tests/algorithms/centrality/test_closeness.py | overtime3/overtime | ed3ae6877894f4d2c9f8473a885698e1622be3bd | [
"MIT"
] | 7 | 2020-10-15T13:55:12.000Z | 2022-03-12T03:54:02.000Z | import unittest
from overtime.components.graphs import TemporalGraph
from overtime.algorithms.centrality.closeness import *
class ClosenessTest(unittest.TestCase):
    """
    Tests for temporal closeness centrality methods.
    """
    def setUp(self):
        """
        Create a graph for use in all test methods.

        Builds the same 10-node, 14-edge temporal network twice: once as a
        directed graph (network1) and once as an undirected graph (network2).
        """
        # NOTE(review): TemporalDiGraph is not imported explicitly; presumably
        # it is re-exported by the star import from the closeness module —
        # confirm.
        self.network1 = TemporalDiGraph("test_network")
        self.network2 = TemporalGraph("test_network")
        for node in ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]:
            self.network1.add_node(node)
            self.network2.add_node(node)
        # Identical temporal edge list applied to both graph variants.
        edges = {
            0: {'node1': 'a', 'node2': 'e', 'tstart': 1, 'tend': 2},
            1: {'node1': 'e', 'node2': 'f', 'tstart': 2, 'tend': 3},
            2: {'node1': 'g', 'node2': 'e', 'tstart': 3, 'tend': 4},
            3: {'node1': 'h', 'node2': 'b', 'tstart': 4, 'tend': 5},
            4: {'node1': 'h', 'node2': 'i', 'tstart': 5, 'tend': 6},
            5: {'node1': 'e', 'node2': 'h', 'tstart': 6, 'tend': 7},
            6: {'node1': 'c', 'node2': 'h', 'tstart': 7, 'tend': 8},
            7: {'node1': 'j', 'node2': 'h', 'tstart': 7, 'tend': 8},
            8: {'node1': 'd', 'node2': 'c', 'tstart': 8, 'tend': 9},
            9: {'node1': 'h', 'node2': 'i', 'tstart': 9, 'tend': 10},
            10: {'node1': 'h', 'node2': 'i', 'tstart': 10, 'tend': 11},
            11: {'node1': 'a', 'node2': 'e', 'tstart': 11, 'tend': 12},
            12: {'node1': 'h', 'node2': 'b', 'tstart': 12, 'tend': 13},
            13: {'node1': 'a', 'node2': 'c', 'tstart': 12, 'tend': 13}
        }
        for index, edge in edges.items():
            self.network1.add_edge(edge['node1'], edge['node2'], edge['tstart'], edge['tend'])
            self.network2.add_edge(edge['node1'], edge['node2'], edge['tstart'], edge['tend'])
    def test_temporal_closeness(self):
        """
        Tests that temporal_closeness returns correct values for several dummy networks. Tests both notions of
        optimality for which temporal_closeness is defined for and also tests on directed and undirected graphs.
        """
        output_directed_fast = temporal_closeness(self.network1, optimality="fastest")
        output_undirected_fast = temporal_closeness(self.network2, optimality="fastest")
        output_directed_short = temporal_closeness(self.network1, optimality="shortest")
        output_undirected_short = temporal_closeness(self.network2, optimality="shortest")
        # Expected per-node centralities for each graph/optimality combination.
        correct_directed_fast = {'g': 1.4928571428571429, 'j': 1.5, 'f': 0.0, 'a': 2.861111111111111, 'c': 1.5,
                                 'e': 2.392857142857143, 'h': 2.0, 'b': 0.0, 'i': 0.0, 'd': 1.0}
        correct_undirected_fast = {'c': 4.0, 'e': 5.226190476190476, 'd': 1.2, 'f': 2.492099567099567,
                                   'g': 2.170634920634921, 'h': 5.166666666666666, 'b': 2.658333333333333,
                                   'a': 2.6051587301587302, 'i': 2.6845238095238093, 'j': 1.5}
        correct_directed_short = {'a': 3.6666666666666665, 'i': 0.0, 'e': 3.0, 'd': 1.0, 'f': 0.0, 'j': 2.0, 'c': 2.0,
                                  'b': 0.0, 'g': 2.1666666666666665, 'h': 2.0}
        correct_undirected_short = {'b': 3.6666666666666665, 'g': 3.5833333333333335, 'c': 4.5, 'i': 3.6666666666666665,
                                    'e': 6.333333333333334, 'd': 1.5, 'a': 4.75, 'f': 4.083333333333334, 'j': 2.0,
                                    'h': 6.0}
        # Test fastest (spot-check three representative nodes per graph).
        self.assertAlmostEqual(output_directed_fast["a"], correct_directed_fast["a"])
        self.assertAlmostEqual(output_directed_fast["e"], correct_directed_fast["e"])
        self.assertAlmostEqual(output_directed_fast["j"], correct_directed_fast["j"])
        self.assertAlmostEqual(output_undirected_fast["a"], correct_undirected_fast["a"])
        self.assertAlmostEqual(output_undirected_fast["e"], correct_undirected_fast["e"])
        self.assertAlmostEqual(output_undirected_fast["j"], correct_undirected_fast["j"])
        # Test shortest (same spot-checks under the other optimality notion).
        self.assertAlmostEqual(output_directed_short["a"], correct_directed_short["a"])
        self.assertAlmostEqual(output_directed_short["e"], correct_directed_short["e"])
        self.assertAlmostEqual(output_directed_short["j"], correct_directed_short["j"])
        self.assertAlmostEqual(output_undirected_short["a"], correct_undirected_short["a"])
        self.assertAlmostEqual(output_undirected_short["e"], correct_undirected_short["e"])
        self.assertAlmostEqual(output_undirected_short["j"], correct_undirected_short["j"])
| 55.853659 | 120 | 0.577293 | 4,452 | 0.972052 | 0 | 0 | 0 | 0 | 0 | 0 | 1,202 | 0.262445 |
7535dab30ffe66be529e1d721b95b5fcf3fa87e7 | 4,165 | py | Python | game/00-chess-engine/chess_subprocess.py | lix2k3/renpy-chess | 4ac418224cc6a2367e59515904fc644acc889db3 | [
"MIT"
] | 1 | 2021-08-24T07:56:24.000Z | 2021-08-24T07:56:24.000Z | game/00-chess-engine/chess_subprocess.py | lix2k3/renpy-chess | 4ac418224cc6a2367e59515904fc644acc889db3 | [
"MIT"
] | null | null | null | game/00-chess-engine/chess_subprocess.py | lix2k3/renpy-chess | 4ac418224cc6a2367e59515904fc644acc889db3 | [
"MIT"
] | null | null | null | import sys
import subprocess
import_dir = sys.argv[1]
sys.path.append(import_dir)
# https://python-chess.readthedocs.io/en/v0.23.10/
import chess
import chess.uci
def main():
    """
    Command loop: read '#'-separated commands from stdin, dispatch them to a
    ChessEngine, and flush stdout after each command so the parent process
    (chess_displayable.rpy) sees the reply immediately.
    """
    engine = ChessEngine()
    # Commands that receive the full split argument list.
    handlers_with_args = {
        'fen': engine.init_board,
        'stockfish': engine.init_stockfish,
        'piece_at': engine.get_piece_at,
        'is_capture': engine.get_is_capture,
        'push_move': engine.push_move,
    }
    # Commands that take no arguments.
    handlers_no_args = {
        'stockfish_move': engine.get_stockfish_move,
        'game_status': engine.get_game_status,
        'legal_moves': engine.get_legal_moves,
        'pop_move': engine.pop_move,
    }
    while True:
        # '#' is the field separator agreed with chess_displayable.rpy.
        fields = raw_input().split('#')
        if not fields:
            continue
        command = fields[0]
        if command == 'quit':
            engine.kill_stockfish()
            break
        if command in handlers_with_args:
            handlers_with_args[command](fields)
        elif command in handlers_no_args:
            handlers_no_args[command]()
        sys.stdout.flush()
class ChessEngine():
    """
    Wraps a python-chess board and a Stockfish UCI engine, replying to each
    command by printing its result on stdout (read by the parent process).
    Uses the legacy ``chess.uci`` API (python-chess < 0.24).
    """
    def __init__(self):
        # enum game_status as defined in chess_displayable.rpy
        self.INCHECK = 1
        self.THREEFOLD = 2
        self.FIFTYMOVES = 3
        self.DRAW = 4
        self.CHECKMATE = 5
        self.STALEMATE = 6
        self.board = None # the chess board object
        self.stockfish = None # chess AI engine
        self.stockfish_movetime = None
        self.stockfish_depth = None
    def init_board(self, args):
        """Create the board from the FEN string in args[1]."""
        fen = args[1]
        self.board = chess.Board(fen=fen)
    def init_stockfish(self, args):
        """Launch Stockfish: args = [_, binary path, is-windows flag, movetime, depth]."""
        stockfish_path = args[1]
        # NOTE(review): eval() of a subprocess argument; expected to be the
        # literal 'True'/'False' sent by chess_displayable.rpy — confirm.
        is_os_windows = eval(args[2])
        self.stockfish_movetime = int(args[3])
        self.stockfish_depth = int(args[4])
        # stop stockfish from opening up shell on windows
        # https://stackoverflow.com/a/63538680
        startupinfo = None
        if is_os_windows:
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
        self.stockfish = chess.uci.popen_engine(stockfish_path, startupinfo=startupinfo)
        self.stockfish.uci()
        self.stockfish.position(self.board)
    def kill_stockfish(self):
        """Shut the engine process down cleanly."""
        self.stockfish.quit()
    def get_piece_at(self, args):
        """Print the symbol of the piece on square (file args[1], rank args[2]), or 'None'."""
        file_idx, rank_idx = int(args[1]), int(args[2])
        piece = self.board.piece_at(chess.square(file_idx, rank_idx))
        if piece:
            print(piece.symbol())
        else:
            print('None')
    def get_is_capture(self, args):
        """Print whether the UCI move in args[1] is a capture on the current board."""
        move_uci = args[1]
        move = chess.Move.from_uci(move_uci)
        print(self.board.is_capture(move))
    def get_game_status(self):
        """Print the first matching status code (checked in priority order), or -1."""
        if self.board.is_checkmate():
            print(self.CHECKMATE)
            return
        if self.board.is_stalemate():
            print(self.STALEMATE)
            return
        if self.board.can_claim_threefold_repetition():
            print(self.THREEFOLD)
            return
        if self.board.can_claim_fifty_moves():
            print(self.FIFTYMOVES)
            return
        if self.board.is_check():
            print(self.INCHECK)
            return
        print('-1') # no change to game_status
    def get_stockfish_move(self):
        """Ask Stockfish for its best move on the current position and print it in UCI."""
        self.stockfish.position(self.board)
        move = self.stockfish.go(movetime=self.stockfish_movetime, depth=self.stockfish_depth)
        move = move.bestmove
        print(move.uci())
    def get_legal_moves(self):
        """Print all legal moves as a '#'-joined list of UCI strings."""
        print('#'.join([move.uci() for move in self.board.legal_moves]))
    def push_move(self, args):
        """Apply the UCI move in args[1] and print whose turn it is afterwards."""
        move_uci = args[1]
        move = chess.Move.from_uci(move_uci)
        self.board.push(move)
        print(self.board.turn)
    def pop_move(self):
        """Undo the last move and print whose turn it is afterwards."""
        # this should not raise an IndexError as the logic has been handled by the caller
        self.board.pop()
        print(self.board.turn)
if __name__ == '__main__':
main() | 29.75 | 94 | 0.59952 | 2,872 | 0.689556 | 0 | 0 | 0 | 0 | 0 | 0 | 537 | 0.128932 |
753714fde4dcb095bb0ba9d29727df72d0c345bc | 2,639 | py | Python | python/data.py | DanGuo1223/mzClustering | 7d884c3ae3e6c2d03f37c564529f5e50221e88ee | [
"MIT"
] | null | null | null | python/data.py | DanGuo1223/mzClustering | 7d884c3ae3e6c2d03f37c564529f5e50221e88ee | [
"MIT"
] | null | null | null | python/data.py | DanGuo1223/mzClustering | 7d884c3ae3e6c2d03f37c564529f5e50221e88ee | [
"MIT"
] | null | null | null | import sys
import numpy as np
from sklearn.cluster import KMeans
from sklearn import manifold
from time import time
from matplotlib import offsetbox
import matplotlib.pyplot as plt
import seaborn as sns
#### plot embedding
def plot_embedding(X, label, title=None):
    """
    Scatter-plot a 2-D embedding, drawing each point as its cluster label.

    X     -- (n_samples, 2) array of embedded coordinates
    label -- per-sample integer labels used both as text and for coloring
    title -- optional figure title
    """
    # Rescale each axis to [0, 1] so the text positions fill the figure.
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)
    plt.figure()
    ax = plt.subplot(111)
    for i in range(X.shape[0]):
        plt.text(X[i, 0], X[i, 1], str(label[i]),
                 color=plt.cm.Set1((label[i]+2) / 10.),
                 fontdict={'weight': 'bold', 'size': 9})
    # NOTE(review): this branch only accumulates `shown_images`; nothing is
    # drawn with it (the AnnotationBbox thumbnail code from the original
    # sklearn example appears to have been removed) — effectively dead code.
    if hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = np.array([[1., 1.]])  # just something big
        for i in range(X.shape[0]):
            dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[i]]]
    # Hide the axis ticks.
    plt.xticks([]), plt.yticks([])
    if title is not None:
        plt.title(title)
## Data loader: spectra -> normalize -> K-means -> t-SNE visualization
np.set_printoptions(threshold=sys.maxsize)
# Options / constants. NOTE(review): `mode`, `eps`, `channel` and
# `image_label` are assigned but never used below.
mode = 'Training'
num_cluster =10
eps = 1e-10
height = 125
width = 110
channel = 1
sampleN = 4123 #746
# Load the spectra matrix and reshape to (samples, 1, height, width).
spec = np.genfromtxt('spec.csv',delimiter=' ')
spec_train = np.reshape(spec, (-1, 1, height, width))
label = np.genfromtxt('fake_label.csv',delimiter=' ')
train_labels = np.asarray(label, dtype=np.int32)
image_data = spec_train
image_label = train_labels
##### Normalize each sample to [0, 1].
# NOTE(review): (max - x) / (max - min) also INVERTS the intensities, not
# just rescales them — confirm this is intentional.
print('min:', np.min(image_data[84,::]), 'max:', np.max(image_data[84,::]))
for i in range(0, sampleN):
    current_min = np.min(image_data[i,::])
    current_max = np.max(image_data[i,::])
    image_data[i,::] = (current_max - image_data[i,::]) / (current_max - current_min)
#### Plot one ion image (sample 84) as a sanity check.
print(np.shape(image_data))
plt.imshow(np.reshape(image_data[84,::],(125,110)))
############ K-means clustering on the flattened images.
X=np.reshape(image_data, (-1, height*width))
kmeans = KMeans(n_clusters=10, init='k-means++', max_iter=500, n_init=10, random_state=2)
km_labels = kmeans.fit_predict(X)
########### t-SNE embedding of the same flattened data, colored by cluster.
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
print('plot embedding')
plot_embedding(X_tsne, label = km_labels,
               title = "t-SNE embedding of the digits (time %.2fs)" %
               (time() - t0))
plt.show()
# Second view: seaborn scatter plot of the embedding.
plt.figure(figsize=(5,3.5))
sns.scatterplot(
    x=X_tsne[:,0], y=X_tsne[:,1],
    hue=km_labels,
    palette=sns.color_palette("hls", num_cluster),
    legend="full"
)
plt.xlabel('TSNE 1')
plt.ylabel('TSNE 2') | 29.988636 | 89 | 0.632058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.160667 |
7537d9ff6f54603cb0471062e3b58878836382b5 | 4,362 | py | Python | moe/optimal_learning/python/interfaces/domain_interface.py | misokg/Cornell-MOE | 1547d6b168b7fc70857d522baa0d5d45c41d3cdf | [
"Apache-2.0"
] | 218 | 2017-10-14T03:54:00.000Z | 2022-03-25T14:48:38.000Z | moe/optimal_learning/python/interfaces/domain_interface.py | Tracy3370/Cornell-MOE | df299d1be882d2af9796d7a68b3f9505cac7a53e | [
"Apache-2.0"
] | 45 | 2017-09-27T14:33:31.000Z | 2020-12-16T09:32:50.000Z | moe/optimal_learning/python/interfaces/domain_interface.py | Tracy3370/Cornell-MOE | df299d1be882d2af9796d7a68b3f9505cac7a53e | [
"Apache-2.0"
] | 63 | 2017-09-25T14:23:57.000Z | 2022-03-17T01:41:42.000Z | # -*- coding: utf-8 -*-
"""Interface for a domain: in/out test, random point generation, and update limiting (for constrained optimization)."""
from builtins import object
from abc import ABCMeta, abstractmethod, abstractproperty
from future.utils import with_metaclass
class DomainInterface(with_metaclass(ABCMeta, object)):
    """Interface for a domain: in/out test, random point generation, and update limiting (for constrained optimization)."""
    @abstractproperty
    def dim(self):
        """Return the number of spatial dimensions."""
        pass
    @abstractmethod
    def check_point_inside(self, point):
        r"""Check if a point is inside the domain/on its boundary or outside.

        :param point: point to check
        :type point: array of float64 with shape (dim)
        :return: true if point is inside the domain
        :rtype: bool
        """
        pass
    @abstractmethod
    def get_bounding_box(self):
        """Return a list of ClosedIntervals representing a bounding box for this domain."""
        pass
    @abstractmethod
    def get_constraint_list(self):
        """Return a list of lambda functions expressing the domain bounds as linear constraints. Used by COBYLA.

        :return: a list of lambda functions corresponding to constraints
        :rtype: array of lambda functions with shape (dim * 2)
        """
        pass
    @abstractmethod
    def generate_random_point_in_domain(self, random_source=None):
        """Generate ``point`` uniformly at random such that ``self.check_point_inside(point)`` is True.

        .. Note:: if you need multiple points, use generate_uniform_random_points_in_domain instead;
          depending on implementation, it may yield better distributions over many points. For example,
          tensor product type domains use latin hypercube sampling instead of repeated random draws
          which guarantees that no non-uniform clusters may arise (in subspaces) versus this method
          which treats all draws independently.

        :return: point in domain
        :rtype: array of float64 with shape (dim)
        """
        pass
    @abstractmethod
    def generate_uniform_random_points_in_domain(self, num_points, random_source):
        r"""Generate AT MOST ``num_points`` uniformly distributed points from the domain.

        .. NOTE::
             The number of points returned may be LESS THAN ``num_points``!
             Implementations may use rejection sampling. In such cases, generating the requested
             number of points may be unreasonably slow, so implementers are allowed to generate
             fewer than ``num_points`` results.

        :param num_points: max number of points to generate
        :type num_points: int >= 0
        :param random_source:
        :type random_source: callable yielding uniform random numbers in [0,1]
        :return: uniform random sampling of points from the domain; may be fewer than ``num_points``!
        :rtype: array of float64 with shape (num_points_generated, dim)
        """
        pass
    @abstractmethod
    def compute_update_restricted_to_domain(self, max_relative_change, current_point, update_vector):
        r"""Compute a new update so that CheckPointInside(``current_point`` + ``new_update``) is true.

        Changes new_update_vector so that:
          ``point_new = point + new_update_vector``
        has coordinates such that ``CheckPointInside(point_new)`` returns true.
        ``new_update_vector`` is a function of ``update_vector``.
        ``new_update_vector`` is just a copy of ``update_vector`` if ``current_point`` is already inside the domain.

        .. NOTE::
            We modify update_vector (instead of returning point_new) so that further update
            limiting/testing may be performed.

        :param max_relative_change: max change allowed per update (as a relative fraction of current distance to boundary)
        :type max_relative_change: float64 in (0, 1]
        :param current_point: starting point
        :type current_point: array of float64 with shape (dim)
        :param update_vector: proposed update
        :type update_vector: array of float64 with shape (dim)
        :return: new update so that the final point remains inside the domain
        :rtype: array of float64 with shape (dim)
        """
        pass
| 40.388889 | 123 | 0.684319 | 4,089 | 0.937414 | 0 | 0 | 3,867 | 0.88652 | 0 | 0 | 3,488 | 0.799633 |
7539ce8921d3225bf80008bbe5aba7640173e66f | 554 | py | Python | research/datasets/tests/test_datasets.py | joaopfonseca/research | 02659512218d077d9ef28d481178e62172ef18cd | [
"MIT"
] | 1 | 2021-01-25T00:09:32.000Z | 2021-01-25T00:09:32.000Z | research/datasets/tests/test_datasets.py | joaopfonseca/research | 02659512218d077d9ef28d481178e62172ef18cd | [
"MIT"
] | null | null | null | research/datasets/tests/test_datasets.py | joaopfonseca/research | 02659512218d077d9ef28d481178e62172ef18cd | [
"MIT"
] | null | null | null | from urllib.request import urlopen
import multiprocessing.dummy as mp
from multiprocessing import cpu_count
import ssl
from .._base import FETCH_URLS
ssl._create_default_https_context = ssl._create_unverified_context
def test_urls():
    """Test whether the dataset URLs in FETCH_URLS are reachable (HTTP 200)."""
    # FETCH_URLS values may be non-string entries as well; keep only the
    # plain URL strings. (The original wrapped each string in a one-element
    # list and flattened it again, which is equivalent to this filter.)
    urls = [url for url in FETCH_URLS.values() if isinstance(url, str)]
    # Fetch in parallel with a thread pool; the context manager terminates
    # the pool deterministically (the original leaked the pool's workers).
    with mp.Pool(cpu_count()) as p:
        url_status = p.map(lambda url: urlopen(url).status == 200, urls)
    assert all(url_status)
| 24.086957 | 87 | 0.693141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.064982 |
753a37676c204c0ac6bedd0d6d49be01a6ac16e3 | 1,347 | py | Python | jdcloud_sdk/services/vod/models/MediaClip.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | jdcloud_sdk/services/vod/models/MediaClip.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | jdcloud_sdk/services/vod/models/MediaClip.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class MediaClip(object):
    """A clip of a media asset placed on a composition timeline."""

    def __init__(self, mediaId=None, mediaIn=None, mediaOut=None, timelineIn=None, timelineOut=None, operations=None):
        """
        :param mediaId: (Optional) Media asset ID; here it must be the video ID of a VOD (video-on-demand) media asset
        :param mediaIn: (Optional) In-point of the clip within the source media
        :param mediaOut: (Optional) Out-point of the clip within the source media
        :param timelineIn: (Optional) In-point of the clip on the composition timeline
        :param timelineOut: (Optional) Out-point of the clip on the composition timeline
        :param operations: (Optional)
        """
        self.mediaId = mediaId
        self.mediaIn = mediaIn
        self.mediaOut = mediaOut
        self.timelineIn = timelineIn
        self.timelineOut = timelineOut
        self.operations = operations
| 35.447368 | 118 | 0.706756 | 812 | 0.547539 | 0 | 0 | 0 | 0 | 0 | 0 | 1,101 | 0.742414 |
753d85f68b37d30eae599c1964f08dd71bec7759 | 592 | py | Python | babylispregex.py | phoughton/baby_lisp | 0f21f4c3ba38377f6fc414f50207ec6fee655ea9 | [
"MIT"
] | null | null | null | babylispregex.py | phoughton/baby_lisp | 0f21f4c3ba38377f6fc414f50207ec6fee655ea9 | [
"MIT"
] | null | null | null | babylispregex.py | phoughton/baby_lisp | 0f21f4c3ba38377f6fc414f50207ec6fee655ea9 | [
"MIT"
] | null | null | null | import re
# $ babyLisp(‘(add 1 2)’)
# $ 3
# $ babyLisp(‘(multiply 4 (add 2 3))’)
# $ 20
def add(a, b):
    """Return the sum of a and b."""
    return a + b


def subtract(a, b):
    """Return a minus b."""
    return a - b


def multiply(a, b):
    """Return the product of a and b."""
    return a * b


def divide(a, b):
    """Return a divided by b (true division)."""
    return a / b


# Operator-name -> implementation lookup used by the interpreter.
_OPS = {'add': add, 'subtract': subtract, 'multiply': multiply, 'divide': divide}


def baby_lisp(lisp_string):
    """
    Evaluate a tiny Lisp-like arithmetic expression, e.g. "(add 1 2)" -> 3.

    Replaces the previous regex + eval() implementation: no eval() is run on
    the input (eval of user input is a security smell), and arbitrary runs of
    whitespace between tokens are now tolerated.
    """
    # Pad parentheses with spaces so a plain split() tokenizes everything.
    tokens = lisp_string.replace('(', ' ( ').replace(')', ' ) ').split()

    def parse(pos):
        # Parse one expression starting at tokens[pos]; return (value, next_pos).
        token = tokens[pos]
        if token == '(':
            op = _OPS[tokens[pos + 1]]
            args = []
            pos += 2
            while tokens[pos] != ')':
                value, pos = parse(pos)
                args.append(value)
            return op(*args), pos + 1
        # Atom: an int literal, falling back to float.
        try:
            return int(token), pos + 1
        except ValueError:
            return float(token), pos + 1

    result, _ = parse(0)
    return result


assert baby_lisp("(add 1 2)") == 3
assert baby_lisp("(multiply 4 (add 2 3))") == 20
assert baby_lisp("(multiply (add (subtract 2 1) (multiply 5 1)) (add 2 3))") == 30
| 18.5 | 82 | 0.567568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.35 |
753e238d11dcef3e0069b4e553a40395bf220dd3 | 421 | py | Python | python3/contains_duplicate_3.py | joshiaj7/CodingChallenges | f95dd79132f07c296e074d675819031912f6a943 | [
"MIT"
] | 1 | 2020-10-08T09:17:40.000Z | 2020-10-08T09:17:40.000Z | python3/contains_duplicate_3.py | joshiaj7/CodingChallenges | f95dd79132f07c296e074d675819031912f6a943 | [
"MIT"
] | null | null | null | python3/contains_duplicate_3.py | joshiaj7/CodingChallenges | f95dd79132f07c296e074d675819031912f6a943 | [
"MIT"
] | null | null | null | """
Space : O(1)
Time : O(n**2)
"""
class Solution:
def containsNearbyAlmostDuplicate(self, nums: List[int], k: int, t: int) -> bool:
if t == 0 and len(nums) == len(set(nums)):
return False
for i, cur_val in enumerate(nums):
for j in range(i+1, min(i+k+1, len(nums))):
if abs(cur_val - nums[j]) <= t:
return True
return False
| 26.3125 | 85 | 0.503563 | 379 | 0.900238 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.095012 |
753f4b0a72fb6a598355d9cd10551930431e1d41 | 5,024 | py | Python | LIBRAY_MANAGEMENT/f_passwd.py | ShriyasnhAgarwl/Hacktoberfest | 5e8adf77a833f7b99dbddff92716e05641dac857 | [
"MIT"
] | null | null | null | LIBRAY_MANAGEMENT/f_passwd.py | ShriyasnhAgarwl/Hacktoberfest | 5e8adf77a833f7b99dbddff92716e05641dac857 | [
"MIT"
] | null | null | null | LIBRAY_MANAGEMENT/f_passwd.py | ShriyasnhAgarwl/Hacktoberfest | 5e8adf77a833f7b99dbddff92716e05641dac857 | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import sqlite3
from sqlite3 import Error
#creating window
class Fp(Tk):
    """
    "Forgot password" window: verifies the admin's security question/answer
    against the library_administration.db SQLite database, then lets them set
    a new password.
    """
    def __init__(self):
        super().__init__()
        self.iconbitmap(r'libico.ico')
        self.maxsize(480, 320)
        self.title("Forget Password")
        self.canvas = Canvas(width=500, height=500, bg='black')
        self.canvas.pack()
        self.photo = PhotoImage(file='forgot.png')
        self.canvas.create_image(-20, -20, image=self.photo, anchor=NW)
        # Tk variables: a=user id, b=security question, c=answer,
        # d=new password, e=retyped password.
        a = StringVar()
        b = StringVar()
        c = StringVar()
        d = StringVar()
        e = StringVar()
        # Validate the new password and write it to the database.
        # NOTE(review): this file uses re.search() but `re` is not in this
        # file's import block — confirm it is imported elsewhere.
        def ins():
            # NOTE(review): `flag` is only assigned inside this branch; when
            # both passwords are >= 8 chars, the `elif flag == -1` test below
            # raises NameError — confirm/fix.
            if (len(d.get())) < 8 or len(e.get()) < 8:
                # Password policy: lowercase, uppercase, digit and one of
                # [_@$] required; no whitespace.
                while True:
                    if not re.search("[a-z]", d.get()):
                        flag = -1
                        break
                    elif not re.search("[A-Z]", d.get()):
                        flag = -1
                        break
                    elif not re.search("[0-9]", d.get()):
                        flag = -1
                        break
                    elif not re.search("[_@$]", d.get()):
                        flag = -1
                        break
                    elif re.search("\s", d.get()):
                        flag = -1
                        break
                    else:
                        flag = 0
                        break
            if len(d.get()) == 0:
                messagebox.showinfo("Error","Please Enter Your Password")
            elif flag == -1:
                messagebox.showinfo("Error","Minimum 8 characters.\nThe alphabets must be between [a-z]\nAt least one alphabet should be of Upper Case [A-Z]\nAt least 1 number or digit between [0-9].\nAt least 1 character from [ _ or @ or $ ].")
            elif d.get() != e.get():
                messagebox.showinfo("Error","New and retype password are not some")
            else:
                try:
                    self.conn = sqlite3.connect('library_administration.db')
                    self.myCursor = self.conn.cursor()
                    self.myCursor.execute("Update admin set password = ? where id = ?",[e.get(),a.get()])
                    self.conn.commit()
                    self.myCursor.close()
                    self.conn.close()
                    messagebox.showinfo("Confirm","Password Updated Successfuly")
                    self.destroy()
                except Error:
                    messagebox.showerror("Error","Something Goes Wrong")
        # Verify id + security question/answer, then reveal the new-password
        # widgets.
        def check():
            if len(a.get()) < 5:
                messagebox.showinfo("Error","Please Enter User Id")
            elif len(b.get()) == 0:
                messagebox.showinfo("Error","Please Choose a question")
            elif len(c.get()) == 0:
                messagebox.showinfo("Error", "Please Enter a answer")
            else:
                try:
                    # NOTE(review): this connection/cursor is never closed on
                    # this path — resource leak.
                    self.conn = sqlite3.connect('library_administration.db')
                    self.myCursor = self.conn.cursor()
                    self.myCursor.execute("Select id,secQuestion,secAnswer from admin where id = ?",[a.get()])
                    pc = self.myCursor.fetchone()
                    if not pc:
                        messagebox.showinfo("Error", "Something Wrong in the Details")
                    # NOTE(review): `or` accepts a match on ANY single field;
                    # presumably `and` (all fields must match) was intended —
                    # confirm.
                    elif str(pc[0]) == a.get() or str(pc[1]) == b.get() or str(pc[2]) == c.get():
                        Label(self, text="New Password", font=('arial', 15, 'bold')).place(x=40, y=220)
                        Entry(self, show = "*", textvariable=d, width=40).place(x=230, y=224)
                        Label(self, text="Retype Password", font=('arial', 15, 'bold')).place(x=40, y=270)
                        Entry(self, show = "*", textvariable=e, width=40).place(x=230, y=274)
                        Button(self, text="Submit", width=15, command=ins).place(x=230, y=324)
                except Error:
                    messagebox.showerror("Error","Something Goes Wrong")
        # Static labels and input widgets for the verification form.
        Label(self, text="Enter User Id",bg='black',fg='white', font=('arial', 15, 'bold')).place(x=40, y=20)
        Label(self, text="Security Question",bg='black',fg='white',font=('arial', 15, 'bold')).place(x=40, y= 70)
        Label(self, text="Security Answer",bg='black',fg='white',font=('arial', 15, 'bold')).place(x=40, y= 120)
        Entry(self, textvariable=a, width=40).place(x=230, y=24)
        ttk.Combobox(self, textvariable = b,values=["What is your school name?", "What is your home name?","What is your Father name?", "What is your pet name?"], width=37,state="readonly").place(x=230, y=74)
        Entry(self, show = "*", textvariable=c, width=40).place(x=230, y=124)
        Button(self, text='Verify', width=15,command = check).place(x=275, y=170)
# Launch the window's event loop.
Fp().mainloop()
| 50.24 | 250 | 0.488256 | 4,858 | 0.966959 | 0 | 0 | 0 | 0 | 0 | 0 | 1,153 | 0.229498 |
753fe1a1faf722bc2fb2591309f0654b5a03e396 | 3,615 | py | Python | data_preprocessing/preprocessing_text.py | florianfricke/Bachelor_Thesis_Sentiment_Analyse | aa1fa95cfbc13115ee60baaf79eab0d1940998ab | [
"MIT"
] | 1 | 2020-06-04T13:20:45.000Z | 2020-06-04T13:20:45.000Z | data_preprocessing/preprocessing_text.py | florianfricke/Bachelor_Thesis_Sentiment_Analyse | aa1fa95cfbc13115ee60baaf79eab0d1940998ab | [
"MIT"
] | 6 | 2020-06-03T18:45:11.000Z | 2022-02-10T01:51:03.000Z | data_preprocessing/preprocessing_text.py | florianfricke/Bachelor_Thesis_Sentiment_Analyse | aa1fa95cfbc13115ee60baaf79eab0d1940998ab | [
"MIT"
] | null | null | null | """
Created by Florian Fricke.
"""
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
from textblob_de.lemmatizers import PatternParserLemmatizer
from tqdm import tqdm
from nltk.corpus import stopwords
from utilities.utilities import transform_data
import pickle
class PreprocessingText:
    """German text-preprocessing pipeline.

    Wraps three independent steps over a raw string or a list of raw
    strings: ekphrasis normalization/tokenization, stop-word removal
    (keeping negations, which matter for sentiment), and lemmatization.
    """

    def __init__(self, text, **kwargs):
        # `text` may be one raw string or a list of raw strings.
        self.text = text
        self.text_processor = TextPreProcessor(
            # terms that will be normalized, e.g. test@gmx.de to <email>
            normalize=['url', 'email', 'percent', 'money', 'phone', 'user',
                       'time', 'date', 'number'],
            # terms that will be annotated, e.g. <hashtag>#test</hashtag>
            annotate={"hashtag", "allcaps", "elongated", "repeated",
                      'emphasis'},
            fix_html=True,  # fix HTML tokens
            unpack_hashtags=True,  # perform word segmentation on hashtags
            # tokenizer: takes a string, returns a list of tokens
            tokenizer=SocialTokenizer(lowercase=True).tokenize,
            # dictionaries replacing extracted tokens (e.g. emoticons) with text
            dicts=[emoticons]
        )

    def remove_stopwords(self, data):
        """Drop German stop words from `data`.

        data: list of token lists -> returns list of filtered token lists;
        plain string -> returns a filtered token list; anything else -> [].
        """
        stop_ger = stopwords.words('german')
        # Negation words carry sentiment, so keep them.
        allowed_stopwords = ['kein', 'keine', 'keinem',
                             'keinen', 'keiner', 'keines', 'nicht', 'nichts']
        for word in allowed_stopwords:
            stop_ger.remove(word)
        customstopwords = ['rt', 'mal', 'heute', 'gerade', 'erst', 'macht', 'eigentlich', 'warum',
                           'gibt', 'gar', 'immer', 'schon', 'beim', 'ganz', 'dass', 'wer', 'mehr', 'gleich', 'wohl']
        # tokens produced by the ekphrasis normalization step
        normalizedwords = ['<url>', '<email>', '<percent>', 'money>',
                           '<phone>', '<user>', '<time>', '<url>', '<date>', '<number>']
        # set gives O(1) membership tests instead of O(n) list scans
        stopword_set = set(stop_ger + customstopwords + normalizedwords)
        if type(data) == list:
            return [[word for word in tokens if word not in stopword_set]
                    for tokens in data]
        if type(data) == str:
            return [word for word in data.split() if word not in stopword_set]
        return []

    def lemmatize_words(self, data):
        """Lemmatize `data` with TextBlob's PatternParserLemmatizer.

        data: list of token lists or a plain string.
        Returns a list of lemma lists (one inner list per input text).
        """
        _lemmatizer = PatternParserLemmatizer()
        lemmatized_data = []
        if type(data) == list:
            for tokens in data:
                # Rebuild plain text; each token is prefixed with a space,
                # matching the original concatenation behavior exactly.
                text = "".join(" " + word for word in tokens)
                lemmatized_data.append(
                    [i[0] for i in _lemmatizer.lemmatize(text)])
        if type(data) == str:
            lemmatized_data.append(
                [i[0] for i in _lemmatizer.lemmatize(data)])
        return lemmatized_data

    def ekphrasis_preprocessing(self):
        """Run the ekphrasis processor over self.text.

        Returns a list of token lists (one per input text).
        """
        if type(self.text) == str:
            return [self.text_processor.pre_process_doc(self.text)]
        if type(self.text) == list:
            return [self.text_processor.pre_process_doc(row)
                    for row in tqdm(self.text)]
        return []
| 40.166667 | 116 | 0.569018 | 3,235 | 0.894882 | 0 | 0 | 0 | 0 | 0 | 0 | 912 | 0.252282 |
7540ff40645b92068ec7fcaab55701834909aa30 | 672 | py | Python | scripts/make_agg_intervals.py | mitchute/Open-GHX | 7a88872c36200c620cfd07994119cfb243a998c9 | [
"MIT"
] | 4 | 2017-10-09T21:08:44.000Z | 2020-11-18T11:09:56.000Z | scripts/make_agg_intervals.py | mitchute/Open-GHX | 7a88872c36200c620cfd07994119cfb243a998c9 | [
"MIT"
] | 1 | 2017-08-18T01:44:13.000Z | 2017-08-18T02:23:21.000Z | scripts/make_agg_intervals.py | mitchute/Open-GHX | 7a88872c36200c620cfd07994119cfb243a998c9 | [
"MIT"
] | 3 | 2016-09-08T14:57:21.000Z | 2021-06-29T08:42:08.000Z | def make_interval(depth, depth_integer_multiplier, num, step_num, start_val):
all_groups_str = "[\n"
for n in range(num):
all_groups_str += "\t["
for d in range(depth):
val = str(start_val * pow(depth_integer_multiplier, d))
if d == depth - 1:
if n == num - 1:
all_groups_str += "%s]\n" % val
else:
all_groups_str += "%s],\n" % val
else:
all_groups_str += "%s, " % val
start_val += step_num
all_groups_str += "]\n"
return all_groups_str
# Demo/driver: 10 groups of 12 doubling values, base 10, stepping by 10.
print(make_interval(12, 2, 10, 10, 10))
| 26.88 | 78 | 0.486607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.053571 |
754141d0382990aaa59611b96bf00b6df7082919 | 8,455 | py | Python | rsssql.py | su-sanzhou/rss-from-web | e4ad4f9de0b5f9eef82ac3464d9d7ceb70d649b4 | [
"MIT"
] | 11 | 2020-02-09T15:16:40.000Z | 2021-12-24T16:29:43.000Z | rsssql.py | su-sanzhou/rss-from-web | e4ad4f9de0b5f9eef82ac3464d9d7ceb70d649b4 | [
"MIT"
] | 4 | 2019-12-25T16:39:26.000Z | 2021-03-27T10:01:49.000Z | rsssql.py | su-sanzhou/rss-from-web | e4ad4f9de0b5f9eef82ac3464d9d7ceb70d649b4 | [
"MIT"
] | null | null | null | import config
from selectsql import SelectSql
class RssSql(object):
    """Async persistence layer for the rss_user / xpath / rss tables.

    Every public method opens a short-lived connection via SelectSql,
    runs one query, and closes the connection again.  Row-returning
    methods yield asyncpg records: fetchrow-style methods return None
    when nothing matches; fetch-style methods return a (possibly empty)
    list.  Mutating helpers return either the new id record or one of
    the status strings below.
    """

    def __init__(self):
        self.database = config.get_database_config()
        self.select_sql = SelectSql(self.database)
        # status strings returned by the mutating helpers below
        self.do_not_success = "do_not_success"
        self.do_success = "do_success"
        self.user = {}
        self.xpath = {}
        self.xpath_id = -1

    async def _fetchrow(self, sql, *args):
        """Run *sql*; return a single row or None. Always closes the connection."""
        conn = await self.select_sql.sql_conn()
        try:
            return await conn.fetchrow(sql, *args)
        finally:
            # release the connection even when the query raises
            await conn.close()

    async def _fetch(self, sql, *args):
        """Run *sql*; return all matching rows as a list. Always closes the connection."""
        conn = await self.select_sql.sql_conn()
        try:
            return await conn.fetch(sql, *args)
        finally:
            await conn.close()

    async def get_user_id_password(self, user_name):
        """Look up a user's id/name/password row; None when the user is unknown."""
        return await self._fetchrow("""
        SELECT user_id,user_name,password FROM rss_user WHERE user_name = $1
        """, user_name)

    async def insert_xpath(self, user_id, site_url, entry_css, entry_link_css,
                           add_base_url, rss_link_prefix, site_title_css,
                           site_motto_css, entry_content_css, author_css,
                           datetime_css, interval_time, rss_link, base_url):
        """Create an xpath row; returns a record holding the new xpath_id."""
        return await self._fetchrow("""
        INSERT INTO xpath (user_id,site_url,
        entry_css,entry_link_css,add_base_url,
        rss_link_prefix,site_title_css,site_motto_css,
        entry_content_css,author_css,datetime_css,
        interval_time,rss_link,base_url)
        VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14)
        RETURNING xpath_id;
        """, user_id, site_url, entry_css, entry_link_css,
             add_base_url, rss_link_prefix,
             site_title_css, site_motto_css, entry_content_css,
             author_css, datetime_css, interval_time, rss_link, base_url)

    async def get_xpath_interval_one(self, xpath_id):
        """Return (xpath_id, interval_time) for one xpath row; None if absent."""
        return await self._fetchrow("""
        SELECT xpath_id,interval_time FROM xpath WHERE xpath_id = $1
        """, xpath_id)

    async def get_xpath_id_interval_all(self):
        """Return (xpath_id, interval_time) for every xpath row."""
        return await self._fetch("""
        SELECT xpath_id,interval_time FROM xpath
        """)

    async def get_xpath_from_user_id(self, user_id):
        """Return all xpath rows owned by *user_id* (possibly empty list)."""
        return await self._fetch("""
        SELECT * FROM xpath WHERE user_id = $1
        """, user_id)

    async def get_xpath_one_from_xpath_id(self, xpath_id):
        """Return a single xpath row by primary key; None if absent."""
        return await self._fetchrow("""
        SELECT * FROM xpath WHERE xpath_id = $1
        """, xpath_id)

    async def get_xpath_one_from_url_name(self, url_name):
        """Return the xpath rows whose rss_link equals *url_name* (a list)."""
        return await self._fetch("""
        SELECT * FROM xpath WHERE rss_link = $1
        """, url_name)

    async def update_xpath_one_from_rss_link(self, site_url, entry_css,
                                             entry_link_css, add_base_url,
                                             site_title_css, site_motto_css,
                                             entry_content_css, author_css,
                                             datetime_css, interval_time,
                                             rss_link, base_url):
        """Update the xpath row identified by *rss_link*; returns its xpath_id record."""
        return await self._fetchrow("""
        UPDATE xpath SET site_url = $1,
        entry_css = $2,entry_link_css = $3,add_base_url = $4,
        site_title_css = $5,site_motto_css = $6,entry_content_css = $7,
        author_css = $8,datetime_css = $9,interval_time = $10,
        base_url = $11
        WHERE rss_link = $12 RETURNING xpath_id
        """, site_url, entry_css, entry_link_css, add_base_url,
             site_title_css, site_motto_css, entry_content_css,
             author_css, datetime_css, interval_time, base_url,
             rss_link)

    async def insert_rss(self, user_id, xpath_id, site_title, rss_url_name,
                         rss_content, rss_last_build_time, rss_sha256sum):
        """Create an rss row; returns a record holding its xpath_id."""
        return await self._fetchrow("""
        INSERT INTO rss (user_id,xpath_id,site_title,rss_url_name,
        rss_content,rss_last_build_time,rss_sha256sum)
        VALUES ($1,$2,$3,$4,$5,$6,$7) RETURNING xpath_id
        """, user_id, xpath_id, site_title, rss_url_name,
             rss_content, rss_last_build_time, rss_sha256sum)

    async def get_one_rss_from_userid_xpathid(self, user_id, xpath_id):
        """Return the rss row for (user_id, xpath_id); None if absent."""
        return await self._fetchrow("""
        SELECT * FROM rss WHERE user_id = $1 AND xpath_id = $2;
        """, user_id, xpath_id)

    async def get_all_rss_from_userid(self, user_id):
        """Return every rss row of a user, or a one-element placeholder list."""
        res = await self._fetch("""
        SELECT * FROM rss WHERE user_id = $1
        """, user_id)
        if len(res) == 0:
            # placeholder consumed by callers when a user has no feeds
            res = [{"site_title": "rss_is_none", "rss_url_name": "no_url"}]
        return res

    async def get_one_rss_from_url_name(self, url_name):
        """Return the rss rows for *url_name*, or a one-element placeholder list."""
        res = await self._fetch("""
        SELECT * FROM rss WHERE rss_url_name = $1
        """, url_name)
        if len(res) == 0:
            res = [{"rss_content": "no rss,maybe deleted", "rss_url_name": "no_url"}]
        return res

    async def update_one_rss_xpath_id(self, rss_content,
                                      rss_sha256sum, xpath_id):
        """Refresh a feed's cached content and checksum.

        Returns the command status string on success, or
        self.do_not_success when the update fails.
        """
        try:
            conn = await self.select_sql.sql_conn()
            try:
                return await conn.execute("""
            UPDATE rss SET rss_content = $1,
            rss_sha256sum = $2 WHERE xpath_id = $3
            """, rss_content, rss_sha256sum, xpath_id)
            finally:
                # previously the connection leaked when execute() raised
                await conn.close()
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit escape
            return self.do_not_success

    async def delete_one_rss_from_url_name(self, url_name):
        """Delete the rss row and its xpath row for *url_name*.

        Returns self.do_success when both rows existed and were removed,
        self.do_not_success otherwise.
        """
        conn = await self.select_sql.sql_conn()
        try:
            res1 = await conn.fetchrow("""
        DELETE FROM rss WHERE rss_url_name = $1 RETURNING *
        """, url_name)
            res2 = await conn.fetchrow("""
        DELETE FROM xpath WHERE rss_link = $1 RETURNING *
        """, url_name)
        finally:
            await conn.close()
        # fetchrow returns None when nothing matched; len(None) used to crash here
        if res1 is not None and res2 is not None:
            return self.do_success
        return self.do_not_success
| 34.651639 | 84 | 0.515553 | 8,402 | 0.993732 | 0 | 0 | 0 | 0 | 7,611 | 0.900177 | 2,851 | 0.337197 |
7541451076c8624e5930edf9291dc62703e20a8d | 2,492 | py | Python | tests/test_widget.py | betatim/cornerstone_widget | c22fafd4d8fe148f6b2349518188eb0bee5f18f1 | [
"Apache-2.0"
] | 24 | 2018-09-07T10:40:07.000Z | 2022-02-01T21:18:00.000Z | tests/test_widget.py | betatim/cornerstone_widget | c22fafd4d8fe148f6b2349518188eb0bee5f18f1 | [
"Apache-2.0"
] | 26 | 2018-09-04T16:32:46.000Z | 2018-10-08T09:11:50.000Z | tests/test_widget.py | betatim/cornerstone_widget | c22fafd4d8fe148f6b2349518188eb0bee5f18f1 | [
"Apache-2.0"
] | 3 | 2018-09-17T12:56:16.000Z | 2019-12-03T06:30:34.000Z | import numpy as np
import pytest
from ipywidgets.embed import embed_snippet
from cornerstone_widget import CornerstoneWidget, CornerstoneToolbarWidget
from cornerstone_widget.cs_widget import encode_numpy_b64
from cornerstone_widget.utils import _virtual_click_button
def test_encoding():
with pytest.raises(ValueError):
encode_numpy_b64(np.ones((4, 4, 2)))
with pytest.raises(ValueError):
encode_numpy_b64(np.ones((2, 3, 3)), rgb=True)
with pytest.raises(ValueError):
encode_numpy_b64(np.ones((2, 3)), rgb=True)
def test_ipy():
c = CornerstoneWidget()
c.update_image(np.ones((2, 1)))
widget_state = c.get_state()
assert widget_state['img_bytes'] == 'AAAAAA=='
assert widget_state['img_width'] == 1
assert widget_state['img_height'] == 2
widget_html = embed_snippet(c)
assert 'CornerstoneModel' in widget_html, 'Should contain widget code'
assert 'cornerstone_widget' in widget_html, 'Should contain widget code'
c.set_tool_state({'dog': 1})
widget_state = c.get_state()
assert widget_state['_tool_state_in'] == '{"dog": 1}'
def test_tools():
c = CornerstoneWidget()
c.select_tool('pan')
widget_state = c.get_state()
assert widget_state['_selected_tool'] == 'pan', 'Should be pan'
with pytest.raises(NotImplementedError):
c.select_tool('pane')
with pytest.raises(NotImplementedError):
c.update_image(np.zeros((3, 3, 3)))
def test_toolbar_tool():
c = CornerstoneToolbarWidget()
c.select_tool('pan')
widget_state = c.cur_image_view.get_state()
assert widget_state['_selected_tool'] == 'pan', 'Should be pan'
# check toolbar
for i in c._toolbar:
cw = i.tooltip
c.select_tool(cw)
widget_state = c.cur_image_view.get_state()
assert widget_state['_selected_tool'] == cw, 'Should be {}'.format(cw)
with pytest.raises(NotImplementedError):
c.select_tool('pane')
with pytest.raises(NotImplementedError):
c.update_image(np.zeros((3, 3, 3)))
def test_notoolbar():
c = CornerstoneToolbarWidget(tools=[])
assert len(c._toolbar) == 1
start_but = c._toolbar[0]
assert start_but.comm is not None, 'Should have something here'
# click button
_virtual_click_button(start_but)
assert start_but.comm is None, 'Should be a dead button'
def test_invalid_toolbar():
with pytest.raises(NotImplementedError):
CornerstoneToolbarWidget(tools=['Magic_Lasso'])
| 31.948718 | 78 | 0.696228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.156501 |
754420bf10ce818718b6b91f9f308e99e3df1a1e | 423 | py | Python | RecoPPS/Local/python/ctppsDiamondLocalReconstruction_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoPPS/Local/python/ctppsDiamondLocalReconstruction_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoPPS/Local/python/ctppsDiamondLocalReconstruction_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
# reco hit production
from RecoPPS.Local.ctppsDiamondRecHits_cfi import ctppsDiamondRecHits
# local track fitting
from RecoPPS.Local.ctppsDiamondLocalTracks_cfi import ctppsDiamondLocalTracks
ctppsDiamondLocalReconstructionTask = cms.Task(
ctppsDiamondRecHits,
ctppsDiamondLocalTracks
)
ctppsDiamondLocalReconstruction = cms.Sequence(ctppsDiamondLocalReconstructionTask)
| 30.214286 | 83 | 0.865248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.099291 |
7544963ba7d28d1172e33eda501c69199e904e17 | 86 | py | Python | g3_metaconfig/__init__.py | BigGreenDelta/g3-metaconfig | 5a75b63986fa42e70cdc7863b1d7436e20e1c4f9 | [
"MIT"
] | null | null | null | g3_metaconfig/__init__.py | BigGreenDelta/g3-metaconfig | 5a75b63986fa42e70cdc7863b1d7436e20e1c4f9 | [
"MIT"
] | 2 | 2022-01-17T08:51:38.000Z | 2022-02-24T21:42:36.000Z | g3_metaconfig/__init__.py | BigGreenDelta/g3-metaconfig | 5a75b63986fa42e70cdc7863b1d7436e20e1c4f9 | [
"MIT"
] | null | null | null | from .metaclass import G3ConfigMeta, Config, ArgParserConfig
from .param import Param
| 28.666667 | 60 | 0.837209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
754677d813bc13607d3f5c2d3ce3b5f14525c78c | 689 | py | Python | tests/run_all_tests.py | philbucher/communication-tests | 65ae430072e502465bfa3df56c8836cf3a061c29 | [
"BSD-3-Clause"
] | null | null | null | tests/run_all_tests.py | philbucher/communication-tests | 65ae430072e502465bfa3df56c8836cf3a061c29 | [
"BSD-3-Clause"
] | null | null | null | tests/run_all_tests.py | philbucher/communication-tests | 65ae430072e502465bfa3df56c8836cf3a061c29 | [
"BSD-3-Clause"
] | null | null | null | import unittest
import os, sys
import communication_tests
if communication_tests.MPI.Rank() == 0:
communication_tests.CompilerInfo()
print()
communication_tests.MPI.Barrier()
is_slave_process = (("--tests-slave" in sys.argv[1:]) or (communication_tests.MPI.Rank() == 1))
if is_slave_process:
from base_communication_test import BaseCommunicationTestDataSender
BaseCommunicationTestDataSender().Execute()
else:
loader = unittest.TestLoader()
tests = loader.discover(os.path.dirname(__file__)) # automatically discover all tests in this directory
testRunner = unittest.runner.TextTestRunner(verbosity=1)
sys.exit(not testRunner.run(tests).wasSuccessful()) | 34.45 | 107 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.097242 |
7546c38999fd0ace1e9a5683705fcf28277bdcad | 10,407 | py | Python | bell.py | SWERecker/wf-auto | 1f7706c2f51aebdefe8791e50a1a894523882baf | [
"MIT"
] | null | null | null | bell.py | SWERecker/wf-auto | 1f7706c2f51aebdefe8791e50a1a894523882baf | [
"MIT"
] | null | null | null | bell.py | SWERecker/wf-auto | 1f7706c2f51aebdefe8791e50a1a894523882baf | [
"MIT"
] | null | null | null | import os
import sys
import time
import warnings
from time import sleep
import cv2
from image_similarity_measures.quality_metrics import fsim
from wf_auto import *
from wf_auto.util import Debug
warnings.filterwarnings('ignore')
# Folders for reference template images and end-of-battle screenshots,
# resolved relative to this script's location.
ref_folder = os.path.join(os.path.abspath(os.path.dirname(__file__)), "reference")
results_folder = os.path.join(os.path.abspath(os.path.dirname(__file__)), "results")
# Delay (seconds) inserted between UI interactions.
operation_delay = 0.7
# FSIM similarity thresholds for template matching (0..1).
threshold = 0.64
boss_threshold = 0.64
# Device-connection defaults; overridable on the command line (see below).
fast_connect = False
ip = "localhost"
port = "7555"
# Per-boss battle counter, updated by battle_record().
record = {}
# Command-line overrides: "fast", "port=<n>", "ip=<addr>".
if len(sys.argv) > 1:
    for arg in sys.argv:
        if arg == "fast":
            fast_connect = True
        if arg.startswith("port"):
            port = arg.split('=')[1]
        if arg.startswith("ip"):
            ip = arg.split('=')[1]
def battle_record(boss_name):
    """Bump the battle counter for `boss_name` in the module-level `record` dict."""
    record[boss_name] = record.get(boss_name, 0) + 1
if __name__ == "__main__":
    # --- One-off setup: config, device connection, push channel, templates ---
    config = util.get_config()
    # NOTE: rebinds the imported `device` module name to the Device instance.
    device = device.Device(f"{ip}:{port}", fast=fast_connect)
    bark = bark.Push(config["push"])
    boss_ref = []
    common_ref = {}
    # Load each configured boss's reference image in place, keep the list.
    for index in range(len(config["boss"])):
        ref_img = config["boss"][index]["ref_img"]
        config["boss"][index]["ref_img"] = util.get_ref_image(ref_folder, ref_img)
        boss_ref.append(config["boss"][index])
    # Load the common UI templates (bell, buttons, screens) by name.
    for item in config["common"]:
        common_ref[item] = util.get_ref_image(ref_folder, config["common"][item])
    idle_count = 0
    # filter_boss == 0 means: accept every bell without boss recognition.
    skip_filter_boss = True if config["filter_boss"] == 0 else False
    Debug.print("开始等待铃铛.", inline=True)
    try:
        # Main state machine: poll screenshots for the bell icon, then walk
        # through accept -> join room -> ready -> wait for battle end -> return.
        while True:
            screenshot = device.screenshot()
            boss_info = {"name": "unknown", "friendly_name": "未知怪物", "target": 0}
            bell = util.get_area_from_image(pos.bell_pos, screenshot)
            simi = fsim(bell, common_ref["bell"])
            # Debug.print("铃铛相似度:", simi)
            Debug.print(".", prefix=False, inline=True)
            if simi > threshold:
                # Bell detected: tap it and inspect the boss popup.
                idle_count = 0
                Debug.print("识别到铃铛,点击", new_line=True)
                device.touch(pos.bell_pos)
                sleep(1)
                bell_info = device.screenshot()
                boss_accept = False
                if skip_filter_boss:
                    Debug.print("跳过筛选Boss,接受")
                    boss_accept = True
                else:
                    # Recognize the boss from the popup and decide by its
                    # configured "target" flag.
                    boss_info = util.get_boss(
                        util.get_area_from_image(pos.boss_pos, bell_info), boss_ref, boss_threshold)
                    Debug.print(f"怪物名称:{boss_info['friendly_name']}")
                    Debug.log(f"怪物名称:{boss_info['friendly_name']}")
                    if boss_info["name"] == "unknown":
                        Debug.print("未知Boss,保存备用")
                        device.get_unknown_boss(bell_info)
                    boss_accept = boss_info["target"]
                if boss_accept:
                    Debug.print("点击接受")
                    device.touch(pos.accept_pos)
                    sleep(1)
                    Debug.print("正在等待进入房间.", inline=True, new_line=True)
                    room_entered = False
                    # Poll until we are in the room, or the room is full.
                    while True:
                        screenshot = device.screenshot()
                        room_full_simi = fsim(common_ref["full"],
                                              util.get_area_from_image(pos.full_pos, screenshot))
                        if room_full_simi > threshold:
                            Debug.print("房间已满或正在开始,返回继续等待", new_line=True)
                            bark.push("进入房间失败:已满或正在开始")
                            device.touch(pos.full_continue_pos)
                            break
                        in_room_simi = fsim(common_ref["in_room"],
                                            util.get_area_from_image(pos.in_room_pos, screenshot))
                        if in_room_simi < threshold:
                            Debug.print(".", prefix=False, inline=True)
                            continue
                        else:
                            Debug.print("已进入房间!", new_line=True)
                            room_entered = True
                            break
                    if not room_entered:
                        Debug.print("未成功进入房间")
                        continue
                    Debug.print("点击准备.")
                    # Tap "ready" until confirmed (or the battle already began).
                    while True:
                        device.touch(pos.prepare_pos)
                        sleep(operation_delay)
                        screenshot = device.screenshot()
                        prepared_simi = fsim(common_ref["prepared"],
                                             util.get_area_from_image(pos.prepare_pos, screenshot))
                        if prepared_simi > threshold:
                            Debug.print("已准备!")
                            break
                        unprepared_simi = fsim(common_ref["unprepared"],
                                               util.get_area_from_image(pos.prepare_pos, screenshot))
                        if unprepared_simi > threshold + 0.05:
                            Debug.print("尚未准备...")
                            continue
                        else:
                            Debug.print("未找到准备标志,可能已经开始")
                            break
                    if skip_filter_boss:
                        # Boss was not identified earlier; try now for logging.
                        Debug.print("尝试识别Boss信息.")
                        boss_info = util.get_boss(
                            util.get_area_from_image(pos.boss_pos, bell_info), boss_ref, boss_threshold)
                        Debug.print(f"怪物名称:{boss_info['friendly_name']}")
                        Debug.log(f"怪物名称:{boss_info['friendly_name']}")
                        if boss_info["name"] == "unknown":
                            Debug.print("未知Boss,保存备用")
                            device.get_unknown_boss(bell_info)
                    Debug.print("正在等待结束.", prefix=True, inline=True)
                    # Wait for one of: normal finish, abnormal exit to the
                    # main screen, or room dismissed.
                    while True:
                        screenshot = device.screenshot()
                        continue_simi = fsim(common_ref["continue"],
                                             util.get_area_from_image(pos.continue_btn_pos, screenshot))
                        finish = True if continue_simi > threshold else False
                        if finish:
                            # Normal finish: save screenshot, push, record.
                            Debug.print(f"{boss_info['friendly_name']} 已正常结束", new_line=True)
                            Debug.log(f"{boss_info['friendly_name']} 已正常结束")
                            file_name = f"{time.strftime('%Y%m%d_%H%M%S', time.localtime())}_{boss_info['name']}.jpg"
                            cv2.imwrite(os.path.join(results_folder, file_name), screenshot)
                            bark.push(f"{boss_info['friendly_name']} 正常结束")
                            battle_record(boss_info["name"])
                            device.touch(pos.continue_pos)
                            break
                        else:
                            Debug.print(".", prefix=False, inline=True)
                        main_simi = fsim(common_ref["main"],
                                         util.get_area_from_image(pos.main_pos, screenshot))
                        finish_abnormal = True if main_simi > threshold else False
                        if finish_abnormal:
                            msg = f"{boss_info['friendly_name']} 非正常结束"
                            Debug.print(msg, new_line=True)
                            Debug.log(msg)
                            bark.push(msg)
                            break
                        dismiss_simi = fsim(common_ref["full"],
                                            util.get_area_from_image(pos.full_pos, screenshot))
                        room_dismiss = True if dismiss_simi > threshold else False
                        if room_dismiss:
                            msg = f"{boss_info['friendly_name']} 房间已解散"
                            Debug.print(msg, new_line=True)
                            Debug.log(msg)
                            bark.push(msg)
                            device.touch(pos.full_continue_pos)
                            sleep(2)
                            break
                        sleep(3)
                    Debug.print("正在回到主界面.", inline=True, new_line=True)
                    attempt_count = 0
                    # Tap "continue" until back on the main screen; after 8
                    # failed attempts fall back to the Android BACK key.
                    while True:
                        screenshot = device.screenshot()
                        main_simi = fsim(common_ref["main"],
                                         util.get_area_from_image(pos.main_pos, screenshot))
                        back_to_main = True if main_simi > threshold else False
                        if back_to_main:
                            device.touch(pos.main_btn_pos)
                            Debug.print("已回到主界面,继续等待铃铛", inline=True, new_line=True)
                            break
                        else:
                            Debug.print(".", prefix=False, inline=True)
                            device.touch(pos.continue_pos)
                            attempt_count += 1
                            if attempt_count > 8:
                                Debug.print("似乎返回主界面失败,尝试使用返回键", new_line=True)
                                device.button("KEYCODE_BACK")
                                attempt_count = 0
                            sleep(2)
            else:
                # No bell on screen: decline any stray popup and keep polling.
                Debug.print("不是要刷的怪物,继续等待.", inline=True, new_line=True)
                device.touch(pos.decline_pos)
            # Periodic tap to keep the device awake while idling.
            if idle_count == 100:
                Debug.print("点击保持唤醒.", new_line=True, inline=True)
                device.touch(pos.awake_pos)
                idle_count = 0
            idle_count += 1
            sleep(operation_delay)
    except KeyboardInterrupt:
        # Ctrl-C: push a summary of recorded battles before exiting.
        if record:
            total = 0
            recs = []
            for boss, count in record.items():
                total += count
                recs.append(f"{util.get_boss_friendly_name(boss_ref, boss)}{count}只")
            msg = f"共成功战斗{total}场\n{';'.join(recs)}"
            bark.push(msg)
        else:
            bark.push("本次未成功进入战斗")
        Debug.print("Control-C", new_line=True, prefix=False)
        exit(0)
| 46.253333 | 118 | 0.462381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,660 | 0.152112 |
754799f86978c3050ac87143e4f1594fb357ed37 | 7,095 | py | Python | clustering_capitals.py | BL-Labs/poetryhunt | cce687c46d0855f2960a9f944cadf28d16fbaba2 | [
"MIT"
] | null | null | null | clustering_capitals.py | BL-Labs/poetryhunt | cce687c46d0855f2960a9f944cadf28d16fbaba2 | [
"MIT"
] | null | null | null | clustering_capitals.py | BL-Labs/poetryhunt | cce687c46d0855f2960a9f944cadf28d16fbaba2 | [
"MIT"
] | null | null | null | from newspaperaccess import NewspaperArchive
from sklearn.feature_extraction import DictVectorizer
from sklearn.cluster import KMeans
import os
import sqlite3
CL_CACHE_FOLDER = "/datastore/cluster_cache_dbs"
class ClusterDB(object):
  """SQLite-backed cache of text-block feature vectors and their labels.

  Three tables: `vector` (numeric features), `item` (newspaper/date/page
  labels, FK to vector), `cluster` (cluster labels, FK to item).  Usable
  as a context manager: exiting commits and closes the connection.
  """
  def __init__(self, dbfile = "area_cache.db"):
    # Database file lives under the shared cluster-cache folder.
    self._conn = sqlite3.connect(os.path.join(CL_CACHE_FOLDER, dbfile))
    # Dict responses:
    self._conn.row_factory = sqlite3.Row
    self._cur = self._conn.cursor()
    # Column name lists; index 0 ("id") is autoincremented, so the
    # INSERT helpers below use keys[1:].
    self._vector_keys = ["id", "x1ave_ledge", "redge_x2ave", "ltcount", "x1_var1",
                 "x1_var2", "x2_var1", "x2_var2", "density", "st_nums", "st_caps", "ave_lsp"]
    self._item_keys = ["id", "newspaper", "year", "month", "day", "page", "article", "block_number", "vector_id"]
    self._cluster_result = ["id", "label", "item_id"]
  def commit(self):
    """Commit the current transaction."""
    return self._conn.commit()
  def close(self):
    """Close the underlying connection."""
    return self._conn.close()
  def __enter__(self):
    return self
  def __exit__(self, type, value, traceback):
    # Commit then close; note the parameter `type` shadows the builtin.
    self._conn.commit()
    self._conn.close()
  def create_tables(self, sure_about_this = False):
    """DROP and recreate all three tables. Destructive; requires opt-in flag."""
    if sure_about_this:
      print("Creating tables")
      self._cur.executescript("""
      DROP TABLE IF EXISTS vector;
      CREATE TABLE vector(id INTEGER PRIMARY KEY,
                          x1ave_ledge INTEGER,
                          redge_x2ave INTEGER,
                          ltcount INTEGER,
                          x1_var1 FLOAT,
                          x1_var2 FLOAT,
                          x2_var1 FLOAT,
                          x2_var2 FLOAT,
                          density FLOAT,
                          st_nums FLOAT,
                          st_caps FLOAT,
                          ave_lsp FLOAT);
      DROP TABLE IF EXISTS item;
      CREATE TABLE item(id INTEGER PRIMARY KEY,
                        newspaper TEXT,
                        year TEXT,
                        month TEXT,
                        day TEXT,
                        page TEXT,
                        article TEXT,
                        block_number TEXT,
                        vector_id INTEGER,
                        FOREIGN KEY(vector_id) REFERENCES vector(id));
      DROP TABLE IF EXISTS cluster;
      CREATE TABLE cluster(id INTEGER PRIMARY KEY,
                           label INTEGER,
                           item_id INTEGER,
                           FOREIGN KEY(item_id) REFERENCES item(id));
      """)
  def vectors(self):
    """Yield every vector row as a plain dict, ordered by id."""
    for md in self._cur.execute("SELECT * from vector ORDER BY id;").fetchall():
      yield dict(md)
  def vector(self, vid):
    """Return the vector row with id *vid* (None when vid is None or absent)."""
    if vid != None:
      r = self._cur.execute("SELECT * FROM vector WHERE id = ? LIMIT 1;", (vid,))
      return r.fetchone()
  def vecidtoitem(self, id = None, **md):
    """Return the item row referencing vector *id* (None when id is None or absent)."""
    if id != None:
      r = self._cur.execute("SELECT * FROM item WHERE vector_id = ? LIMIT 1;", (id,))
      return r.fetchone()
  def add_vector(self, **params):
    """Insert a vector row from keyword args (keys per _vector_keys); return its new id."""
    vlist = ", ".join(self._vector_keys[1:])
    vallist = ", ".join([":{0}".format(x) for x in self._vector_keys[1:]])
    sql = """INSERT INTO vector({0}) VALUES({1});""".format(vlist, vallist)
    self._cur.execute(sql, params)
    id = self._cur.lastrowid
    return id
  def add_item(self, **params):
    """Insert an item row from keyword args (keys per _item_keys); return its new id."""
    vlist = ", ".join(self._item_keys[1:])
    vallist = ", ".join([":{0}".format(x) for x in self._item_keys[1:]])
    self._cur.execute("""INSERT INTO item({0}) VALUES({1});""".format(vlist, vallist), params)
    id = self._cur.lastrowid
    return id
  def add_cluster(self, label, item_id):
    """Insert a cluster assignment (label for one item); return its new id."""
    self._cur.execute("""INSERT INTO cluster(label, item_id) VALUES(?, ?);""", (label, item_id))
    id = self._cur.lastrowid
    return id
def blockarea(a):
  """Pixel area of text block `a`'s bounding box; 0 when no box was found."""
  box = a['box']
  if box == []:
    return 0
  width = box[2] - box[0]
  height = box[3] - box[1]
  return width * height
def get_vectrow(a):
  """Build the clustering feature dict for one OCR text block `a`.

  Side effect: when the block has more than 5 lines, a['box'] is recomputed
  from trimmed per-axis line coordinates (two extremes dropped at each end)
  to smooth over OCR discrepancies.
  """
  stats = a['col_mean_and_var']
  features = {}
  if len(a['lines']) > 5:
    # trim outliers: drop the two smallest and two largest coords per axis
    xs, ys = (sorted(axis)[2:-2] for axis in zip(*a['lines']))
    a['box'] = [min(xs), min(ys), max(xs), max(ys)]  # x1, y1, x2, y2
    features['x1ave_ledge'] = stats['x1'][0] - min(xs)
    features['redge_x2ave'] = max(xs) - stats['x2'][0]
  else:
    features['x1ave_ledge'] = stats['x1'][0] - a['box'][0]
    features['redge_x2ave'] = a['box'][2] - stats['x2'][0]
  features['ltcount'] = a['ltcount']
  features['x1_var1'] = stats['x1'][1]
  features['x1_var2'] = stats['x1'][2]
  features['x2_var1'] = stats['x2'][1]
  features['x2_var2'] = stats['x2'][2]
  features['st_nums'] = a["leading_numbers"]
  features['st_caps'] = a["leading_capitals"]
  # only the first len(lines)-1 spacings pair consecutive lines
  spacings = a['line_widths'][:len(a['lines']) - 1]
  features['ave_lsp'] = float(sum(spacings)) / max(len(spacings), 1)
  area = blockarea(a)
  # letters found by the OCR per pixel of bounding box; 0 for degenerate boxes
  features['density'] = a['ltcount'] / area if area > 0 else 0
  return features
def create_cluster_dataset(n, daterange = None, dbfile = "1744_1756_cluster.db", refresh = False):
  """Build (and cache in sqlite) an sklearn-ready dataset of block features.

  n: NewspaperArchive-like source of newspapers, dates and page areas.
  daterange: [start_year, end_year]; defaults to [1744, 1756].
  refresh: force regeneration even when the cache file exists.
  Returns (feature matrix, fitted DictVectorizer, list of vector-row ids);
  row x of the matrix corresponds to id_list[x] in the cache database.
  """
  # avoid a mutable default argument (was `daterange=[1744, 1756]`)
  if daterange is None:
    daterange = [1744, 1756]
  doc = []
  id_list = []
  if not os.path.isfile(os.path.join(CL_CACHE_FOLDER, dbfile)) or refresh == True:
    print("No cache file found. Creating.")
    db = ClusterDB(dbfile)
    with db:
      # recreate tables:
      db.create_tables(True)
      # run through all entries
      # pull the text areas data for each, create vector and label files:
      # vector: x1ave-l_edge, redge-x2ave, ltcount, x1_var1, x1_var2, x2_var1, x2_var2, wordareas/boxarea
      # label: newspaper, year, month, day, page, article, block_number
      # row x of vector csv is linked to row x of label csv
      for newspaper in n.NEWSPAPERS:
        for md in n.all_available_newspaper_dates(newspaper, daterange):
          areadoc = n.get_areas(**md)
          for page in sorted(areadoc.keys()):
            for art in sorted(areadoc[page].keys()):
              for block_id, block in enumerate(areadoc[page][art]):
                # skip tiny blocks (<= 500 px^2): too little signal to cluster
                if blockarea(block) > 500:
                  vectrow = get_vectrow(block)
                  vect_id = db.add_vector(**vectrow)
                  item_id = db.add_item(page = page, article = art, block_number = block_id, vector_id = vect_id, **md)
  db = ClusterDB(dbfile)
  print("Loading from db cache '{0}'".format(dbfile))
  idx = 0
  for vect in db.vectors():
    # keep only rows with non-zero variance info in both column stats
    if vect['x1_var1'] and vect['x2_var1']:
      id_list.append(vect['id'])
      del vect['id']
      doc.append(vect)
      idx += 1
  # idx is already the number of rows kept (previously logged as idx+1)
  print("{0} vectors loaded.".format(idx))
  transformer = DictVectorizer()
  ds = transformer.fit_transform(doc)
  return ds, transformer, id_list
if __name__ == "__main__":
  # Build (or load from cache) the clustering dataset for the default
  # newspaper date range.
  n = NewspaperArchive()
  dataset, transform, id_list = create_cluster_dataset(n)
| 37.146597 | 119 | 0.582523 | 3,513 | 0.495137 | 120 | 0.016913 | 0 | 0 | 0 | 0 | 3,075 | 0.433404 |
7547a91dafab9c7b229fe925a2917befdb556f92 | 158 | py | Python | argentum-api/api/tests/utils/utils.py | devium/argentum | 2bbb0f663fe9be78d106b1afa409b094da449519 | [
"MIT"
] | 1 | 2019-10-07T09:47:08.000Z | 2019-10-07T09:47:08.000Z | argentum-api/api/tests/utils/utils.py | devium/argentum | 2bbb0f663fe9be78d106b1afa409b094da449519 | [
"MIT"
] | null | null | null | argentum-api/api/tests/utils/utils.py | devium/argentum | 2bbb0f663fe9be78d106b1afa409b094da449519 | [
"MIT"
] | null | null | null | import datetime
def to_iso_format(time: datetime.datetime) -> str:
    """Serialize *time* as a UTC ISO-8601 string with a 'Z' suffix.

    Any existing tzinfo is overwritten with UTC (the naive clock value
    is kept as-is, not converted).
    """
    utc_time = time.replace(tzinfo=datetime.timezone.utc)
    return utc_time.isoformat().replace('+00:00', 'Z')
| 26.333333 | 88 | 0.734177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.06962 |
75480369bf7b93d4d8d97e9b33d6e4258ac7f12f | 2,625 | py | Python | day16/solve-16a.py | ggchappell/AdventOfCode2020 | d79a40eb667dad33ec34d4715176dfe03b38816b | [
"MIT"
] | null | null | null | day16/solve-16a.py | ggchappell/AdventOfCode2020 | d79a40eb667dad33ec34d4715176dfe03b38816b | [
"MIT"
] | null | null | null | day16/solve-16a.py | ggchappell/AdventOfCode2020 | d79a40eb667dad33ec34d4715176dfe03b38816b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Advent of Code 2020
# Glenn G. Chappell
import sys # .stdin
import re # .search
# ======================================================================
# HELPER FUNCTIONS
# ======================================================================
def does_rule_allow_num(rule, num):
    """Return True iff *num* lies in either of the rule's two inclusive ranges.

    A rule is a list [field_name, lo1, hi1, lo2, hi2].
    """
    assert isinstance(rule, list)
    assert len(rule) == 5
    assert isinstance(rule[0], str)
    for bound in rule[1:]:
        assert isinstance(bound, int)
    _name, lo1, hi1, lo2, hi2 = rule
    assert lo1 <= hi1
    assert lo2 <= hi2
    assert isinstance(num, int)
    return lo1 <= num <= hi1 or lo2 <= num <= hi2
def check_valid_ticket(rules, ticket):
    """Return every number in *ticket* allowed by no rule.

    An empty return list means the ticket is valid according to the
    given rules.
    """
    assert isinstance(rules, list)
    assert isinstance(ticket, list)
    return [num for num in ticket
            if not any(does_rule_allow_num(rule, num) for rule in rules)]
# ======================================================================
# MAIN PROGRAM
# ======================================================================
# *** Process Input ***
# Read rules: one "name: a-b or c-d" line per rule, terminated by a blank line.
rules = []
for line in sys.stdin:
    line = line.rstrip()
    if not line:
        break
    # Field name is lowercase words; two inclusive integer ranges follow.
    re1 = r"^([a-z][a-z ]*[a-z]|[a-z]): (\d+)-(\d+) or (\d+)-(\d+)$"
    mo1 = re.search(re1, line)
    assert mo1
    # Rule representation: [name, lo1, hi1, lo2, hi2]
    the_rule = [mo1[1],
                int(mo1[2]), int(mo1[3]), int(mo1[4]), int(mo1[5])]
    rules.append(the_rule)
rules_count = len(rules)
# Read your ticket: a header line, one CSV line of numbers, then a blank line.
line = sys.stdin.readline()
line = line.rstrip()
assert line == "your ticket:"
line = sys.stdin.readline()
line = line.rstrip()
ticket_strs = line.split(",")
your_ticket = [ int(n_str) for n_str in ticket_strs ]
# Every ticket must carry exactly one value per rule.
assert len(your_ticket) == rules_count
line = sys.stdin.readline()
line = line.rstrip()
assert not line
# Read nearby tickets: a header line, then one CSV line per ticket to EOF.
line = sys.stdin.readline()
line = line.rstrip()
assert line == "nearby tickets:"
nearby_tickets = []
for line in sys.stdin:
    line = line.rstrip()
    assert line
    ticket_strs = line.split(",")
    the_ticket = [ int(n_str) for n_str in ticket_strs ]
    assert len(the_ticket) == rules_count
    nearby_tickets.append(the_ticket)
# *** Do Computation ***
# Collect every value, across all nearby tickets, that no rule allows.
all_invalid_numbers = []
for nt in nearby_tickets:
    invalid_numbers = check_valid_ticket(rules, nt)
    all_invalid_numbers += invalid_numbers
# *** Print Answer ***
# The puzzle answer is the "ticket scanning error rate": sum of invalid values.
print(f"Answer: {sum(all_invalid_numbers)}")
| 25.735294 | 72 | 0.566476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 813 | 0.309714 |
7548e64006c12b84e95b327958f7a42ffbcf5097 | 2,658 | py | Python | code/syn.py | ron-rivest/audit-lab | b8f1478035db04afa2c5e5349b9666fd8df5976e | [
"MIT"
] | 3 | 2018-09-12T03:06:28.000Z | 2019-05-04T06:45:54.000Z | code/syn.py | YAXINLEI/audit-lab | b8f1478035db04afa2c5e5349b9666fd8df5976e | [
"MIT"
] | 11 | 2017-09-19T18:23:02.000Z | 2018-07-06T19:08:49.000Z | code/syn.py | YAXINLEI/audit-lab | b8f1478035db04afa2c5e5349b9666fd8df5976e | [
"MIT"
] | 23 | 2017-09-05T17:09:59.000Z | 2019-12-15T19:55:53.000Z | # syn.py
# Ronald L. Rivest (with Karim Husayn Karimi)
# August 3, 2017
# python3
"""
Routines to generate a synthetic test election dataset for OpenAuditTool.py.
Calls data generation routines in syn1.py for elections "of type 1",
and calls routines in syn2.py for elections "of type 2".
"""
import numpy as np
import cli_syn
import OpenAuditTool
class Syn_Params(object):
    """Empty attribute bag for synthetic-election generation parameters."""
##############################################################################
## random choices
def geospace(start, stop, num=7):
    """
    Return a sorted list of up to `num` distinct integers between
    `start` and `stop` inclusive, geometrically spaced.

    Similar in spirit to numpy.linspace, but with geometric rather than
    linear spacing, integer results only, and duplicates collapsed.

    >>> geospace(1, 64)
    [1, 2, 4, 8, 16, 32, 64]

    >>> geospace(0,1)
    [0, 1]

    >>> geospace(0,10)
    [0, 1, 2, 3, 5, 7, 10]

    >>> geospace(20, 10000)
    [20, 56, 159, 447, 1260, 3550, 10000]

    Twelve-tone equal temperament scale
    >>> geospace(1000, 2000, num=13)
    [1000, 1059, 1122, 1189, 1260, 1335, 1414, 1498, 1587, 1682, 1782, 1888, 2000]

    This should presumably be replaced by numpy.logspace !
    (although duplicates need to be removed...)
    """
    values = {start, stop}
    # Clamp the geometric base to at least 1 so the ratio is well-defined.
    base = max(start, 1)
    ratio = stop / base
    for step in range(1, num - 1):
        values.add(int(np.rint(base * ratio ** (step / (num - 1)))))
    return sorted(values)
def geospace_choice(e, syn, start, stop, num=7):
    """
    Return one element of geospace(start, stop, num), chosen at random
    via syn.RandomState.
    """
    candidates = geospace(start, stop, num)
    return syn.RandomState.choice(candidates)
def generate_segments(e, syn, low, high):
    """
    Return a list of (high - low) random segments (r, s) with low <= r < s <= high.

    Since r < s, segments of the form (k, k) are never produced.
    The intent is that cids are integers with low <= cid <= high, and each
    segment yields a contest group covering cids r..s (inclusive).
    The segments "nest": any two are either disjoint, equal, or one
    contains the other. Randomness comes from syn.RandomState.
    """
    assert low <= high
    if low == high:
        return []
    split = syn.RandomState.choice(range(low, high))
    return ([(low, high)]
            + generate_segments(e, syn, low, split)
            + generate_segments(e, syn, split + 1, high))
if __name__=="__main__":
    # Script entry point: build an empty election container, then parse
    # command-line arguments and dispatch the requested synthesis action.
    e = OpenAuditTool.Election()
    args = cli_syn.parse_args()
    cli_syn.dispatch(e, args)
| 26.316832 | 82 | 0.623401 | 109 | 0.041008 | 0 | 0 | 0 | 0 | 0 | 0 | 1,771 | 0.66629 |
754958a9d123ff339cd26e0e2295e31acd2b6baa | 4,911 | py | Python | src/imagenet/imagenet_train_darknet.py | wenxichen/tensorflow_yolo2 | f040d9932816d8b2f8d7a67231060f0beea821d4 | [
"MIT"
] | 25 | 2017-05-15T08:44:26.000Z | 2019-09-05T05:23:59.000Z | src/imagenet/imagenet_train_darknet.py | wenxichen/tensorflow_yolo2 | f040d9932816d8b2f8d7a67231060f0beea821d4 | [
"MIT"
] | 5 | 2017-05-16T07:18:47.000Z | 2018-02-14T08:22:56.000Z | src/imagenet/imagenet_train_darknet.py | wenxichen/tensorflow_yolo2 | f040d9932816d8b2f8d7a67231060f0beea821d4 | [
"MIT"
] | 10 | 2017-07-03T13:27:27.000Z | 2018-11-21T13:10:16.000Z | """Train ILSVRC2017 Data using homemade scripts."""
import cv2
import os
import math
import tensorflow as tf
from multiprocessing import Process, Queue
import os
import sys
FILE_DIR = os.path.dirname(__file__)
sys.path.append(FILE_DIR + '/../')
import config as cfg
from img_dataset.ilsvrc2017_cls_multithread import ilsvrc_cls
from yolo2_nets.darknet import darknet19
from yolo2_nets.net_utils import get_ordered_ckpts
from utils.timer import Timer
slim = tf.contrib.slim
def get_validation_process(imdb, queue_in, queue_out):
"""Get validation dataset. Run in a child process."""
while True:
queue_in.get()
images, labels = imdb.get()
queue_out.put([images, labels])
imdb = ilsvrc_cls('train', data_aug=True, multithread=cfg.MULTITHREAD)
val_imdb = ilsvrc_cls('val', batch_size=64)
# set up child process for getting validation data
queue_in = Queue()
queue_out = Queue()
val_data_process = Process(target=get_validation_process,
args=(val_imdb, queue_in, queue_out))
val_data_process.start()
queue_in.put(True) # start getting the first batch
CKPTS_DIR = cfg.get_ckpts_dir('darknet19', imdb.name)
TENSORBOARD_TRAIN_DIR, TENSORBOARD_VAL_DIR = cfg.get_output_tb_dir(
'darknet19', imdb.name)
input_data = tf.placeholder(tf.float32, [None, 224, 224, 3])
label_data = tf.placeholder(tf.int32, None)
is_training = tf.placeholder(tf.bool)
logits = darknet19(input_data, is_training=is_training)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_data, logits=logits)
loss = tf.reduce_mean(loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# train_op = tf.train.AdamOptimizer(0.0005).minimize(loss)
train_op = tf.train.MomentumOptimizer(0.001, 0.9).minimize(loss)
correct_pred = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), label_data)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.summary.scalar('loss', loss)
tf.summary.scalar('accuracy', accuracy)
######################
# Initialize Session #
######################
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(TENSORBOARD_TRAIN_DIR)
val_writer = tf.summary.FileWriter(TENSORBOARD_VAL_DIR)
# # initialize variables, assume all vars are new now
# init_op = tf.global_variables_initializer()
# sess.run(init_op)
# load previous models
ckpts = get_ordered_ckpts(sess, imdb, 'darknet19')
variables_to_restore = slim.get_variables_to_restore()
# # change optimizer
# print('Initializing variables for the new optimizer')
# optimzer_vars = [var for var in tf.global_variables()
# if "Momentum" in var.name]
# init_op = tf.variables_initializer(optimzer_vars)
# sess.run(init_op)
# for var in optimzer_vars:
# if var in variables_to_restore:
# variables_to_restore.remove(var)
print('Restorining model snapshots from {:s}'.format(ckpts[-1]))
old_saver = tf.train.Saver(variables_to_restore)
old_saver.restore(sess, str(ckpts[-1]))
print('Restored.')
fnames = ckpts[-1].split('_')
old_epoch = int(fnames[-1][:-5])
imdb.epoch = old_epoch + 1
# simple model saver
cur_saver = tf.train.Saver()
T = Timer()
for i in range(imdb.total_batch * 10 + 1):
T.tic()
images, labels = imdb.get()
_, loss_value, acc_value, train_summary = sess.run(
[train_op, loss, accuracy, merged], {input_data: images, label_data: labels, is_training: 1})
_time = T.toc(average=False)
print('epoch {:d}, iter {:d}/{:d}, training loss: {:.3}, training acc: {:.3}, take {:.2}s'
.format(imdb.epoch, (i + 1) % imdb.total_batch,
imdb.total_batch, loss_value, acc_value, _time))
if (i + 1) % 25 == 0:
T.tic()
val_images, val_labels = queue_out.get()
val_loss_value, val_acc_value, val_summary = sess.run(
[loss, accuracy, merged], {input_data: val_images, label_data: val_labels, is_training: 0})
_val_time = T.toc(average=False)
print('###validation loss: {:.3}, validation acc: {:.3}, take {:.2}s'
.format(val_loss_value, val_acc_value, _val_time))
queue_in.put(True)
global_step = imdb.epoch * imdb.total_batch + (i % imdb.total_batch)
train_writer.add_summary(train_summary, global_step)
val_writer.add_summary(val_summary, global_step)
if (i % (imdb.total_batch * 2) == 0):
save_path = cur_saver.save(sess, os.path.join(
CKPTS_DIR,
cfg.TRAIN_SNAPSHOT_PREFIX + '_epoch_' + str(imdb.epoch - 1) + '.ckpt'))
print("Model saved in file: %s" % save_path)
# terminate child processes
if cfg.MULTITHREAD:
imdb.close_all_processes()
queue_in.cancel_join_thread()
queue_out.cancel_join_thread()
val_data_process.terminate()
| 34.342657 | 103 | 0.707799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,154 | 0.234983 |
754b6ed304a3a91854108f6ce34b4157cf24597b | 6,519 | py | Python | pycalc/MAVProxy/modules/lib/ANUGA/redfearn.py | joakimzhang/python-electron | 79bc174a14c5286ca739bb7d8ce6522fdc6e9e80 | [
"CC0-1.0"
] | null | null | null | pycalc/MAVProxy/modules/lib/ANUGA/redfearn.py | joakimzhang/python-electron | 79bc174a14c5286ca739bb7d8ce6522fdc6e9e80 | [
"CC0-1.0"
] | 8 | 2021-01-28T19:26:22.000Z | 2022-03-24T18:07:24.000Z | pycalc/MAVProxy/modules/lib/ANUGA/redfearn.py | joakimzhang/python-electron | 79bc174a14c5286ca739bb7d8ce6522fdc6e9e80 | [
"CC0-1.0"
] | null | null | null | """
This module adapted ANUGA
https://anuga.anu.edu.au/
------------
Implementation of Redfearn's formula to compute UTM projections from latitude and longitude
Based in part on spreadsheet
www.icsm.gov.au/gda/gdatm/redfearn.xls
downloaded from INTERGOVERNMENTAL COMMITTEE ON SURVEYING & MAPPING (ICSM)
http://www.icsm.gov.au/icsm/
"""
from geo_reference import Geo_reference, DEFAULT_ZONE
def degminsec2decimal_degrees(dd, mm, ss):
    """Convert degrees/minutes/seconds to decimal degrees.

    Only `dd` may carry a sign; `mm` and `ss` must be non-negative.
    The sign of `dd` is applied to the whole result.
    """
    assert abs(mm) == mm
    assert abs(ss) == ss
    magnitude = abs(dd) + mm/60. + ss/3600.
    return -magnitude if dd < 0 else magnitude
def decimal_degrees2degminsec(dec):
    """Convert decimal degrees to a (degrees, minutes, seconds) tuple.

    The sign is carried on the degrees component only; minutes and
    seconds are always non-negative.
    NOTE(review): when the degrees component is zero (e.g. -0.5), the
    sign is lost because -0 == 0 for integers — confirm callers never
    rely on sub-degree negative values.
    """
    negative = dec < 0
    dec = abs(dec)
    dd = int(dec)
    minutes_float = (dec - dd) * 60
    mm = int(minutes_float)
    ss = (minutes_float - mm) * 60
    return (-dd if negative else dd), mm, ss
def redfearn(lat, lon, false_easting=None, false_northing=None,
             zone=None, central_meridian=None, scale_factor=None):
    """Compute a UTM (Transverse Mercator) projection using Redfearn's formula.

    lat, lon: latitude and longitude in decimal degrees.

    false_easting / false_northing: override the standard UTM offsets
    (500,000 m easting; 10,000,000 m northing in the southern hemisphere,
    0 in the northern).

    zone: reproject lat/lon to the specified UTM zone instead of the
    natural zone of the point.

    central_meridian: reproject relative to this meridian instead of a
    zone's; the returned zone is then -1 to flag a non-UTM projection.
    zone and central_meridian cannot both be specified.

    scale_factor: override the standard central scale factor K0 = 0.9996.

    Returns (zone, easting, northing).
    """
    from math import pi, sqrt, sin, cos, tan

    # GDA specifications (GRS80 ellipsoid)
    a = 6378137.0                       # Semi major axis (m)
    inverse_flattening = 298.257222101  # 1/f
    if scale_factor is None:
        K0 = 0.9996                     # Central scale factor
    else:
        K0 = scale_factor
    zone_width = 6                      # Degrees
    longitude_of_central_meridian_zone0 = -183
    longitude_of_western_edge_zone0 = -186

    if false_easting is None:
        false_easting = 500000
    if false_northing is None:
        if lat < 0:
            false_northing = 10000000   # Southern hemisphere
        else:
            false_northing = 0          # Northern hemisphere

    # Derived ellipsoid constants
    f = 1.0/inverse_flattening
    b = a*(1-f)       # Semi minor axis
    e2 = 2*f - f*f    # = f*(2-f) = (a^2-b^2)/a^2  Eccentricity squared
    e = sqrt(e2)
    e4 = e2*e2
    e6 = e2*e4

    # Powers of trig functions of the latitude
    phi = lat*pi/180  # Latitude in radians
    sinphi = sin(phi)
    sin2phi = sin(2*phi)
    sin4phi = sin(4*phi)
    sin6phi = sin(6*phi)
    cosphi = cos(phi)
    cosphi2 = cosphi*cosphi
    cosphi3 = cosphi*cosphi2
    cosphi4 = cosphi2*cosphi2
    cosphi5 = cosphi*cosphi4
    cosphi6 = cosphi2*cosphi4
    cosphi7 = cosphi*cosphi6
    t = tan(phi)
    t2 = t*t
    t4 = t2*t2
    t6 = t2*t4

    # Radii of curvature
    rho = a*(1-e2)/(1-e2*sinphi*sinphi)**1.5   # Meridional
    nu = a/(1-e2*sinphi*sinphi)**0.5           # Prime vertical
    psi = nu/rho
    psi2 = psi*psi
    psi3 = psi*psi2
    psi4 = psi2*psi2

    # Meridian distance m (series expansion)
    A0 = 1 - e2/4 - 3*e4/64 - 5*e6/256
    A2 = 3.0/8*(e2+e4/4+15*e6/128)
    A4 = 15.0/256*(e4+3*e6/4)
    A6 = 35*e6/3072
    term1 = a*A0*phi
    term2 = -a*A2*sin2phi
    term3 = a*A4*sin4phi
    term4 = -a*A6*sin6phi
    m = term1 + term2 + term3 + term4

    if zone is not None and central_meridian is not None:
        msg = 'You specified both zone and central_meridian. Provide only one of them'
        # FIX: "raise Exception, msg" is Python-2-only syntax; the call form
        # below is valid on both Python 2 and Python 3.
        raise Exception(msg)

    # Zone
    if zone is None:
        zone = int((lon - longitude_of_western_edge_zone0)/zone_width)

    # Central meridian
    if central_meridian is None:
        central_meridian = zone*zone_width+longitude_of_central_meridian_zone0
    else:
        zone = -1   # Flag a non-standard (non-UTM) projection

    omega = (lon-central_meridian)*pi/180   # Relative longitude (radians)
    omega2 = omega*omega
    omega3 = omega*omega2
    omega4 = omega2*omega2
    omega5 = omega*omega4
    omega6 = omega3*omega3
    omega7 = omega*omega6
    omega8 = omega4*omega4

    # Northing
    term1 = nu*sinphi*cosphi*omega2/2
    term2 = nu*sinphi*cosphi3*(4*psi2+psi-t2)*omega4/24
    term3 = nu*sinphi*cosphi5*(8*psi4*(11-24*t2) - 28*psi3*(1-6*t2)
                               + psi2*(1-32*t2) - psi*2*t2 + t4 - t2)*omega6/720
    term4 = nu*sinphi*cosphi7*(1385-3111*t2+543*t4-t6)*omega8/40320
    northing = false_northing + K0*(m + term1 + term2 + term3 + term4)

    # Easting
    term1 = nu*omega*cosphi
    term2 = nu*cosphi3*(psi-t2)*omega3/6
    term3 = nu*cosphi5*(4*psi3*(1-6*t2)+psi2*(1+8*t2)-2*psi*t2+t4)*omega5/120
    term4 = nu*cosphi7*(61-479*t2+179*t4-t6)*omega7/5040
    easting = false_easting + K0*(term1 + term2 + term3 + term4)

    return zone, easting, northing
def convert_from_latlon_to_utm(points=None,
                               latitudes=None,
                               longitudes=None,
                               false_easting=None,
                               false_northing=None):
    """Convert latitude and longitude data to UTM as a list of coordinates.

    Input

    points: list of points given in decimal degrees (latitude, longitude) or
    latitudes: list of latitudes and
    longitudes: list of longitudes
    false_easting (optional)
    false_northing (optional)

    Output

    points: List of converted points
    zone:   Common UTM zone for converted points

    Notes

    Assume the false_easting and false_northing are the same for each list.
    If points end up in different UTM zones, an ANUGA error is thrown
    (by Geo_reference.reconcile_zones).
    """
    old_geo = Geo_reference()
    utm_points = []
    # FIX: use "is None" (identity test) rather than "== None", and replace
    # the Python-2-only "map(None, a, b)" zip idiom with list(zip(...)),
    # which behaves identically on both Python 2 and 3 for equal-length lists
    # (the assertion guarantees equal lengths).
    if points is None:
        assert len(latitudes) == len(longitudes)
        points = list(zip(latitudes, longitudes))
    for point in points:
        zone, easting, northing = redfearn(float(point[0]),
                                           float(point[1]),
                                           false_easting=false_easting,
                                           false_northing=false_northing)
        new_geo = Geo_reference(zone)
        # Raises if this point's zone conflicts with earlier points' zone.
        old_geo.reconcile_zones(new_geo)
        utm_points.append([easting, northing])
    return utm_points, old_geo.get_zone()
| 26.717213 | 91 | 0.595644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,906 | 0.292376 |
754d89e883dc7be548805f3db911539c49829b0b | 2,107 | py | Python | scikit/sk_random_forest_pca.py | abondar24/deepLearnPython | 9325cd18458f66f9d90ebd044fb4c8b2c6d8abc0 | [
"Apache-2.0"
] | 1 | 2018-03-28T02:30:12.000Z | 2018-03-28T02:30:12.000Z | scikit/sk_random_forest_pca.py | abondar24/deepLearnPython | 9325cd18458f66f9d90ebd044fb4c8b2c6d8abc0 | [
"Apache-2.0"
] | null | null | null | scikit/sk_random_forest_pca.py | abondar24/deepLearnPython | 9325cd18458f66f9d90ebd044fb4c8b2c6d8abc0 | [
"Apache-2.0"
] | 2 | 2017-03-03T14:49:00.000Z | 2018-03-28T02:30:16.000Z | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def plot_decision_regions(x, y, classifier, resolution=0.02):
    """Plot a classifier's 2-D decision surface and overlay the samples.

    x: array of shape (n_samples, 2) — exactly two features are plotted.
    y: class labels for each sample.
    classifier: fitted estimator exposing .predict().
    resolution: grid spacing for the decision-surface mesh.
    """
    # Set up marker generator and color map (supports up to 5 classes).
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # Plot the decision surface over a mesh covering the data (±1 margin).
    x1_min, x1_max = x[:, 0].min() - 1, x[:, 0].max() + 1
    x2_min, x2_max = x[:, 1].min() - 1, x[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    z = z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    # FIX: the second axis limit must be set on the y axis; the original
    # called plt.xlim twice, clobbering the x limits and never setting y.
    plt.ylim(xx2.min(), xx2.max())
    # Overlay the class samples, one scatter call per class.
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=x[y == cl, 0], y=x[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)
# Load the UCI Wine dataset (no header row; column 0 is the class label).
wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
x, y = wine.iloc[:, 1:].values, wine.iloc[:, 0].values
# NOTE(review): sklearn.cross_validation (imported above) was removed in
# scikit-learn 0.20; modern code imports train_test_split from
# sklearn.model_selection instead.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
# Standardize features: fit the scaler on the training set only.
stdsc = StandardScaler()
x_train_std = stdsc.fit_transform(x_train)
x_test_std = stdsc.transform(x_test)
pca = PCA(n_components=2)
lr = LogisticRegression()
x_train_pca = pca.fit_transform(x_train_std)
# FIX: the test set must be projected with the components fitted on the
# training set. The original called fit_transform here, which refit PCA on
# the test data and produced an inconsistent projection.
x_test_pca = pca.transform(x_test_std)
lr.fit(x_train_pca, y_train)
# Visualize decision regions in the 2-D principal-component space: training set...
plot_decision_regions(x_train_pca, y_train, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
# ...and test set.
plot_decision_regions(x_test_pca, y_test, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
7550b253d74cbd404e4a92a149749a7cdd341199 | 15,189 | py | Python | Merge_CROME_LCM_PHI.py | nismod/natural-capital-mapping | f388fee3c2592bf99eb628b4d4743bf9be3d4346 | [
"MIT"
] | 1 | 2021-03-31T00:35:00.000Z | 2021-03-31T00:35:00.000Z | Merge_CROME_LCM_PHI.py | nismod/natural-capital-mapping | f388fee3c2592bf99eb628b4d4743bf9be3d4346 | [
"MIT"
] | null | null | null | Merge_CROME_LCM_PHI.py | nismod/natural-capital-mapping | f388fee3c2592bf99eb628b4d4743bf9be3d4346 | [
"MIT"
] | 2 | 2020-01-30T09:40:36.000Z | 2021-04-16T09:36:40.000Z | # Starts from OS Mastermap base map and:
# 1. Assigns CEH Landcover map (LCM) definition of either Arable or Improved grassland to agricultural land polygons
# 2. Assigns Rural Payments Agency CROME Crop map data (input must be dissolved by land use code and joined to description
# and simplified description (Arable, Improved grassland, Short-rotation coppice)
# 3. Assigns Natural England Priority Habitat data.
# Set up to loop through a set of Local Authority Districts
# -----------------------------------------------------------------------------------------------------------------
import time
import arcpy
import os
import MyFunctions
arcpy.CheckOutExtension("Spatial")
print(''.join(["## Started on : ", time.ctime()]))
arcpy.env.overwriteOutput = True # Overwrites files
arcpy.env.qualifiedFieldNames = False # Joined fields will be exported without the join table name
arcpy.env.XYTolerance = "0.001 Meters"
# Region/method switches select the input geodatabases, field names and LADs.
# region = "Oxon"
region = "Arc"
# method = "HLU"
method = "CROME_LCM_PHI"
if method == "CROME_LCM_PHI":
    folder = r"C:\Users\cenv0389\Documents\Oxon_GIS\OxCamArc"
    if region == "Arc":
        LADs_included = ["Bedfordshire", "Buckinghamshire", "Cambridgeshire", "Northamptonshire"]
        Hab_field = "Interpreted_habitat"
    elif region == "Oxon":
        LADs_included = ["Oxfordshire"]
        Hab_field = "Interpreted_Habitat"
    data_gdb = os.path.join(folder, "Data\Data.gdb")
    LAD_table = os.path.join(data_gdb, "Arc_LADs")
    CROME_data = os.path.join(data_gdb, "CROME_Arc_dissolve")
elif region == "Oxon" and method == "HLU":
    # Operate in the Oxon_county folder
    folder = r"C:\Users\cenv0389\Documents\Oxon_GIS\Oxon_county\Data"
    data_gdb = os.path.join(folder, "Data.gdb")
    LAD_table = os.path.join(folder, "Data.gdb", "Oxon_LADs")
    CROME_data = os.path.join(data_gdb, "CROME_Oxon_dissolve")
    Hab_field = "BAP_Habitat"
else:
    print("ERROR: you cannot combine region " + region + " with method " + method)
    exit()
LAD_names = []
# OSMM attribute fields that must be retained on the working feature classes.
needed_fields = ["TOID", "Theme", "DescriptiveGroup", "DescriptiveTerm", "Make", "OSMM_hab"]
# What method are we using to create the base map? Merge or intersect? This affects the processing stages used.
# merge_or_intersect = "intersect"
merge_or_intersect = "merge"
# Which stages of the code do we want to run? Depends on whether we are using merge or intersect to create the base map,
# as the merge is a two-stage process in which this script is called twice. Also useful for debugging or updates.
if merge_or_intersect == "intersect":
    process_LCM = False
    process_CROME = True
    process_PHI = True
    delete_landform = False
    intersect_PHI = False
    interpret_PHI = True
    out_fc = "OSMM_LCM_PHI_intersect"
elif merge_or_intersect == "merge":
    # Change step = 1 to step = 2 after running Merge_into_base_map to merge OSMM_LCM with PHI
    step = 2
    if step == 1:
        process_LCM = True
        process_CROME = True
        process_PHI = True
        delete_landform = True
        intersect_PHI = False
        interpret_PHI = False
    elif step == 2:
        process_LCM = False
        process_CROME = False
        process_PHI = True
        delete_landform = False
        intersect_PHI = False
        interpret_PHI = True
        out_fc = "OSMM_LCM_PHI_merge"
# Build the list of LAD names to process: rows in LAD_table whose county is
# in LADs_included, with spaces stripped to match the per-LAD gdb file names.
arcpy.env.workspace = data_gdb
LADs = arcpy.SearchCursor(os.path.join(data_gdb, LAD_table))
for LAD in LADs:
    LAD_full_name = LAD.getValue("desc_")
    LAD_county = LAD.getValue("county")
    if LAD_county in LADs_included:
        LAD_name = LAD_full_name.replace(" ", "")
        LAD_names.append(LAD_name)
# Now process each LAD gdb
# Use CEH LCM to determine whether OSMM 'Agricultural land' is arable or improved grassland.
if process_LCM:
    for LAD in LAD_names:
        print ("Processing " + LAD)
        arcpy.env.workspace = os.path.join(folder, LAD + ".gdb")
        print("Copying OSMM to OSMM_LCM")
        arcpy.CopyFeatures_management("OSMM", "OSMM_LCM")
        print ("Adding LCM farmland interpretation to " + LAD)
        # Strip all attribute fields except the ones listed in needed_fields.
        MyFunctions.delete_fields("OSMM_LCM", needed_fields, "")
        print (" Adding habitat fields")
        MyFunctions.check_and_add_field("OSMM_LCM", "LCM_farmland", "TEXT", 100)
        MyFunctions.check_and_add_field("OSMM_LCM", Hab_field, "TEXT", 100)
        # Seed the interpreted habitat from the OSMM habitat classification.
        arcpy.CalculateField_management("OSMM_LCM", Hab_field, "!OSMM_hab!", "PYTHON_9.3")
        print (" Identifying arable land")
        # Agricultural/natural-surface polygons whose centroid falls in LCM arable.
        arcpy.MakeFeatureLayer_management("OSMM_LCM", "ag_lyr")
        arcpy.SelectLayerByAttribute_management("ag_lyr", where_clause="OSMM_hab = 'Agricultural land' OR OSMM_hab = 'Natural surface'")
        arcpy.SelectLayerByLocation_management("ag_lyr", "HAVE_THEIR_CENTER_IN", "LCM_arable", selection_type="SUBSET_SELECTION")
        arcpy.CalculateField_management("ag_lyr","LCM_farmland", "'Arable'", "PYTHON_9.3")
        arcpy.CalculateField_management("ag_lyr", Hab_field, "'Arable'", "PYTHON_9.3")
        arcpy.Delete_management("ag_lyr")
        print (" Identifying improved grassland")
        # Same selection logic, but against the LCM improved-grassland layer.
        arcpy.MakeFeatureLayer_management("OSMM_LCM", "ag_lyr2")
        arcpy.SelectLayerByAttribute_management("ag_lyr2", where_clause="OSMM_hab = 'Agricultural land' OR OSMM_hab = 'Natural surface'")
        arcpy.SelectLayerByLocation_management("ag_lyr2", "HAVE_THEIR_CENTER_IN", "LCM_improved_grassland",
                                               selection_type="SUBSET_SELECTION")
        arcpy.CalculateField_management("ag_lyr2", "LCM_farmland", "'Improved grassland'", "PYTHON_9.3")
        arcpy.Delete_management("ag_lyr2")
        # Set interpreted habitat to Improved grassland if this is 'agricultural land'or Amenity grassland if this is 'Natural surface'
        # unless it is railside (do not want to flag this as amenity grassland because it is not generally accessible)
        expression = "LCM_farmland = 'Improved grassland' AND " + Hab_field + " = 'Agricultural land'"
        MyFunctions.select_and_copy("OSMM_LCM", Hab_field, expression, "'Improved grassland'")
        expression = "LCM_farmland = 'Improved grassland' AND " + Hab_field + " = 'Natural surface' AND DescriptiveGroup <> 'Rail'"
        MyFunctions.select_and_copy("OSMM_LCM", Hab_field, expression, "'Amenity grassland'")
    print(''.join(["## Finished on : ", time.ctime()]))
# Add crop type from CROME map, but only for agricultural land. This is probably better data then LCM and is freely available.
# This assumes we are adding CROME after adding LCM (so the Interpreted habitat field is already added and populated in the process_LCM
# step above), but in fact it is probably best just to use CROME (once we have tested vs LCM), so need to modify this step to include
# adding the interpreted habitat field
if process_CROME:
    for LAD in LAD_names:
        print ("Processing " + LAD)
        arcpy.env.workspace = os.path.join(folder, LAD + ".gdb")
        in_map = "OSMM_LCM"
        out_map = in_map + "_CROME"
        print("Copying " + in_map + " to " + out_map)
        arcpy.CopyFeatures_management(in_map, out_map)
        print ("Adding CROME farmland interpretation to " + LAD)
        print (" Adding habitat fields")
        MyFunctions.check_and_add_field(out_map, "CROME_farmland", "TEXT", 50)
        print(" Copying OBJECTID for base map")
        # BaseID_CROME records each polygon's OBJECTID so the tabulate-intersection
        # results can be joined back to the base map later.
        MyFunctions.check_and_add_field(out_map, "BaseID_CROME", "LONG", 0)
        arcpy.CalculateField_management(out_map, "BaseID_CROME", "!OBJECTID!", "PYTHON_9.3")
        print (" Identifying farmland")
        arcpy.MakeFeatureLayer_management(out_map, "ag_lyr")
        expression = "Interpreted_hab IN ('Agricultural land', 'Natural surface') OR Interpreted_hab LIKE 'Arable%'"
        expression = expression + " OR Interpreted_hab LIKE 'Improved grassland%'"
        arcpy.SelectLayerByAttribute_management("ag_lyr", where_clause=expression)
        print(" Calculating percentage of farmland features within CROME polygons")
        arcpy.TabulateIntersection_analysis(CROME_data, ["LUCODE", "Land Use Description", "field", "Shape_Area"],
                                            "ag_lyr", "CROME_TI", ["BaseID_CROME", Hab_field, "Shape_Area"])
        print(" Sorting TI table by size so that larger intersections are first in the list")
        arcpy.Sort_management("CROME_TI", "CROME_TI_sort", [["AREA", "ASCENDING"]])
        print (" Adding fields for CROME data")
        MyFunctions.check_and_add_field(out_map, "CROME_desc", "TEXT", 50)
        MyFunctions.check_and_add_field(out_map, "CROME_simple", "TEXT", 30)
        print (" Joining CROME info for base map polygons that are >50% inside CROME polygons")
        arcpy.AddJoin_management("ag_lyr", "BaseID_CROME", "CROME_TI_sort", "BaseID_CROME", "KEEP_ALL")
        print(" Copying CROME data")
        # Only accept a CROME classification when the intersection covers >50%
        # of the base-map polygon.
        arcpy.SelectLayerByAttribute_management("ag_lyr", where_clause="CROME_TI_sort.PERCENTAGE > 50")
        arcpy.CalculateField_management("ag_lyr", out_map + ".CROME_desc", "!CROME_TI_sort.Land Use Description!", "PYTHON_9.3")
        arcpy.CalculateField_management("ag_lyr", out_map + ".CROME_simple", "!CROME_TI_sort.field!", "PYTHON_9.3")
        # Remove the join
        arcpy.RemoveJoin_management("ag_lyr", "CROME_TI_sort")
        arcpy.Delete_management("ag_lyr")
        # Set interpreted habitat to Improved grassland if this is 'agricultural land'or Amenity grassland if this is 'Natural surface'
        # unless it is railside (do not want to flag this as amenity grassland because it is not generally accessible)
        expression = "CROME_desc IN ('Grass', 'Fallow Land') AND " + Hab_field + " IN ('Agricultural land', 'Arable')"
        MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Improved grassland'")
        expression = "CROME_desc IN ('Grass', 'Fallow Land') AND " + Hab_field + " = 'Natural surface' AND DescriptiveGroup <> 'Rail'"
        MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Amenity grassland'")
        expression = "CROME_desc = 'Arable' AND " + Hab_field + " = 'Agricultural land'"
        MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Arable'")
        expression = "CROME_desc = 'Short Rotation Coppice' AND " + Hab_field + " = 'Agricultural land'"
        MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Arable'")
    print(''.join(["## Finished on : ", time.ctime()]))
if process_PHI:
    for LAD in LAD_names:
        arcpy.env.workspace = os.path.join(folder, LAD + ".gdb")
        if delete_landform:
            print(" Deleting overlapping 'Landform' and 'Pylon' from OSMM for " + LAD)
            arcpy.MakeFeatureLayer_management("OSMM_LCM", "OSMM_layer")
            expression = "DescriptiveGroup LIKE '%Landform%' OR DescriptiveTerm IN ('Cliff','Slope','Pylon')"
            arcpy.SelectLayerByAttribute_management("OSMM_layer", where_clause=expression)
            arcpy.DeleteFeatures_management("OSMM_layer")
            arcpy.Delete_management("OSMM_layer")
        if intersect_PHI:
            print ("Intersecting " + LAD)
            arcpy.Identity_analysis("OSMM_LCM", "PHI", out_fc, "NO_FID")
        if interpret_PHI:
            print ("Interpreting " + LAD)
            # NOTE(review): this section writes to a hard-coded "Interpreted_habitat"
            # field rather than Hab_field; for region "Oxon" Hab_field is
            # "Interpreted_Habitat" (capital H) — confirm the intended field name.
            # Copy PHI habitat across, but not for manmade, gardens, water, unidentified PHI, wood pasture or OMHD (dealt with later)
            expression = "Make = 'Natural' AND DescriptiveGroup NOT LIKE '%water%' AND DescriptiveGroup NOT LIKE '%Water%' AND " \
                         "OSMM_hab <> 'Roadside - unknown surface' AND OSMM_hab <> 'Track' AND OSMM_hab <> 'Standing water' "
            expression2 = expression + " AND PHI IS NOT NULL AND PHI <> '' AND PHI NOT LIKE 'No main%' AND " \
                                       "PHI NOT LIKE 'Wood-pasture%' AND PHI NOT LIKE 'Open Mosaic%'"
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", expression2, "!PHI!")
            # Correction for traditional orchards in large gardens
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "PHI = 'Traditional orchard' AND OSMM_hab = 'Garden'",
                                        "'Traditional orchards'")
            # Other corrections / consolidations
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Deciduous woodland'",
                                        "'Broadleaved woodland - semi-natural'")
            expression3 = "Interpreted_habitat LIKE '%grazing marsh%' OR Interpreted_habitat LIKE 'Purple moor grass%'"
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", expression3, "'Marshy grassland'")
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat LIKE '%semi-improved grassland%'",
                                        "'Semi-natural grassland'")
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat LIKE '%meadow%'",
                                        "'Neutral grassland'")
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Traditional orchard'",
                                        "'Traditional orchards'")
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat LIKE '%alcareous%'",
                                        "'Calcareous grassland'")
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Lowland heathland'",
                                        "'Heathland'")
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Reedbeds'",
                                        "'Reedbed'")
            # Copy over OMHD only if the habitat is fairly generic (OMHD dataset covers areas of mixed habitats)
            expression5 = "(OMHD IS NOT NULL AND OMHD <> '') AND (Interpreted_habitat IN ('Arable', 'Agricultural land'," \
                          " 'Improved grassland', 'Natural surface', 'Cultivated/disturbed land', 'Bare ground', 'Landfill (inactive)'," \
                          "'Quarry or spoil (disused)', 'Sealed surface'))"
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", expression5, "'Open mosaic habitats'")
            # Copy over Wood pasture only if the habitat is fairly generic (WPP dataset covers very large areas of mixed habitats)
            expression4 = "(WPP IS NOT NULL AND WPP <> '') AND (Interpreted_habitat IN ('Arable', 'Agricultural land', " \
                          "'Improved grassland', 'Natural surface', 'Cultivated/disturbed land') OR " \
                          "Interpreted_habitat LIKE 'Scattered%' OR Interpreted_habitat LIKE 'Semi-natural grassland%')"
            MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", expression4, "'Parkland and scattered trees - broadleaved'")
    print(''.join(["## Finished on : ", time.ctime()]))
# After merge step 1, the separate Merge_into_Base_Map.py script must be run
# before re-running this script with step = 2.
if merge_or_intersect == "merge":
    if step == 1:
        print ("Now run Merge_into_Base_Map.py to merge OSMM_LCM with PHI, then set step = 2 in this code and re-run to interpret habitats")
        exit()
7551dd7a08f30ce3108b03fcbbf5b83d7533a27d | 908 | py | Python | core/migrations/0002_auto_20220209_1251.py | mazdakdev/video-compressor | 17e9f9f3f70f41b953a4c84ec6b1370f9faa97e3 | [
"MIT"
] | 3 | 2022-02-11T12:09:29.000Z | 2022-02-12T19:13:17.000Z | core/migrations/0002_auto_20220209_1251.py | mazdakdev/Video-compressor | 17e9f9f3f70f41b953a4c84ec6b1370f9faa97e3 | [
"MIT"
] | null | null | null | core/migrations/0002_auto_20220209_1251.py | mazdakdev/Video-compressor | 17e9f9f3f70f41b953a4c84ec6b1370f9faa97e3 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2022-02-09 12:51
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (see header comment): adjusts three fields on
    # the 'video' model - bounds 'percent' to the 1..100 range and makes the
    # 240p/360p file fields optional with date-based upload paths.

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='video',
            name='percent',
            field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)]),
        ),
        migrations.AlterField(
            model_name='video',
            name='video_240',
            field=models.FileField(blank=True, null=True, upload_to='Videos/240p/%Y/%m/%d'),
        ),
        migrations.AlterField(
            model_name='video',
            name='video_360',
            field=models.FileField(blank=True, null=True, upload_to='Videos/360p/%Y/%m/%d'),
        ),
    ]
| 30.266667 | 162 | 0.60793 | 785 | 0.864537 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.179515 |
7552246fcee85fec14281ac041843d581b43d518 | 1,353 | py | Python | pdf417gen/compaction/optimizations.py | hgulino/pdf417-py | 3df2376a2bc2ee1497a0bb401b5eb735789eee85 | [
"MIT"
] | null | null | null | pdf417gen/compaction/optimizations.py | hgulino/pdf417-py | 3df2376a2bc2ee1497a0bb401b5eb735789eee85 | [
"MIT"
] | null | null | null | pdf417gen/compaction/optimizations.py | hgulino/pdf417-py | 3df2376a2bc2ee1497a0bb401b5eb735789eee85 | [
"MIT"
] | null | null | null | from itertools import chain, groupby
from pdf417gen.compaction.numeric import compact_numbers
from pdf417gen.compaction.text import compact_text
from pdf417gen.util import iterate_prev_next
def replace_short_numeric_chunks(chunks):
    """
    The Numeric Compaction mode can pack almost 3 digits (2.93) into a symbol
    character. Though Numeric Compaction mode can be invoked at any digit
    length, it is recommended to use Numeric Compaction mode when there are
    more than 13 consecutive digits. Otherwise, use Text Compaction mode.
    """
    from pdf417gen.compaction import Chunk

    def is_text(neighbour):
        # A neighbour counts only when it exists and is text-compacted.
        return bool(neighbour) and neighbour.compact_fn == compact_text

    for before, current, after in iterate_prev_next(chunks):
        short_numeric = (current.compact_fn == compact_numbers
                         and len(current.data) < 13)
        if short_numeric and (is_text(before) or is_text(after)):
            # Fold the short digit run into the surrounding text compaction.
            yield Chunk(current.data, compact_text)
        else:
            yield current
def merge_chunks_with_same_compact_fn(chunks):
    """Collapse adjacent chunks that share a compaction function into one."""
    from pdf417gen.compaction import Chunk
    for fn, members in groupby(chunks, key=lambda member: member[1]):
        merged = []
        for member in members:
            merged.extend(member.data)
        yield Chunk(merged, fn)
| 34.692308 | 77 | 0.702882 | 0 | 0 | 1,157 | 0.855137 | 0 | 0 | 0 | 0 | 313 | 0.231338 |
75522ca8fbe7cd45f4d322e2008582dbf31ff13b | 533 | py | Python | setup_guide/translation.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
] | 1 | 2019-01-18T03:50:46.000Z | 2019-01-18T03:50:46.000Z | setup_guide/translation.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
] | 50 | 2018-01-24T18:04:08.000Z | 2019-01-03T03:30:30.000Z | setup_guide/translation.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
] | 2 | 2018-02-12T15:20:52.000Z | 2019-01-18T03:51:52.000Z | from .models import SetupGuidePage, SetupGuideLandingPage
from modeltranslation.translator import TranslationOptions
from modeltranslation.decorators import register
@register(SetupGuidePage)
class SetupGuidePageTranslation(TranslationOptions):
    # Fields of SetupGuidePage registered for per-language translation
    # via django-modeltranslation.
    fields = (
        'description',
        'heading',
        'sub_heading',
        'subsections',
    )
@register(SetupGuideLandingPage)
class SetupGuideLandingPageTranslation(TranslationOptions):
    # Translatable fields for the setup-guide landing page.
    fields = (
        'heading',
        'sub_heading',
        'lead_in',
    )
| 23.173913 | 59 | 0.712946 | 302 | 0.566604 | 0 | 0 | 361 | 0.677298 | 0 | 0 | 79 | 0.148218 |
75525d4ac55cdb988499eb7c554fb6564e18cfa9 | 3,447 | py | Python | src/utils/grid_fuse.py | sisl/MultiAgentVariationalOcclusionInference | c46ef5dc99a6ca7a59937dbd4bf1d3f86d0eb757 | [
"Apache-2.0"
] | 6 | 2021-09-07T18:40:24.000Z | 2022-03-15T06:16:07.000Z | src/utils/grid_fuse.py | sisl/MultiAgentVariationalOcclusionInference | c46ef5dc99a6ca7a59937dbd4bf1d3f86d0eb757 | [
"Apache-2.0"
] | 2 | 2021-12-03T05:18:09.000Z | 2021-12-17T09:54:08.000Z | src/utils/grid_fuse.py | sisl/MultiAgentVariationalOcclusionInference | c46ef5dc99a6ca7a59937dbd4bf1d3f86d0eb757 | [
"Apache-2.0"
] | 3 | 2021-12-15T03:19:48.000Z | 2022-03-31T18:05:39.000Z | # Code to transform the driver sensor OGMs to the ego vehicle's OGM frame of reference.
import matplotlib.pyplot as plt
import numpy as np
import math
import copy
from utils.grid_utils import global_grid
import time
from scipy.spatial import cKDTree
import pdb
def mask_in_EgoGrid(global_grid_x, global_grid_y, ref_xy, ego_xy, pred_egoGrid, pred_maps, res, mask_unk=None, tolerance=1):
    """Fill the unknown cells of the ego grid from the driver-sensor grid.

    World coordinates of both grids are discretised into the shared global
    grid; each ego cell selected by ``mask_unk`` is matched to the nearest
    reference cell (KD-tree over global indices, within ``tolerance`` index
    units) and receives that cell's value from ``pred_maps``.

    NOTE(review): reads module globals x_min/x_max/y_min/y_max, which are only
    set by Transfer_to_EgoGrid() - calling this function first will fail.
    """
    # Consider only the unknown cells in pred_egoGrid (ego sensor grid before trasfering values).
    indices = np.where(mask_unk)
    ego_x = ego_xy[0][indices]
    ego_y = ego_xy[1][indices]
    ego_xy = [ego_x, ego_y]
    # Flattened (row-major) positions of the unknown ego cells.
    flat_indicies = indices[0]*pred_egoGrid.shape[1]+indices[1]
    # ref indx --> global indx
    ref_x_ind = np.floor(global_grid_x.shape[1]*(ref_xy[0]-x_min+res/2.)/(x_max-x_min+res)).astype(int) # column index
    ref_y_ind = np.floor(global_grid_y.shape[0]*(ref_xy[1]-y_min+res/2.)/(y_max-y_min+res)).astype(int) # row index
    ref_global_ind = np.vstack((ref_y_ind.flatten(), ref_x_ind.flatten())).T
    # ego indx --> global indx
    ego_x_ind = np.floor(global_grid_x.shape[1]*(ego_xy[0]-x_min+res/2.)/(x_max-x_min+res)).astype(int) # column index
    ego_y_ind = np.floor(global_grid_y.shape[0]*(ego_xy[1]-y_min+res/2.)/(y_max-y_min+res)).astype(int) # row index
    ego_global_ind = np.vstack((ego_y_ind.flatten(), ego_x_ind.flatten())).T
    # Look for the matching global_grid indices between the ref_grid and ego_grid.
    kdtree = cKDTree(ref_global_ind)
    dists, inds = kdtree.query(ego_global_ind)
    pred_egoGrid_flat = pred_egoGrid.flatten()
    pred_maps_flat = pred_maps.flatten()
    # Back to the local grid indices. Tolerance should be an integer because kd tree is comparing indices.
    ego_ind = flat_indicies[np.where(dists<=tolerance)]
    ref_ind = inds[np.where(dists<=tolerance)]
    # Assign the values for the corresponding cells.
    pred_egoGrid_flat[ego_ind] = pred_maps_flat[ref_ind]
    pred_egoGrid = pred_egoGrid_flat.reshape(pred_egoGrid.shape)
    return pred_egoGrid
def Transfer_to_EgoGrid(ref_local_xy, pred_maps, ego_local_xy, ego_sensor_grid, endpoint, res=0.1, mask_unk=None):
    global x_min, x_max, y_min, y_max
    #####################################################################################################################################
    ## Goal : Transfer pred_maps (in driver sensor's grid) cell information to the unknown cells of ego car's sensor_grid
    ## Method : Use global grid as an intermediate (ref indx --> global indx --> ego indx).
    ## ref_local_xy (N, 2, w, h) & pred_maps (N, w, h)
    ## ego_xy (2, w', h') & & ego_sensor_grid (w', h')
    ## return pred_maps_egoGrid(N, w', h')
    ## * N : number of agents
    #####################################################################################################################################
    # endpoint is (x_min, y_min, x_max, y_max); stored in module globals
    # because mask_in_EgoGrid() reads them.
    x_min = endpoint[0]
    x_max = endpoint[2]
    y_min = endpoint[1]
    y_max = endpoint[3]
    global_res = 1.0
    global_grid_x, global_grid_y = global_grid(np.array([x_min,y_min]),np.array([x_max,y_max]),global_res)
    if np.any(ref_local_xy[0] == None):
        # Bug fix: this branch previously called `pred_maps_egoGrid.append(None)`
        # on a name that is never defined and then fell through to return an
        # unbound local, raising NameError either way.  With no reference
        # coordinates there is nothing to transfer, so report that explicitly.
        return None
    # Start from a grid filled with 2 before transferring values
    # (presumably the "unknown" encoding - TODO confirm against callers).
    # A dead `copy.copy(ego_sensor_grid)` assignment that was immediately
    # overwritten by this line has been removed.
    pred_egoGrid = np.ones(ego_sensor_grid.shape)*2
    pred_egoGrid = mask_in_EgoGrid(global_grid_x, global_grid_y, ref_local_xy, ego_local_xy, pred_egoGrid, pred_maps, res, mask_unk)
    return pred_egoGrid
7553bc03ca96627edd28fe306335836754dd8d8f | 1,236 | py | Python | emailhub/management/commands/emailhub.py | h3/django-emailhub | a618256096b3faa55479c46b6313861cfd898a9f | [
"MIT"
] | null | null | null | emailhub/management/commands/emailhub.py | h3/django-emailhub | a618256096b3faa55479c46b6313861cfd898a9f | [
"MIT"
] | null | null | null | emailhub/management/commands/emailhub.py | h3/django-emailhub | a618256096b3faa55479c46b6313861cfd898a9f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand
from emailhub.utils.email import send_unsent_emails
log = logging.getLogger('emailhub')
class Command(BaseCommand):
    """
    EmailHub management command
    """
    help = 'EmailHub management command'

    # (flag, dest, help) triple for every supported boolean switch, in the
    # order they are registered on the parser.
    _OPTIONS = (
        ('--send', 'send', 'Send unsent emails'),
        ('--status', 'status', 'EmailHub system status'),
        ('--create-template', 'create_template', 'Create a new template'),
        ('--list-templates', 'list_templates', 'List templates'),
    )

    def add_arguments(self, parser):
        """Register all command-line switches on *parser*."""
        for flag, dest, help_text in self._OPTIONS:
            parser.add_argument(
                flag,
                dest=dest,
                action='store_true',
                default=False,
                help=help_text)

    def handle(self, *args, **options):
        """Entry point: only --send currently triggers any work."""
        if options.get('send'):
            send_unsent_emails()
| 25.22449 | 51 | 0.563916 | 1,009 | 0.816343 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.278317 |
75542a1318d360a9da0eafc7a95d7f84e675efef | 639 | py | Python | geo2sql/converter.py | budbee/geo2sql | 0e7e1f0c1983bb6965d3b3ac3fb06314128e110a | [
"MIT"
] | null | null | null | geo2sql/converter.py | budbee/geo2sql | 0e7e1f0c1983bb6965d3b3ac3fb06314128e110a | [
"MIT"
] | null | null | null | geo2sql/converter.py | budbee/geo2sql | 0e7e1f0c1983bb6965d3b3ac3fb06314128e110a | [
"MIT"
] | null | null | null | from __future__ import print_function
import sys
import json
def convert(path):
    """Write the first GeoJSON feature's outer ring to stdout as WKT.

    Reads the file at *path*, takes the exterior ring of the first feature's
    geometry and emits ``POLYGON((x y, ...))`` on stdout.  Holes and any
    further features are ignored.  A missing file or a document without the
    expected keys is reported on stdout instead of raising.
    """
    try:
        with open(path, 'r') as source:
            document = json.load(source)
        # Warning - Only looking at the exterior, hence skipping holes.
        exterior_ring = document['features'][0]['geometry']['coordinates'][0]
        pairs = ("{} {}".format(x, y) for x, y in exterior_ring)
        sys.stdout.write(u"POLYGON(({}))\n".format(", ".join(pairs)))
    except IOError:
        print("No such file")
    except KeyError:
        print("File is not properly formatted")
| 29.045455 | 84 | 0.596244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.2723 |
7554f9acad191edf71d5e0947b588d081ddd4ae3 | 32 | py | Python | augmenty/lang/ru/__init__.py | koaning/augmenty | 13dbdbb5fd56b36c97678ae48d1e0d869987f6dd | [
"MIT"
] | null | null | null | augmenty/lang/ru/__init__.py | koaning/augmenty | 13dbdbb5fd56b36c97678ae48d1e0d869987f6dd | [
"MIT"
] | 1 | 2022-03-12T02:25:00.000Z | 2022-03-12T02:26:01.000Z | augmenty/lang/ru/__init__.py | HishamKhdair/augmenty | a65a7beac410f53706bb7838026f2bac9b89d544 | [
"MIT"
] | null | null | null | from .keyboard import create_ru
| 16 | 31 | 0.84375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7555fe5b77c17002f43ce21bf18e6218385b475c | 675 | py | Python | swepc.python/swepc/test/lakeatrest.py | hertzsprung/seamless-wave-uq | 10a9b2e18d11cf3f4e711a90523f85758e5fb531 | [
"MIT"
] | null | null | null | swepc.python/swepc/test/lakeatrest.py | hertzsprung/seamless-wave-uq | 10a9b2e18d11cf3f4e711a90523f85758e5fb531 | [
"MIT"
] | null | null | null | swepc.python/swepc/test/lakeatrest.py | hertzsprung/seamless-wave-uq | 10a9b2e18d11cf3f4e711a90523f85758e5fb531 | [
"MIT"
] | null | null | null | import swepc
class LakeAtRest:
domain = [-50.0, 50.0]
def __init__(self, mesh, solver, args):
self.endTime = 100.0
self.ic = swepc.InitialConditions(mesh.elements, degree=1)
bump = swepc.test.TwoBumps(args.topography_mean,
args.topography_stddev, halfWidth=10.0)
self.ic.z[:,0] = [bump.z0(x) for x in mesh.C]
self.ic.z[:,1] = [bump.z1(x) for x in mesh.C]
self.ic.water[:,0] = [solver.elevationToWater(1.5, z)
for z in self.ic.z[:,0]]
self.ic.water[:,1] = [solver.elevationToWater(0.0, z)
for z in self.ic.z[:,1]]
self.bc = swepc.BoundaryConditions()
| 32.142857 | 66 | 0.57037 | 660 | 0.977778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
75564ca95e8bf6b16deb4a7f6f2da20bce562a30 | 167 | py | Python | osi_django_app/OSI/admin.py | godslayer201/open-source-list | 5c708249a9a52603f26e3ad2f0b4a0ebd586b495 | [
"MIT"
] | 2 | 2020-09-16T14:10:03.000Z | 2020-09-22T21:35:08.000Z | osi_django_app/OSI/admin.py | godslayer201/open-source-list | 5c708249a9a52603f26e3ad2f0b4a0ebd586b495 | [
"MIT"
] | null | null | null | osi_django_app/OSI/admin.py | godslayer201/open-source-list | 5c708249a9a52603f26e3ad2f0b4a0ebd586b495 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import soc, osc, univ_soc_woc
# Register the app's models so they can be managed through the Django admin.
admin.site.register(soc)
admin.site.register(osc)
admin.site.register(univ_soc_woc)
| 23.857143 | 43 | 0.784431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7558d1c4c68c7402a2a152b6cc1f7bb1febec611 | 16,716 | py | Python | Phase_1/O-27-DaYan.py | yapanliu/ashrae-ob-database | 8c23e3ab5e8ed5589962e067a50c8940d2d31521 | [
"MIT"
] | null | null | null | Phase_1/O-27-DaYan.py | yapanliu/ashrae-ob-database | 8c23e3ab5e8ed5589962e067a50c8940d2d31521 | [
"MIT"
] | null | null | null | Phase_1/O-27-DaYan.py | yapanliu/ashrae-ob-database | 8c23e3ab5e8ed5589962e067a50c8940d2d31521 | [
"MIT"
] | null | null | null | '''
This code will clean the OB datasets and combine all the cleaned data into one
Dataset name: O-27-Da Yan
semi-automate code, needs some hands work. LOL But God is so good to me.
1. 9 different buildings in this dataset, and each building has different rooms
3. each room has different window, door, ac, indoor, outdoor info
4. I processed building A to F by hand, then figured out that I can rename the files first, then use code to process
5. rename the file by type and number, such as window1, indoor1, ac1, door1, etc.
6. code automated G, H, I
7. the folder has multiple types of data, csv and xlsx, figure out the file type, then read into pandas
8. concat the outdoor datetime and temperature with ac data, then judge if the ac is on or off
'''
import os
import glob
import string
import datetime
import pandas as pd
import matplotlib.pyplot as plt
# specify the path
data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/processed/'
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/_sql/'
# generate the name of different building folders
alphabet_string = string.ascii_uppercase
alphabet_list = list(alphabet_string)
building_names = alphabet_list[:9]  # buildings 'A' .. 'I'
''' 1. process data by folders '''
begin_time = datetime.datetime.now()
# create dataframe to store the data
# (accumulators mutated by the per-building loops below)
combined_window = pd.DataFrame()
combined_door = pd.DataFrame()
combined_hvac = pd.DataFrame()
combined_indoor = pd.DataFrame()
combined_outdoor = pd.DataFrame()
''' process outdoor data '''
# Outdoor weather CSVs sit directly under the processed/ root folder.
print(f'Process outdoor data')
os.chdir(data_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
outdoor_files = list(filter(lambda name: 'outdoor_building' in name, root_files)) # filter out the outdoor files
combined_outdoor = pd.concat([pd.read_csv(f) for f in outdoor_files])
''' manual processed data '''
# Buildings A-F were cleaned by hand beforehand; their per-type CSVs only
# need concatenating onto the combined accumulators.
print(f'Process manually processed data')
building_names_1 = building_names[:6]
# unit test
# i = 0
# folder_name = building_names_1[i]
for index, bld_name in enumerate(building_names_1):
    print(f'Reading the data under building folder {bld_name}')
    building_path = data_path + bld_name + '/'
    os.chdir(building_path) # pwd
    sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
    root_files = next(os.walk('.'))[2] # get the files under root path
    # combine
    indoor_files = list(filter(lambda name: 'indoor' in name, root_files)) # filter out the indoor files
    window_files = list(filter(lambda name: 'window' in name, root_files)) # filter out the window files
    hvac_files = list(filter(lambda name: 'hvac' in name, root_files)) # filter out the ac files
    door_files = list(filter(lambda name: 'door_status' in name, root_files)) # filter out the door status files
    # read and combine the files under this folder
    if indoor_files: # make sure it is not empty
        indoor_temp_df = pd.concat([pd.read_csv(f) for f in indoor_files])
        combined_indoor = pd.concat([combined_indoor, indoor_temp_df], ignore_index=True) # concat the data
    else:
        pass
    if window_files:
        window_temp_df = pd.concat([pd.read_csv(f) for f in window_files])
        combined_window = pd.concat([combined_window, window_temp_df], ignore_index=True) # concat the data
    else:
        pass
    if hvac_files:
        hvac_temp_df = pd.concat([pd.read_csv(f) for f in hvac_files])
        combined_hvac = pd.concat([combined_hvac, hvac_temp_df], ignore_index=True) # concat the data
        # print(combined_hvac.isnull().sum())
        # print(index)
    else:
        pass
    if door_files:
        door_temp_df = pd.concat([pd.read_csv(f) for f in door_files])
        combined_door = pd.concat([combined_door, door_temp_df], ignore_index=True) # concat the data
        # print(combined_door.isnull().sum())
        # print(index)
    else:
        pass
''' auto mated process by building level '''
# Buildings G-I keep raw per-room files named like window1/ac1/door1 with
# mixed .CSV / .xlsx extensions; read each, normalise the two columns, and
# tag sensor/room/building IDs parsed from the file and folder names.
building_names = ['G', 'H', 'I']
building_ids = [7, 8, 9]
for index, bld_name in enumerate(building_names):
    print(f'Dealing with data under building folder {bld_name}')
    building_path = data_path + bld_name + '/'
    os.chdir(building_path) # pwd
    sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
    root_files = next(os.walk('.'))[2] # get the files under root path
    '''' room level '''
    for room_id in sub_folders:
        print(f'Dealing with data under room folder {room_id}')
        room_path = building_path + room_id + '/'
        os.chdir(room_path) # pwd
        file_names = os.listdir() # get all the file names
        window_files = list(filter(lambda name: 'window' in name, file_names)) # filter out the window files
        hvac_files = list(filter(lambda name: 'ac' in name, file_names)) # filter out the ac files
        door_files = list(filter(lambda name: 'door' in name, file_names)) # filter out the door files
        # read and combine files
        if window_files:
            for window_name in window_files:
                name, extension = os.path.splitext(window_name) # get the path and extension of a file
                if extension == '.CSV': # if the file is csv file
                    temp_df = pd.read_csv(window_name, usecols=[0, 1])
                    temp_df.columns = ['Date_Time', 'Window_Status'] # rename the columns
                else:
                    temp_df = pd.read_excel(window_name, usecols=[0, 1])
                    temp_df.columns = ['Date_Time', 'Window_Status']
                # sensor number embedded after the type prefix, e.g. 'window12_...'
                temp_df['Window_ID'] = int(name.split('_')[0][6:])
                temp_df['Room_ID'] = int(room_id) # assign Room_ID
                temp_df['Building_ID'] = building_ids[index] # assign Building_ID
                combined_window = pd.concat([combined_window, temp_df], ignore_index=True) # concat the data
        else:
            pass
        if door_files:
            for door_name in door_files:
                name, extension = os.path.splitext(door_name) # get the path and extension of a file
                if extension == '.CSV': # if the file is csv file
                    temp_df = pd.read_csv(door_name, usecols=[0, 1])
                    temp_df.columns = ['Date_Time', 'Door_Status'] # rename the columns
                else:
                    temp_df = pd.read_excel(door_name, usecols=[0, 1])
                    temp_df.columns = ['Date_Time', 'Door_Status']
                temp_df['Door_ID'] = int(name.split('_')[0][4:])
                temp_df['Room_ID'] = int(room_id) # assign Room_ID
                temp_df['Building_ID'] = building_ids[index] # assign Building_ID
                combined_door = pd.concat([combined_door, temp_df], ignore_index=True) # concat the data
        else:
            pass
        if hvac_files:
            for hvac_name in hvac_files:
                name, extension = os.path.splitext(hvac_name) # get the path and extension of a file
                if extension == '.CSV': # if the file is csv file
                    temp_df = pd.read_csv(hvac_name, usecols=[0, 1])
                    temp_df.columns = ['Date_Time', 'yapan_supply _t']
                else:
                    temp_df = pd.read_excel(hvac_name, usecols=[0, 1])
                    temp_df.columns = ['Date_Time', 'yapan_supply _t']
                temp_df['HVAC_Zone_ID'] = int(name.split('_')[0][2:]) # get the number of ac
                temp_df['Room_ID'] = int(room_id) # assign Room_ID
                temp_df['Building_ID'] = building_ids[index] # assign Building_ID
                combined_hvac = pd.concat([combined_hvac, temp_df], ignore_index=True) # concat the data
        else:
            pass
# drop na rows when specific column is null
combined_indoor = combined_indoor[combined_indoor['Date_Time'].notnull()]
combined_outdoor = combined_outdoor[combined_outdoor['Date_Time'].notnull()]
combined_window = combined_window[combined_window['Date_Time'].notnull()]
combined_door = combined_door[combined_door['Date_Time'].notnull()]
combined_hvac = combined_hvac[combined_hvac['Date_Time'].notnull()]
# process windows, door open/close data
# Recode raw status {0 -> 1 (open), 1/2 -> 0 (closed)} - raw encoding
# presumably from the logger; confirm against the source data dictionary.
combined_door['Door_Status'] = combined_door['Door_Status'].replace([0, 1, 2], [1, 0, 0])
combined_window['Window_Status'] = combined_window['Window_Status'].replace([0, 1, 2], [1, 0, 0])
# format datetime
print("Formatting datetime!")
combined_indoor['Date_Time'] = pd.to_datetime(combined_indoor['Date_Time'], format='%m/%d/%Y %H:%M')
combined_outdoor['Date_Time'] = pd.to_datetime(combined_outdoor['Date_Time'], format='%m/%d/%Y %H:%M')
combined_window['Date_Time'] = pd.to_datetime(combined_window['Date_Time'], infer_datetime_format=True)
combined_door['Date_Time'] = pd.to_datetime(combined_door['Date_Time'], infer_datetime_format=True)
combined_hvac['Date_Time'] = pd.to_datetime(combined_hvac['Date_Time'], infer_datetime_format=True)
# format data type
print(combined_indoor.dtypes)
print(combined_outdoor.dtypes)
print(combined_window.dtypes)
print(combined_door.dtypes)
print(combined_hvac.dtypes)
combined_indoor['Building_ID'] = combined_indoor['Building_ID'].astype(int)
combined_indoor['Room_ID'] = combined_indoor['Room_ID'].astype(int)
combined_outdoor['Building_ID'] = combined_outdoor['Building_ID'].astype(int)
combined_window['Building_ID'] = combined_window['Building_ID'].astype(int)
combined_window['Room_ID'] = combined_window['Room_ID'].astype(int)
combined_window['Window_ID'] = combined_window['Window_ID'].astype(int)
combined_door['Building_ID'] = combined_door['Building_ID'].astype(int)
combined_door['Room_ID'] = combined_door['Room_ID'].astype(int)
combined_door['Door_ID'] = combined_door['Door_ID'].astype(int)
combined_hvac['Building_ID'] = combined_hvac['Building_ID'].astype(int)
combined_hvac['Room_ID'] = combined_hvac['Room_ID'].astype(int)
combined_hvac['HVAC_Zone_ID'] = combined_hvac['HVAC_Zone_ID'].astype(int)
# replace null with empty
# # check combined data
# print('check null values')
# print(combined_window.isnull().sum())
# print(combined_door.isnull().sum())
# print(combined_hvac.isnull().sum())
#
# # check the unique IDs
# print(combined_window.Window_ID.unique())
# print(combined_door.Door_ID.unique())
# print(combined_hvac.HVAC_Zone_ID.unique())
#
# print(combined_hvac.Building_ID.unique())
# print(combined_window.Building_ID.unique())
# print(combined_door.Building_ID.unique())
# save data
combined_indoor.to_csv(save_path + 'combined_indoor.csv', index=False)
combined_outdoor.to_csv(save_path + 'combined_outdoor.csv', index=False)
combined_window.to_csv(save_path + 'combined_window.csv', index=False)
combined_door.to_csv(save_path + 'combined_door.csv', index=False)
combined_hvac.to_csv(save_path + 'combined_hvac.csv', index=False)
''' read templates and save data into the standard templates '''
# Re-load the intermediate CSVs and append them to the empty standard
# template tables, then write the templates back out.
# data
combined_indoor = pd.read_csv(save_path + 'combined_indoor.csv')
combined_outdoor = pd.read_csv(save_path + 'combined_outdoor.csv')
combined_window = pd.read_csv(save_path + 'combined_window.csv')
combined_door = pd.read_csv(save_path + 'combined_door.csv')
combined_hvac = pd.read_csv(save_path + 'combined_hvac.csv')
# templates
# read templates into pandas
template_window = pd.read_csv(template_path+'Window_Status.csv')
template_door = pd.read_csv(template_path+'Door_Status.csv')
template_hvac = pd.read_csv(template_path+'HVAC_Measurement.csv')
template_indoor = pd.read_csv(template_path+'Indoor_Measurement.csv')
template_outdoor = pd.read_csv(template_path+'Outdoor_Measurement.csv')
# columns
print(template_window.columns)
print(combined_window.columns)
print(template_door.columns)
print(combined_door.columns)
print(template_hvac.columns)
print(combined_hvac.columns)
print(template_indoor.columns)
print(combined_indoor.columns)
print(template_outdoor.columns)
print(combined_outdoor.columns)
# concat data
template_window = pd.concat([template_window, combined_window], ignore_index=True)
template_door = pd.concat([template_door, combined_door], ignore_index=True)
template_hvac = pd.concat([template_hvac, combined_hvac], ignore_index=True)
template_indoor = pd.concat([template_indoor, combined_indoor], ignore_index=True)
template_outdoor = pd.concat([template_outdoor, combined_outdoor], ignore_index=True)
# Drop template columns that the combined data does not populate.
# NOTE(review): 'Buiulding_ID' is presumably a misspelled column present in
# the template CSV itself - confirm before "fixing" the spelling here.
template_door = template_door.drop(columns=['Study_ID'])
template_outdoor = template_outdoor.drop(columns=['Buiulding_ID'])
# columns
print(template_window.columns)
print(template_door.columns)
print(template_hvac.columns)
print(template_indoor.columns)
print(template_outdoor.columns)
# data types
print(template_window.dtypes)
print(template_door.dtypes)
print(template_hvac.dtypes)
print(template_indoor.dtypes)
print(template_outdoor.dtypes)
# format datetime
print("Formatting datetime!")
template_indoor['Date_Time'] = pd.to_datetime(template_indoor['Date_Time'], format='%Y-%m-%d %H:%M:%S')
template_outdoor['Date_Time'] = pd.to_datetime(template_outdoor['Date_Time'], format='%Y-%m-%d %H:%M:%S')
template_window['Date_Time'] = pd.to_datetime(template_window['Date_Time'], format='%Y-%m-%d %H:%M:%S')
template_door['Date_Time'] = pd.to_datetime(template_door['Date_Time'], format='%Y-%m-%d %H:%M:%S')
template_hvac['Date_Time'] = pd.to_datetime(template_hvac['Date_Time'], format='%Y-%m-%d %H:%M:%S')
# format data types
template_indoor['Building_ID'] = template_indoor['Building_ID'].astype(int)
template_indoor['Room_ID'] = template_indoor['Room_ID'].astype(int)
template_outdoor['Building_ID'] = template_outdoor['Building_ID'].astype(int)
template_window['Building_ID'] = template_window['Building_ID'].astype(int)
template_window['Room_ID'] = template_window['Room_ID'].astype(int)
template_window['Window_ID'] = template_window['Window_ID'].astype(int)
template_door['Building_ID'] = template_door['Building_ID'].astype(int)
template_door['Room_ID'] = template_door['Room_ID'].astype(int)
template_door['Door_ID'] = template_door['Door_ID'].astype(int)
template_hvac['Building_ID'] = template_hvac['Building_ID'].astype(int)
template_hvac['Room_ID'] = template_hvac['Room_ID'].astype(int)
template_hvac['HVAC_Zone_ID'] = template_hvac['HVAC_Zone_ID'].astype(int)
# save data
template_window.to_csv(save_path+'Window_Status.csv', index=False)
template_door.to_csv(save_path+'Door_Status.csv', index=False)
template_hvac.to_csv(save_path+'HVAC_Measurement.csv', index=False)
template_indoor.to_csv(save_path+'Indoor_Measurement.csv', index=False)
template_outdoor.to_csv(save_path+'Outdoor_Measurement.csv', index=False)
# check the unique room ids and building ids
print(template_window['Room_ID'].unique())
print(template_window['Building_ID'].unique())
print(template_door['Room_ID'].unique())
print(template_door['Building_ID'].unique())
print(template_hvac['Room_ID'].unique())
print(template_hvac['Building_ID'].unique())
print(template_indoor['Room_ID'].unique())
print(template_indoor['Building_ID'].unique())
print(template_outdoor['Building_ID'].unique())
''' convert ac measurement to on/off status '''
# Derive a binary Cooling_Status by comparing outdoor temperature against the
# AC supply temperature: supply below outdoor => cooling on.
# read data
template_hvac = pd.read_csv(save_path+'HVAC_Measurement.csv')
template_outdoor = pd.read_csv(save_path+'Outdoor_Measurement.csv')
# check columns
print(template_hvac.columns)
print(template_outdoor.columns)
# check the buildings have ac data and outdoor data
template_hvac.groupby(['Room_ID', 'Building_ID']).size().reset_index()
template_outdoor.groupby(['Building_ID']).size().reset_index()
# check datetime
template_hvac['Date_Time']
template_outdoor['Date_Time']
# merge two dataframes together
hvac_df = template_hvac.merge(template_outdoor, how='left', on=['Building_ID', 'Date_Time'])
# use below two columns to calculate ac status
# hvac_df[['yapan_supply _t', 'Outdoor_Temp']]
hvac_df = hvac_df[hvac_df['Outdoor_Temp'].notnull()]
hvac_df['Cooling_Status'] = hvac_df.loc[:, 'Outdoor_Temp'] - hvac_df.loc[:, 'yapan_supply _t']
# convert negative values to 0-off, positive values to 1-on
# (an exact zero difference is left as 0, i.e. treated as off)
hvac_df.loc[hvac_df['Cooling_Status'] < 0, 'Cooling_Status'] = 0
hvac_df.loc[hvac_df['Cooling_Status'] > 0, 'Cooling_Status'] = 1
# save data
cols = list(template_hvac) # get the column names as a list
hvac_df = hvac_df[cols] # keep only desired columns
hvac_df.drop(['yapan_supply _t'], axis=1, inplace=True) # drop a column
hvac_df.to_csv(save_path+'/final/HVAC_Measurement.csv', index=False)
| 42.642857 | 148 | 0.71973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,383 | 0.38185 |
7558d464af2c0903cb1ca1c124d0a67287c52fa3 | 13,195 | py | Python | stacker/deploy.py | elliottgorrell/stacker | 6a8f2518dd04606255107937820bf3c249efd839 | [
"MIT"
] | null | null | null | stacker/deploy.py | elliottgorrell/stacker | 6a8f2518dd04606255107937820bf3c249efd839 | [
"MIT"
] | null | null | null | stacker/deploy.py | elliottgorrell/stacker | 6a8f2518dd04606255107937820bf3c249efd839 | [
"MIT"
] | null | null | null | __author__ = "Steve Mactaggart && Elliott Gorrell"
import base64
import json
import re
import sys
import traceback
from datetime import datetime
import boto3
import botocore
import yaml
from cf_helper import secure_print
from cf_helper.utils import DeployException, CloudFormationUtil, STSUtil
class DeployExecutor(object):
    """Drives CloudFormation stack create/update/delete via change sets,
    resolving parameters from YAML/JSON config and decrypting KMS secrets."""

    # File-name patterns used by load_parameters() to pick a parser.
    # NOTE(review): the second alternative '.+.yml' has an unescaped dot, so
    # it also matches names like 'fooXyml'; '.+\.yml' was probably intended.
    REGEX_YAML = re.compile('.+\.yaml|.+.yml')
    REGEX_JSON = re.compile('.+\.json')

    # Lazily-created AWS clients / helpers (see create_boto_clients()).
    cf_client = None
    ec2_client = None
    kms_client = None
    # Optional IAM role ARN to assume for all AWS calls.
    role = None
def execute(self, stack_name, template_name, config_filename=None,
role=None, add_parameters=None, version=None, ami_id=None, ami_tag_value=None,
scope=None, create=False, delete=False, dry_run=False,
debug=False):
self.role = role
try:
config_params = dict()
if config_filename is not None:
if debug:
print "Resolving config file {} using scope {}".format(config_filename, scope)
config_params = self.load_parameters(config_filename, scope)
# First override any of the defaults with those supplied at the command line
if add_parameters is None or len(add_parameters) == 0:
adds = {}
else:
adds = dict(item.split("=") for item in add_parameters)
config_params.update(adds)
self.create_boto_clients()
if version:
config_params["VersionParam"] = version
else:
version = datetime.now().isoformat('-').replace(":", "-")
if ami_id:
config_params["AMIParam"] = ami_id
elif ami_tag_value:
config_params["AMIParam"] = self.get_ami_id_by_tag(ami_tag_value)
secrets = []
for key in config_params:
# Check that config file doesn't have scopes (Parameters for more than one Cloudformation file)
if type(config_params[key]) is dict:
raise DeployException("Objects were found with nested values, you will need to specify which set of parameters to use with \"--scope <object_name>\"".format(key))
# Check if the value contains KMS encrypted value (KMSEncrypted /KMSEncrypted tag pair)
# if true, decrypt and replace the value
encryption_check = re.search('KMSEncrypted(.*)/KMSEncrypted', config_params[key])
if encryption_check:
decrypted_value = self.kms_client.decrypt(CiphertextBlob=base64.b64decode(encryption_check.group(1)))["Plaintext"]
config_params[key] = decrypted_value
secrets += [decrypted_value]
cloudformation = self.load_cloudformation(template_name)
raw_cloudformation = str(cloudformation)
# Go through parameters needed and fill them in from the parameters provided in the config file
# They need to be re-formated from the python dictionary into boto3 useable format
parameters = self.import_params_from_config(cloudformation, config_params, create, secrets)
print "Using stack parameters"
# This hides any encrypted values that were decrypted with KMS
print secure_print(json.dumps(parameters, indent=2), secrets)
if create:
change_set_name = "Create-{}".format(version.replace(".", "-"))
changeset = self.get_change_set(stack_name, raw_cloudformation, parameters, change_set_name, create)
elif delete:
if not dry_run:
result = self.cf_client.delete_stack(StackName=stack_name)
print result
else:
print "[Dry-Run] Not deleting stack."
else:
change_set_name = "Update-{}".format(version.replace(".", "-"))
changeset = self.get_change_set(stack_name, raw_cloudformation, parameters, change_set_name)
if changeset is not None:
self.cf_client.wait_for_change_set_to_complete(change_set_name=change_set_name,
stack_name=stack_name,
debug=False)
change_set_details = self.cf_client.describe_change_set(ChangeSetName=change_set_name,
StackName=stack_name)
self.print_change_set(change_set_details)
if dry_run:
response = self.cf_client.delete_change_set(ChangeSetName=change_set_name,
StackName=stack_name)
else:
response = self.cf_client.execute_change_set(ChangeSetName=change_set_name,
StackName=stack_name)
self.cf_client.wait_for_deploy_to_complete(stack_name=stack_name)
except botocore.exceptions.ClientError as e:
if str(e) == "An error occurred (ValidationError) when calling the UpdateStack operation: No updates are to be performed.":
print "No stack update required - CONTINUING"
else:
print "Unexpected error: %s" % e
sys.exit(1)
except DeployException as error:
print "ERROR: {0}".format(error)
sys.exit(1)
except Exception as error:
traceback.print_exc(file=sys.stdout)
traceback.print_stack(file=sys.stdout)
print "ERROR: {0}".format(error)
traceback.print_exc(file=sys.stdout)
sys.exit(1)
def create_boto_clients(self):
if self.ec2_client is None:
self.ec2_client = self._boto_connect('ec2')
if self.cf_client is None:
cf_client = self._boto_connect('cloudformation')
self.cf_client = CloudFormationUtil(cf_client)
if self.kms_client is None:
self.kms_client = self._boto_connect('kms')
def _boto_connect(self, client_type):
if self.role:
sts = STSUtil(sts_arn=self.role, debug=True)
credentials = sts.authenticate_role()['Credentials']
client = boto3.client(client_type,
aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken'])
# If no role is specified the current environments will be used
else:
client = boto3.client(client_type)
return client
def load_parameters(self, config_filename, scope=None):
try:
with open(config_filename) as config_file:
if re.match(self.REGEX_YAML, config_filename):
config_data = yaml.load(config_file)
elif re.match(self.REGEX_JSON, config_filename):
config_data = json.load(config_file)
else:
raise DeployException("Config must be a YAML or JSON file")
if scope is not None:
if scope not in config_data:
raise DeployException("Cannot find scope '{}' within the '{}' configuration file."
.format(scope, config_filename))
parameters = config_data[scope]
else:
parameters = config_data
return parameters
except Exception as error:
raise DeployException("Unable to open config file '{}'\n{}".format(config_filename, error))
def get_ami_id_by_tag(self, ami_tag_value):
images = self.ec2_client.describe_images(Filters=[
{'Name': 'tag:ArtifactID',
'Values': [ami_tag_value]}
])['Images']
if len(images) == 0:
raise DeployException("No images found for search '{}'".format(ami_tag_value))
elif len(images) > 1:
print images
raise DeployException("More than 1 image found for search '{}'".format(ami_tag_value))
else:
ami_id = images[0]["ImageId"]
print "Located AMI {} - {} created {}".format(ami_id, images[0]['Name'], images[0]['CreationDate'])
return ami_id
def import_params_from_config(self, cloudformation, config_params, create, secrets = []):
parameters = []
if 'Parameters' in cloudformation:
for param, values in cloudformation['Parameters'].items():
if param in config_params:
parameters += [{
"ParameterKey": param,
"ParameterValue": config_params[param]
}]
else:
# If this is first deployment of stack we try and use a default provided in the CF
if create:
defaultValue = values.get('Default')
if defaultValue is not None:
parameters += [{
"ParameterKey": param,
"ParameterValue": defaultValue
}]
# There is no default and there is no previous value to use so throw an error
else:
raise DeployException("Cannot CREATE new stack with missing parameter {}".format(param))
sys.exit(-1)
# This is an update of existing stack so use current value
else:
print "Using current stack value for parameter {}".format(param)
parameters += [{
"ParameterKey": param,
"UsePreviousValue": True
}]
return parameters
else:
print "Specified template has no stack parameters"
def get_change_set(self, stack_name, cloudformation, parameters, change_set_name, create = False):
if create:
changeset = self.cf_client.create_change_set(
StackName=stack_name,
TemplateBody=cloudformation,
Parameters=parameters,
Capabilities=[
'CAPABILITY_IAM',
],
ChangeSetName=change_set_name,
ChangeSetType="CREATE"
)
else:
changeset = self.cf_client.create_change_set(
StackName=stack_name,
TemplateBody=cloudformation,
Parameters=parameters,
Capabilities=[
'CAPABILITY_IAM',
],
ChangeSetName=change_set_name,
)
return changeset
def print_change_set(self, change_set_details):
if len(change_set_details['Changes']) > 0:
print "-------------------------------"
print "CloudFormation changes to apply"
print "-------------------------------"
for x in change_set_details['Changes']:
change = x["ResourceChange"]
if change["Action"] == "Add":
replace_mode = "New resource"
elif change["Action"] == "Modify":
replace_mode = change["Replacement"]
if replace_mode == "False":
replace_mode = "Update in place"
elif replace_mode == "False":
replace_mode = "Full replacement"
elif replace_mode == "Conditional":
replace_mode = "Conditionally replace"
else:
replace_mode = "Delete resource"
change_mode = "[{} - {}]".format(change["Action"], replace_mode)
print "{} {}/{} ({})".format(change_mode.ljust(34), change["LogicalResourceId"],
change.get("PhysicalResourceId", ""), change["ResourceType"])
print ""
else:
print "No CloudFormation changes detected"
def load_cloudformation(self, template_name):
try:
with open(template_name, "r") as myfile:
if re.match(self.REGEX_YAML, template_name):
cloudformation = yaml.load(myfile)
elif re.match(self.REGEX_JSON, template_name):
cloudformation = json.load(myfile)
else:
raise DeployException("Cloudformation template must be a JSON or YAML file")
except Exception as error:
raise DeployException("Unable to open CloudFormation template '{}'\n{}".format(template_name, error))
if cloudformation is None:
raise DeployException("It looks like the CloudFormation template file is empty")
return cloudformation
| 42.702265 | 182 | 0.551648 | 12,896 | 0.97734 | 0 | 0 | 0 | 0 | 0 | 0 | 2,693 | 0.204092 |
755c0cb0df35cafe122bc9046c0e5faf80eebb71 | 739 | py | Python | Projects/consultants/urls.py | jjfleet/Capstone | f81e21f0641ed0b75e06161198fca52805acb2e4 | [
"Apache-2.0"
] | 2 | 2018-07-23T05:44:50.000Z | 2018-09-10T09:12:36.000Z | Projects/consultants/urls.py | jjmassey/Capstone | f81e21f0641ed0b75e06161198fca52805acb2e4 | [
"Apache-2.0"
] | 14 | 2018-09-10T10:42:39.000Z | 2018-10-24T00:04:36.000Z | Projects/consultants/urls.py | jjfleet/Capstone | f81e21f0641ed0b75e06161198fca52805acb2e4 | [
"Apache-2.0"
] | 2 | 2018-09-10T06:34:31.000Z | 2018-09-17T06:05:23.000Z | from django.urls import path
from . views import ConsultantCreateView, UserConsultantPageView, ConsultantDetailView, ConsultantPageView, ConsultantDeleteView
from . import views as consultant_views
# URL routes for the consultants app. The route names below are the public
# identifiers used by reverse() and {% url %} elsewhere in the project.
urlpatterns = [
    # Landing page listing all consultants.
    path('', ConsultantPageView.as_view(), name='consultants-home'),
    # Detail view for a single consultant, looked up by primary key.
    path('<int:pk>/', ConsultantDetailView.as_view(), name='consultant-detail'),
    # Function-based view that creates a new consultant.
    path('new/', consultant_views.consultantCreate, name='consultant-create'),
    # All consultants belonging to the given user.
    path('<str:username>/consultants', UserConsultantPageView.as_view(), name='user-consultant'),
    # Function-based update view for an existing consultant.
    path('<int:pk>/update/',consultant_views.consultantUpdateView, name='consultant-update'),
    # Confirmation and deletion of a consultant.
    path('<int:pk>/consultant/delete/', ConsultantDeleteView.as_view(), name='consultant-delete'),
]
| 52.785714 | 129 | 0.760487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.277402 |
755e59a676f096d694c1bce55708827fd57f1854 | 68 | py | Python | api/qr/__init__.py | TailorDev/pauling | 3f616a24d3bdf6fc24308ba0ec0177c374a70707 | [
"MIT"
] | 7 | 2017-10-04T18:30:24.000Z | 2018-03-08T12:41:09.000Z | api/qr/__init__.py | sarvesh107/pauling | 3f616a24d3bdf6fc24308ba0ec0177c374a70707 | [
"MIT"
] | 27 | 2017-10-06T22:54:09.000Z | 2018-03-08T12:37:28.000Z | api/qr/__init__.py | sarvesh107/pauling | 3f616a24d3bdf6fc24308ba0ec0177c374a70707 | [
"MIT"
] | 3 | 2017-10-04T19:01:27.000Z | 2020-10-01T02:42:26.000Z | from .svg import make_svg # noqa
from .png import make_png # noqa
| 22.666667 | 33 | 0.735294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.176471 |
755ea89e8a5dc718280f982f7451bc3f1aae7a5f | 349 | py | Python | euler/73.py | stauntonknight/algorithm | 39dbe6dc952ab7db3a469e1ca785003a4660fedb | [
"CNRI-Python"
] | null | null | null | euler/73.py | stauntonknight/algorithm | 39dbe6dc952ab7db3a469e1ca785003a4660fedb | [
"CNRI-Python"
] | null | null | null | euler/73.py | stauntonknight/algorithm | 39dbe6dc952ab7db3a469e1ca785003a4660fedb | [
"CNRI-Python"
] | null | null | null | import fractions
import math


def count_fractions(max_denominator=12000):
    """Count reduced proper fractions strictly between 1/3 and 1/2.

    Project Euler problem 73: for every denominator d up to
    *max_denominator*, count the numerators n with 1/3 < n/d < 1/2 and
    gcd(n, d) == 1.

    Fixes over the original script: ``fractions.gcd`` was removed in
    Python 3.9 (``math.gcd`` is the replacement), the accumulator no
    longer shadows the built-in ``sum``, and the limit is a parameter.
    """
    total = 0
    for den in range(1, max_denominator + 1):
        # Smallest numerator with n/den > 1/3, and largest with n/den < 1/2.
        min_num = den // 3 + 1
        max_num = (den - 1) // 2
        for num in range(min_num, max_num + 1):
            if math.gcd(num, den) == 1:
                total += 1
    return total


if __name__ == "__main__":
    print(count_fractions())
| 16.619048 | 44 | 0.495702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
755eb82e590516aaf0f14f5bbeadef6765c9002e | 2,698 | py | Python | attic/gui/core/indicator.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | [
"BSD-3-Clause"
] | 1 | 2015-11-04T16:37:39.000Z | 2015-11-04T16:37:39.000Z | attic/gui/core/indicator.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | [
"BSD-3-Clause"
] | null | null | null | attic/gui/core/indicator.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | [
"BSD-3-Clause"
] | 1 | 2020-03-05T02:50:43.000Z | 2020-03-05T02:50:43.000Z | from enum import Enum
from typing import Optional
import pkg_resources
from gi.repository import Gtk
from gi.repository import Pango
cssprovider = Gtk.CssProvider()
cssprovider.load_from_path(pkg_resources.resource_filename('cct', 'resource/css/indicatorcolors.css'))
class IndicatorState(Enum):
    """State of an Indicator widget; the value doubles as its CSS name suffix."""

    OK = 'ok'
    WARNING = 'warning'
    ERROR = 'error'
    NEUTRAL = 'neutral'
    UNKNOWN = 'unknown'

    def __str__(self):
        # The string form is used to build widget names such as
        # "indicator_ok", so it must be the bare value, not "IndicatorState.OK".
        return str(self.value)
class Indicator(Gtk.Box):
    """A labelled status widget: a caption above a colored value field.

    The value field's background color reflects an IndicatorState via the
    CSS rules in indicatorcolors.css; widget names follow the
    "indicator_<state>" convention consumed by that stylesheet.
    """

    def __init__(self, label: str, value: object, state: IndicatorState, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if 'orientation' not in kwargs:
            self.set_orientation(Gtk.Orientation.VERTICAL)
        self._label = Gtk.Label(label=label)
        self.pack_start(self._label, True, True, 0)
        # The value label sits inside an EventBox so it can carry a styled
        # background color (plain labels cannot be named/colored the same way).
        self._eventbox = Gtk.EventBox()
        self.pack_start(self._eventbox, True, True, 0)
        self._valuelabel = Gtk.Label(label=str(value))
        self._valuelabel.set_ellipsize(Pango.EllipsizeMode.MIDDLE)
        self._valuelabel.set_max_width_chars(1)
        self._value = value
        self._eventbox.add(self._valuelabel)
        self._eventbox.set_border_width(5)
        self._eventbox.set_name('indicator_' + str(state))
        self._eventbox.get_style_context().add_provider(cssprovider, Gtk.STYLE_PROVIDER_PRIORITY_USER)
        self.set_hexpand(True)
        self.set_hexpand_set(True)
        self._eventbox.queue_draw()

    def set_label(self, text: str):
        return self._label.set_text(text)

    def get_label(self) -> str:
        return self._label.get_text()

    def set_value(self, value: object, state: Optional[IndicatorState] = None):
        """Update the displayed value and, optionally, the state color."""
        self._value = value
        text = str(value)
        res = self._valuelabel.set_text(text)
        # BUG FIX: the tooltips used to concatenate the raw value, which
        # raised TypeError for any non-string value even though the label
        # itself was already set from str(value).
        tooltip = self._label.get_text() + ': ' + text
        self._eventbox.set_tooltip_text(tooltip)
        self._valuelabel.set_tooltip_text(tooltip)
        if state is not None:
            self.set_state(state)
        return res

    def get_value(self):
        return self._value

    def set_state(self, state):
        """Recolor the value field to reflect *state*."""
        res = self._eventbox.set_name('indicator_' + str(state))
        self._valuelabel.set_name('indicator_' + str(state))
        self._eventbox.queue_draw()
        self._valuelabel.queue_draw()
        return res

    def get_state(self):
        # The state is recovered from the widget name written by set_state().
        return IndicatorState(self._eventbox.get_name().split('_', 1)[-1])
f3290a43c48894b4555f25b583136c942a6ca761 | 13,539 | py | Python | ext/ANTsPyNet/antspynet/architectures/create_densenet_model.py | tsmonteiro/fmri_proc | ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1 | [
"MIT"
] | 2 | 2021-11-16T10:00:33.000Z | 2021-12-13T02:57:40.000Z | ext/ANTsPyNet/antspynet/architectures/create_densenet_model.py | tsmonteiro/fmri_proc | ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1 | [
"MIT"
] | null | null | null | ext/ANTsPyNet/antspynet/architectures/create_densenet_model.py | tsmonteiro/fmri_proc | ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1 | [
"MIT"
] | 1 | 2021-12-13T02:57:27.000Z | 2021-12-13T02:57:27.000Z |
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, Dropout, BatchNormalization,
                                     Activation, Dense, Concatenate,
                                     AveragePooling2D, AveragePooling3D,
                                     Conv2D, Conv2DTranspose, GlobalAveragePooling2D,
                                     Conv3D, Conv3DTranspose, GlobalAveragePooling3D)
from tensorflow.keras import initializers
from tensorflow.keras import regularizers
def create_densenet_model_2d(input_image_size,
                             number_of_classification_labels=1000,
                             number_of_filters=16,
                             depth=7,
                             number_of_dense_blocks=1,
                             growth_rate=12,
                             dropout_rate=0.2,
                             weight_decay=1e-4,
                             mode='classification'
                            ):
    """
    2-D implementation of the DenseNet deep learning architecture.

    Creates a keras model of the DenseNet deep learning architecture for image
    recognition based on the paper

    G. Huang, Z. Liu, K. Weinberger, and L. van der Maaten. Densely Connected
    Convolutional Networks

    available here:

            https://arxiv.org/abs/1608.06993

    This particular implementation was influenced by the following python
    implementation:

            https://github.com/tdeboissiere/DeepLearningImplementations/blob/master/DenseNet/densenet.py

    Arguments
    ---------
    input_image_size : tuple of length 3
        Used for specifying the input tensor shape. The shape (or dimension) of
        that tensor is the image dimensions followed by the number of channels
        (e.g., red, green, and blue).

    number_of_classification_labels : integer
        Number of classification labels.

    number_of_filters : integer
        Number of filters.

    depth : integer
        Number of layers---must be equal to 3 * N + 4 where N is an integer (default = 7).

    number_of_dense_blocks : integer
        Number of dense blocks to add to the end (default = 1).

    growth_rate : integer
        Number of filters to add for each dense block layer (default = 12).

    dropout_rate : scalar
        Per drop out layer rate (default = 0.2).

    weight_decay : scalar
        Weight decay (default = 1e-4).

    mode : string
        'classification' or 'regression'. Default = 'classification'.

    Returns
    -------
    Keras model
        A 2-D Keras model defining the network.

    Example
    -------
    >>> model = create_densenet_model_2d((128, 128, 1))
    >>> model.summary()
    """

    concatenation_axis = 0
    if K.image_data_format() == 'channels_last':
        concatenation_axis = -1

    def convolution_factory_2d(model, number_of_filters, kernel_size=(3, 3),
                               dropout_rate=0.0, weight_decay=1e-4):
        # BN -> ReLU -> Conv (-> optional Dropout): the pre-activation
        # ordering used throughout DenseNet.
        model = BatchNormalization(axis=concatenation_axis,
                                   gamma_regularizer=regularizers.l2(weight_decay),
                                   beta_regularizer=regularizers.l2(weight_decay))(model)
        model = Activation(activation='relu')(model)
        model = Conv2D(filters=number_of_filters,
                       kernel_size=kernel_size,
                       padding='same',
                       use_bias=False,
                       kernel_initializer=initializers.he_normal(),
                       kernel_regularizer=regularizers.l2(weight_decay))(model)
        if dropout_rate > 0.0:
            model = Dropout(rate=dropout_rate)(model)
        return(model)

    def transition_2d(model, number_of_filters, dropout_rate=0.0, weight_decay=1e-4):
        # 1x1 convolution followed by 2x spatial downsampling between blocks.
        # (AveragePooling2D is now imported; it previously raised NameError.)
        model = convolution_factory_2d(model, number_of_filters, kernel_size=(1, 1),
                                       dropout_rate=dropout_rate, weight_decay=weight_decay)
        model = AveragePooling2D(pool_size=(2, 2),
                                 strides=(2, 2))(model)
        return(model)

    def create_dense_blocks_2d(model, number_of_filters, depth, growth_rate,
                               dropout_rate=0.0, weight_decay=1e-4):
        # Each new layer sees the concatenation of all previous feature maps.
        dense_block_layers = [model]
        for i in range(depth):
            model = convolution_factory_2d(model, number_of_filters=growth_rate,
                                           kernel_size=(3, 3), dropout_rate=dropout_rate,
                                           weight_decay=weight_decay)
            dense_block_layers.append(model)
            model = Concatenate(axis=concatenation_axis)(dense_block_layers)
            number_of_filters += growth_rate
        return(model, number_of_filters)

    if ((depth - 4) % 3) != 0:
        raise ValueError('Depth must be equal to 3*N+4 where N is an integer.')
    number_of_layers = int((depth - 4) / 3)

    inputs = Input(shape = input_image_size)

    # Initial convolution before the first dense block.
    outputs = Conv2D(filters=number_of_filters,
                     kernel_size=(3, 3),
                     kernel_initializer='he_uniform',
                     padding='same',
                     use_bias=False,
                     kernel_regularizer=regularizers.l2(weight_decay))(inputs)

    # Add dense blocks, separated by downsampling transition layers.
    nFilters = number_of_filters
    for i in range(number_of_dense_blocks - 1):
        outputs, nFilters = \
            create_dense_blocks_2d(outputs, number_of_filters=nFilters,
                                   depth=number_of_layers, growth_rate=growth_rate,
                                   dropout_rate=dropout_rate, weight_decay=weight_decay)
        outputs = transition_2d(outputs, number_of_filters=nFilters,
                                dropout_rate=dropout_rate, weight_decay=weight_decay)

    outputs, nFilters = \
        create_dense_blocks_2d(outputs, number_of_filters=nFilters,
                               depth=number_of_layers, growth_rate=growth_rate,
                               dropout_rate=dropout_rate, weight_decay=weight_decay)

    outputs = BatchNormalization(axis=concatenation_axis,
                                 gamma_regularizer=regularizers.l2(weight_decay),
                                 beta_regularizer=regularizers.l2(weight_decay))(outputs)
    outputs = Activation(activation='relu')(outputs)
    outputs = GlobalAveragePooling2D()(outputs)

    layer_activation = ''
    if mode == 'classification':
        layer_activation = 'softmax'
    elif mode == 'regression':
        # BUG FIX: this previously assigned to a misspelled variable
        # (layerActivation), leaving layer_activation empty in regression mode.
        layer_activation = 'linear'
    else:
        raise ValueError('mode must be either `classification` or `regression`.')

    outputs = Dense(units=number_of_classification_labels,
                    activation=layer_activation,
                    kernel_regularizer=regularizers.l2(weight_decay),
                    bias_regularizer=regularizers.l2(weight_decay))(outputs)

    densenet_model = Model(inputs=inputs, outputs=outputs)
    return(densenet_model)
def create_densenet_model_3d(input_image_size,
                             number_of_classification_labels=1000,
                             number_of_filters=16,
                             depth=7,
                             number_of_dense_blocks=1,
                             growth_rate=12,
                             dropout_rate=0.2,
                             weight_decay=1e-4,
                             mode='classification'
                            ):
    """
    3-D implementation of the DenseNet deep learning architecture.

    Creates a keras model of the DenseNet deep learning architecture for image
    recognition based on the paper

    G. Huang, Z. Liu, K. Weinberger, and L. van der Maaten. Densely Connected
    Convolutional Networks

    available here:

            https://arxiv.org/abs/1608.06993

    This particular implementation was influenced by the following python
    implementation:

            https://github.com/tdeboissiere/DeepLearningImplementations/blob/master/DenseNet/densenet.py

    Arguments
    ---------
    input_image_size : tuple of length 4
        Used for specifying the input tensor shape. The shape (or dimension) of
        that tensor is the image dimensions followed by the number of channels
        (e.g., red, green, and blue).

    number_of_classification_labels : integer
        Number of classification labels.

    number_of_filters : integer
        Number of filters.

    depth : integer
        Number of layers---must be equal to 3 * N + 4 where N is an integer (default = 7).

    number_of_dense_blocks : integer
        Number of dense blocks to add to the end (default = 1).

    growth_rate : integer
        Number of filters to add for each dense block layer (default = 12).

    dropout_rate : scalar
        Per drop out layer rate (default = 0.2).

    weight_decay : scalar
        Weight decay (default = 1e-4).

    mode : string
        'classification' or 'regression'. Default = 'classification'.

    Returns
    -------
    Keras model
        A 3-D Keras model defining the network.

    Example
    -------
    >>> model = create_densenet_model_3d((128, 128, 128, 1))
    >>> model.summary()
    """

    concatenation_axis = 0
    if K.image_data_format() == 'channels_last':
        concatenation_axis = -1

    def convolution_factory_3d(model, number_of_filters, kernel_size=(3, 3, 3),
                               dropout_rate=0.0, weight_decay=1e-4):
        # BN -> ReLU -> Conv (-> optional Dropout): the pre-activation
        # ordering used throughout DenseNet.
        model = BatchNormalization(axis=concatenation_axis,
                                   gamma_regularizer=regularizers.l2(weight_decay),
                                   beta_regularizer=regularizers.l2(weight_decay))(model)
        model = Activation(activation='relu')(model)
        model = Conv3D(filters=number_of_filters,
                       kernel_size=kernel_size,
                       padding='same',
                       use_bias=False,
                       kernel_initializer=initializers.he_normal(),
                       kernel_regularizer=regularizers.l2(weight_decay))(model)
        if dropout_rate > 0.0:
            model = Dropout(rate=dropout_rate)(model)
        return(model)

    def transition_3d(model, number_of_filters, dropout_rate=0.0, weight_decay=1e-4):
        # 1x1x1 convolution followed by 2x spatial downsampling between blocks.
        # (AveragePooling3D is now imported; it previously raised NameError.)
        model = convolution_factory_3d(model, number_of_filters, kernel_size=(1, 1, 1),
                                       dropout_rate=dropout_rate, weight_decay=weight_decay)
        model = AveragePooling3D(pool_size=(2, 2, 2),
                                 strides=(2, 2, 2))(model)
        return(model)

    def create_dense_blocks_3d(model, number_of_filters, depth, growth_rate,
                               dropout_rate=0.0, weight_decay=1e-4):
        # Each new layer sees the concatenation of all previous feature maps.
        dense_block_layers = [model]
        for i in range(depth):
            model = convolution_factory_3d(model, number_of_filters=growth_rate,
                                           kernel_size=(3, 3, 3), dropout_rate=dropout_rate,
                                           weight_decay=weight_decay)
            dense_block_layers.append(model)
            model = Concatenate(axis=concatenation_axis)(dense_block_layers)
            number_of_filters += growth_rate
        return(model, number_of_filters)

    if ((depth - 4) % 3) != 0:
        raise ValueError('Depth must be equal to 3*N+4 where N is an integer.')
    number_of_layers = int((depth - 4) / 3)

    inputs = Input(shape = input_image_size)

    # Initial convolution before the first dense block.
    outputs = Conv3D(filters=number_of_filters,
                     kernel_size=(3, 3, 3),
                     kernel_initializer='he_uniform',
                     padding='same',
                     use_bias=False,
                     kernel_regularizer=regularizers.l2(weight_decay))(inputs)

    # Add dense blocks, separated by downsampling transition layers.
    nFilters = number_of_filters
    for i in range(number_of_dense_blocks - 1):
        outputs, nFilters = \
            create_dense_blocks_3d(outputs, number_of_filters=nFilters,
                                   depth=number_of_layers, growth_rate=growth_rate,
                                   dropout_rate=dropout_rate, weight_decay=weight_decay)
        outputs = transition_3d(outputs, number_of_filters=nFilters,
                                dropout_rate=dropout_rate, weight_decay=weight_decay)

    outputs, nFilters = \
        create_dense_blocks_3d(outputs, number_of_filters=nFilters,
                               depth=number_of_layers, growth_rate=growth_rate,
                               dropout_rate=dropout_rate, weight_decay=weight_decay)

    outputs = BatchNormalization(axis=concatenation_axis,
                                 gamma_regularizer=regularizers.l2(weight_decay),
                                 beta_regularizer=regularizers.l2(weight_decay))(outputs)
    outputs = Activation(activation='relu')(outputs)
    outputs = GlobalAveragePooling3D()(outputs)

    layer_activation = ''
    if mode == 'classification':
        layer_activation = 'softmax'
    elif mode == 'regression':
        # BUG FIX: this previously assigned to a misspelled variable
        # (layerActivation), leaving layer_activation empty in regression mode.
        layer_activation = 'linear'
    else:
        raise ValueError('mode must be either `classification` or `regression`.')

    outputs = Dense(units=number_of_classification_labels,
                    activation=layer_activation,
                    kernel_regularizer=regularizers.l2(weight_decay),
                    bias_regularizer=regularizers.l2(weight_decay))(outputs)

    densenet_model = Model(inputs=inputs, outputs=outputs)
    return(densenet_model)
| 37.818436 | 104 | 0.602482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,010 | 0.296181 |
f32a9558031ac36d2759219a73cf75600bfb9cb5 | 5,030 | py | Python | scripts/demo.py | beizhengren/pytorch-deeplab-xception | 8dc3107104f4d25ec74ea29ba72af360f192afc1 | [
"MIT"
] | null | null | null | scripts/demo.py | beizhengren/pytorch-deeplab-xception | 8dc3107104f4d25ec74ea29ba72af360f192afc1 | [
"MIT"
] | null | null | null | scripts/demo.py | beizhengren/pytorch-deeplab-xception | 8dc3107104f4d25ec74ea29ba72af360f192afc1 | [
"MIT"
] | null | null | null | #
# demo.py
#
import argparse
import os
import numpy as np
import logging
import sys
import os.path
from PIL import Image
from modeling.deeplab import *
from dataloaders import custom_transforms as tr
from PIL import Image
from torchvision import transforms
from dataloaders.utils import *
from torchvision.utils import make_grid, save_image
def blend_two_images(img1_path, img2_path, output_path):
    """Alpha-blend the image at *img2_path* over *img1_path* and save it.

    Non-transparent pixels of the overlay are composited at alpha 204/255
    (~80% opacity), which lays a segmentation mask over the original photo
    while keeping the photo visible underneath.
    """
    base = Image.open(img1_path).convert('RGBA')
    overlay = Image.open(img2_path).convert('RGBA')
    # Wherever the overlay has any alpha at all, blend it at 204/255.
    r, g, b, alpha = overlay.split()
    alpha = alpha.point(lambda i: i > 0 and 204)
    blended = Image.composite(overlay, base, alpha)
    # BUG FIX: the old code only created a directory when output_path was
    # literally "result", and then tried to save the image onto that very
    # directory path. Ensure the parent directory of the target file exists.
    out_dir = os.path.dirname(output_path)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    blended.save(output_path)
def main():
    """Segment every .jpg in --in-path with DeepLab v3+ and write results.

    For each input image two files are produced in --out-path:
    "<name>-seg.jpg" (colorized segmentation map) and "<name>-blend.png"
    (the segmentation blended over the original image).
    """
    parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Training")
    parser.add_argument('--in-path', type=str, required=True, help='directory of images to test')
    parser.add_argument('--out-path', type=str, required=True, help='directory of mask image to save')
    parser.add_argument('--backbone', type=str, default='resnet',
                        choices=['resnet', 'xception', 'drn', 'mobilenet'],
                        help='backbone name (default: resnet)')
    parser.add_argument('--ckpt', type=str, default='deeplab-resnet.pth',
                        help='saved model')
    parser.add_argument('--out-stride', type=int, default=16,
                        help='network output stride (default: 8)')
    parser.add_argument('--no-cuda', action='store_true', default=True,
                        help='disables CUDA training')
    parser.add_argument('--gpu-ids', type=str, default='0',
                        help='use which gpu to train, must be a \
                        comma-separated list of integers only (default=0)')
    parser.add_argument('--dataset', type=str, default='pascal',
                        choices=['pascal', 'coco', 'cityscapes'],
                        help='dataset name (default: pascal)')
    parser.add_argument('--crop-size', type=int, default=512,
                        help='crop image size')
    parser.add_argument('--sync-bn', type=bool, default=None,
                        help='whether to use sync bn (default: auto)')
    parser.add_argument('--freeze-bn', type=bool, default=False,
                        help='whether to freeze bn parameters (default: False)')

    args = parser.parse_args()
    print(args)
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        try:
            args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
        except ValueError:
            raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')

    if args.sync_bn is None:
        # Synchronized batch norm only makes sense on a multi-GPU run.
        args.sync_bn = bool(args.cuda and len(args.gpu_ids) > 1)

    model = DeepLab(num_classes=3,
                    backbone=args.backbone,
                    output_stride=args.out_stride,
                    sync_bn=args.sync_bn,
                    freeze_bn=args.freeze_bn)

    print(f"The args.ckpt is : {args.ckpt}")
    ckpt = torch.load(args.ckpt, map_location='cpu')
    model.load_state_dict(ckpt['state_dict'])
    # Inference only: switch off dropout/batch-norm updates once, up front
    # (previously model.eval() was called inside the image loop).
    model.eval()
    if args.cuda:
        # BUG FIX: the old code called .cuda() on a PIL image, which has no
        # such method, and never moved the model to the GPU. Move the model
        # once here; each input tensor is moved inside the loop below.
        model = model.cuda()

    composed_transforms = transforms.Compose([
        tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        tr.ToTensor()])

    for img_path in os.listdir(args.in_path):
        if os.path.splitext(img_path)[-1] not in ['.jpg']:
            print('skip {}'.format(img_path))
            continue
        img_path = os.path.join(args.in_path, img_path)
        output_path = os.path.join(args.out_path, os.path.splitext(os.path.split(img_path)[-1])[-2] + "-seg" + ".jpg")
        combine_path = os.path.join(args.out_path, os.path.splitext(os.path.split(img_path)[-1])[-2] + "-blend" + ".png")
        image = Image.open(img_path).convert('RGB')
        # The label is unused for inference; it only satisfies the
        # transform's expected {'image', 'label'} sample structure.
        target = Image.open(img_path).convert('L')
        sample = {'image': image, 'label': target}
        tensor_in = composed_transforms(sample)['image'].unsqueeze(0)
        if args.cuda:
            tensor_in = tensor_in.cuda()
        with torch.no_grad():
            output = model(tensor_in)

        grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy()),
                               3, normalize=False, range=(0, 255))
        print("type(grid) is:{}".format(type(grid_image)))
        print("grid_image.shape is:{}".format(grid_image.shape))
        save_image(grid_image, output_path)
        print("saved {}".format(output_path))

        blend_two_images(img_path, output_path, combine_path)
        print("blended {}\n".format(combine_path))

if __name__ == "__main__":
    main()
| 40.894309 | 122 | 0.606958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,124 | 0.223459 |
f32b46652a7f91d104e0559c972e6f7e2d96e56c | 4,862 | py | Python | codeMania-python-matplotlib/tut8.py | JayramMardi/codeMania | 2327bef1d2a25aacdf4e39dccf2d2e77191a0f35 | [
"Apache-2.0"
] | null | null | null | codeMania-python-matplotlib/tut8.py | JayramMardi/codeMania | 2327bef1d2a25aacdf4e39dccf2d2e77191a0f35 | [
"Apache-2.0"
] | null | null | null | codeMania-python-matplotlib/tut8.py | JayramMardi/codeMania | 2327bef1d2a25aacdf4e39dccf2d2e77191a0f35 | [
"Apache-2.0"
] | 1 | 2022-01-02T14:58:38.000Z | 2022-01-02T14:58:38.000Z | # chapter Matplotlib Plotting
# Matplotlib plotting tutorial: each section below draws a small demo figure.
'''
The plot() function is used to draw points (markers) in a diagram.
By default, the plot() function draws a line from point to point.
The function takes parameters for specifying points in the diagram.
Parameter 1 is an array containing the points on the x-axis.
Parameter 2 is an array containing the points on the y-axis.
If we need to plot a line from (1, 3) to (8, 10), we have to pass two arrays [1, 8] and [3, 10] to the plot function.
'''
# Draw a line in a diagram from position (1, 3) to position (8, 10):
import matplotlib.pyplot as plt
import numpy as r
import sys
# NOTE(review): numpy is aliased as `r` here; later sections re-import it as `np`.
x=r.array([1,9,])
y=r.array([4,10])
plt.plot(x,y)
plt.show()
'''
Plotting Without Line
To plot only the markers, you can use shortcut string notation parameter 'o', which means 'rings'.
'''
x=r.array([3,10])
y=r.array([0,34])
plt.plot(x,y,'o')
plt.show()
'''
Multiple Points
You can plot as many points as you like, just make sure you have the same number of points in both axis.
Example
Draw a line in a diagram from position (1, 3) to (2, 8) then to (6, 1) and finally to position (8, 10):f
'''
x=r.array([1,2,4,9])
y=r.array([3,6,8,10])
# NOTE(review): label="red" only shows up if plt.legend() is called.
plt.plot(x,y,label="red")
plt.show()
#Two lines to make our compiler able to draw:
plt.savefig(sys.stdout.buffer)
sys.stdout.flush()
'''
Default X-Points
If we do not specify the points in the x-axis, they will get the default values 0, 1, 2, 3, (etc. depending on the length of the y-points.
So, if we take the same example as above, and leave out the x-points, the diagram will look like this:
'''
# Plotting without x-points:
ypoints=r.array([0,2,3,5,6,7,99])
plt.plot(ypoints)
plt.show()
plt.savefig(sys.stdout.buffer)
sys.stdout.flush()
# CHAPTER Matplotlib Markers
'''
Markers
You can use the keyword argument marker to emphasize each point with a specified marker:
'''
x=r.array([0,3,5,6,8,9])
y=r.array([2,4,6,7,8,10])
plt.plot(x,y,marker="*")
plt.show()
'''
Marker Reference
You can choose any of these markers:
Marker	Description
'o'	Circle
'*'	Star
'.'	Point
','	Pixel
'x'	X
'X'	X (filled)
'+'	Plus
'P'	Plus (filled)
's'	Square
'D'	Diamond
'd'	Diamond (thin)
'p'	Pentagon
'H'	Hexagon
'h'	Hexagon
'v'	Triangle Down
'^'	Triangle Up
'<'	Triangle Left
'>'	Triangle Right
'1'	Tri Down
'2'	Tri Up
'3'	Tri Left
'4'	Tri Right
'|'	Vline
'_'	Hline
'''
'''
Format Strings fmt
You can use also use the shortcut string notation parameter to specify the marker.
This parameter is also called fmt, and is written with this syntax:
marker|line|color
Example
Mark each point with a circle:
'''
x=r.array([3,5,5,6,7,8])
y=r.array([1,3,5,6,7,8])
# fmt '-.r' = dash-dot line, red color (no marker given).
plt.plot(x,y,'-.r')
plt.show()
'''
The marker value can be anything from the Marker Reference above.
The line value can be one of the following:
Line Reference
Line Syntax	Description
'-'	Solid line
':'	Dotted line
'--'	Dashed line
'-.'	Dashed/dotted line
Note: If you leave out the line value in the fmt parameter, no line will be plottet.
'''
'''
Color Reference
Color Syntax	Description
'r'	Red
'g'	Green
'b'	Blue
'c'	Cyan
'm'	Magenta
'y'	Yellow
'k'	Black
'w'	White
'''
'''
Marker Size
You can use the keyword argument markersize or the shorter version, ms to set the size of the markers:
'''
# NOTE(review): x and y have different lengths and y is never plotted below;
# ms is also passed as the string '17' rather than an int -- confirm intent.
x=r.array([1,3,4,5,9,5])
y=r.array([0,3,6,8,8])
plt.plot(x,marker='o',ms='17')
plt.show()
'''
Marker Color
You can use the keyword argument markeredgecolor or the shorter mec to set the color of the edge of the markers:
Example
Set the EDGE color to red:
'''
x=r.array([2,3,5,6])
# NOTE(review): this builds a 0-d array from the literal string '[0,3,5,6,8]'
# (and y is unused below) -- looks like a mistake in the tutorial source.
y=r.array('[0,3,5,6,8]')
plt.plot(x,marker='*',ms=34,mec='r')
plt.show()
'''
You can use the keyword argument markerfacecolor or the shorter mfc to set the color inside the edge of the markers:
Example
Set the FACE color to red:
'''
x=r.array([1,3,5,6])
y=r.array([2,3,5,6])
plt.plot(x,marker='*',ms=34,mfc='r')
plt.show()
'''
# Use both the mec and mfc arguments to color of the entire marker:
# Example
# Set the color of both the edge and the face to red:
'''
import matplotlib.pyplot as plt
import numpy as r
y=r.array([0,4,6,7,7,8])
plt.plot(y,marker='*',ms=30,mec='r',mfc='r')
plt.show()
'''
You can also use Hexadecimal color values:
Example
Mark each point with a beautiful green color:
...
plt.plot(ypoints, marker = 'o', ms = 20, mec = '#4CAF50', mfc = '#4CAF50')
...
'''
import matplotlib.pyplot as plt
import numpy as np
x=np.array([1,2,3,4,5,6,5,7])
y=np.array([1,2,4,5,5,6,])
plt.plot(y,ms=34,marker='*',mec='hotpink',mfc="hotpink",linestyle=':')
plt.show()
f32c53cc845f82e4bd29dd3cc7b2266b7a8d4e51 | 73 | py | Python | GoogleColab/InteligenciaAnalitica/Ex5/trainbase.py | AlexandroLuis/ComputerScience | 273c3f6797737d98ffdf00ae870ceafa3aba59d1 | [
"MIT"
] | 2 | 2021-02-06T21:48:24.000Z | 2022-03-21T00:16:17.000Z | GoogleColab/InteligenciaAnalitica/Ex5/trainbase.py | AlexandroLuis/ComputerScience | 273c3f6797737d98ffdf00ae870ceafa3aba59d1 | [
"MIT"
] | null | null | null | GoogleColab/InteligenciaAnalitica/Ex5/trainbase.py | AlexandroLuis/ComputerScience | 273c3f6797737d98ffdf00ae870ceafa3aba59d1 | [
"MIT"
] | 1 | 2022-02-10T20:59:23.000Z | 2022-02-10T20:59:23.000Z | dataset = pd.read_csv('Corona_NLP_train.csv',encoding='iso8859')
dataset
| 24.333333 | 64 | 0.794521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.424658 |
f3310c16bf2027ed954517e88ece42b1d7f94326 | 3,189 | py | Python | text/opencv_dnn_detect.py | kingemma/invoice | b381ffcd4b798434ea74cb4463eb5cff276ded3a | [
"MIT"
] | 1,017 | 2019-08-02T04:18:35.000Z | 2022-03-29T08:18:03.000Z | text/opencv_dnn_detect.py | kingemma/invoice | b381ffcd4b798434ea74cb4463eb5cff276ded3a | [
"MIT"
] | 47 | 2019-08-08T08:36:48.000Z | 2022-03-08T07:00:29.000Z | text/opencv_dnn_detect.py | kingemma/invoice | b381ffcd4b798434ea74cb4463eb5cff276ded3a | [
"MIT"
] | 300 | 2019-08-03T03:06:30.000Z | 2022-03-31T02:20:11.000Z | from config import yoloCfg,yoloWeights,opencvFlag
from config import AngleModelPb,AngleModelPbtxt
from config import IMGSIZE
from PIL import Image
import numpy as np
import cv2
if opencvFlag=='keras':
##转换为tf模型,以便GPU调用
import tensorflow as tf
from tensorflow.python.platform import gfile
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
with gfile.FastGFile(AngleModelPb, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(graph_def, name='')
inputImg = sess.graph.get_tensor_by_name('input_1:0')
predictions = sess.graph.get_tensor_by_name('predictions/Softmax:0')
keep_prob = tf.placeholder(tf.float32)
else:
angleNet = cv2.dnn.readNetFromTensorflow(AngleModelPb,AngleModelPbtxt)##dnn 文字方向检测
textNet = cv2.dnn.readNetFromDarknet(yoloCfg,yoloWeights)##文字定位
def text_detect(img):
thresh=0
h,w = img.shape[:2]
inputBlob = cv2.dnn.blobFromImage(img, scalefactor=0.00390625, size=IMGSIZE,swapRB=True ,crop=False);
textNet.setInput(inputBlob)
pred = textNet.forward()
cx = pred[:,0]*w
cy = pred[:,1]*h
xmin = cx - pred[:,2]*w/2
xmax = cx + pred[:,2]*w/2
ymin = cy - pred[:,3]*h/2
ymax = cy + pred[:,3]*h/2
scores = pred[:,4]
indx = np.where(scores>thresh)[0]
scores = scores[indx]
boxes = np.array(list(zip(xmin[indx],ymin[indx],xmax[indx],ymax[indx])))
return boxes,scores
def angle_detect_dnn(img,adjust=True):
"""
文字方向检测
"""
h,w = img.shape[:2]
ROTATE = [0,90,180,270]
if adjust:
thesh = 0.05
xmin,ymin,xmax,ymax = int(thesh*w),int(thesh*h),w-int(thesh*w),h-int(thesh*h)
img = img[ymin:ymax,xmin:xmax]##剪切图片边缘
inputBlob = cv2.dnn.blobFromImage(img,
scalefactor=1.0,
size=(224, 224),
swapRB=True ,
mean=[103.939,116.779,123.68],crop=False);
angleNet.setInput(inputBlob)
pred = angleNet.forward()
index = np.argmax(pred,axis=1)[0]
return ROTATE[index]
def angle_detect_tf(img,adjust=True):
"""
文字方向检测
"""
h,w = img.shape[:2]
ROTATE = [0,90,180,270]
if adjust:
thesh = 0.05
xmin,ymin,xmax,ymax = int(thesh*w),int(thesh*h),w-int(thesh*w),h-int(thesh*h)
img = img[ymin:ymax,xmin:xmax]##剪切图片边缘
img = cv2.resize(img,(224,224))
img = img[..., ::-1].astype(np.float32)
img[..., 0] -= 103.939
img[..., 1] -= 116.779
img[..., 2] -= 123.68
img = np.array([img])
out = sess.run(predictions, feed_dict={inputImg: img,
keep_prob: 0
})
index = np.argmax(out,axis=1)[0]
return ROTATE[index]
def angle_detect(img,adjust=True):
"""
文字方向检测
"""
if opencvFlag=='keras':
return angle_detect_tf(img,adjust=adjust)
else:
return angle_detect_dnn(img,adjust=adjust) | 30.371429 | 105 | 0.580433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.082396 |
f331a08e601c32af4b162497007126ab54f3aaaf | 3,071 | py | Python | rhapsody/lib/RAudiostream.py | anarchiae/rhapsody5 | bd4c4f008d9d3df17ef400ce8895d3da913f6b39 | [
"MIT"
] | null | null | null | rhapsody/lib/RAudiostream.py | anarchiae/rhapsody5 | bd4c4f008d9d3df17ef400ce8895d3da913f6b39 | [
"MIT"
] | null | null | null | rhapsody/lib/RAudiostream.py | anarchiae/rhapsody5 | bd4c4f008d9d3df17ef400ce8895d3da913f6b39 | [
"MIT"
] | null | null | null | import threading
import vlc
import alsaaudio
import time
import requests
import lib.RMonitoring as rmonitoring
class Audiostream(threading.Thread):
monitoring = None
audiostream_id = None
audiostream_file = None
audiostream_status = None
audiostream_volume = None
audio_player_instance = vlc.Instance()
audio_player = audio_player_instance.media_player_new()
audio_file = None
mixer = alsaaudio.Mixer('Headphone')
def __init__(self, audiostream_id, project_id, broker):
threading.Thread.__init__(self)
self.audiostream_id = audiostream_id
self.project_id = project_id
self.monitoring = rmonitoring.Monitoring(self.project_id, broker)
def run(self):
while 1:
audiostream_values = self.get_audiostream_values()
duration = self.audio_player.get_length() / 1000
mm, ss = divmod(duration, 60)
# Met à jour le status du lecteur si nécessaire
if(audiostream_values['audiostream_status'] != self.audiostream_status):
if(audiostream_values['audiostream_status'] == "play"):
self.audio_player.play()
self.monitoring.send("INFO", "audiostream", "playback on")
elif(audiostream_values['audiostream_status'] == "pause"):
self.audio_player.pause()
self.monitoring.send("INFO", "audiostream", "playback off")
self.audiostream_status = audiostream_values['audiostream_status']
# Met à jour le fichier si nécessaire
if(audiostream_values['audiostream_file'] != self.audiostream_file or vlc.State.Ended == self.audio_player.get_state()):
audio_file = self.audio_player_instance.media_new("http://rhapsody.hestiaworkshop.net/files/" + audiostream_values['audiostream_file'])
self.audio_player.set_media(audio_file)
self.monitoring.send("INFO", "audiostream", "audio file has been changed")
if(audiostream_values['audiostream_status'] == "play"):
self.audio_player.play()
self.monitoring.send("INFO", "audiostream", "playback on")
self.audiostream_file = audiostream_values['audiostream_file']
# Met à jour le volume si nécessaire
if(audiostream_values['audiostream_volume'] != self.audiostream_volume):
self.mixer.setvolume(int(audiostream_values['audiostream_volume']))
self.audiostream_volume = audiostream_values['audiostream_volume']
self.monitoring.send("INFO", "audiostream", "volume has been changed")
time.sleep(1)
# Récupère les valeurs des différents paramètres actuels
# du module audiostream
def get_audiostream_values(self):
retrieve_audiostream_values_request = "http://rhapsody.hestiaworkshop.net/rest/audiostreams/get_values/" + self.audiostream_id
r = requests.get(retrieve_audiostream_values_request)
return r.json()
| 40.407895 | 151 | 0.661674 | 2,967 | 0.962999 | 0 | 0 | 0 | 0 | 0 | 0 | 751 | 0.243752 |
f3336022271959da6f3feefe377eea05c0033d68 | 2,981 | py | Python | src/pages/mainPage.py | mvoitko/Habrahabr-tests | 0b909178ba09b31dbd02c73f8b34f191746a27c2 | [
"MIT"
] | null | null | null | src/pages/mainPage.py | mvoitko/Habrahabr-tests | 0b909178ba09b31dbd02c73f8b34f191746a27c2 | [
"MIT"
] | null | null | null | src/pages/mainPage.py | mvoitko/Habrahabr-tests | 0b909178ba09b31dbd02c73f8b34f191746a27c2 | [
"MIT"
] | null | null | null | """
Created on Oct 28, 2016
@author: mvoitko
"""
import re
import time
import locale
from datetime import datetime
from selenium import webdriver
from selenium.common.exceptions import *
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from src import config
from src.utils import helper
from src.pages.basePage import BasePage
from src.locators.mainLocators import MainLocators
class MainPage(BasePage):
"""
Main Habrahabr page representation.
Class for UI actions related to this page
"""
url = config.base_url + 'interesting'
locators_dictionary = MainLocators.locators_dictionary
def search(self, querry):
"""
Search given querry.
:param querry: str - text to search
:return: MainPage: selenium.webdriver.*
"""
self.click_on('search button')
self.fill('search field', querry)
self.find('search field').send_keys(Keys.ENTER)
return MainPage(self.driver)
def get_search_results(self):
"""
Get search results.
:param querry: str - text to search
:return: results: list of selenium.webdriver.remote.webelement.WebElement
"""
return self.find_elems('post')
def sort_by(self, sorting_param):
"""
Sort search results page by given sorting parameter.
:param sorting_param: str - sort by parameter
:return: MainPage: selenium.webdriver.*
"""
# old_post = self.driver.find_element(*MainLocators.locators_dictionary['POST TITLE'])
sorting_param = "sort by " + sorting_param
self.click_on(sorting_param)
# WebDriverWait(self.driver, self.timeout).until(EC.staleness_of(old_post))
return MainPage(self.driver)
def get_posts_timestamps(self):
"""
Get posts timestamps.
:return: timestamps: list of datetime objects of posts.
"""
time.sleep(1)
timestamps = []
timestamp_elements = self.find_elems('post timestamp')
for timestamp in timestamp_elements:
if re.match(helper.pattern_today, timestamp.text, re.IGNORECASE):
date_object = helper.parse_today(timestamp.text)
elif re.match(helper.pattern_yesterday, timestamp.text, re.IGNORECASE):
date_object = helper.parse_yesterday(timestamp.text)
elif re.match(helper.pattern_current_year, timestamp.text, re.IGNORECASE):
date_object = helper.parse_current_year(timestamp.text)
elif re.match(helper.pattern_full, timestamp.text):
date_object = helper.parse_full(timestamp.text)
else:
raise NoSuchElementException(
"Cannot find POST TIMESTAMP locator on the {1} page".format(str(cls)))
timestamps.append(date_object)
return timestamps | 35.488095 | 94 | 0.667226 | 2,477 | 0.830929 | 0 | 0 | 0 | 0 | 0 | 0 | 1,039 | 0.348541 |
f33413864f878b6fed6b830e414836e3dbceb13e | 2,673 | py | Python | tools/_lib/utils.py | zhongxinghong/Botzone-Tank2 | 17fd00a57405ddd2f2bace58bab27800080b1fc7 | [
"MIT"
] | 11 | 2019-06-01T11:17:55.000Z | 2021-06-03T00:47:24.000Z | tools/_lib/utils.py | zhongxinghong/Botzone-Tank2 | 17fd00a57405ddd2f2bace58bab27800080b1fc7 | [
"MIT"
] | 1 | 2021-03-31T19:17:52.000Z | 2021-03-31T19:17:52.000Z | tools/_lib/utils.py | zhongxinghong/Botzone-Tank2 | 17fd00a57405ddd2f2bace58bab27800080b1fc7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: Administrator
# @Date: 2019-04-25 06:10:39
# @Last Modified by: Administrator
# @Last Modified time: 2019-05-21 15:30:34
__all__ = [
"mkdir",
"get_abspath",
"read_file",
"json_load",
"json_dump",
"b",
"u",
"Singleton",
"CachedProperty",
]
import os
from requests.compat import json
__ROOT_DIR = os.path.join(os.path.dirname(__file__), "../") # tools/ 为根目录
def mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
def get_abspath(*path):
return os.path.abspath(os.path.join(__ROOT_DIR, *path))
def read_file(file, encoding="utf-8-sig"):
with open(file, "r", encoding=encoding) as fp:
return fp.read()
def json_load(file, **kwargs):
with open(file, "r", encoding="utf-8-sig") as fp:
return json.load(fp, **kwargs)
def json_dump(obj, file, **kwargs):
encoding = kwargs.pop("encoding", "utf-8")
with open(file, "w", encoding=encoding) as fp:
json.dump(obj, fp, **kwargs)
def b(s):
"""
bytes/str/int/float -> bytes
"""
if isinstance(s, bytes):
return s
elif isinstance(s, (str,int,float)):
return str(s).encode("utf-8")
else:
raise TypeError(s)
def u(s):
"""
bytes/str/int/float -> str(utf8)
"""
if isinstance(s, (str,int,float)):
return str(s)
elif isinstance(s, bytes):
return s.decode("utf-8")
else:
raise TypeError(s)
class Singleton(type):
"""
Singleton Metaclass
@link https://github.com/jhao104/proxy_pool/blob/428359c8dada998481f038dbdc8d3923e5850c0e/Util/utilClass.py
"""
_inst = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._inst:
cls._inst[cls] = super(Singleton, cls).__call__(*args)
return cls._inst[cls]
class _Missing(object):
"""
from werkzeug._internal
"""
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing'
_MISSING = _Missing()
class CachedProperty(property):
"""
from werkzeug.utils
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __set__(self, obj, value):
obj.__dict__[self.__name__] = value
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _MISSING)
if value is _MISSING:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value | 21.384 | 111 | 0.59596 | 1,170 | 0.436404 | 0 | 0 | 0 | 0 | 0 | 0 | 662 | 0.246923 |
f3354311e1e2a11ade0a61af2606d322f880e6a1 | 4,884 | py | Python | main.py | notafrancescodavid/Job-Shop-Scheduling-Problem---Hierarchical-Parallel-and-easy-to-use-Genetic-Algorithm | a2083cfc2385b6a5a59b8bed6a4fb4180c3eb2ac | [
"MIT"
] | null | null | null | main.py | notafrancescodavid/Job-Shop-Scheduling-Problem---Hierarchical-Parallel-and-easy-to-use-Genetic-Algorithm | a2083cfc2385b6a5a59b8bed6a4fb4180c3eb2ac | [
"MIT"
] | null | null | null | main.py | notafrancescodavid/Job-Shop-Scheduling-Problem---Hierarchical-Parallel-and-easy-to-use-Genetic-Algorithm | a2083cfc2385b6a5a59b8bed6a4fb4180c3eb2ac | [
"MIT"
] | null | null | null | #from tree_GA import TreeGA
import os
import time
import json
import pandas as pd
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from treeGA import TreeGA
def printChart(toFile=False,filename="",lang="en"):
iteration = 0
jobsForMachines = []
jobsForMachinesWithJobId = []
for machineSchedule in bestMachineSchedules:
jobsForMachine = []
jobsForMachineWithJobId = []
for jobOperationCouple in machineSchedule:
jobId = jobOperationCouple[0]
operationNumber = jobOperationCouple[1]
#print("stating time, ending time")
jobStartAndEndTime = bestJobTimings[jobId][operationNumber]
jobsForMachine.append((jobStartAndEndTime[0],jobStartAndEndTime[1] - jobStartAndEndTime[0]))
jobsForMachineWithJobId.append((jobStartAndEndTime[0],jobStartAndEndTime[1],jobId))
#print([jobStartAndEndTime[0],jobStartAndEndTime[1],jobId])
jobsForMachinesWithJobId.append(jobsForMachineWithJobId)
jobsForMachines.append(jobsForMachine)
iteration = iteration + 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 22}
plt.rc('font', **font)
chartPatches = []
#colors = ['#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080', '#000000']
colors = ["#696969","#8b4513","#808000","#483d8b","#008000","#008b8b","#00008b","#8fbc8f","#800080","#b03060","#ff0000","#ffa500","#ffff00","#00ff00","#8a2be2","#00ff7f","#dc143c","#00ffff","#00bfff","#0000ff","#adff2f","#b0c4de","#ff7f50","#ff00ff","#1e90ff","#90ee90","#ff1493","#ee82ee","#ffe4b5","#ffb6c1"]
for j in range(len(bestJobTimings)):
colorHex = colors[j]
chartPatches.append(mpatches.Patch(color=colorHex, label='Job' + str(j)))
fig, schedule = plt.subplots()
fig.set_figheight(18)
fig.set_figwidth(25)
numOfMachines = len(bestMachineSchedules)
# Setting Y-axis limits
schedule.set_ylim(0, numOfMachines * 20)
# Setting X-axis limits
schedule.set_xlim(0, minimumMakespan)
# Setting labels for x-axis and y-axis
if lang == "it":
schedule.set_xlabel("Minuti sin dall'inizio")
schedule.set_ylabel('Macchina')
machineString = "Macchina"
else:
schedule.set_xlabel("Minutes since the start")
schedule.set_ylabel('Machine')
machineString = "Machine"
schedule.grid(True)
# Setting ticks on y-axis
ytiks = []
yticksLabels = []
verticalOffeset = 20
for i in range(numOfMachines):
ytiks.append(i * verticalOffeset)
yticksLabels.append(machineString + " " + str(i))
colorsForChart = []
jobIds = []
for j in range(len(jobsForMachinesWithJobId[i])):
jobId = jobsForMachinesWithJobId[i][j][2]
colorsForChart.append(colors[jobId])
jobIds.append(jobId)
schedule.broken_barh(jobsForMachines[i], (i * verticalOffeset, verticalOffeset/2), facecolors = tuple(colorsForChart))
for j in range(len(jobsForMachines[i])):
x1,x2 = jobsForMachines[i][j]
schedule.text(x=x1 + x2/2, y=(i * verticalOffeset) + 5,s=jobIds[j],ha='center', va='center',color='white',fontsize=18,fontweight="bold")
schedule.set_yticks(ytiks)
# Labelling tickes of y-axis
schedule.set_yticklabels(yticksLabels)
fig.legend(handles=chartPatches,title='Nomi Job', bbox_to_anchor=(0.9, 0.9), loc='upper left')
if toFile:
plt.savefig(filename)
plt.show()
if __name__ == '__main__':
start_time = time.time()
fileInstanceName = sys.argv[1]
populationNumber = int(sys.argv[2])
iterationNumber = int(sys.argv[3])
numberThatIsAPowerOfTwo = int(sys.argv[4])
treeGA = TreeGA(fileName = fileInstanceName,populationNumber = populationNumber,iterationNumber = iterationNumber,numberThatIsAPowerOfTwo = numberThatIsAPowerOfTwo)
treeGA.execute()
minimumMakespan = treeGA.getMinimumMakespan()
bestSolution = treeGA.getBestSolution()
bestMachineSchedules = treeGA.getBestSolutionMachineSchedules()
bestJobTimings = treeGA.getBestSolutionJobTimings()
print("")
print("Minimum Makespan:")
print(minimumMakespan)
print("")
print("Best solution")
print(bestSolution.values)
end_time = time.time()
print("Execution time in seconds: ")
print(end_time - start_time)
if sys.argv[5] == "plot":
printChart()
elif sys.argv[5] == "plot_to_file":
printChart(toFile=True,filename=sys.argv[6],lang="it")
| 34.885714 | 314 | 0.63923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,079 | 0.220925 |
f335630c98f006cde2b540a21572bf9a60f94dc4 | 2,595 | py | Python | mainsite/migrations/0001_initial.py | MuratovER/IMO | 90efd087917159dc5b8aab3c8946003496e54418 | [
"MIT"
] | null | null | null | mainsite/migrations/0001_initial.py | MuratovER/IMO | 90efd087917159dc5b8aab3c8946003496e54418 | [
"MIT"
] | null | null | null | mainsite/migrations/0001_initial.py | MuratovER/IMO | 90efd087917159dc5b8aab3c8946003496e54418 | [
"MIT"
] | 1 | 2022-03-31T21:19:14.000Z | 2022-03-31T21:19:14.000Z | # Generated by Django 4.0.3 on 2022-05-07 11:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Speciality',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField()),
('key', models.CharField(max_length=10)),
('price', models.IntegerField(blank=True, null=True)),
('score', models.IntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('gender', models.CharField(blank=True, choices=[('Male', 'M'), ('Female', 'F'), ('None of this', 'N')], max_length=12, null=True)),
('birthdate', models.DateTimeField(blank=True, null=True)),
('country', models.CharField(max_length=100)),
('city', models.CharField(max_length=100)),
('citizenship', models.CharField(blank=True, max_length=100, null=True)),
('email', models.EmailField(max_length=150)),
('phone', models.CharField(max_length=100)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('short_description', models.CharField(blank=True, max_length=200, null=True)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 45.526316 | 148 | 0.589595 | 2,407 | 0.927553 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.122158 |
f3363cb0d95d7a8d90092c67699c8bf108010794 | 1,905 | py | Python | api/routes.py | elainevoice/backend | 9b5fef59001fd6c2040affc80cd5cb9690c73795 | [
"Apache-2.0"
] | 3 | 2020-12-28T16:45:56.000Z | 2021-12-18T08:38:29.000Z | api/routes.py | elainevoice/backend | 9b5fef59001fd6c2040affc80cd5cb9690c73795 | [
"Apache-2.0"
] | 1 | 2020-12-14T13:09:42.000Z | 2020-12-14T13:09:42.000Z | api/routes.py | elainevoice/backend | 9b5fef59001fd6c2040affc80cd5cb9690c73795 | [
"Apache-2.0"
] | null | null | null | from fastapi import APIRouter, File, Header, HTTPException, UploadFile
from fastapi.responses import FileResponse, HTMLResponse
from pydantic.main import List
from api import controller
from api.config import application_name
from api.models.TTS_model import TTSModel
router = APIRouter()
MAX_CHARACTERS = 550
class NotVIPplsPAYError(Exception):
pass
@router.get("/", response_class=HTMLResponse)
def home():
return f"<body><h1>API of {application_name}</h1></body>"
@router.post("/taco")
def text_to_tacotron_audio_file(data: TTSModel, model=Header(None)):
try:
text = data.text
if len(text) > MAX_CHARACTERS:
raise NotVIPplsPAYError(
"Too many chararacters."
)
wav_audio_file_path = controller.text_to_tacotron_audio_file(data.text, model)
return FileResponse(str(wav_audio_file_path))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/taco_audio")
async def audio_to_tacotron_audio_file(
file: UploadFile = File(...), model=Header(None)
):
try:
bytes = await file.read()
if len(bytes) < 1:
raise NotImplementedError(
"No audio has been provided, check your microphone."
)
if len(bytes) > 120000:
raise NotVIPplsPAYError(
"Too many bytes."
)
wav_audio_file_path, text = await controller.audio_to_tacotron_audio_file(
bytes, model
)
return FileResponse(str(wav_audio_file_path), headers={'text': text})
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.get("/get_models", response_model=List[str])
def get_available_models():
try:
return controller.get_models()
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
| 28.863636 | 86 | 0.669816 | 44 | 0.023097 | 0 | 0 | 1,534 | 0.805249 | 689 | 0.36168 | 185 | 0.097113 |
f33897f01f860ace5277d9d6efa8513dc51da187 | 3,357 | py | Python | server.py | zhangtuxin/CMPUT404-WEB-SERVER | afd409d503c1b32e3fa142b2cb6e924f5b0018ef | [
"Apache-2.0"
] | null | null | null | server.py | zhangtuxin/CMPUT404-WEB-SERVER | afd409d503c1b32e3fa142b2cb6e924f5b0018ef | [
"Apache-2.0"
] | null | null | null | server.py | zhangtuxin/CMPUT404-WEB-SERVER | afd409d503c1b32e3fa142b2cb6e924f5b0018ef | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import SocketServer,os
# Copyright 2013 Abram Hindle, Eddie Antonio Santos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
class MyWebServer(SocketServer.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
#print ("Got a request of: %s\n" % self.data)
#self.request.sendall("OK")
path = self.data.split()[1]
method_type = self.data.split()[0]
if method_type != 'GET': #only GET will be handled !
status_code = "HTTP/1.1 405 Method not allowed\r\n"
content_type = "Content-type: text/html\r\n\r\n"
content = "<html><head></head><body>"+"<h1><center>HTTP/1.1 405 Method not allowed</center></h1></body></html>\n"
self.request.sendall(status_code)
self.request.sendall(content_type)
self.request.sendall(content)
return
if path[-1] == '/':
Path = os.getcwd()+"/www"+path+"index.html"
else:
Path = os.getcwd()+"/www"+path
#print ("Path is %s \n"%Path) /home/tuxin/Desktop/CMPUT404/Assignment1/CMPUT404-WEB-SERVER/www/../../../../../../../../../../../../etc/group
#print ("path is %s \n"%path) /../../../../../../../../../../../../etc/group
#print ("Path is %s \n"%Path)
if ( os.path.exists(Path) == False or "../" in Path or "/.." in Path): #add "../ for the serc check"
#print ("path is %s \n"%path)
header = "HTTP/1.1 404 Not Found\r\n Content-type: text/html\r\n"
file_content ="<html><head></head><body>"+"<h1><center>HTTP/1.1 404 Page Not Found!</center></h1></body></html>\n"
self.request.sendall(header + "\r\n" + file_content)
return
read_file = os.path.abspath(Path)
myfile = open(read_file, 'r') #serve file in www
file_content = ""
for i in myfile:
file_content +=i
myfile.close()
mime_type = Path.split('.')[1] #after the . is the mime type
header = "HTTP/1.1 200 OK\r\n" + "Content-type: text/%s\r\n" %mime_type
self.request.sendall(header + "\r\n" + file_content)
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
SocketServer.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = SocketServer.TCPServer((HOST, PORT), MyWebServer)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
| 39.494118 | 148 | 0.618111 | 2,047 | 0.609589 | 0 | 0 | 0 | 0 | 0 | 0 | 1,943 | 0.578618 |
f3390fdf7f2b57c683fe56929f4a5884eaf1e0f7 | 22,125 | py | Python | off_django/models.py | klorophyl/openfoodfacts-django | 696e701355af9911e726c5579eacca8989f4c629 | [
"Apache-2.0"
] | 1 | 2022-02-02T19:54:34.000Z | 2022-02-02T19:54:34.000Z | off_django/models.py | klorophyl/openfoodfacts-django | 696e701355af9911e726c5579eacca8989f4c629 | [
"Apache-2.0"
] | null | null | null | off_django/models.py | klorophyl/openfoodfacts-django | 696e701355af9911e726c5579eacca8989f4c629 | [
"Apache-2.0"
] | 1 | 2021-12-27T20:40:18.000Z | 2021-12-27T20:40:18.000Z | #!/usr/bin/python
# _*_ coding: utf_8 _*_
import datetime
import logging
import openfoodfacts
from tqdm import tqdm
from django.core.exceptions import FieldDoesNotExist
from django.db import models, transaction
from .models_extensions import ListField
from .settings import DATETIME_FORMAT
from .codes_to_country import CODES_TO_COUNTRY
logger = logging.getLogger("django")
class AbstractOFFFood(models.Model):
    """
    Abstract model representing an Open Food Facts food product.

    Field names appear to mirror the column names of the Open Food Facts
    CSV export (e.g. ``code``, ``created_t``, ``*_100g``) — TODO confirm
    against the export schema. Every field is nullable with a ``None``
    default, since most products only have a subset of the data filled in.
    Concrete subclasses provide the actual database table.
    """

    class Meta:
        # Abstract base: no table is created for this model itself;
        # subclasses inherit all fields below.
        abstract = True

    # General information
    # ``code`` is indexed because it is the natural lookup key
    # (presumably the product barcode — TODO confirm).
    code = models.TextField(default=None, null=True, db_index=True)
    url = models.TextField(default=None, null=True)
    creator = models.TextField(default=None, null=True)
    # *_t fields hold UNIX timestamps; *_datetime fields hold the same
    # instants as datetimes (both are kept, matching the upstream data).
    created_t = models.IntegerField(default=None, null=True)
    created_datetime = models.DateTimeField(default=None, null=True)
    last_modified_t = models.IntegerField(default=None, null=True)
    last_modified_datetime = models.DateTimeField(default=None, null=True)
    product_name = models.TextField(default=None, null=True)
    generic_name = models.TextField(default=None, null=True)
    quantity = models.TextField(default=None, null=True)

    # Tags
    # ListField (see models_extensions) stores multi-valued columns;
    # ``*_tags`` are the normalized variants, ``*_en`` the English labels.
    packaging = ListField(default=None, null=True)
    packaging_tags = ListField(default=None, null=True)
    brands = ListField(default=None, null=True)
    brands_tags = ListField(default=None, null=True)
    categories = ListField(default=None, null=True)
    categories_en = ListField(default=None, null=True)
    categories_tags = ListField(default=None, null=True)
    origins = ListField(default=None, null=True)
    origins_tags = ListField(default=None, null=True)
    manufacturing_places = ListField(default=None, null=True)
    manufacturing_places_tags = ListField(default=None, null=True)
    labels = ListField(default=None, null=True)
    labels_en = ListField(default=None, null=True)
    labels_tags = ListField(default=None, null=True)
    # Packager ("emballeur") codes; note the raw value is free text while
    # the ``_tags`` variant is a list.
    emb_codes = models.TextField(default=None, null=True)
    emb_codes_tags = ListField(default=None, null=True)
    first_packaging_code_geo = ListField(default=None, null=True)
    cities = ListField(default=None, null=True)
    cities_tags = ListField(default=None, null=True)
    purchase_places = ListField(default=None, null=True)
    stores = ListField(default=None, null=True)
    countries = ListField(default=None, null=True)
    countries_en = ListField(default=None, null=True)
    countries_tags = ListField(default=None, null=True)

    # Ingredients
    ingredients_text = ListField(default=None, null=True)
    # ``traces``: possible trace allergens ("may contain ...").
    traces = ListField(default=None, null=True)
    traces_en = ListField(default=None, null=True)
    traces_tags = ListField(default=None, null=True)

    # Misc
    # ``*_n`` fields are counts of the corresponding list fields.
    additives = ListField(default=None, null=True)
    additives_en = ListField(default=None, null=True)
    additives_n = models.IntegerField(default=None, null=True)
    additives_tags = ListField(default=None, null=True)
    allergens = ListField(default=None, null=True)
    allergens_en = ListField(default=None, null=True)
    # Product / ingredients / nutrition-table photo URLs, each in a
    # full-size and a small variant.
    image_small_url = models.TextField(default=None, null=True)
    image_url = models.TextField(default=None, null=True)
    image_ingredients_small_url = models.TextField(default=None, null=True)
    image_ingredients_url = models.TextField(default=None, null=True)
    image_nutrition_small_url = models.TextField(default=None, null=True)
    image_nutrition_url = models.TextField(default=None, null=True)
    ingredients_from_palm_oil = ListField(default=None, null=True)
    ingredients_from_palm_oil_n = models.IntegerField(default=None, null=True)
    ingredients_from_palm_oil_tags = ListField(default=None, null=True)
    ingredients_that_may_be_from_palm_oil = ListField(default=None, null=True)
    ingredients_that_may_be_from_palm_oil_n = models.IntegerField(default=None, null=True)
    ingredients_that_may_be_from_palm_oil_tags = ListField(default=None, null=True)
    main_category = models.TextField(default=None, null=True)
    main_category_en = models.TextField(default=None, null=True)
    no_nutriments = models.TextField(default=None, null=True)
    # Nutri-Score letter (single character, e.g. "a".."e" —
    # TODO confirm value set against upstream data).
    nutrition_grade_fr = models.CharField(max_length=1, default=None, null=True)
    nutrition_grade_uk = models.CharField(max_length=1, default=None, null=True)
    # NOVA food-processing classification group.
    nova_group = models.IntegerField(default=None, null=True)
    # PNNS (French national nutrition programme) food groups, two levels.
    pnns_groups_1 = models.TextField(default=None, null=True)
    pnns_groups_2 = models.TextField(default=None, null=True)
    serving_size = models.TextField(default=None, null=True)
    serving_quantity = models.FloatField(default=None, null=True)
    # Completion states of the product record (e.g. photos validated).
    states = ListField(default=None, null=True)
    states_en = ListField(default=None, null=True)
    states_tags = ListField(default=None, null=True)

    # Nutritional facts
    # All values are per 100 g / 100 ml. Names with a leading underscore
    # presumably map the "-" prefix OFF uses for sub-nutrients (e.g.
    # specific fatty acids and sugars) — TODO confirm column mapping.
    _alpha_linolenic_acid_100g = models.FloatField(default=None, null=True)
    _arachidic_acid_100g = models.FloatField(default=None, null=True)
    _arachidonic_acid_100g = models.FloatField(default=None, null=True)
    _behenic_acid_100g = models.FloatField(default=None, null=True)
    _butyric_acid_100g = models.FloatField(default=None, null=True)
    _capric_acid_100g = models.FloatField(default=None, null=True)
    _caproic_acid_100g = models.FloatField(default=None, null=True)
    _caprylic_acid_100g = models.FloatField(default=None, null=True)
    _cerotic_acid_100g = models.FloatField(default=None, null=True)
    _dihomo_gamma_linolenic_acid_100g = models.FloatField(default=None, null=True)
    _docosahexaenoic_acid_100g = models.FloatField(default=None, null=True)
    _eicosapentaenoic_acid_100g = models.FloatField(default=None, null=True)
    _elaidic_acid_100g = models.FloatField(default=None, null=True)
    _erucic_acid_100g = models.FloatField(default=None, null=True)
    _fructose_100g = models.FloatField(default=None, null=True)
    _gamma_linolenic_acid_100g = models.FloatField(default=None, null=True)
    _glucose_100g = models.FloatField(default=None, null=True)
    _gondoic_acid_100g = models.FloatField(default=None, null=True)
    _lactose_100g = models.FloatField(default=None, null=True)
    _lauric_acid_100g = models.FloatField(default=None, null=True)
    _lignoceric_acid_100g = models.FloatField(default=None, null=True)
    _linoleic_acid_100g = models.FloatField(default=None, null=True)
    _maltodextrins_100g = models.FloatField(default=None, null=True)
    _maltose_100g = models.FloatField(default=None, null=True)
    _mead_acid_100g = models.FloatField(default=None, null=True)
    _melissic_acid_100g = models.FloatField(default=None, null=True)
    _montanic_acid_100g = models.FloatField(default=None, null=True)
    _myristic_acid_100g = models.FloatField(default=None, null=True)
    _nervonic_acid_100g = models.FloatField(default=None, null=True)
    _oleic_acid_100g = models.FloatField(default=None, null=True)
    _palmitic_acid_100g = models.FloatField(default=None, null=True)
    _stearic_acid_100g = models.FloatField(default=None, null=True)
    _sucrose_100g = models.FloatField(default=None, null=True)
    alcohol_100g = models.FloatField(default=None, null=True)
    beta_carotene_100g = models.FloatField(default=None, null=True)
    beta_glucan_100g = models.FloatField(default=None, null=True)
    bicarbonate_100g = models.FloatField(default=None, null=True)
    biotin_100g = models.FloatField(default=None, null=True)
    caffeine_100g = models.FloatField(default=None, null=True)
    calcium_100g = models.FloatField(default=None, null=True)
    carbohydrates_100g = models.FloatField(default=None, null=True)
    carbon_footprint_100g = models.FloatField(default=None, null=True)
    carnitine_100g = models.FloatField(default=None, null=True)
    casein_100g = models.FloatField(default=None, null=True)
    chloride_100g = models.FloatField(default=None, null=True)
    chlorophyl_100g = models.FloatField(default=None, null=True)
    cholesterol_100g = models.FloatField(default=None, null=True)
    choline_100g = models.FloatField(default=None, null=True)
    chromium_100g = models.FloatField(default=None, null=True)
    cocoa_100g = models.FloatField(default=None, null=True)
    collagen_meat_protein_ratio_100g = models.FloatField(default=None, null=True)
    copper_100g = models.FloatField(default=None, null=True)
    energy_100g = models.FloatField(default=None, null=True)
    energy_from_fat_100g = models.FloatField(default=None, null=True)
    fat_100g = models.FloatField(default=None, null=True)
    fiber_100g = models.FloatField(default=None, null=True)
    fluoride_100g = models.FloatField(default=None, null=True)
    folates_100g = models.FloatField(default=None, null=True)
    fruits_vegetables_nuts_100g = models.FloatField(default=None, null=True)
    fruits_vegetables_nuts_estimate_100g = models.FloatField(default=None, null=True)
    glycemic_index_100g = models.FloatField(default=None, null=True)
    inositol_100g = models.FloatField(default=None, null=True)
    iodine_100g = models.FloatField(default=None, null=True)
    iron_100g = models.FloatField(default=None, null=True)
    magnesium_100g = models.FloatField(default=None, null=True)
    manganese_100g = models.FloatField(default=None, null=True)
    molybdenum_100g = models.FloatField(default=None, null=True)
    monounsaturated_fat_100g = models.FloatField(default=None, null=True)
    nucleotides_100g = models.FloatField(default=None, null=True)
nutrition_score_fr_100g = models.FloatField(default=None, null=True)
nutrition_score_uk_100g = models.FloatField(default=None, null=True)
omega_3_fat_100g = models.FloatField(default=None, null=True)
omega_6_fat_100g = models.FloatField(default=None, null=True)
omega_9_fat_100g = models.FloatField(default=None, null=True)
pantothenic_acid_100g = models.FloatField(default=None, null=True)
ph_100g = models.FloatField(default=None, null=True)
phosphorus_100g = models.FloatField(default=None, null=True)
phylloquinone_100g = models.FloatField(default=None, null=True)
polyols_100g = models.FloatField(default=None, null=True)
polyunsaturated_fat_100g = models.FloatField(default=None, null=True)
potassium_100g = models.FloatField(default=None, null=True)
proteins_100g = models.FloatField(default=None, null=True)
salt_100g = models.FloatField(default=None, null=True)
saturated_fat_100g = models.FloatField(default=None, null=True)
selenium_100g = models.FloatField(default=None, null=True)
serum_proteins_100g = models.FloatField(default=None, null=True)
silica_100g = models.FloatField(default=None, null=True)
sodium_100g = models.FloatField(default=None, null=True)
starch_100g = models.FloatField(default=None, null=True)
sugars_100g = models.FloatField(default=None, null=True)
taurine_100g = models.FloatField(default=None, null=True)
trans_fat_100g = models.FloatField(default=None, null=True)
vitamin_a_100g = models.FloatField(default=None, null=True)
vitamin_b12_100g = models.FloatField(default=None, null=True)
vitamin_b1_100g = models.FloatField(default=None, null=True)
vitamin_b2_100g = models.FloatField(default=None, null=True)
vitamin_b6_100g = models.FloatField(default=None, null=True)
vitamin_b9_100g = models.FloatField(default=None, null=True)
vitamin_c_100g = models.FloatField(default=None, null=True)
vitamin_d_100g = models.FloatField(default=None, null=True)
vitamin_e_100g = models.FloatField(default=None, null=True)
vitamin_k_100g = models.FloatField(default=None, null=True)
vitamin_pp_100g = models.FloatField(default=None, null=True)
water_hardness_100g = models.FloatField(default=None, null=True)
zinc_100g = models.FloatField(default=None, null=True)
@classmethod
def parse_api_fields(cls, data):
"""
Parse API fields and return a django ORM ready dict
"""
treated_fields = {}
for field in data.keys():
django_field = field.replace("-", "_")
value = (data.get(field) or "").strip()
try:
field_class = cls._meta.get_field(django_field).__class__
except FieldDoesNotExist:
logger.info("A field has been added in Open Food facts and not in off_django : %s" % field)
continue
if value == "":
value = None
elif field_class == models.FloatField:
value = float(value)
elif field_class == models.IntegerField:
value = int(value)
elif field_class == models.DateTimeField:
value = datetime.datetime.strptime(value, DATETIME_FORMAT)
elif field_class == ListField:
if " ] [ " in value:
value = value.strip("[ ").strip(" ]").split("] [")
elif ", " in value:
value = value.split(", ")
else:
value = value.split(",")
treated_fields[django_field] = value
return treated_fields
@classmethod
def load(cls, data, create=False):
"""
Create an OFFFood instance from a dict or return updated existing instance.
Does not save it
:param data: dict serialization coming from dump
"""
treated_fields = cls.parse_api_fields(data)
# instance, created = cls.objects.update_or_create(code=code, defaults=treated_fields)
if create:
instance = cls.objects.create(**treated_fields)
else:
instances = cls.objects.filter(code=treated_fields.get("code", ""))
if instances.count() == 0:
raise Exception("Object update requested but not in DB, use create=True. %s" % data)
instances.update(**treated_fields)
instance = instances.first()
return instance
def guess_country(self):
prefix = self.code[:3]
for boundaries, country in CODES_TO_COUNTRY:
try:
code = int(prefix)
except Exception:
continue
if boundaries[0] <= code <= boundaries[1]:
return country
    def serialize_for_off_api(self):
        """
        Return (json compliant) dict representation of the object ready for post to OFF API
        """
        serialized = {}
        for field in self._meta.get_fields():
            # Skip unset values so the payload only carries known facts.
            if getattr(self, field.name) is None:
                continue
            key = field.name
            if "_100g" in key:
                # OFF expects nutriment keys as "nutriment_<name>" with dashes,
                # e.g. saturated_fat_100g -> nutriment_saturated-fat.  The final
                # .replace("_-", "_") repairs leading-underscore fields:
                # _sucrose_100g -> nutriment_sucrose (not nutriment_-sucrose).
                key = ("nutriment_%s" % key.replace("_100g", "").replace("_", "-")).replace("_-", "_")
            serialized[key] = getattr(self, field.name)
            if "nutriment_" in key:
                # Energy is reported in kcal, every other nutriment in grams.
                serialized["%s_unit" % key] = "kcal" if "energy" in key else "g"
        serialized["nutrition_data_per"] = "100g"
        serialized["energy_unit"] = "kcal"
        # update serving_size with serving_quantity real value if exists
        if (getattr(self, "serving_quantity") or 0) != 0:
            serialized["serving_size"] = "%sg" % getattr(self, "serving_quantity")
        # Fill country if not already here (guessed from the barcode prefix)
        if "countries" not in serialized and (getattr(self, "countries") or []) == []:
            country = self.guess_country()
            if country is not None:
                serialized["countries"] = [country]
        return serialized
class OFFFood(AbstractOFFFood):
    # Concrete table for Open Food Facts products; all fields and behaviour
    # are inherited from AbstractOFFFood.
    class Meta:
        verbose_name = "OFFFood - Model for Open Food Facts food product"
class AbstractOFFFacet(models.Model):
    """
    Abstract model to manage facets (additives, brands, categories, ...).

    Warning : field sameAs has been renamed same_as
    """
    facet_id = models.CharField(max_length=255, primary_key=True, db_index=True)
    name_fr = models.TextField(default=None, null=True)
    name_en = models.TextField(default=None, null=True)
    products = models.IntegerField(default=None, null=True)
    url = models.TextField(default=None, null=True)
    same_as = ListField(default=None, null=True)

    # Locales fetched from OFF; "world" data is stored under the "en" name.
    LOCALES = ["fr", "world"]

    class Meta:
        abstract = True

    @classmethod
    def parse_api_fields(cls, data, locale, extra_fields=None):
        """
        Rename and delete fields
        id -> facet_id
        name -> name_XX with XX locale

        :param extra_fields: unused hook kept for signature compatibility.
            BUGFIX: the previous mutable default ``{}`` is replaced by None.
        """
        locale = locale if locale != "world" else "en"
        if "id" in data:
            data["facet_id"] = data.get("id")
            del data["id"]
        if "name" in data:
            data["name_%s" % locale] = data.get("name")
            del data["name"]
        if "sameAs" in data:
            data["same_as"] = data.get("sameAs")
            del data["sameAs"]
        return data

    @classmethod
    def update_with_off_db(cls, fetch_function):
        """
        Fetch latest info from OFF and update local DB, to be called from
        child class with datasets = {locale: data, ...}
        """
        datasets = {
            locale: fetch_function(locale=locale)
            for locale in cls.LOCALES
        }
        dump = {}
        # BUGFIX: dict.iteritems() is Python 2 only (AttributeError on
        # Python 3); items() behaves the same on both.
        for locale, dataset in datasets.items():
            for data in dataset:
                facet_info = cls.parse_api_fields(data, locale)
                # Merge per-locale records for the same facet id.
                dump.setdefault(facet_info["facet_id"], {}).update(facet_info)
        dump_ids = set(dump.keys())
        existing_ids = set(cls.objects.values_list("facet_id", flat=True))
        to_create = dump_ids - existing_ids
        to_update = dump_ids - to_create
        to_delete = existing_ids - dump_ids
        with tqdm(total=len(to_create) + len(to_update), unit='it', unit_scale=True) as pbar:
            with transaction.atomic():
                # Drop rows that disappeared upstream, then sync the rest.
                cls.objects.filter(facet_id__in=to_delete).delete()
                for facet_id in to_create:
                    cls.objects.create(**dump.get(facet_id))
                    pbar.update(1)
                for facet_id in to_update:
                    cls.objects.filter(facet_id=facet_id).update(**dump.get(facet_id))
                    pbar.update(1)

    @classmethod
    def fetch_all_facets(cls):
        """Refresh every concrete facet table from the OFF API."""
        MODELS = [
            OFFAdditive, OFFAllergen, OFFBrand, OFFCategory, OFFCountry, OFFIngredient,
            OFFLanguage, OFFPackaging, OFFPackagingCode, OFFPurchasePlace, OFFStore,
            OFFTrace, OFFState
        ]
        for MODEL in MODELS:
            logger.info("Fetching %s data" % MODEL.__name__)
            MODEL.update_with_off_db()
class OFFAdditive(AbstractOFFFacet):
    """Facet table for Open Food Facts additives."""
    class Meta:
        verbose_name = "OFFAdditive - Model for Open Food Facts facet additive"

    @classmethod
    def update_with_off_db(cls):
        # Bind the additive endpoint to the generic facet refresh logic.
        super(OFFAdditive, cls).update_with_off_db(openfoodfacts.facets.get_additives)
class OFFAllergen(AbstractOFFFacet):
    """Facet table for Open Food Facts allergens."""
    class Meta:
        verbose_name = "OFFAllergen - Model for Open Food Facts facet allergen"

    @classmethod
    def update_with_off_db(cls):
        # Bind the allergen endpoint to the generic facet refresh logic.
        super(OFFAllergen, cls).update_with_off_db(openfoodfacts.facets.get_allergens)
class OFFBrand(AbstractOFFFacet):
    """Facet table for Open Food Facts brands."""
    class Meta:
        # BUGFIX: typo "facet band" corrected to "facet brand".
        verbose_name = "OFFBrand - Model for Open Food Facts facet brand"

    @classmethod
    def update_with_off_db(cls):
        # Bind the brand endpoint to the generic facet refresh logic.
        super(OFFBrand, cls).update_with_off_db(openfoodfacts.facets.get_brands)
class OFFCategory(AbstractOFFFacet):
    """Facet table for Open Food Facts categories."""
    class Meta:
        verbose_name = "OFFCategory - Model for Open Food Facts facet category"

    @classmethod
    def update_with_off_db(cls):
        # Bind the category endpoint to the generic facet refresh logic.
        super(OFFCategory, cls).update_with_off_db(openfoodfacts.facets.get_categories)
class OFFCountry(AbstractOFFFacet):
    """Facet table for Open Food Facts countries."""
    class Meta:
        verbose_name = "OFFCountry - Model for Open Food Facts facet country"

    @classmethod
    def update_with_off_db(cls):
        # Bind the country endpoint to the generic facet refresh logic.
        super(OFFCountry, cls).update_with_off_db(openfoodfacts.facets.get_countries)
class OFFIngredient(AbstractOFFFacet):
    """Facet table for Open Food Facts ingredients."""
    class Meta:
        verbose_name = "OFFIngredient - Model for Open Food Facts facet ingredient"

    @classmethod
    def update_with_off_db(cls):
        # Bind the ingredient endpoint to the generic facet refresh logic.
        super(OFFIngredient, cls).update_with_off_db(openfoodfacts.facets.get_ingredients)
class OFFLanguage(AbstractOFFFacet):
    """Facet table for Open Food Facts languages."""
    class Meta:
        verbose_name = "OFFLanguage - Model for Open Food Facts facet language"

    @classmethod
    def update_with_off_db(cls):
        # Bind the language endpoint to the generic facet refresh logic.
        super(OFFLanguage, cls).update_with_off_db(openfoodfacts.facets.get_languages)
class OFFPackaging(AbstractOFFFacet):
    """Facet table for Open Food Facts packaging types."""
    # Extra field specific to this facet: packaging illustration URL/text.
    image = models.TextField(default=None, null=True)

    class Meta:
        verbose_name = "OFFPackaging - Model for Open Food Facts facet packaging"

    @classmethod
    def update_with_off_db(cls):
        # Bind the packaging endpoint to the generic facet refresh logic.
        super(OFFPackaging, cls).update_with_off_db(openfoodfacts.facets.get_packaging)
class OFFPackagingCode(AbstractOFFFacet):
    """Facet table for Open Food Facts packaging codes."""
    class Meta:
        verbose_name = "OFFPackagingCode - Model for Open Food Facts facet packaging code"

    @classmethod
    def update_with_off_db(cls):
        # Bind the packaging-code endpoint to the generic facet refresh logic.
        super(OFFPackagingCode, cls).update_with_off_db(openfoodfacts.facets.get_packaging_codes)
class OFFPurchasePlace(AbstractOFFFacet):
    """Facet table for Open Food Facts purchase places."""
    class Meta:
        verbose_name = "OFFPurchasePlace - Model for Open Food Facts facet purchase place"

    @classmethod
    def update_with_off_db(cls):
        # Bind the purchase-place endpoint to the generic facet refresh logic.
        super(OFFPurchasePlace, cls).update_with_off_db(openfoodfacts.facets.get_purchase_places)
class OFFStore(AbstractOFFFacet):
    """Facet table for Open Food Facts stores."""
    class Meta:
        verbose_name = "OFFStore - Model for Open Food Facts facet store"

    @classmethod
    def update_with_off_db(cls):
        # Bind the store endpoint to the generic facet refresh logic.
        super(OFFStore, cls).update_with_off_db(openfoodfacts.facets.get_stores)
class OFFTrace(AbstractOFFFacet):
    """Facet table for Open Food Facts traces."""
    class Meta:
        verbose_name = "OFFTrace - Model for Open Food Facts facet trace"

    @classmethod
    def update_with_off_db(cls):
        # Bind the trace endpoint to the generic facet refresh logic.
        super(OFFTrace, cls).update_with_off_db(openfoodfacts.facets.get_traces)
class OFFState(AbstractOFFFacet):
    """Facet table for Open Food Facts completion states."""
    class Meta:
        verbose_name = "OFFState - Model for Open Food Facts facet state"

    @classmethod
    def update_with_off_db(cls):
        # Bind the state endpoint to the generic facet refresh logic.
        super(OFFState, cls).update_with_off_db(openfoodfacts.facets.get_states)
| 41.432584 | 107 | 0.700475 | 21,697 | 0.980655 | 0 | 0 | 6,200 | 0.280226 | 0 | 0 | 2,415 | 0.109153 |
f339a807e3723d74dbde7cf96ca0b74587cac911 | 285 | py | Python | day01/ex04/test.py | d-r-e/Machine-Learning-Bootcamp | 618cad97c04d15fec6e8a371c526ad8e08cae35a | [
"MIT"
] | null | null | null | day01/ex04/test.py | d-r-e/Machine-Learning-Bootcamp | 618cad97c04d15fec6e8a371c526ad8e08cae35a | [
"MIT"
] | 6 | 2021-05-25T08:51:39.000Z | 2021-05-25T08:51:40.000Z | day01/ex04/test.py | d-r-e/Python-Bootcamp-42AI | 618cad97c04d15fec6e8a371c526ad8e08cae35a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from eval import Evaluator
def main():
    """Evaluate a fixed word/coefficient pair with both Evaluator strategies."""
    sample_words = ["Le", "Lorem", "Ipsum", "est", "simple"]
    weights = [1, 2, 1, 4, 0.5]
    print(Evaluator.zip_evaluate(weights, sample_words))
    print(Evaluator.enumerate_evaluate(weights, sample_words))


if __name__ == '__main__':
    main()
| 23.75 | 51 | 0.642105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.224561 |
f33b3b4d842cf4b0a3b2d2d3201e9d9742676ddb | 2,853 | py | Python | tools/deploy/term_backend/nrf.py | giovannistanco/iot-trust-task-alloc | 47e0c8186db32ecd563241d05ebdaaf23713a83f | [
"MIT"
] | null | null | null | tools/deploy/term_backend/nrf.py | giovannistanco/iot-trust-task-alloc | 47e0c8186db32ecd563241d05ebdaaf23713a83f | [
"MIT"
] | null | null | null | tools/deploy/term_backend/nrf.py | giovannistanco/iot-trust-task-alloc | 47e0c8186db32ecd563241d05ebdaaf23713a83f | [
"MIT"
] | null | null | null | import time
import threading
import sys
import pynrfjprog.HighLevel
import pynrfjprog.APIError
DEFAULT_BLOCK_SIZE = 1024
SECONDS_PER_READ = 0.010
SECONDS_PER_WRITE = 0.010
class RTT:
    """
    RTT communication class
    Based off: https://github.com/thomasstenersen/pyrtt-viewer/blob/master/pyrtt-viewer

    Bridges stdin/stdout to one RTT channel with a reader thread and a
    writer thread; run() blocks until KeyboardInterrupt, then asks both
    threads to stop and joins them.
    """
    def __init__(self, probe, channel, block_size=DEFAULT_BLOCK_SIZE):
        # probe: object exposing rtt_read/rtt_write (a pynrfjprog DebugProbe
        # with RTT already started -- see term_nrf below).
        self.probe = probe
        self.channel = channel
        self.close_event = None    # created in run(); set to request shutdown
        self.writer_thread = None
        self.reader_thread = None
        self.block_size = block_size    # max bytes pulled per rtt_read call

    def _writer(self):
        # Forward stdin lines (newline included) to the RTT channel.
        while not self.close_event.is_set():
            data = sys.stdin.readline()#.strip("\n")
            #print(f"WRITER:{data!r}")
            if data:
                written = self.probe.rtt_write(self.channel, data)
                # rtt_write returns how many bytes the target accepted.
                assert written == len(data)
            time.sleep(SECONDS_PER_WRITE)

    def _reader(self):
        # Poll the RTT channel and echo incoming data to stdout.
        while not self.close_event.is_set():
            data = self.probe.rtt_read(self.channel, self.block_size)
            #print(f"READER:{data!r}")
            if not data:
                # Nothing pending: back off briefly instead of busy-spinning.
                time.sleep(SECONDS_PER_READ)
                continue
            sys.stdout.write(data)#, flush=True)
            sys.stdout.flush()

    def run(self):
        """Start both threads and block until Ctrl-C, then shut down."""
        self.close_event = threading.Event()
        self.close_event.clear()
        self.reader_thread = threading.Thread(target=self._reader)
        self.reader_thread.start()
        self.writer_thread = threading.Thread(target=self._writer)
        self.writer_thread.start()
        try:
            while self.reader_thread.is_alive() or \
                    self.writer_thread.is_alive():
                time.sleep(0.1)
        except KeyboardInterrupt:
            self.close_event.set()
        # NOTE(review): the writer thread can stay blocked inside
        # sys.stdin.readline() after close_event is set, so this join may
        # hang until one more input line arrives -- confirm intended.
        self.reader_thread.join()
        self.writer_thread.join()
def term_nrf(mote: int, channel: int=0, block_size: int=DEFAULT_BLOCK_SIZE):
    """
    Open an interactive RTT terminal to the nRF debug probe ``mote``.

    :param mote: serial number of the debug probe to attach to
    :param channel: RTT channel index to bridge to stdin/stdout
    :param block_size: maximum number of bytes read per RTT poll
    """
    with pynrfjprog.HighLevel.API() as api:
        with pynrfjprog.HighLevel.DebugProbe(api, mote) as probe:
            probe.rtt_start()

            # Wait for rtt to be properly setup
            while not probe.rtt_is_control_block_found():
                time.sleep(0.01)

            try:
                # BUGFIX: the channel and block_size parameters were ignored
                # (channel was hard-coded to 0).  Defaults keep the previous
                # behaviour for existing callers.  A dead commented-out read
                # loop kept as a string literal was removed.
                rtt = RTT(probe, channel, block_size=block_size)
                rtt.run()
            finally:
                probe.rtt_stop()
| 32.05618 | 88 | 0.552401 | 1,744 | 0.611286 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.226078 |
f33b5064310e224ae603a224f1c08afbe8fe1fd6 | 3,068 | py | Python | tests/test_elements.py | janosh/ml-matrics | 93c3eeb74ad316e07628e3b2a462f693498e6a33 | [
"MIT"
] | 11 | 2021-07-12T02:12:02.000Z | 2022-02-14T19:06:22.000Z | tests/test_elements.py | janosh/ml-matrics | 93c3eeb74ad316e07628e3b2a462f693498e6a33 | [
"MIT"
] | 7 | 2021-10-14T10:00:01.000Z | 2022-03-01T21:09:30.000Z | tests/test_elements.py | janosh/mlmatrics | 93c3eeb74ad316e07628e3b2a462f693498e6a33 | [
"MIT"
] | 2 | 2021-10-01T10:00:50.000Z | 2022-02-12T08:50:22.000Z | import pandas as pd
import pytest
from matplotlib.axes import Axes
from plotly.graph_objects import Figure
from ml_matrics import (
count_elements,
hist_elemental_prevalence,
ptable_heatmap,
ptable_heatmap_plotly,
ptable_heatmap_ratio,
)
# Module-level fixtures shared by every test below.
# Chemical formula / composition string columns from the sample datasets.
compositions_1 = pd.read_csv("data/mp-n_elements<2.csv").formula
compositions_2 = pd.read_csv("data/ex-ensemble-roost.csv").composition
# Periodic-table property reference shipped with the package, keyed by symbol.
df_ptable = pd.read_csv("ml_matrics/elements.csv").set_index("symbol")
# Pre-computed element frequency counts reused across test cases.
elem_counts_1 = count_elements(compositions_1)
elem_counts_2 = count_elements(compositions_2)
def test_hist_elemental_prevalence():
    """Smoke-test hist_elemental_prevalence and its keyword options."""
    ax = hist_elemental_prevalence(compositions_1)
    assert isinstance(ax, Axes)

    # Remaining calls only assert that these options do not raise.
    hist_elemental_prevalence(compositions_1, log=True)

    hist_elemental_prevalence(compositions_1, keep_top=10)

    hist_elemental_prevalence(compositions_1, keep_top=10, bar_values="count")
def test_ptable_heatmap():
    """Exercise ptable_heatmap's input types and keyword options."""
    ax = ptable_heatmap(compositions_1)
    assert isinstance(ax, Axes)

    ptable_heatmap(compositions_1, log=True)

    # custom color map
    ptable_heatmap(compositions_1, log=True, cmap="summer")

    # heat_labels normalized to total count
    ptable_heatmap(compositions_1, heat_labels="fraction")
    ptable_heatmap(compositions_1, heat_labels="percent")

    # without heatmap values
    ptable_heatmap(compositions_1, heat_labels=None)
    ptable_heatmap(compositions_1, log=True, heat_labels=None)

    # element properties as heatmap values
    ptable_heatmap(df_ptable.atomic_mass)

    # custom max color bar value
    ptable_heatmap(compositions_1, cbar_max=1e2)
    ptable_heatmap(compositions_1, log=True, cbar_max=1e2)

    # element counts
    ptable_heatmap(elem_counts_1)

    # log scale and percent labels are mutually exclusive and must raise.
    with pytest.raises(ValueError) as exc_info:
        ptable_heatmap(compositions_1, log=True, heat_labels="percent")

    assert exc_info.type is ValueError
    assert "Combining log color scale" in exc_info.value.args[0]
def test_ptable_heatmap_ratio():
    """ptable_heatmap_ratio accepts compositions, counts, or a mix of both."""
    # composition strings
    ax = ptable_heatmap_ratio(compositions_1, compositions_2)
    assert isinstance(ax, Axes)

    # element counts
    ptable_heatmap_ratio(elem_counts_1, elem_counts_2, normalize=True)

    # mixed element counts and composition
    ptable_heatmap_ratio(compositions_1, elem_counts_2)
    ptable_heatmap_ratio(elem_counts_1, compositions_2)
def test_ptable_heatmap_plotly():
    """Check the plotly heatmap layout and smoke-test its keyword options."""
    fig = ptable_heatmap_plotly(compositions_1)
    assert isinstance(fig, Figure)
    # The periodic-table grid yields one annotation per cell.
    assert len(fig.layout.annotations) == 18 * 10  # n_cols * n_rows
    # Only real-element cells carry non-empty text.
    assert sum(anno.text != "" for anno in fig.layout.annotations) == 118  # n_elements

    # Remaining calls only assert that these options do not raise.
    ptable_heatmap_plotly(
        compositions_1, hover_cols=["atomic_mass", "atomic_number", "density"]
    )
    ptable_heatmap_plotly(
        compositions_1,
        hover_data="density = " + df_ptable.density.astype(str) + " g/cm^3",
    )
    ptable_heatmap_plotly(df_ptable.density, precision=".1f")
    ptable_heatmap_plotly(compositions_1, heat_labels="percent")
    ptable_heatmap_plotly(compositions_1, colorscale=[(0, "red"), (1, "blue")])
| 30.989899 | 87 | 0.754889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 509 | 0.165906 |
f33bb25fe61367d04cbf0cfd08ba517964a6833f | 239 | py | Python | app/wrapper/utils.py | Sirius-social/TMTM | 5b4d426e358fc9fd6cff7ec84861c0388d3785ce | [
"Apache-2.0"
] | null | null | null | app/wrapper/utils.py | Sirius-social/TMTM | 5b4d426e358fc9fd6cff7ec84861c0388d3785ce | [
"Apache-2.0"
] | null | null | null | app/wrapper/utils.py | Sirius-social/TMTM | 5b4d426e358fc9fd6cff7ec84861c0388d3785ce | [
"Apache-2.0"
] | null | null | null | import hashlib
from django.conf import settings
def get_auth_connection_key_seed() -> str:
    """Derive the deterministic hex seed for the agent's auth connection key."""
    # NOTE(review): MD5 is used here only to squeeze the entity id into a
    # fixed-length seed, not as a security primitive -- confirm acceptable.
    entity_auth_key = "%s:auth" % str(settings.AGENT['entity'])
    return hashlib.md5(entity_auth_key.encode()).hexdigest()
| 23.9 | 62 | 0.732218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.07113 |
f33c3ab0adbea1c960c88092e131df7a1b774872 | 7,103 | py | Python | MUSim_make_figures.py | ericcfields/MUSim | 3c7dcf71964fda67544800ed5f57e570fca2655d | [
"BSD-3-Clause"
] | null | null | null | MUSim_make_figures.py | ericcfields/MUSim | 3c7dcf71964fda67544800ed5f57e570fca2655d | [
"BSD-3-Clause"
] | 1 | 2019-08-31T21:47:26.000Z | 2019-08-31T21:47:26.000Z | MUSim_make_figures.py | ericcfields/MUSim | 3c7dcf71964fda67544800ed5f57e570fca2655d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Make figures for MUSim paper
AUTHOR: Eric Fields
VERSION DATE: 26 June 2019
"""
import os
from os.path import join
import numpy as np
import pandas as pd
from statsmodels.stats.proportion import proportion_confint
import matplotlib.pyplot as plt
def binom_ci_precision(proportion, nobs, method='beta', alpha=0.05):
    """
    Half-width (upper side) of a binomial proportion confidence interval:
    the distance from the point estimate to the upper CI bound.
    """
    successes = proportion * nobs
    _, upper = proportion_confint(successes, nobs, method=method, alpha=alpha)
    return upper - proportion
def make_power_bar(data, colors, error_bars='se', mean_amp=True, legend=False):
    """
    Draw a grouped bar chart of statistical power per correction method.

    :param data: DataFrame with a 'time_window' column and one power column
        per method ('Fmax', 'cluster_05', 'cluster_01', 'BH', 'BY', 'BKY',
        optionally 'mean_amp'); power values come from 10000 simulations
    :param colors: bar color per method (mean amplitude is always black)
    :param error_bars: 'se' for binomial standard error, 'ci' for a binomial
        confidence interval, or None for no error bars
    :param mean_amp: whether to include the mean-amplitude column
    :param legend: whether to draw the legend outside the axes
    """
    use_colors = colors.copy()
    use_cols = ['Fmax', 'cluster_05', 'cluster_01', 'BH', 'BY', 'BKY']
    if mean_amp:
        use_cols.insert(0, 'mean_amp')
    #Get values for error bars
    power_data = data.loc[:, use_cols].to_numpy().T
    # BUGFIX: the None case is tested first; previously error_bars=None
    # crashed on error_bars.lower() before its branch was reached.
    if error_bars is None:
        stderr = None
    elif error_bars.lower() == 'se':
        stderr = np.sqrt( (power_data*(1-power_data)) / 10000 )
    elif error_bars.lower() == 'ci':
        stderr = binom_ci_precision(power_data, 10000)
    else:
        raise ValueError('Incorrect input for error_bars')
    #Plot
    # BUGFIX: the cluster_01 label previously duplicated the 0.05 text.
    labels = ['Fmax', 'cluster (p≤0.05 threshold)', 'cluster (p≤0.01 threshold)',
              'FDR (Benjamini & Hochberg, 1995)', 'FDR (Benjamini & Yekutieli, 2001)',
              'FDR (Benjamini et al., 2006)']
    if mean_amp:
        labels.insert(0, 'mean amplitude')
        use_colors.insert(0, 'black')
    data.plot.bar(x='time_window', y=use_cols, label=labels, color=use_colors,
                  fontsize=16, yerr=stderr, legend=legend)
    plt.xticks(rotation='horizontal')
    plt.xlabel('')
    plt.ylim((0,1))
    if legend:
        plt.legend(loc=(1.04,0), prop={'size': 12})
def make_power_figures(colors, results_dir):
    """For each familywise power CSV, save per-component bar charts as TIFFs."""
    #Get all results csv files
    results_files = [file for file in os.listdir(results_dir) if file.endswith('.csv')]

    for results_file in results_files:
        #Load data
        data = pd.read_csv(join(results_dir, results_file))
        if 'Power' in results_file and 'Familywise' in results_file:
            # Only the FamilywisePower files carry a mean_amp column.
            if 'FamilywisePower' in results_file:
                mean_amp = True
            else:
                mean_amp = False
            #Make file with legend (only once per results directory)
            if not os.path.isfile(join(results_dir, 'legend.tif')):
                make_power_bar(data[0:3], colors, legend=True)
                img_file = join(results_dir, 'legend.tif')
                plt.savefig(img_file, bbox_inches='tight', dpi=600)
                plt.close()
            #Make figures: rows come in groups of three (one per time window)
            #per ERP component: N400, then P300, then P1.
            # NOTE(review): strip('.csv') removes any of the characters
            # '.', 'c', 's', 'v' from both ends, not just the suffix --
            # fine for the current file names, fragile otherwise.
            make_power_bar(data[0:3], colors, error_bars='CI', mean_amp=mean_amp)
            img_file = join(results_dir, '%s_N400.tif' % results_file.strip('.csv'))
            plt.savefig(img_file, bbox_inches='tight', dpi=600)
            plt.close()
            make_power_bar(data[3:6], colors, error_bars='CI', mean_amp=mean_amp)
            img_file = join(results_dir, '%s_P300.tif' % results_file.strip('.csv'))
            plt.savefig(img_file, bbox_inches='tight', dpi=600)
            plt.close()
            make_power_bar(data[6:9], colors, error_bars='CI', mean_amp=mean_amp)
            img_file = join(results_dir, '%s_P1.tif' % results_file.strip('.csv'))
            plt.savefig(img_file, bbox_inches='tight', dpi=600)
            plt.close()
def make_null_figures(results_dir):
    """Save familywise Type I error bar charts (one TIFF per window/trials)."""
    #Get data
    data = pd.read_csv(join(results_dir, 'MUSim_Null_FamilywiseTypeI.csv'))
    data[['n_trials', 'n_subjects']] = data[['n_trials', 'n_subjects']].astype(int)

    #Plotting parameters
    use_cols = ['mean_amp', 'Fmax', 'cluster_05', 'cluster_01']
    labels = ['mean amplitude', 'Fmax', 'cluster (p ≤ 0.05 threshold)', 'cluster (p ≤ 0.01 threshold)']
    use_colors = ['black', 'lightgreen', 'navy', 'cornflowerblue']

    # One figure per (time window, trial count) combination.
    for time_wind in ('0 - 300', '300 - 1000'):
        for trials in (40, 20, 10):
            plot_subset = data[(data['time_window'] == time_wind) & (data['n_trials'] == trials)]
            proportions = plot_subset.loc[:, use_cols].to_numpy().T
            # CI half-widths based on 10000 simulated experiments.
            stderr = binom_ci_precision(proportions, 10000)
            #Make bar graph
            plot_subset.plot.bar(x='n_subjects', y=use_cols, label=labels, color=use_colors,
                                 fontsize=16, yerr=stderr, legend=False)
            plt.xticks(rotation='horizontal')
            plt.xlabel('')
            plt.ylim((0,0.1))
            # Dashed reference line at the nominal alpha level.
            plt.axhline(y=0.05,linewidth=1, color='r', linestyle='--')
            plt.yticks(np.arange(1,11)/100)
            plt.xlabel('Number of Subjects', fontsize=18)
            #Save file
            img_file = join(results_dir, 'MUSim_Null_FamilywiseTypeI_%s_%dtrials.tif' % (time_wind, trials))
            plt.savefig(img_file, bbox_inches='tight', dpi=600)
            plt.close()
def make_EW_figures(colors, results_dir):
    """Save element-wise power box plots (one TIFF per Power_EW CSV)."""
    ew_files = [file for file in os.listdir(results_dir) if 'Power_EW' in file and file.endswith('.csv')]

    for ew_file in ew_files:
        #Get data
        data = pd.read_csv(join(results_dir, ew_file))
        #Rename colums to labels to be used in figure
        data.columns = ['uncorrected', 'Sidak', 'Fmax', 'Clust0.05', 'Clust0.01', 'BH FDR', 'BY FDR', 'BKY FDR']
        #Make box plot (only the corrected methods, from 'Fmax' onward)
        bplot = data.loc[:, 'Fmax':].boxplot(whis=[5, 95], showfliers=False,
                                             return_type='dict', patch_artist=True,
                                             fontsize=12)
        #For proporition measures, set standard y-scale
        if 'onset' not in ew_file and 'offset' not in ew_file:
            plt.ylim((0,1))
        #Update colors and line sizes
        for key in bplot.keys():
            i = 0
            for item in bplot[key]:
                item.set_linewidth(4)
                if key == 'medians':
                    item.set_color('black')
                else:
                    item.set_color(colors[int(i)])
                # Whiskers and caps come two per box, so advance the color
                # index by 0.5 each to keep one color per box.
                if key in ['whiskers', 'caps']:
                    i += 0.5
                else:
                    i += 1
        #Save figure
        # NOTE(review): strip('.csv') strips characters, not the suffix --
        # safe for current file names, fragile otherwise.
        img_file = join(results_dir, ew_file.strip('.csv') + '.tif')
        plt.savefig(img_file, bbox_inches='tight', dpi=600)
        plt.close()
def main():
    """Generate all figure sets into the (hard-coded) results directory."""
    # NOTE(review): machine-specific path -- parameterize if reused elsewhere.
    results_dir = r'C:\Users\ecfne\Documents\Eric\Research\Stats Simulations\MUSim\results'
    # One color per correction method, in make_power_bar's label order.
    colors = ['lightgreen', 'navy', 'cornflowerblue', 'red', 'lightcoral', 'firebrick']
    make_power_figures(colors, results_dir)
    make_null_figures(results_dir)
    make_EW_figures(colors, results_dir)


if __name__ == '__main__':
    main()
| 38.188172 | 113 | 0.560749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,704 | 0.239629 |
f33cc042888520918896e0430cf4cdd6032e3352 | 3,364 | py | Python | train.py | zeochoy/RNASeq_CollaborativeFilter | 72066b29eaa4e5480d796ad57c249fd0058cc696 | [
"MIT"
] | 16 | 2019-04-04T22:31:12.000Z | 2022-03-27T14:37:33.000Z | train.py | zeochoy/RNASeq_CollaborativeFilter | 72066b29eaa4e5480d796ad57c249fd0058cc696 | [
"MIT"
] | null | null | null | train.py | zeochoy/RNASeq_CollaborativeFilter | 72066b29eaa4e5480d796ad57c249fd0058cc696 | [
"MIT"
] | 5 | 2018-11-06T09:11:13.000Z | 2021-02-10T13:50:00.000Z | from __future__ import print_function
from argparse import ArgumentParser
from fastai.learner import *
from fastai.column_data import *
import numpy as np
import pandas as pd
def build_parser():
    """Build the command-line parser for the training script."""
    parser = ArgumentParser()
    # Required I/O arguments: (flag, destination attribute, help text).
    required_options = [
        ('--data', 'in_path', 'input file path'),
        ('--out-prefix', 'model', 'output prefix'),
        ('--out-dir', 'out_dir', 'output directory'),
    ]
    for flag, dest, help_text in required_options:
        parser.add_argument(flag, type=str, nargs=None, dest=dest, help=help_text, required=True)
    # Optional hyperparameters with defaults.
    parser.add_argument('--num-dim', type=int, nargs=None, dest='num_dim', help='number of dimension of resulting embedding', required=False, default=50)
    parser.add_argument('--bs', type=int, nargs=None, dest='bs', help='batch size', required=False, default=64)
    parser.add_argument('--num-epoch', type=int, nargs=None, dest='num_eps', help='number of epoch(s)', required=False, default=3)
    parser.add_argument('--learning-rate', type=float, nargs=None, dest='lr', help='learning rate', required=False, default=1e-5)
    return parser
def main():
    """Train a collaborative-filtering embedding on an expression matrix and
    export the learned gene/sample embeddings and biases as CSV files."""
    parser = build_parser()
    opts = parser.parse_args()

    # Training requires a CUDA-capable GPU; bail out otherwise.
    if torch.cuda.is_available() and torch.backends.cudnn.enabled:
        torch.cuda.set_device(0)
    else:
        print('CUDA or CUDNN not available.')
        return

    in_path = opts.in_path
    n_factors = opts.num_dim
    bs = opts.bs
    num_eps = opts.num_eps
    lr = opts.lr
    out_dir = opts.out_dir
    prefix = opts.model
    outpath = out_dir+'/'+prefix+'_'

    ### data preparation
    # Melt the (sample x gene) matrix to long form: one (id, gene, log2exp)
    # row per matrix cell, the shape the collaborative filter expects.
    df = pd.read_csv(in_path, sep=',', low_memory=False, index_col=[0], error_bad_lines=False)
    sids = list(df.index)
    df = df.assign(id=sids)
    df = df.reset_index(drop=True)
    mdf = pd.melt(df, id_vars=['id'], var_name='gene', value_name='log2exp')

    ### training
    val_idxs = get_cv_idxs(len(mdf))
    # NOTE(review): 'path' is not defined in this module -- presumably
    # exported by the fastai star imports; confirm.
    cd = CollabFilterDataset.from_data_frame(path, mdf, 'id', 'gene', 'log2exp')
    learn = cd.get_learner(n_factors, val_idxs, bs, opt_fn=optim.Adam)
    learn.fit(lr, num_eps)
    learn.save(outpath+'model')

    ### plot jointplot of predicted vs ground-truth expression
    preds = learn.predict()
    y=learn.data.val_y
    # NOTE(review): 'sns' (seaborn) has no explicit import here -- presumably
    # pulled in by the fastai star imports; confirm.
    jp = sns.jointplot(preds, y, kind='hex', stat_func=None)
    jp.set_axis_labels('ground truth log2(exp)', 'predicted log2(exp)')
    jp.savefig(outpath+'trn_metric_jointplot.png')

    ### output embedding
    # Last two columns are 'id' (added above) and the melted value column,
    # so everything before them is a gene column.
    genes = list(df.columns[:-2])
    sids = list(df['id'])
    geneidx = np.array([cd.item2idx[g] for g in genes])
    m=learn.model
    m.cuda()

    ### output gene embedding matrix and bias
    gene_emb = to_np(m.i(V(geneidx)))
    gene_emb_df = pd.DataFrame(gene_emb, index=genes)
    gene_emb_df.to_csv(outpath+'gemb.csv', sep=',')
    gene_emb_bias = to_np(m.ib(V(geneidx)))
    gene_emb_bias_df = pd.DataFrame(gene_emb_bias, index=genes)
    gene_emb_bias_df.to_csv(outpath+'gemb_bias.csv')

    ### output sample embedding matrix and bias
    sampleidx = np.array([cd.user2idx[sid] for sid in sids])
    samp_emb = to_np(m.u(V(sampleidx)))
    samp_emb_df = pd.DataFrame(samp_emb, index=sids)
    samp_emb_df.to_csv(outpath+'semb.csv', sep=',')
    samp_emb_bias = to_np(m.ub(V(sampleidx)))
    samp_emb_bias_df = pd.DataFrame(samp_emb_bias, index=sids)
    samp_emb_bias_df.to_csv(outpath+'semb_bias.csv')


if __name__ == '__main__':
    main()
| 35.041667 | 153 | 0.681332 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 656 | 0.195006 |
f33d3bd029f3e85a817ccb3908d36449089ade03 | 691 | py | Python | pyning/tail_recursion/fibonacci_test.py | rkoyanagui/pyning | 3e8905e240d2554f217f168c48c9edeba8658dec | [
"Apache-2.0"
] | null | null | null | pyning/tail_recursion/fibonacci_test.py | rkoyanagui/pyning | 3e8905e240d2554f217f168c48c9edeba8658dec | [
"Apache-2.0"
] | null | null | null | pyning/tail_recursion/fibonacci_test.py | rkoyanagui/pyning | 3e8905e240d2554f217f168c48c9edeba8658dec | [
"Apache-2.0"
] | null | null | null | import unittest
from pyning.tail_recursion.fibonacci import naive_fib
from pyning.utils.testutils import BaseTest
class FibonacciTailRecursionTest(BaseTest):
    """Checks naive_fib against the first seven Fibonacci numbers."""
    # NOTE(review): BaseTest.check(f, xr, n) presumably asserts that
    # f(n) == xr -- confirm against pyning.utils.testutils.

    def test_zero(self):
        self.check(f=naive_fib, xr=0, n=0)

    def test_one(self):
        self.check(f=naive_fib, xr=1, n=1)

    def test_two(self):
        self.check(f=naive_fib, xr=1, n=2)

    def test_three(self):
        self.check(f=naive_fib, xr=2, n=3)

    def test_four(self):
        self.check(f=naive_fib, xr=3, n=4)

    def test_five(self):
        self.check(f=naive_fib, xr=5, n=5)

    def test_six(self):
        self.check(f=naive_fib, xr=8, n=6)


if __name__ == '__main__':
    unittest.main()
| 20.939394 | 53 | 0.646889 | 524 | 0.758321 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.014472 |
f33f047bd701a9285f93e58c03fcab26e4518b30 | 25 | py | Python | test/json/des/__init__.py | vincent-musedev/libacvp | b11247d9d0b2fbd88954358272a43d35c059be7b | [
"BSD-2-Clause",
"Apache-2.0"
] | 45 | 2016-08-01T11:47:34.000Z | 2022-02-22T21:27:27.000Z | test/json/des/__init__.py | vincent-musedev/libacvp | b11247d9d0b2fbd88954358272a43d35c059be7b | [
"BSD-2-Clause",
"Apache-2.0"
] | 221 | 2016-08-04T17:10:36.000Z | 2022-01-21T19:53:36.000Z | test/json/des/__init__.py | vincent-musedev/libacvp | b11247d9d0b2fbd88954358272a43d35c059be7b | [
"BSD-2-Clause",
"Apache-2.0"
] | 94 | 2016-10-23T11:08:19.000Z | 2022-01-21T11:50:16.000Z | from .des import main_des | 25 | 25 | 0.84 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f340cb596921e5050e8fd921f0453c58f3a6bf7a | 607 | py | Python | config/config_example1_singleton.py | orest-d/design-patterns-finance | 5878912dfa5b34925b00c38da978e7b9e4735a14 | [
"CC0-1.0"
] | null | null | null | config/config_example1_singleton.py | orest-d/design-patterns-finance | 5878912dfa5b34925b00c38da978e7b9e4735a14 | [
"CC0-1.0"
] | null | null | null | config/config_example1_singleton.py | orest-d/design-patterns-finance | 5878912dfa5b34925b00c38da978e7b9e4735a14 | [
"CC0-1.0"
class Config:
    """Application configuration implemented as a classic singleton.

    Obtain the shared instance via Config.get_instance(); constructing
    Config() a second time raises.
    """
    __instance = None
    @staticmethod
    def get_instance():
        """Static access method: create the singleton on first use, then reuse it."""
        if Config.__instance is None:
            Config()
        return Config.__instance
    def __init__(self):
        if Config.__instance is not None:
            raise Exception("This class can't be created, use Config.getInstance() instead")
        else:
            Config.__instance = self
            # BUG FIX: the original line ended with a stray comma, which made
            # db_driver the tuple ("sqlite",) instead of the string "sqlite".
            self.db_driver = "sqlite"
            self.sqlite_file = "database.sqlite"
# Demo: direct construction works once, then get_instance() always
# returns that same object (identical id()).
s = Config()
print (s)
s = Config.get_instance()
print (s, id(s))
s = Config.get_instance()
print (s, id(s))
f3436f7a9fc684a11402ed73af31d4c502f05580 | 9,023 | py | Python | pyworkforce/shifts/tests/test_shifts.py | rodrigo-arenas/pyworkforce | f3986ebbc3c48a8ae08dc04dfb939ac6a9516233 | [
"MIT"
] | 10 | 2021-03-20T02:58:52.000Z | 2022-03-28T05:58:56.000Z | pyworkforce/shifts/tests/test_shifts.py | rodrigo-arenas/pyworkforce | f3986ebbc3c48a8ae08dc04dfb939ac6a9516233 | [
"MIT"
] | 3 | 2021-03-13T02:11:39.000Z | 2021-04-08T01:27:36.000Z | pyworkforce/shifts/tests/test_shifts.py | rodrigo-arenas/pyworkforce | f3986ebbc3c48a8ae08dc04dfb939ac6a9516233 | [
"MIT"
] | 1 | 2022-01-04T11:06:47.000Z | 2022-01-04T11:06:47.000Z | from pyworkforce.shifts import MinAbsDifference, MinRequiredResources
import pytest
def test_min_abs_difference_schedule():
    """A feasible 2-day, 24-period problem solves to OPTIMAL and every
    day/shift row carries a non-negative resource count."""
    demand = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8],
    ]
    # 1 marks the periods covered by each shift.
    coverage = {
        "Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
        "Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
        "Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
    }
    num_days = 2
    solver = MinAbsDifference(
        num_days=num_days,
        periods=24,
        shifts_coverage=coverage,
        required_resources=demand,
        max_period_concurrency=25,
        max_shift_concurrency=20,
    )
    result = solver.solve()
    assert result['status'] == 'OPTIMAL'
    assert 'cost' in result
    assert 'resources_shifts' in result
    assert len(result['resources_shifts']) == num_days * len(coverage)
    for idx in range(num_days * len(coverage)):
        assert result['resources_shifts'][idx]['resources'] >= 0
def test_infeasible_min_abs_difference_schedule():
    """With max_period_concurrency=10 the demand cannot be met: the solver
    reports INFEASIBLE and returns the single sentinel 'Unknown' row."""
    demand = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8],
    ]
    coverage = {
        "Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
        "Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
        "Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
    }
    num_days = 2
    solver = MinAbsDifference(
        num_days=num_days,
        periods=24,
        shifts_coverage=coverage,
        required_resources=demand,
        max_period_concurrency=10,
        max_shift_concurrency=20,
    )
    result = solver.solve()
    assert result['status'] == 'INFEASIBLE'
    assert 'cost' in result
    assert 'resources_shifts' in result
    assert result['cost'] == -1
    assert len(result['resources_shifts']) == 1
    assert result['resources_shifts'][0]['day'] == -1
    assert result['resources_shifts'][0]['shift'] == 'Unknown'
    assert result['resources_shifts'][0]['resources'] == -1
def test_min_required_resources_schedule():
    """MinRequiredResources solves the same feasible problem to OPTIMAL with
    non-negative resources per day/shift row."""
    demand = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8],
    ]
    coverage = {
        "Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
        "Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
        "Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
    }
    num_days = 2
    solver = MinRequiredResources(
        num_days=num_days,
        periods=24,
        shifts_coverage=coverage,
        required_resources=demand,
        max_period_concurrency=25,
        max_shift_concurrency=25,
    )
    result = solver.solve()
    assert result['status'] == 'OPTIMAL'
    assert 'cost' in result
    assert 'resources_shifts' in result
    assert len(result['resources_shifts']) == num_days * len(coverage)
    for idx in range(num_days * len(coverage)):
        assert result['resources_shifts'][idx]['resources'] >= 0
def test_cost_min_required_resources_schedule():
    """Supplying a per-shift cost_dict (keys matching shifts_coverage) still
    yields an OPTIMAL solution with non-negative resources."""
    demand = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8],
    ]
    coverage = {
        "Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
        "Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
        "Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
    }
    shift_costs = {"Morning": 8, "Afternoon": 8, "Night": 10, "Mixed": 7}
    num_days = 2
    solver = MinRequiredResources(
        num_days=num_days,
        periods=24,
        shifts_coverage=coverage,
        required_resources=demand,
        cost_dict=shift_costs,
        max_period_concurrency=25,
        max_shift_concurrency=25,
    )
    result = solver.solve()
    assert result['status'] == 'OPTIMAL'
    assert 'cost' in result
    assert 'resources_shifts' in result
    assert len(result['resources_shifts']) == num_days * len(coverage)
    for idx in range(num_days * len(coverage)):
        assert result['resources_shifts'][idx]['resources'] >= 0
def test_wrong_cost_min_required_resources_schedule():
    """A cost_dict whose keys do not match shifts_coverage must raise with
    the documented error message ('Afternoon' is missing here)."""
    demand = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8],
    ]
    coverage = {
        "Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
        "Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
        "Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
    }
    bad_costs = {"Morning": 8, "Night": 10, "Mixed": 7}
    num_days = 2
    with pytest.raises(Exception) as excinfo:
        solver = MinRequiredResources(
            num_days=num_days,
            periods=24,
            shifts_coverage=coverage,
            required_resources=demand,
            cost_dict=bad_costs,
            max_period_concurrency=25,
            max_shift_concurrency=25,
        )
        result = solver.solve()
    assert str(excinfo.value) == "cost_dict must have the same keys as shifts_coverage"
def test_infeasible_min_required_resources_schedule():
    """max_shift_concurrency=20 makes the demand unmeetable: expect the
    INFEASIBLE status and the sentinel 'Unknown' row."""
    demand = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8],
    ]
    coverage = {
        "Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
        "Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
        "Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
    }
    num_days = 2
    solver = MinRequiredResources(
        num_days=num_days,
        periods=24,
        shifts_coverage=coverage,
        required_resources=demand,
        max_period_concurrency=25,
        max_shift_concurrency=20,
    )
    result = solver.solve()
    assert result['status'] == 'INFEASIBLE'
    assert 'cost' in result
    assert 'resources_shifts' in result
    assert result['cost'] == -1
    assert len(result['resources_shifts']) == 1
    assert result['resources_shifts'][0]['day'] == -1
    assert result['resources_shifts'][0]['shift'] == 'Unknown'
    assert result['resources_shifts'][0]['resources'] == -1
| 49.850829 | 109 | 0.477557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 887 | 0.098304 |
f34387e7183d544cbaf6418b121b6f24e3768081 | 2,925 | py | Python | api.py | mr-mixas/Nested-Diff-RESTful | 4407b0beaa7484d84323db659278a54d96efb696 | [
"Apache-2.0"
] | null | null | null | api.py | mr-mixas/Nested-Diff-RESTful | 4407b0beaa7484d84323db659278a54d96efb696 | [
"Apache-2.0"
] | null | null | null | api.py | mr-mixas/Nested-Diff-RESTful | 4407b0beaa7484d84323db659278a54d96efb696 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2021 Michael Samoglyadov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, Response, jsonify, render_template, request
from nested_diff import Differ, Patcher
from nested_diff.fmt import HtmlFormatter, TextFormatter, TermFormatter
app = Flask(__name__)
def format_diff_response(fmt, diff, opts):
    """Render *diff* with the formatter named by *fmt*.

    fmt is one of 'text', 'term' or 'html'; *opts* are passed to the
    formatter constructor. Unknown formats produce a 400 response.
    The body is always served as text/plain.
    """
    formatters = {
        'text': TextFormatter,
        'term': TermFormatter,
        'html': HtmlFormatter,
    }
    formatter = formatters.get(fmt)
    if formatter is None:
        return Response('Unsupported format ' + fmt, status=400)
    body = formatter(**opts).format(diff)
    return Response(body, status=200, mimetype='text/plain')
@app.route('/')
def index():
    """Serve the single-page UI, embedding the HTML-diff CSS inline."""
    # this page is almost never reloaded, so it is OK to embed css in it
    return render_template('index.html', html_fmt_css=HtmlFormatter.get_css())
@app.route('/ping')
def health_check():
    """Liveness probe: always answers 'pong' with HTTP 200."""
    pong = Response('pong', status=200)
    return pong
@app.route('/api/v1/diff', methods=['POST'])
def diff():
    """Diff JSON body fields 'a' and 'b' with nested_diff.Differ.

    Optional body fields:
      diff_opts: kwargs for Differ (default {'U': False})
      ofmt:      output format, 'json' (default), 'text', 'term' or 'html'
      ofmt_opts: kwargs for the chosen formatter
    """
    try:
        diff_opts = request.json.get('diff_opts', {'U': False})
    except AttributeError:
        # request.json was not a JSON object (e.g. null or a scalar)
        return Response('Object expected', status=400)
    try:
        diff = Differ(**diff_opts).diff(
            request.json.get('a', None),
            request.json.get('b', None),
        )
    except Exception:
        return Response('Incorrect options', status=400)
    ofmt = request.json.get('ofmt', 'json')
    if ofmt == 'json':
        return jsonify(diff)
    ofmt_opts = request.json.get('ofmt_opts', {})
    return format_diff_response(ofmt, diff, ofmt_opts)
# NOTE(review): the view function name shadows the builtin format(); renaming
# it would change the Flask endpoint name, so it is kept as-is.
@app.route('/api/v1/format', methods=['POST'])
def format():
    """Format a previously computed diff.

    Body fields: 'diff' (the nested diff), optional 'ofmt' ('text', 'term'
    or 'html'; absent means echo the diff back as JSON) and 'ofmt_opts'
    (formatter kwargs).
    """
    try:
        diff = request.json.get('diff', {})
        ofmt = request.json.get('ofmt', None)
        if ofmt is None:
            return jsonify(diff)
        ofmt_opts = request.json.get('ofmt_opts', {})
        return format_diff_response(ofmt, diff, ofmt_opts)
    except Exception:
        return Response('Incorrect arguments', status=400)
@app.route('/api/v1/patch', methods=['POST'])
def patch():
    """Apply the nested-diff 'patch' from the body to 'target' and return the result."""
    try:
        return jsonify(Patcher().patch(
            request.json['target'],
            request.json['patch'],
        ))
    except Exception:
        return Response('Incorrect arguments', status=400)
if __name__ == '__main__':
    # NOTE(review): debug=True while binding 0.0.0.0 exposes the Werkzeug
    # debugger to the whole network -- acceptable for local development only.
    app.run(host='0.0.0.0', port=8080, debug=True)
| 27.336449 | 78 | 0.649231 | 0 | 0 | 0 | 0 | 1,579 | 0.539829 | 0 | 0 | 988 | 0.337778 |
f34466e2623ecc3731ee9a82d535b049123cd382 | 4,253 | py | Python | accelbyte_py_sdk/api/eventlog/__init__.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/eventlog/__init__.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | accelbyte_py_sdk/api/eventlog/__init__.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
"""Auto-generated package that contains models used by the justice-event-log-service."""
__version__ = ""
__author__ = "AccelByte"
__email__ = "dev@accelbyte.net"
# pylint: disable=line-too-long
# event
from .wrappers import get_event_by_event_id_handler
from .wrappers import get_event_by_event_id_handler_async
from .wrappers import get_event_by_event_type_and_event_id_handler
from .wrappers import get_event_by_event_type_and_event_id_handler_async
from .wrappers import get_event_by_event_type_handler
from .wrappers import get_event_by_event_type_handler_async
from .wrappers import get_event_by_namespace_handler
from .wrappers import get_event_by_namespace_handler_async
from .wrappers import get_event_by_user_event_id_and_event_type_handler
from .wrappers import get_event_by_user_event_id_and_event_type_handler_async
from .wrappers import get_event_by_user_id_and_event_id_handler
from .wrappers import get_event_by_user_id_and_event_id_handler_async
from .wrappers import get_event_by_user_id_and_event_type_handler
from .wrappers import get_event_by_user_id_and_event_type_handler_async
from .wrappers import get_event_by_user_id_handler
from .wrappers import get_event_by_user_id_handler_async
from .wrappers import post_event_handler
from .wrappers import post_event_handler_async
# event_descriptions
from .wrappers import agent_type_description_handler
from .wrappers import agent_type_description_handler_async
from .wrappers import event_id_description_handler
from .wrappers import event_id_description_handler_async
from .wrappers import event_level_description_handler
from .wrappers import event_level_description_handler_async
from .wrappers import event_type_description_handler
from .wrappers import event_type_description_handler_async
from .wrappers import specific_agent_type_description_handler
from .wrappers import specific_agent_type_description_handler_async
from .wrappers import specific_event_id_description_handler
from .wrappers import specific_event_id_description_handler_async
from .wrappers import specific_event_level_description_handler
from .wrappers import specific_event_level_description_handler_async
from .wrappers import specific_event_type_description_handler
from .wrappers import specific_event_type_description_handler_async
from .wrappers import specific_ux_description_handler
from .wrappers import specific_ux_description_handler_async
from .wrappers import ux_name_description_handler
from .wrappers import ux_name_description_handler_async
# event_registry
from .wrappers import get_registered_event_id_handler
from .wrappers import get_registered_event_id_handler_async
from .wrappers import get_registered_events_by_event_type_handler
from .wrappers import get_registered_events_by_event_type_handler_async
from .wrappers import get_registered_events_handler
from .wrappers import get_registered_events_handler_async
from .wrappers import register_event_handler
from .wrappers import register_event_handler_async
from .wrappers import unregister_event_id_handler
from .wrappers import unregister_event_id_handler_async
from .wrappers import update_event_registry_handler
from .wrappers import update_event_registry_handler_async
# event_v2
from .wrappers import get_event_specific_user_v2_handler
from .wrappers import get_event_specific_user_v2_handler_async
from .wrappers import get_public_edit_history
from .wrappers import get_public_edit_history_async
from .wrappers import get_user_events_v2_public
from .wrappers import get_user_events_v2_public_async
from .wrappers import query_event_stream_handler
from .wrappers import query_event_stream_handler_async
# user_information
from .wrappers import delete_user_activities_handler
from .wrappers import delete_user_activities_handler_async
from .wrappers import get_user_activities_handler
from .wrappers import get_user_activities_handler_async
from .wrappers import last_user_activity_time_handler
from .wrappers import last_user_activity_time_handler_async
| 47.255556 | 88 | 0.894663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 481 | 0.113097 |
f344faaed6efdb55380a728e18b9884b9e5e2388 | 639 | py | Python | cisco_umbrella_enforcement/setup.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | cisco_umbrella_enforcement/setup.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | cisco_umbrella_enforcement/setup.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
from setuptools import setup, find_packages
setup(name='cisco_umbrella_enforcement-rapid7-plugin',
version='1.0.0',
description='Cisco Umbrella Enforcement give technology partners the ability to send security events from their platform/service/appliance within a customer environment to the Cisco security cloud for enforcement',
author='rapid7',
author_email='',
url='',
packages=find_packages(),
install_requires=['komand'], # Add third-party dependencies to requirements.txt, not here!
scripts=['bin/komand_cisco_umbrella_enforcement']
)
| 42.6 | 220 | 0.733959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 409 | 0.640063 |
f34959f0169dcb486cb4bef10086593c4a815fed | 628 | py | Python | auth0login/migrations/0001_initial.py | nkawa/vimeo-coursetool | 729215fe23b1bf05918a38d21e585a7b8862e75a | [
"Apache-2.0"
] | null | null | null | auth0login/migrations/0001_initial.py | nkawa/vimeo-coursetool | 729215fe23b1bf05918a38d21e585a7b8862e75a | [
"Apache-2.0"
] | null | null | null | auth0login/migrations/0001_initial.py | nkawa/vimeo-coursetool | 729215fe23b1bf05918a38d21e585a7b8862e75a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.8 on 2021-10-10 14:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the Ticket table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ticketName', models.CharField(max_length=40)),
                ('ticketGroup', models.CharField(max_length=20)),
                ('ticketKeyword', models.CharField(max_length=20)),
            ],
        ),
    ]
| 26.166667 | 117 | 0.58121 | 535 | 0.851911 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.164013 |
f34c2978a8d9484314e28561848de83a6cbbfe3f | 7,241 | py | Python | s3_test_model.py | BillChan226/In-Field-Crop-Disease-Regnition-via-Domain-Adaptation | 4500e7149a51eab66778471750b84b09d415e578 | [
"MIT"
] | 1 | 2021-12-23T17:56:22.000Z | 2021-12-23T17:56:22.000Z | s3_test_model.py | BillChan226/In-Field-Crop-Disease-Regnition-via-Domain-Adaptation | 4500e7149a51eab66778471750b84b09d415e578 | [
"MIT"
] | null | null | null | s3_test_model.py | BillChan226/In-Field-Crop-Disease-Regnition-via-Domain-Adaptation | 4500e7149a51eab66778471750b84b09d415e578 | [
"MIT"
] | null | null | null | # %%
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from model.inceptionv4 import inceptionv4
from model.mobilenetv2 import mobilenetv2
from model.resnet import resnet18
from model.shufflenetv2 import shufflenetv2
from model.vgg import vgg9_bn
from s3_dataset import PlantDataSet, PlantDataSetB
# %%
def get_acc(net, device, data_loader):
    """Compute top-1 classification accuracy of ``net`` over ``data_loader``.

    The model is put into eval mode and gradients are disabled; the batch
    argmax over the class dimension is compared against the labels and the
    ratio hits / samples is returned.
    """
    hits = 0
    seen = 0
    with torch.no_grad():
        net.eval()
        for images, labels in data_loader:
            inputs = images.float().to(device)
            targets = labels.long().to(device)
            logits = net(inputs)
            _, preds = torch.max(logits.data, 1)
            seen += targets.size(0)
            hits += (preds == targets).sum().item()
    return hits / seen
def get_pre(net, device, data_loader):
    """Run ``net`` over the whole loader and collect predictions and labels.

    Returns a pair ``(predictions, labels)`` of flat Python lists (numpy
    scalars) covering every sample in ``data_loader``, in iteration order.
    """
    truths = []
    outputs = []
    with torch.no_grad():
        net.eval()
        for images, labels in data_loader:
            inputs = images.float().to(device)
            targets = labels.long().to(device)
            logits = net(inputs)
            _, batch_preds = torch.max(logits.data, 1)
            truths.extend(targets.data.cpu().numpy())
            outputs.extend(batch_preds.data.cpu().numpy())
    return outputs, truths
# %%
# Evaluate saved checkpoints of each architecture on the validation and
# test splits of dataset A. (Only index 0 -- VGG -- is run below.)
Func = [vgg9_bn, resnet18, shufflenetv2, mobilenetv2, inceptionv4]
Save_path = [
    '../model_save/plant_disease2/vgg.pth',
    '../model_save/plant_disease2/resnet18.pth',
    '../model_save/plant_disease2/shufflenetv2.pth',
    '../model_save/plant_disease2/mobilenetv2.pth',
    '../model_save/plant_disease2/inceptionv4.pth'
]
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
# data_loader_val = DataLoader(PlantDataSetB(flag='val'),
#                              batch_size=64,
#                              shuffle=False)
# data_loader_test = DataLoader(PlantDataSetB(flag='test'),
#                              batch_size=64,
#                              shuffle=False)
data_loader_val = DataLoader(PlantDataSet(flag='val'),
                             batch_size=64,
                             shuffle=False)
data_loader_test = DataLoader(PlantDataSet(flag='test'),
                              batch_size=64,
                              shuffle=False)
print('A 域数据集: 校核')
for Index in range(1):
    # load the model architecture and its trained weights
    net = Func[Index]()
    path_saved_model = Save_path[Index]
    net.load_state_dict(torch.load(path_saved_model))
    net.to(device)
    val_acc = get_acc(net, device, data_loader_val)
    test_acc = get_acc(net, device, data_loader_test)
    print('{:d}: val_acc:{:.5f}, test_acc:{:.5f}'.format(
        Index, val_acc, test_acc))
# %%
# Compute the confusion matrix of one model on the two test sets
# (A-domain PlantDataSet and B-domain PlantDataSetB).
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
Func = [vgg9_bn, resnet18, shufflenetv2, mobilenetv2, inceptionv4]
Save_path = [
    '../model_save/plant_disease2/vgg.pth',
    '../model_save/plant_disease2/resnet18.pth',
    '../model_save/plant_disease2/shufflenetv2.pth',
    '../model_save/plant_disease2/mobilenetv2.pth',
    '../model_save/plant_disease2/inceptionv4.pth'
]
data_test_a = DataLoader(PlantDataSet(flag='test'),
                         batch_size=64,
                         shuffle=False)
data_test_b = DataLoader(PlantDataSetB(flag='test'),
                         batch_size=64,
                         shuffle=False)
Index = 1
# load the model architecture (resnet18) and its trained weights
net = Func[Index]()
path_saved_model = Save_path[Index]
net.load_state_dict(torch.load(path_saved_model))
net.to(device)
pre, label = get_pre(net, device, data_test_b)
pre, label = np.array(pre), np.array(label)
# %%
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score  # accuracy
from sklearn.metrics import confusion_matrix  # confusion matrix
print('预测精度为:{:.9f}'.format(accuracy_score(label, pre)))
# Inspect the confusion matrix.
# Mapping from PlantVillage class name to integer label (38 classes).
domain_A_class = {
    'Apple___Apple_scab': 0,
    'Apple___Black_rot': 1,
    'Apple___Cedar_apple_rust': 2,
    'Apple___healthy': 3,
    'Blueberry___healthy': 4,
    'Cherry_(including_sour)___Powdery_mildew': 5,
    'Cherry_(including_sour)___healthy': 6,
    'Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot': 7,
    'Corn_(maize)___Common_rust_': 8,
    'Corn_(maize)___Northern_Leaf_Blight': 9,
    'Corn_(maize)___healthy': 10,
    'Grape___Black_rot': 11,
    'Grape___Esca_(Black_Measles)': 12,
    'Grape___Leaf_blight_(Isariopsis_Leaf_Spot)':13,
    'Grape___healthy':14,
    'Orange___Haunglongbing_(Citrus_greening)':15,
    'Peach___Bacterial_spot':16,
    'Peach___healthy':17,
    'Pepper,_bell___Bacterial_spot':18,
    'Pepper,_bell___healthy':19,
    'Potato___Early_blight':20,
    'Potato___Late_blight':21,
    'Potato___healthy':22,
    'Raspberry___healthy':23,
    'Soybean___healthy':24,
    'Squash___Powdery_mildew':25,
    'Strawberry___Leaf_scorch':26,
    'Strawberry___healthy':27,
    'Tomato___Bacterial_spot':28,
    'Tomato___Early_blight':29,
    'Tomato___Late_blight':30,
    'Tomato___Leaf_Mold':31,
    'Tomato___Septoria_leaf_spot':32,
    'Tomato___Spider_mites Two-spotted_spider_mite':33,
    'Tomato___Target_Spot':34,
    'Tomato___Tomato_Yellow_Leaf_Curl_Virus':35,
    'Tomato___Tomato_mosaic_virus':36,
    'Tomato___healthy':37}
c_matrix = confusion_matrix(label, pre, labels=list(range(38)))
# %% 这个代码留着
def plot_Matrix(cm, classes, title=None, cmap=plt.cm.Blues):
    """Plot a row-normalized confusion matrix with percentage annotations.

    cm:      square confusion matrix (counts); normalized in place per row.
    classes: tick labels for both axes.
    title:   optional plot title.
    cmap:    matplotlib colormap for the cells.
    """
    plt.rc('font',family='Times New Roman',size='8')  # set font family and size
    # Normalize each row so cells are fractions of the true class.
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    print("Normalized confusion matrix")
    # BUG FIX: np.str (a deprecated alias of builtin str) was removed in
    # NumPy 1.24; use the builtin directly -- behavior is identical.
    str_cm = cm.astype(str).tolist()
    for row in str_cm:
        print('\t'.join(row))
    # Zero out cells that would round below 1% so they do not show up
    # in the final color rendering.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            if int(cm[i, j]*100 + 0.5) == 0:
                cm[i, j]=0
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    # ax.figure.colorbar(im, ax=ax)  # side color bar (disabled)
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='Actual',
           xlabel='Predicted')
    # Draw a minor-tick grid to simulate per-cell borders.
    ax.set_xticks(np.arange(cm.shape[1]+1)-.5, minor=True)
    ax.set_yticks(np.arange(cm.shape[0]+1)-.5, minor=True)
    ax.grid(which="minor", color="gray", linestyle='-', linewidth=0.2)
    ax.tick_params(which="minor", bottom=False, left=False)
    # Rotate the x-axis labels by 45 degrees.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Annotate each visible cell with its rounded percentage.
    fmt = 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            if int(cm[i, j]*100 + 0.5) > 0:
                ax.text(j, i, format(int(cm[i, j]*100 + 0.5) , fmt) + '%',
                        ha="center", va="center",
                        color="white" if cm[i, j] > thresh else "black")
    plt.show()
# %%
domain_A_class.keys()
# %%
# NOTE(review): `cm` is not defined at module scope (the confusion matrix
# computed above is bound to `c_matrix`); this notebook cell only works with
# leftover interactive state -- it probably should be plt.matshow(c_matrix, ...).
plt.matshow(cm, cmap=plt.cm.Blues)
# %%
| 31.898678 | 74 | 0.628228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,646 | 0.350046 |
f34de76ae9039725d601fb7aab02966a635e24da | 2,071 | py | Python | Control/AFGRoperations.py | RNolioSC/TrabalhoFormais | 1668fe2cd21eb2a5546e5d04381e5927637aab4c | [
"MIT"
] | null | null | null | Control/AFGRoperations.py | RNolioSC/TrabalhoFormais | 1668fe2cd21eb2a5546e5d04381e5927637aab4c | [
"MIT"
] | null | null | null | Control/AFGRoperations.py | RNolioSC/TrabalhoFormais | 1668fe2cd21eb2a5546e5d04381e5927637aab4c | [
"MIT"
class AFGR:
    """Conversions between finite automata (AF) and regular grammars (GR).

    Both representations are dictionaries keyed by state / non-terminal name:
      - AF form: state -> list of [symbol, next_state] pairs
      - GR form: non-terminal -> flat list alternating terminal symbol and
        (optionally) the non-terminal it leads to
    """
    @staticmethod
    def af_to_gr(dict_af):
        """Flatten each state's [symbol, state] transition pairs into the
        grammar's alternating symbol/non-terminal list."""
        # pair[0]/pair[1] (not *pair) preserves the original behavior of
        # taking exactly the first two entries of each transition.
        return {state: [item for pair in pairs for item in (pair[0], pair[1])]
                for state, pairs in dict_af.items()}
    @staticmethod
    def gr_to_af(dict_gr, estados_aceitacao):
        """Convert a regular grammar to automaton form.

        A fresh final state 'F' is created and appended to
        ``estados_aceitacao`` (the caller's list is mutated in place).
        Terminal symbols that are not followed by a non-terminal become
        transitions into 'F'.
        """
        dict_swap = {'F': []}  # the synthetic accepting state has no outgoing transitions
        estados_aceitacao.append('F')
        for key in dict_gr:
            dict_swap[key] = []
            productions = dict_gr[key]
            i = 0
            while i < len(productions):
                # istitle() is how non-terminals are detected; this only works
                # for single-word capitalized names (e.g. 'A', 'Ab').
                if i + 1 < len(productions) and productions[i + 1].istitle():
                    # terminal followed by a non-terminal: ordinary transition
                    dict_swap[key].append([productions[i], productions[i + 1]])
                    i += 2
                else:
                    # lone terminal: either retarget an existing transition on
                    # the same symbol to 'F', or add a new transition into 'F'
                    if AFGR.verifica_estado_final(dict_swap, dict_gr, key, i):
                        dict_swap[key].append([productions[i], 'F'])
                    i += 1
            # Re-check the last symbol position (original note: unfinished,
            # may repeat work already done above).
            AFGR.verifica_estado_final(dict_swap, dict_gr, key, i - 2)
        return dict_swap
    @staticmethod
    def verifica_estado_final(dict_swap, dict_gr, keys, contador):
        """Scan transitions already emitted for ``keys``: if one leaves on the
        same symbol and does not yet point to 'F', retarget it to 'F' and
        return False (no new transition needed); otherwise return True."""
        for transition in dict_swap[keys]:
            if transition[0] == dict_gr[keys][contador] and transition[1] != 'F':
                transition[1] = 'F'
                return False
        return True
| 38.351852 | 122 | 0.551424 | 2,070 | 0.999517 | 0 | 0 | 1,968 | 0.950266 | 0 | 0 | 190 | 0.091743 |
f3509ac1a46b83c1515e0c359aea025cc6d03375 | 3,996 | py | Python | menu.py | bkaganozkan/Difference | 725a3e74dd620c2f646825aa60ac715833ec6857 | [
"MIT"
] | null | null | null | menu.py | bkaganozkan/Difference | 725a3e74dd620c2f646825aa60ac715833ec6857 | [
"MIT"
] | null | null | null | menu.py | bkaganozkan/Difference | 725a3e74dd620c2f646825aa60ac715833ec6857 | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtGui import QPainter, QColor, QSyntaxHighlighter, QTextCharFormat
from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QToolTip, QWidget, QLabel
from PyQt5.QtWidgets import QFileDialog, QTextEdit
from highlighter import Highlighter
# SubWindow Class
class Widget(QTextEdit):
    """A positioned QTextEdit pane with a syntax Highlighter attached."""
    # NOTE(review): class-level mutable attribute, shared by all Widget
    # instances; the main window appears to use it as a class-wide store.
    result = []
    def __init__(self, left, top, width, height, parent=None):
        super().__init__(parent)
        self.left = left
        self.top = top
        self.width = width
        self.height = height
        self.setParent(parent)
        self.initWidget()
        self.highLighter = Highlighter(self.document())
    def returnResult(self, res):
        """Forward a comparison result to the attached highlighter."""
        self.highLighter.getResult(res)
    def initWidget(self):
        """Place the widget at its configured geometry and show it."""
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.show()
    def setText(self, text):
        super().setText(text)
    def getText(self):
        # BUG FIX: the original called toPlainText() but dropped the value,
        # so getText() always returned None.
        return super().toPlainText()
# Main Program Window
class Window(QMainWindow):
    """Main application window: two text panes plus file-open / compare buttons."""
    # Paths of the two most recently opened files (class-level state).
    readFile, readFile1 = "", ""
    def __init__(self):
        super().__init__()
        # Left and right text panes.
        self.Wid = Widget(25, 25, 400, 450, self)
        self.Wid1 = Widget(450, 25, 400, 450, self)
        self.first_path_label = QLabel(self)
        self.first_path_label.setWordWrap(True)
        self.second_path_label = QLabel(self)
        self.second_path_label.setWordWrap(True)
        self.title = "PyQt 5 Application"
        self.left = 100
        self.top = 100
        self.width = 1024
        self.height = 500
        self.InitUI()
    def returnTextWindow(self):
        """Expose the two text panes to callers."""
        return self.Wid,self.Wid1
    def InitUI(self):
        """Build window geometry, buttons and signal connections, then show."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        # First OpenFile Button
        openFileButton_1 = QPushButton("First File", self)
        openFileButton_1.setGeometry(860, 75, 75, 25)
        openFileButton_1.clicked.connect(self.buttonClick)
        self.first_path_label.setGeometry(860, 105, 125, 25)
        # Second OpenFile Button
        openFileButton_2 = QPushButton("Second File", self)
        openFileButton_2.setGeometry(860, 150, 75, 25)
        openFileButton_2.clicked.connect(self.buttonClick_1)
        self.second_path_label.setGeometry(860, 180, 125, 25)
        # Save Button
        # NOTE(review): saveButton has no clicked handler wired up yet.
        saveButton = QPushButton("Save", self)
        saveButton.setGeometry(925, 450, 75, 25)
        #Compare Button
        comparePutton = QPushButton("Compare", self)
        comparePutton.setGeometry(850,450,75,25)
        comparePutton.clicked.connect(self.Compared)
        self.show()
    def Compared(self):
        """Compare handler; currently only reads both panes' text."""
        self.Wid.getText()
        self.Wid1.getText()
    def buttonClick(self):
        """Open a file into the left pane and record its path."""
        data, path = self.openFile()
        self.first_path_label.setText(path)
        Window.readFile = path
        self.Wid.setText(data)
        # Widget.result = retResult(self.readFile,self.readFile1)
    def buttonClick_1(self):
        """Open a file into the right pane and record its path."""
        data, path = self.openFile()
        self.second_path_label.setText(path)
        Window.readFile1 = path
        self.Wid1.setText(data)
        # Widget.result = retResult(self.readFile,self.readFile1)
    def openFile(self):
        """Show a file dialog; return (contents, path), or a placeholder on cancel."""
        fileName = QFileDialog.getOpenFileName(self, "Open File")
        if fileName[0]:
            f = open(fileName[0], 'r')
            with f:
                data = f.read()
            path = fileName[0]
        else:
            data = "Sie uzağa git karşim"
            path = "None"
        return data, path
# Start the Qt application and block in its event loop until the
# window closes; exit the process with Qt's return code.
App = QApplication(sys.argv)
window = Window()
sys.exit(App.exec())
| 26.463576 | 94 | 0.593093 | 3,600 | 0.90045 | 0 | 0 | 0 | 0 | 0 | 0 | 545 | 0.136318 |
f350b79d3a3986015ca3232bfcbeaee0d5fcd69f | 6,501 | py | Python | syft_proto/execution/v1/protocol_pb2.py | karlhigley/syft-proto | f7c926ce26f7551b7ab430fcab9c204819845396 | [
"Apache-2.0"
] | null | null | null | syft_proto/execution/v1/protocol_pb2.py | karlhigley/syft-proto | f7c926ce26f7551b7ab430fcab9c204819845396 | [
"Apache-2.0"
] | null | null | null | syft_proto/execution/v1/protocol_pb2.py | karlhigley/syft-proto | f7c926ce26f7551b7ab430fcab9c204819845396 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: syft_proto/execution/v1/protocol.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from syft_proto.types.syft.v1 import id_pb2 as syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2
from syft_proto.execution.v1 import role_pb2 as syft__proto_dot_execution_dot_v1_dot_role__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='syft_proto/execution/v1/protocol.proto',
package='syft_proto.execution.v1',
syntax='proto3',
serialized_options=b'\n$org.openmined.syftproto.execution.v1',
serialized_pb=b'\n&syft_proto/execution/v1/protocol.proto\x12\x17syft_proto.execution.v1\x1a!syft_proto/types/syft/v1/id.proto\x1a\"syft_proto/execution/v1/role.proto\"\x9f\x02\n\x08Protocol\x12,\n\x02id\x18\x01 \x01(\x0b\x32\x1c.syft_proto.types.syft.v1.IdR\x02id\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12\x42\n\x05roles\x18\x03 \x03(\x0b\x32,.syft_proto.execution.v1.Protocol.RolesEntryR\x05roles\x12\x12\n\x04tags\x18\x04 \x03(\tR\x04tags\x12 \n\x0b\x64\x65scription\x18\x05 \x01(\tR\x0b\x64\x65scription\x1aW\n\nRolesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32\x1d.syft_proto.execution.v1.RoleR\x05value:\x02\x38\x01\x42&\n$org.openmined.syftproto.execution.v1b\x06proto3'
,
dependencies=[syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2.DESCRIPTOR,syft__proto_dot_execution_dot_v1_dot_role__pb2.DESCRIPTOR,])
_PROTOCOL_ROLESENTRY = _descriptor.Descriptor(
name='RolesEntry',
full_name='syft_proto.execution.v1.Protocol.RolesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='syft_proto.execution.v1.Protocol.RolesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='key', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='syft_proto.execution.v1.Protocol.RolesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='value', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=339,
serialized_end=426,
)
_PROTOCOL = _descriptor.Descriptor(
name='Protocol',
full_name='syft_proto.execution.v1.Protocol',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='syft_proto.execution.v1.Protocol.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='id', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='syft_proto.execution.v1.Protocol.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='name', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='roles', full_name='syft_proto.execution.v1.Protocol.roles', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='roles', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='syft_proto.execution.v1.Protocol.tags', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='tags', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='syft_proto.execution.v1.Protocol.description', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='description', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PROTOCOL_ROLESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=426,
)
_PROTOCOL_ROLESENTRY.fields_by_name['value'].message_type = syft__proto_dot_execution_dot_v1_dot_role__pb2._ROLE
_PROTOCOL_ROLESENTRY.containing_type = _PROTOCOL
_PROTOCOL.fields_by_name['id'].message_type = syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2._ID
_PROTOCOL.fields_by_name['roles'].message_type = _PROTOCOL_ROLESENTRY
DESCRIPTOR.message_types_by_name['Protocol'] = _PROTOCOL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Protocol = _reflection.GeneratedProtocolMessageType('Protocol', (_message.Message,), {
'RolesEntry' : _reflection.GeneratedProtocolMessageType('RolesEntry', (_message.Message,), {
'DESCRIPTOR' : _PROTOCOL_ROLESENTRY,
'__module__' : 'syft_proto.execution.v1.protocol_pb2'
# @@protoc_insertion_point(class_scope:syft_proto.execution.v1.Protocol.RolesEntry)
})
,
'DESCRIPTOR' : _PROTOCOL,
'__module__' : 'syft_proto.execution.v1.protocol_pb2'
# @@protoc_insertion_point(class_scope:syft_proto.execution.v1.Protocol)
})
_sym_db.RegisterMessage(Protocol)
_sym_db.RegisterMessage(Protocol.RolesEntry)
DESCRIPTOR._options = None
_PROTOCOL_ROLESENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 43.05298 | 727 | 0.76219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,923 | 0.295801 |
f351beee33294314503ee751e48a72f851b330e8 | 3,747 | py | Python | ndn_python_repo/clients/delete.py | daniel-achee/ndn-python-repo-replication | 87b5aadd79d7b06e8d87df08baf0affe0369e008 | [
"Apache-2.0"
] | null | null | null | ndn_python_repo/clients/delete.py | daniel-achee/ndn-python-repo-replication | 87b5aadd79d7b06e8d87df08baf0affe0369e008 | [
"Apache-2.0"
] | null | null | null | ndn_python_repo/clients/delete.py | daniel-achee/ndn-python-repo-replication | 87b5aadd79d7b06e8d87df08baf0affe0369e008 | [
"Apache-2.0"
] | null | null | null | # -----------------------------------------------------------------------------
# NDN Repo delete client.
#
# @Author jonnykong@cs.ucla.edu
# @Date 2019-09-26
# -----------------------------------------------------------------------------
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import argparse
import asyncio as aio
from ..command.repo_commands import RepoCommandParameter, RepoCommandResponse
from .command_checker import CommandChecker
from ..utils import PubSub
import logging
from ndn.app import NDNApp
from ndn.encoding import Name, Component, DecodeError, NonStrictName
from ndn.types import InterestNack, InterestTimeout
from ndn.utils import gen_nonce
class DeleteClient(object):
def __init__(self, app: NDNApp, prefix: NonStrictName, repo_name: NonStrictName):
"""
This client deletes data packets from the remote repo.
:param app: NDNApp.
:param repo_name: NonStrictName. Routable name to remote repo.
"""
self.app = app
self.prefix = prefix
self.repo_name = repo_name
self.pb = PubSub(self.app, self.prefix)
async def delete_file(self, prefix: NonStrictName, start_block_id: int=None,
end_block_id: int=None) -> int:
"""
Delete from repo packets between "<name_at_repo>/<start_block_id>" and\
"<name_at_repo>/<end_block_id>" inclusively.
:param prefix: NonStrictName. The name of the file stored in the remote repo.
:param start_block_id: int. Default value is 0.
:param end_block_id: int. If not specified, repo will attempt to delete all data packets\
with segment number starting from `start_block_id` continously.
:return: Number of deleted packets.
"""
# send command interest
cmd_param = RepoCommandParameter()
cmd_param.name = prefix
cmd_param.start_block_id = start_block_id
cmd_param.end_block_id = end_block_id
cmd_param.register_prefix = prefix
process_id = gen_nonce()
cmd_param.process_id = process_id
cmd_param_bytes = cmd_param.encode()
# publish msg to repo's delete topic
await self.pb.wait_for_ready()
self.pb.publish(self.repo_name + ['delete'], cmd_param_bytes)
# wait until repo delete all data
return await self._wait_for_finish(process_id)
async def _wait_for_finish(self, process_id: int):
"""
Send delete check interest to wait until delete process completes
:param process_id: int. The process id to check for delete process
:return: Number of deleted packets.
"""
checker = CommandChecker(self.app)
n_retries = 3
while n_retries > 0:
response = await checker.check_delete(self.repo_name, process_id)
if response is None:
logging.info(f'Response code is None')
await aio.sleep(1)
# might receive 404 if repo has not yet processed delete command msg
elif response.status_code == 404:
n_retries -= 1
logging.info(f'Response code is {response.status_code}')
await aio.sleep(1)
elif response.status_code == 300:
logging.info(f'Response code is {response.status_code}')
await aio.sleep(1)
elif response.status_code == 200:
logging.info('Delete process {} status: {}, delete_num: {}'
.format(process_id, response.status_code, response.delete_num))
return response.delete_num
else:
# Shouldn't get here
assert False | 39.861702 | 97 | 0.613558 | 3,047 | 0.813184 | 0 | 0 | 0 | 0 | 2,600 | 0.693888 | 1,481 | 0.39525 |
f351c677fbd55b35040364f1c638b44dd7ef0b32 | 1,291 | py | Python | watchmate_v2.0.1/app/models.py | rroy11705/Rest_API_With_Django | 6a75db2e2c3913ec9afc1cbfef67a5c9fd655e60 | [
"CNRI-Python"
] | null | null | null | watchmate_v2.0.1/app/models.py | rroy11705/Rest_API_With_Django | 6a75db2e2c3913ec9afc1cbfef67a5c9fd655e60 | [
"CNRI-Python"
] | null | null | null | watchmate_v2.0.1/app/models.py | rroy11705/Rest_API_With_Django | 6a75db2e2c3913ec9afc1cbfef67a5c9fd655e60 | [
"CNRI-Python"
] | null | null | null | from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.contrib.auth.models import User
class StreamPlatform(models.Model):
name = models.CharField(max_length=64)
about = models.TextField(max_length=512)
website = models.URLField(max_length=128)
def __str__(self):
return self.name
class WatchList(models.Model):
title = models.CharField(max_length=256)
storyline = models.TextField(max_length=2048)
platforms = models.ManyToManyField(StreamPlatform, related_name="watchlist")
active = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
class Review(models.Model):
reviewer = models.ForeignKey(User, on_delete=models.CASCADE)
rating = models.PositiveIntegerField(
validators=[MinValueValidator(1), MaxValueValidator(5)])
description = models.CharField(max_length=256, null=True)
watchlist = models.ForeignKey(WatchList, on_delete=models.CASCADE, related_name="reviews")
active = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
update = models.DateTimeField(auto_now=True)
def __str__(self):
return str(self.rating)
| 33.973684 | 94 | 0.749032 | 1,137 | 0.880713 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.015492 |
f351d6d450e1d175828af0a3c364b01689cdf89d | 1,998 | py | Python | junk-test/junk-cable.py | SanjibSarkarU/EDRC | c2408fad8b007b4709ee91caf173f98612afadb1 | [
"Apache-2.0"
] | null | null | null | junk-test/junk-cable.py | SanjibSarkarU/EDRC | c2408fad8b007b4709ee91caf173f98612afadb1 | [
"Apache-2.0"
] | null | null | null | junk-test/junk-cable.py | SanjibSarkarU/EDRC | c2408fad8b007b4709ee91caf173f98612afadb1 | [
"Apache-2.0"
] | null | null | null | import threading
import datetime
import serial
import functions
from queue import Queue
rf_port = 'COM4'
ser_rf = serial.Serial(rf_port, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=1, xonxoff=0)
iver = '3089'
send_through_rf_every = 2
def read_rf():
"""Read RF port"""
ser_rf.reset_input_buffer()
send_through_rf()
osi_rec, osd_ak = 0, 0
while True:
try:
frm_iver = ser_rf.readline().decode()
if len(frm_iver) > 1:
if functions.received_stream(frm_iver) == 'osi':
osi_return = functions.osi(frm_iver)
if functions.osi(frm_iver) is not None:
print(datetime.datetime.now(), ': RF: lat:', osi_return['Latitude'],
'lng:', osi_return['Longitude'], ', speed:', osi_return['Speed'],
', Battery:', osi_return['Battery'], ', nxtWP:', osi_return['NextWp'],
', DistantNxt WP: ', osi_return['DistanceToNxtWP'])
print(datetime.datetime.now(), f': OSI received RF: {osi_rec} / requested: {rf_i}')
osi_rec += 1
elif functions.received_stream(frm_iver) == 'osdAck':
if functions.osd_ack(frm_iver) == 0:
print(datetime.datetime.now(), ': OSI Ack received RF ', osd_ak)
osd_ak += 1
except Exception as e:
# q_log.put([datetime.datetime.now().strftime("%H:%M:%S:%f"), ':', e])
ser_rf.reset_input_buffer()
continue
rf_i = 0
def send_through_rf():
# send_through_ac_every = 15
inst_snd = '$AC;Iver3-' + iver + ';' + '$' + functions.osd() + '\r\n'
ser_rf.reset_output_buffer()
ser_rf.write(inst_snd.encode())
global rf_i
print(datetime.datetime.now(), ': Sending through RF: ', rf_i)
rf_i += 1
threading.Timer(send_through_rf_every, send_through_rf).start()
read_rf() | 37 | 107 | 0.560561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.199199 |
f3524d7786f294f680962dddd5beb46c86fd8c5e | 15,143 | py | Python | experiments.py | joshsanz/learned_uncertainty | 2103126105dbe44cfe75fc22291ba669c1a162f3 | [
"MIT"
] | null | null | null | experiments.py | joshsanz/learned_uncertainty | 2103126105dbe44cfe75fc22291ba669c1a162f3 | [
"MIT"
] | null | null | null | experiments.py | joshsanz/learned_uncertainty | 2103126105dbe44cfe75fc22291ba669c1a162f3 | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('tkagg')
from matplotlib import pyplot as plt
plt.rc('figure', figsize=[10, 6])
import time
from data_models import *
from prediction_models import *
from control_models import *
def error(predicted_return, true_return):
return (predicted_return - true_return)
def get_gaussian_data(num_samples, true_asset_value, asset_covariance, seed=1):
num_assets = asset_covariance.shape[0]
sampler = GaussianNoise(seed)
data = np.zeros(shape=(num_samples, num_assets))
for t in range(num_samples):
sampler_input = (true_asset_value, asset_covariance)
data[t] = sampler.sample(sampler_input)
return data
def get_wiener_data(num_samples, true_asset_value, asset_covariance, seed=1):
num_assets = asset_covariance.shape[0]
steps = get_gaussian_data(num_samples, np.zeros((num_assets,)), asset_covariance, seed)
return np.cumsum(steps, axis=0) + true_asset_value
def get_real_data():
sampler = RealData()
return sampler.labels(), sampler.dates(), sampler.sample()
def get_returns(data, investment_strategies, asset_predictions):
num_samples = investment_strategies.shape[0]
predicted_return = np.zeros(shape=(num_samples,))
true_return = np.zeros(shape=(num_samples,))
for t in range(num_samples):
if t <= 2:
continue
observed_asset_value = data[t]
predicted_asset_value = asset_predictions[t]
investment_strategy = investment_strategies[t]
true_return[t] = investment_strategy.dot(observed_asset_value)
predicted_return[t] = investment_strategy.dot(predicted_asset_value)
return predicted_return, true_return
def run_gaussian_norm(data, num_samples, num_assets, pred_params, control_params):
gamma = control_params['gamma']
regularization = control_params['regularization']
prediction_model = UnbiasGaussianEstimator()
window = pred_params['window']
cov_model = NormModel(num_assets=num_assets, gamma=gamma, regularization=regularization)
predicted_asset_values = np.zeros(shape=(num_samples, num_assets))
investment_strategies = np.zeros(shape=(num_samples, num_assets))
for t in range(num_samples):
if t <= 2:
continue
if window is None:
past_data = data[:t]
else:
past_data = data[max(0, t-window):t]
predicted_asset_value, predicted_asset_variance = prediction_model.predict(past_data)
predicted_asset_values[t] = predicted_asset_value
control_input = (predicted_asset_value, predicted_asset_variance)
cov_model.run(control_input)
investment_strategy = cov_model.variables()
investment_strategies[t] = investment_strategy
return predicted_asset_values, investment_strategies
def run_gaussian_covar(data, num_samples, num_assets, pred_params, control_params):
gamma = control_params['gamma']
prediction_model = UnbiasGaussianEstimator()
window = pred_params['window']
cov_model = CovarianceModel(num_assets=num_assets, gamma=gamma)
predicted_asset_values = np.zeros(shape=(num_samples, num_assets))
investment_strategies = np.zeros(shape=(num_samples, num_assets))
for t in range(num_samples):
if t <= 2:
continue
if window is None:
past_data = data[:t]
else:
past_data = data[max(0, t-window):t]
predicted_asset_value, predicted_asset_variance = prediction_model.predict(past_data)
predicted_asset_values[t] = predicted_asset_value
control_input = (predicted_asset_value, predicted_asset_variance)
cov_model.run(control_input)
investment_strategy = cov_model.variables()
investment_strategies[t] = investment_strategy
return predicted_asset_values, investment_strategies
def run_simple_gaussian_experiments(params, real_data=False, plot=False, seed=1):
if not real_data:
num_samples = 100
true_asset_value = params['asset_value']
asset_covariance = params['asset_covariance']
data = get_gaussian_data(num_samples, true_asset_value, asset_covariance, seed)
data = np.clip(data, 1e-3, None)
else:
data_labels, data_dates, data = get_real_data()
print("date range:", data_dates[0][0], "-", data_dates[0][-1])
num_samples = data.shape[0]
gamma = params['gamma']
window = params['window']
num_assets = data.shape[1]
if plot:
if real_data:
for i in range(num_assets):
plt.plot(data.T[i], label=data_labels[i])
else:
plt.plot(data, label='Asset Values')
plt.legend()
plt.title('Input Data')
plt.show()
# Add experiments to run here.
experiments = [
("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
]
bar_plot_mean = []
bar_plot_std = []
results = {}
results['true_values'] = data
for name, experiment_func, pred_params, control_params in experiments:
predicted_asset_values, investment_strategies = experiment_func(data,
num_samples,
num_assets,
pred_params,
control_params)
predicted_return, true_return = get_returns(data, investment_strategies, predicted_asset_values)
results[name] = {}
results[name]['predicted_return'] = predicted_return
results[name]['strategies'] = investment_strategies
results[name]['predicted_values'] = predicted_asset_values
results[name]['true_return'] = true_return
print(name, np.sum(true_return))
bar_plot_mean.append(np.mean(true_return))
bar_plot_std.append(np.std(true_return))
# all_error = error(predicted_return, true_return)
# window = 10
# for i in range(0, num_samples-window, window):
# print(name, np.mean(all_error[i:i + window]))
if plot:
# We really just care about how well the investment strategies actually do,
# which is given by true_return.
plt.plot(np.arange(3, num_samples), true_return[3:], label=name + ' true return', alpha=0.5)
# In final plots, predicted return may not be relevant.
# plt.plot(np.arange(3, num_samples), predicted_return[3:], label=name + ' predicted return')
if plot:
plt.legend()
plt.show()
plt.bar(np.arange(len(experiments)), height=bar_plot_mean, yerr=bar_plot_std)
plt.show()
return results
def run_ltv_gaussian_experiments(params, plot=False, seed=1):
num_samples = 100
true_asset_v0 = params['asset_value']
true_asset_delta = params['asset_delta']
asset_covariance = params['asset_covariance']
gamma = params['gamma']
window = params['window']
true_asset_value = true_asset_v0 + (true_asset_delta.T @ np.arange(0,num_samples).reshape(-1,1).T).T
data = get_gaussian_data(num_samples, np.zeros((3,)), asset_covariance, seed) + true_asset_value
data = np.clip(data, 1e-3, None)
num_assets = data.shape[1]
if plot:
plt.plot(data, label='Asset Values')
plt.legend()
plt.title('Input Data')
plt.show()
# Add experiments to run here.
experiments = [
("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
]
bar_plot_mean = []
bar_plot_std = []
results = {}
results['true_values'] = data
for name, experiment_func, pred_params, control_params in experiments:
predicted_asset_values, investment_strategies = experiment_func(data,
num_samples,
num_assets,
pred_params,
control_params)
predicted_return, true_return = get_returns(data, investment_strategies, predicted_asset_values)
results[name] = {}
results[name]['predicted_return'] = predicted_return
results[name]['strategies'] = investment_strategies
results[name]['predicted_values'] = predicted_asset_values
results[name]['true_return'] = true_return
print(name, np.sum(true_return))
bar_plot_mean.append(np.mean(true_return))
bar_plot_std.append(np.std(true_return))
# all_error = error(predicted_return, true_return)
# window = 10
# for i in range(0, num_samples-window, window):
# print(name, np.mean(all_error[i:i + window]))
if plot:
# We really just care about how well the investment strategies actually do,
# which is given by true_return.
plt.plot(np.arange(3, num_samples), true_return[3:], label=name + ' true return', alpha=0.33)
# In final plots, predicted return may not be relevant.
plt.plot(np.arange(3, num_samples), predicted_return[3:], label=name + ' predicted return')
if plot:
plt.legend()
plt.show()
plt.bar(np.arange(len(experiments)), height=bar_plot_mean, yerr=bar_plot_std)
plt.show()
return results
def run_wiener_experiments(params, plot=False, seed=1):
num_samples = 100
true_asset_v0 = params['asset_value']
asset_covariance = params['asset_covariance']
gamma = params['gamma']
window = params['window']
data = get_wiener_data(num_samples, true_asset_v0, asset_covariance, seed)
data = np.clip(data, 1e-3, None)
num_assets = data.shape[1]
if plot:
plt.plot(data, label='Asset Values')
plt.legend()
plt.title('Input Data')
plt.show()
# Add experiments to run here.
experiments = [
("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
]
bar_plot_mean = []
bar_plot_std = []
results = {}
results['true_values'] = data
for name, experiment_func, pred_params, control_params in experiments:
predicted_asset_values, investment_strategies = experiment_func(data,
num_samples,
num_assets,
pred_params,
control_params)
predicted_return, true_return = get_returns(data, investment_strategies, predicted_asset_values)
results[name] = {}
results[name]['predicted_return'] = predicted_return
results[name]['strategies'] = investment_strategies
results[name]['predicted_values'] = predicted_asset_values
results[name]['true_return'] = true_return
print(name, np.sum(true_return))
bar_plot_mean.append(np.mean(true_return))
bar_plot_std.append(np.std(true_return))
# all_error = error(predicted_return, true_return)
# window = 10
# for i in range(0, num_samples-window, window):
# print(name, np.mean(all_error[i:i + window]))
if plot:
# We really just care about how well the investment strategies actually do,
# which is given by true_return.
plt.plot(np.arange(3, num_samples), true_return[3:], label=name + ' true return', alpha=0.33)
# In final plots, predicted return may not be relevant.
plt.plot(np.arange(3, num_samples), predicted_return[3:], label=name + ' predicted return')
if plot:
plt.legend()
plt.show()
plt.bar(np.arange(len(experiments)), height=bar_plot_mean, yerr=bar_plot_std)
plt.show()
return results
if __name__ == "__main__":
run_simple_gaussian_experiments(params={'gamma': 1,
'window': 10},
real_data=True,
plot=True, seed=int(time.time()))
run_simple_gaussian_experiments(params={'asset_value': np.array([0.8, 1.0, 1.1]),
'asset_covariance': np.diag([0.02, 0.01, 0.03]),
'gamma': 1,
'window': 10},
plot=True, seed=int(time.time()))
run_ltv_gaussian_experiments(params={'asset_value': np.array([0.9, 1.2, 1.0]),
'asset_covariance': np.diag([1.0, 1.0, 0.2]) * 0.02,
'asset_delta': np.array([[0.002, -0.003, 0.001]]),
'gamma': 1,
'window': 10},
plot=True, seed=int(time.time()))
run_wiener_experiments(params={'asset_value': np.array([0.9, 1.2, 1.0]),
'asset_covariance': np.diag([1.0, 1.0, 0.2]) * 0.02,
'gamma': 1,
'window': 10},
plot=True, seed=int(time.time()))
| 44.801775 | 111 | 0.606947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,815 | 0.185894 |
f3539262cba46ff60c76205e56634f13ca25de23 | 190 | py | Python | Python 201/enumeration.py | PacktPublishing/The-Complete-Python-Course-including-Django-Web-Framework | 402b35d4739ed91e50d6c3380cab6f085a46c52b | [
"MIT"
] | 3 | 2021-07-09T01:24:20.000Z | 2022-03-24T06:30:19.000Z | Python 201/enumeration.py | PacktPublishing/The-Complete-Python-Course-including-Django-Web-Framework | 402b35d4739ed91e50d6c3380cab6f085a46c52b | [
"MIT"
] | null | null | null | Python 201/enumeration.py | PacktPublishing/The-Complete-Python-Course-including-Django-Web-Framework | 402b35d4739ed91e50d6c3380cab6f085a46c52b | [
"MIT"
] | 3 | 2021-07-01T21:52:53.000Z | 2021-09-02T08:54:23.000Z | animals = ["Gully", "Rhubarb", "Zephyr", "Henry"]
for index, animal in enumerate(animals):
# if index % 2 == 0:
# continue
# print(animal)
print(f"{index+1}.\t{animal}")
| 27.142857 | 49 | 0.573684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.542105 |
f3581d9401d2f5345ec7ef679035930fcff3d232 | 2,548 | py | Python | openpeerpower/components/rituals_perfume_genie/__init__.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 1 | 2021-07-08T20:09:55.000Z | 2021-07-08T20:09:55.000Z | openpeerpower/components/rituals_perfume_genie/__init__.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 47 | 2021-02-21T23:43:07.000Z | 2022-03-31T06:07:10.000Z | openpeerpower/components/rituals_perfume_genie/__init__.py | OpenPeerPower/core | f673dfac9f2d0c48fa30af37b0a99df9dd6640ee | [
"Apache-2.0"
] | null | null | null | """The Rituals Perfume Genie integration."""
from datetime import timedelta
import logging
import aiohttp
from pyrituals import Account, Diffuser
from openpeerpower.config_entries import ConfigEntry
from openpeerpower.core import OpenPeerPower
from openpeerpower.exceptions import ConfigEntryNotReady
from openpeerpower.helpers.aiohttp_client import async_get_clientsession
from openpeerpower.helpers.update_coordinator import DataUpdateCoordinator
from .const import ACCOUNT_HASH, COORDINATORS, DEVICES, DOMAIN, HUBLOT
PLATFORMS = ["binary_sensor", "sensor", "switch"]
EMPTY_CREDENTIALS = ""
_LOGGER = logging.getLogger(__name__)
UPDATE_INTERVAL = timedelta(seconds=30)
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
"""Set up Rituals Perfume Genie from a config entry."""
session = async_get_clientsession(opp)
account = Account(EMPTY_CREDENTIALS, EMPTY_CREDENTIALS, session)
account.data = {ACCOUNT_HASH: entry.data.get(ACCOUNT_HASH)}
try:
account_devices = await account.get_devices()
except aiohttp.ClientError as err:
raise ConfigEntryNotReady from err
opp.data.setdefault(DOMAIN, {})[entry.entry_id] = {
COORDINATORS: {},
DEVICES: {},
}
for device in account_devices:
hublot = device.hub_data[HUBLOT]
coordinator = RitualsDataUpdateCoordinator(opp, device)
await coordinator.async_refresh()
opp.data[DOMAIN][entry.entry_id][DEVICES][hublot] = device
opp.data[DOMAIN][entry.entry_id][COORDINATORS][hublot] = coordinator
opp.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await opp.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
opp.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class RitualsDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Rituals Perufme Genie device data from single endpoint."""
def __init__(self, opp: OpenPeerPower, device: Diffuser) -> None:
"""Initialize global Rituals Perufme Genie data updater."""
self._device = device
super().__init__(
opp,
_LOGGER,
name=f"{DOMAIN}-{device.hub_data[HUBLOT]}",
update_interval=UPDATE_INTERVAL,
)
async def _async_update_data(self) -> None:
"""Fetch data from Rituals."""
await self._device.update_data()
| 31.85 | 90 | 0.724097 | 622 | 0.244113 | 0 | 0 | 0 | 0 | 1,362 | 0.534537 | 372 | 0.145997 |
f3588062eb6aef89f6985ff4837093034c99ff99 | 156 | py | Python | GwasJP/utils/__init__.py | 2waybene/GwasJP | ddd54b276655baa79556b5f10d7959099a2e3a0b | [
"BSD-3-Clause"
] | null | null | null | GwasJP/utils/__init__.py | 2waybene/GwasJP | ddd54b276655baa79556b5f10d7959099a2e3a0b | [
"BSD-3-Clause"
] | null | null | null | GwasJP/utils/__init__.py | 2waybene/GwasJP | ddd54b276655baa79556b5f10d7959099a2e3a0b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""This is utility folder that contains useful functions"""
from . import statFittings
# from .model_eval_cv_genotyped import *
| 17.333333 | 59 | 0.711538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.782051 |
f359307cc061fd8d95e439ae9c16aa80c33d1ac4 | 1,325 | py | Python | release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAndAccessoryCalculationType.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAndAccessoryCalculationType.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAndAccessoryCalculationType.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class FittingAndAccessoryCalculationType(Enum, IComparable, IFormattable, IConvertible):
"""
Enum of fitting and accessory pressure drop calculation type.
enum FittingAndAccessoryCalculationType,values: CalculateDefaultSettings (2),CalculatePressureDrop (1),Undefined (0),ValidateCurrentSettings (4)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
CalculateDefaultSettings = None
CalculatePressureDrop = None
Undefined = None
ValidateCurrentSettings = None
value__ = None
| 27.604167 | 221 | 0.616604 | 1,323 | 0.998491 | 0 | 0 | 0 | 0 | 0 | 0 | 574 | 0.433208 |
f35a2617d8ce671493d1be29ba2708befda35ba4 | 4,578 | py | Python | python/dnstest/netns.py | InfrastructureServices/dnssec-trigger-testing | 76cd50736f324ecbd60d775e7054b1ca50e90dbd | [
"MIT"
] | 1 | 2017-06-01T13:46:52.000Z | 2017-06-01T13:46:52.000Z | python/dnstest/netns.py | InfrastructureServices/dnssec-trigger-testing | 76cd50736f324ecbd60d775e7054b1ca50e90dbd | [
"MIT"
] | null | null | null | python/dnstest/netns.py | InfrastructureServices/dnssec-trigger-testing | 76cd50736f324ecbd60d775e7054b1ca50e90dbd | [
"MIT"
] | null | null | null | # Network namespaces
#
# Currently best-effort functions meaning no clean up after error.
import subprocess
import re
from dnstest.error import ConfigError
def _run_ip_command(arguments):
    """Run ``ip`` with *arguments* and return the completed process.

    :param arguments: List of strings appended after ``ip``
    :return: ``subprocess.CompletedProcess`` on success
    :raises ConfigError: when ``ip`` exits with a nonzero status
    """
    result = subprocess.run(
        ["ip"] + arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if result.returncode == 0:
        return result
    raise ConfigError("ip", arguments, result.stderr.decode("UTF-8"))
def _run_ip_ns_command(ns, arguments):
    """Run an ``ip`` command inside network namespace *ns*.

    :param ns: Namespace name
    :param arguments: List of strings passed to the namespaced ``ip``
    :return: ``subprocess.CompletedProcess`` on success
    :raises ConfigError: on failure (raised by _run_ip_command; the old
        post-call returncode re-check here was unreachable dead code, since
        _run_ip_command never returns a failed process)
    """
    return _run_ip_command(["netns", "exec", ns, "ip"] + arguments)
def _run_ns_command(ns, arguments):
    """Run an arbitrary command inside network namespace *ns*.

    :param ns: Namespace name
    :param arguments: Command as a list of strings
    :return: ``subprocess.CompletedProcess`` on success
    :raises ConfigError: on failure (raised by _run_ip_command; the old
        post-call returncode re-check here was unreachable dead code)
    """
    return _run_ip_command(["netns", "exec", ns] + arguments)
def _root_link_name(name):
return "v-r-" + name
def _ns_link_name(name):
return "v-" + name + "-r"
def new_namespace(name):
    """Create a new network namespace via ``ip netns add``.

    :param name: Name of the new namespace; "r" is reserved for the root
        namespace and is rejected.
    """
    if name == "r":
        raise ConfigError("ip", ["netns", "add", name], "Namespace name 'r' is reserved.")
    _run_ip_command(["netns", "add", name])
def connect_to_root_ns(name):
    """Create a veth pair linking the root namespace to namespace *name*.

    One end stays in the root namespace; the peer end is moved into *name*.

    :param name: Name of the namespace to be connected to the root ns
    """
    peer_in_root = _root_link_name(name)
    peer_in_ns = _ns_link_name(name)
    _run_ip_command(
        ["link", "add", peer_in_root, "type", "veth", "peer", "name", peer_in_ns]
    )
    _run_ip_command(["link", "set", peer_in_ns, "netns", name])
def root_address(id):
    """IPv4 address of the root side of link *id*: ``100.<id>.1.1``."""
    return ".".join(["100", str(id), "1", "1"])
def ns_address(id):
    """IPv4 address of the namespace side of link *id*: ``100.<id>.1.2``."""
    return ".".join(["100", str(id), "1", "2"])
def assign_addresses(name, id):
    """Assign IPv4 addresses to both ends of the root<->*name* veth pair.

    Root side gets ``100.<id>.1.1/24``, namespace side ``100.<id>.1.2/24``,
    and both links are brought up.

    :param name: Namespace name as a string
    :param id: Address ID as a number
    """
    root_link = _root_link_name(name)
    ns_link = _ns_link_name(name)
    # Configure and raise the root-namespace end.
    for cmd in (
        ["addr", "add", root_address(id) + "/24", "dev", root_link],
        ["link", "set", root_link, "up"],
    ):
        _run_ip_command(cmd)
    # Configure and raise the in-namespace end.
    for cmd in (
        ["addr", "add", ns_address(id) + "/24", "dev", ns_link],
        ["link", "set", ns_link, "up"],
    ):
        _run_ip_ns_command(name, cmd)
def get_ns_list():
    """Return the network namespaces known to ``ip netns``.

    :return: list of output lines (one namespace description per line)
    """
    listing = _run_ip_command(["netns"])
    return listing.stdout.decode("UTF-8").splitlines()
def get_ns_dev_addr(name):
    """Return the first global-scope IPv4 address on *name*'s veth link.

    Equivalent shell:
    ``ip -4 a show dev <link> scope global | grep -E "[0-9]*(\\.[0-9]*){3}/..."``

    :param name: Namespace name
    :return: dotted-quad address string, or None if no address is configured
    """
    link = _ns_link_name(name)
    ret = _run_ip_ns_command(name, ["-4", "a", "show", "dev", link, "scope", "global"])
    out = ret.stdout.decode("UTF-8")
    addresses = re.findall(r'[0-9]+(?:\.[0-9]+){3}', out)
    # The link normally carries exactly one address; with several present the
    # first is returned (same behavior as before, the old code had identical
    # duplicated branches for len==1 and len>1).
    return addresses[0] if addresses else None
def set_default_route(ns, net_id):
    """Route all traffic in *ns* through the root side of its veth pair."""
    gateway = root_address(net_id)
    device = _ns_link_name(ns)
    _run_ip_ns_command(ns, ["route", "add", "default", "via", gateway, "dev", device])
class NetworkInterface:
    """A namespaced virtual interface wired to the root namespace.

    Creating an instance has side effects: it creates the namespace, builds
    the veth pair, assigns 100.<net_id>.1.x/24 addresses and installs a
    default route through the root side.
    """

    def __init__(self, name, net_id):
        self.name = name
        self.net_id = net_id
        new_namespace(name)
        connect_to_root_ns(name)
        assign_addresses(name, net_id)
        set_default_route(name, net_id)

    def get_address(self):
        """IPv4 address assigned to the namespace side of the link."""
        return ns_address(self.net_id)

    def run_command(self, command):
        """Execute *command* (list of strings) inside this namespace.

        :param command: List of strings
        """
        _run_ns_command(self.name, command)
| 27.25 | 111 | 0.619921 | 531 | 0.11599 | 0 | 0 | 0 | 0 | 0 | 0 | 1,732 | 0.378331 |
f35a6ac3a0786559c425ea2a19ddcd546dac4596 | 14,868 | py | Python | scripts/matrixlengthandisoformanalysis/raw_matrixlengthandisoformanalysis.py | serenolopezdarwin/apanalysis | 1cb6569ed1643569e558db0a7dfe70b018c6242f | [
"MIT"
] | 4 | 2021-07-21T01:07:06.000Z | 2022-03-11T00:58:06.000Z | scripts/matrixlengthandisoformanalysis/raw_matrixlengthandisoformanalysis.py | serenolopezdarwin/apanalysis | 1cb6569ed1643569e558db0a7dfe70b018c6242f | [
"MIT"
] | 1 | 2021-09-09T09:19:01.000Z | 2021-09-11T08:55:55.000Z | scripts/matrixlengthandisoformanalysis/raw_matrixlengthandisoformanalysis.py | serenolopezdarwin/apanalysis | 1cb6569ed1643569e558db0a7dfe70b018c6242f | [
"MIT"
] | null | null | null | import csv
import gzip
import numpy as np
import pickle as pkl
import sys
INPUT_FILE_PATH = ""
OVERLAP_PATH = "/net/shendure/vol1/home/sereno/projects/cell_clustering/nobackup/newannotations/data/overlapfiles/"
PAS_DATASET = ""
CELL_DATA_DICT = {}
GENE_DATA_DICT = {}
REFERENCE_PATH = ""
'''
This script is a subscript of matrixrawandisoformanalysis.py
This script is set to be run in parallel.
This script takes input in the form of a gzipped bed data file containing overlap data in a folder named "overlapfiles/"
This script will provide output in the form of three vectors corresponding to cell id, gene id, and utr length.
This script's output is based on raw, noncentered length calculations and therefore isn't very useful for analysis.
Instead, this script's output can be used to calculate averages across the entire dataset.
To test this script use:
>python raw_matrixlengthandisoformanalysis.py \
/net/shendure/vol1/home/sereno/projects/cell_clustering/nobackup/cellbed/overlapfiles/utr_overlaps_001.bed.gz raw
Test with:
python /net/shendure/vol1/home/sereno/projects/scripts/matrixlengthandisoformanalysis/\
raw_matrixlengthandisoformanalysis.py data/overlapfiles/utr_overlaps_###.bed.gz PAS_DATASET \
/net/shendure/vol10/projects/cell_clustering/nobackup/newannotations/
'''
def isoform_analysis():
    """Calculates PAS usage and exon coverage for each 3'UTR-overlapping read in our input data.

    Streams the gzipped overlap BED at INPUT_FILE_PATH (rows grouped by cell),
    assigns each read to an annotated polyadenylation site (PAS) when one lies
    within [-300, +20] nt of the read end, and accumulates per-cell medians of
    the assigned 3'UTR lengths into per-gene aggregates.

    Returns:
        gene_dict: gene id -> [all cell medians,
                               {cluster/trajectory/age/subtrajectory: medians},
                               {trajectory: {age: medians}}]
        coverage_dict: gene -> {'filtered'/'unfiltered': {length: read count}}
        locus_dict: chrom -> {pas locus: assigned-read count}

    Side effects: writes per-read PAS assignments to a .bed under
    PAS_DATASET/assignedpas/ and prints the outlier (unassignable) read count.
    Depends on module globals INPUT_FILE_PATH, OVERLAP_PATH, PAS_DATASET,
    REFERENCE_PATH, CELL_DATA_DICT and GENE_DATA_DICT being set by main().
    """
    with open(REFERENCE_PATH + PAS_DATASET + "/pas_data_dict.pkl", 'rb') as pas_data_in:
        pas_data_dict = pkl.load(pas_data_in)
    # with open(REFERENCE_PATH + PAS_DATASET + "/pas_function.pkl", 'rb') as pas_function_in:
    #     pas_function = pkl.load(pas_function_in)
    pas_overlap_file_path = INPUT_FILE_PATH.replace(".bed.gz", ".bed")\
        .replace(OVERLAP_PATH, PAS_DATASET + "/assignedpas/")
    pas_overlap_file = open(pas_overlap_file_path, 'wt')
    outlier_count = 0
    file_input = gzip.open(INPUT_FILE_PATH, 'rt')
    reader = csv.reader(file_input, delimiter='\t')
    cell = 'first_cell'
    cell_age = 0
    cell_cluster = 0
    cell_trajectory = 0
    cell_subtrajectory = 0
    cell_dict = {}
    gene_dict = {}
    coverage_dict = {}
    locus_dict = {}
    # Pre-seed one locus sub-dict per chromosome present in the PAS annotation.
    for chrom in pas_data_dict:
        if chrom not in locus_dict:
            locus_dict[chrom] = {}
    for row in reader:
        chrom = row[0]
        strand = row[5]
        gene = row[9]
        # Skip reads on genes/chromosomes/strands absent from the references.
        if gene not in GENE_DATA_DICT or chrom not in pas_data_dict or strand not in pas_data_dict[chrom]\
                or gene not in pas_data_dict[chrom][strand]:
            continue
        new_cell = row[3]
        if new_cell not in CELL_DATA_DICT:
            continue
        # This makes our first cell id and cell dictionary.
        # NOTE(review): ``cell`` is initialized to 'first_cell' (underscore)
        # but compared against 'first cell' (space), so this branch is dead.
        # The first row is instead handled by the ``cell != new_cell`` branch
        # below, which is harmless because cell_dict is still empty there.
        if cell == 'first cell':
            cell_age = CELL_DATA_DICT[new_cell][0]
            cell_cluster = CELL_DATA_DICT[new_cell][2]
            cell_trajectory = CELL_DATA_DICT[new_cell][8]
            cell_subtrajectory = CELL_DATA_DICT[new_cell][16]
        # This executes each time the script encounters a new cell.
        elif cell != new_cell:
            # First, we format the previous cell's data for output.
            for gene_id in cell_dict:
                cell_gene_median = np.median(cell_dict[gene_id])
                if gene_id not in gene_dict:
                    gene_dict[gene_id] = [[], {}, {}]
                if cell_cluster not in gene_dict[gene_id][1]:
                    gene_dict[gene_id][1][cell_cluster] = []
                if cell_trajectory not in gene_dict[gene_id][1]:
                    # Splitting this into two different indices of the head dict greatly simplifies later algorithms.
                    gene_dict[gene_id][1][cell_trajectory] = []
                if cell_subtrajectory not in gene_dict[gene_id][1]:
                    gene_dict[gene_id][1][cell_subtrajectory] = []
                # This looks like it should not be necessary. It is.
                if cell_trajectory not in gene_dict[gene_id][2]:
                    # This will hold our data for trajectories at certain ages.
                    gene_dict[gene_id][2][cell_trajectory] = {}
                if cell_age not in gene_dict[gene_id][2][cell_trajectory]:
                    gene_dict[gene_id][2][cell_trajectory][cell_age] = []
                if cell_age not in gene_dict[gene_id][1]:
                    gene_dict[gene_id][1][cell_age] = []
                gene_dict[gene_id][0].append(cell_gene_median)
                gene_dict[gene_id][1][cell_cluster].append(cell_gene_median)
                gene_dict[gene_id][1][cell_trajectory].append(cell_gene_median)
                gene_dict[gene_id][1][cell_age].append(cell_gene_median)
                gene_dict[gene_id][1][cell_subtrajectory].append(cell_gene_median)
                gene_dict[gene_id][2][cell_trajectory][cell_age].append(cell_gene_median)
            # Then, we reset our cell ID and cell dictionary for the next cell's entries.
            cell_age = CELL_DATA_DICT[new_cell][0]
            cell_cluster = CELL_DATA_DICT[new_cell][2]
            cell_trajectory = CELL_DATA_DICT[new_cell][8]
            cell_subtrajectory = CELL_DATA_DICT[new_cell][16]
            cell_dict = {}
            cell = new_cell
        # By default, we set the read as an outlier, which will change if we can attach it to an annotated isoform.
        # otherwise we scan forwards/backwards strand-wise
        gene_id = GENE_DATA_DICT[gene][0]
        pas_length = 30001
        pas_hits = []
        pas_counts = []
        pas_loci = []
        # Read end locus depends on strand; collect every PAS within
        # [-300, +20] of it, along with that PAS's length and usage count.
        if strand == "+":
            locus = int(row[2])
            for pas_data in pas_data_dict[chrom][strand][gene]:
                pas = pas_data[0]
                pas_count = pas_data[3]
                pas_dist = locus - pas
                if -300 <= pas_dist <= 20:
                    pas_length = pas_data[1]
                    pas_hits.append((pas_dist, pas_length))
                    pas_loci.append(pas)
                    pas_counts.append(pas_count)
                else:
                    continue
        else:
            locus = int(row[1])
            for pas_data in pas_data_dict[chrom][strand][gene]:
                pas = pas_data[0]
                pas_count = pas_data[3]
                pas_dist = pas - locus
                if -300 <= pas_dist <= 20:
                    pas_length = pas_data[1]
                    pas_hits.append((pas_dist, pas_length))
                    pas_loci.append(pas)
                    pas_counts.append(pas_count)
                else:
                    continue
        # If no PAS overlaps are found, or the read's total length is an outlier, it is discarded.
        if pas_length > 30000:
            outlier_count = outlier_count + 1
            # Records the read's raw length for our coverage map.
            for exon in GENE_DATA_DICT[gene][1]:
                if exon[0] <= locus <= exon[1]:
                    if strand == "+":
                        raw_length = locus - exon[0] + exon[2]
                    else:
                        raw_length = exon[1] - locus + exon[2]
                    if gene not in coverage_dict:
                        coverage_dict[gene] = {'unfiltered': {raw_length: 1}}
                    elif raw_length in coverage_dict[gene]['unfiltered']:
                        coverage_dict[gene]['unfiltered'][raw_length] += 1
                    else:
                        coverage_dict[gene]['unfiltered'][raw_length] = 1
            continue
        else:
            # Writes the read and gene data to the pas assignment file.
            pas_overlap_file.write('\t'.join(row) + '\t')
            # If the read overlaps multiple PAS, we choose the nearest PAS and attach the read to it.
            # NOTE(review): despite the comment, the code picks the PAS with
            # the highest usage count, not the nearest one.
            if len(pas_hits) > 1:
                max_pas_idx = pas_counts.index(max(pas_counts))
                final_dist = pas_hits[max_pas_idx][0]
                final_length = pas_hits[max_pas_idx][1]
                locus = pas_loci[max_pas_idx]
                # Returns a list of tuples of pas hit indices and their corresponding probabilities
                # pas_weights = [(idx, pas_function[((pas_hit[0] + 25) // 5)]) for idx, pas_hit in enumerate(pas_hits)]
                # Sorts the list of tuples by probabilities, takes the index of the entry with the greatest probability,
                # then references that index in pas_hits to retrieve the corresponding PAS length as our final length.
                # final_pas = pas_hits[max(pas_weights, key=lambda t: t[1])[0]]
                # final_dist = final_pas[0]
                # final_length = final_pas[1]
            else:
                final_dist = pas_hits[0][0]
                final_length = pas_hits[0][1]
                locus = pas_loci[0]
            # Writes the PAS data to the pas assignment file.
            pas_overlap_file.write('\t'.join([str(locus), str(final_dist), str(final_length)]) + '\n')
            # Adds one to this locus' read assignment count
            if locus not in locus_dict[chrom]:
                locus_dict[chrom][locus] = 1
            else:
                locus_dict[chrom][locus] += 1
            raw_length = final_length + final_dist
            # Builds a dictionary of PAS lengths and their corresponding coverages, post-filtration.
            # NOTE(review): the next two assignments REPLACE coverage_dict[gene]
            # wholesale, discarding the other key ('filtered'/'unfiltered') if
            # only one was present -- likely intended to set the missing
            # sub-dict instead; confirm before relying on these counts.
            if gene not in coverage_dict:
                coverage_dict[gene] = {'filtered': {final_length: 1}, 'unfiltered': {raw_length: 1}}
            if 'filtered' not in coverage_dict[gene]:
                coverage_dict[gene] = {'filtered': {final_length: 1}}
            if 'unfiltered' not in coverage_dict[gene]:
                coverage_dict[gene] = {'unfiltered': {raw_length: 1}}
            if gene in coverage_dict and 'filtered' in coverage_dict[gene] and 'unfiltered' in coverage_dict[gene]:
                if final_length in coverage_dict[gene]['filtered']:
                    coverage_dict[gene]['filtered'][final_length] += 1
                else:
                    coverage_dict[gene]['filtered'][final_length] = 1
                if raw_length in coverage_dict[gene]['unfiltered']:
                    coverage_dict[gene]['unfiltered'][raw_length] += 1
                else:
                    coverage_dict[gene]['unfiltered'][raw_length] = 1
            if gene_id not in cell_dict:
                cell_dict[gene_id] = []
            cell_dict[gene_id].append(final_length)
    pas_overlap_file.close()
    # Executes on the last cell dataset of the file.
    for gene_id in cell_dict:
        cell_gene_median = np.median(cell_dict[gene_id])
        if gene_id not in gene_dict:
            gene_dict[gene_id] = [[], {}, {}]
        if cell_cluster not in gene_dict[gene_id][1]:
            gene_dict[gene_id][1][cell_cluster] = []
        if cell_trajectory not in gene_dict[gene_id][1]:
            gene_dict[gene_id][1][cell_trajectory] = []
        if cell_subtrajectory not in gene_dict[gene_id][1]:
            gene_dict[gene_id][1][cell_subtrajectory] = []
        if cell_trajectory not in gene_dict[gene_id][2]:
            gene_dict[gene_id][2][cell_trajectory] = {}
        if cell_age not in gene_dict[gene_id][2][cell_trajectory]:
            gene_dict[gene_id][2][cell_trajectory][cell_age] = []
        if cell_age not in gene_dict[gene_id][1]:
            gene_dict[gene_id][1][cell_age] = []
        gene_dict[gene_id][0].append(cell_gene_median)
        gene_dict[gene_id][1][cell_cluster].append(cell_gene_median)
        gene_dict[gene_id][1][cell_trajectory].append(cell_gene_median)
        gene_dict[gene_id][1][cell_age].append(cell_gene_median)
        gene_dict[gene_id][2][cell_trajectory][cell_age].append(cell_gene_median)
        gene_dict[gene_id][1][cell_subtrajectory].append(cell_gene_median)
    file_input.close()
    print(outlier_count)
    return gene_dict, coverage_dict, locus_dict
"""def calculate_threepseq_coverage(coverage_dict_build):
#Formats and adds the 3pseq data to our coverage dictionary.
threepseq_in_file_path = INPUT_FILE_PATH.replace("utr_overlaps", "3pseq")\
.replace(OVERLAP_PATH, "data/3pseq/")
file_input = gzip.open(threepseq_in_file_path, 'rt')
reader = csv.reader(file_input, delimiter='\t')
for row in reader:
gene = row[9]
if gene not in GENE_DATA_DICT:
continue
strand = row[5]
if strand == "+":
locus = int(row[2])
else:
locus = int(row[1])
coverage = float(row[3])
for exon in GENE_DATA_DICT[gene][1]:
if exon[0] <= locus <= exon[1]:
if strand == "+":
length = locus - exon[0] + exon[2]
else:
length = exon[1] - locus + exon[2]
if gene not in coverage_dict_build:
coverage_dict_build[gene] = {'3pseq': {length: coverage}}
elif '3pseq' not in coverage_dict_build[gene]:
coverage_dict_build[gene]['3pseq'] = {length: coverage}
elif length in coverage_dict_build[gene]['3pseq']:
coverage_dict_build[gene]['3pseq'][length] += coverage
else:
coverage_dict_build[gene]['3pseq'][length] = coverage
file_input.close()
coverage_dict = coverage_dict_build
return coverage_dict"""
def main():
    """Entry point for one parallel chunk.

    argv: [1] input utr_overlaps .bed.gz path, [2] PAS dataset name,
    [3] reference directory. Loads the cell/gene reference pickles into the
    module globals, runs isoform_analysis(), and pickles the per-gene medians
    into <reference>/<dataset>/raw/ and the PAS locus counts into .../pasloci/
    (output names derived from the input file name).
    """
    global INPUT_FILE_PATH
    INPUT_FILE_PATH = sys.argv[1]
    global PAS_DATASET
    PAS_DATASET = sys.argv[2]
    global REFERENCE_PATH
    REFERENCE_PATH = sys.argv[3]
    with open(REFERENCE_PATH + "cell_data_dict.pkl", 'rb') as cell_data_in:
        global CELL_DATA_DICT
        CELL_DATA_DICT = pkl.load(cell_data_in)
    with open(REFERENCE_PATH + "gene_data_dict.pkl", 'rb') as gene_data_in:
        global GENE_DATA_DICT
        GENE_DATA_DICT = pkl.load(gene_data_in)
    gene_dict, coverage_dict_build, locus_dict = isoform_analysis()
    #coverage_dict = calculate_threepseq_coverage(coverage_dict_build)
    # Our output is stored in a pkl file in the raw/ folder of our PAS dataset folder.
    print(INPUT_FILE_PATH)
    print(OVERLAP_PATH)
    print(REFERENCE_PATH)
    raw_out_file_path = INPUT_FILE_PATH.replace(".bed.gz", ".pkl")\
        .replace(OVERLAP_PATH, REFERENCE_PATH + PAS_DATASET + "/raw/")
    with open(raw_out_file_path, 'wb') as raw_out_file:
        pkl.dump(gene_dict, raw_out_file)
    print(raw_out_file_path)
    #coverage_out_file_path = INPUT_FILE_PATH.replace(".bed.gz", ".pkl")\
    #    .replace(OVERLAP_PATH, REFERENCE_PATH + PAS_DATASET + "/coverage/")
    #with open(coverage_out_file_path, 'wb') as coverage_out_file:
    #    pkl.dump(coverage_dict, coverage_out_file)
    locus_file_out_path = INPUT_FILE_PATH.replace(".bed.gz", ".pkl")\
        .replace(OVERLAP_PATH, REFERENCE_PATH + PAS_DATASET + "/pasloci/")
    with open(locus_file_out_path, 'wb') as locus_out_file:
        pkl.dump(locus_dict, locus_out_file)
    print(locus_file_out_path)
if __name__ == "__main__":
main()
| 44.38209 | 120 | 0.62046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,298 | 0.356336 |
f35ab5301440aa325869937b34d4d4a986a14ae0 | 1,367 | py | Python | 016 3Sum Closest.py | ChiFire/legend_LeetCode | 93fe97fef7e929fdbdc25fbb53955d44e14ecff8 | [
"MIT"
] | 872 | 2015-06-15T12:02:41.000Z | 2022-03-30T08:44:35.000Z | 016 3Sum Closest.py | ChiFire/legend_LeetCode | 93fe97fef7e929fdbdc25fbb53955d44e14ecff8 | [
"MIT"
] | 8 | 2015-06-21T15:11:59.000Z | 2022-02-01T11:22:34.000Z | 016 3Sum Closest.py | ChiFire/legend_LeetCode | 93fe97fef7e929fdbdc25fbb53955d44e14ecff8 | [
"MIT"
] | 328 | 2015-06-28T03:10:35.000Z | 2022-03-29T11:05:28.000Z | """
Given an array S of n integers, find three integers in S such that the sum is closest to a given number, target. Return
the sum of the three integers. You may assume that each input would have exactly one solution.
For example, given array S = {-1 2 1 -4}, and target = 1.
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
"""
__author__ = 'Danyang'
class Solution:
    def threeSumClosest(self, num, target):
        """
        Three pointers scanning algorithm (sort + two-pointer), O(n^2).
        Similar to 014 3Sum.

        Improvements over the original: the triple sum is computed once per
        iteration (it was recomputed up to four times), and the pointers
        always advance -- the old code skipped the pointer move on any
        iteration that improved the best distance, wasting a pass.

        :param num: array of integers
        :param target: target sum
        :return: sum of the three integers closest to target
            (0 if fewer than three elements, matching the original fallback)
        """
        num.sort()
        best_sum = 0
        best_dist = 1 << 32
        for i, val in enumerate(num):
            j = i + 1
            k = len(num) - 1
            while j < k:
                current = val + num[j] + num[k]
                dist = abs(target - current)
                if dist < best_dist:
                    best_dist = dist
                    best_sum = current
                if current == target:
                    return current
                # Move the pointer that can bring the sum closer to target.
                if current > target:
                    k -= 1
                else:
                    j += 1
        return best_sum
if __name__ == "__main__":
    # print() call form: the bare Python-2 ``print x`` statement is a
    # SyntaxError under Python 3; the call form works on both.
    print(Solution().threeSumClosest([1, 1, 1, 1], 0))
| 31.790698 | 120 | 0.512802 | 895 | 0.654718 | 0 | 0 | 0 | 0 | 0 | 0 | 568 | 0.415508 |
f35b1b6906f039410938832141f022b5a187af5f | 4,253 | py | Python | SENN/models.py | EdwardGuen/SENN-revisited | 41145a89214c6d978eb7c83e74c0f43007e0be4d | [
"MIT"
] | null | null | null | SENN/models.py | EdwardGuen/SENN-revisited | 41145a89214c6d978eb7c83e74c0f43007e0be4d | [
"MIT"
] | null | null | null | SENN/models.py | EdwardGuen/SENN-revisited | 41145a89214c6d978eb7c83e74c0f43007e0be4d | [
"MIT"
] | null | null | null | # torch
import torch.nn as nn
class Senn(nn.Module):
    """Self-Explaining Neural Network (SENN).

    Composes three sub-networks:
        conceptizer: encodes the input into concepts and reconstructs it
        parametrizer: produces per-concept relevance scores
        aggregator: combines concepts and relevances into predictions

    Inputs:
        x: image (b, n_channels, h, w)
    Returns:
        pred: vector of class probabilities (b, n_classes)
        concepts: concept vector (b, n_concepts)
        relevances: vector of concept relevances (b, n_concepts, n_classes)
        x_reconstructed: reconstructed image (b, n_channels, h, w)
    """

    def __init__(self, conceptizer, parametrizer, aggregator):
        super(Senn, self).__init__()
        self.conceptizer = conceptizer
        self.parametrizer = parametrizer
        self.aggregator = aggregator

    def forward(self, x):
        # Relevances and concepts are computed independently from x.
        relevances = self.parametrizer(x)
        concepts, x_reconstructed = self.conceptizer(x)
        pred = self.aggregator(concepts, relevances)
        return pred, (concepts, relevances), x_reconstructed
class VAESenn(nn.Module):
    """SENN with a variational conceptizer (VaeSENN).

    Inputs:
        x: image (b, n_channels, h, w)
    Returns:
        pred: vector of class probabilities (b, n_classes)
        concepts: concept vector (b, n_concepts)
        relevances: vector of concept relevances (b, n_concepts, n_classes)
        x_reconstructed: reconstructed image (b, n_channels, h, w)
        log_var: log variance of the concept posteriors
        mean: mean of the concept posteriors
    """

    def __init__(self, conceptizer, parametrizer, aggregator):
        super(VAESenn, self).__init__()
        self.conceptizer = conceptizer
        self.parametrizer = parametrizer
        self.aggregator = aggregator

    def forward(self, x):
        # Relevances first; the conceptizer additionally yields the posterior
        # statistics and the reconstruction.
        relevances = self.parametrizer(x)
        concepts, mean, log_var, x_recon = self.conceptizer(x)
        pred = self.aggregator(concepts, relevances)
        return pred, (concepts, relevances), x_recon, log_var, mean
class GaussSiamSenn(nn.Module):
    """Siamese/Gaussian SENN variant (VSiamSENN).

    !! Naming not consistent with report

    Inputs:
        x: image (b, n_channels, h, w); x_eq / x_diff are the positive and
        negative siamese pairs, used only while the conceptizer is training.
    Returns:
        pred: vector of class probabilities (b, n_classes)
        concepts: concept vector (b, n_concepts)
        relevances: vector of concept relevances (b, n_concepts, n_classes)
        plus the (L1, L2, KL) loss terms when the conceptizer is in training
        mode.
    """

    def __init__(self, conceptizer, parametrizer, aggregator):
        super(GaussSiamSenn, self).__init__()
        self.conceptizer = conceptizer
        self.parametrizer = parametrizer
        self.aggregator = aggregator

    def forward(self, x, x_eq=None, x_diff=None):
        in_training = self.conceptizer.training
        if in_training:
            concepts, (L1, L2, KL) = self.conceptizer.forward_training(x, x_eq, x_diff)
        else:
            concepts = self.conceptizer(x)
        relevances = self.parametrizer(x)
        pred = self.aggregator(concepts, relevances)
        if in_training:
            return pred, (concepts, relevances), (L1, L2, KL)
        return pred, (concepts, relevances)
class InvarSennM(nn.Module):
    """InvarSENN: an m1 network producing disentangled codes and an m2
    network that cross-reconstructs them.

    Inputs:
        x: image (b, n_channels, h, w)
    Returns:
        pred: vector of class probabilities (b, n_classes)
        e1: concept vector (b, n_concepts)
        relevances: vector of concept relevances (b, n_concepts, n_classes)
        e2: noise vector (b, n_concepts)
        x_reconstructed: reconstructed input image
        (e1_reconstructed, e2_reconstructed): codes rebuilt by m2
    """

    def __init__(self, m1, m2):
        super(InvarSennM, self).__init__()
        self.m1 = m1
        self.m2 = m2

    def forward(self, x):
        pred, (e1, relevances), e2, x_reconstructed = self.m1(x)
        e1_hat, e2_hat = self.m2(e1, e2)
        return pred, (e1, relevances), e2, x_reconstructed, (e1_hat, e2_hat)
| 33.753968 | 96 | 0.648013 | 4,216 | 0.9913 | 0 | 0 | 0 | 0 | 0 | 0 | 2,068 | 0.486245 |
f35b95ac2f68a6d14b293d5a638873dedb57f7b1 | 1,827 | py | Python | src/pets/crud.py | nadundesilva/sample-open-telemetry | 6ca0d6e16ed3968935ab6994cc9f74abf8886140 | [
"Apache-2.0"
] | 2 | 2022-02-01T01:09:27.000Z | 2022-03-07T17:54:17.000Z | src/pets/crud.py | nadundesilva/samples-pet-store | 6ca0d6e16ed3968935ab6994cc9f74abf8886140 | [
"Apache-2.0"
] | null | null | null | src/pets/crud.py | nadundesilva/samples-pet-store | 6ca0d6e16ed3968935ab6994cc9f74abf8886140 | [
"Apache-2.0"
] | null | null | null | """Copyright (c) 2021, Nadun De Silva. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Tuple
from sqlalchemy.orm import Session
from sqlalchemy.sql.sqltypes import Boolean
from . import db_models as models
from data import schemas
def create_pet(db: Session, pet: schemas.Pet) -> schemas.Pet:
    """Persist *pet* as a new row and return the stored record as a schema."""
    row = models.Pet(
        display_name=pet.display_name,
        kind=pet.kind,
        current_price=pet.current_price,
        available_amount=pet.available_amount,
    )
    db.add(row)
    db.commit()
    # Refresh so server-generated fields (e.g. the id) are populated.
    db.refresh(row)
    return schemas.Pet.from_orm(row)
def get_available_pets(db: Session, limit: int, offset: int) -> List[schemas.Pet]:
    """Return one page of pets that still have stock available."""
    page = (
        db.query(models.Pet)
        .filter(models.Pet.available_amount > 0)
        .limit(limit)
        .offset(offset)
    )
    return [schemas.Pet.from_orm(row) for row in page.all()]
def reserve_pet(db: Session, pet_id: int, amount: int) -> Tuple[bool, schemas.Pet]:
    """Try to reserve *amount* units of pet *pet_id*.

    :return: (success flag, current state of the pet). The old annotation
        claimed ``Tuple[Boolean, int]`` (sqlalchemy's column type and an int),
        but the function has always returned a bool and a schemas.Pet.
    """
    db_pet = db.query(models.Pet).filter(models.Pet.id == pet_id).first()
    is_success = False
    # NOTE(review): with ``>`` the last unit(s) can never be reserved when
    # available_amount == amount; confirm whether ``>=`` was intended.
    if db_pet.available_amount > amount:
        db_pet.available_amount = models.Pet.available_amount - amount
        db.commit()
        db.refresh(db_pet)
        is_success = True
    return is_success, schemas.Pet.from_orm(db_pet)
f35be7ca6e9a9da74f2d705603c106d9b9879d68 | 1,740 | py | Python | problems/1232.py | mengshun/Leetcode | 8bb676f2fff093e1417a4bed13d9ad708149be78 | [
"MIT"
] | null | null | null | problems/1232.py | mengshun/Leetcode | 8bb676f2fff093e1417a4bed13d9ad708149be78 | [
"MIT"
] | null | null | null | problems/1232.py | mengshun/Leetcode | 8bb676f2fff093e1417a4bed13d9ad708149be78 | [
"MIT"
] | null | null | null | """
1232. 缀点成线
在一个 XY 坐标系中有一些点,我们用数组 coordinates 来分别记录它们的坐标,
其中 coordinates[i] = [x, y] 表示横坐标为 x、纵坐标为 y 的点。
请你来判断,这些点是否在该坐标系中属于同一条直线上,是则返回 true,否则请返回 false。
"""
class XYCheck:
    """Precomputes the line through the first two coordinates and tests
    whether further points lie on it.

    Fixes a ZeroDivisionError in the original: a vertical line (x1 == x2)
    has no finite slope, so it is handled as a special case.
    """

    def __init__(self, coordinates):
        self.a = self.b = 0
        x1, y1 = coordinates[0]
        x2, y2 = coordinates[1]
        self._vertical = x1 == x2
        if self._vertical:
            # For a vertical line, b stores the shared x coordinate.
            self.b = float(x1)
        else:
            self.a = float(y1 - y2) / float(x1 - x2)
            self.b = float(y1 - x1 * self.a)

    def check(self, x, y):
        """Return True when (x, y) lies on the precomputed line."""
        if self._vertical:
            return x == self.b
        return self.a * x + self.b == y
def checkStraightLine(coordinates):
    """True iff every point lies on the line through the first two points."""
    line = XYCheck(coordinates)
    return all(line.check(x, y) for x, y in coordinates[2:])
def checkStraightLine2(coordinates):
    """Collinearity test via cross multiplication (no division, so vertical
    lines are handled naturally): every later point (px, py) must satisfy
    (y2 - y1) * (px - x1) == (x2 - x1) * (py - y1)."""
    (x1, y1), (x2, y2) = coordinates[0], coordinates[1]
    dx = x2 - x1
    dy = y2 - y1
    for px, py in coordinates[2:]:
        if dy * (px - x1) != dx * (py - y1):
            return False
    return True
print(checkStraightLine([[1,2],[2,3],[3,4],[4,5],[5,6],[6,7]])) #true
print(checkStraightLine2([[1,2],[2,3],[3,4],[4,5],[5,6],[6,7]])) #true
print(checkStraightLine([[1,1],[2,2],[3,4],[4,5],[5,6],[7,7]])) #false
print(checkStraightLine2([[1,1],[2,2],[3,4],[4,5],[5,6],[7,7]])) #false
"""
https://leetcode-cn.com/problems/check-if-it-is-a-straight-line/solution/da-qia-by-shun-zi-6-6s1g/
将原列表的点移动相同的距离,使其经过原点,
利用方程式 A * x + B * y = 0, 得出常数A和B,
带入后续的点进行计算, 一旦发现结果不为0, 直接返回结果
"""
print("========NEXT===斜率计算=======")
def xielv(coordinates):
    """Slope check via cross multiplication: every later point (x, y) must
    satisfy (x1 - x2) * (y2 - y) == (x2 - x) * (y1 - y2), i.e. equal slopes
    without ever dividing (safe for vertical lines)."""
    x1, y1 = coordinates[0]
    x2, y2 = coordinates[1]
    return all(
        (x1 - x2) * (y2 - y) == (x2 - x) * (y1 - y2)
        for x, y in coordinates[2:]
    )
print(xielv([[1,2],[2,3],[3,4],[4,5],[5,6],[6,7]])) #true
print(xielv([[1,1],[2,2],[3,4],[4,5],[5,6],[7,7]])) #false
| 28.52459 | 98 | 0.562069 | 298 | 0.147233 | 0 | 0 | 0 | 0 | 0 | 0 | 698 | 0.344862 |
f35c705d9c5e14b5fe17b4f0cd432b05290071c9 | 6,651 | py | Python | test/test_utils.py | scanon/execution_engine2 | ce98b69f44fbd5353cf92cdc413080c8262a188e | [
"MIT"
] | null | null | null | test/test_utils.py | scanon/execution_engine2 | ce98b69f44fbd5353cf92cdc413080c8262a188e | [
"MIT"
] | null | null | null | test/test_utils.py | scanon/execution_engine2 | ce98b69f44fbd5353cf92cdc413080c8262a188e | [
"MIT"
] | null | null | null | from configparser import ConfigParser
import os
from dotenv import load_dotenv
import pathlib
from shutil import copyfile
from execution_engine2.db.models.models import Job, JobInput, Meta
from dateutil import parser as dateparser
import requests
import json
from datetime import datetime
from execution_engine2.exceptions import MalformedTimestampException
from execution_engine2.db.models.models import Status
def get_example_job(
    user: str = "boris", wsid: int = 123, authstrat: str = "kbaseworkspace"
) -> Job:
    """Build a minimal queued Job document populated with fixed test values."""
    meta = Meta()
    meta.cell_id = "ApplePie"

    job_input = JobInput()
    job_input.wsid = wsid
    job_input.method = "method"
    job_input.requested_release = "requested_release"
    job_input.params = {}
    job_input.service_ver = "dev"
    job_input.app_id = "super_module.super_function"
    job_input.narrative_cell_info = meta

    job = Job()
    job.user = user
    job.wsid = wsid
    job.job_input = job_input
    job.status = "queued"
    job.authstrat = authstrat
    return job
def _create_sample_params(self):
    # NOTE(review): this is a module-level function that takes ``self`` and
    # reads ``self.job_id`` -- it appears to have been lifted out of a test
    # class and only works when called with an object exposing ``job_id``.
    params = dict()
    params["job_id"] = self.job_id
    params["user"] = "kbase"
    params["token"] = "test_token"
    params["client_group_and_requirements"] = "njs"
    return params
def read_config_into_dict(config="deploy.cfg", section="execution_engine2"):
    """Read one section of an INI-style config file into a plain dict.

    :param config: path to the config file
    :param section: section name to extract
    :return: dict of option name (lower-cased by ConfigParser) -> value

    The old implementation shadowed the ``config`` parameter with the result
    dict and copied the section by hand; ``dict()`` consumes the section
    proxy directly.
    """
    config_parser = ConfigParser()
    config_parser.read(config)
    return dict(config_parser[section])
def bootstrap():
    # Copy the checked-in test environment file into the current working
    # directory (if not already there) and load it into os.environ.
    test_env = "test.env"
    # pwd resolves to the repository root: two directory levels up from this
    # module's file.
    pwd = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
    # os.path.exists is checked against the CWD-relative name, so re-running
    # from the same directory is a no-op copy.
    if not os.path.exists(test_env):
        copyfile(f"{pwd}/test/env/{test_env}", f"{test_env}")
    load_dotenv("test.env", verbose=True)
# flake8: noqa: C901
def validate_job_state(state):
    """
    Validates whether a returned Job State has all the required fields with the right format.
    If all is well, returns True,
    otherwise this prints out errors to the command line and returns False.
    Can be just used with assert in tests, like "assert validate_job_state(state)"

    NOTE(review): despite the above, malformed timestamps do not return
    False -- step 3 below raises MalformedTimestampException instead.
    """
    # Field specifications: name -> expected Python type.
    required_fields = {
        "job_id": str,
        "user": str,
        "wsid": int,
        "authstrat": str,
        "job_input": dict,
        "updated": int,
        "created": int,
        "status": str,
    }
    optional_fields = {
        "estimating": int,
        "queued": int,
        "running": int,
        "finished": int,
        "error_code": int,
        "terminated_code": int,
        "errormsg": str,
    }
    timestamp_fields = [
        "created",
        "updated",
        "estimating",
        "queued",
        "running",
        "completed",
    ]
    # fields that have to be present based on the context of different statuses
    # NOTE(review): "completed" appears here and in timestamp_fields, but
    # optional_fields defines "finished" instead -- a job whose status is
    # "completed" would hit a KeyError at optional_fields[field] below.
    # Confirm which name the Status enum actually uses.
    valid_statuses = vars(Status)["_member_names_"]
    status_context = {
        "estimating": ["estimating"],
        "running": ["running"],
        "completed": ["completed"],
        "error": ["error_code", "errormsg"],
        "terminated": ["terminated_code"],
    }
    # 1. Make sure required fields are present and of the correct type
    missing_reqs = list()
    wrong_reqs = list()
    for req in required_fields.keys():
        if req not in state:
            missing_reqs.append(req)
        elif not isinstance(state[req], required_fields[req]):
            wrong_reqs.append(req)
    if missing_reqs or wrong_reqs:
        print(f"Job state is missing required fields: {missing_reqs}.")
        for req in wrong_reqs:
            print(
                f"Job state has faulty req - {req} should be of type {required_fields[req]}, but had value {state[req]}."
            )
        return False
    # 2. Make sure that context-specific fields are present and the right type
    status = state["status"]
    if status not in valid_statuses:
        print(f"Job state has invalid status {status}.")
        return False
    if status in status_context:
        context_fields = status_context[status]
        missing_context = list()
        wrong_context = list()
        for field in context_fields:
            if field not in state:
                missing_context.append(field)
            elif not isinstance(state[field], optional_fields[field]):
                wrong_context.append(field)
        if missing_context or wrong_context:
            print(f"Job state is missing status context fields: {missing_context}.")
            for field in wrong_context:
                print(
                    f"Job state has faulty context field - {field} should be of type {optional_fields[field]}, but had value {state[field]}."
                )
            return False
    # 3. Make sure timestamps are really timestamps
    # Each value is accepted if it parses as either a second-resolution or a
    # millisecond-resolution POSIX timestamp.
    bad_ts = list()
    for ts_type in timestamp_fields:
        if ts_type in state:
            is_second_ts = is_timestamp(state[ts_type])
            if not is_second_ts:
                print(state[ts_type], "is not a second ts")
            is_ms_ts = is_timestamp(state[ts_type] / 1000)
            if not is_ms_ts:
                print(state[ts_type], "is not a millisecond ts")
            if not is_second_ts and not is_ms_ts:
                bad_ts.append(ts_type)
    if bad_ts:
        for ts_type in bad_ts:
            print(
                f"Job state has a malformatted timestamp: {ts_type} with value {state[ts_type]}"
            )
        raise MalformedTimestampException()
    return True
def is_timestamp(ts: int) -> bool:
    """
    Return True if ts is a Unix timestamp accepted by datetime.fromtimestamp,
    False otherwise.

    Callers may probe millisecond-scale values; those are expected to fail here
    and be retried by the caller after dividing by 1000.
    """
    try:
        datetime.fromtimestamp(ts)
        return True
    # fromtimestamp raises ValueError for out-of-range years, and on some
    # platforms OverflowError/OSError for values outside the platform's
    # time_t range -- all of them mean "not a valid timestamp" here.
    # (The original caught only ValueError and claimed to use dateutil.)
    except (ValueError, OverflowError, OSError):
        return False
def custom_ws_perm_maker(user_id: str, ws_perms: dict):
    """
    Build a requests_mock adapter that fakes the workspace-permission service.

    :param user_id: str - the user id
    :param ws_perms: dict of permissions, keys are ws ids, values are permission.
        Example: {123: "a", 456: "w"} means workspace id 123 has admin
        permissions, and 456 has write permission
    :return: an adapter function to be passed to request_mock
    """

    def perm_adapter(request):
        requested = request.json().get("params")[0].get("workspaces")
        # Workspaces without an explicit entry fall back to "n" (no access).
        perms = [{user_id: ws_perms.get(ws["id"], "n")} for ws in requested]
        mock_response = requests.Response()
        mock_response.status_code = 200
        payload = {"result": [{"perms": perms}], "version": "1.1"}
        mock_response._content = bytes(json.dumps(payload), "UTF-8")
        return mock_response

    return perm_adapter
| 30.791667 | 141 | 0.62562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,314 | 0.347918 |
f35d58fa5075838415c7a9cbb571b937830b43fb | 999 | py | Python | tests/servers/test_tcp.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | 2 | 2020-09-02T13:46:06.000Z | 2020-10-11T16:11:02.000Z | tests/servers/test_tcp.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | null | null | null | tests/servers/test_tcp.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | 4 | 2018-10-15T07:08:34.000Z | 2019-11-26T01:52:47.000Z | # -*- coding: utf-8 -*-
import asyncio
import socket
from xTool.servers.tcp import TCPServer
def test_tcp_server(aiomisc_unused_port):
    """End-to-end check: TCPServer echoes a line back and records it."""
    loop = asyncio.get_event_loop()

    class TestTcpService(TCPServer):
        # Class-level buffer so the assertion at the end can inspect what
        # the handler received.
        DATA = []

        async def handle_client(self, reader: asyncio.StreamReader,
                                writer: asyncio.StreamWriter):
            # Echo every received line back to the client and remember it.
            while True:
                data = await reader.readline()
                writer.write(data)
                self.DATA.append(data)

    service = TestTcpService(
        "127.0.0.1", aiomisc_unused_port, **{"loop": loop})

    async def writer():
        # Plain blocking socket client; the sleep yields to the loop so the
        # server-side handler can run before the loop is torn down.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        with sock:
            sock.connect(("127.0.0.1", aiomisc_unused_port))
            sock.sendall(b"hello server\n")
            await asyncio.sleep(1)

    loop.run_until_complete(service.start())
    loop.run_until_complete(writer())
    loop.close()
    assert TestTcpService.DATA == [b"hello server\n"]
f35da1b1d84b79f3638ec8c0791f85ce28bc153d | 7,514 | py | Python | backend/benefit/applications/tests/factories.py | City-of-Helsinki/kesaseteli | 964f801c2dba72c4105b6e436b12b821b199d6d2 | [
"MIT"
] | 2 | 2021-05-10T09:28:35.000Z | 2021-05-17T12:15:34.000Z | backend/benefit/applications/tests/factories.py | City-of-Helsinki/yjdh | 1c07576b456d2be9c3171363450ed46de2c1bbcb | [
"MIT"
] | 931 | 2021-05-21T15:24:35.000Z | 2022-03-31T20:07:40.000Z | backend/benefit/applications/tests/factories.py | City-of-Helsinki/yjdh | 1c07576b456d2be9c3171363450ed46de2c1bbcb | [
"MIT"
] | 6 | 2021-07-06T11:07:02.000Z | 2022-02-07T12:42:21.000Z | import decimal
import itertools
import random
from datetime import date, timedelta
import factory
from applications.enums import ApplicationStatus, ApplicationStep, BenefitType
from applications.models import (
AhjoDecision,
Application,
APPLICATION_LANGUAGE_CHOICES,
ApplicationBasis,
ApplicationBatch,
DeMinimisAid,
Employee,
)
from calculator.models import Calculation
from companies.tests.factories import CompanyFactory
from users.tests.factories import HandlerFactory
class DeMinimisAidFactory(factory.django.DjangoModelFactory):
    """Factory for DeMinimisAid grants attached to an application."""

    granter = factory.Faker("sentence", nb_words=2)

    # delay evaluation of date_start and date_end so that any freeze_time takes effect
    granted_at = factory.Faker(
        "date_between_dates",
        date_start=factory.LazyAttribute(
            lambda _: date.today() - timedelta(days=365 * 2)
        ),
        date_end=factory.LazyAttribute(lambda _: date.today()),
    )
    amount = factory.Faker("pyint", min_value=1, max_value=100000)
    # Monotonically increasing ordering within an application's aid list.
    ordering = factory.Iterator(itertools.count(0))

    class Meta:
        model = DeMinimisAid
class ApplicationBasisFactory(factory.django.DjangoModelFactory):
    """Factory for ApplicationBasis rows with guaranteed-unique identifiers."""

    identifier = factory.Sequence(
        lambda id: f"basis_identifier_{id}"
    )  # ensure it is unique

    class Meta:
        model = ApplicationBasis
class ApplicationFactory(factory.django.DjangoModelFactory):
    """Factory for a DRAFT benefit application with a company, an employee,
    two de minimis aids and 1-5 application bases attached."""

    company = factory.SubFactory(CompanyFactory)
    employee = factory.RelatedFactory(
        "applications.tests.factories.EmployeeFactory",
        factory_related_name="application",
    )
    company_name = factory.Faker("sentence", nb_words=2)
    company_form = factory.Faker("sentence", nb_words=1)
    company_department = factory.Faker("street_address")
    official_company_street_address = factory.Faker("street_address")
    official_company_city = factory.Faker("city")
    official_company_postcode = factory.Faker("postcode")
    use_alternative_address = factory.Faker("boolean")
    alternative_company_street_address = factory.Faker("street_address")
    alternative_company_city = factory.Faker("city")
    alternative_company_postcode = factory.Faker("postcode", locale="fi_FI")
    company_bank_account_number = factory.Faker("iban", locale="fi_FI")
    company_contact_person_phone_number = factory.Sequence(
        lambda n: f"050-10000{n}"
    )  # max.length in validation seems to be 10 digits
    company_contact_person_email = factory.Faker("email")
    company_contact_person_first_name = factory.Faker("first_name")
    company_contact_person_last_name = factory.Faker("last_name")
    association_has_business_activities = None
    applicant_language = factory.Faker(
        "random_element", elements=[v[0] for v in APPLICATION_LANGUAGE_CHOICES]
    )
    co_operation_negotiations = factory.Faker("boolean")
    # Description is only filled in when negotiations are ongoing.
    co_operation_negotiations_description = factory.LazyAttribute(
        lambda o: factory.Faker("sentence") if o.co_operation_negotiations else ""
    )
    pay_subsidy_granted = False
    pay_subsidy_percent = None
    additional_pay_subsidy_percent = None
    apprenticeship_program = factory.Faker("boolean")
    archived = factory.Faker("boolean")
    application_step = ApplicationStep.STEP_1
    benefit_type = BenefitType.EMPLOYMENT_BENEFIT
    # Benefit period: starts this calendar year, lasts roughly 1-12 months.
    start_date = factory.Faker(
        "date_between_dates",
        date_start=date(date.today().year, 1, 1),
        date_end=date.today() + timedelta(days=100),
    )
    end_date = factory.LazyAttribute(
        lambda o: o.start_date + timedelta(days=random.randint(31, 364))
    )
    de_minimis_aid = True
    status = ApplicationStatus.DRAFT

    @factory.post_generation
    def bases(self, created, extracted, **kwargs):
        # Attach basis_count bases (random 1-5 unless given as a kwarg).
        if basis_count := kwargs.pop("basis_count", random.randint(1, 5)):
            for bt in ApplicationBasisFactory.create_batch(basis_count, **kwargs):
                self.bases.add(bt)

    de_minimis_1 = factory.RelatedFactory(
        DeMinimisAidFactory,
        factory_related_name="application",
    )
    de_minimis_2 = factory.RelatedFactory(
        DeMinimisAidFactory,
        factory_related_name="application",
    )

    class Meta:
        model = Application
class ReceivedApplicationFactory(ApplicationFactory):
    """Application in RECEIVED state with approved terms and a calculation."""

    status = ApplicationStatus.RECEIVED
    applicant_terms_approval = factory.RelatedFactory(
        "terms.tests.factories.ApplicantTermsApprovalFactory",
        factory_related_name="application",
    )

    # NOTE: the original also declared
    #   calculation = factory.RelatedFactory("calculator.tests.factories.CalculationFactory", ...)
    # but the post_generation hook below is bound to the same class-body name,
    # so that RelatedFactory declaration was dead code and has been removed.
    @factory.post_generation
    def calculation(self, created, extracted, **kwargs):
        # Build the calculation directly from the application and pin a fixed
        # benefit amount so test assertions are deterministic.
        self.calculation = Calculation.objects.create_for_application(self)
        self.calculation.calculated_benefit_amount = decimal.Decimal("321.00")
        self.calculation.save()
class HandlingApplicationFactory(ReceivedApplicationFactory):
    """Application in HANDLING state, with a handler assigned."""

    status = ApplicationStatus.HANDLING

    @factory.post_generation
    def calculation(self, created, extracted, **kwargs):
        # Override the parent's calculation hook: different fixed amount,
        # plus a handler user.
        self.calculation = Calculation.objects.create_for_application(self)
        self.calculation.calculated_benefit_amount = decimal.Decimal("123.00")
        self.calculation.handler = HandlerFactory()
        self.calculation.save()
class DecidedApplicationFactory(HandlingApplicationFactory):
    """Application whose handling finished with an ACCEPTED decision."""

    status = ApplicationStatus.ACCEPTED
class EmployeeFactory(factory.django.DjangoModelFactory):
    """Factory for Employee rows; each employee belongs to one application."""

    # pass employee=None to prevent ApplicationFactory from creating another employee
    application = factory.SubFactory(ApplicationFactory, employee=None)
    first_name = factory.Faker("first_name")
    last_name = factory.Faker("last_name")
    social_security_number = factory.Faker("ssn", locale="fi_FI")
    phone_number = factory.Sequence(lambda n: f"050-10000{n}")
    email = factory.Faker("email")
    employee_language = factory.Faker(
        "random_element", elements=[v[0] for v in APPLICATION_LANGUAGE_CHOICES]
    )
    job_title = factory.Faker("job")
    monthly_pay = factory.Faker("random_int", max=5000)
    vacation_money = factory.Faker("random_int", max=5000)
    other_expenses = factory.Faker("random_int", max=5000)
    working_hours = factory.Faker("random_int", min=18, max=40)
    is_living_in_helsinki = factory.Faker("boolean")
    collective_bargaining_agreement = factory.Faker("words")

    class Meta:
        model = Employee
class ApplicationBatchFactory(factory.django.DjangoModelFactory):
    """Factory for an Ahjo decision batch containing two decided applications
    whose status matches the batch's proposal_for_decision."""

    proposal_for_decision = AhjoDecision.DECIDED_ACCEPTED
    application_1 = factory.RelatedFactory(
        DecidedApplicationFactory,
        factory_related_name="batch",
        status=factory.SelfAttribute("batch.proposal_for_decision"),
    )
    application_2 = factory.RelatedFactory(
        DecidedApplicationFactory,
        factory_related_name="batch",
        status=factory.SelfAttribute("batch.proposal_for_decision"),
    )
    decision_maker_title = factory.Faker("sentence", nb_words=2)
    decision_maker_name = factory.Faker("name")
    section_of_the_law = factory.Faker("word")
    # Decision happened within the last 30 days; lazy so freeze_time applies.
    decision_date = factory.Faker(
        "date_between_dates",
        date_start=factory.LazyAttribute(lambda _: date.today() - timedelta(days=30)),
        date_end=factory.LazyAttribute(lambda _: date.today()),
    )
    expert_inspector_name = factory.Faker("name")
    expert_inspector_email = factory.Faker("email")

    class Meta:
        model = ApplicationBatch
| 36.833333 | 86 | 0.729838 | 6,987 | 0.929864 | 0 | 0 | 856 | 0.113921 | 0 | 0 | 1,051 | 0.139872 |
f35db14649e34fcd913939f8f437aec72367b212 | 344 | py | Python | Python/kata/bankocr.py | caichinger/BankOCR-Outside-in-Kata | 0296cc64d2559464300d2eb996bae41a5e13e26b | [
"BSD-3-Clause"
] | 2 | 2021-04-26T19:21:48.000Z | 2021-06-05T15:48:08.000Z | Python/kata/bankocr.py | caichinger/BankOCR-Outside-in-Kata | 0296cc64d2559464300d2eb996bae41a5e13e26b | [
"BSD-3-Clause"
] | 1 | 2021-01-21T19:50:21.000Z | 2021-01-21T21:01:07.000Z | Python/kata/bankocr.py | caichinger/BankOCR-Outside-in-Kata | 0296cc64d2559464300d2eb996bae41a5e13e26b | [
"BSD-3-Clause"
] | 3 | 2020-09-19T07:42:26.000Z | 2021-01-20T18:08:28.000Z | # coding=utf-8
from kata.accountnumber import AccountNumber
class BankOcr(object):
    """Example for the outside interface of the API we need to create."""

    def __init__(self):
        # Stateless for now; kept explicit for the kata's outside-in workflow.
        pass

    def parse(self, raw_lines):
        # Parse raw OCR text lines into account numbers.
        # :param raw_lines: list of raw text lines from the OCR input
        # TODO return an array of AccountNumber
        raise NotImplementedError("not implemented")
f35e7aeafd5d8413da522d85881d20ec08c21815 | 3,419 | py | Python | createData.py | msiampou/distributed-fault-tolerant-kv-store | 26dd701ef133c8f463b364e085773b551dfb98ce | [
"MIT"
] | 7 | 2021-04-17T18:47:36.000Z | 2022-03-24T13:09:21.000Z | createData.py | msiampou/efficient-kv-store | 26dd701ef133c8f463b364e085773b551dfb98ce | [
"MIT"
] | null | null | null | createData.py | msiampou/efficient-kv-store | 26dd701ef133c8f463b364e085773b551dfb98ce | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys, getopt
import random
import string
import sys
import math
def create_random_value(type, maxlength):
    """Return a random value of the requested type.

    "int"/"float"/"double" get exactly maxlength digits; "string" gets
    maxlength lowercase letters. Raises TypeError for anything else.
    """
    if type == "int":
        low = 10 ** (maxlength - 1)
        high = 10 ** maxlength - 1
        return random.randint(low, high)
    if type == "string":
        return ''.join(random.choice(string.ascii_lowercase) for _ in range(maxlength))
    if type in ("float", "double"):
        # Same digit range as "int", then shift roughly half the digits
        # behind the decimal point.
        low = 10 ** (maxlength - 1)
        high = 10 ** maxlength - 1
        return random.randint(low, high) / (10 ** math.ceil(maxlength / 2))
    raise TypeError("Type " + type + " is not supported")
def create_nested(fields, maxlevel, maxlength, maxkeys, value):
    """Recursively print one nested record to stdout (no trailing newline).

    At depth 0 the precomputed leaf ``value`` is printed; otherwise up to
    ``maxkeys`` randomly chosen field names are printed, each with a
    recursively generated nested value.
    """
    if maxkeys == 0:
        print("{}", end="")
        return
    for index in range(maxkeys):
        # Pick a random (field name, type) pair for this slot.
        key, type = random.choice(list(fields.items()))
        if maxlevel == 0:
            # Depth exhausted: emit the leaf value and stop this branch.
            print(value, end="")
            return
        if index == 0:
            print("{ ", end="")
        print(key + " : ", end="")
        create_nested(fields, maxlevel - 1, maxlength, maxkeys,
                      create_random_value(type, maxlength))
        if index != maxkeys - 1:
            print("; ", end="")
    print(" }", end="")
def generate_random_depth(maxlevel, maxkeys):
    """Return maxkeys random nesting depths, each in [0, maxlevel]."""
    return [random.randint(0, maxlevel) for _ in range(maxkeys)]
def generate_keys(fields, maxlevel, maxlength, maxkeys):
    """Print one record with maxkeys top-level keys, each holding a random
    nested value of a pre-drawn random depth."""
    depths = generate_random_depth(maxlevel, maxkeys)
    for index in range(maxkeys):
        if index == 0:
            print("{ ", end="")
        print(f"key{index}: ", end="")
        create_nested(fields, depths[index], maxlength,
                      random.randint(0, maxkeys), create_random_value("int", maxlength))
        # Separator between keys; closing brace after the last one.
        print("; " if index < maxkeys - 1 else " }", end="")
    if maxkeys == 0:
        print("{}", end="")
def generate_dataset(fields, numlines, maxlevel, maxlength, maxkeys):
    """Print numlines records, one per line, each prefixed with a person id."""
    for line_no in range(numlines):
        print(f"person{line_no + 1}: ", end="")
        generate_keys(fields, maxlevel, maxlength, maxkeys)
        print()
def create_outfile(fields, numlines, maxlevel, maxlength, maxkeys):
    """Write the generated dataset to dataToIndex.txt.

    Bug fix: the original assigned ``sys.stdout = f`` and never restored it,
    so after this call every later print() in the process wrote to a closed
    file. redirect_stdout restores the real stdout when the block exits.
    """
    from contextlib import redirect_stdout

    with open('dataToIndex.txt', 'w') as f, redirect_stdout(f):
        generate_dataset(fields, numlines, maxlevel, maxlength, maxkeys)
def parse_file(file):
    """Read a key file (one whitespace-separated 'name type' pair per line)
    and return it as a {name: type} dict."""
    fields = {}
    with open(file) as handle:
        for line in handle:
            name, type_name = line.split()
            fields[name] = type_name
    return fields
def parse_args(argv):
    """Parse CLI flags; return [keyfile, numlines, maxlevel, maxlength, maxkeys].

    Prints usage and exits with status 2 on bad or missing options;
    exits cleanly after printing usage for -h.
    """
    usage = 'createData.py -k <keyfile> -n <numlines> -d <maxlevel> -l <maxlength> -m <maxkeys>'
    try:
        # Bug fix: 'h' added to the optstring -- the original omitted it, so
        # getopt rejected -h and the -h branch below was unreachable.
        opts, args = getopt.getopt(argv, "hk:n:d:l:m:")
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    keyfile = numlines = maxlevel = maxlength = maxkeys = None
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt == '-k':
            keyfile = arg
        elif opt == '-n':
            numlines = int(arg)
        elif opt == '-d':
            maxlevel = int(arg)
        elif opt == '-l':
            maxlength = int(arg)
        elif opt == '-m':
            maxkeys = int(arg)
    if None in (keyfile, numlines, maxlevel, maxlength, maxkeys):
        # Bug fix: a missing flag previously crashed with UnboundLocalError
        # at the return statement; fail with usage + exit code 2 instead.
        print(usage)
        sys.exit(2)
    return [keyfile, numlines, maxlevel, maxlength, maxkeys]
if __name__ == "__main__":
keyfile, numlines, maxlevel, maxlength, maxkeys = parse_args(sys.argv[1:])
fields = parse_file(keyfile)
create_outfile(fields, numlines, maxlevel, maxlength, maxkeys)
| 31.366972 | 114 | 0.595788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 478 | 0.139807 |
f35f43c00e526592a35ccf5d1df69d3b25cc9782 | 492 | py | Python | homework(december)/decemberAssigment1/random1.py | tkanicka/python_learning | 67fc0e8ca6333571f8b0d30f835b759d670a8643 | [
"Unlicense"
] | null | null | null | homework(december)/decemberAssigment1/random1.py | tkanicka/python_learning | 67fc0e8ca6333571f8b0d30f835b759d670a8643 | [
"Unlicense"
] | null | null | null | homework(december)/decemberAssigment1/random1.py | tkanicka/python_learning | 67fc0e8ca6333571f8b0d30f835b759d670a8643 | [
"Unlicense"
] | null | null | null | import random
class Play:
    """A toy player that can roll dice and play rock/paper/scissors."""

    def __init__(self, name="Player"):
        self.name = name

    def print_name(self):
        # Two-argument print reproduces the original double-space output.
        print("your name is ", self.name)

    def TossDie(self, x=1):
        # Roll a six-sided die x times, printing one result per line.
        for _ in range(x):
            print(random.randint(1, 6))

    def RPC(self, x=1):
        # Play x rounds of rock/paper/scissors, printing a random pick each round.
        choices = ["rock", "paper", "scissors"]
        for _ in range(x):
            print(random.choice(choices))
# Demo: create a named player and exercise each action once.
player1 = Play("Andula")
player1.print_name()
player1.RPC(3)
player1.TossDie(2)
| 15.375 | 51 | 0.565041 | 390 | 0.792683 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.109756 |
f3622570165ab8b3547a7e98a1dead76e8814089 | 1,855 | py | Python | armory/baseline_models/pytorch/resnet50.py | paperwhite/armory | 3868cf5dd86578b58105f5901139a2f0b939ab15 | [
"MIT"
] | null | null | null | armory/baseline_models/pytorch/resnet50.py | paperwhite/armory | 3868cf5dd86578b58105f5901139a2f0b939ab15 | [
"MIT"
] | null | null | null | armory/baseline_models/pytorch/resnet50.py | paperwhite/armory | 3868cf5dd86578b58105f5901139a2f0b939ab15 | [
"MIT"
] | null | null | null | """
ResNet50 CNN model for 244x244x3 image classification
"""
import logging
from art.classifiers import PyTorchClassifier
import numpy as np
import torch
from torchvision import models
logger = logging.getLogger(__name__)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
IMAGENET_MEANS = [0.485, 0.456, 0.406]
IMAGENET_STDEV = [0.229, 0.224, 0.225]


def preprocessing_fn(img):
    """
    Standardize, then normalize imagenet images
    """
    # Scale raw pixel values from [0, 255] down to [0, 1], in place.
    img /= 255.0
    # Per-channel ImageNet normalization.
    # NOTE(review): indexing img[i] assumes a channel-first layout with the
    # three channels on the leading axis -- confirm against the data pipeline.
    for channel, (mean, std) in enumerate(zip(IMAGENET_MEANS, IMAGENET_STDEV)):
        img[channel] = (img[channel] - mean) / std
    return img
# NOTE: PyTorchClassifier expects numpy input, not torch.Tensor input
def get_art_model(model_kwargs, wrapper_kwargs, weights_path=None):
    """
    Build a torchvision ResNet-50 wrapped in an ART PyTorchClassifier.

    :param model_kwargs: kwargs forwarded to torchvision.models.resnet50
    :param wrapper_kwargs: extra kwargs forwarded to PyTorchClassifier
    :param weights_path: optional path of a state-dict checkpoint to load
    :return: the wrapped classifier
    """
    model = models.resnet50(**model_kwargs)
    model.to(DEVICE)

    if weights_path:
        checkpoint = torch.load(weights_path, map_location=DEVICE)
        model.load_state_dict(checkpoint)

    # Inputs are normalized as (x - mean) / std with x in [0, 1], so the valid
    # per-channel range is [(0 - mean) / std, (1 - mean) / std].
    # Bug fix: the original computed the upper bound as 1.0 - mean / std
    # (missing parentheses), which is wrong -- and negative for all three
    # ImageNet channels -- while its own lower bound followed the correct
    # (0 - mean) / std formula.
    clip_min = np.array(
        [(0.0 - mean) / std for mean, std in zip(IMAGENET_MEANS, IMAGENET_STDEV)]
    )
    clip_max = np.array(
        [(1.0 - mean) / std for mean, std in zip(IMAGENET_MEANS, IMAGENET_STDEV)]
    )
    wrapped_model = PyTorchClassifier(
        model,
        loss=torch.nn.CrossEntropyLoss(),
        optimizer=torch.optim.Adam(model.parameters(), lr=0.003),
        input_shape=(224, 224, 3),
        **wrapper_kwargs,
        clip_values=(clip_min, clip_max),
    )
    return wrapped_model
| 27.279412 | 73 | 0.596226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.141779 |
f363d12a0b564c0f2c4645099bd660201af5ca16 | 4,428 | py | Python | clac_line_index.py | shichenhui/Data-mining-techniques-on-astronomical-spectra-data.-I-Clustering-analysis | fd6a7c27cfe2110ee1a2ffc31ddca26340d2cabc | [
"Apache-2.0"
] | null | null | null | clac_line_index.py | shichenhui/Data-mining-techniques-on-astronomical-spectra-data.-I-Clustering-analysis | fd6a7c27cfe2110ee1a2ffc31ddca26340d2cabc | [
"Apache-2.0"
] | null | null | null | clac_line_index.py | shichenhui/Data-mining-techniques-on-astronomical-spectra-data.-I-Clustering-analysis | fd6a7c27cfe2110ee1a2ffc31ddca26340d2cabc | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
class LineIndex:
    """Compute spectral line indices for a fixed set of absorption features.

    Each entry of self.elements is a 6-tuple of wavelengths:
    (center_lo, center_hi, blue_lo, blue_hi, red_lo, red_hi) -- the feature's
    central bandpass plus a blue and a red pseudo-continuum sideband.
    """

    def __init__(self):
        self.elements = [(4143.375, 4178.375, 4081.375, 4118.875, 4245.375, 4285.375),
                         (4143.375, 4178.375, 4085.125, 4097.625, 4245.375, 4285.375),
                         (4223.500, 4236.000, 4212.250, 4221.000, 4242.250, 4252.250),
                         (4282.625, 4317.625, 4267.625, 4283.875, 4320.125, 4333.375),
                         (4370.375, 4421.625, 4360.375, 4371.625, 4444.125, 4456.625),
                         (4453.375, 4475.875, 4447.125, 4455.875, 4478.375, 4493.375),
                         (4515.500, 4560.500, 4505.500, 4515.500, 4561.750, 4580.500),
                         (4635.250, 4721.500, 4612.750, 4631.500, 4744.000, 4757.750),
                         (4848.875, 4877.625, 4828.875, 4848.875, 4877.625, 4892.625),
                         (4979.000, 5055.250, 4947.750, 4979.000, 5055.250, 5066.500),
                         (5070.375, 5135.375, 4896.375, 4958.875, 5302.375, 5367.375),
                         (5155.375, 5197.875, 4896.375, 4958.875, 5302.375, 5367.375),
                         (5161.375, 5193.875, 5143.875, 5162.625, 5192.625, 5207.625),
                         (5247.375, 5287.375, 5234.875, 5249.875, 5287.375, 5319.875),
                         (5314.125, 5354.125, 5306.625, 5317.875, 5355.375, 5365.375),
                         (5390.250, 5417.750, 5379.000, 5390.250, 5417.750, 5427.750),
                         (5698.375, 5722.125, 5674.625, 5698.375, 5724.625, 5738.375),
                         (5778.375, 5798.375, 5767.125, 5777.125, 5799.625, 5813.375),
                         (5878.625, 5911.125, 5862.375, 5877.375, 5923.875, 5949.875),
                         (5938.875, 5995.875, 5818.375, 5850.875, 6040.375, 6105.375),
                         (6191.375, 6273.875, 6068.375, 6143.375, 6374.375, 6416.875), ]

    def calc(self, flux, wave):
        """Compute the line indices of one spectrum.

        :param flux: flux vector of the spectrum
        :param wave: wavelength vector of the spectrum
        :return: np.ndarray with one index value per feature
        """
        line_index = []
        for num, limits in enumerate(self.elements):
            # Slice out the central, blue, and red bandpasses of this feature.
            center_mask = (wave >= limits[0]) & (wave <= limits[1])
            left_mask = (wave >= limits[2]) & (wave <= limits[3])
            right_mask = (wave >= limits[4]) & (wave <= limits[5])
            center_band, center_flux = wave[center_mask], flux[center_mask]
            left_band, left_flux = wave[left_mask], flux[left_mask]
            right_band, right_flux = wave[right_mask], flux[right_mask]
            # Pseudo-continuum straight line through the two sideband points.
            # NOTE(review): the y-values are band *integrals* (np.trapz), not
            # mean fluxes -- preserved from the original; verify against the
            # intended line-index definition.
            y_left = np.trapz(left_flux, left_band)
            y_right = np.trapz(right_flux, right_band)
            x_left = np.mean(left_band)
            x_right = np.mean(right_band)
            # y = kx + b
            k = (y_right - y_left) / (x_right - x_left)
            # Bug fix: the intercept of the line through (x_right, y_right) is
            # y_right - k * x_right; the original computed y_right - k * y_right.
            b = y_right - k * x_right
            Fc = k * center_band + b  # continuum flux over the central band
            if num in (0, 1, 10, 11, 19, 20):
                # Magnitude-style index for these features.
                # Bug fix: the bandwidth is last - first, so the first sample
                # is index 0 (the original used index 1).
                # NOTE(review): uses log2; the classical magnitude definition
                # uses log10 -- preserved as-is, confirm intent.
                Mag = -2.5 * np.log2(
                    (1 / (center_band[-1] - center_band[0]))
                    * np.trapz(center_flux / Fc, center_band)
                )
                line_index.append(Mag)
            else:
                # Equivalent-width index for the remaining features.
                EW = np.trapz((1 - center_flux / Fc), center_band)
                line_index.append(EW)
        # Replace NaN / inf produced by degenerate bands with 0.
        line_index = np.array(line_index)
        line_index[np.isnan(line_index)] = 0
        line_index[np.isinf(line_index)] = 0
        return line_index

    def calc_and_plot(self, flux, wave):
        """Compute the line indices and plot them over the spectrum; same
        in/out contract as calc()."""
        line_index = self.calc(flux, wave)
        center_wave = [(limits[0] + limits[1]) / 2 for limits in self.elements]
        plt.plot(wave, flux)
        plt.scatter(center_wave, line_index)
        plt.show()
        return line_index
if __name__ == '__main__':
    # Demo: load a LAMOST FITS spectrum and plot its computed line indices.
    from astropy.io import fits
    data = fits.open(r'C:\Users\panda\Desktop\spec-56591-EG012606S021203F01_sp08-138.fits')
    a = data[0]
    wave = a.data[2]  # row 3 is the wavelength vector
    flux = a.data[0]  # row 1 is the flux vector
    model = LineIndex()
    line_index = model.calc_and_plot(flux, wave)
f3649feeb844c0af49496f4dfa7da1a11ffe1955 | 2,961 | py | Python | spire/github/upgrades/broodauth.py | bugout-dev/spire | def55cb64fbd306ddde47067d8573cf4b234115c | [
"Apache-2.0"
] | 1 | 2021-12-01T14:18:12.000Z | 2021-12-01T14:18:12.000Z | spire/github/upgrades/broodauth.py | bugout-dev/spire | def55cb64fbd306ddde47067d8573cf4b234115c | [
"Apache-2.0"
] | 13 | 2021-07-13T19:19:22.000Z | 2022-01-14T16:33:42.000Z | spire/github/upgrades/broodauth.py | bugout-dev/spire | def55cb64fbd306ddde47067d8573cf4b234115c | [
"Apache-2.0"
] | null | null | null | """
According with BUG-132 was added table GitHubBugoutUser.
It requires additional script to generate BugoutUser for existing installations
after database migration.
"""
import argparse
import uuid
from ..models import GitHubOAuthEvent, GitHubBugoutUser
from ...broodusers import bugout_api
from ...db import yield_connection_from_env_ctx
from ...utils.settings import INSTALLATION_TOKEN, BOT_INSTALLATION_TOKEN_HEADER
def main(args: argparse.Namespace) -> None:
if args.run:
print("Starting upgrade")
with yield_connection_from_env_ctx() as db_session:
bot_installations = db_session.query(GitHubOAuthEvent).all()
for bot_installation in bot_installations:
user_installation = (
db_session.query(GitHubBugoutUser)
.filter(GitHubBugoutUser.event_id == bot_installation.id)
.one_or_none()
)
if user_installation is not None:
continue
org_name = bot_installation.github_installation_url.rstrip("/").split(
"/"
)[-1]
# Create Brood user
generated_password: str = str(uuid.uuid4())
username = f"{org_name}-{bot_installation.github_account_id}"
email = f"{org_name}-{bot_installation.github_account_id}@bugout.dev"
headers = {BOT_INSTALLATION_TOKEN_HEADER: INSTALLATION_TOKEN}
bugout_user = bugout_api.create_user(
username, email, generated_password, headers=headers
)
bugout_user_token = bugout_api.create_token(
username, generated_password
)
installation_user = GitHubBugoutUser(
event_id=bot_installation.id,
bugout_user_id=bugout_user.id,
bugout_access_token=bugout_user_token.id,
)
db_session.add(installation_user)
db_session.commit()
installation_group_name = (
f"Team group: {org_name}-{bot_installation.github_account_id}"
)
# TODO(kompotkot): Add group id to SlackBugoutUser
if bot_installation.deleted is False:
bugout_api.create_group(
installation_user.bugout_access_token, installation_group_name
)
print(
f"Installation {bot_installation.github_installation_id} complete."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate Bugout user and group for installations"
)
parser.set_defaults(func=lambda _: parser.print_help())
parser.add_argument("run", help="Start upgrade existing installations")
args = parser.parse_args()
main(args)
| 36.109756 | 87 | 0.604863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 606 | 0.204661 |
f364a3b71f51ba8bce139138673bbb2d0659faf9 | 9,500 | py | Python | preprocess_list_tokenized.py | c-col/Transformer | c7b6ae2aed877ba32dc44544429051181fc4da71 | [
"Apache-2.0"
] | null | null | null | preprocess_list_tokenized.py | c-col/Transformer | c7b6ae2aed877ba32dc44544429051181fc4da71 | [
"Apache-2.0"
] | null | null | null | preprocess_list_tokenized.py | c-col/Transformer | c7b6ae2aed877ba32dc44544429051181fc4da71 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import json
import re
from Utils import *
np.random.seed(4)
def output_process(example):
    """Return the final state of an example as a single string.

    Bug fix: the original read the module-level loop variable ``e`` instead of
    its ``example`` parameter, silently depending on global state.
    """
    state = example['state'][-1]
    if isinstance(state, str):
        return state
    # A list state is flattened to a space-separated string.
    return ' '.join(state)
def polish_notation(steps):
    """Convert a list of natural-language steps, which reference earlier steps
    via @@<n>@@ placeholders, into one Polish-notation-style string.

    NOTE(review): the final ``[1:-1]`` strips one leading and one trailing
    character, assuming the last step ends up parenthesized -- confirm for
    inputs whose final step contains no references.
    """
    step_mapping = {}
    for ix, s in enumerate(steps):
        references = re.findall('@@\d+@@', s)
        if len(references):
            # @@n@@ markers are 1-based; convert to 0-based step indices.
            indices = [int(x.replace('@@','')) - 1 for x in references]
            if len(references) == 1:
                # Single reference: drop the marker, append the referenced
                # sub-expression as the sole argument.
                s = '(' + s.replace(' ' + references[0], '') + ', ' + step_mapping[indices[0]] + ')'
                step_mapping[ix] = s
            else:
                # Multiple references: cut the whole span from just before the
                # first marker to the end of the last one, then append every
                # referenced sub-expression as an argument.
                first_index, final_index = s.index(references[0]) - 1, s.index(references[-1]) + len(references[-1])
                s = '(' + s[:first_index] + s[final_index:] + ', '
                for jx in indices:
                    s += step_mapping[jx] + ', '
                s = s[:-2] + ')'
                step_mapping[ix] = s
        else:
            # Leaf step: kept verbatim.
            step_mapping[ix] = s
    return step_mapping[len(steps) - 1][1:-1]
def subgraphs_from_polish(polish_):
    """Recursively collect every parenthesized sub-expression of a Polish
    string; with no parentheses left, split a comma list (dropping the head
    operator)."""
    if polish_.count('(') == 0 and polish_.count(','):
        return [x.strip() for x in polish_.split(',')][1:]
    result_holder = []
    while True:
        try:
            first_paren = polish_.index('(')
        except ValueError:
            break
        # Scan forward to the close paren matching this open paren.
        open_paren = 1
        for ix, char in enumerate(polish_[first_paren+1:]):
            if char == '(':
                open_paren += 1
            elif char == ')':
                open_paren -= 1
            if open_paren == 0:
                # Capture the balanced interior, then resume scanning after it.
                result_holder.append(polish_[first_paren+1:first_paren + ix + 1])
                polish_ = polish_[first_paren + ix:]
                # print('new polish:', polish_)
                break
    while '' in result_holder:
        result_holder.remove('')
    # Recurse into each captured span and flatten one level of nesting.
    intermed_results = [subgraphs_from_polish(x) for x in result_holder]
    if type(intermed_results[0]) == list:
        intermed_results = [item for sublist in intermed_results for item in sublist]
    return result_holder + intermed_results
def remove_duplicates(data_list):
    """Return data_list sorted by its joined 'nlg' text, keeping only the last
    entry of each run of examples with identical 'nlg' lists."""
    ordered = sorted(data_list, key=lambda item: ' '.join(item['nlg']))
    # Keep an entry only when the next one has a different 'nlg';
    # the final entry is always kept.
    unique = [cur for cur, nxt in zip(ordered, ordered[1:]) if cur['nlg'] != nxt['nlg']]
    unique.append(ordered[-1])
    return unique
def is_valid_dag(nlg):
    """True when the number of distinct @@n@@ references across all steps
    equals the number of steps minus one."""
    distinct_refs = set(re.findall(r'@@\d+@@', ' '.join(nlg)))
    return len(distinct_refs) + 1 == len(nlg)
def get_valid_subgraphs(example):
    """For every prefix of the example's steps, return a (instructions, state)
    pair; prefixes that don't form a valid DAG are pruned and renumbered."""
    instructions = example['nlg']
    tokenized_states = example['tokenized_state']
    subgraphs = []
    for end in range(1, len(example['state']) + 1):
        prefix = instructions[:end]
        if not is_valid_dag(prefix):
            # Drop steps unreachable from the prefix's last step.
            prefix = prune_and_reference(prefix)
        subgraphs.append((prefix, tokenized_states[end - 1]))
    return subgraphs
def prune_and_reference(instructions):
    """Drop steps unreachable from the last step and renumber the surviving
    @@n@@ references so they stay consistent.

    NOTE(review): mutates ``instructions`` in place while renumbering --
    callers pass fresh slices today, so this is safe; confirm before reuse.
    """
    # BFS from the final step, collecting every step index it depends on.
    queue = [instructions[-1]]
    required_indices = [len(instructions) - 1]
    while len(queue):
        step = queue.pop(0)
        references = re.findall(r'@@\d+@@', step)
        indices = [int(x.replace('@@', '')) - 1 for x in references]
        required_indices += indices
        queue += [instructions[index] for index in indices]
    prior_removals = 0
    pruned_instructions = []
    for index, instruction in enumerate(instructions):
        if index not in required_indices:
            prior_removals += 1
        else:
            if prior_removals > 0:
                # Shift later references to this step down by the number of
                # steps removed so far, so they still point at it.
                for ref_index, referencer in enumerate(instructions[index + 1:]):
                    if '@@' + str(index + 1) + '@@' in referencer:
                        instructions[index + ref_index + 1] = instructions[index + ref_index + 1].replace(
                            '@@' + str(index + 1) + '@@', '@@' + str(index + 1 - prior_removals) + '@@'
                        )
            pruned_instructions.append(instruction)
    return pruned_instructions
def tokenize_string(example_state, example_vocab):
    """Greedily segment example_state (lowercased) into vocab pieces.

    Pieces of length 3, then 2 are tried first, matching either forward or
    reversed against the vocabulary, then single characters (forward only).
    The first emitted piece is bare; every later piece is prefixed with '%'.
    Each piece is followed by one space. Returns None if segmentation fails.
    """
    tokens = []
    remaining = example_state.lower()
    while remaining:
        for size in (3, 2):
            piece = remaining[:size]
            if piece in example_vocab or piece[::-1] in example_vocab:
                break
        else:
            piece = remaining[0]
            if piece not in example_vocab:
                # No vocab piece fits at this position: give up.
                return None
        tokens.append(piece if not tokens else '%' + piece)
        remaining = remaining[len(piece):]
    return ''.join(t + ' ' for t in tokens)
# --- Load and deduplicate the raw task data, then shuffle it. ---
with open('list_task_v2.json', 'r', encoding="utf-8") as input_file:
    data = json.loads(input_file.read())
data = remove_duplicates(data)
n = len(data)
np.random.shuffle(data)
# --- Build per-example tokenized states and a global string-piece vocab. ---
vocab = []
for e in data:
    e_vocab, tokenized_state = [], []
    nlg, state = e['nlg'], e['state']
    # add_bool stays True only if every step tokenizes successfully.
    add_bool = True
    for ix, step in enumerate(nlg):
        tokenized_step = ''
        # if terminal node ...
        if step.startswith('the string '):
            new_string = step.split("'")[1]
            tokenized_state.append(state[ix].lower().strip())
            e_vocab.append(new_string.lower())
        # if state is a string
        elif type(state[ix]) == str:
            # if it's a reversal
            if state[ix][::-1].lower() in e_vocab:
                tokenized_state.append(state[ix].lower().strip())
            else:
                tokenized_step = tokenize_string(state[ix], e_vocab)
                if tokenized_step is not None:
                    tokenized_state.append(tokenized_step.strip())
                else:
                    add_bool = False
                    break
        # if state[ix] is a list
        else:
            for list_element in state[ix]:
                temp_tok = tokenize_string(list_element, e_vocab)
                if temp_tok is None:
                    add_bool = False
                    break
                else:
                    tokenized_step += ' ' + temp_tok
            if add_bool:
                # NOTE(review): 'tokenize_step' (no 'd') is a distinct name
                # from 'tokenized_step' above; used consistently here, but
                # likely an accidental typo -- confirm before renaming.
                tokenize_step = remove_whitespace(tokenized_step).strip()
                tokenized_state.append(tokenize_step)
            else:
                break
    if add_bool:
        e['tokenized_state'] = tokenized_state
        # Vocab includes each piece, its '%'-continuation form, and both
        # reversed variants.
        vocab += e_vocab + ['%' + x for x in e_vocab] + [x[::-1] for x in e_vocab] + ['%' + x[::-1] for x in e_vocab]
vocab = list(set(vocab))
# with open('string_piece_vocabulary.txt', 'w', encoding='utf-8') as f:
#     f.write('\n'.join(vocab))
# --- Keep only examples that tokenized fully, then split 80/10/10. ---
# NOTE(review): the split uses n = len(data) from BEFORE filtering, so the
# actual proportions drift when examples are dropped -- confirm intent.
filtered_data = []
for e in data:
    if 'tokenized_state' in e.keys():
        filtered_data.append(e)
train = filtered_data[:int(n*0.8)]
val = filtered_data[int(n*0.8):int(n*0.9)]
test = filtered_data[int(n*0.9):]
# --- Training set: one line per valid subgraph prefix of each example. ---
train_in, train_out = '', ''
for jx, e in enumerate(train):
    if jx % 5000 == 0:
        print(round(float(jx / len(train) * 100), 2), '% complete')
    subgraphs = get_valid_subgraphs(e)
    for subgraph in subgraphs:
        train_input = remove_whitespace(' @@SEP@@ '.join(subgraph[0]).lower().strip())
        train_in += train_input + '\n'
        if type(subgraph[1]) == list:
            train_out += ' '.join(subgraph[1]) + '\n'
        else:
            train_out += remove_whitespace(subgraph[1].strip()) + '\n'
    # train_in += ' @@SEP@@ '.join(e['nlg']).lower() + '\n'
    # train_out += e['tokenized_state'][-1].strip() + '\n'
# --- Val/test sets: one line per full example (no subgraph expansion). ---
val_in, val_out = '', ''
for e in val:
    val_input = ' @@SEP@@ '.join(e['nlg']).lower()
    val_in += val_input + '\n'
    val_out += e['tokenized_state'][-1].strip() + '\n'
test_in, test_out = '', ''
for e in test:
    test_input = ' @@SEP@@ '.join(e['nlg']).lower()
    test_in += test_input + '\n'
    test_out += e['tokenized_state'][-1].strip() + '\n'
# --- Write the six split files. ---
base_path = './dag_baseline_2a/'
with open(base_path + 'train_in.txt', 'w', encoding='utf-8') as f:
    f.write(train_in)
with open(base_path + 'train_out.txt', 'w', encoding='utf-8') as f:
    f.write(train_out)
with open(base_path + 'val_in.txt', 'w', encoding='utf-8') as f:
    f.write(val_in)
with open(base_path + 'val_out.txt', 'w', encoding='utf-8') as f:
    f.write(val_out)
with open(base_path + 'test_in.txt', 'w', encoding='utf-8') as f:
    f.write(test_in)
with open(base_path + 'test_out.txt', 'w', encoding='utf-8') as f:
    f.write(test_out)
f364c17a276ab73e615a247493444fe756fae197 | 2,650 | py | Python | galaxy-shooter/src/enemy.py | akshayreddy/games | c7256f9e7da71eee96236ccef1ff72248adff8c0 | [
"MIT"
] | null | null | null | galaxy-shooter/src/enemy.py | akshayreddy/games | c7256f9e7da71eee96236ccef1ff72248adff8c0 | [
"MIT"
] | null | null | null | galaxy-shooter/src/enemy.py | akshayreddy/games | c7256f9e7da71eee96236ccef1ff72248adff8c0 | [
"MIT"
] | null | null | null | import pygame, random
from datetime import datetime
from bullet import EnemyBullet, ChasingBullet
class Enemy:
    """A basic enemy ship that patrols horizontally and fires straight bullets."""

    # Horizontal distance moved per frame.
    stepSize = 0.4

    def __init__(self, screen, gameScreenX, gameScreenY):
        """Create an enemy at a random position near the top of the screen.

        :param screen: pygame display surface to draw on
        :param gameScreenX: playfield width in pixels
        :param gameScreenY: playfield height in pixels
        """
        self.gameScreenX = gameScreenX
        self.gameScreenY = gameScreenY
        self.screen = screen
        # Sprite dimensions in pixels.
        self.bodyX = 50
        self.bodyY = 50
        # Load the sprite and rotate it 180 degrees so it faces downward.
        self.body = pygame.image.load('../assets/spaceship4.svg')
        self.body = pygame.transform.rotate(self.body, 180)
        self.body = pygame.transform.scale(self.body, (self.bodyX, self.bodyY))
        self.enemyMovement = 'left'
        # Random spawn: anywhere across the width, within the top 50 px.
        self.positionX = random.randint(50, gameScreenX - 50)
        self.positionY = random.randint(0, 50)
        self.lastBulletFiredTime = datetime.now()
        self.bullets = []

    def move(self):
        """Draw the enemy and advance it one step, bouncing off the edges."""
        self.screen.blit(self.body, (self.positionX, self.positionY))
        # Decide which way to move.
        if self.enemyMovement == 'right':
            self.positionX = self.positionX + self.stepSize
        elif self.enemyMovement == 'left':
            self.positionX = self.positionX - self.stepSize
        # Bounce back from the boundary.
        if self.positionX < 0:
            self.enemyMovement = 'right'
        elif self.positionX > self.gameScreenX - self.bodyX:
            self.enemyMovement = 'left'

    def destroyed(self):
        """Shrink the sprite to mark destruction.

        NOTE(review): bodyX/bodyY are left at 50, so move()'s bounce check
        still uses the old size — confirm this is intended.
        """
        self.body = pygame.transform.scale(self.body, (10, 10))

    def canFire(self):
        """Return True when enough time has passed since the last shot.

        The cooldown threshold is re-rolled (5-10 s) on every call, so the
        effective delay is random per check.
        """
        elapsed = (datetime.now() - self.lastBulletFiredTime).total_seconds()
        return elapsed > random.randint(5, 10)

    def fire(self):
        """Fire a straight bullet (with sound) if the cooldown allows it."""
        if self.canFire():
            pygame.mixer.Sound('../assets/Gun+Silencer.wav').play()
            bullet = EnemyBullet(self.screen, self.positionX, self.positionY)
            self.lastBulletFiredTime = datetime.now()
            self.bullets.append(bullet)
class SmartEnemy(Enemy):
    """An Enemy variant that uses a rocket sprite and fires chasing bullets."""

    def __init__(self, screen, gameScreenX, gameScreenY):
        super().__init__(screen, gameScreenX, gameScreenY)
        # Replace the base sprite with a rocket, rotated to face downward.
        sprite = pygame.image.load('../assets/rocket.svg')
        sprite = pygame.transform.rotate(sprite, 180)
        self.body = pygame.transform.scale(sprite, (self.bodyX, self.bodyY))

    def fire(self):
        """Fire a chasing bullet (with sound) when the cooldown allows."""
        if not self.canFire():
            return
        pygame.mixer.Sound('../assets/Gun+Silencer.wav').play()
        homing_bullet = ChasingBullet(self.screen, self.positionX, self.positionY)
        self.lastBulletFiredTime = datetime.now()
        self.bullets.append(homing_bullet)
f364c54a60ecdfe331ea87a6ae8d2da27d7ac741 | 1,397 | py | Python | python/ign_topic_info.py | srmainwaring/python-ignition | 720f2e6d8e675ed7e10488caf11ef7e93e519d58 | [
"Apache-2.0"
] | 3 | 2022-01-05T19:25:42.000Z | 2022-02-28T02:51:28.000Z | python/ign_topic_info.py | srmainwaring/python-ignition | 720f2e6d8e675ed7e10488caf11ef7e93e519d58 | [
"Apache-2.0"
] | 4 | 2022-01-10T16:54:30.000Z | 2022-01-11T15:38:41.000Z | python/ign_topic_info.py | srmainwaring/python-ignition | 720f2e6d8e675ed7e10488caf11ef7e93e519d58 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (C) 2022 Rhys Mainwaring
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Replicate the ign_tools command:
$ ign topic -i -t /topic
'''
import argparse
from ignition.transport import Node
def main():
    """Print publisher info (address and message type) for a given topic."""
    # Parse the required --topic argument from the command line.
    parser = argparse.ArgumentParser(description="Get info about a topic.")
    parser.add_argument("-t", "--topic",
                        metavar="topic", required=True, help="Name of a topic")
    topic = parser.parse_args().topic

    # Query the transport layer for everything published on this topic.
    publishers = Node().topic_info(topic)

    # Show each publisher's address and message type.
    print("Publishers [Address, Message Type]:")
    for info in publishers:
        print("  {}, {}".format(info.addr, info.msg_type_name))


if __name__ == "__main__":
    main()
| 26.865385 | 75 | 0.703651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 900 | 0.644238 |
f3653c62ef4fa176cfb63a279e20fab0dda730c3 | 188 | py | Python | tests/spec/cms/blogs/test_blogs.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | tests/spec/cms/blogs/test_blogs.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | tests/spec/cms/blogs/test_blogs.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | from hubspot import HubSpot
from hubspot.discovery.cms.blogs.discovery import Discovery
def test_is_discoverable():
    """The CMS blogs API should be exposed as a Discovery instance."""
    cms_api = HubSpot().cms
    blogs_api = cms_api.blogs
    assert isinstance(blogs_api, Discovery)
| 23.5 | 59 | 0.781915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |