max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
src/plotting/plot_phi_graph.py | cyrusneary/multiscaleLockdownCovid19 | 0 | 12759751 | #%%
import numpy as np
import sys
sys.path.append('..')
from utils.tester import Tester
import pickle
import os
import matplotlib.pyplot as plt
import math
import networkx as nx
import random
city_name = 'Phoenix'
save_file_name = '2021-04-23_14-02-29'
seed = 45
# city_name = 'Seattle'
# save_file_name = '2021-03-21_23-18-05'
# seed = 10
# city_name = 'Dallas'
# save_file_name = '2021-04-09_21-11-28'
fontsize = 20
legend_fontsize = 20
# %%
# Load results data
base_directory = os.getcwd()
base_directory = base_directory[0:base_directory.find('src')+3]
file_path = os.path.join(base_directory, 'optimization', 'save', save_file_name)
with open(file_path,'rb') as f:
tester = pickle.load(f)
if city_name == 'Phoenix':
data_folder_name = 'Phoenix'
if city_name == 'Seattle':
data_folder_name = 'IntercityFlow_Seattle'
if city_name == 'Dallas':
data_folder_name = 'Intercity_Dallas'
# Load city data
city_data_file_path = os.path.join(base_directory, '..', 'data', data_folder_name, 'data_processing_outputs', 'city_data.p')
with open(city_data_file_path,'rb') as f:
city_data = pickle.load(f)
city_list = list(city_data.keys())
num_cities = len(city_list)
num_city = tester.params['m']
num_time = tester.params['n']
num_entity = tester.params['num_entity']
phi_val = np.array(tester.results['phi_best'])
scale_frac = tester.params['scale_frac']
# NOTE: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin float is the equivalent (float64) dtype.
phi_average = np.zeros((num_city, num_city), dtype=float)
for i in range(num_city):
    for j in range(num_city):
        # Average the optimized flow i -> j over time; keep only positive,
        # off-diagonal entries. Compute the average once instead of twice.
        avg_ij = np.average(phi_val[:, i, j])
        if i != j and avg_ij > 0.0:
            phi_average[i, j] = avg_ij
# Scale each row by the origin city's total population and scale_frac.
for city_ind in range(num_city):
    phi_average[city_ind, :] = phi_average[city_ind, :] * tester.problem_data['Ntot'][city_ind] * scale_frac
# Log-compress (the 1e-3 epsilon avoids log(0)) and normalize by the maximum.
phi_average[:, :] = np.log(phi_average[:, :] + 1e-3)
max_val = np.max(phi_average[:, :])
phi_average = phi_average / max_val
# print(phi_average)
# %%
edge_weight_list = []
# Visualize the resulting adjacency matrix
G = nx.DiGraph()
for i in range(num_cities):
G.add_node(city_list[i])
for j in range(num_cities):
if phi_average[i,j] > 0.0:
G.add_edge(city_list[i], city_list[j], weight=phi_average[i,j])
edge_weight_list.append(phi_average[i,j])
if city_name == 'Dallas':
city_data['Johnson']['y_loc'] = 32.385655
city_data['Johnson']['x_loc'] = -97.335191
city_data['Ellis']['y_loc'] = 32.362181
city_data['Ellis']['x_loc'] = -96.803901
city_data['Kaufman']['y_loc'] = 32.613997
city_data['Kaufman']['x_loc'] = -96.283543
city_data['Parker']['y_loc'] = 32.783855
city_data['Parker']['x_loc'] = -97.802077
city_data['Rockwall']['y_loc'] = 32.900920
city_data['Rockwall']['x_loc'] = -96.404271
city_data['Collin']['y_loc'] = 33.20671
city_data['Collin']['x_loc'] = -96.587485
city_data['Denton']['y_loc'] = 33.199884
city_data['Denton']['x_loc'] = -97.089478
city_data['Wise']['y_loc'] = 33.219515
city_data['Wise']['x_loc'] = -97.647529
city_data['Tarrant']['y_loc'] = 32.770195
city_data['Tarrant']['x_loc'] = -97.264026
city_data['Dallas']['y_loc'] = 32.77
city_data['Dallas']['x_loc'] = -96.79
pos = dict()
for i in range(num_cities):
city = city_list[i]
x_loc = city_data[city]['x_loc']
y_loc = city_data[city]['y_loc']
pos[city] = np.array([x_loc, y_loc])
edge_width_list = np.array(edge_weight_list)
edge_width_list = np.exp(edge_width_list)
edge_width_list = edge_width_list / np.max(edge_width_list)
edge_width_list = edge_width_list * 5
options = {
"node_color": "#A0CBE2",
"edge_color": edge_weight_list,
"node_size": tester.problem_data['Ntot'],
"width": edge_width_list,
"edge_cmap": plt.cm.Blues,
"with_labels": False,
"edge_vmin": 0.0,
# "edge_vmax": 100.0
}
print(phi_average[1,:])
print(edge_weight_list)
random.seed(seed)
np.random.seed(seed=seed)
pos = nx.spring_layout(G)
# pos = nx.spectral_layout(G)
#print(city_data['Dallas']['population'])
# %%
plt.figure(figsize=(20,10))
# nx.draw_networkx_nodes(G, pos)
nx.draw_networkx_labels(G, pos)
# nx.draw_networkx_edges(G_fully_connected, pos, edge_color='red')
nx.draw(G, pos, **options)
save_location = os.path.join(base_directory, 'plotting', city_name, 'saved_plots')
filename = os.path.join(save_location, '{}scale_cost_by_pop_phi_graph.png'.format(save_file_name))
plt.savefig(filename, bbox_inches='tight')
plt.show()
#plt.title('Adjacency Matrix with Scaled Demand Threshold {},\n Total Number of Edges: {}'.format(0.02, np.sum(adj_mat)), fontsize=15)
# %%
| 2.15625 | 2 |
utest/writer/test_write_configuration.py | ldtri0209/robotframework | 7 | 12759752 | <gh_stars>1-10
import unittest
import os
from robot.errors import DataError
from robot.writer.datafilewriter import WritingContext
from robot.parsing.model import TestCaseFile
from robot.utils.asserts import assert_equals, assert_raises
HTML_SOURCE = os.path.abspath('foo.html')
TXT_SOURCE= os.path.abspath('foo.txt')
class TestOutputFile(unittest.TestCase):
    # WritingContext should write back to the source file's own path by
    # default, and switch the extension when an explicit format is given.
    def test_source_file_is_used_by_default(self):
        self._assert_output_file(HTML_SOURCE, source=HTML_SOURCE)
    def test_given_format_overrides_source_extension(self):
        self._assert_output_file(TXT_SOURCE, HTML_SOURCE, format='txt')
    def _assert_output_file(self, expected, source=None, format=''):
        # Build a context around a parsed test-case file and compare the
        # path it would write to against `expected`.
        ctx = WritingContext(TestCaseFile(source=source), format=format)
        assert_equals(ctx._output_path() , expected)
class TestFormat(unittest.TestCase):
    # WritingContext should derive its format from the source file's
    # extension unless an explicit, valid format overrides it.
    def test_format_from_source_file_is_used_by_default(self):
        self._assert_format('html', source=HTML_SOURCE)
    def test_explicit_format_overrides_default(self):
        self._assert_format('txt', source=HTML_SOURCE, format='txt')
    def test_creating_with_invalid_format_fails(self):
        # An unrecognised format name should raise DataError at construction.
        assert_raises(DataError, WritingContext, datafile=None, format='inv')
    def _assert_format(self, expected, source, format=''):
        data = TestCaseFile(source=source)
        ctx = WritingContext(data, format=format)
        assert_equals(ctx.format, expected)
| 2.796875 | 3 |
tests/test_fluprodia.py | fwitte/CoolProp.plotting | 8 | 12759753 | <filename>tests/test_fluprodia.py
# from fluprodia import FluidPropertyDiagram
def test_main():
    # Placeholder smoke test: the fluprodia import above is commented out,
    # so this only verifies that the test module itself is collectable.
    pass
| 0.878906 | 1 |
data_files/LearningPython/if1.py | PmagPy/PmagPy-notebooks | 2 | 12759754 | <reponame>PmagPy/PmagPy-notebooks
#!/usr/bin/env python
from __future__ import print_function
if (2+2)==4: # note the use of '==' and parentheses in comparison statement
print("I can put two and two together!")
| 2.34375 | 2 |
src/django_rest_form_fields/compatibility.py | roveil/django-rest-form-fields | 0 | 12759755 | <filename>src/django_rest_form_fields/compatibility.py
"""
This file contains functions for different python and django version compatibility
"""
import datetime
import pytz
import re
from django.utils.timezone import make_aware, utc
if hasattr(re, 'Pattern'):
PatternType = re.Pattern
else:
PatternType = re._pattern_type
def to_timestamp(dt):  # type: (datetime.datetime) -> float
    """Convert a datetime to a POSIX timestamp (float seconds since epoch).

    Naive datetimes are treated as UTC via Django's make_aware; aware ones
    are converted to UTC first.
    """
    if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:
        utc_dt = dt.astimezone(pytz.utc)
    else:
        utc_dt = make_aware(dt, utc)
    # datetime.timestamp() only exists on python >= 3.3
    if hasattr(utc_dt, 'timestamp'):
        return utc_dt.timestamp()
    epoch = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
    return (utc_dt - epoch).total_seconds()
| 2.359375 | 2 |
base/models.py | khiem111189/oublsite | 0 | 12759756 | <gh_stars>0
from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import RichTextField, StreamField
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtailmd.utils import MarkdownField, MarkdownPanel
from .blocks import BaseStreamBlock
# Create your models here.
class HomePage(Page):
    # Wagtail landing page whose only editable content is a Markdown body.
    body = MarkdownField()
    content_panels = Page.content_panels + [
        MarkdownPanel("body"),
    ]
class StandardPage(Page):
"""
A generic content page. On this demo site we use it for an about page but
it could be used for any type of page content that only needs a title,
image, introduction and body field.
https://github.com/wagtail/bakerydemo
"""
introduction = models.TextField(
help_text='Text to describe the page',
blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Landscape mode only; horizontal width between 1000px and 3000px.'
)
body = StreamField(
BaseStreamBlock(), verbose_name="Page body", blank=True
)
content_panels = Page.content_panels + [
FieldPanel('introduction', classname="full"),
StreamFieldPanel('body'),
ImageChooserPanel('image'),
] | 2.25 | 2 |
dbMaint/test/backfillFromHistory.py | lsst-camera-dh/eTraveler-backend | 0 | 12759757 | #!/usr/bin/env python
import os
import os.path
import re
import sqlalchemy
from optparse import OptionParser
class backfillFromHistory():
def __init__(self, dbConnectFile='db_connect.txt',item='activityStatus',
ifNull=True):
self.engine = None
self.dbConnectFile = dbConnectFile
#self.htmlPat = re.compile("<.*?>")
self.connect()
def connect(self):
kwds = {}
try:
with open(self.dbConnectFile) as f:
for line in f:
(key, val) = line.split()
kwds[key] = val
except IOError:
raise IOError, "Unable to open db connect file" + self.dbConnectFile
# Create a new mysql connection object.
db_url = sqlalchemy.engine.url.URL('mysql+mysqldb', **kwds)
self.engine = sqlalchemy.create_engine(db_url)
# If onlyNull is true, only update if activityFinalStatusId is null
def backfill(self, dryRun=1, item='activityStatus', nullOnly=1):
if item == 'activityStatus':
self.itemTable='Activity'
self.itemField='activityFinalStatusId'
self.historyTable='ActivityStatusHistory'
self.historyDataField='activityStatusId'
self.historyItemField='activityId'
else:
if item == 'hardwareLocation':
self.itemTable='Hardware'
self.itemField='locationId'
self.historyTable='HardwareLocationHistory'
self.historyDataField='locationId'
self.historyItemField='hardwareId'
else:
if item == 'hardwareStatus':
self.itemTable='Hardware'
self.itemField = 'hardwareStatusId'
self.historyTable='HardwareStatusHistory'
self.historyDataField='hardwareStatusId'
self.historyItemField='hardwareId'
else:
if item == 'label':
self.backfillLabels(dryRun)
return
else:
print "item ",item, " not supported"
print "Have a nice day"
return
print "dryRun is: ", dryRun
q = 'select id from ' + self.itemTable
if int(nullOnly) == 1: q += ' where ' + self.itemField + ' is null '
q += ' order by id'
results = self.engine.execute(q)
row = results.fetchone()
count = 1
if (row == None):
print "No items needing update found"
return
while (row != None):
id = row['id']
historyQuery = 'select ' + self.historyDataField
historyQuery += ' from ' + self.historyTable + ' as HT'
if item == 'hardwareStatus':
historyQuery += ' join HardwareStatus HS on '
historyQuery += 'HT.hardwareStatusId = HS.id'
historyQuery += ' where ';
if item == 'hardwareStatus':
historyQuery += ' HS.isStatusValue=1 and '
historyQuery += self.historyItemField
historyQuery += " ='" + str(id) + "' order by "
historyQuery += "HT.id desc limit 1"
if int(dryRun) == 1:
if count < 10 or (count % 100 == 0):
print 'About to issue query'
print historyQuery
rs = self.engine.execute(historyQuery)
r = rs.fetchone()
statusId = r[self.historyDataField]
if count < 10 or (count % 100 == 0):
print 'New value for ', self.itemTable, ' entry with id ',id, ' is ',statusId
else:
upd = 'update ' + self.itemTable + ' set ' + self.itemField
upd += '=(' + historyQuery + ") where id='"
upd += str(id) + "'"
if count < 10 or (count % 100 == 0):
print 'About to issue update'
print upd
self.engine.execute(upd)
if count < 10 or (count % 100 == 0):
print 'Updated item in table ', self.itemTable, 'with id=',id
row = results.fetchone()
count += 1
# To start just try a couple
#if count > 5: return
def backfillLabels(self, dryRun=1):
'''
Use a different strategy for labels: just copy from LabelHistory
into LabelCurrent, using ON DUPLICATE KEY UPDATE ..
'''
# First get everything but id out of LabelHistory in order
q = 'select objectId, labelableId, labelId, reason, adding, '
q += 'activityId, createdBy, creationTS from LabelHistory order by id'
results = self.engine.execute(q)
row = results.fetchone()
count = 1
if (row == None):
print "No label history to be copied"
return
print "dryRun is: ",dryRun
while (row != None):
haveAid = (row['activityId'] is not None)
eReason = row['reason'].replace('"', '\\"').replace("'", "\\'")
ins = 'insert into LabelCurrent (objectId,labelableId,labelId,'
ins += 'reason,adding,createdBy,creationTS'
if haveAid: ins +=',activityId'
ins += ') values('+str(row['objectId'])+','+str(row['labelableId'])
ins += ','+str(row['labelId'])+ ',"' + eReason + '",'
ins +=str(row['adding'])+',"'+row['createdBy']
ins += '","' + str(row['creationTS']) + '"'
if haveAid:
ins += ',' + str(row['activityId'])
ins +=') ON DUPLICATE KEY UPDATE reason="' + eReason
ins += '",adding='+str(row['adding'])+',createdBy="'+row['createdBy']
ins += '",creationTS="' + str(row['creationTS']) + '"'
if haveAid:
ins +=',activityId=' + str(row['activityId'])
else:
ins +=',activityId=NULL'
print 'Insert query looks like: '
print ins
if dryRun == '0':
print "executing"
self.engine.execute(ins)
row = results.fetchone()
count = count + 1
#if (count > 5): return
if __name__ == "__main__":
usage = " %prog [options] , e.g. \n python backfillFromHistory.py --db=dev \n or \n python backfillFromHistory.py --connectFile=myConnect.txt "
parser = OptionParser(usage=usage)
parser.add_option("-d", "--db", dest="db", help="used to compute connect file path: ~/.ssh/db_(option-value).txt")
parser.add_option("--connectFile", "-f", dest="connectFile", help="path to file containing db connection information")
parser.add_option("--dryRun", dest="dryRun", help="1 (true) by default. To modify database, use --dryRun=0")
parser.add_option("--item",dest="item",help="field to extract from history. May be activityStatus (default), hardwareLocation, hardwareStatus or label")
parser.add_option("--nullOnly", dest="nullOnly",
help="if set (default) only null fields will be overwritten. To write all fields use --nullOnly=0")
parser.set_defaults(dryRun=1)
parser.set_defaults(nullOnly=1)
parser.set_defaults(item="activityStatus");
parser.set_defaults(db=None)
parser.set_defaults(connectFile=None)
(options, args) = parser.parse_args()
if options.connectFile != None:
if options.db != None:
print "connectFile option takes precedence over db option"
connectFile = options.connectFile
else:
if options.db == None:
raise RuntimeError("one of connectFile, db is required")
else:
connectFile = os.path.join(os.environ['HOME'],
'.ssh/db_' + str(options.db) + '.txt')
back = backfillFromHistory(connectFile)
dryRun = options.dryRun
item = options.item
back.backfill(dryRun=dryRun, item=item, nullOnly=options.nullOnly)
| 2.671875 | 3 |
queries/spanish/query32_sp.py | Gender-Analysis-of-STEM/data-extraction | 0 | 12759758 | import twint
c = twint.Config()
c.Since = "2019-02-01"
c.Until = "2019-03-14"
c.Search = "(mujer OR mujeres OR niña OR niñas OR chica OR chicas) AND \
((ingeniera OR científica OR arquitecta OR programadora OR bióloga) OR \
(ingeniería OR ciencia OR stem)) OR \
(tecnología OR software OR metalurgía OR minería OR agronomía OR automotriz)"
c.Lang = "es"
c.Store_csv = True
c.Output = "./Query3.2_2019.csv"
twint.run.Search(c)
| 2.140625 | 2 |
materials/make_induced_graph.py | lavig17/Knowledge-Graph-Image | 189 | 12759759 | <reponame>lavig17/Knowledge-Graph-Image
import argparse
import json
from nltk.corpus import wordnet as wn
import torch
from glove import GloVe
def getnode(x):
    # Map a WordNet id string such as 'n01234567' to its noun Synset:
    # drop the leading part-of-speech character and parse the offset.
    return wn.synset_from_pos_and_offset('n', int(x[1:]))
def getwnid(u):
    """Return the WordNet id string for synset *u*.

    The id is 'n' followed by the synset offset zero-padded to 8 digits,
    the inverse of getnode(). Uses a format spec instead of the original
    hand-rolled '0' * (8 - len(s)) padding.
    """
    return 'n{:08d}'.format(u.offset())
def getedges(s):
    """Return (child_index, parent_index) pairs for every hypernym link
    between members of *s*; hypernyms outside *s* are ignored."""
    index_of = {node: idx for idx, node in enumerate(s)}
    return [(child_idx, index_of[parent])
            for child_idx, node in enumerate(s)
            for parent in node.hypernyms()
            if parent in index_of]
def induce_parents(s, stop_set):
    """Extend the list *s* in place with all hypernym ancestors.

    Breadth-first walk over hypernyms starting from the nodes already in
    *s*. Nodes in *stop_set* are kept but never expanded further.
    """
    visited = set(s)
    cursor = 0
    while cursor < len(s):
        node = s[cursor]
        cursor += 1
        if node in stop_set:
            continue
        for parent in node.hypernyms():
            if parent not in visited:
                visited.add(parent)
                s.append(parent)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', default='imagenet-split.json')
parser.add_argument('--output', default='imagenet-induced-graph.json')
args = parser.parse_args()
print('making graph ...')
xml_wnids = json.load(open('imagenet-xml-wnids.json', 'r'))
xml_nodes = list(map(getnode, xml_wnids))
xml_set = set(xml_nodes)
js = json.load(open(args.input, 'r'))
train_wnids = js['train']
test_wnids = js['test']
key_wnids = train_wnids + test_wnids
s = list(map(getnode, key_wnids))
induce_parents(s, xml_set)
s_set = set(s)
for u in xml_nodes:
if u not in s_set:
s.append(u)
wnids = list(map(getwnid, s))
edges = getedges(s)
print('making glove embedding ...')
glove = GloVe('glove.6B.300d.txt')
vectors = []
for wnid in wnids:
vectors.append(glove[getnode(wnid).lemma_names()])
vectors = torch.stack(vectors)
print('dumping ...')
obj = {}
obj['wnids'] = wnids
obj['vectors'] = vectors.tolist()
obj['edges'] = edges
json.dump(obj, open(args.output, 'w'))
| 2.28125 | 2 |
assignment5/mapper.py | IITDU-BSSE06/ads-demystifying-the-logs-mehedi-iitdu | 0 | 12759760 | #!/usr/bin/python
import sys
for line in sys.stdin:
    # Hadoop-streaming mapper: split each line on single spaces and emit
    # the 7th field (index 6 — presumably the requested resource of an
    # access-log line; TODO confirm against the input format).
    data = line.strip().split(" ")
    print(data[6])
tests/pymcell4/1610_crossing_transparent_compartment_wall_w_unimol_rxn/model.py | mcellteam/mcell-tests | 1 | 12759761 | #!/usr/bin/env python3
import sys
import os
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
sys.path.append(os.path.join(MCELL_PATH, 'lib'))
else:
print("Error: variable MCELL_PATH that is used to find the mcell library was not set.")
sys.exit(1)
import mcell as m
if len(sys.argv) == 3 and sys.argv[1] == '-seed':
    # overwrite value SEED defined in module parameters
    SEED = int(sys.argv[2])
else:
    SEED = 1
if len(sys.argv) == 5 and sys.argv[3] == '-bngl':
    # overwrite the default BNGL model file path
    # (only reachable when '-seed' was also given, since argc must be 5)
    bngl_file = sys.argv[4]
else:
    bngl_file = 'test.bngl'
params = m.bngl_utils.load_bngl_parameters(bngl_file)
ITERATIONS = int(params['ITERATIONS'])
if 'MCELL_TIME_STEP' in params:
TIME_STEP = float(params['MCELL_TIME_STEP'])
else:
TIME_STEP = 1e-6
DUMP = True
EXPORT_DATA_MODEL = True
# ---- load bngl file ----
model = m.Model()
if 'MCELL_DEFAULT_COMPARTMENT_VOLUME' in params:
MCELL_DEFAULT_COMPARTMENT_VOLUME = params['MCELL_DEFAULT_COMPARTMENT_VOLUME']
MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH = MCELL_DEFAULT_COMPARTMENT_VOLUME**(1.0/3.0)
default_compartment = m.geometry_utils.create_box(
'default_compartment', MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH
)
model.add_geometry_object(default_compartment)
else:
MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH = 1
default_compartment = None
viz_output = m.VizOutput(
mode = m.VizMode.ASCII,
output_files_prefix = './viz_data/seed_' + str(SEED).zfill(5) + '/Scene',
every_n_timesteps = 1
)
model.add_viz_output(viz_output)
model.load_bngl(bngl_file, './react_data/seed_' + str(SEED).zfill(5) + '/', default_compartment)
cp = model.find_geometry_object('CP')
transp = m.SurfaceClass(
name = 'transp',
type = m.SurfacePropertyType.TRANSPARENT,
affected_complex_pattern = m.AllMolecules
)
model.add_surface_class(transp)
cp.surface_class = transp
# ---- configuration ----
model.config.time_step = TIME_STEP
model.config.seed = SEED
model.config.total_iterations = ITERATIONS
model.config.partition_dimension = MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH
model.config.subpartition_dimension = MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH
model.initialize()
if DUMP:
model.dump_internal_state()
if EXPORT_DATA_MODEL and model.viz_outputs:
model.export_data_model()
model.run_iterations(ITERATIONS)
model.end_simulation()
| 2 | 2 |
backend/setup.py | Felipe-Renck/contaxy | 0 | 12759762 | #!/usr/bin/env python
import os
import re
from glob import glob
from os.path import basename, splitext
from setuptools import find_packages, setup # type: ignore
NAME = "contaxy"
MAIN_PACKAGE = NAME # Change if main package != NAME
DESCRIPTION = "Python package template."
URL = "https://github.com/ml-tooling/contaxy"
EMAIL = "<EMAIL>"
AUTHOR = "ML Tool<NAME>"
LICENSE = "MIT"
REQUIRES_PYTHON = ">=3.8"
VERSION = None # Only set version if you like to overwrite the version in _about.py
PWD = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
try:
with open(os.path.join(PWD, "README.md"), encoding="utf-8") as f:
long_description = f.read()
except FileNotFoundError:
long_description = ""
# Extract the version from the _about.py module.
if not VERSION:
try:
with open(os.path.join(PWD, "src", MAIN_PACKAGE, "_about.py")) as f: # type: ignore
VERSION = re.findall(r"__version__\s*=\s*\"(.+)\"", f.read())[0]
except FileNotFoundError:
VERSION = "0.0.0"
# Where the magic happens:
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
license=LICENSE,
packages=find_packages(where="src", exclude=("tests", "test", "examples", "docs")),
package_dir={"": "src"} if os.path.exists("src") else {},
py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
zip_safe=False,
install_requires=[
"typer", # TODO: remove typer?
"pydantic[dotenv,email]",
"fastapi",
"requests",
# Better logging
"loguru",
# Used for multipart stream parsing in file manager
"streaming_form_data",
"filetype",
"addict",
],
# deprecated: dependency_links=dependency_links,
extras_require={
"server": [
# Add all the runtime requirements here:
"kubernetes",
"docker",
# TODO: Dev only - timing
"fastapi-utils",
# Required by fastapi.security OAuth2PasswordBearer & fastapi.UploadFile for example
"python-multipart",
"psutil",
"uvicorn",
"sqlalchemy>=1.4.0",
# Postgres Driver
"psycopg2",
# Generates concise, unambiguous, URL-safe UUIDs.
"shortuuid",
# Create slugs from unicode strings
"python-slugify",
# Used in MinioFileManager
"minio",
# Used in AzureBlobFileManager
"azure-storage-blob",
# Used for jwt handling
"python-jose[cryptography]",
# Used for password hashing
"passlib[bcrypt]",
# TODO: FOR in-memory dict db: Merge dictionaries via json merge patch
"json-merge-patch",
# TODO: FOR in-memory dict db: Merge dictionaries via json merge patch
"jsonpath-ng",
# TODO: Improve
"jinja2",
# Used for OIDC handling
"requests_oauthlib",
# Create fake data for testing
"faker",
],
"dev": [
"setuptools",
"wheel",
"twine",
"flake8",
"pytest",
"pytest-mock",
"pytest-cov",
"mypy",
"types-python-slugify",
"types-requests",
"types-cachetools",
"black",
"pydocstyle",
"isort",
"lazydocs",
"locust",
# Test profiling
"pyinstrument",
# Export profiling information about the tests
"pytest-profiling",
# For better print debugging via debug
"devtools[pygments]",
# For Jupyter Kernel support
"ipykernel",
# TODO: Move to required when necessary
"universal-build",
"requests",
],
},
include_package_data=True,
package_data={
# If there are data files included in your packages that need to be
# 'sample': ['package_data.dat'],
"contaxy.api.endpoints": ["templates/*"]
},
classifiers=[
# TODO: Update based on https://pypi.org/classifiers/
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering",
"Topic :: Utilities",
],
project_urls={
"Changelog": URL + "/releases",
"Issue Tracker": URL + "/issues",
"Documentation": URL + "#documentation",
"Source": URL,
},
entry_points={"console_scripts": [f"{NAME}={MAIN_PACKAGE}._cli:cli"]},
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
)
| 1.921875 | 2 |
contains-duplicate/contains-duplicate.py | HeftyB/code-challenge | 0 | 12759763 | <reponame>HeftyB/code-challenge<filename>contains-duplicate/contains-duplicate.py
"""
Given an array of integers, find if the array contains any duplicates.
Your function should return true if any value appears at least twice in the array, and it should return false if every element is distinct.
Example 1:
Input: [1,2,3,1]
Output: true
Example 2:
Input: [1,2,3,4]
Output: false
Example 3:
Input: [1,1,1,3,3,4,3,2,4,2]
Output: true
"""
# in order to check if an array contains any duplicates:
# FIRST SOLUTION
# make a set from the array (set will ignore duplicates)
# compare length of set to length of array
# if length is == return true else return false
# SECOND SOLUTION
# make a hashtable for the array with each item as the key and a count of occurances as valuus
# loop through the hashtable and return true if any value is > 0
# THIRD SOLUTION
# LOOP THROUGH EACH ITEM IN THE ARRAY
# FOR EACH ITERATION OF LOOP LOOP THROUGH THE WHOLE ARRAY AGAIN(EXCLUDING CURRENT ITEM) AND RETURN TRUE IF A MATCH IS FOUND
input1 = [1,2,3,1]
input2 = [1,2,3,4]
input3 = [1,1,1,3,3,4,3,2,4,2]
def first_solution(arr):
    """Return True if arr contains any duplicate value.

    A set discards duplicates, so its size differs from the array's length
    exactly when some value repeats. O(n) time and space. Replaces the
    original if/else that spelled out the boolean by hand.
    """
    return len(set(arr)) != len(arr)
# print(f"First Solution")
# print(first_solution(input1))
# print(first_solution(input2))
# print(first_solution(input3))
def second_solution(arr):
    """Return True if arr contains any duplicate value, using a hash set.

    Tracks values already seen and returns as soon as one repeats. This
    fixes the original's inefficiency of counting every occurrence and
    then making a second full pass over the counts; worst case is still
    O(n) but duplicates are detected immediately.
    """
    seen = set()
    for value in arr:
        if value in seen:
            return True
        seen.add(value)
    return False
# print(f"Second Solution")
# print(second_solution(input1))
# print(second_solution(input2))
# print(second_solution(input3))
def third_solution(arr):
    """Return True if arr contains any duplicate value.

    Brute-force O(n^2) comparison of every pair — deliberately kept
    quadratic as the naive third approach described above.
    """
    n = len(arr)
    for left in range(n - 1):
        for right in range(left + 1, n):
            if arr[left] == arr[right]:
                return True
    return False
print(f"Third Solution")
print(third_solution(input1))
print(third_solution(input2))
print(third_solution(input3)) | 4.28125 | 4 |
test/helpers/helper_brightness_control_tuning_tester.py | LightStage-Aber/LightStage-Repo | 10 | 12759764 | <gh_stars>1-10
import sys
default_path = "../src/"
sys.path.insert(0, default_path)
import unittest
from options import getPropertiesFile
from helper_evaluation_tuning_tester import GetActual_FromIndex
class _Config:
    # Module-wide switch: when True, the tuning helpers below echo their
    # scores/thresholds to stderr.
    DEBUG = True
class BrightnessControlTuningTester(GetActual_FromIndex):
    """Test helper mixin that runs the brightness-control tuning strategies.

    Each ``_tune_*`` helper configures the 'BrightnessControlTuner' section
    of the shared properties file, runs the evaluation via
    ``self._get_actual`` (m=2 is the untuned baseline, m=3 runs the tuner —
    TODO confirm against GetActual_FromIndex), and returns a
    ``(untuned_score, tuned_score)`` pair for the caller to assert on.
    """
    def __init__(self, *args, **kwords):
        unittest.TestCase.__init__(self, *args, **kwords)
        GetActual_FromIndex.__init__(self)

    def _tune_IterativeRegression_SetThreshold(self, e=None, filename=None, n=None, indexes_are_important=None, support_access=None, threshold=None):
        # Run the IterativeRegression tuner against an explicit score
        # threshold and return (threshold, tuned_score).
        actual_std = threshold
        getPropertiesFile()['BrightnessControlTuner']['tune.mode'] = "IterativeRegression"
        getPropertiesFile()['BrightnessControlTuner']['tune.debug'] = False
        getPropertiesFile()['BrightnessControlTuner']['tune.regression.debug'] = False
        getPropertiesFile()['BrightnessControlTuner']['tune.regression.threshold'] = actual_std
        getPropertiesFile()['BrightnessControlTuner']['tune.regression.max_improvement_attempts_on_best_score'] = 8
        from time_utils.timer import MyTimer
        with MyTimer():
            actual_tuned = self._get_actual(m=3, e=e, filename=filename, n=n,
                                            indexes_are_important=indexes_are_important, support_access=support_access)
        if _Config.DEBUG:
            sys.stderr.write(str(actual_tuned) + " ... " + str(threshold) + " ... ")
        return actual_std, actual_tuned

    def _tune_IterativeRegression(self, e=None, filename=None, n=None, indexes_are_important=None, support_access=None):
        # The untuned (m=2) score doubles as the regression threshold.
        actual_std = self._get_actual(m=2, e=e, filename=filename, n=n, indexes_are_important=indexes_are_important, support_access=support_access)
        return self._tune_IterativeRegression_SetThreshold(e=e, filename=filename, n=n, indexes_are_important=indexes_are_important, support_access=support_access, threshold=actual_std)

    def _tune_SciPyBasinHopping(self, e=None, filename=None, n=None, indexes_are_important=None, support_access=None):
        # Configures SciPy basin hopping with the L-BFGS-B local minimizer
        # and returns (untuned_score, tuned_score).
        actual_std = self._get_actual(m=2, e=e, filename=filename, n=n, indexes_are_important=indexes_are_important, support_access=support_access)
        getPropertiesFile()['BrightnessControlTuner']['tune.mode'] = "L-BFGS-B"
        getPropertiesFile()['BrightnessControlTuner']['tune.debug'] = False
        getPropertiesFile()['BrightnessControlTuner']['tune.scipy.basinhopping.niter'] = 0
        getPropertiesFile()['BrightnessControlTuner']['tune.scipy.basinhopping.niter_success'] = 1
        getPropertiesFile()['BrightnessControlTuner']['tune.scipy.basinhopping.lower_bounds'] = 0.5
        getPropertiesFile()['BrightnessControlTuner']['tune.scipy.basinhopping.upper_bounds'] = 1.5
        getPropertiesFile()['BrightnessControlTuner']['tune.scipy.basinhopping.t'] = 0.5
        getPropertiesFile()['BrightnessControlTuner']['tune.scipy.basinhopping.disp'] = False
        from time_utils.timer import MyTimer
        with MyTimer():
            actual_tuned = self._get_actual(m=3, e=e, filename=filename, n=n, indexes_are_important=indexes_are_important, support_access=support_access)
        if _Config.DEBUG:
            # BUG FIX: this line previously wrote str(threshold), but no
            # 'threshold' name exists in this method's scope (copy-paste
            # from the threshold helper above), so any run with
            # _Config.DEBUG = True raised NameError. Report the baseline
            # score instead, mirroring the other helpers.
            sys.stderr.write(str(actual_tuned) + " ... " + str(actual_std) + " ... ")
        return actual_std, actual_tuned
| 2.265625 | 2 |
src/Python/Stage_1/calc_power.py | ananthsridharan/vtol_sizing | 10 | 12759765 | #=========================================================================
# Python function to call PRASADUM for power calculations for asymm.
# compound with wing, and store results in an output dictionary
#
# Inputs:
# (a) : max airspeed, knots
# (b) : dictionary with aircraft properties [outputs of driver_sweep]
# (c) : location to save output files and images
#
#=========================================================================
import math, numpy
import shutil, os, sys
from run_CSD import run_CSD
from set_PRASADUM_inputs import set_csd_rotor
from csd_rotor import set_CSD_inputs
from performance_defaults import *
def calc_power(Aircraft, code_dir, save_path):
#=========================================================================
# Copy inputs from template directory to input directory of CSD solver
#=========================================================================
tar_dir = os.path.join(code_dir,'Inputs/')
output_dir = os.path.join(code_dir,'Outputs/')
src_dir = os.path.join(code_dir,'Inputs_samples/Rotor_only/HYDRA_v2/')
if os.path.exists(tar_dir):
shutil.rmtree(tar_dir)
shutil.copytree(src_dir, tar_dir)
#=========================================================================
# parse the information and create derived quantities
#=========================================================================
Flight = Aircraft['Flight']
Vmax = Flight['V']
alt = Flight['alt']
Airframe = Aircraft['Airframe']
Blade = Aircraft['Blade']
Rotor = Aircraft['Rotor']
Wing = Aircraft['Wing']
Wing = wing_calcs(Wing, Airframe, Flight)
Rotor = rotor_calcs(Rotor, Flight)
Blade = blade_calcs(Blade, Rotor)
#=========================================================================
# Flush output folder (create if reqd)
#=========================================================================
output_dict = {}
if os.path.exists(save_path):
shutil.rmtree(save_path)
os.makedirs(save_path)
#=========================================================================
# Easy to use shortcuts
#=========================================================================
eta_p = 0.85
rcout = 0.15
Rotor = Aircraft['Rotor']
Radius = Rotor['Radius']
Mtip = Rotor['Mtip']
Nb = Rotor['Nb']
dirs = {'prasadum': code_dir }
#=========================================================================
# Get constants
#=========================================================================
Weight = Airframe['Wt'] # lbs
atype = Airframe['atype'] # configuration description
CDo = Wing['CDo']
K = Wing['K']
Swing = Wing['Area'] # sq ft
CLw = Wing['CL']
Span = Wing['Span']
#=========================================================================
# Compute total flat-plate area (all sources)
#=========================================================================
fwing = 2*Swing*CDo
#=========================================================================
# Create airspeed loop
#=========================================================================
Varray = [0]
if Vmax <= 0:
keep_extending = False
else:
keep_extending = True
while keep_extending:
Varray.append(Varray[-1]+10)
if Varray[-1] >= Vmax:
keep_extending = False
#Varray = [Vmax]
print 'Airspeed loop (knots) is \n',Varray
#=========================================================================
# Rotor RPM schedule
#=========================================================================
Vlim = Mtip*340.44 # in m/s
Vtip_h = Rotor['Vtiph'] # in m/s
#=========================================================================
# Write header
#=========================================================================
f = open(save_path+'Powercurve.dat','w')
f.write('# Speed(knots) Wing_Lift(lb) Total_Drag(lb) Hub_Drag(lb) Prop_SHP Rotor_SHP lift_offset th0 th1c th1s \n' )
#=========================================================================
# Calculate drag from wing and airframe
#=========================================================================
f_airframe = Aircraft['Airframe']['f']
f_total = f_airframe + fwing
rho = Flight['rho'] # slug/cu.ft
#=========================================================================
# find RPM in high speed cruise at Vmax
#=========================================================================
kts2mps = 1.853*5.0/18.0 # multiply by this number to convert knots to m/s
VSI = Vmax*kts2mps # cruise speed, m/s
hover_omega = Vtip_h/Rotor['Radius'] # rad/s
if Vtip_h + VSI <= Vlim:
cruise_omega = hover_omega
else:
Vtip = Mtip * 340.44 - VSI # m/s
cruise_omega = Vtip / Radius # cruise rotor speed, rad/s
#=========================================================================
# find delta Omega (rad/s) between hover and vcruise
# also find the break points to change RPM
#=========================================================================
domega = hover_omega - cruise_omega
Vbreak = (Vlim - Vtip_h)/kts2mps
if Vbreak < 160:
Vbreak = 160
dV = Vmax - Vbreak
#=========================================================================
# Loop over airspeeds
#=========================================================================
# print domega, hover_omega;quit()
for Vkts in Varray:
VSI = Vkts * 1.853 * 5/18 # m/s
Vinf = VSI / 0.3048 # ft/s
#=========================================================================
# initialize output list
#=========================================================================
output_list = [Vkts]
#=========================================================================
# Create rotor data input file from template
# NOTE: RPM CHANGES WITH AIRSPEED; HAS TO BE HERE!
#=========================================================================
if Vkts <= Vbreak:
Rotor['omega'] = hover_omega
else:
Rotor['omega'] = hover_omega - domega*(Vkts-Vbreak)/dV
#=========================================================================
# Create CSD input file for rotor and blade properties
#=========================================================================
set_CSD_inputs(Rotor, Blade, tar_dir, src_dir)
#=========================================================================
# Calc. download factor
#=========================================================================
if Vkts <= 50:
dwf = 1 - 0.1*(Vkts-50)/50
else:
dwf = 1.0
#=========================================================================
# Wing and body aerodynamic loads
# WIng lift coefficient is constant assumption: same angle of attack when
# there is no interference. AT low speeds, dyn. pressure is so low it doesnt
# matter anyway.. lift is quadratic with Vinf
#=========================================================================
dyn_pr = 0.5*rho * Vinf*Vinf
Drag = dyn_pr * f_total #drag, lbs
L_wing = dyn_pr * Swing * CLw #lift, lbs
#=========================================================================
# Output lists entry # 2: Wing lift
#=========================================================================
output_list.append(L_wing)
#=========================================================================
# Compute CSD targets: SI units.. don't ask
#=========================================================================
T_target = dwf*(Weight - L_wing) / 2.2 * 9.8 # Newtons
T_target = T_target/Rotor['NR']
if atype == 'symmetric':
L_target = 0.0
elif atype == 'coaxial' or atype == 'asymmetric':
L_target = Rotor['loff']*T_target*Rotor['Radius']*Vkts/Vmax # linear variation with airspeed
else:
print 'valid aircraft types are coaxial, asymmetric or symmetric'
quit('unknown configuration: cannot calculate power')
shaft_tilt = 0.0
L_target = 0.0 # FOR SYMMETRIC WING..
#=========================================================================
# Run CSD code and convert outputs to FPS and Hp
#=========================================================================
# print 'weight',Weight, 'wing lift',L_wing
# print 'inputs are',T_target,L_target,VSI
info, Power, Fx = run_CSD(T_target, L_target, VSI, 0.0, alt, dirs)
Fx = Fx / 9.8 * 2.2 # lbs
Power = Power / 746 # Hp
#multiply by #rotors
Fx = Fx * Rotor['NR']
Power = Power * Rotor['NR']
#=========================================================================
# Save CSD outputs in subfolder
#=========================================================================
subfolder_name = 'V' + str(Vkts)
path = os.path.join(save_path,subfolder_name)
shutil.copytree(output_dir, path)
#=========================================================================
# Update total drag
#=========================================================================
Drag = Drag + Fx
# print Drag, Fx;quit()
#=========================================================================
# Output lists entry # 3, 4: Total Drag and hub X-force
#=========================================================================
output_list.append(Drag)
output_list.append(Fx)
#=========================================================================
# Compute propeller power and add rotor power
#=========================================================================
Prop_power = Drag * Vinf / eta_p / 550.00 # in hp
Power = Prop_power + Power # add rotor power
lift_offset = L_target/Radius/T_target/0.3048 # nondiml
#=========================================================================
# Output list entry # 5, 6, 7: Propeller and rotor shaft power, lift offset
#=========================================================================
output_list.append(Prop_power)
output_list.append(Power - Prop_power)
output_list.append(lift_offset)
#=========================================================================
# Read trim_rotor_controls.dat
#=========================================================================
controls_file = os.path.join(path,'trim_rotor_controls_deg.dat')
controls = numpy.loadtxt(controls_file)
#=========================================================================
# Output list entry # 8, 9, 10: Rotor coll, th1c, th1s
#=========================================================================
output_list.append(controls[1])
output_list.append(controls[2])
output_list.append(controls[3])
#=========================================================================
# write to file
#=========================================================================
for item in output_list:
f.write("%15.4f" % item)
f.write("\n")
f.close()
#=========================================================================
# End of operations
#=========================================================================
return output_dict
| 2.65625 | 3 |
env.clearblockd/lib/python2.7/site-packages/pygeoip/timezone.py | Organizational-Proof-Of-Work/clearinghoused_build | 0 | 12759766 | <reponame>Organizational-Proof-Of-Work/clearinghoused_build<gh_stars>0
# -*- coding: utf-8 -*-
"""
Copyright (c) 2010-2014 <NAME>, <NAME>.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.txt>.
"""
def time_zone_by_country_and_region(country_code, region_code=None):
    """
    Returns time zone from country and region code.
    :arg country_code: Country code
    :arg region_code: Region code
    """
    entry = country_dict.get(country_code)
    if not entry:
        # Unknown country code.
        return None
    if isinstance(entry, str):
        # Country maps to a single time zone; the region is irrelevant.
        return entry
    # Country maps to a per-region table; may return None for unknown regions.
    return entry.get(region_code)
country_dict = {
'AD': 'Europe/Andorra',
'AE': 'Asia/Dubai',
'AF': 'Asia/Kabul',
'AG': 'America/Antigua',
'AI': 'America/Anguilla',
'AL': 'Europe/Tirane',
'AM': 'Asia/Yerevan',
'AN': 'America/Curacao',
'AO': 'Africa/Luanda',
'AR': {
'01': 'America/Argentina/Buenos_Aires',
'02': 'America/Argentina/Catamarca',
'03': 'America/Argentina/Tucuman',
'04': 'America/Argentina/Rio_Gallegos',
'05': 'America/Argentina/Cordoba',
'06': 'America/Argentina/Tucuman',
'07': 'America/Argentina/Buenos_Aires',
'08': 'America/Argentina/Buenos_Aires',
'09': 'America/Argentina/Tucuman',
'10': 'America/Argentina/Jujuy',
'11': 'America/Argentina/San_Luis',
'12': 'America/Argentina/La_Rioja',
'13': 'America/Argentina/Mendoza',
'14': 'America/Argentina/Buenos_Aires',
'15': 'America/Argentina/San_Luis',
'16': 'America/Argentina/Buenos_Aires',
'17': 'America/Argentina/Salta',
'18': 'America/Argentina/San_Juan',
'19': 'America/Argentina/San_Luis',
'20': 'America/Argentina/Rio_Gallegos',
'21': 'America/Argentina/Buenos_Aires',
'22': 'America/Argentina/Catamarca',
'23': 'America/Argentina/Ushuaia',
'24': 'America/Argentina/Tucuman'
},
'AS': 'US/Samoa',
'AT': 'Europe/Vienna',
'AU': {
'01': 'Australia/Canberra',
'02': 'Australia/NSW',
'03': 'Australia/North',
'04': 'Australia/Queensland',
'05': 'Australia/South',
'06': 'Australia/Tasmania',
'07': 'Australia/Victoria',
'08': 'Australia/West'
},
'AW': 'America/Aruba',
'AX': 'Europe/Mariehamn',
'AZ': 'Asia/Baku',
'BA': 'Europe/Sarajevo',
'BB': 'America/Barbados',
'BD': 'Asia/Dhaka',
'BE': 'Europe/Brussels',
'BF': 'Africa/Ouagadougou',
'BG': 'Europe/Sofia',
'BH': 'Asia/Bahrain',
'BI': 'Africa/Bujumbura',
'BJ': 'Africa/Porto-Novo',
'BL': 'America/St_Barthelemy',
'BM': 'Atlantic/Bermuda',
'BN': 'Asia/Brunei',
'BO': 'America/La_Paz',
'BQ': 'America/Curacao',
'BR': {
'01': 'America/Rio_Branco',
'02': 'America/Maceio',
'03': 'America/Sao_Paulo',
'04': 'America/Manaus',
'05': 'America/Bahia',
'06': 'America/Fortaleza',
'07': 'America/Sao_Paulo',
'08': 'America/Sao_Paulo',
'11': 'America/Campo_Grande',
'13': 'America/Belem',
'14': 'America/Cuiaba',
'15': 'America/Sao_Paulo',
'16': 'America/Belem',
'17': 'America/Recife',
'18': 'America/Sao_Paulo',
'20': 'America/Fortaleza',
'21': 'America/Sao_Paulo',
'22': 'America/Recife',
'23': 'America/Sao_Paulo',
'24': 'America/Porto_Velho',
'25': 'America/Boa_Vista',
'26': 'America/Sao_Paulo',
'27': 'America/Sao_Paulo',
'28': 'America/Maceio',
'29': 'America/Sao_Paulo',
'30': 'America/Recife',
'31': 'America/Araguaina'
},
'BS': 'America/Nassau',
'BT': 'Asia/Thimphu',
'BW': 'Africa/Gaborone',
'BY': 'Europe/Minsk',
'BZ': 'America/Belize',
'CA': {
'AB': 'America/Edmonton',
'BC': 'America/Vancouver',
'MB': 'America/Winnipeg',
'NB': 'America/Halifax',
'NL': 'America/St_Johns',
'NS': 'America/Halifax',
'NT': 'America/Yellowknife',
'NU': 'America/Rankin_Inlet',
'ON': 'America/Toronto',
'PE': 'America/Halifax',
'QC': 'America/Montreal',
'SK': 'America/Regina',
'YT': 'America/Whitehorse'
},
'CC': 'Indian/Cocos',
'CD': {
'02': 'Africa/Kinshasa',
'05': 'Africa/Lubumbashi',
'06': 'Africa/Kinshasa',
'08': 'Africa/Kinshasa',
'10': 'Africa/Lubumbashi',
'11': 'Africa/Lubumbashi',
'12': 'Africa/Lubumbashi'
},
'CF': 'Africa/Bangui',
'CG': 'Africa/Brazzaville',
'CH': 'Europe/Zurich',
'CI': 'Africa/Abidjan',
'CK': 'Pacific/Rarotonga',
'CL': 'Chile/Continental',
'CM': 'Africa/Lagos',
'CN': {
'01': 'Asia/Shanghai',
'02': 'Asia/Shanghai',
'03': 'Asia/Shanghai',
'04': 'Asia/Shanghai',
'05': 'Asia/Harbin',
'06': 'Asia/Chongqing',
'07': 'Asia/Shanghai',
'08': 'Asia/Harbin',
'09': 'Asia/Shanghai',
'10': 'Asia/Shanghai',
'11': 'Asia/Chongqing',
'12': 'Asia/Shanghai',
'13': 'Asia/Urumqi',
'14': 'Asia/Chongqing',
'15': 'Asia/Chongqing',
'16': 'Asia/Chongqing',
'18': 'Asia/Chongqing',
'19': 'Asia/Harbin',
'20': 'Asia/Harbin',
'21': 'Asia/Chongqing',
'22': 'Asia/Harbin',
'23': 'Asia/Shanghai',
'24': 'Asia/Chongqing',
'25': 'Asia/Shanghai',
'26': 'Asia/Chongqing',
'28': 'Asia/Shanghai',
'29': 'Asia/Chongqing',
'30': 'Asia/Chongqing',
'31': 'Asia/Chongqing',
'32': 'Asia/Chongqing',
'33': 'Asia/Chongqing'
},
'CO': 'America/Bogota',
'CR': 'America/Costa_Rica',
'CU': 'America/Havana',
'CV': 'Atlantic/Cape_Verde',
'CW': 'America/Curacao',
'CX': 'Indian/Christmas',
'CY': 'Asia/Nicosia',
'CZ': 'Europe/Prague',
'DE': 'Europe/Berlin',
'DJ': 'Africa/Djibouti',
'DK': 'Europe/Copenhagen',
'DM': 'America/Dominica',
'DO': 'America/Santo_Domingo',
'DZ': 'Africa/Algiers',
'EC': {
'01': 'Pacific/Galapagos',
'02': 'America/Guayaquil',
'03': 'America/Guayaquil',
'04': 'America/Guayaquil',
'05': 'America/Guayaquil',
'06': 'America/Guayaquil',
'07': 'America/Guayaquil',
'08': 'America/Guayaquil',
'09': 'America/Guayaquil',
'10': 'America/Guayaquil',
'11': 'America/Guayaquil',
'12': 'America/Guayaquil',
'13': 'America/Guayaquil',
'14': 'America/Guayaquil',
'15': 'America/Guayaquil',
'17': 'America/Guayaquil',
'18': 'America/Guayaquil',
'19': 'America/Guayaquil',
'20': 'America/Guayaquil',
'22': 'America/Guayaquil'
},
'EE': 'Europe/Tallinn',
'EG': 'Africa/Cairo',
'EH': 'Africa/El_Aaiun',
'ER': 'Africa/Asmera',
'ES': {
'07': 'Europe/Madrid',
'27': 'Europe/Madrid',
'29': 'Europe/Madrid',
'31': 'Europe/Madrid',
'32': 'Europe/Madrid',
'34': 'Europe/Madrid',
'39': 'Europe/Madrid',
'51': 'Africa/Ceuta',
'52': 'Europe/Madrid',
'53': 'Atlantic/Canary',
'54': 'Europe/Madrid',
'55': 'Europe/Madrid',
'56': 'Europe/Madrid',
'57': 'Europe/Madrid',
'58': 'Europe/Madrid',
'59': 'Europe/Madrid',
'60': 'Europe/Madrid'
},
'ET': 'Africa/Addis_Ababa',
'FI': 'Europe/Helsinki',
'FJ': 'Pacific/Fiji',
'FK': 'Atlantic/Stanley',
'FO': 'Atlantic/Faeroe',
'FR': 'Europe/Paris',
'FX': 'Europe/Paris',
'GA': 'Africa/Libreville',
'GB': 'Europe/London',
'GD': 'America/Grenada',
'GE': 'Asia/Tbilisi',
'GF': 'America/Cayenne',
'GG': 'Europe/Guernsey',
'GH': 'Africa/Accra',
'GI': 'Europe/Gibraltar',
'GL': {
'01': 'America/Thule',
'02': 'America/Godthab',
'03': 'America/Godthab'
},
'GM': 'Africa/Banjul',
'GN': 'Africa/Conakry',
'GP': 'America/Guadeloupe',
'GQ': 'Africa/Malabo',
'GR': 'Europe/Athens',
'GS': 'Atlantic/South_Georgia',
'GT': 'America/Guatemala',
'GU': 'Pacific/Guam',
'GW': 'Africa/Bissau',
'GY': 'America/Guyana',
'HK': 'Asia/Hong_Kong',
'HN': 'America/Tegucigalpa',
'HR': 'Europe/Zagreb',
'HT': 'America/Port-au-Prince',
'HU': 'Europe/Budapest',
'ID': {
'01': 'Asia/Pontianak',
'02': 'Asia/Makassar',
'03': 'Asia/Jakarta',
'04': 'Asia/Jakarta',
'05': 'Asia/Jakarta',
'06': 'Asia/Jakarta',
'07': 'Asia/Jakarta',
'08': 'Asia/Jakarta',
'09': 'Asia/Jayapura',
'10': 'Asia/Jakarta',
'11': 'Asia/Pontianak',
'12': 'Asia/Makassar',
'13': 'Asia/Makassar',
'14': 'Asia/Makassar',
'15': 'Asia/Jakarta',
'16': 'Asia/Makassar',
'17': 'Asia/Makassar',
'18': 'Asia/Makassar',
'19': 'Asia/Pontianak',
'20': 'Asia/Makassar',
'21': 'Asia/Makassar',
'22': 'Asia/Makassar',
'23': 'Asia/Makassar',
'24': 'Asia/Jakarta',
'25': 'Asia/Pontianak',
'26': 'Asia/Pontianak',
'30': 'Asia/Jakarta',
'31': 'Asia/Makassar',
'33': 'Asia/Jakarta'
},
'IE': 'Europe/Dublin',
'IL': 'Asia/Jerusalem',
'IM': 'Europe/Isle_of_Man',
'IN': 'Asia/Calcutta',
'IO': 'Indian/Chagos',
'IQ': 'Asia/Baghdad',
'IR': 'Asia/Tehran',
'IS': 'Atlantic/Reykjavik',
'IT': 'Europe/Rome',
'JE': 'Europe/Jersey',
'JM': 'America/Jamaica',
'JO': 'Asia/Amman',
'JP': 'Asia/Tokyo',
'KE': 'Africa/Nairobi',
'KG': 'Asia/Bishkek',
'KH': 'Asia/Phnom_Penh',
'KI': 'Pacific/Tarawa',
'KM': 'Indian/Comoro',
'KN': 'America/St_Kitts',
'KP': 'Asia/Pyongyang',
'KR': 'Asia/Seoul',
'KW': 'Asia/Kuwait',
'KY': 'America/Cayman',
'KZ': {
'01': 'Asia/Almaty',
'02': 'Asia/Almaty',
'03': 'Asia/Qyzylorda',
'04': 'Asia/Aqtobe',
'05': 'Asia/Qyzylorda',
'06': 'Asia/Aqtau',
'07': 'Asia/Oral',
'08': 'Asia/Qyzylorda',
'09': 'Asia/Aqtau',
'10': 'Asia/Qyzylorda',
'11': 'Asia/Almaty',
'12': 'Asia/Qyzylorda',
'13': 'Asia/Aqtobe',
'14': 'Asia/Qyzylorda',
'15': 'Asia/Almaty',
'16': 'Asia/Aqtobe',
'17': 'Asia/Almaty'
},
'LA': 'Asia/Vientiane',
'LB': 'Asia/Beirut',
'LC': 'America/St_Lucia',
'LI': 'Europe/Vaduz',
'LK': 'Asia/Colombo',
'LR': 'Africa/Monrovia',
'LS': 'Africa/Maseru',
'LT': 'Europe/Vilnius',
'LU': 'Europe/Luxembourg',
'LV': 'Europe/Riga',
'LY': 'Africa/Tripoli',
'MA': 'Africa/Casablanca',
'MC': 'Europe/Monaco',
'MD': 'Europe/Chisinau',
'ME': 'Europe/Podgorica',
'MF': 'America/Marigot',
'MG': 'Indian/Antananarivo',
'MK': 'Europe/Skopje',
'ML': 'Africa/Bamako',
'MM': 'Asia/Rangoon',
'MN': 'Asia/Choibalsan',
'MO': 'Asia/Macao',
'MP': 'Pacific/Saipan',
'MQ': 'America/Martinique',
'MR': 'Africa/Nouakchott',
'MS': 'America/Montserrat',
'MT': 'Europe/Malta',
'MU': 'Indian/Mauritius',
'MV': 'Indian/Maldives',
'MW': 'Africa/Blantyre',
'MX': {
'01': 'America/Mexico_City',
'02': 'America/Tijuana',
'03': 'America/Hermosillo',
'04': 'America/Merida',
'05': 'America/Mexico_City',
'06': 'America/Chihuahua',
'07': 'America/Monterrey',
'08': 'America/Mexico_City',
'09': 'America/Mexico_City',
'10': 'America/Mazatlan',
'11': 'America/Mexico_City',
'12': 'America/Mexico_City',
'13': 'America/Mexico_City',
'14': 'America/Mazatlan',
'15': 'America/Chihuahua',
'16': 'America/Mexico_City',
'17': 'America/Mexico_City',
'18': 'America/Mazatlan',
'19': 'America/Monterrey',
'20': 'America/Mexico_City',
'21': 'America/Mexico_City',
'22': 'America/Mexico_City',
'23': 'America/Cancun',
'24': 'America/Mexico_City',
'25': 'America/Mazatlan',
'26': 'America/Hermosillo',
'27': 'America/Merida',
'28': 'America/Monterrey',
'29': 'America/Mexico_City',
'30': 'America/Mexico_City',
'31': 'America/Merida',
'32': 'America/Monterrey'
},
'MY': {
'01': 'Asia/Kuala_Lumpur',
'02': 'Asia/Kuala_Lumpur',
'03': 'Asia/Kuala_Lumpur',
'04': 'Asia/Kuala_Lumpur',
'05': 'Asia/Kuala_Lumpur',
'06': 'Asia/Kuala_Lumpur',
'07': 'Asia/Kuala_Lumpur',
'08': 'Asia/Kuala_Lumpur',
'09': 'Asia/Kuala_Lumpur',
'11': 'Asia/Kuching',
'12': 'Asia/Kuala_Lumpur',
'13': 'Asia/Kuala_Lumpur',
'14': 'Asia/Kuala_Lumpur',
'15': 'Asia/Kuching',
'16': 'Asia/Kuching'
},
'MZ': 'Africa/Maputo',
'NA': 'Africa/Windhoek',
'NC': 'Pacific/Noumea',
'NE': 'Africa/Niamey',
'NF': 'Pacific/Norfolk',
'NG': 'Africa/Lagos',
'NI': 'America/Managua',
'NL': 'Europe/Amsterdam',
'NO': 'Europe/Oslo',
'NP': 'Asia/Katmandu',
'NR': 'Pacific/Nauru',
'NU': 'Pacific/Niue',
'NZ': {
'85': 'Pacific/Auckland',
'E7': 'Pacific/Auckland',
'E8': 'Pacific/Auckland',
'E9': 'Pacific/Auckland',
'F1': 'Pacific/Auckland',
'F2': 'Pacific/Auckland',
'F3': 'Pacific/Auckland',
'F4': 'Pacific/Auckland',
'F5': 'Pacific/Auckland',
'F7': 'Pacific/Chatham',
'F8': 'Pacific/Auckland',
'F9': 'Pacific/Auckland',
'G1': 'Pacific/Auckland',
'G2': 'Pacific/Auckland',
'G3': 'Pacific/Auckland'
},
'OM': 'Asia/Muscat',
'PA': 'America/Panama',
'PE': 'America/Lima',
'PF': 'Pacific/Marquesas',
'PG': 'Pacific/Port_Moresby',
'PH': 'Asia/Manila',
'PK': 'Asia/Karachi',
'PL': 'Europe/Warsaw',
'PM': 'America/Miquelon',
'PN': 'Pacific/Pitcairn',
'PR': 'America/Puerto_Rico',
'PS': 'Asia/Gaza',
'PT': {
'02': 'Europe/Lisbon',
'03': 'Europe/Lisbon',
'04': 'Europe/Lisbon',
'05': 'Europe/Lisbon',
'06': 'Europe/Lisbon',
'07': 'Europe/Lisbon',
'08': 'Europe/Lisbon',
'09': 'Europe/Lisbon',
'10': 'Atlantic/Madeira',
'11': 'Europe/Lisbon',
'13': 'Europe/Lisbon',
'14': 'Europe/Lisbon',
'16': 'Europe/Lisbon',
'17': 'Europe/Lisbon',
'18': 'Europe/Lisbon',
'19': 'Europe/Lisbon',
'20': 'Europe/Lisbon',
'21': 'Europe/Lisbon',
'22': 'Europe/Lisbon'
},
'PW': 'Pacific/Palau',
'PY': 'America/Asuncion',
'QA': 'Asia/Qatar',
'RE': 'Indian/Reunion',
'RO': 'Europe/Bucharest',
'RS': 'Europe/Belgrade',
'RU': {
'01': 'Europe/Volgograd',
'02': 'Asia/Irkutsk',
'03': 'Asia/Novokuznetsk',
'04': 'Asia/Novosibirsk',
'05': 'Asia/Vladivostok',
'06': 'Europe/Moscow',
'07': 'Europe/Volgograd',
'08': 'Europe/Samara',
'09': 'Europe/Moscow',
'10': 'Europe/Moscow',
'11': 'Asia/Irkutsk',
'13': 'Asia/Yekaterinburg',
'14': 'Asia/Irkutsk',
'15': 'Asia/Anadyr',
'16': 'Europe/Samara',
'17': 'Europe/Volgograd',
'18': 'Asia/Krasnoyarsk',
'20': 'Asia/Irkutsk',
'21': 'Europe/Moscow',
'22': 'Europe/Volgograd',
'23': 'Europe/Kaliningrad',
'24': 'Europe/Volgograd',
'25': 'Europe/Moscow',
'26': 'Asia/Kamchatka',
'27': 'Europe/Volgograd',
'28': 'Europe/Moscow',
'29': 'Asia/Novokuznetsk',
'30': 'Asia/Vladivostok',
'31': 'Asia/Krasnoyarsk',
'32': 'Asia/Omsk',
'33': 'Asia/Yekaterinburg',
'34': 'Asia/Yekaterinburg',
'35': 'Asia/Yekaterinburg',
'36': 'Asia/Anadyr',
'37': 'Europe/Moscow',
'38': 'Europe/Volgograd',
'39': 'Asia/Krasnoyarsk',
'40': 'Asia/Yekaterinburg',
'41': 'Europe/Moscow',
'42': 'Europe/Moscow',
'43': 'Europe/Moscow',
'44': 'Asia/Magadan',
'45': 'Europe/Samara',
'46': 'Europe/Samara',
'47': 'Europe/Moscow',
'48': 'Europe/Moscow',
'49': 'Europe/Moscow',
'50': 'Asia/Yekaterinburg',
'51': 'Europe/Moscow',
'52': 'Europe/Moscow',
'53': 'Asia/Novosibirsk',
'54': 'Asia/Omsk',
'55': 'Europe/Samara',
'56': 'Europe/Moscow',
'57': 'Europe/Samara',
'58': 'Asia/Yekaterinburg',
'59': 'Asia/Vladivostok',
'60': 'Europe/Kaliningrad',
'61': 'Europe/Volgograd',
'62': 'Europe/Moscow',
'63': 'Asia/Yakutsk',
'64': 'Asia/Sakhalin',
'65': 'Europe/Samara',
'66': 'Europe/Moscow',
'67': 'Europe/Samara',
'68': 'Europe/Volgograd',
'69': 'Europe/Moscow',
'70': 'Europe/Volgograd',
'71': 'Asia/Yekaterinburg',
'72': 'Europe/Moscow',
'73': 'Europe/Samara',
'74': 'Asia/Krasnoyarsk',
'75': 'Asia/Novosibirsk',
'76': 'Europe/Moscow',
'77': 'Europe/Moscow',
'78': 'Asia/Yekaterinburg',
'79': 'Asia/Irkutsk',
'80': 'Asia/Yekaterinburg',
'81': 'Europe/Samara',
'82': 'Asia/Irkutsk',
'83': 'Europe/Moscow',
'84': 'Europe/Volgograd',
'85': 'Europe/Moscow',
'86': 'Europe/Moscow',
'87': 'Asia/Novosibirsk',
'88': 'Europe/Moscow',
'89': 'Asia/Vladivostok'
},
'RW': 'Africa/Kigali',
'SA': 'Asia/Riyadh',
'SB': 'Pacific/Guadalcanal',
'SC': 'Indian/Mahe',
'SD': 'Africa/Khartoum',
'SE': 'Europe/Stockholm',
'SG': 'Asia/Singapore',
'SH': 'Atlantic/St_Helena',
'SI': 'Europe/Ljubljana',
'SJ': 'Arctic/Longyearbyen',
'SK': 'Europe/Bratislava',
'SL': 'Africa/Freetown',
'SM': 'Europe/San_Marino',
'SN': 'Africa/Dakar',
'SO': 'Africa/Mogadishu',
'SR': 'America/Paramaribo',
'SS': 'Africa/Juba',
'ST': 'Africa/Sao_Tome',
'SV': 'America/El_Salvador',
'SX': 'America/Curacao',
'SY': 'Asia/Damascus',
'SZ': 'Africa/Mbabane',
'TC': 'America/Grand_Turk',
'TD': 'Africa/Ndjamena',
'TF': 'Indian/Kerguelen',
'TG': 'Africa/Lome',
'TH': 'Asia/Bangkok',
'TJ': 'Asia/Dushanbe',
'TK': 'Pacific/Fakaofo',
'TL': 'Asia/Dili',
'TM': 'Asia/Ashgabat',
'TN': 'Africa/Tunis',
'TO': 'Pacific/Tongatapu',
'TR': 'Asia/Istanbul',
'TT': 'America/Port_of_Spain',
'TV': 'Pacific/Funafuti',
'TW': 'Asia/Taipei',
'TZ': 'Africa/Dar_es_Salaam',
'UA': {
'01': 'Europe/Kiev',
'02': 'Europe/Kiev',
'03': 'Europe/Uzhgorod',
'04': 'Europe/Zaporozhye',
'05': 'Europe/Zaporozhye',
'06': 'Europe/Uzhgorod',
'07': 'Europe/Zaporozhye',
'08': 'Europe/Simferopol',
'09': 'Europe/Kiev',
'10': 'Europe/Zaporozhye',
'11': 'Europe/Simferopol',
'13': 'Europe/Kiev',
'14': 'Europe/Zaporozhye',
'15': 'Europe/Uzhgorod',
'16': 'Europe/Zaporozhye',
'17': 'Europe/Simferopol',
'18': 'Europe/Zaporozhye',
'19': 'Europe/Kiev',
'20': 'Europe/Simferopol',
'21': 'Europe/Kiev',
'22': 'Europe/Uzhgorod',
'23': 'Europe/Kiev',
'24': 'Europe/Uzhgorod',
'25': 'Europe/Uzhgorod',
'26': 'Europe/Zaporozhye',
'27': 'Europe/Kiev'
},
'UG': 'Africa/Kampala',
'US': {
'AK': 'America/Anchorage',
'AL': 'America/Chicago',
'AR': 'America/Chicago',
'AZ': 'America/Phoenix',
'CA': 'America/Los_Angeles',
'CO': 'America/Denver',
'CT': 'America/New_York',
'DC': 'America/New_York',
'DE': 'America/New_York',
'FL': 'America/New_York',
'GA': 'America/New_York',
'HI': 'Pacific/Honolulu',
'IA': 'America/Chicago',
'ID': 'America/Denver',
'IL': 'America/Chicago',
'IN': 'America/Indianapolis',
'KS': 'America/Chicago',
'KY': 'America/New_York',
'LA': 'America/Chicago',
'MA': 'America/New_York',
'MD': 'America/New_York',
'ME': 'America/New_York',
'MI': 'America/New_York',
'MN': 'America/Chicago',
'MO': 'America/Chicago',
'MS': 'America/Chicago',
'MT': 'America/Denver',
'NC': 'America/New_York',
'ND': 'America/Chicago',
'NE': 'America/Chicago',
'NH': 'America/New_York',
'NJ': 'America/New_York',
'NM': 'America/Denver',
'NV': 'America/Los_Angeles',
'NY': 'America/New_York',
'OH': 'America/New_York',
'OK': 'America/Chicago',
'OR': 'America/Los_Angeles',
'PA': 'America/New_York',
'RI': 'America/New_York',
'SC': 'America/New_York',
'SD': 'America/Chicago',
'TN': 'America/Chicago',
'TX': 'America/Chicago',
'UT': 'America/Denver',
'VA': 'America/New_York',
'VT': 'America/New_York',
'WA': 'America/Los_Angeles',
'WI': 'America/Chicago',
'WV': 'America/New_York',
'WY': 'America/Denver'
},
'UY': 'America/Montevideo',
'UZ': {
'01': 'Asia/Tashkent',
'02': 'Asia/Samarkand',
'03': 'Asia/Tashkent',
'06': 'Asia/Tashkent',
'07': 'Asia/Samarkand',
'08': 'Asia/Samarkand',
'09': 'Asia/Samarkand',
'10': 'Asia/Samarkand',
'12': 'Asia/Samarkand',
'13': 'Asia/Tashkent',
'14': 'Asia/Tashkent'
},
'VA': 'Europe/Vatican',
'VC': 'America/St_Vincent',
'VE': 'America/Caracas',
'VG': 'America/Tortola',
'VI': 'America/St_Thomas',
'VN': 'Asia/Phnom_Penh',
'VU': 'Pacific/Efate',
'WF': 'Pacific/Wallis',
'WS': 'Pacific/Samoa',
'YE': 'Asia/Aden',
'YT': 'Indian/Mayotte',
'YU': 'Europe/Belgrade',
'ZA': 'Africa/Johannesburg',
'ZM': 'Africa/Lusaka',
'ZW': 'Africa/Harare'
}
| 2.203125 | 2 |
possible-new-sites/data.py | purrcat259/thargoid-search-tools | 1 | 12759767 | <reponame>purrcat259/thargoid-search-tools<gh_stars>1-10
import json
import sys
import requests
import numpy
import trilaterate
# Merope
center_merope = numpy.array([-78.59375, -149.625, -340.53125])
# Col 70 Sector FY-N C21-3
center_col70 = numpy.array([687.0625, -362.53125, -697.0625])
class DataRetriever:
    """Finds candidate star systems near trilaterated positions via the EDSM API.

    Results of EDSM sphere queries are memoised in an in-process cache so
    repeated lookups for the same coordinates do not hit the network again.
    """

    def __init__(self):
        # Cache of query parameters -> list of system names.
        self._cache = {}

    def get_possible_systems(self, coordinates, distances):
        """Trilaterate candidate positions and return nearby system names.

        :arg coordinates: (x, y, z) of the third reference point.
        :arg distances: three measured distances whose correspondence to the
            reference points (Merope, Col 70, origin) is unknown, so every
            ordering is tried.
        """
        from itertools import permutations

        center_origin = numpy.array(coordinates)

        possible_coordinates = []
        # The original hand-written list covered only 5 of the 6 orderings
        # (it missed [d2, d0, d1]); generate all permutations instead.
        for d_merope, d_col70, d_origin in permutations(distances, 3):
            print('Using distances: {}'.format([d_merope, d_col70, d_origin]))
            try:
                # trilaterate() returns the two intersection points of the spheres.
                answer = trilaterate.trilaterate(
                    center_merope, center_col70, center_origin,
                    d_merope, d_col70, d_origin)
                possible_coordinates.append(answer[0])
                possible_coordinates.append(answer[1])
            except Exception as e:
                # Degenerate geometries raise; log and try the next ordering.
                print(e)

        possible_systems = []
        for candidate in possible_coordinates:
            systems = self.get_closest_systems(candidate[0], candidate[1], candidate[2], 5)
            possible_systems.extend(systems)
        return possible_systems

    def get_closest_systems(self, x, y, z, radius=10):
        """Return names of systems within *radius* ly of (x, y, z), cached."""
        # The radius is part of the key: the same point queried with a
        # different radius yields a different result set.
        key = (x, y, z, radius)
        if key in self._cache:
            print('Retrieved data for: {} from cache'.format(key))
            return self._cache[key]

        systems = self.get_data_from_edsm(x=x, y=y, z=z, radius=radius)
        # Flatten the response objects down to just the system names.
        system_names = [system['name'] for system in systems]
        self.store_in_cache(key=key, result=system_names)
        return system_names

    def get_data_from_edsm(self, x, y, z, radius):
        """Query EDSM's sphere-systems endpoint; return the decoded JSON list."""
        url = 'https://www.edsm.net/api-v1/sphere-systems'
        print('Pinging EDSM with following params: [{}, {}, {}], R: {}'.format(
            x, y, z, radius
        ))
        response = requests.get(url=url, params=[('x', x), ('y', y), ('z', z), ('radius', round(radius, 2))])
        return json.loads(response.text)

    def format_data(self, system_name, responses):
        """Flatten HTTP *responses* into a set of system names, excluding
        *system_name* (the system we are searching from)."""
        possible_sites = set()
        for response in responses:
            site_data_set = json.loads(response.text)
            for site_data in site_data_set:
                # Skip the site if it is the system we are in.
                if site_data['name'] == system_name:
                    continue
                possible_sites.add(site_data['name'])
        return possible_sites

    def store_in_cache(self, key, result):
        """Memoise *result* under *key* and report the cache's size."""
        self._cache[key] = result
        # NOTE: sys.getsizeof is shallow -- it measures the dict structure
        # itself, not the cached lists it references.
        size = round(sys.getsizeof(self._cache) / 1024, 2)
        print('Added results: {} to key: {}'.format(result, key))
        print('Cache has grown to: {}kB'.format(size))
if __name__ == '__main__':
    # Ad-hoc smoke test: requires live network access to EDSM.
    retriever = DataRetriever()
    matches = retriever.get_closest_systems(x=-90.75, y=-267.25, z=-309.625, radius=5)
    assert len(matches) == 1
    assert matches[0] == 'Aries Dark Region FG-Y d18'
| 2.53125 | 3 |
human_services/services/viewsets.py | DarwishMenna/pathways-backend | 0 | 12759768 | <reponame>DarwishMenna/pathways-backend
from rest_framework import viewsets
from django.utils.decorators import method_decorator
from human_services.services import models, serializers, documentation
from common.filters import (SearchFilter, OrganizationIdFilter, LocationIdFilter,
TaxonomyFilter, MultiFieldOrderingFilter)
# pylint: disable=too-many-ancestors
@method_decorator(name='list', decorator=documentation.get_service_list_schema())
class ServiceViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only REST endpoint for Service records (list and retrieve only)."""
    queryset = models.Service.objects.all()
    serializer_class = serializers.ServiceSerializer
    # Free-text search covers the translated name and description fields.
    search_fields = ('translations__name', 'translations__description',)
    # Ordering is applied first so SearchFilter and the id/taxonomy filters
    # narrow an already-ordered queryset.
    filter_backends = (MultiFieldOrderingFilter, SearchFilter, OrganizationIdFilter,
                       LocationIdFilter, TaxonomyFilter,)
    ordering_fields = '__all__'
| 1.851563 | 2 |
code/cluster/base.py | jiavila/hpc-client-1 | 0 | 12759769 | <filename>code/cluster/base.py
import inspect, os, subprocess, sys
from util import defn, frame
from .common import Common
class Base(Common):
    """
    BaseCluster defines methods that you may need to override.
    """

    def set_config_defaults(self):
        """
        Use this function to set cluster defaults.

        These will be used when the corresponding YAML value is not present.
        """
        c = self.config.cast

        if c.command is None:
            c.command = ['echo', '{{script_path}}']

        if c.command_script_stdin is None:
            c.command_script_stdin = False

        if c.script is None:
            c.script = SCRIPT_TEMPLATE

        if c.script_executable is None:
            c.script_executable = False

    def determine_job_settings(self, job):
        """
        Parse job settings out of a FW job object.

        You will need to override this for cluster-specific config naming.
        This is also your opportunity to apply defaults for users who forget
        to specify the relevant options in their gear's manifest.

        Important: Security-sensitive.
        These values will be passed to command and script templating.
        """
        # These value names are not cluster-specific.
        # Use this function call when overriding.
        s_debug, s_write = self.determine_singularity_settings(job)

        # For this Base impl, no extra settings are defined.
        # Your cluster type might support these; override this function and add them.
        return defn.JobSettings(
            fw_id = str(job.id),
            singularity_debug = s_debug,
            singularity_writable = s_write,
            ram = None,
            cpu = None,
        )

    def determine_script_patch(self, job):
        """
        Determine where the HPC script file will be placed.

        You probably do not need to change this.

        NOTE(review): "patch" looks like a typo for "path", but the name is
        kept unchanged for compatibility with existing callers.
        """
        return os.path.join(self.config.paths.scripts_path, 'job-' + job.id + '.sh')

    def determine_log_patch(self, job):
        """
        Determine where the HPC log file will be placed.

        You probably do not need to change this.

        NOTE(review): "patch" looks like a typo for "path", but the name is
        kept unchanged for compatibility with existing callers.
        """
        return os.path.join(self.config.paths.hpc_logs_path, 'job-' + job.id + '.txt')

    def execute(self, command, script_path):
        """Run the cluster submission *command*, optionally feeding the
        generated script to it on stdin."""
        # Prevent out-of-order log entries
        sys.stdout.flush()
        sys.stderr.flush()

        if not self.config.cast.command_script_stdin:
            subprocess.run(command, check=True)
        else:
            # Some commands, such as bsub, prefer to be fed via stdin.
            # The context manager guarantees the handle is closed even when
            # subprocess.run raises (the previous code leaked it on error).
            with open(script_path) as handle:
                subprocess.run(command, stdin=handle, check=True)

    def handle_each(self, job, values):
        """
        Handle a single job.

        Override if the general pattern of "generate script, run command"
        does not work for your cluster type.
        """
        # The rendered script text is unused here; only the command matters.
        _script_text, command = self.run_templating(job, values)

        self.log.info('Casting job to HPC...')
        t = frame.timer()

        try:
            self.execute(command, values.script_path)
        except (FileNotFoundError, subprocess.SubprocessError) as e:
            self.log.critical('Error executing command. Exec error follows:')
            frame.fatal(e)

        ms = str(frame.elapsed_ms(t))
        self.log.debug('Casted job in ' + ms + ' ms.')
# Default Jinja2 script template.
# BUGFIX: the conditional lines previously used '{%- if ... -%}', whose
# leading '-' strips the newline BEFORE the tag, merging the rendered echo
# with the previous line (e.g. 'echoecho "Job CPU..."') and breaking the
# shell script whenever job.cpu/job.ram were set. Plain '{% if %}' keeps
# each echo on its own line (an empty line remains when the value is unset,
# which is harmless in bash).
SCRIPT_TEMPLATE = inspect.cleandoc("""#!/bin/bash
echo "This is an example script. Hello world!!"
echo
echo "The FW job ID is {{job.fw_id}}"
echo
{% if job.cpu %}echo "Job CPU is set to {{job.cpu}}"{% endif %}
{% if job.ram %}echo "Job RAM is set to {{job.ram}}"{% endif %}
echo
echo "This file will be written to"
echo "{{script_path}}"
echo "The log will be written to"
echo "{{script_log_path}}"
""") + '\n\n'
| 2.171875 | 2 |
game/plitk.py | 0niel/DandyBot | 0 | 12759770 | import json
import importlib.resources
import tkinter as tk
def load_tileset(filename):
    """Load a tileset description (JSON) plus its raw pixel bytes from the 'game' package."""
    meta = json.loads(importlib.resources.read_text('game', filename))
    # The JSON names its companion pixel file under the "file" key.
    meta["data"] = importlib.resources.read_binary('game', meta["file"])
    return meta
def get_tile_ppm(tileset, index):
    """Cut tile *index* out of the tileset atlas and return it as a binary PPM (P6) image."""
    tile_w = tileset["tile_width"]
    tile_h = tileset["tile_height"]
    cols = tileset["columns"]
    # Pixel offset of the tile inside the atlas (row-major tile layout).
    x = tile_w * (index % cols)
    y = tile_h * (index // cols)
    atlas_w = cols * tile_w  # atlas width in pixels
    rows = []
    # Walk the tile's scanlines; 3 bytes (RGB) per pixel.
    for start in range(atlas_w * y + x, atlas_w * (y + tile_h) + x, atlas_w):
        rows.append(tileset["data"][start * 3: start * 3 + tile_w * 3])
    header = bytes("P6\n%d %d\n255\n" % (tile_w, tile_h), "ascii")
    return header + b"".join(rows)
class PliTk:
    """Grid of tile images rendered onto a tkinter canvas.

    One canvas image item is created per grid cell; set_tile() swaps the
    PhotoImage shown in a cell.
    """
    def __init__(self, canvas, x, y, cols, rows, tileset, scale):
        self.canvas = canvas
        self.x, self.y = x, y
        self.tileset = tileset
        self.scale = scale
        # Pre-render every tile of the set as a zoomed PhotoImage.
        self.images = [
            tk.PhotoImage(data=get_tile_ppm(tileset, n)).zoom(scale)
            for n in range(tileset["size"])
        ]
        self.tiles = []
        self.resize(cols, rows)

    def resize(self, cols, rows):
        """Rebuild the canvas grid with the given dimensions (row-major order)."""
        self.cols, self.rows = cols, rows
        while self.tiles:
            self.canvas.delete(self.tiles.pop())
        cell_w = self.tileset["tile_width"] * self.scale
        cell_h = self.tileset["tile_height"] * self.scale
        for row in range(rows):
            for col in range(cols):
                item = self.canvas.create_image(
                    self.x + col * cell_w,
                    self.y + row * cell_h,
                    image=self.images[0], anchor="nw")
                self.tiles.append(item)

    def set_tile(self, x, y, index):
        """Show tile *index* at grid cell (x, y)."""
        self.canvas.itemconfigure(
            self.tiles[self.cols * y + x], image=self.images[index]
        )
| 2.90625 | 3 |
applications/tensorflow/dynamic_sparsity/ipu_sparse_ops/fp_slot_opt.py | payoto/graphcore_examples | 260 | 12759771 | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
"""
This module exposes an Optimizer wrapper to get regular tf.train.Optimizers to
allow for selecting the slots FP precision independently of the variable type.
Currently only supports Adam
"""
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.training.optimizer import _var_key
from tensorflow.python.training import slot_creator
from tensorflow.python.training.adam import AdamOptimizer
from typing import Type
from logging import getLogger
# The optimizer wrapper below relies on TF1-style graph execution and slots.
tf.disable_v2_behavior()
tf.disable_eager_execution()
# Module-level logger named after this source file.
logger = getLogger(os.path.basename(__file__))
def SelectableSlotFPFormatOptimizer(cls: Type[tf.train.Optimizer]) -> Type[tf.train.Optimizer]:
    """Class factory: wrap an Adam-based optimizer so its slot variables
    (first/second moments m and v) are *stored* in a selectable FP format,
    independently of the trained variables' dtype.

    Only AdamOptimizer subclasses are supported.
    """
    if not issubclass(cls, AdamOptimizer):
        raise ValueError(f'Class {cls} does not inherit from tf.python.training.adam.AdamOptimizer')

    class Wrapped(cls):
        def __init__(self, slots_dtype, force_fp32_weight_update=True, use_nesterov=False, *args, **kwargs):
            # slots_dtype: dtype used to STORE the m/v slot variables.
            # force_fp32_weight_update: perform the update arithmetic in fp32
            #   regardless of the variable dtype, for extra precision.
            # use_nesterov: apply the Nesterov-momentum variant of Adam.
            self.slots_dtype = tf.as_dtype(slots_dtype)
            self.use_nesterov = use_nesterov
            self.force_fp32_weight_update = force_fp32_weight_update
            super(Wrapped, self).__init__(*args, **kwargs)

        def _zeros_slot(self, var, slot_name, op_name):
            """Find or create a slot initialized with 0.0.

            This is effectively a copy of the original TF optimizer method
            excepts this one allows to pass a dtype to `create_zeros_slot`.
            Args:
              var: A `Variable` object.
              slot_name: Name for the slot.
              op_name: Name to use when scoping the Variable that
                needs to be created for the slot.
            Returns:
              A `Variable` object (cast back to var.dtype for the caller).
            """
            named_slots = self._slot_dict(slot_name)
            if _var_key(var) not in named_slots:
                # Create the slot in the selected storage dtype.
                new_slot_variable = slot_creator.create_zeros_slot(var, op_name,
                                                                   dtype=self.slots_dtype)
                self._restore_slot_variable(
                    slot_name=slot_name, variable=var,
                    slot_variable=new_slot_variable)
                named_slots[_var_key(var)] = new_slot_variable
            return tf.cast(named_slots[_var_key(var)], var.dtype)

        def _apply_weight_update(self, grad, var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, use_nesterov):
            """Compute one Adam step; returns the updated variable value.

            Moments are updated in `weight_update_dtype` and stored back in
            `slots_dtype`; the returned tensor is cast back to var.dtype.
            """
            if self.force_fp32_weight_update:
                # Cast to fp32 for extra precision
                weight_update_dtype = tf.float32
            else:
                weight_update_dtype = var.dtype

            # cast all variables to the same desired dtype for the update
            m_c = tf.convert_to_tensor(tf.cast(m, weight_update_dtype))
            v_c = tf.convert_to_tensor(tf.cast(v, weight_update_dtype))
            var_c = tf.cast(var, weight_update_dtype)
            lr_c = tf.cast(lr, weight_update_dtype)
            beta1_power_c = tf.cast(beta1_power, weight_update_dtype)
            beta2_power_c = tf.cast(beta2_power, weight_update_dtype)
            beta1_c = tf.cast(beta1, weight_update_dtype)
            beta2_c = tf.cast(beta2, weight_update_dtype)
            epsilon_c = tf.cast(epsilon, weight_update_dtype)
            grad_c = tf.cast(grad, weight_update_dtype)

            # correct for the bias of the first and second order moments
            alpha = lr_c * math_ops.sqrt(1 - beta2_power_c) / (1 - beta1_power_c)
            # update the first order moment
            m_t = beta1_c * m_c + (1.0 - beta1_c) * grad_c
            # update the second order moment
            v_t = beta2_c * v_c + (1.0 - beta2_c) * grad_c * grad_c

            # store the moments in the right dtype
            assign_m = tf.assign(m, tf.cast(m_t, self.slots_dtype))
            assign_v = tf.assign(v, tf.cast(v_t, self.slots_dtype))

            # update the variable (only after the moment writes are scheduled)
            with tf.control_dependencies([assign_m, assign_v]):
                if use_nesterov:
                    return tf.cast(var_c - ((grad_c * (1.0 - beta1_c) + beta1_c * m_t) * alpha) / (math_ops.sqrt(v_t) + epsilon_c), var.dtype)
                else:
                    return tf.cast(var_c - (m_t * alpha) / (math_ops.sqrt(v_t) + epsilon_c), var.dtype)

        def _resource_apply_dense(self, grad, var):
            """Dense resource-variable update hook called by the base Optimizer."""
            m = self.get_slot(var, "m")
            v = self.get_slot(var, "v")
            beta1_power, beta2_power = self._get_beta_accumulators()
            return var.assign(
                self._apply_weight_update(
                    grad=grad,
                    var=var,
                    m=m,
                    v=v,
                    beta1_power=beta1_power,
                    beta2_power=beta2_power,
                    lr=self._lr_t,
                    beta1=self._beta1_t,
                    beta2=self._beta2_t,
                    epsilon=self._epsilon_t,
                    use_nesterov=self.use_nesterov))

    return Wrapped
| 2.5 | 2 |
Algorithms/Implementation/Lisa's Workbook/solution.py | kitarp29/ds-algo-solutions | 48 | 12759772 | <reponame>kitarp29/ds-algo-solutions<filename>Algorithms/Implementation/Lisa's Workbook/solution.py
# Lisa's Workbook: count "special" problems, i.e. problems whose number
# (within its chapter) equals the page number it is printed on.
special = 0
# pages[0] is a dummy entry so that real pages are numbered from 1.
pages = [[]]
# input: number of chapters and maximum number of problems per page
chapters, page_cap = map(int, input().split(' '))
# input: number of problems in each chapter
problems = list(map(int, input().split(' ')))
for count in problems:
    # Chunk this chapter's problems into pages of at most page_cap each.
    # (The original buffered into a fixed 100-slot list, which raised
    # IndexError for any chapter spanning more than 100 pages.)
    for start in range(0, count, page_cap):
        pages.append(list(range(start + 1, min(start + page_cap, count) + 1)))
for page_no, page in enumerate(pages):
    if page_no in page:
        special += 1
# Print the number of special problems in Lisa's workbook
print(special)
NAS/single-path-one-shot/src/MNIST/utils.py | naviocean/SimpleCVReproduction | 923 | 12759773 | <gh_stars>100-1000
import os
import re
import torch
import torch.nn as nn
import random
import json
import numpy as np
def get_num_correct(preds, labels):
    """Count how many rows of *preds* have their argmax equal to *labels*."""
    predicted = preds.argmax(dim=1)
    return int((predicted == labels).sum().item())
class ArchLoader():
    '''
    Load candidate architectures from a JSON file and serve them as a list,
    a dict, random batches, or deterministic seeded "fair" batches.
    '''

    def __init__(self, path):
        # path: JSON file mapping arch-id -> record containing an "arch" list.
        super(ArchLoader, self).__init__()

        self.arc_list = []
        self.arc_dict = {}
        self.get_arch_list_dict(path)
        random.shuffle(self.arc_list)
        self.idx = -1  # iterator cursor; pre-incremented in __next__

        # Allowed channel choices for each depth level of the supernet.
        self.level_config = {
            "level1": [4, 8, 12, 16],
            "level2": [4, 8, 12, 16, 20, 24, 28, 32],
            "level3": [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64]
        }

    def get_arch_list(self):
        """Return the shuffled list of architectures."""
        return self.arc_list

    def get_arch_dict(self):
        """Return the raw id -> record dict loaded from JSON."""
        return self.arc_dict

    def get_random_batch(self, bs):
        """Sample *bs* distinct architectures uniformly at random."""
        return random.sample(self.arc_list, bs)

    def get_part_dict(self):
        """Return a sub-dict with only the first 10 entries (debug helper)."""
        keys = list(self.arc_dict.keys())[:10]
        return dict([(key, self.arc_dict[key]) for key in keys])

    def convert_list_arc_str(self, arc_list):
        """Join channel counts into a dash-separated string, e.g. [4, 8] -> "4-8"."""
        arc_str = ""
        arc_list = [str(item)+"-" for item in arc_list]
        for item in arc_list:
            arc_str += item
        return arc_str[:-1]

    def __next__(self):
        self.idx += 1
        if self.idx >= len(self.arc_list):
            raise StopIteration
        return self.arc_list[self.idx]

    def __iter__(self):
        return self

    def get_arch_list_dict(self, path):
        """Populate arc_dict and arc_list from the JSON file at *path*."""
        with open(path, "r") as f:
            self.arc_dict = json.load(f)
            self.arc_list = []
            for _, v in self.arc_dict.items():
                self.arc_list.append(v["arch"])

    def generate_fair_batch(self):
        """Build a deterministic batch of channel choices.

        Each of the 20 layers gets a fixed-seed shuffled copy of its level's
        choices, repeated so all rows have equal length; rows are transposed
        so each returned row is one architecture. Fixed seeds make the batch
        reproducible across calls.
        """
        rngs = []
        seed = 0
        # level1
        for i in range(0, 7):
            seed += 1
            random.seed(seed)
            rngs.append(random.sample(self.level_config['level1'],
                                      len(self.level_config['level1']))*4)

        # level2
        for i in range(7, 13):
            seed += 1
            random.seed(seed)
            rngs.append(random.sample(self.level_config['level2'],
                                      len(self.level_config['level2']))*2)

        # level3
        for i in range(13, 20):
            seed += 1
            random.seed(seed)
            rngs.append(random.sample(self.level_config['level3'],
                                      len(self.level_config['level3'])))
        return np.transpose(rngs)

    def generate_niu_fair_batch(self):
        """Variant of generate_fair_batch that re-samples (rather than tiles)
        the shorter levels so every row is an independent permutation."""
        rngs = []
        seed = 0
        # level1
        for i in range(0, 7):
            seed += 1
            random.seed(seed)
            tmp_rngs = []
            for _ in range(4):
                tmp_rngs.extend(random.sample(self.level_config['level1'],
                                              len(self.level_config['level1'])))
            rngs.append(tmp_rngs)
        # level2
        for i in range(7, 13):
            seed += 1
            random.seed(seed)
            tmp_rngs = []
            for _ in range(2):
                tmp_rngs.extend(random.sample(self.level_config['level2'],
                                              len(self.level_config['level2'])))
            rngs.append(tmp_rngs)

        # level3
        for i in range(13, 20):
            seed += 1
            random.seed(seed)
            rngs.append(random.sample(self.level_config['level3'],
                                      len(self.level_config['level3'])))
        return np.transpose(rngs)
# arch_loader = ArchLoader("Track1_final_archs.json")
# print(arch_loader.generate_niu_fair_batch())
# arc_dc = arch_loader.get_random_batch(1000)
# for i, arc in enumerate(arc_dc):
# print(i, arc)
# cnt = 0
# for i,ac in enumerate(arch_loader):
# print(i,ac)
# cnt += 1
# print(cnt)
class CrossEntropyLabelSmooth(nn.Module):
    """Cross-entropy loss with label smoothing.

    Targets are softened to (1 - epsilon) on the true class and
    epsilon / num_classes everywhere else.
    """

    def __init__(self, num_classes, epsilon):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        # Build one-hot targets, then blend with the uniform distribution.
        one_hot = torch.zeros_like(log_probs).scatter_(
            1, targets.unsqueeze(1), 1)
        smoothed = (1 - self.epsilon) * one_hot + self.epsilon / self.num_classes
        return (-smoothed * log_probs).mean(0).sum()
class AvgrageMeter(object):
    """Running-average tracker (class name kept as-is for compatibility).

    Attributes: val (last value), sum (weighted sum), cnt (total count),
    avg (running average).
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.cnt = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.cnt += n
        self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
    """Return top-k accuracies (in percent) for each k in *topk*."""
    maxk = max(topk)
    batch_size = target.size(0)

    # (batch, maxk) indices of the highest-scoring classes, transposed so
    # that row r holds every sample's r-th best prediction.
    _, top_idx = output.topk(maxk, 1, True, True)
    top_idx = top_idx.t()
    hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))

    return [hits[:k].reshape(-1).float().sum(0).mul_(100.0 / batch_size)
            for k in topk]
def save_checkpoint(state, iters, tag=''):
    """Persist *state* under ./models with a zero-padded iteration suffix.

    The filename pattern is '{tag}checkpoint-{iters:06}.pth.tar'.
    """
    # exist_ok avoids the check-then-create race of the original
    # `if not exists: makedirs` sequence.
    os.makedirs("./models", exist_ok=True)
    filename = os.path.join(
        "./models/{}checkpoint-{:06}.pth.tar".format(tag, iters))
    torch.save(state, filename)
def get_lastest_model():
    """Return (path, iteration) of the lexicographically newest checkpoint
    in ./models, or (None, 0) when the directory is empty.

    Creates ./models if it does not exist yet.
    """
    if not os.path.exists('./models'):
        os.mkdir('./models')
    checkpoints = sorted(os.listdir('./models/'))
    if not checkpoints:
        return None, 0
    newest = checkpoints[-1]
    # Zero-padded iteration numbers keep lexicographic == numeric order.
    iters = re.findall(r'\d+', newest)
    return './models/' + newest, int(iters[0])
def get_parameters(model):
    """Split model parameters into a weight-decay group (multi-dim 'weight'
    tensors) and a no-weight-decay group (biases, norm scales, etc.)."""
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if 'weight' in name and param.dim() > 1:
            decay.append(param)
        else:
            no_decay.append(param)
    # Sanity check: every parameter landed in exactly one group.
    assert len(list(model.parameters())) == len(decay) + len(no_decay)
    return [dict(params=decay),
            dict(params=no_decay, weight_decay=0.)]
def bn_calibration_init(m):
    """ calculating post-statistics of batch normalization """
    # Only touches modules that track running stats (i.e. BatchNorm layers).
    if getattr(m, 'track_running_stats', False):
        # reset all values for post-statistics
        m.reset_running_stats()
        # set bn in training mode to update post-statistics
        m.training = True
        # if use cumulative moving average
        # NOTE(review): FLAGS is not defined anywhere in this module — this
        # branch raises NameError unless the caller injects a global FLAGS.
        # Confirm where FLAGS is supposed to come from.
        if getattr(FLAGS, 'cumulative_bn_stats', False):
            m.momentum = None
| 2.296875 | 2 |
john_doe/names/austria.py | xioren/JohnDoe | 0 | 12759774 | male = [
'David',
'Maximilian',
'Lukas',
'Tobias',
'Paul',
'Elias',
'Jakob',
'Jonas',
'Alexander',
'Felix',
'Leon',
'Simon',
'Sebastian',
'Julian',
'Fabian',
'Florian',
'Noah',
'Moritz',
'Samuel',
'Raphael',
'Luca',
'Leo',
'Daniel',
'Valentin',
'Matthias',
'Benjamin',
'Niklas',
'Johannes',
'Luis',
'Michael',
'Lorenz',
'Ben',
'Matteo',
'Philipp',
'Nico',
'Dominik',
'Gabriel',
'Anton',
'Jonathan',
'Liam',
'Emil',
'Max',
'Theodor',
'Adrian',
'Finn',
'Stefan',
'Vincent',
'Josef',
'Oliver',
'Andreas',
'Marcel',
'Konstantin',
'Thomas',
'Jan',
'Manuel',
'Kilian',
'Oskar',
'Theo',
'Fabio',
'Martin'
]
female = [
'Anna',
'Emma',
'Marie',
'Lena',
'Sophia',
'Laura',
'Mia',
'Sophie',
'Emilia',
'Lea',
'Valentina',
'Johanna',
'Leonie',
'Julia',
'Hannah',
'Lara',
'Sarah',
'Elena',
'Luisa',
'Magdalena',
'Katharina',
'Hanna',
'Sara',
'Lina',
'Amelie',
'Lisa',
'Emily',
'Nora',
'Theresa',
'Helena',
'Marlene',
'Isabella',
'Nina',
'Jana',
'Ella',
'Alina',
'Elisa',
'Miriam',
'Maria',
'Valerie',
'Franziska',
'Sofia',
'Clara',
'Paula',
'Annika',
'Klara',
'Viktoria',
'Charlotte',
'Olivia',
'Eva',
'Antonia',
'Elisabeth',
'Pia',
'Rosa',
'Selina',
'Linda',
'Livia',
'Vanessa',
'Mila',
'Elina'
]
last = [
'Gruber',
'Huber',
'Bauer',
'Wagner',
'Müller',
'Pichler',
'Steiner',
'Mayer',
'Moser',
'Hofer',
'Berger',
'Leitner',
'Fuchs',
'Fischer',
'Eder',
'Schmid',
'Weber',
'Schneider',
'Schwarz',
'Winkler',
'Maier',
'Reiter',
'Schmidt',
'Mayr',
'Lang',
'Baumgartner',
'Brunner',
'Wimmer',
'Auer',
'Egger',
'Wolf',
'Lechner',
'Wallner',
'Aigner',
'Binder',
'Ebner',
'Koller',
'Haas',
'Lehner',
'Schuster',
'Graf',
'Holzer',
'Haider',
'Lackner',
'Wieser',
'Koch',
'Strasser',
'Weiss',
'Stadler',
'Böhm',
'König',
'Krenn',
'Kaiser',
'Kaufmann',
'Fink',
'Winter',
'Hofbauer',
'Kern',
'Hauser',
'Mair',
'Fritz',
'Maurer',
'Hofmann',
'Seidl',
'Karner',
'Hackl',
'Riegler',
'Resch',
'Strobl',
'Ortner',
'Posch',
'Reisinger',
'Schober',
'Mayrhofer',
'Riedl',
'Rainer',
'Kogler',
'Klein',
'Neubauer',
'Schwaiger',
'Jäger',
'Frank',
'Friedl',
'Grabner',
'Horvath',
'Unger',
'Müllner',
'Brandstätter',
'Hartl',
'Zimmermann',
'Kainz',
'Hoffmann',
'Sommer',
'Hager',
'Lindner',
'Weiß',
'Schweiger',
'Wiesinger',
'Thaler',
'Höller',
'Richter',
'Walter',
'Haslinger',
'Steininger',
'Herzog',
'Pirker',
'Baumann',
'Mandl',
'Pfeiffer',
'Krammer',
'Rauch',
'Kofler',
'Huemer',
'Zauner',
'Hammer',
'Jovanovic',
'Hahn',
'Brandstetter',
'Ecker',
'Konrad',
'Angerer',
'Köck',
'Novak',
'Schwab',
'Stangl',
'Hauer',
'Fellner',
'Kurz',
'Putz',
'Brandl',
'Holzinger',
'Braun',
'Mayerhofer',
'Bruckner',
'Grill',
'Mader',
'Zach',
'Plank',
'Ertl',
'Steindl',
'Wurm',
'Langer',
'Rieder',
'Hafner',
'Kraus',
'Rath',
'Hartmann',
'Schauer',
'Stocker',
'Neumann',
'Knapp',
'Platzer',
'Singer',
'Rieger',
'Stockinger',
'Fasching',
'Oswald',
'Gassner',
'Neuhold',
'Bayer',
'Stöckl',
'Prinz',
'Haller',
'Kastner',
'Pfeifer',
'Schlager',
'Hutter',
'Sturm',
'Rauscher',
'Peter',
'Roth',
'Gasser',
'Stöger',
'Fröhlich',
'Petrovic',
'Schreiner',
'Knoll',
'Burgstaller',
'Hölzl',
'Lorenz',
'Haberl',
'Feichtinger',
'Karl',
'Pucher',
'Bischof',
'Windisch',
'Deutsch',
'Vogl',
'Schütz',
'Bacher',
'Ziegler',
'Weinberger',
'Hermann',
'Kerschbaumer',
'Trummer',
'Zechner',
'Pilz',
'Gabriel',
'Burger',
'Thurner'
]
| 2.46875 | 2 |
assets/files/TkEntryWidget.py | andrzejQ/El_Prog | 0 | 12759775 | <gh_stars>0
#!/usr/bin/python3 -i
# v.3.7+ order in dict()
import tkinter as tk
# based on https://python-course.eu/tkinter_entry_widgets.php
# https://www.pythontutorial.net/tkinter/tkinter-grid/
def tkForm(fields):
    """Show a simple tkinter entry form and return the edited values.

    args:
        fields: {'label1': 'defaultValue1', ...}
    returns:
        dict with the (possibly modified) values, or {} if cancelled
        (Esc key, [Esc] button, or window close).

    Example (opens a GUI window, so intentionally not a runnable doctest):
        tkForm({'Imię': 'iii', 'Imię 2': 'iii 2', 'Nazwisko': 'Nnn'})
        -> {'Imię': 'iii 1', 'Imię 2': 'iii 2', 'Nazwisko': 'nnn 3'}
    """
    master = tk.Tk()
    entries = {}
    # One label + entry row per field; first entry grabs keyboard focus.
    for i, (field, defVal) in enumerate(fields.items()):
        tk.Label(master, text=field).grid(row=i, sticky=tk.E, padx=3)
        ent = tk.Entry(master); ent.grid(row=i, column=1, padx=5, pady=5); ent.insert(0, defVal)
        if i==0: ent.focus_set()
        entries[field] = ent # ent.get() - value
    # Esc/destroy cancels; Ok/Return leaves mainloop with widgets still alive.
    tk.Button(master, text='Esc', command=master.destroy).grid(row=len(fields), column=0, ipadx=5, pady=9)
    master.bind('<Escape>', lambda _: master.destroy())
    tk.Button(master, text='Ok', command=master.quit).grid(row=len(fields), column=1, ipadx=5, pady=9)
    master.bind('<Return>', lambda _: master.quit()) # [Enter] = [Ok]
    master.mainloop()
    try: # [Ok] - get modified values
        entries_di = {}
        for k,v in entries.items():
            entries_di[k] = v.get()
        master.destroy()
    except tk.TclError: # on master.destroy() = [x] or [Esc] - cancel
        # Widgets are already gone, so .get() raised: treat as cancel.
        entries_di = {}
    return entries_di
def tkFormConf(fields, confCsv='conf_0.csv'):
    """tkFormConf(fields: dict, confCsv: str)->dict (saving data in Csv)

    fields: {'label1': 'defVal1', ...} - used if confCsv is missing (on start)
    confCsv - text file UTF-16 (=UCS-2) Little Endian with BOM with <tab> separator
    return: modified fields saved in confCsv or {} if Esc (confCsv remain unchanged)

    NOTE: running this doctest opens the tkForm GUI window and blocks
    until the user presses Ok; it is interactive by design.

    >>> with open('conf_0.csv', 'w', encoding='utf-16') as cnf: cnf.write('') # clean Csv for doctest
    0
    >>> tkFormConf( {'Imię':'iii 1', 'Imię 2':'iii 2', 'Nazwisko':'nnn 3'}, confCsv='conf_0.csv')
    {'Imię': 'iii 1', 'Imię 2': 'iii 2', 'Nazwisko': 'nnn 3'}
    """
    try: # ordered dict py 3.7+
        # Each CSV row is "label<TAB>value".
        with open(confCsv, 'r', encoding='utf-16') as cnf:
            di = dict([row.split('\t') for row in cnf.read().splitlines()])
    except FileNotFoundError:
        di = {}
    if not di:
        # No saved config yet: fall back to the caller's defaults.
        di = fields
    di = tkForm( di ) # show and edit form
    if not di: # ie. [Esc]
        return {}
    else: #[Ok]
        # Persist the edited values back to the CSV (UTF-16, CRLF line ends).
        with open(confCsv, 'w', newline='\r\n', encoding='utf-16') as cnf:
            cnf.write('\n'.join(['\t'.join(kv) for kv in di.items()]))
        return di
if __name__ == '__main__':
    # from TkEntryWidget import tkForm
    # di = tkForm( {'Imię':'Iii', 'Imię 2': 'iii 2', 'Nazwisko': 'Nnn'} )
    # print(di) # {'Imię': 'Iii', 'Imię 2': 'iii 2', 'Nazwisko': 'Nnn'}
    # NOTE: testmod() runs the tkFormConf doctest, which opens a GUI window.
    print("doctest: press ENTER")
    import doctest
    doctest.testmod()
| 3.203125 | 3 |
GA.py | JoelYYoung/genetic-algorithm-on-tsp | 0 | 12759776 | <reponame>JoelYYoung/genetic-algorithm-on-tsp<filename>GA.py
import random
import math
import matplotlib.pyplot as plt
MAPWIDTH = 100  # width of the map (max x coordinate for cities)
MAPLEN = 200  # length of the map (max y coordinate for cities)
class Path:
    '''represent hamiltonian cycle.

    attributes:
        num(int):number of cities in the cycle
        path(list):sequence of city IDs as listed in city_map
        city_map(CityMap):related map of cities
    '''

    def __init__(self, city_map, path_list=None):
        # NOTE: default changed from a mutable `[]` to None (same semantics:
        # an empty/missing path_list means "start from a random permutation").
        self.num = city_map.num
        self.city_map = city_map
        if not path_list:
            self.path = list(range(self.num))
            random.shuffle(self.path)
        else:
            # Defensive copy so the caller's list is never aliased.
            self.path = list(path_list)

    def get_distance(self):
        '''return total distance of the cycle (wraps from last city to first).'''
        distance = 0
        for i in range(self.num):
            distance += self.city_map.map[self.path[i]][self.path[(i+1)%self.num]]
        return distance

    def crossover_path(self, partner):
        '''crossover with partner in place and return nothing.

        A random segment [lo, hi] is swapped between the two paths, then
        duplicates outside the segment are swapped back pairwise so both
        paths remain valid permutations.

        args:
            partner(Path):path to crossover with
        '''
        lo = random.randint(0, self.num-1)
        hi = random.randint(lo, self.num-1)
        self.path[lo: hi+1], partner.path[lo: hi+1] = \
            partner.path[lo: hi+1], self.path[lo: hi+1]
        # Repair duplicates introduced by the segment swap.
        check_area = list(range(self.num))[0: lo] +\
            list(range(self.num))[hi+1: self.num]
        ptn_s = 0
        for i in check_area:
            if self.path[i] in self.path[lo:hi+1]:
                for j in check_area[ptn_s:]:
                    if partner.path[j] in partner.path[lo:hi+1]:
                        self.path[i], partner.path[j] =\
                            partner.path[j], self.path[i]
                        ptn_s += 1
                        break
                    ptn_s += 1
class CityMap:
    '''representation of graph of cities (complete graph, adjacency matrix).'''

    def __init__(self, num):
        self.num = num
        # Generate random city coordinates on the map.
        self.city = [(random.randrange(MAPWIDTH), random.randrange(MAPLEN))
                     for _ in range(num)]
        # Assume every pair of cities is connected; precompute all pairwise
        # distances into an adjacency matrix.
        self.map = [[distance(self.city[i], self.city[j]) for j in range(num)]
                    for i in range(num)]
class Group:
    '''representation of a population in the genetic algorithm.'''

    def __init__(self, city_map, init_size):
        '''constructor of Group class.

        args:
            city_map(CityMap):city_map that the group is based on
            init_size(int):initial size of group
        '''
        self.city_map = city_map  # delegation: a shared reference, not a copy
        self.init_size = init_size
        self.path = [Path(city_map) for _ in range(init_size)]

    def score(self, save_rate=1.0):
        '''evaluate every path and return a list of survival scores in (0, save_rate].

        Scores are inverse distances normalized by the best path's score.

        args:
            save_rate(float):highest surviving possibility
        '''
        result = []
        for i in range(self.init_size):
            result.append(float(1) / self.path[i].get_distance())
        max_possibility = max(result)
        result = [s / max_possibility * save_rate for s in result]
        return result

    def revolve(self, variation_rate):
        '''run one generation: mutation of weak paths, then pairwise crossover.

        args:
            variation_rate(float):ratio of genes to swap when mutating
                (number of swaps = variation_rate * number of cities)
        '''
        num_swaps = int(variation_rate * self.city_map.num)
        score = self.score()
        max_index = score.index(max(score))
        result = []
        for i in range(self.init_size):
            if random.random() > score[i]:  # replace this path by a mutant of the best
                # BUGFIX: the inner loop previously reused `i` (and shadowed
                # the builtin `list`), clobbering the outer index so the
                # mutant was stored at index num_swaps-1 instead of i.
                genes = self.path[max_index].path.copy()
                for _ in range(num_swaps):
                    a = random.randrange(0, self.city_map.num)
                    b = random.randrange(0, self.city_map.num)
                    genes[a], genes[b] = genes[b], genes[a]
                self.path[i] = Path(self.city_map, genes)
                result.append(0)
            else:
                result.append(1)
        # Crossover surviving (non-mutated) paths in random pairs.
        good_index = [entry[0] for entry in enumerate(result) if entry[1] == 1]
        random.shuffle(good_index)
        for i in range(int(len(good_index) / 2)):
            self.path[good_index[2*i]].crossover_path(self.path[good_index[2*i+1]])

    def show(self, choice=0):
        '''display a path as a matplotlib figure.

        args:
            choice(int):1-based index of the path to display
        note:
            if choice is omitted (0), the shortest path in the group is shown.
            choice must be within 1..len(self.path) or an Exception is raised.
        '''
        if choice == 0:
            score = self.score()
            choice = score.index(max(score))
        elif choice > 0 and choice < len(self.path) + 1:
            choice -= 1
        else:
            # BUGFIX: `raise(Exception, "...")` is invalid in Python 3;
            # raise a constructed exception instance instead.
            raise Exception("Wrong choice value.")
        plt.figure(figsize=(10, 10), dpi=100)
        xs = [c[0] for c in self.city_map.city]
        ys = [c[1] for c in self.city_map.city]
        plt.scatter(xs, ys)
        shown = self.path[choice]
        for i in range(shown.num):
            a = self.city_map.city[shown.path[i]]
            b = self.city_map.city[shown.path[(i + 1) % shown.num]]
            plt.plot([a[0], b[0]], [a[1], b[1]])
        plt.show()
def distance(city1, city2):
    '''return the Euclidean distance between two cities given as (x, y) tuples.'''
    # math.hypot is the idiomatic (and numerically safer) sqrt(dx^2 + dy^2).
    return math.hypot(city1[0] - city2[0], city1[1] - city2[1])
if __name__== '__main__':
    # Demo run: evolve a population of 1000 random tours over 20 generations.
    init_size = 1000
    epoch = 20
    city_map = CityMap(100)
    group = Group(city_map, init_size)
    result = []
    group.show()
    for i in range(epoch):
        # NOTE(review): revolve() documents variation_rate as a ratio, but 10
        # is passed here (=> 10 * num swaps per mutation); confirm intent.
        group.revolve(10)
        score = group.score(0.8)
        min_length = group.path[score.index(max(score))].get_distance()
        print("min length:", min_length)
        result.append(min_length)
    group.show()
    # Plot best tour length per generation.
    plt.plot(result)
    plt.show()
| 3.109375 | 3 |
tutorial/1_text_format.py | star14ms/manim | 1 | 12759777 | from manimlib import *
# --- Basic text display: Write animation vs. instant add, Text vs. Tex ---
class _01_WriteText(Scene):
    def construct(self):
        text = Text("This is a regular text")
        self.play(Write(text))  # animated stroke-by-stroke drawing
        self.wait(3)
class _02_AddText(Scene):
    def construct(self):
        text = Text("This is a regular text")
        self.add(text)  # appears instantly, no animation
        self.wait(3)
class _03_Formula(Scene):
    def construct(self):
        formula = Tex("This is a formula")  # Tex renders via LaTeX
        self.play(Write(formula))
        self.wait(3)
# NOTE(review): Text() renders plain glyphs; the $...$ below stays literal
# (only Tex objects go through LaTeX) — confirm the tutorial's intent.
class _04_TypesOfText(Scene):
    def construct(self):
        tipesOfText = Text("""
            This is a regular text,
            $this is a formula$,
            $$this is a formula$$
            """)
        self.play(Write(tipesOfText))
        self.wait(3)
class _05_TypesOfText2(Scene):
    def construct(self):
        tipesOfText = Text("""
            This is a regular text,
            $\\frac{x}{y}$,
            $$x^2+y^2=a^2$$
            """)
        self.play(Write(tipesOfText))
        self.wait(3)
class _06_DisplayFormula(Scene):
    def construct(self):
        tipesOfText = Text("""
            This is a regular text,
            $\\displaystyle\\frac{x}{y}$,
            $$x^2+y^2=a^2$$
            """)
        self.play(Write(tipesOfText))
        self.wait(3)
# --- Positioning: edges, corners, absolute and relative placement ---
class _07_TextInCenter(Scene):
    def construct(self):
        text = Text("Text")  # default placement is the scene center
        self.play(Write(text))
        self.wait(3)
class _08_TextOnTopEdge(Scene):
    def construct(self):
        text = Text("Text")
        text.to_edge(UP)
        self.play(Write(text))
        self.wait(3)
class _09_TextOnBottomEdge(Scene):
    def construct(self):
        text = Text("Text")
        text.to_edge(DOWN)
        self.play(Write(text))
        self.wait(3)
class _10_TextOnRightEdge(Scene):
    def construct(self):
        text = Text("Text")
        text.to_edge(RIGHT)
        self.play(Write(text))
        self.wait(3)
class _11_TextOnLeftEdge(Scene):
    def construct(self):
        text = Text("Text")
        text.to_edge(LEFT)
        self.play(Write(text))
        self.wait(3)
class _12_TextInUpperRightCorner(Scene):
    def construct(self):
        text = Text("Text")
        text.to_edge(UP+RIGHT)  # direction vectors add to reach corners
        self.play(Write(text))
        self.wait(3)
class _13_TextInLowerLeftCorner(Scene):
    def construct(self):
        text = Text("Text")
        text.to_edge(LEFT+DOWN)
        self.play(Write(text))
        self.wait(3)
class _14_CustomPosition1(Scene):
    def construct(self):
        textM = Text("Text")
        textC = Text("Central text")
        textM.move_to(0.25*UP)  # absolute move in scene coordinates
        self.play(Write(textM),Write(textC))
        self.wait(3)
class _15_CustomPosition2(Scene):
    def construct(self):
        textM = Text("Text")
        textC = Text("Central text")
        textM.move_to(1*UP+1*RIGHT)
        self.play(Write(textM),Write(textC))
        self.wait(1)
        textM.move_to(1*UP+1*RIGHT)  # same target: no visible change
        self.play(Write(textM))
        self.wait(3)
# --- Relative placement, shifting, and incremental rotation ---
class _16_RelativePosition1(Scene):
    def construct(self):
        textM = Text("Text")
        textC = Text("Reference text")
        textM.next_to(textC,LEFT,buff=1)  # place relative to another mobject
        self.play(Write(textM),Write(textC))
        self.wait(3)
class _17_RelativePosition2(Scene):
    def construct(self):
        textM = Text("Text")
        textC = Text("Reference text")
        textM.shift(UP*0.1)  # shift is relative to current position
        self.play(Write(textM),Write(textC))
        self.wait(3)
class _18_RotateObject(Scene):
    def construct(self):
        textM = Text("Text")
        textC = Text("Reference text")
        textM.shift(UP)
        # Rotations accumulate: four PI/4 steps plus PI = two full turns total.
        textM.rotate(PI/4)
        self.play(Write(textM),Write(textC))
        self.wait(2)
        textM.rotate(PI/4)
        self.wait(2)
        textM.rotate(PI/4)
        self.wait(2)
        textM.rotate(PI/4)
        self.wait(2)
        textM.rotate(PI)
        self.wait(2)
# --- Flipping, LaTeX size commands, and LaTeX font commands ---
class _19_FlipObject(Scene):
    def construct(self):
        textM = Text("Text")
        textM.flip(UP)  # mirror about the vertical axis
        self.play(Write(textM))
        self.wait(2)
class _20_SizeTextOnLaTeX(Scene):
    def construct(self):
        # One sample per LaTeX size command, stacked top to bottom.
        textHuge = Text("{\\Huge Huge Text 012.\\#!?} Text")
        texthuge = Text("{\\huge huge Text 012.\\#!?} Text")
        textLARGE = Text("{\\LARGE LARGE Text 012.\\#!?} Text")
        textLarge = Text("{\\Large Large Text 012.\\#!?} Text")
        textlarge = Text("{\\large large Text 012.\\#!?} Text")
        textNormal = Text("{\\normalsize normal Text 012.\\#!?} Text")
        textsmall = Text("{\\small small Text 012.\\#!?} Texto normal")
        textfootnotesize = Text("{\\footnotesize footnotesize Text 012.\\#!?} Text")
        textscriptsize = Text("{\\scriptsize scriptsize Text 012.\\#!?} Text")
        texttiny = Text("{\\tiny tiny Texto 012.\\#!?} Text normal")
        textHuge.to_edge(UP)
        texthuge.next_to(textHuge,DOWN,buff=0.1)
        textLARGE.next_to(texthuge,DOWN,buff=0.1)
        textLarge.next_to(textLARGE,DOWN,buff=0.1)
        textlarge.next_to(textLarge,DOWN,buff=0.1)
        textNormal.next_to(textlarge,DOWN,buff=0.1)
        textsmall.next_to(textNormal,DOWN,buff=0.1)
        textfootnotesize.next_to(textsmall,DOWN,buff=0.1)
        textscriptsize.next_to(textfootnotesize,DOWN,buff=0.1)
        texttiny.next_to(textscriptsize,DOWN,buff=0.1)
        self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny)
        self.wait(3)
class _21_TextFonts(Scene):
    def construct(self):
        # One sample per LaTeX font-style command, stacked top to bottom.
        textNormal = Text("{Roman serif text 012.\\#!?} Text")
        textItalic = Text("\\textit{Italic text 012.\\#!?} Text")
        textTypewriter = Text("\\texttt{Typewritter text 012.\\#!?} Text")
        textBold = Text("\\textbf{Bold text 012.\\#!?} Text")
        textSL = Text("\\textsl{Slanted text 012.\\#!?} Text")
        textSC = Text("\\textsc{Small caps text 012.\\#!?} Text")
        textNormal.to_edge(UP)
        textItalic.next_to(textNormal,DOWN,buff=.5)
        textTypewriter.next_to(textItalic,DOWN,buff=.5)
        textBold.next_to(textTypewriter,DOWN,buff=.5)
        textSL.next_to(textBold,DOWN,buff=.5)
        textSC.next_to(textSL,DOWN,buff=.5)
        self.add(textNormal,textItalic,textTypewriter,textBold,textSL,textSC)
        self.wait(3)
| 3.0625 | 3 |
stdlib.py | abarnert/stdlib | 2 | 12759778 | <filename>stdlib.py<gh_stars>1-10
#!/usr/bin/env python3
import importlib
import shelve
import sys
# TODO: pkgresource this!
# Read-only shelf mapping name -> [(module, attr), ...]; opened at import
# time (I/O side effect) and kept open for the module's lifetime.
_db = shelve.open('names', 'r')
def __getattr__(self, name):
    """Resolve *name* against the shelf of known stdlib names.

    Raises AttributeError when the name is unknown, or when more than one
    module provides it (ambiguous).

    NOTE(review): PEP 562 module-level __getattr__ is called with a single
    argument (the attribute name). This two-argument signature only fits
    the <3.7 class shim below, where it is installed as a method; on 3.7+
    a direct module lookup would pass the name as `self` and omit `name`.
    Confirm intended usage.
    """
    if name not in _db:
        raise AttributeError(name)
    values = _db[name]
    if len(values) > 1:
        # More than one module exports this name: refuse to guess.
        possibilities = ', '.join(f"'{mod}.{name}'" for mod, name in values)
        raise AttributeError(f"'{name}' is ambiguous: could be {possibilities}")
    mod, name = values[0]
    try:
        return getattr(importlib.import_module(mod), name)
    except AttributeError:
        pass
    # Not an attribute of `mod`: fall back to importing it as a submodule.
    return importlib.import_module(f'{mod}.{name}')
def __dir__(self):
    """List only the unambiguous names (provided by exactly one module)."""
    return [name for name, values in _db.items() if len(values) == 1]
if sys.version_info < (3, 7):
    # Before PEP 562 (Python 3.7), modules cannot define __getattr__/__dir__
    # directly, so swap this module's class for a ModuleType subclass that
    # routes attribute access through the functions above.
    import types
    class DynamicModule(types.ModuleType):
        pass
    DynamicModule.__getattr__ = __getattr__
    DynamicModule.__dir__ = __dir__
    sys.modules[__name__].__class__ = DynamicModule
| 2.421875 | 2 |
imagenet/regression-pruning/models/vgg_5x.py | TanayNarshana/rethinking-network-pruning | 3 | 12759779 | <reponame>TanayNarshana/rethinking-network-pruning<gh_stars>1-10
import math
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = [
'vgg_5x', 'vgg_official',
]
model_urls = {
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
}
class VGG(nn.Module):
    """VGG-style classifier: a conv feature extractor followed by the
    standard three-layer fully-connected head.

    Args:
        features (nn.Module): convolutional feature extractor (see make_layers).
        num_classes (int): size of the final classification layer.
        init_weights (bool): apply He/normal initialization when True.
    """

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        # Classifier expects features to emit 512 channels at 7x7 spatial size.
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        # Flatten per-sample before the fully-connected head.
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        """He init for convs, N(0, 0.01) for linears, unit scale for BN."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # BUGFIX: nn.init.kaiming_normal is a long-deprecated alias;
                # use the in-place kaiming_normal_ (same semantics).
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Build the VGG feature extractor from a channel config list.

    Each int in *cfg* adds a 3x3 conv (+ optional BatchNorm) + ReLU;
    the sentinel 'M' adds a 2x2 max-pool.
    """
    layers = []
    channels = 3  # RGB input
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(spec))
        layers.append(nn.ReLU(inplace=True))
        channels = spec
    return nn.Sequential(*layers)
# Channel configuration of the 5x-pruned VGG-16 ('M' = 2x2 max-pool).
cfg_5x = [24, 22, 'M', 41, 51, 'M', 108, 89, 111, 'M', 184, 276, 228, 'M', 512, 512, 512, 'M']
# Channel configuration of the official (unpruned) VGG-16, configuration "D".
cfg_official = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
def vgg_5x(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D"), 5x-pruned channel widths.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg_5x, False), **kwargs)
    if pretrained:
        # NOTE(review): this URL is the *official* VGG16 checkpoint; loading
        # it into the pruned cfg_5x architecture will fail with state_dict
        # shape mismatches. Confirm the intended checkpoint for this model.
        model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
    return model
def vgg_official(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D"), official channel widths.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Skip random init; weights are fully replaced by the checkpoint.
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg_official, False), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
    return model
image_rename/api/utils.py | CarsonSlovoka/image-rename | 0 | 12759780 | from contextlib import contextmanager
from pathlib import Path
import os
from typing import Callable, NamedTuple, Type
@contextmanager
def work_dir(dir_path: Path):
"""
Path('.') will change.
"""
org_dir_path = Path(os.getcwd())
os.chdir(dir_path)
try:
yield
finally:
os.chdir(org_dir_path)
@contextmanager
def after_end(cb_fun: Callable):
"""
with after_end(cb_fun) as cb_fun:
...
with after_end(cb_fun=lambda: shutil.rmtree(temp_dir)) as _: # make sure the temp_dir will remove after finished.
...
with after_end(cb_fun=lambda: [os.remove(file) for file in [_ for _ in work_dir.glob('*.*') if _.suffix[1:] in ('html',)]]) as _
...
"""
try:
yield cb_fun
finally:
cb_fun()
def init_namedtuple(init_func_name):
"""
Run the job when the class is born.
USAGE::
@init_namedtuple('init_xxx')
class MyClass(NamedTuple):
def init_xxx(self):
...
"""
def wrap(class_obj: Type[NamedTuple]):
def new_instance(*args, **kwargs):
instance_obj = class_obj(*args, **kwargs)
init_func = getattr(instance_obj, init_func_name)
if init_func:
init_func()
return instance_obj
return new_instance
return wrap
| 2.703125 | 3 |
tools/tcam-capture/tcam_capture/ROICollection.py | vishal-prgmr/tiscamera | 0 | 12759781 | <reponame>vishal-prgmr/tiscamera
# Copyright 2018 The Imaging Source Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tcam_capture.PropertyWidget import PropertyWidget, Prop
from tcam_capture.ROIGroup import ROIGroup
from tcam_capture.ROIRectItem import ROIRectItem
from tcam_capture.ResizeableRectItem import ResizeableRectItem, ResizeableRectItemSettings
from PyQt5.QtWidgets import (QWidget, QVBoxLayout,
QGroupBox, QHBoxLayout, QFormLayout,
QPushButton, QCheckBox,
QLabel, QGraphicsItem, QSizePolicy)
from PyQt5.QtGui import QColor
from PyQt5.QtCore import pyqtSignal, QRect, QRectF
import logging
log = logging.getLogger(__name__)
class ROICollection(QWidget):
"""
This widget contains the groubox that is responsible
for ROI property display and ROI visualization and selection
It creates the ROIRectItem for visualization
Setting of the PropertyWidgets is done here.
"""
selection_finished = pyqtSignal(QRect)
def __init__(self, group: ROIGroup, parent=None):
super(ROICollection, self).__init__(parent)
self.group = group
self.name = self.group.name
log.info("Created Collection with name: {}".format(self.name))
self.__setup_ui()
self.display_area = None
self.rubberband = None
self.selection_active = False
# bool telling us if the ROI rect was displayed before making a selection
self.rect_was_displayed = False
self.selection_finished.connect(self.apply_selection)
def __setup_ui(self):
self.layout = QVBoxLayout()
self.groupbox = QGroupBox(self.name)
self.setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Minimum)
self.layout.addWidget(self.groupbox)
self.__populate_groupbox()
self.setLayout(self.layout)
def __populate_groupbox(self):
"""
"""
layout = QVBoxLayout()
self.groupbox.setLayout(layout)
first_line = QHBoxLayout()
self.visibility_label = QLabel("Show ROI:")
first_line.addWidget(self.visibility_label)
self.visibility_checkbox = QCheckBox(self)
self.visibility_checkbox.setCheckable(True)
self.visibility_checkbox.toggled.connect(self.__checkbox_cb)
first_line.addWidget(self.visibility_checkbox)
self.select_button = QPushButton("+", self)
self.select_button.setToolTip("Select ROI with the mouse")
self.select_button.clicked.connect(self.activate_selection)
first_line.addWidget(self.select_button)
layout.addLayout(first_line)
form_layout = QFormLayout()
layout.addLayout(form_layout)
for prop in self.group.properties:
form_layout.addRow(prop.prop.name, prop)
def __checkbox_cb(self):
"""
SLOT for visibility_checkbox
"""
self.toggle_visibility(self.visibility_checkbox.isChecked())
def __add_roi_rect(self):
"""
Creates a ROI Widgets and adds it to the display area
"""
settings = ResizeableRectItemSettings(50,
QColor(self.group.border_color),
self.group.get_min_size(),
self.group.get_max_size())
rect = QRectF(self.group.get_position(),
self.group.get_size())
self.rubberband = ROIRectItem(rect, settings, self.group)
self.rubberband.setFlag(QGraphicsItem.ItemIsMovable)
self.rubberband.position = self.group.get_position()
self.rubberband.size = self.group.get_size()
for prop in self.group.properties:
prop.value_changed.connect(self.__update_rubberband_values)
self.display_area.add_roi(self.rubberband)
def __remove_rubberband(self):
if not self.rubberband:
return
self.display_area.remove_roi(self.rubberband)
self.rubberband = None
for prop in self.group.properties:
prop.value_changed.disconnect(self.__update_rubberband_values)
def toggle_visibility(self,
be_visible: bool):
"""
be_visible: bool saying if ROI should be visible as an overlay
"""
if be_visible:
if self.rubberband:
self.__remove_rubberband()
self.__add_roi_rect()
else:
self.__remove_rubberband()
def activate_selection(self):
"""
"""
if self.selection_active:
return
if self.rubberband:
self.rect_was_displayed = True
self.__remove_rubberband()
self.display_area.start_roi_capture(self.selection_finished)
self.selection_active = True
def apply_selection(self, rect: QRect):
self.selection_active = False
self.group.set_position(rect.x(), rect.y())
self.group.set_size(rect.width(), rect.height())
if self.rect_was_displayed and self.visibility_checkbox.isChecked():
self.__add_roi_rect()
self.rect_was_displayed = False
def __update_rubberband_values(self, prop: PropertyWidget):
"""
SLOT for value_changed signal from the PropertyWidgets
"""
if not self.rubberband:
return
if self.rubberband.mouse_pressed:
return
if ("Top" in prop.prop.name or
"Left" in prop.prop.name):
self.rubberband.position = self.group.get_position()
if "Left" in prop.prop.name:
self.rubberband.position.x = prop.prop.value
elif "Top" in prop.prop.name:
self.rubberband.position.y = prop.prop.value
# log.info("scenePos{}".format(self.rubberband.scenePos()))
# log.info("pos {}".format(self.group.get_position()))
self.rubberband.update_pos()
elif ("Width" in prop.prop.name or
"Height" in prop.prop.name):
if "Width" in prop.prop.name:
if int(self.rubberband.size.width()) == prop.prop.value:
return
if "Height" in prop.prop.name:
if int(self.rubberband.size.height()) == prop.prop.value:
return
self.rubberband.size = self.group.get_size()
# log.info("size {}".format(self.group.get_size()))
self.rubberband.update_rect()
self.display_area.update()
| 1.679688 | 2 |
manage.py | ncrmro/gringotts | 0 | 12759782 | <gh_stars>0
from flask_graphql import GraphQLView
from src import app, init_db, schema, db
@app.route('/', methods=['GET', 'POST'])
def lambda_handler(event=None, context=None):
return 'hello from Flask!'
app.add_url_rule('/graphql',
view_func=GraphQLView.as_view('graphql', schema=schema,
graphiql=True))
@app.cli.command()
def initdb():
"""Initialize the database."""
init_db()
@app.teardown_appcontext
def shutdown_session(exception=None):
db.remove()
if __name__ == '__main__':
init_db()
app.run()
| 2.234375 | 2 |
3getdataEveryGroup.py | gm130512/SentenceSimilarityBERT | 0 | 12759783 | # coding=utf-8
#python 3getdataEveryGroup.py ./data/data1234.xlsx ./data/data5.xlsx ./data_11_4/
from langconv import Converter
import pandas as pd
import csv
import math
import re
import argparse
def rmSymbol(sent):
return re.sub("|/\n", "", sent)
'''
input : 4個同組的question
output : [[][]
[][]
[][]] 6個question pair
'''
class Combination:
def combine(self, text, n, k):
res = []
self.backtrack(text, n, k, res, [], 1)
return res
def backtrack(self, text, n, k, res, path, index):
if len(path) == k:
res.append(path)
return
for i in range(index, n + 1):
self.backtrack(text, n, k, res, path + [text[i - 1]], i + 1)
if __name__ == '__main__':
#give number of negative sample 1 : 1000 => 6 * 1000 * 999 = 5994000
#取1/3 = 1998000
#取1/3 = 666000 這樣是6000 : 666000 = 1 : 100
numOfNegSample = 666000
parser = argparse.ArgumentParser()
parser.add_argument("input_file")
parser.add_argument("input_file2")
parser.add_argument("output_directory")
args = parser.parse_args()
data = pd.read_excel(args.input_file, sheet_name='Sheet1', header=0)
data2 = pd.read_excel(args.input_file2, sheet_name='Sheet1', header=0)
n = len(data)
m = len(data2)
#n = 4000
#5% 會生成neg數量:60,0000 == (4000 * 0.05 = 200) * 3 * 1000
#2% 會生成neg數量:24,0000 == (4000 * 0.02 = 80) * 3 * 1000. 但dev為 * 4 * 1000 = 32000
#0.2% 會生成neg數量:2,4000 == (4000 * 0.002 = 8) * 3 * 1000
#0.2% 會生成neg數量:1,2000
#0.00025 總共生成4000(每一個問題類一個)
perc = min(3996, math.floor(n * 0.001))
print("negative sample number %d" % perc)
allGroup = [] #for negative sample
with open(args.output_directory + '/train.csv','w', newline='', encoding="utf8") as tCSV, \
open(args.output_directory + '/dev.csv','w', newline='', encoding="utf8") as dCSV:
tCSVW = csv.writer(tCSV, lineterminator='\n')
dCSVW = csv.writer(dCSV, lineterminator='\n')
for i in range(0, n, 4):
curGroup = []
for j in range(4):
index = i + j
curGroup.append(data['測試題'][index])
allGroup.append(curGroup)
#create QuestionPair
combination = Combination()
curQuestionPairs = combination.combine(curGroup, 3, 2)
#add postitive Question pair to .csv
for index, QuestionPair in enumerate(curQuestionPairs):
tCSVW.writerow(
[1,
rmSymbol(QuestionPair[0]),
rmSymbol(QuestionPair[1])])
#add postitive Question pair to .dev
for k in range(0, 3, 1):
dCSVW.writerow(
[1, rmSymbol(curGroup[3]),
rmSymbol(curGroup[k])])
#At last, add negative Question pair to .csv
if i == n - 4:
for firstIndex in range(len(allGroup)):
otherData = data[
data['正確標準問題'] != data['正確標準問題'][firstIndex * 4]]
otherQuestion = otherData['測試題']
for firstEle in range(3):
otherQuestion = otherQuestion.sample(n=perc)
for negativeQuestion in otherQuestion.values:
tCSVW.writerow([
0,
rmSymbol(allGroup[firstIndex][firstEle]),
rmSymbol(negativeQuestion)
])
for firstEleForDev in range(4):
otherQuestionForDev = otherQuestion.sample(n=perc)
for negativeQuestionForDev in otherQuestionForDev.values:
dCSVW.writerow([
0,
rmSymbol(allGroup[firstIndex][firstEleForDev]),
rmSymbol(negativeQuestionForDev)
])
##write to test.csv
# s = 0
# with open(args.output_directory + '/test.csv',
# 'w',
# newline='',
# encoding="utf8") as testCSV:
# testCSVW = csv.writer(testCSV, lineterminator='\n')
# for i in range(m):
# for j in range(n):
# testCSVW.writerow(
# [0, rmSymbol(data2['測試題'][i]),
# rmSymbol(data['測試題'][j])])
##for 查看
# s = s + 1
# if s > 4002:
# break
# if s > 4002:
# break
| 2.921875 | 3 |
application.py | AndrewDongminYoo/myproject | 2 | 12759784 | # -*- coding: utf-8 -*-
from flask import Flask, request, jsonify, render_template, json, redirect, url_for
from flask_cors import CORS
from pymongo import MongoClient # 몽고디비
import requests # 서버 요청 패키지
import os
from pprint import pprint
import hashlib
import jwt
import datetime
from urllib.parse import parse_qsl
KAKAO_REDIRECT_URI = 'https://www.mysmallmeal.shop/redirect'
application = Flask(__name__)
application.config['TEMPLATES_AUTO_RELOAD'] = True
cors = CORS(application, resources={r"/*": {"origins": "*"}})
if application.env == 'development':
os.popen('mongod')
KAKAO_REDIRECT_URI = 'http://localhost:5000/redirect'
# 배포 전에 원격 db로 교체!
client = MongoClient(os.environ.get("DB_PATH"))
os.environ['JWT_KEY'] = 'JARYOGOOJO'
SECRET_KEY = os.environ.get("JWT_KEY")
client_id = 'b702be3ada9cbd8f018e7545d0eb4a8d'
db = client.dbGoojo
restaurant_col = db.restaurant
bookmarked_col = db.bookmark
users = db.users
members = db.members
print(client.address)
# sort_list = 기본 정렬(랭킹순), 별점 순, 리뷰 수, 최소 주문 금액순, 거리 순, 배달 보증 시간순
sort_list = ["rank", "review_avg", "review_count", "min_order_value", "distance"]
order = sort_list[0]
headers = {'accept': 'application/json', 'accept-encoding': 'gzip, deflate, br',
'accept-language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
'content-type': 'application/x-www-form-urlencoded',
'referer': 'https://www.yogiyo.co.kr/mobile/',
'sec-ch-ua': '"Chromium";v="94", "Google Chrome";v="94", ";Not A Brand";v="99"',
'sec-ch-ua-mobile': '?0', 'sec-ch-ua-platform': '"Windows"', 'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors', 'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/94.0.4606.71 Safari/537.36',
'x-apikey': 'iphoneap', 'x-apisecret': X_API_SECRET}
@application.route('/')
def hello_world(): # put application's code here
return render_template("index.html")
@application.route('/login')
def login():
msg = request.args.get("msg")
return render_template('login.html', ID=client_id, URI=KAKAO_REDIRECT_URI, msg=msg)
@application.route('/register')
def register():
return render_template('register.html')
@application.route('/kakao_login')
def kakao_login():
return render_template('kakao_login.html')
@application.route('/api/login', methods=['POST'])
def api_login():
request.form = json.loads(request.data)
pprint(request.form)
email_receive = request.form['email']
password = request.form['pw']
# 회원가입 때와 같은 방법으로 pw를 암호화합니다.
hashed_pw = hashlib.sha256(password.encode('utf-8')).hexdigest()
# id, 암호화된 pw 을 가지고 해당 유저를 찾습니다.
result = members.find_one({'email': email_receive, 'pw': hashed_pw}, {"_id": False})
# 찾으면 JWT 토큰을 만들어 발급합니다.
if result:
pprint(result)
# JWT 토큰에는, payload 와 시크릿키가 필요합니다.
# 시크릿키가 있어야 토큰을 디코딩(=풀기) 해서 payload 값을 볼 수 있습니다.
# 아래에선 id와 exp 를 담았습니다. 즉, JWT 토큰을 풀면 유저 ID 값을 알 수 있습니다.
# exp 에는 만료시간을 넣어줍니다. 만료시간이 지나면, 시크릿키로 토큰을 풀 때 만료되었다고 에러가 납니다.
nickname_receive = result['nick']
payload = {
'email': email_receive,
'nick': nickname_receive,
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=3)
}
token = jwt.encode(payload=payload, key=SECRET_KEY, algorithm='HS256')
pprint(payload)
# token 을 줍니다.
return jsonify({'result': 'success', 'token': token})
# 찾지 못하면
else:
return jsonify({'result': 'fail', 'msg': '아이디/비밀번호가 일치하지 않습니다.'})
@application.route('/api/register', methods=['POST'])
def api_register():
request.form = json.loads(request.data)
email_receive = request.form['email']
password = request.form['pw']
nickname = request.form['nickname']
uuid = request.form['uuid']
pprint(request.form)
print('api_register uuid', uuid)
hashed_pw = hashlib.sha256(password.encode('utf-8')).hexdigest()
user_exists = bool(members.find_one({"email": email_receive}))
if user_exists:
return jsonify({'result': 'fail', 'msg': '같은 이메일의 유저가 존재합니다.'})
find_member = members.find_one({"email": email_receive}, {"_id": False})
if not find_member:
user = {
'provider': 'mysmallmeal',
'email': email_receive,
'pw': hashed_pw,
'nick': nickname,
'uuid': uuid,
}
pprint(user)
members.update_one({"email": email_receive}, {"$set": user}, upsert=True)
return jsonify({'result': 'success', 'user': nickname, 'msg': '가입이 완료되었습니다.'})
return jsonify({'result': 'fail', 'msg': '가입에 실패했습니다.'})
@application.route('/api/valid', methods=['GET'])
def api_valid():
"""
try 아래를 실행했다가, 에러가 있으면 except 구분으로 가란 얘기입니다.
token 을 시크릿키로 디코딩합니다.
보실 수 있도록 payload 를 print 해두었습니다. 우리가 로그인 시 넣은 그 payload 와 같은 것이 나옵니다.
payload 안에 id가 들어있습니다. 이 id로 유저정보를 찾습니다.
여기에선 그 예로 닉네임을 보내주겠습니다.
:return:
"""
token_receive = request.args.get('token')
try:
payload = jwt.decode(token_receive, key=SECRET_KEY, algorithms=['HS256'])
pprint(payload)
# find_member = members.find_one({'email': payload['email']}, {'_id': 0})
return jsonify({'result': 'success', 'nickname': payload['nick']})
except jwt.ExpiredSignatureError:
print("ExpiredSignatureError:: 로그인 시간이 만료되었습니다!")
return redirect(url_for("login", msg="login timeout"))
except jwt.exceptions.DecodeError:
print("DecodeError:: 로그인 정보가 없습니다!")
return redirect(url_for("login", msg="Cannot Login!"))
@application.route('/redirect')
def kakao_redirect():
# code 가져 오기
qs = dict(parse_qsl(request.query_string))
code = qs.get(b'code').decode('utf-8')
# 토큰요청
url = 'https://kauth.kakao.com/oauth/token'
body = {
"grant_type": "authorization_code",
"client_id": client_id,
"redirect_uri": KAKAO_REDIRECT_URI,
"code": code
}
token_header = {'Content-Type': 'application/x-www-form-urlencoded;charset=urf-8'}
req = requests.post(url=url, headers=token_header, data=body).json()
pprint(req)
# 사용자 정보
url = 'https://kapi.kakao.com/v2/user/me'
info_header = {'Authorization': f'Bearer {req["access_token"]}',
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8'}
user_info = requests.post(url, headers=info_header).json()
print(user_info)
kakao_account = user_info.get('kakao_account')
email = kakao_account.get('email')
user_id = user_info.get('id')
prop = user_info.get('properties')
nickname = "Guest"
if prop:
nickname = prop.get('nickname')
profile = prop.get("thumbnail_image")
print(nickname, profile)
user = {
'providerId': user_id,
'nick': nickname,
'provider': 'kakao',
'age': kakao_account.get('age_range')
}
print(user)
# db에 저장
members.update({'email': email},
{"$set": user}, upsert=True)
# jwt 토큰 발급
payload = {
'id': user_id,
'nick': nickname,
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=3)
}
token = jwt.encode(payload=payload, key=SECRET_KEY, algorithm='HS256')
# kakaoLogin 리다이렉트
return redirect(url_for("kakao_login",
token=token, providerId=user_id, email=email, nickname=nickname))
@application.route('/api/like', methods=['POST'])
def like():
"""
메인 로직 중 하나입니다. 웬만하면 건드리지 말기..!
사용자의 id와 점포의 id를 POST 요청의 바디에 담아와,
db에 해당하는 유저가 존재하는 지 확인하고, 있을 경우,
1. 좋아요를 클릭한 경우 점포를 restaurants DB 에도 등록하고, 사용자의 점포 리스트에도 등록한다.
2. 싫어요를 클릭한 경우 점포를 restaurants DB 에서 제외한다.\n
:return: Response(json)
"""
request.form = json.loads(request.data)
uuid = request.form.get('uuid') # uuid
_id = request.form.get('_id') # ssid
action = request.form.get('action')
min_order = request.form.get('min_order')
user = users.find_one({"uuid": uuid})
pprint(request.form)
put_restaurant(_id, min_order)
if action == 'like':
if not user:
good_list = [_id]
users.insert_one({"_id": uuid, "uuid": uuid, "like_list": good_list})
elif _id in user['like_list']:
pass
else:
good_list = user['like_list']
good_list.append(_id)
users.update_one({"_id": uuid, "uuid": uuid}, {"$set": {"like_list": good_list}}, upsert=True)
elif user and _id in user['like_list']:
good_list = user['like_list']
good_list.remove(_id)
users.update_one({"_id": uuid, "uuid": uuid}, {"$set": {"like_list": good_list}}, upsert=True)
return jsonify(user)
@application.route('/api/like', methods=['GET'])
def show_bookmark():
"""
사용자의 uuid 를 조회해 좋아요한 상품들의 리스트를 불러온다.
* 추가할 내용 restaurants DB 에서 해당 상품들 조회해 오기\n
:return: Response(json)
"""
uuid = request.args.get('uuid')
user = users.find_one({"uuid": uuid})
good_list = []
if user:
good_list = user['like_list']
restaurants = []
for restaurant in good_list:
rest = list(bookmarked_col.find({"_id": restaurant}))
if len(rest) > 0:
restaurants.extend(rest)
return jsonify({"user": user, "restaurants": restaurants})
@application.route('/api/shop', methods=['GET'])
def get_restaurant():
"""
위치 권한 허용 시 셋팅되는 기본 메소드. 요기요 서버에 사용자의 위도와 경도를 보내 주변 배달 점포를 조회해서
필요한 데이터만 가공해서 리스트 형태로 프론트 엔드에 넘긴다.\n
:return: Response(json)
"""
lat = request.args.get('lat')
long = request.args.get('lng')
global order
order = request.args.get('order')
if not order:
order = "rank"
url = f'https://www.yogiyo.co.kr/api/v1/restaurants-geo/?category=1인분주문&items=99&lat={lat}&lng={long}&order={order}'
res = requests.get(url, headers=headers).json()
shops = res.get('restaurants')
restaurants = list()
for shop in shops:
rest = dict()
if not bool(int(shop["phone"])):
continue
rest['_id'] = shop.get('id')
rest['name'] = shop.get('name')
rest['reviews'] = shop.get('review_count')
rest['owner'] = shop.get('owner_reply_count')
rest['categories'] = shop.get('categories')
rest['image'] = shop.get('thumbnail_url')
rest['logo'] = shop.get('logo_url')
rest['address'] = shop.get('address')
rest['rating'] = shop.get('review_avg')
rest['time'] = f"{shop.get('begin')[:5]} - {shop.get('end')[:5]}"
rest['min_order'] = shop.get('min_order_amount')
rest['lng'] = shop.get('lng')
rest['lat'] = shop.get('lat')
rest['phone'] = shop.get('phone')
restaurants.append(rest)
restaurant_col.update_one({"_id": shop['id']}, {"$set": rest}, upsert=True)
pprint(restaurants[0])
return jsonify(restaurants)
@application.route('/api/detail', methods=["GET"])
def show_modal():
_id = request.args.get('_id')
restaurant = bookmarked_col.find_one({"_id": int(_id)})
return jsonify(restaurant)
@application.route('/api/address', methods=["POST"])
def search_add():
data = request.get_data()
query = json.loads(data, encoding='utf-8')['query']
# query = request.json.get('query')
return jsonify(search_address(query))
#
#
# @application.route('api/weather', methods=["GET"])
# def declare_weather():
# weather_code = request.args.get('code')
# image_format = request.args.get('size')
# # result = weather.get_weather(code=weather_code, size=image_format)
# return jsonify({'result': result})
def put_restaurant(_id, min_order):
"""
즐겨찾기 버튼을 클릭한 점포를 데이터베이스에 저장합니다.
:param _id: 요기요 데이터베이스 상점 id
:param min_order: 최소 주문금액
:return: None
"""
if list(bookmarked_col.find({"_id": _id})):
return
url = 'https://www.yogiyo.co.kr/api/v1/restaurants/' + str(_id)
req = requests.post(url, headers=headers)
result = req.json()
print(result)
doc = {
"_id": _id,
"time": result.get("open_time_description"),
"phone": result.get("phone"),
"name": result.get("name"),
"categories": result.get("categories"),
"delivery": result.get("estimated_delivery_time"),
"address": result.get("address"),
"image": result.get("background_url"),
"min_order": min_order,
'lat': result.get("lat"),
'lng': result.get("lng"),
}
bookmarked_col.update_one({"_id": _id}, {"$set": doc}, upsert=True)
def search_address(query):
"""
사용자가 검색 창에 직접 주소를 입력했을 때, 카카오맵 api 를 통해 주소를 위도경도로 변환합니다.\n
:param query: 찾고자 하는 주소
:return: doc(dict) {
address: 찾고자 하는 주소 도로명 주소,
lat: 찾고자 하는 지역의 x좌표,
long: 찾고자 하는 지역의 y 좌표
}
"""
url = 'https://dapi.kakao.com/v2/local/search/address.json?query=' + query
_header = {
'Host': 'dapi.kakao.com',
'Authorization': 'KakaoAK <KEY>'}
req = requests.get(url, headers=_header)
result = req.json()
pprint(result)
documents = result['documents'][0]
address = documents['address_name']
lat = documents['y']
lng = documents['x']
doc = {
"address": address,
"lat": lat,
"long": lng
}
return doc
if __name__ == '__main__':
application.debug = True
application.run(port=8000, debug=True)
| 2.125 | 2 |
books/dongbin-na/part2_implementation/ipmt_1.py | livlikwav/Algorithms | 1 | 12759785 | import sys
def myinput():
return sys.stdin.readline()
N = int(myinput())
data = myinput().split()
dict = {
'L' : [0, -1],
'R' : [0, +1],
'U' : [-1, 0],
'D' : [+1, 0],
}
start = [1, 1]
for cmd in data:
next = [start[i] + dict[cmd][i] for i in range(2)]
# print(f'next is {next}')
if next[0] >= 1 and next[1] >= 1:
start = next
# print(f'start is {start}')
else: # out of map
pass
# print(f'start is {start}')
print(start)
'''
<Lesson learned>
list 길이 구하기 len(list)
size()나 length 같은거 아니다!
list 각 요소간의 합은 list comprehension 사용
list + list는 리스트 확장이다.
python의 비교 연산자는 &&이 아니라 and 이다
<Answer>
n = int(input())
x, y = 1, 1
plans = input().split()
dx = [0, 0, -1, 1]
dy = [-1, 1, 0, 0]
move_types = ['L', 'R', 'U', 'D']
for plan in plans:
for i in range(len(move_types)):
if plan == move_types[i]:
nx = x + dx[i]
ny = y + dy[i]
if nx < 1 or ny < 1 or nx > n or ny > n:
continue
x, y = nx, ny
print(x, y)
'''
| 3.3125 | 3 |
code/results/04_AUPRC_train_validation.py | data-intelligence-for-health-lab/delirium_prediction | 0 | 12759786 | import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
import random
import math
import os
import time
from sklearn.metrics import average_precision_score
# ------------------------------------------------------ loading libraries ----
# --- setting random seed -----------------------------------------------------
seed_n = 42
np.random.seed(seed_n)
random.seed(seed_n)
tf.random.set_seed(seed_n)
combination = 3057
# loading model
model = tf.keras.models.load_model('/project/M-ABeICU176709/delirium/data/outputs/models/{:06d}/model.hdf5'.format(combination))
# loading data
X_adm_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_adm5y_validation.pickle', 'rb'))
X_temp_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_temp_validation.pickle', 'rb'))
y_12h_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_12h_validation.pickle', 'rb'))
y_24h_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_24h_validation.pickle', 'rb'))
# loading data
X_adm_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_adm5y_train.pickle', 'rb'))
X_temp_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_temp_train.pickle', 'rb'))
y_12h_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_12h_train.pickle', 'rb'))
y_24h_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_24h_train.pickle', 'rb'))
# -----------------------------------------------------------------------------
for set in [('train', X_adm_train, X_temp_train, y_12h_train, y_24h_train), ('validation', X_adm_val, X_temp_val, y_12h_val, y_24h_val)]:
# Predicting y_12h and y_24h
results = model.predict(x = [set[1], set[2]],
verbose = 0)
y_12h_hat = results[0]
y_24h_hat = results[1]
AUPRC_12h = average_precision_score(set[3], y_12h_hat)
AUPRC_24h = average_precision_score(set[4], y_24h_hat)
AUPRC_mean = (AUPRC_12h + AUPRC_24h) / 2
print(f'set: {set[0]}, AUPRC_12h: {AUPRC_12h}, AUPRC_24h: {AUPRC_24h}, AUPRC_mean: {AUPRC_mean}')
| 2 | 2 |
tests/test_test_project.py | develtech/django-slugify-processors | 0 | 12759787 | import pytest
from django.apps import apps
@pytest.mark.django_db
def test_models_passthrough(settings):
MyModel = apps.get_model("test_app.MyModel")
entered = "c++"
expected = "c"
m = MyModel(title=entered)
m.save()
assert m.django_extensions_slug == expected
@pytest.mark.django_db
def test_models(settings):
settings.SLUGIFY_PROCESSORS = ["test_app.coding.slugify_programming"]
MyModel = apps.get_model("test_app.MyModel")
entered = "c++"
expected = "cpp"
m = MyModel(title=entered)
m.save()
assert m.django_extensions_slug == expected
| 2.40625 | 2 |
internetdefense/settings/dev.py | gnubrasil/idl-members | 175 | 12759788 | """
Settings specific to development environments
"""
from os import path
from settings.base import PROJECT_DIR, MIDDLEWARE_CLASSES, INSTALLED_APPS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path.join(PROJECT_DIR, 'data', 'data.db'),
}
}
DEBUG = True
TEMPLATE_DEBUG = True
SITE_ID = 1
INCLUDE_DOMAIN = 'localhost:8000'
INCLUDE_URL = INCLUDE_DOMAIN + '/include/'
STATIC_URL = '/static/'
def show_toolbar(request):
return True
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': show_toolbar,
}
INTERNAL_IPS = ('127.0.0.1', '10.0.1.3',)
MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INSTALLED_APPS = INSTALLED_APPS + [
'debug_toolbar',
]
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': path.join(PROJECT_DIR, 'cache'),
'TIMEOUT': 60 * 60 * 24 * 365
}
}
COMPRESS_ENABLED = True | 1.882813 | 2 |
waymo_toolkit/utils/box_utils.py | DapengFeng/waymo-toolkit | 9 | 12759789 | <gh_stars>1-10
import numpy as np
def get_box_transformation_matrix(box):
"""Create a transformation matrix for a given label box pose."""
tx, ty, tz = box.center_x, box.center_y, box.center_z
c = np.cos(box.heading)
s = np.sin(box.heading)
sl, sh, sw = box.length, box.height, box.width
return np.array(
[[sl * c, -sw * s, 0, tx], [sl * s, sw * c, 0, ty], [0, 0, sh, tz], [0, 0, 0, 1]]
)
def get_3d_box_projected_corners(vehicle_to_image, label):
"""Get the 2D coordinates of the 8 corners of a label's 3D bounding box.
vehicle_to_image: Transformation matrix from the vehicle frame to the image frame.
label: The object label
"""
box = label.box
# Get the vehicle pose
box_to_vehicle = get_box_transformation_matrix(box)
# Calculate the projection from the box space to the image space.
box_to_image = np.matmul(vehicle_to_image, box_to_vehicle)
# Loop through the 8 corners constituting the 3D box
# and project them onto the image
vertices = np.empty([2, 2, 2, 2])
for k in [0, 1]:
for l in [0, 1]:
for m in [0, 1]:
# 3D point in the box space
v = np.array([(k - 0.5), (l - 0.5), (m - 0.5), 1.0])
# Project the point onto the image
v = np.matmul(box_to_image, v)
# If any of the corner is behind the camera, ignore this object.
if v[2] < 0:
return None
vertices[k, l, m, :] = [v[0] / v[2], v[1] / v[2]]
vertices = vertices.astype(np.int32)
return vertices
| 3.078125 | 3 |
src/functional_tests/tests.py | adcarmichael/tracks | 0 | 12759790 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import unittest
import os
from django.test import LiveServerTestCase
from routes import models as mdl
from django.contrib.staticfiles.testing import StaticLiveServerTestCase, LiveServerTestCase
import socket
from routes.tests import sampledata
from django.test import TestCase
from selenium import webdriver
from django.test import Client
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def create_sample_data():
sampledata.add_gym()
sampledata.route_set_inactive(colour='yellow')
sampledata.route_set_active(colour='red')
sampledata.route_set_active(colour='black')
sampledata.create_auth_user()
sampledata.create_sample_route_record()
class FunctionalTestCase(LiveServerTestCase):
host = 'web'
def setUp(self):
self.browser = webdriver.Remote(
command_executor="http://selenium:4444/wd/hub",
desired_capabilities=DesiredCapabilities.CHROME
)
def test_user_registration(self):
breakpoint()
Client().get('/')
self.browser.get(self.live_server_url)
self.assertIn('Django', self.browser.title)
def tearDown(self):
self.browser.close()
class _base(StaticLiveServerTestCase):
user_name = 'user_test'
user_password = 'password'
admin_name = 'admin_test'
admin_password = 'password'
live_server_url = 'http://web:8000'
@classmethod
def setUpClass(self):
self.live_server_url = 'http://{}:8000'.format(
socket.gethostbyname(socket.gethostname())
)
self.browser = webdriver.Remote(
command_executor='http://selenium_hub:4444/wd/hub',
desired_capabilities=DesiredCapabilities.CHROME
)
self.browser.implicitly_wait(10)
# self.firefox = webdriver.Remote(
# command_executor='http://selenium_hub:4444/wd/hub',
# desired_capabilities=DesiredCapabilities.FIREFOX
# )
# self.firefox.implicitly_wait(10)
@classmethod
def tearDownClass(self):
self.browser.quit()
@classmethod
def create_superuser(self):
user = mdl.User.objects.create_superuser(
self.admin_name, '<EMAIL>', self.admin_password)
prof = mdl.Profile.objects.first()
prof.email_confirmed = True
prof.save
@classmethod
def create_user(self):
user = mdl.User.objects.create_user(
self.user_name, '<EMAIL>', self.user_password)
prof = mdl.Profile.objects.first()
prof.email_confirmed = True
prof.save
@classmethod
def create_user_and_add_cookie(self, is_admin=False):
if not is_admin:
self.create_user()
else:
self.create_superuser()
C = Client()
logged_in = C.login(username=self.user_name,
password=<PASSWORD>)
cookie = C.cookies['sessionid']
self.browser.add_cookie(
{'name': 'sessionid', 'value': cookie.value, 'secure': False, 'path': '/'})
self.browser.refresh() # need to update page for logged in user
class SiteTests(_base):
    """Smoke tests: the site is reachable and serves the expected title."""

    def setUp(self):
        super().setUp()

    def test_visit_site_with_chrome(self):
        self.browser.get(self.live_server_url)
        # NOTE(review): assertIn(member, container) checks that the page
        # title is a *substring* of 'ChalkTracks' -- confirm the argument
        # order is intended.
        self.assertIn(
            self.browser.title, 'ChalkTracks')

    def test_visit_site_with_chrome_(self):
        # Same check as above; the interactive breakpoint() left over from
        # debugging was removed so the suite can run unattended.
        self.browser.get(self.live_server_url)
        self.assertIn(
            self.browser.title, 'ChalkTracks')
class RoutesTests(_base):
    """End-to-end flows for the routes pages."""

    def test_home(self):
        self.browser.get(self.live_server_url)
        self.create_user_and_add_cookie()

    def test_user_recording_a_climb(self):
        """A logged-in user opens the routes page, clicks a route card and
        records a climb through the modal; the RouteRecord is persisted."""
        # The interactive breakpoint() left over from debugging was removed
        # so the suite can run unattended.
        create_sample_data()
        self.browser.get(self.live_server_url)
        self.create_user_and_add_cookie()
        # User goes to routes page
        self.browser.get(self.live_server_url + '/users/1/1/routes?grade=red')
        # User clicks on route one to record a climb
        route_card = self.browser.find_element_by_id('id-card-1')
        route_card.click()
        # a modal pops up with the option to record climb; the user clicks
        modal_record_climb = self.browser.find_element_by_id(
            'modal-sections-record-climb-1')
        modal_record_climb.click()
        # confirm that route has been recorded
        rr = mdl.RouteRecord.objects.all()
        self.assertEqual(rr[0].id, 1)
        self.assertEqual(rr[0].is_climbed, True)
# def test_visit_site_with_firefox(self):
# self.firefox.get('http://web')
# self.assertIn(self.firefox.title,
# 'Django: the Web framework for perfectionists with deadlines.')
# class NewUserRecord(LiveServerTestCase):
# def setUp(self):
# self.browser = webdriver.Chrome()
# def tearDown(self):
# self.browser.quit()
# def test_that_user_can_record_route_completion(self):
# # User goes to the routes page
# self.browser.get(self.live_server_url)
# breakpoint()
# # User selects green and rounte number 2 and marks it complete
# inputbox = self.browser.find_element_by_id('sign-up')
# time.sleep(1)
# inputbox.send_keys(Keys.ENTER)
# time.sleep(10)
# os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = '0.0.0.0:8000'
# def create_superuser(self):
# user = mdl.User.objects.create_superuser(
# self.admin_name, '<EMAIL>', self.admin_password)
# prof = mdl.Profile.objects.first()
# prof.email_confirmed = True
# prof.save
# def create_user(self):
# user = mdl.User.objects.create_user(
# self.user_name, '<EMAIL>', self.user_password)
# prof = mdl.Profile.objects.first()
# prof.email_confirmed = True
# prof.save
# def add_sample_route_data():
# add_sample_data(colour='purple')
# def create_user_and_add_cookie(self, is_admin=False):
# if not is_admin:
# self.create_user()
# else:
# self.create_superuser()
# self.client.login(username=self.user_name, password=<PASSWORD>)
# cookie = self.client.cookies['sessionid']
# self.browser.add_cookie(
# {'name': 'sessionid', 'value': cookie.value, 'secure': False, 'path': '/'})
# self.browser.refresh() # need to update page for logged in user
# def test_user_recording_a_climb(self):
# add_sample_data(colour='purple')
# self.browser.get(self.live_server_url)
# self.create_user_and_add_cookie()
# # User goes to routes page
# self.browser.get(self.live_server_url + '/users/1/1/routes')
# # User clicks on route one to record a climb
# route_card = self.browser.find_element_by_id('id-card-1')
# route_card.click()
# # a modal pops up with the option to record climb; the user clicks
# modal_record_climb = self.browser.find_element_by_id(
# 'modal-sections-record-climb-1')
# modal_record_climb.click()
# # confirm that route has been recorded
# rr = mdl.RouteRecord.objects.all()
# self.assertEqual(rr[0].id, 1)
# self.assertEqual(rr[0].is_climbed, True)
| 2.09375 | 2 |
realsense2_camera/scripts/echo_metadada.py | yushijinhun/realsense-ros | 0 | 12759791 | <gh_stars>0
# License: Apache 2.0. See LICENSE file in root directory.
# Copyright(c) 2022 Intel Corporation. All Rights Reserved.
#!/usr/bin/env python
import os
import sys
import rclpy
from rclpy.node import Node
from rclpy import qos
from realsense2_camera_msgs.msg import Metadata
import json
def metadata_cb(msg):
    """Clear the terminal and pretty-print one Metadata message.

    The message's json_data payload is decoded and each key/value pair is
    printed on its own aligned line, preceded by the header timestamp.
    """
    fields = json.loads(msg.json_data)
    os.system('clear')
    stamp = msg.header.stamp
    print('header:\nstamp:\n secs:', stamp.sec, '\n nsecs:', stamp.nanosec)
    rows = ['%10s:%-10s' % (name, str(value)) for name, value in fields.items()]
    print('\n'.join(rows))
def main():
    """Command-line entry point: subscribe to the metadata topic given as the
    first argument and pretty-print every message via metadata_cb."""
    if len(sys.argv) < 2 or '--help' in sys.argv or '/?' in sys.argv:
        print('USAGE:')
        print('echo_metadata.py <topic>')
        print('Demo for listening on given metadata topic.')
        print('App subscribes on given topic')
        print('App then prints metadata from messages')
        print('')
        print('Example: echo_metadata.py /camera/depth/metadata')
        print('')
        # sys.exit is preferred over the site-provided exit() builtin,
        # which is not guaranteed to exist in all environments.
        sys.exit(-1)
    topic = sys.argv[1]
    rclpy.init()
    node = Node('metadata_tester')
    # Keep the subscription object referenced so it is not garbage collected.
    depth_sub = node.create_subscription(Metadata, topic, metadata_cb, qos.qos_profile_sensor_data)
    try:
        rclpy.spin(node)
    finally:
        # Tear the node and the rclpy context down cleanly even when spin
        # is interrupted (e.g. Ctrl-C).
        node.destroy_node()
        rclpy.shutdown()

if __name__ == '__main__':
    main()
| 2.078125 | 2 |
examples/example.py | MertClk/carlaviz | 74 | 12759792 |
import carla
import random
from carla_painter import CarlaPainter
def do_something(data):
    """Placeholder sensor callback: intentionally discards *data*."""
    return None
def main():
    """Spawn an autopilot ego vehicle plus three NPC vehicles in CARLA,
    attach an RGB camera and a lidar to the ego, and continuously stream the
    ego's trajectory and velocity to a CarlaViz painter.

    Blocks forever in the tick loop; all actors and the original world
    settings are restored in the finally block.
    """
    # Pre-bind every name the finally block inspects, so an exception raised
    # before they are assigned (e.g. a failed connection) cannot turn the
    # cleanup itself into a NameError that masks the real error.
    previous_settings = None
    ego_vehicle = None
    other_vehicles = None
    camera = None
    lidar = None
    try:
        # initialize one painter
        painter = CarlaPainter('localhost', 8089)

        client = carla.Client('localhost', 2000)
        client.set_timeout(10.0)
        world = client.get_world()

        # set synchronous mode so world.tick() drives the simulation
        previous_settings = world.get_settings()
        world.apply_settings(carla.WorldSettings(
            synchronous_mode=True,
            fixed_delta_seconds=1.0 / 30.0))

        # randomly pick spawn points for the ego vehicle and three others
        spawn_points = world.get_map().get_spawn_points()
        blueprints_vehicles = world.get_blueprint_library().filter("vehicle.*")

        ego_transform = spawn_points[random.randint(0, len(spawn_points) - 1)]
        other_vehicles_transforms = []
        for _ in range(3):
            other_vehicles_transforms.append(spawn_points[random.randint(0, len(spawn_points) - 1)])

        blueprints_vehicles = [x for x in blueprints_vehicles if int(x.get_attribute('number_of_wheels')) == 4]
        # set ego vehicle's role name to let CarlaViz know this vehicle is the ego vehicle
        blueprints_vehicles[0].set_attribute('role_name', 'ego')  # or set to 'hero'
        batch = [carla.command.SpawnActor(blueprints_vehicles[0], ego_transform).then(carla.command.SetAutopilot(carla.command.FutureActor, True))]
        results = client.apply_batch_sync(batch, True)
        if results[0].error:
            print('spawn ego error, exit')
            return
        ego_vehicle = world.get_actor(results[0].actor_id)

        other_vehicles = []
        batch = []
        for i in range(3):
            batch.append(carla.command.SpawnActor(blueprints_vehicles[i + 1], other_vehicles_transforms[i]).then(carla.command.SetAutopilot(carla.command.FutureActor, True)))
        # set autopilot for all these actors
        ego_vehicle.set_autopilot(True)
        results = client.apply_batch_sync(batch, True)
        for result in results:
            if not result.error:
                other_vehicles.append(result.actor_id)

        # attach a camera and a lidar to the ego vehicle
        blueprint_camera = world.get_blueprint_library().find('sensor.camera.rgb')
        blueprint_camera.set_attribute('image_size_x', '640')
        blueprint_camera.set_attribute('image_size_y', '480')
        blueprint_camera.set_attribute('fov', '110')
        blueprint_camera.set_attribute('sensor_tick', '0.1')
        transform_camera = carla.Transform(carla.Location(y=+3.0, z=5.0))
        camera = world.spawn_actor(blueprint_camera, transform_camera, attach_to=ego_vehicle)
        camera.listen(lambda data: do_something(data))

        blueprint_lidar = world.get_blueprint_library().find('sensor.lidar.ray_cast')
        blueprint_lidar.set_attribute('range', '30')
        blueprint_lidar.set_attribute('rotation_frequency', '10')
        blueprint_lidar.set_attribute('channels', '32')
        blueprint_lidar.set_attribute('lower_fov', '-30')
        blueprint_lidar.set_attribute('upper_fov', '30')
        blueprint_lidar.set_attribute('points_per_second', '56000')
        transform_lidar = carla.Transform(carla.Location(x=0.0, z=5.0))
        lidar = world.spawn_actor(blueprint_lidar, transform_lidar, attach_to=ego_vehicle)
        lidar.listen(lambda data: do_something(data))

        # tick to generate these actors in the game world
        world.tick()

        # save vehicles' trajectories to draw in the frontend
        trajectories = [[]]
        while (True):
            world.tick()
            ego_location = ego_vehicle.get_location()
            trajectories[0].append([ego_location.x, ego_location.y, ego_location.z])
            # draw trajectories
            painter.draw_polylines(trajectories)
            # draw ego vehicle's velocity just above the ego vehicle
            ego_velocity = ego_vehicle.get_velocity()
            velocity_str = "{:.2f}, ".format(ego_velocity.x) + "{:.2f}".format(ego_velocity.y) \
                + ", {:.2f}".format(ego_velocity.z)
            painter.draw_texts([velocity_str],
                               [[ego_location.x, ego_location.y, ego_location.z + 10.0]], size=20)
    finally:
        if previous_settings is not None:
            world.apply_settings(previous_settings)
        if lidar is not None:
            lidar.stop()
            lidar.destroy()
        if camera is not None:
            camera.stop()
            camera.destroy()
        if ego_vehicle is not None:
            ego_vehicle.destroy()
        if other_vehicles is not None:
            client.apply_batch([carla.command.DestroyActor(x) for x in other_vehicles])

if __name__ == "__main__":
    main()
| 2.640625 | 3 |
hrv/filters.py | raphaelvallat/hrv | 1 | 12759793 | import numpy as np
def quotient(rri):
    """Quotient filter: drop RR intervals whose ratio with a neighbour falls
    outside [0.8, 1.2] in either direction, and return the filtered array."""
    rri = np.array(rri)
    last = len(rri) - 1
    forward = rri[:last - 1] / rri[1:last]
    backward = rri[1:last] / rri[:last - 1]
    outliers = np.where(
        (forward < 0.8) | (forward > 1.2) |
        (backward < 0.8) | (backward > 1.2)
    )
    return np.delete(rri, outliers)
def moving_average(rri, order=3):
    """Smooth *rri* with a centered moving mean over a window of *order*."""
    return _moving_function(rri, order, np.mean)


def moving_median(rri, order=3):
    """Smooth *rri* with a centered moving median over a window of *order*."""
    return _moving_function(rri, order, np.median)


def _moving_function(rri, order, func):
    """Apply *func* over a centered window of length *order*.

    The first and last ``order // 2`` samples are left untouched; the result
    is returned as a new float64 array.
    """
    half = int(order / 2)
    smoothed = np.array(rri.copy(), dtype=np.float64)
    for center in range(half, len(rri) - half):
        smoothed[center] = func(rri[center - half:center + half + 1])
    return smoothed
| 2.828125 | 3 |
openvim/dhcp_thread.py | acasana/openmano_movilnet | 204 | 12759794 | # -*- coding: utf-8 -*-
##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: <EMAIL>
##
'''
This is thread that interact with the dhcp server to get the IP addresses
'''
__author__="<NAME>, <NAME>"
__date__ ="$4-Jan-2016 12:07:15$"
import threading
import time
import Queue
import paramiko
import random
import subprocess
#TODO: insert a logging system
class dhcp_thread(threading.Thread):
def __init__(self, dhcp_params, db, db_lock, test, dhcp_nets, debug=None):
'''Init a thread.
Arguments: thread_info must be a dictionary with:
'dhcp_params' dhcp server parameters with the following keys:
mandatory : user, host, port, key, ifaces(interface name list of the one managed by the dhcp)
optional: password, key, port(22)
'db' 'db_lock': database class and lock for accessing it
'test': in test mode no acces to a server is done, and ip is invented
'''
threading.Thread.__init__(self)
self.name = "dhcp_thread"
self.dhcp_params = dhcp_params
self.debug = debug
self.db = db
self.db_lock = db_lock
self.test = test
self.dhcp_nets = dhcp_nets
self.ssh_conn = None
self.mac_status ={} #dictionary of mac_address to retrieve information
#ip: None
#retries:
#next_reading: time for the next trying to check ACTIVE status or IP
#created: time when it was added
#active: time when the VM becomes into ACTIVE status
self.queueLock = threading.Lock()
self.taskQueue = Queue.Queue(2000)
def ssh_connect(self):
try:
#Connect SSH
self.ssh_conn = paramiko.SSHClient()
self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh_conn.load_system_host_keys()
self.ssh_conn.connect(self.dhcp_params["host"], port=self.dhcp_params.get("port",22),
username=self.dhcp_params["user"], password=self.dhcp_params.get("password"), pkey=self.dhcp_params.get("key"),
timeout=2)
except paramiko.ssh_exception.SSHException as e:
text = e.args[0]
print self.name, ": ssh_connect ssh Exception:", text
def load_mac_from_db(self):
#TODO get macs to follow from the database
print self.name, " load macs from db"
self.db_lock.acquire()
r,c = self.db.get_table(SELECT=('mac','ip_address','nets.uuid as net_id', ),
FROM='ports join nets on ports.net_id=nets.uuid',
WHERE_NOT={'ports.instance_id': None, 'nets.provider': None})
self.db_lock.release()
now = time.time()
self.mac_status ={}
if r<0:
print self.name, ": Error getting data from database:", c
return
for port in c:
if port["net_id"] in self.dhcp_nets:
self.mac_status[ port["mac"] ] = {"ip": port["ip_address"], "next_reading": now, "created": now, "retries":0}
def insert_task(self, task, *aditional):
try:
self.queueLock.acquire()
task = self.taskQueue.put( (task,) + aditional, timeout=5)
self.queueLock.release()
return 1, None
except Queue.Full:
return -1, "timeout inserting a task over host " + self.name
def run(self):
print self.name, " starting, nets", self.dhcp_nets
next_iteration = time.time() + 10
while True:
self.load_mac_from_db()
while True:
self.queueLock.acquire()
if not self.taskQueue.empty():
task = self.taskQueue.get()
else:
task = None
self.queueLock.release()
if task is None:
now=time.time()
if now >= next_iteration:
next_iteration = self.get_ip_from_dhcp()
else:
time.sleep(1)
continue
if task[0] == 'add':
print self.name, ": processing task add mac", task[1]
now=time.time()
self.mac_status[task[1] ] = {"ip": None, "next_reading": now, "created": now, "retries":0}
next_iteration = now
elif task[0] == 'del':
print self.name, ": processing task del mac", task[1]
if task[1] in self.mac_status:
del self.mac_status[task[1] ]
elif task[0] == 'exit':
print self.name, ": processing task exit"
self.terminate()
return 0
else:
print self.name, ": unknown task", task
def terminate(self):
try:
if self.ssh_conn:
self.ssh_conn.close()
except Exception as e:
text = str(e)
print self.name, ": terminate Exception:", text
print self.name, ": exit from host_thread"
def get_ip_from_dhcp(self):
now = time.time()
next_iteration= now + 40000 # >10 hores
#print self.name, "Iteration"
for mac_address in self.mac_status:
if now < self.mac_status[mac_address]["next_reading"]:
if self.mac_status[mac_address]["next_reading"] < next_iteration:
next_iteration = self.mac_status[mac_address]["next_reading"]
continue
if self.mac_status[mac_address].get("active") == None:
#check from db if already active
self.db_lock.acquire()
r,c = self.db.get_table(FROM="ports as p join instances as i on p.instance_id=i.uuid",
WHERE={"p.mac": mac_address, "i.status": "ACTIVE"})
self.db_lock.release()
if r>0:
self.mac_status[mac_address]["active"] = now
self.mac_status[mac_address]["next_reading"] = (int(now)/2 +1)* 2
print self.name, "mac %s VM ACTIVE" % (mac_address)
self.mac_status[mac_address]["retries"] = 0
else:
#print self.name, "mac %s VM INACTIVE" % (mac_address)
if now - self.mac_status[mac_address]["created"] > 300:
#modify Database to tell openmano that we can not get dhcp from the machine
if not self.mac_status[mac_address].get("ip"):
self.db_lock.acquire()
r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address})
self.db_lock.release()
self.mac_status[mac_address]["ip"] = "0.0.0.0"
print self.name, "mac %s >> set to 0.0.0.0 because of timeout" % (mac_address)
self.mac_status[mac_address]["next_reading"] = (int(now)/60 +1)* 60
else:
self.mac_status[mac_address]["next_reading"] = (int(now)/6 +1)* 6
if self.mac_status[mac_address]["next_reading"] < next_iteration:
next_iteration = self.mac_status[mac_address]["next_reading"]
continue
if self.test:
if self.mac_status[mac_address]["retries"]>random.randint(10,100): #wait between 10 and 100 seconds to produce a fake IP
content = self.get_fake_ip()
else:
content = None
elif self.dhcp_params["host"]=="localhost":
try:
command = ['get_dhcp_lease.sh', mac_address]
content = subprocess.check_output(command)
except Exception as e:
text = str(e)
print self.name, ": get_ip_from_dhcp subprocess Exception", text
content = None
else:
try:
if not self.ssh_conn:
self.ssh_connect()
command = 'get_dhcp_lease.sh ' + mac_address
(_, stdout, _) = self.ssh_conn.exec_command(command)
content = stdout.read()
except paramiko.ssh_exception.SSHException as e:
text = e.args[0]
print self.name, ": get_ip_from_dhcp: ssh_Exception:", text
content = None
self.ssh_conn = None
except Exception as e:
text = str(e)
print self.name, ": get_ip_from_dhcp: Exception:", text
content = None
self.ssh_conn = None
if content:
self.mac_status[mac_address]["ip"] = content
#modify Database
self.db_lock.acquire()
r,c = self.db.update_rows("ports", {"ip_address": content}, {"mac": mac_address})
self.db_lock.release()
if r<0:
print self.name, ": Database update error:", c
else:
self.mac_status[mac_address]["retries"] = 0
self.mac_status[mac_address]["next_reading"] = (int(now)/3600 +1)* 36000 # 10 hores
if self.mac_status[mac_address]["next_reading"] < next_iteration:
next_iteration = self.mac_status[mac_address]["next_reading"]
print self.name, "mac %s >> %s" % (mac_address, content)
continue
#a fail has happen
self.mac_status[mac_address]["retries"] +=1
#next iteration is every 2sec at the beginning; every 5sec after a minute, every 1min after a 5min
if now - self.mac_status[mac_address]["active"] > 120:
#modify Database to tell openmano that we can not get dhcp from the machine
if not self.mac_status[mac_address].get("ip"):
self.db_lock.acquire()
r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address})
self.db_lock.release()
self.mac_status[mac_address]["ip"] = "0.0.0.0"
print self.name, "mac %s >> set to 0.0.0.0 because of timeout" % (mac_address)
if now - self.mac_status[mac_address]["active"] > 60:
self.mac_status[mac_address]["next_reading"] = (int(now)/6 +1)* 6
elif now - self.mac_status[mac_address]["active"] > 300:
self.mac_status[mac_address]["next_reading"] = (int(now)/60 +1)* 60
else:
self.mac_status[mac_address]["next_reading"] = (int(now)/2 +1)* 2
if self.mac_status[mac_address]["next_reading"] < next_iteration:
next_iteration = self.mac_status[mac_address]["next_reading"]
return next_iteration
def get_fake_ip(self):
fake_ip= "192.168.%d.%d" % (random.randint(1,254), random.randint(1,254) )
while True:
#check not already provided
already_used = False
for mac_address in self.mac_status:
if self.mac_status[mac_address]["ip"] == fake_ip:
already_used = True
break
if not already_used:
return fake_ip
#EXAMPLE of bash script that must be available at the DHCP server for "isc-dhcp-server" type
# $ cat ./get_dhcp_lease.sh
# #!/bin/bash
# awk '
# ($1=="lease" && $3=="{"){ lease=$2; active="no"; found="no" }
# ($1=="binding" && $2=="state" && $3=="active;"){ active="yes" }
# ($1=="hardware" && $2=="ethernet" && $3==tolower("'$1';")){ found="yes" }
# ($1=="client-hostname"){ name=$2 }
# ($1=="}"){ if (active=="yes" && found=="yes"){ target_lease=lease; target_name=name}}
# END{printf("%s", target_lease)} #print target_name
# ' /var/lib/dhcp/dhcpd.leases
| 2.453125 | 2 |
Tarea2DDSIV/ejercicio2.py | Gorwast/python-tests | 0 | 12759795 | <reponame>Gorwast/python-tests<gh_stars>0
# <NAME> DDSIV
def hello_user(name):
    """Build a Spanish greeting for the given name.

    Args:
        name (str): name to insert into the greeting.

    Returns:
        str: the text '¡Hola <name>!'
    """
    return ''.join(('¡Hola ', name, '!'))
# Interactive demo: ask for a name on stdin and print the greeting.
name = input('Introduce tu nombre: ')
print(hello_user(name))
| 3.8125 | 4 |
medium/130-surrounded-regions.py | wanglongjiang/leetcode | 2 | 12759796 | <reponame>wanglongjiang/leetcode
'''
被围绕的区域
给你一个 m x n 的矩阵 board ,由若干字符 'X' 和 'O' ,找到所有被 'X' 围绕的区域,并将这些区域里所有的 'O' 用 'X' 填充。
被围绕的区间不会存在于边界上,换句话说,任何边界上的 'O' 都不会被填充为 'X'。
任何不在边界上,或不与边界上的 'O' 相连的 'O' 最终都会被填充为 'X'。如果两个元素在水平或垂直方向相邻,则称它们是“相连”的。
'''
from typing import List
'''
思路,图的路径搜索。
把'O'认为图中的顶点,从边界处的'O'出发,能到达的'O'不可以替换,其他不可到达的'O'可以替换为'X'。
时间复杂度:O(mn),建立图,图的遍历都需要O(mn)
空间复杂度:O(mn),最坏情况下需要O(mn)空间建图
'''
class Solution:
    """'Surrounded regions': flip every 'O' region fully enclosed by 'X'.

    Any 'O' 4-directionally connected to a border 'O' survives; every other
    'O' is captured. The board is modified in place.

    Compared to the original approach (building an explicit adjacency-map
    graph and recursing), this walks the board directly with an iterative
    DFS: it avoids the O(mn) adjacency-map memory and cannot hit Python's
    recursion limit on large boards. Time O(mn); extra space O(mn) worst
    case for the 'safe' set.
    """

    def solve(self, board: List[List[str]]) -> None:
        """Capture surrounded regions in place; returns None."""
        if not board or not board[0]:
            return
        rows, cols = len(board), len(board[0])
        # Seed with every 'O' on the border: those can never be captured.
        stack = [(r, c)
                 for r in range(rows)
                 for c in range(cols)
                 if board[r][c] == 'O'
                 and (r in (0, rows - 1) or c in (0, cols - 1))]
        safe = set(stack)
        # Iterative DFS: mark every 'O' reachable from the border as safe.
        while stack:
            r, c = stack.pop()
            for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                if (0 <= nr < rows and 0 <= nc < cols
                        and board[nr][nc] == 'O' and (nr, nc) not in safe):
                    safe.add((nr, nc))
                    stack.append((nr, nc))
        # Everything still marked 'O' but unreachable from the border is
        # enclosed -> capture it.
        for r in range(rows):
            for c in range(cols):
                if board[r][c] == 'O' and (r, c) not in safe:
                    board[r][c] = 'X'
# Ad-hoc smoke test executed on import: the classic example board and a
# degenerate 1x1 board, printed before and after solving.
s = Solution()
b = [["X", "X", "X", "X"], ["X", "O", "O", "X"], ["X", "X", "O", "X"], ["X", "O", "X", "X"]]
print(b)
s.solve(b)
print(b)
b = [["X"]]
s.solve(b)
print(b)
| 3.203125 | 3 |
TraceSyscalls/subs/parseArgs.py | jvfNontools/jvfNontools | 0 | 12759797 | <filename>TraceSyscalls/subs/parseArgs.py
#!/usr/bin/python3
#Copyright 2018 <NAME>
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import sys
import argparse
import subprocess
import GetArgs
class getArgsCom(GetArgs.getArgs):
    """Command-line argument handling for the syscall-trace parser.

    Extends GetArgs.getArgs with this tool's options (trace file, unistd.h
    location, output file) and resolves the machine-dependent unistd.h
    default when the user does not supply one.
    """
    def __init__(self):
        # use argparse to get input parameters
        parser = argparse.ArgumentParser()
        parser.add_argument("-t", "--trace", default='trace1.out', help="formatted ftrace file")
        parser.add_argument("-u", "--unistd", help="full path to location of unistd.h (which holds syscall number to syscall name #defines)\n default [source directory]/unistd/power_unistd.h or [source directory]/unistd/x86_unistd.h based on machine")
        parser.add_argument("-o", "--output", default='syscalls.out', help="output file: default syscalls.out")
        # Common parsing/validation is delegated to the base class.
        setFiles = GetArgs.getArgs.getFiles(self, parser)
        # set by default
        self.verbose = setFiles.verbose
        self.outFile = setFiles.output
        self.traceFile = setFiles.trace
        self.kallsyms = setFiles.kallsyms
        # others need check for default
        # done in common
        self.machine = setFiles.machine
        # set default if not entered
        if setFiles.unistd == None:
            if setFiles.machine == "x86_64":
                # NOTE(review): the help text above says x86_unistd.h but the
                # default used here is x86_64_unistd.h -- confirm which name
                # actually exists in the unistd/ directory.
                self.unistd = sys.path[0] + "/unistd/x86_64_unistd.h"
            else:
                self.unistd = sys.path[0] + "/unistd/power_unistd.h"
| 2.09375 | 2 |
lenstronomy/LensModel/Profiles/const_mag.py | lucateo/lenstronomy | 1 | 12759798 | <gh_stars>1-10
__author__ = 'gipagano'
import numpy as np
import lenstronomy.Util.util as util
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase
__all__ = ['ConstMag']
class ConstMag(LensProfileBase):
    """
    this class implements the macromodel potential of `Diego et al. <https://www.aanda.org/articles/aa/pdf/2019/07/aa35490-19.pdf>`_
    Convergence and shear are computed according to `Diego2018 <arXiv:1706.10281v2>`_
    """
    param_names = ['center_x', 'center_y','mu_r', 'mu_t', 'parity', 'phi_G']
    lower_limit_default = {'center_x': -100, 'center_y': -100, 'mu_r':1, 'mu_t': 1000, 'parity': -1, 'phi_G':0.0}
    upper_limit_default = {'center_x': 100, 'center_y': 100, 'mu_r':1, 'mu_t': 1000, 'parity': 1, 'phi_G':np.pi}

    @staticmethod
    def _kappa_gamma(mu_r, mu_t, parity, phi_G):
        """
        Convert the (radial, tangential) magnification parameterization into
        convergence and the two shear components. Shared by function(),
        derivatives() and hessian() -- the identical computation was
        previously duplicated in all three methods, risking divergence.

        :param mu_r: radial magnification
        :param mu_t: tangential magnification
        :param parity: parity side of the macromodel. Either +1 (positive parity) or -1 (negative parity)
        :param phi_G: shear orientation angle (relative to the x-axis)
        :return: kappa, gamma1, gamma2
        """
        # positive parity case
        if parity == 1:
            gamma = (1./mu_t - 1./mu_r) * 0.5
            kappa = 1 - gamma - 1./mu_r
        # negative parity case
        elif parity == -1:
            gamma = (1./mu_t + 1./mu_r) * 0.5
            kappa = 1 - gamma + 1./mu_r
        else:
            raise ValueError('%f is not a valid value for the parity of the macromodel. Choose either +1 or -1.' % parity)
        # compute the shear along the x and y directions, rotate the vector in the opposite direction than the reference frame (compare with util.rotate)
        gamma1, gamma2 = gamma * np.cos(2 * phi_G), -gamma * np.sin(2 * phi_G)
        return kappa, gamma1, gamma2

    def function(self, x, y, mu_r, mu_t, parity, phi_G, center_x=0, center_y=0):
        """
        :param x: x-coord (in angles)
        :param y: y-coord (in angles)
        :param mu_r: radial magnification
        :param mu_t: tangential magnification
        :param parity: parity side of the macromodel. Either +1 (positive parity) or -1 (negative parity)
        :param phi_G: shear orientation angle (relative to the x-axis)
        :return: lensing potential
        """
        kappa, gamma1, gamma2 = self._kappa_gamma(mu_r, mu_t, parity, phi_G)
        x_shift = x - center_x
        y_shift = y - center_y
        f_ = 1./2. * kappa * (x_shift*x_shift + y_shift*y_shift) + 1./2. * gamma1 * (x_shift*x_shift - y_shift*y_shift)-gamma2*x_shift*y_shift
        return f_

    def derivatives(self, x, y, mu_r, mu_t, parity, phi_G, center_x=0, center_y=0):
        """
        :param x: x-coord (in angles)
        :param y: y-coord (in angles)
        :param mu_r: radial magnification
        :param mu_t: tangential magnification
        :param parity: parity of the side of the macromodel. Either +1 (positive parity) or -1 (negative parity)
        :param phi_G: shear orientation angle (relative to the x-axis)
        :return: deflection angle (in angles)
        """
        kappa, gamma1, gamma2 = self._kappa_gamma(mu_r, mu_t, parity, phi_G)
        x_shift = x - center_x
        y_shift = y - center_y
        f_x = (kappa+gamma1)*x_shift - gamma2*y_shift
        f_y = (kappa-gamma1)*y_shift - gamma2*x_shift
        return f_x, f_y

    def hessian(self, x, y, mu_r, mu_t, parity, phi_G, center_x=0, center_y=0):
        """
        :param x: x-coord (in angles)
        :param y: y-coord (in angles)
        :param mu_r: radial magnification
        :param mu_t: tangential magnification
        :param parity: parity of the side of the macromodel. Either +1 (positive parity) or -1 (negative parity)
        :param phi_G: shear orientation angle (relative to the x-axis)
        :return: hessian matrix (in angles)
        """
        kappa, gamma1, gamma2 = self._kappa_gamma(mu_r, mu_t, parity, phi_G)
        # The hessian is spatially constant: position enters only through
        # the (removed) shift, as expected for a constant-magnification model.
        f_xx = kappa + gamma1
        f_yy = kappa - gamma1
        f_xy = -gamma2
        return f_xx, f_yy, f_xy
| 2.046875 | 2 |
raspberrypi/mq_x_smoke_sensor/mq_x_smoke_sensor.py | AlexRogalskiy/Duino | 0 | 12759799 | <filename>raspberrypi/mq_x_smoke_sensor/mq_x_smoke_sensor.py<gh_stars>0
# mq_x_smoke_sensor.py - print smoke level
# (c) BotBook.com - Karvinen, Karvinen, Valtokari
import time
import botbook_mcp3002 as mcp # <1>
smokeLevel = 0
def readSmokeLevel():
    """Sample the smoke sensor through the MCP3002 A/D converter and store
    the raw analog reading in the module-level smokeLevel variable."""
    global smokeLevel
    smokeLevel = mcp.readAnalog()
def main():
    """Poll the smoke sensor twice a second forever, printing the current
    level and a warning whenever it crosses the alarm threshold."""
    while True:  # run until interrupted
        readSmokeLevel()  # refresh the module-level smokeLevel
        print("Current smoke level is %i " % smokeLevel)
        if smokeLevel > 120:  # alarm threshold for the raw sensor reading
            print("Smoke detected")
        time.sleep(0.5)  # s

if __name__ == "__main__":
    main()
| 2.421875 | 2 |
documents/amazon-rekognition-developer-guide/code_examples/python_examples/image/python-delete-collection.py | siagholami/aws-documentation | 5 | 12759800 | #Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#PDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)
import boto3
from botocore.exceptions import ClientError
from os import environ
if __name__ == "__main__":
    # Delete a single Amazon Rekognition face collection by ID and report
    # the HTTP status code of the operation.
    collectionId='MyCollection'
    print('Attempting to delete collection ' + collectionId)
    client=boto3.client('rekognition')
    statusCode=''
    try:
        response=client.delete_collection(CollectionId=collectionId)
        statusCode=response['StatusCode']
    except ClientError as e:
        # A missing collection is reported as informational; any other
        # client error is surfaced with its message. Either way the HTTP
        # status code from the error response is reported below.
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            print ('The collection ' + collectionId + ' was not found ')
        else:
            print ('Error other than Not Found occurred: ' + e.response['Error']['Message'])
        statusCode=e.response['ResponseMetadata']['HTTPStatusCode']
    print('Operation returned Status Code: ' + str(statusCode))
    print('Done...')
| 2.140625 | 2 |
bifs/bifs_util/EmpiricalScanner.py | ucsf-deb/bifs | 0 | 12759801 | import itertools
import os
import re
import sys
# scans files to construct an empirical prior
from bifs import BIFS
# numpy >= 1.17
from numpy.random import Generator, PCG64
import numpy as np
class RunningMean:
    """Accepts values one at a time and computes the mean and sd of all values seen so far.

    The inputs are arrays, which must all have the same shape. Mean and sd are
    accumulated separately for each cell of the array using Welford's online
    algorithm (one pass, numerically stable).
    """
    def __init__(self, sd=True):
        """If sd is False do not accumulate the second moment.
        Clients must not request sd-related information in that case.
        """
        self.n = 0            # number of observations seen so far
        self._second = sd     # as in second moment
    def observation(self, x):
        """x is an array-like object which is considered a single observation."""
        self.n += 1
        if self.n == 1:
            # Bug fix: copy (and promote to float) instead of keeping a
            # reference to the caller's array. The original stored x itself
            # and later updated it with '+=', silently mutating the caller's
            # data; an integer input array would also accumulate incorrectly.
            self._mns = np.array(x, dtype=float)
            if self._second:
                # scalar 0.0 broadcasts into an array on the first update below
                self._ss = 0.0
        else:
            lastdelta = x - self._mns
            self._mns += lastdelta / self.n
            if self._second:
                # element-by-element multiplication (Welford update)
                self._ss += lastdelta * (x - self._mns)
    def mean(self):
        "return array of means so far"
        return self._mns
    def sd(self):
        """return array of sample standard deviations so far (n-1 denominator).

        Requires at least two observations and sd=True at construction.
        """
        # element by element square root
        return np.sqrt(self._ss / (self.n - 1))
class AbstractEmpiricalScanner:
    """Consume a list of images and compute per-voxel statistics on them.

    Each statistic is computed separately for each voxel, i.e. the result in the
    (2, 5) cell refers to all the (2, 5) cells in all the images (or their
    Fourier counterparts).  All images must have the same dimensions, and they
    should be aligned with each other for the results to be meaningful.

    The mean and sd of the modulus are always accumulated; values for the phase
    can be requested as well, as can the correlations between the phase and
    modulus (again, at each point in Fourier space).  Finally, one can request a
    sample of the original voxels in image space.

    Concrete classes provide particular ways to get images.  They then pass the
    images to _statsAccumulate and, optionally, _voxAccumulate (possibly
    different images for each) and call _post when done.  At that point, and
    only that point, are results available from self.modulus() and, if
    requested, self.phase(), self.corr(), and self.voxels().

    For backward compatibility, the self.mns, self.sds, and self.vox accessors
    return the mean and sd of self.modulus() and the voxels.  Don't rely on
    that in new code.

    image_mask optionally indicates which areas of the image to ignore.  It
    must be a boolean array with the same shape as image files.  All voxels
    selected by image_mask are set to zero before doing BIFS processing.  The
    mask applies to the original image NOT to the fourier space version, which
    will generally have non-0 values in the image_mask region.  It is the
    subclass responsibility to implement these semantics.  Note the "mask" here
    is not a mask in the numpy sense of a masked array, which concerns missing
    values.

    voxel sampling only considers the non-ignored regions, but the number
    sampled will be based on the total voxel count before masking.
    """
    def __init__(self, sampleFraction=0, seed=85792359, image_mask=None, phase=False, corr=False):
        """Setup for scan of images.

        if sampleFraction is >0 (and it should be <=1) then that fraction of
        the image voxels will be retained.  In that case, seed is used to set
        the random number generator.
        If phase is true, accumulate statistics on the phase as well as the
        modulus.
        If corr is true, accumulate statistics on the phase and its covariance
        with the modulus.  Covariance is on a cell by cell basis.
        """
        self.sampleFraction = sampleFraction
        self._modulus = RunningMean()
        # BUG FIX: these flags were previously assigned only inside the
        # conditionals, so with phase=False and corr=False the attribute never
        # existed and _do_one() raised AttributeError (via __getattr__).
        # Define both unconditionally.
        self._getPhase = bool(phase or corr)
        self._getcorr = bool(corr)
        if self._getPhase:
            self._phase = RunningMean()
        if self._getcorr:
            # running mean of modulus*phase products; sd not needed for covariance
            self._xy = RunningMean(sd=False)
        if sampleFraction > 0:
            self._voxels = []
            self._rg = Generator(PCG64(seed))
        self.masking = (image_mask is not None)
        if self.masking:
            self.image_mask = image_mask
            self.image_keep = np.logical_not(image_mask)
        self._benchmarkHdr = None  # first header seen; used to check consistency
        self._mismatch = set()  # holds header keys that had a mismatch
        self._bifs = BIFS()
    def modulus(self) -> RunningMean:
        """Accumulator with mean/sd of the k-space modulus."""
        return self._modulus
    def phase(self) -> RunningMean:
        """Accumulator with mean/sd of the k-space phase (only if requested)."""
        return self._phase
    def corr(self):
        """Return the voxel-wise modulus/phase correlation.

        Note we return the correlation matrix itself, not an accumulator.
        """
        return (self._xy.mean()-self._modulus.mean()*self._phase.mean())/ \
            (self._modulus.sd()*self._phase.sd())
    def voxels(self):
        """Return the sampled image-space voxels as a 1-d array sorted by intensity."""
        return self._voxels
    def __getattr__(self, name):
        # backward compatibility only: legacy accessor names
        if name == "mns":
            return self.modulus().mean()
        if name == "sds":
            return self.modulus().sd()
        if name == "vox":
            return self.voxels()
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, name))
    def _do_one(self, file):
        """file is a path-like object.  Read it in and accumulate information."""
        self._bifs.load_image_file(file)
        if self.masking:
            # dirty trick. But doesn't invalidate anything else in _bifs.
            self._bifs._init_image[self.image_mask] = 0.0
        self._modulus.observation(self._bifs.mod_image())
        if self._getPhase:
            self._phase.observation(self._bifs.phase_image())
        if self._getcorr:
            # next multiplication is element by element
            self._xy.observation(self._bifs.phase_image()*self._bifs.mod_image())
        if self.sampleFraction > 0:
            self._voxAccumulate(self._bifs.init_image())
        hdr = self._bifs.read_imfile.header
        # explicit identity test: header objects may have unhelpful truthiness
        if self._benchmarkHdr is None:
            # first header encountered
            self._benchmarkHdr = hdr
            # could not delete the following key
            # it actually doesn't appear in the objects attributes
            #del benchmarkHdr.__dict__['db_name'] # differences expected and no concern
        else:
            for key in self._benchmarkHdr:
                if key == 'db_name':
                    continue
                if key.startswith("scl_"):
                    # values were array(nan, dtype=float32) and I had no luck testing for them
                    # in various ways
                    continue
                v1 = self._benchmarkHdr[key]
                v2 = hdr[key]
                # np.any also handles scalar header values, where the previous
                # (v1 != v2).any() would raise AttributeError
                if np.any(v1 != v2):
                    self._mismatch.add(key)
    def _voxAccumulate(self, m):
        """accumulate voxel values.

        In the most likely case, the voxels are from image space while the
        empirical prior is from k-space.  So we provide separate functions for
        the 2 values.  Calling this is pointless unless sampleFraction>0.
        """
        # always base number sampled on the complete image size
        nSamp = int(m.size*self.sampleFraction)
        if self.masking:
            self._voxels.append(self._rg.choice(m[self.image_keep], nSamp))
        else:
            # m.ravel is not an acceptable first argument to choice
            # m.flatten and the mask selection above both create copies, unfortunately
            self._voxels.append(self._rg.choice(m.flatten(), nSamp))
    def _statsPost(self):
        """
        Finalize computation of voxel by voxel statistics for all images.
        Call after all images have been seen.
        Results returned as arrays self.mns and self.sds.
        """
        # currently handled by RunningMean instances automatically
        pass
    def _voxPost(self):
        """
        Finalize accumulated voxels: concatenate per-image samples and sort.
        """
        if self.sampleFraction > 0:
            self._voxels = np.concatenate(self._voxels)
            self._voxels.sort()
    def _post(self):
        """Wrap up all processing."""
        self._statsPost()
        self._voxPost()
    def nImages(self) -> int:
        """Number of images processed so far = number of files read unless error."""
        return self._modulus.n
class EmpiricalScanner(AbstractEmpiricalScanner):
    """Scan selected images on disk, ensuring they are all compatible.

    topDir     path like object indicating where in the file system the scan
               should start.  All subdirectories will be scanned recursively
               unless they are excluded.
    matchFile  <String> regular expression for the file name of image files we
               want.  Matching is on the file name only, not its full path.
    exclude    <String> optional regular expression.  Any directory matching
               this pattern is excluded.  Any file that satisfies matchFile is
               excluded if it also matches exclude.
    ostr       A stream-like object that will receive routines notices of
               skipped files and statistics.
    See AbstractEmpiricalScanner for sampleFraction, seed and image_mask.

    The files are read in and converted to k-space.  We compute the mean and sd
    of the k-space images, and optionally accumulate voxels from the original
    image.  We also check that the headers are consistent.  This works for .nii
    files, and may or may not for others.
    """
    def __init__(self, sampleFraction=0, seed=85792359, topDir=".", matchFile="", exclude=None, image_mask=None, phase=False, corr=False, ostr=sys.stdout):
        super().__init__(sampleFraction, seed, image_mask, phase, corr)
        self._topDir = topDir
        self._matchRE = re.compile(matchFile, re.I)
        if exclude:
            self._excludeRE = re.compile(exclude, re.I)
        else:
            self._excludeRE = None
        self.go(ostr=ostr)
    def go(self, ostr=sys.stdout):
        """Actually perform the scan.

        Note this is triggered by object initialization.
        Repeated calls may not work.
        ostr is an output stream.
        """
        for root, dirs, files in os.walk(self._topDir):
            if self._excludeRE:
                # Prune excluded directories in place so os.walk skips them
                # (e.g. directories with our target case for whom we are
                # trying to predict).
                # BUG FIX: the previous index bookkeeping subtracted the
                # deletion count twice (i -= nKill, then del dirs[i-nKill]),
                # deleting the wrong entries whenever more than one directory
                # matched.  Rebuild the list instead.
                kept = []
                for d in dirs:
                    if self._excludeRE.search(d):
                        print("Skipping {}".format(d), file=ostr)
                    else:
                        kept.append(d)
                dirs[:] = kept  # slice-assign: os.walk honors in-place edits only
            # look for files to import
            for f in files:
                if not self._matchRE.search(f):
                    continue
                if self._excludeRE and self._excludeRE.search(f):
                    print("Skipping {}".format(f), file=ostr)
                    continue
                self._do_one(os.path.join(root, f))
        self._post()
class FeedScanner(AbstractEmpiricalScanner):
    """A scanner that accepts anything iterable as a list of file names to scan."""
    def __init__(self, files, sampleFraction=0, seed=85792359, image_mask=None, phase=False, corr=False, ostr=sys.stdout):
        super().__init__(sampleFraction, seed, image_mask, phase, corr)
        self._files = files
        self.go(ostr=ostr)
    def go(self, ostr=sys.stdout):
        """Scan every file in the feed.

        ostr is accepted for interface consistency with EmpiricalScanner.go
        and is currently unused.
        BUG FIX: go() previously took no ostr parameter, so the
        self.go(ostr=ostr) call in __init__ raised TypeError on every
        construction.
        """
        for f in self._files:
            self._do_one(f)
        self._post()
| 2.8125 | 3 |
soil/skeleton/management/commands/request_to_hortplus.py | mabbettbyron/terraprobe | 2 | 12759802 | from django.core.management.base import BaseCommand
from django.utils import timezone
from datetime import timedelta, date
from django.contrib import messages
from skeleton.utils import get_current_season, get_site_season_start_end
from skeleton.models import Reading, Site, Farm, WeatherStation, Season
import os
import json
import requests
import re
# Get an instance of a logger
import logging
logger = logging.getLogger(__name__)
'''
From command line can just run 'python manage.py request_to_hortplus --stations=HAV'
'''
class Command(BaseCommand):
    """Management command that requests weather data from the hortplus API.

    Three modes, selected by --purpose:
      * (absent): a raw pass-through request built from the CLI options;
      * process_readings: fill in missing rainfall values on Reading rows by
        summing station rainfall between consecutive readings;
      * generate_eoy_data: build per-month rainfall totals for the current
        season plus a 10-year accumulated total for one weather station.
    """
    help = 'Requests data from hortplus'
    def add_arguments(self, parser):
        parser.add_argument('-P', '--purpose', type=str, help='One of process_readings or generate_eoy_data')
        parser.add_argument('-s', '--serial', type=str, help='Hortplus serial number generated individually for a user')
        parser.add_argument('-p', '--period', type=int, help='The number of records for the specified interval, counting backwards from now (unless a startdate provided)')
        # typo fix: the date format is YYYY-MM-DD (was "YYYY-MMDD")
        parser.add_argument('-d', '--startdate', type=str, help='The date to start providing data from. This forces the period to count forwards from this date. Format YYYY-MM-DD')
        parser.add_argument('-f', '--format', type=str, help='The format the resulting data should be provided as')
        parser.add_argument('-i', '--interval', type=str, help='The type of weather data. H for hourly and D for daily.')
        parser.add_argument('-t', '--stations', type=str, help='The list of weather station ids separated by a comma.')
        parser.add_argument('-m', '--metvars', type=str, help='The list of weather variable and measurement type TD_M,RN_T combined with an underscore, separated by a comma.')
        parser.add_argument('--sites', type=open, help='A list of sites to get request rainfall for.')
    def handle(self, *args, **kwargs):
        response_text = None
        # get arguments from command line or fall back to the configured key
        serial = kwargs['serial'] if kwargs['serial'] else os.getenv('HORTPLUS_JACK_KEY')
        if kwargs['purpose'] is None:
            # Raw pass-through request: forward the CLI options verbatim.
            data = {
                'period': kwargs['period'], # 7
                'format': kwargs['format'], # csv
                'interval': kwargs['interval'], # D
                'stations': kwargs['stations'], # HAV
                'metvars' : kwargs['metvars'] # RN_T
            }
            # startdate is optional
            if kwargs['startdate']:
                data['startdate'] = kwargs['startdate']
            response_text = post_request(data, serial)
        elif kwargs['purpose'] == 'process_readings':
            logger.info('Start processing of readings')
            readings = None
            if kwargs['sites']:
                sites = kwargs['sites']
                logger.info('Starting update of rainfall for sites that have just been uploaded and have a null rain reading.' + str(sites))
                readings = Reading.objects.select_related('site__farm__weatherstation').filter(site__in=sites, rain__isnull=True, type=1)
            else:
                logger.info('Starting update of rainfall for all sites that have a null rain reading')
                readings = Reading.objects.select_related('site__farm__weatherstation').filter(rain__isnull=True, type=1)
            for reading in readings:
                logger.debug('Reading object to process: ' + str(reading))
                season = get_current_season()
                dates = get_site_season_start_end(reading.site, season)
                # If a site has only one reading we cannot calculate the
                # previous reading date. A try block is the only way to catch this
                try:
                    previous_reading = reading.get_previous_by_date(site=reading.site, type=1, date__range=(dates.period_from, dates.period_to))
                except:
                    previous_reading = None
                if previous_reading:
                    site = reading.site
                    farm = site.farm
                    weatherstation = farm.weatherstation
                    # full days strictly between the two readings
                    days = (reading.date - previous_reading.date).days - 1
                    logger.debug('Previous Reading:' + str(previous_reading))
                    logger.debug(days)
                    startdate = previous_reading.date + timedelta(days=1)
                    logger.debug('startdate' + str(startdate))
                    data = {
                        'period': days,
                        'startdate' : str(startdate),
                        'format' : 'csv',
                        'interval': 'D',
                        'stations': weatherstation.code,
                        'metvars' : 'RN_T'
                    }
                    response_text = post_request(data, serial)
                    lines = response_text.split("\n")
                    del lines[0]  # drop the CSV header row
                    rainfall = 0
                    for line in lines:
                        # raw string fixes the invalid "\w" escape sequence
                        valid = re.search(r"^\w.*", line)  # make sure we have a valid line to split
                        if valid:
                            fields = line.split(",")
                            # '-' and '.' are hortplus placeholders for missing data
                            if fields[3] != '-' and fields[3] != '.':
                                rainfall += float(fields[3])
                    logger.debug(str(rainfall))
                    reading.rain = round(rainfall, 1)
                    reading.save()
                else:
                    logger.debug('No previous reading for site so cannot calculate a rain reading')
        elif kwargs['purpose'] == 'generate_eoy_data':
            # Keyed by month; each value holds the 10-year accumulated rainfall
            # ('avg') and the current-season rainfall ('cur').
            rain_data = {}
            start_dates = []  # season start dates (2nd October, see note below)
            season = Season.objects.get(current_flag=True)
            current_year = season.formatted_season_start_year
            # 2nd (not 1st) of October because of the way the API counts days
            current_year_date = str(current_year) + '-10-02'
            start_dates.append(current_year_date)
            station = kwargs['stations']
            logger.debug("Generating average rainfall for last 10 years back from " + current_year_date + " for station " + station)
            for month in ['10','11','12','01','02','03','04','05','06']:
                rain_data[month] = {
                    'avg' : 0,
                    'cur' : 0
                }
            # Previous 10 seasons.  The loop variable is named "offset" rather
            # than "date" to avoid shadowing the imported datetime.date class.
            for offset in range(10):
                year = (int(current_year) - 1) - offset
                # Start Date will always be 1st of October of that year.
                start_dates.append(str(year) + '-10-02')
            logger.debug('We will be getting rainfall data for ' + str(start_dates) + ' + 272 days')
            # We will have the current year, and the previous 10 years in array
            for start_date in start_dates:
                data = {
                    'period': 272, # 272 days will take us to 30th of June (except leap years but don't need to be exact)
                    'startdate' : start_date,
                    'format' : 'csv',
                    'interval': 'D',
                    'stations': station,
                    'metvars' : 'RN_T'
                }
                response_text = post_request(data, serial)
                lines = response_text.split("\n")
                del lines[0]  # drop the CSV header row
                for line in lines:
                    # raw string fixes the invalid "\w" escape sequence
                    valid = re.search(r"^\w.*", line)  # make sure we have a valid line to split
                    if valid:
                        fields = line.split(",")
                        station = fields[0]
                        start = fields[1]
                        split_start = start.split("-") # Split from date "2019-10-17 08:00:00"
                        month = split_start[1] # Month which is the key to our rain_data dict is the second part of date
                        rain = fields[3]
                        if rain != '-' and rain != '.':
                            if start_date == current_year_date:
                                rain_data[month]['cur'] += float(rain)
                            else:
                                rain_data[month]['avg'] += float(rain)
                        else:
                            logger.error("Unidentifiable value for rain of:" + rain)
            # BaseCommand prints the returned string to stdout
            return json.dumps(rain_data)
        else:
            logger.error('Unidentified purpose of requesting hortplus data')
'''
post_request
'''
def post_request(data, serial):
    """POST a weather-data request to the hortplus metwatch API.

    data   -- dict of request parameters (period, format, interval, stations, ...)
    serial -- hortplus serial key identifying the requesting user
    Returns the response body text on HTTP 200; raises on any failure.
    """
    try:
        r = requests.post('https://hortplus.metwatch.nz/index.php?pageID=wxn_wget_post&serial=' + serial, data=data)
        logger.debug('data in request ' + str(data))
        if r.status_code == 200:
            logger.debug('response ' + str(r.text))
            return r.text
        else:
            raise Exception("Error processing request:" + str(r.text))
    except Exception as e:
        # BUG FIX: this previously called messages.error(request, ...), but no
        # `request` object exists in this function, so every failure became a
        # NameError that masked the real error.  Log and re-raise instead.
        logger.error("Error: %s", e)
        raise
| 2.328125 | 2 |
code/oldtmpcodes/qlf.py | modichirag/21cmhod | 0 | 12759803 | <filename>code/oldtmpcodes/qlf.py
import numpy as np
from scipy.optimize import minimize, Bounds
from nbodykit.lab import BigFileCatalog, BigFileMesh
h = 0.6776
def moster(Mhalo, z, h=0.6776, scatter=None):
    """Stellar mass from halo mass via the Moster et al. abundance-matching fit.

    Mhalo is the halo mass in h-scaled units and z the redshift; the return
    value is the stellar mass in the same h-scaled units.  When `scatter` is
    given, a log-normal scatter of that many dex is applied per object
    (requires an array input, since .size is used).
    """
    halo_mass = Mhalo / h
    zfac = z / (1 + z)
    # redshift-dependent fit parameters of the double power law
    m_char = 10.0 ** (11.590 + 1.195 * zfac)
    norm = 0.0351 - 0.0247 * zfac
    beta = 1.376 - 0.826 * zfac
    gamma = 0.608 + 0.329 * zfac
    ratio = halo_mass / m_char
    stellar = 2 * norm / (ratio ** (-beta) + ratio ** gamma)
    stellar *= halo_mass
    if scatter is not None:
        stellar = 10 ** (np.log10(stellar) + np.random.normal(0, scatter, stellar.size))
    return stellar * h
#
def qlf(M, zz, alpha=-2.03, beta=-4, Ms=-27.21, lphi6=-8.94):
    """Quasar luminosity function: double power law in absolute magnitude M.

    zz is the redshift (the normalisation dims by 0.47 dex per unit redshift
    above 6); alpha/beta are the faint/bright-end slopes, Ms the break
    magnitude and lphi6 the log10 normalisation at z = 6.
    """
    normalisation = 10 ** (lphi6 - 0.47 * (zz - 6))
    faint_end = 10 ** (0.4 * (alpha + 1) * (M - Ms))
    bright_end = 10 ** (0.4 * (beta + 1) * (M - Ms))
    return normalisation / (faint_end + bright_end)
def mbh(mg, alpha=-3.5, beta=1, scatter=False):
    """Black-hole mass from galaxy mass via a power-law scaling relation.

    mg is the galaxy mass in h-scaled units; alpha is the log10 normalisation
    and beta the slope.  If `scatter` is a number, log-normal scatter of that
    many dex is applied (array input required, since .size is used).
    Relies on the module-level Hubble parameter h.
    """
    gal_mass = mg / h
    bh_mass = 1e10 * 10 ** alpha * (gal_mass / 1e10) ** beta
    if scatter:
        bh_mass = 10 ** (np.log10(bh_mass) + np.random.normal(scale=scatter, size=bh_mass.size))
    return bh_mass * h
def lq(mb, eta=0.1, scatter=False):
    """Quasar luminosity for black-hole mass mb (h-scaled units).

    eta is the radiative efficiency; if `scatter` is a number, a per-object
    log-normal eta is drawn instead (array input required).  Uses the
    module-level Hubble parameter h.
    """
    bh_mass = mb / h
    lsun = 3.28e26  # solar-luminosity scale factor used by this module (W)
    if scatter:
        eta = np.random.lognormal(eta, scatter, bh_mass.size)
    return 3.3e4 * eta * bh_mass * lsun
# --- script setup: load halo/galaxy catalogs for one snapshot -------------
zz = 5                    # redshift of the snapshot to analyse
aa = 1/(1+zz)             # scale factor used in the snapshot directory names
dpath = '/project/projectdirs/m3127/H1mass/'
scratch = '/global/cscratch1/sd/chmodi/m3127/H1mass/'
bs = 256                  # simulation box size (h^-1 Mpc)
sim = '/highres/%d-9100-fixed'%2560
print('Reading files')
# halo catalog plus central/satellite subhalo catalogs for this snapshot
halos = BigFileCatalog(dpath + sim+ '/fastpm_%0.4f/halocat/'%aa)
cencat = BigFileCatalog(scratch + sim+ '/fastpm_%0.4f/cencat-m1_00p3mh-alpha-0p8-subvol/'%aa)
satcat = BigFileCatalog(scratch + sim+ '/fastpm_%0.4f/satcat-m1_00p3mh-alpha-0p8-subvol/'%aa)
hpos = halos['Position'].compute()
hmass = halos['Mass'].compute()
cmass = cencat['Mass'].compute()
smass = satcat['Mass'].compute()
# all galaxies = centrals + satellites
allmass = np.concatenate((cmass, smass))
# stellar masses with 0.3 dex scatter via the Moster relation
mgal = moster(allmass, zz, scatter=0.3)
mgalshuffle = np.random.permutation(mgal)
# absolute-magnitude bins for the luminosity function histograms
magbins = np.linspace(-30, -10, 42)
def qlftomin(p):
    """Objective for the fit: chi-square distance between the mock quasar
    luminosity function and the target qlf().

    p = (fon, alpha, beta): duty-cycle fraction of galaxies hosting an active
    quasar, and the mbh() normalisation/slope.  Out-of-bound parameters are
    penalised by inflating chi-square by 1e10.  Uses the module-level globals
    mgal, magbins, zz, bs and h; stochastic because of the random permutation
    and scatter draws.
    """
    fon, alpha, beta = p
    # note: this re-shuffles on every call (shadows the module-level
    # mgalshuffle), so the objective is noisy
    mgalshuffle = np.random.permutation(mgal)
    # randomly switch "on" a fraction fon of the galaxies
    mgalon = mgalshuffle[:int(fon*mgal.size)]
    mb = mbh(mgalon, scatter=0.3, alpha=alpha, beta=beta)
    lum = lq(mb, eta=0.1, scatter=0.3)
    # convert luminosity to absolute magnitude
    mag14 = 72.5+0.29- 2.5*np.log10(lum)
    nmag14, xmag14 = np.histogram(mag14, magbins)
    # geometric bin centers (magnitudes are negative, hence the sign flip)
    xmag14 = -(xmag14[:-1]*xmag14[1:])**0.5
    nmag14 = nmag14/np.diff(magbins)[0]
    # luminosity-weighted histogram, used to weight the chi-square
    lmag14, _ = np.histogram(mag14, magbins, weights=lum/lum.sum())
    lmag14 = lmag14/np.diff(magbins)[0]
    # target counts per bin: qlf is per comoving volume, so scale by box volume
    ntrue = qlf(xmag14, zz)*(bs/h)**3
    chisq = ((ntrue - nmag14)**2 * lmag14).sum()
    #chisq = ((ntrue - nmag14)**2).sum()
    # soft box constraints on the parameters
    if (fon > 5e-2) or (fon < 1e-3) : chisq *= 1e10
    if (alpha > -1) or (alpha < -5) : chisq *= 1e10
    if (beta > 1.5) or (beta < 0.5) : chisq *= 1e10
    return chisq
# iteration counter shared with the optimizer callback below
niter = 0
def callback(xk):
    """Progress reporter for scipy.optimize.minimize: every 10th iteration,
    print the current parameter vector and its objective value."""
    global niter
    if niter%10==0:
        print('For iteration : ', niter)
        # note: evaluating qlftomin here costs one extra (noisy) objective call
        print(xk, '%0.2e'%qlftomin(xk))
    niter +=1
print('Starting minimization')
# initial guess for (fon, alpha, beta)
p0 = [1e-2, -3.5, 1]
# bounds are only used by the commented-out bounded-solver variants below;
# Nelder-Mead ignores them (qlftomin enforces the box via penalties instead)
bounds = Bounds([1e-3, -5, 0.5], [5e-2, -2, 1.5])
xx = minimize(qlftomin, p0, method='Nelder-Mead', callback=callback, options={'maxfev':1000})
#xx = minimize(qlftomin, p0, callback=callback, options={'maxfev':1000}, bounds=bounds)
#xx = minimize(qlftomin, p0, method='BFGS', callback=callback)
print(xx)
| 2.015625 | 2 |
tests/batch/etrade_csv_ingestor_test.py | rirwin/stock-analysis | 0 | 12759804 | import argparse
import datetime
import mock
import pytest
from batch.etrade_csv_ingestor import EtradeIngestor
from batch.etrade_csv_ingestor import RowParserException
from stock_analysis.logic import order_history
class TestEtradeCsvIngestor(object):
    """Unit tests for EtradeIngestor: argument parsing, the run() wiring, and
    CSV row parsing (including malformed and non-Bought rows)."""
    def test_init(self):
        # a fresh ingestor parses --csv-path and owns an OrderHistoryLogic
        batch = EtradeIngestor()
        args = batch.arg_parser.parse_args(['--csv-path', './path/to/data.csv'])
        assert args == argparse.Namespace(csv_path='./path/to/data.csv')
        assert type(batch.order_logic) == order_history.OrderHistoryLogic
    def test_run(self):
        # run() should: parse args, parse orders from the csv path, add them
        batch = EtradeIngestor()
        csv_path = './path/to/data.csv'
        mock_args = mock.Mock()
        mock_args.csv_path = csv_path
        mock_parsed_orders = mock.Mock()
        with \
                mock.patch.object(
                    batch.arg_parser, 'parse_args', return_value=mock_args
                ) as patch_parse_args, \
                mock.patch.object(
                    batch, 'parse_orders_from_csv', return_value=mock_parsed_orders
                ) as patch_parse_orders, \
                mock.patch.object(batch.order_logic, 'add_orders') as patch_add_orders:
            batch.run()
        assert patch_parse_args.called
        assert patch_parse_orders.call_args_list == [mock.call(csv_path)]
        assert patch_add_orders.call_args_list == [mock.call(mock_parsed_orders)]
    def test_parse_orders_from_csv(self):
        # parse_orders_from_csv should open the file, build a csv reader, and
        # delegate to parse_orders_from_csv_reader
        csv_path = '/path/to/csv'
        mock_reader = mock.Mock()
        mock_orders = mock.Mock()
        batch = EtradeIngestor()
        with \
                mock.patch('builtins.open') as patch_open,\
                mock.patch('csv.reader', return_value=mock_reader) as patch_reader,\
                mock.patch.object(
                    batch, 'parse_orders_from_csv_reader', return_value=mock_orders
                ) as patch_parse_orders:
            orders = batch.parse_orders_from_csv(csv_path)
        assert patch_open.call_args_list == [mock.call(csv_path)]
        assert patch_reader.called
        assert patch_parse_orders.call_args_list == [mock.call(mock_reader)]
        assert orders == mock_orders
    def test_parse_orders_from_csv_reader(self):
        batch = EtradeIngestor()
        # the reader is read in a loop (i.e., for row in reader:)
        reader = [
            '06/12/17,Bought,EQ,NFLX,19,-2924.87,153.6799,4.95,NETFLIX COM INC'.split(','),
            '06/08/17,Bought,EQ,NFLX,39,-2924.46,151.9,4.95,NETFLIX COM INC'.split(','),
        ]
        orders = batch.parse_orders_from_csv_reader(reader)
        assert set(orders) == set([
            order_history.Order(
                batch.user_id,
                order_history.BUY_ORDER_TYPE,
                'NFLX',
                datetime.datetime(2017, 6, 12).date(),
                19,
                153.6799
            ),
            order_history.Order(
                batch.user_id,
                order_history.BUY_ORDER_TYPE,
                'NFLX',
                datetime.datetime(2017, 6, 8).date(),
                39, 151.9
            ),
        ])
    def test_extract_order_from_row_skips_malformed_row(self):
        # a row with an unparseable date should raise RowParserException
        batch = EtradeIngestor()
        row = 'MALFORMED_DATE,Bought,EQ,NFLX,39,-2924.46,151.9,4.95,NETFLIX COM INC'.split(',')
        with pytest.raises(RowParserException):
            batch.extract_order_from_row(row)
    def test_extract_order_from_row_ignores_non_bought_txn_type(self):
        # rows whose transaction type is not 'Bought' are silently ignored
        batch = EtradeIngestor()
        row = '06/12/17,UKNOWN_TXN_TYPE,EQ,NFLX,39,-2924.46,151.9,4.95,NETFLIX COM INC'.split(',')
        order = batch.extract_order_from_row(row)
        assert order is None
    def test_parse_orders_from_csv_reader_skips_malformed_lines(self):
        # malformed lines are skipped; valid lines are still ingested
        batch = EtradeIngestor()
        # the reader is read in a loop (i.e., for row in reader:)
        reader = [
            '06/12/17,Bought,EQ,NFLX,19,-2924.87,153.6799,4.95,NETFLIX COM INC'.split(','),
            '06/08/17,Bought,EQ,NFLX,39,-2924.46,151.9,4.95,NETFLIX COM INC'.split(','),
            'MALFORMED_DATE,Bought,EQ,NFLX,39,-2924.46,151.9,4.95,NETFLIX COM INC'.split(','),
        ]
        orders = batch.parse_orders_from_csv_reader(reader)
        assert set(orders) == set([
            order_history.Order(
                batch.user_id,
                order_history.BUY_ORDER_TYPE,
                'NFLX',
                datetime.datetime(2017, 6, 12).date(),
                19,
                153.6799
            ),
            order_history.Order(
                batch.user_id,
                order_history.BUY_ORDER_TYPE,
                'NFLX',
                datetime.datetime(2017, 6, 8).date(),
                39, 151.9
            ),
        ])
| 2.546875 | 3 |
hypha/apply/projects/migrations/0021_add_paid_value.py | maxpearl/hypha | 20 | 12759805 | <reponame>maxpearl/hypha
# Generated by Django 2.0.13 on 2019-08-26 12:04
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add PaymentRequest.paid_value: optional positive decimal (>= 0.01)."""
    dependencies = [
        ('application_projects', '0020_rename_value_to_requested_value'),
    ]
    operations = [
        migrations.AddField(
            model_name='paymentrequest',
            name='paid_value',
            # nullable because existing rows have no paid value yet
            field=models.DecimalField(decimal_places=2, max_digits=10, null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.01'))]),
        ),
    ]
| 1.96875 | 2 |
pylic/cli/commands/list.py | ubersan/pylic | 5 | 12759806 | <filename>pylic/cli/commands/list.py
from typing import List
from pylic.cli.commands.command import Command
from pylic.cli.console_writer import BLUE, BOLD, END_STYLE, LABEL, UNDERLINE, console_writer
from pylic.licenses import read_all_installed_licenses_metadata
class ListCommand(Command):
    """CLI command that prints every installed package with its license."""
    targets = ["list"]
    token = "list"
    def handle(self, options: List[str]) -> int:
        """Print 'package (version): license' lines, sorted case-insensitively
        by package name.  Returns 0 on success, 1 when help was shown."""
        if "help" in options:
            self._show_help()
            return 1
        installed_licenses = read_all_installed_licenses_metadata()
        # duplicate package entries collapse to the last one seen
        unsorted = {
            installed["package"]: {"version": installed["version"], "license": installed["license"]} for installed in installed_licenses
        }
        for package, rest in sorted(unsorted.items(), key=lambda k: k[0].lower()): # type:ignore
            console_writer.line(f"{BLUE}{package}{END_STYLE} {LABEL}({rest['version']}){END_STYLE}: {rest['license']}")
        return 0
    def _show_help(self) -> None:
        """Write the usage/help text for the `list` command to the console."""
        console_writer.line(f"{BOLD}USAGE{END_STYLE}")
        console_writer.line(f"  {UNDERLINE}pylic{END_STYLE} {UNDERLINE}list{END_STYLE} [-h]\n")
        console_writer.line(f"{BOLD}GLOBAL OPTIONS{END_STYLE}")
        console_writer.line(f"  {LABEL}-h{END_STYLE} (--help)\tDisplay this help message\n")
        console_writer.line(f"{BOLD}DESCRIPTION{END_STYLE}")
        console_writer.line("  Lists all installed packages and their corresponding license.")
| 2.515625 | 3 |
src/posts/migrations/0003_post_tags.py | m3h-D/Myinfoblog | 0 | 12759807 | <filename>src/posts/migrations/0003_post_tags.py<gh_stars>0
# Generated by Django 2.2.6 on 2019-11-28 06:01
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Post.tags: optional PostgreSQL array of up to 5 short strings."""
    dependencies = [
        ('posts', '0002_post_previous'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='tags',
            # verbose_name is the Persian label for "tags"
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=90), blank=True, null=True, size=5, verbose_name='تگ ها'),
        ),
    ]
| 1.320313 | 1 |
day_2/puzzle_1.py | ronaldvandenbroek/advent-of-code-2021 | 0 | 12759808 | <gh_stars>0
# Advent of Code 2021, day 2 puzzle 1: follow submarine movement commands and
# report the product of the final horizontal and vertical positions.
# Refactored into functions so the parsing/navigation logic is testable and
# the input file is opened via a context manager.

def parse_commands(lines):
    """Parse 'direction amount' lines into (direction, int(amount)) tuples."""
    commands = []
    for line in lines:
        direction, amount = line.split(" ")
        commands.append((direction, int(amount)))
    return commands


def navigate(commands):
    """Return the (horizontal, vertical) position after applying all commands.

    'forward' increases horizontal; 'down' increases depth (vertical);
    'up' decreases depth.  Unknown directions are ignored, as before.
    """
    position_horizontal = 0
    position_vertical = 0
    for direction, amount in commands:
        if direction == 'forward':
            position_horizontal += amount
        elif direction == 'down':
            position_vertical += amount
        elif direction == 'up':
            position_vertical -= amount
    return position_horizontal, position_vertical


def main():
    """Read the puzzle input, process it, and print the results."""
    with open('../input/day_2.txt') as f:
        lines = f.read().splitlines()
    position_horizontal, position_vertical = navigate(parse_commands(lines))
    print(f'Horizontal Movement: {position_horizontal}')
    print(f'Vertical Movement: {position_vertical}')
    print(f'Multiplied Movement: {position_horizontal * position_vertical}')


if __name__ == '__main__':
    main()
| 3.28125 | 3 |
eurlex2lexparency/extraction/meta_data/language_format.py | Lexparency/eurlex2lexparency | 0 | 12759809 | <gh_stars>0
import os
from lxml import etree as et
from urllib.parse import urljoin
from collections import defaultdict
from datetime import date
from eurlex2lexparency.extraction import textify
from eurlex2lexparency.extraction.generic import FormatsNotLoaded, Formats
from eurlex2lexparency.celex_manager import SessionManager, Representation
from eurlex2lexparency.extraction.meta_data.eli_data import EurLexDocumentLandingPage
from eurlex2lexparency.utils import SwingingFileLogger
class EurLexLanguagesAndFormats:
    """Map each language of an EUR-Lex document to its HTML/PDF download URLs.

    Tries the local database first (load_local); on a cache miss it scrapes
    the EUR-Lex landing page (retrieve_online) and persists the result.
    Note: self.celex is the CELEX id, not a URL.
    """
    def __init__(self, local_path, celex, consoli_date=date(1900, 1, 1)):
        # consoli_date of 1900-01-01 acts as the sentinel for "not consolidated"
        os.makedirs(local_path, exist_ok=True)
        self.logger = SwingingFileLogger.get('rdf', local_path)
        self.celex = celex
        self.consoli_date = consoli_date
        self.landing_page = EurLexDocumentLandingPage.construct_from(
            local_path, celex, consoli_date).document
        self.sessionator = SessionManager()
        # language code (e.g. 'DE') -> Formats(html_url, pdf_url)
        self.languages_to_formats = dict()
        try:
            self.load_local()
        except FormatsNotLoaded:
            self.retrieve_online()
    def load_local(self):
        """Fill languages_to_formats from the database; raise FormatsNotLoaded
        if the URLs were never fetched or no rows exist."""
        with self.sessionator() as s:
            for row in s.query(Representation)\
                    .filter(Representation.celex == self.celex,
                            Representation.date == self.consoli_date):
                # the value "None" means unloaded, since at the persist-step
                # None-values are converted to empty strings.
                if row.url_html is None and len(self.languages_to_formats) == 0:
                    raise FormatsNotLoaded
                self.languages_to_formats[row.language] = Formats(
                    row.url_html or None, row.url_pdf or None)
        if len(self.languages_to_formats) == 0:
            raise FormatsNotLoaded("Nothing found.")
    def guessed_url(self, language, fmt):
        """ Guesses url from the observed systematic by eur-lex
            :param language: DE, EN, ES, ...
            :param fmt: HTML or PDF
            :return: the guessed URL for the sought-after document.
        """
        if self.consoli_date == date(1900, 1, 1):
            celex = self.celex
        else:
            # consolidated versions use a leading '0' and a date suffix
            celex = ''.join(('0', self.celex[1:], '-',
                             self.consoli_date.strftime('%Y%m%d')))
        return f'https://eur-lex.europa.eu/' \
               f'legal-content/{language}/TXT/{fmt}/?uri=CELEX:{celex}'
    def guess(self):
        """ Fallback method, if retrieve online fails: construct the URLs from
        the known EUR-Lex URL pattern for every language already in the DB. """
        with self.sessionator() as s:
            for row in s.query(Representation)\
                    .filter(Representation.celex == self.celex,
                            Representation.date == self.consoli_date):
                self.languages_to_formats[row.language] = Formats(
                    self.guessed_url(row.language, 'HTML'),
                    self.guessed_url(row.language, 'PDF')
                )
    def retrieve_online(self):
        """Scrape the landing page; on any failure fall back to guessed URLs.
        Always persists whatever was obtained."""
        # noinspection PyBroadException
        try:
            self._retrieve_online()
        except Exception:
            self.logger.error('Could not load the format\'s URLs.', exc_info=True)
            self.guess()
        self.persist()
    def _retrieve_online(self):
        """Parse the 'PP2Contents' block of the landing page: first PubFormat
        row lists the languages, subsequent rows list per-format URLs."""
        rows = [
            div
            for div in self.landing_page.xpath('//div[@id="PP2Contents"]')[0].xpath(
                './/div[@class]')
            # mimicking selector string "div.PubFormat", including possibility
            # that some divs have several classes:
            if 'PubFormat' in div.attrib['class'].split()
        ]
        languages = [
            textify(el, with_tail=False, simplify_blanks=True)
            for el in rows[0].xpath('.//ul/li')
        ]
        languages_and_formats = defaultdict(list)
        languages_and_formats['languages'] = languages
        for row in rows[1:]:
            format_ = et.tostring(row[0], method='text',
                                  encoding='unicode').strip()
            for data in row[1].xpath('./ul/li'):
                # a 'disabled' item means this format is unavailable for
                # the language at this position
                if data.attrib.get('class') == 'disabled':
                    languages_and_formats[format_].append(None)
                else:
                    url = data.xpath('./a/@href')[0]
                    languages_and_formats[format_].append(urljoin(
                        self.landing_page.url, url).replace('&from=EN', ''))
        formats = [
            key for key in languages_and_formats.keys() if key != 'languages'
        ]
        # drop languages for which no format at all is available
        deletables = []
        for k, language in enumerate(languages_and_formats['languages']):
            for format_ in formats:
                if languages_and_formats[format_][k]:
                    break
            else:  # if no break occurs
                deletables.append(k)
        for index in reversed(deletables):
            for value in languages_and_formats.values():
                value.pop(index)
        # only HTML and PDF are of interest downstream
        for key in tuple(languages_and_formats.keys()):
            if key not in ('languages', 'PDF', 'HTML'):
                languages_and_formats.pop(key)
        # pad missing format columns with None so zip() below lines up
        for fmt in ('PDF', 'HTML'):
            languages_and_formats[fmt] = [None]\
                * len(languages_and_formats['languages']) \
                if languages_and_formats[fmt] == []\
                else languages_and_formats[fmt]
        for language, pdf, html in zip(languages_and_formats['languages'],
                                       languages_and_formats['PDF'],
                                       languages_and_formats['HTML']):
            self.languages_to_formats[language] = Formats(html, pdf)
    def persist(self):
        """Write the collected URLs back to the database.  Missing URLs are
        stored as empty strings, which marks them as 'fetched but absent'
        (distinct from NULL = never fetched; see load_local)."""
        no_formats = Formats('', '')
        with self.sessionator() as s:
            for row in s.query(Representation)\
                    .filter(Representation.celex == self.celex,
                            Representation.date == self.consoli_date):
                # Empty string means that the information has been downloaded
                # but no URL available
                formats = self.languages_to_formats.get(row.language,
                                                        no_formats)
                row.url_html = formats.html or ''
                row.url_pdf = formats.pdf or ''
if __name__ == '__main__':
    # ad-hoc smoke test: fetch language/format URLs for CRR (CELEX 32013R0575),
    # consolidated version of 2013-06-28
    from settings import LEXPATH
    from eurlex2lexparency.celex_manager import CelexBase
    c = CelexBase.from_string('32013R0575')
    ellaf = EurLexLanguagesAndFormats(os.path.join(LEXPATH, c.path), str(c), date(2013, 6, 28))
| 2.34375 | 2 |
app/settings.py | Hoxbro/panel-multi | 8 | 12759810 | <filename>app/settings.py
from fastapi.templating import Jinja2Templates
from models import titles
__version__ = "0.1.0"
# Jinja2 template renderer rooted at the local "templates" directory
templates = Jinja2Templates(directory="templates")
# context merged into every template render
GLOBAL_CONTEXT = {"version": __version__, "titles": titles}
# SECURITY: placeholder secret checked into source control — must be replaced
# (e.g. loaded from an environment variable) before any real deployment.
SECRET_KEY = "CHANGEMEFORTHELOVEOFGOD!!!!!!!!!"
ALLOWED_HOSTS = ["*"] # Could be made more specific
| 1.742188 | 2 |
bb_IDP/scripts/IDP_T1_GM_parc_gen.py | yilewang/tvb-ukbb | 4 | 12759811 | <gh_stars>1-10
#!/bin/env python
#
# Script name: IDP_T1_GM_parc_gen.py
#
# Description: Script to generate the bb_IDP_T1_GM_parcellation IDP file.
#
## Author: <NAME>
import nibabel as nib
import numpy as np
import sys
import os
def IDP_T1_GM_parc_gen(PARC_LUT, IDP_file, parcel_to_T1, pve_1):
    """Generate the bb_IDP_T1_GM_parcellation IDP file.

    For each ROI listed in the parcellation look-up table, compute
    (number of voxels in ROI) * (mean grey-matter PVE intensity in ROI)
    and write the values space-separated (with "NaN" for empty ROIs) as a
    single line to IDP_file.

    PARC_LUT     -- path to the tab-separated look-up table; first column is
                    the integer ROI label.
    IDP_file     -- path of the output text file.
    parcel_to_T1 -- path to the parcellation image aligned to T1 space.
    pve_1        -- path to the grey-matter partial-volume-estimate image.
    """
    parcel_data = nib.load(parcel_to_T1).get_fdata()
    GM_data = nib.load(pve_1).get_fdata()
    # first tab-separated column of every LUT line is the ROI label
    with open(PARC_LUT) as f:
        roi_labels = [int(line.split("\t")[0]) for line in f.read().splitlines()]
    values = []
    for roi in roi_labels:
        roi_mask = parcel_data == roi
        num_voxels = np.count_nonzero(roi_mask)
        if num_voxels == 0:
            # ROI absent from this subject's parcellation
            values.append("NaN")
        else:
            values.append(str(num_voxels * np.mean(GM_data[roi_mask])))
    # single space-separated line, matching the original output format
    with open(IDP_file, "w") as f:
        f.write(" ".join(values) + "\n")
if __name__ == "__main__":
"""Function that reorganizes pve files that have been mislabelled.
Usage
----------
python rename_pve.py brain pve0 pve1 pve2
Arguments
----------
PARC_LUT: full file path to PARC_LUT
IDP_file: full file path to bb_IDP_T1_GM_parcellation.txt
parcel_to_T1: full file path to parcel_to_T1 file
pve_1: full file path to pve_1
"""
# try:
IDP_T1_GM_parc_gen(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])
| 2.234375 | 2 |
tests/test_callbacks_misc.py | travisdickey/fastai | 59 | 12759812 | <reponame>travisdickey/fastai
import pytest
from fastai.callbacks.misc import *
from fastai.gen_doc.doctest import this_tests
from fastai.utils.fakes import *
from fastai.utils.text import CaptureStdout
def stop_after_n_batches_run_n_check(learn, bs, run_n_batches_exp):
    """Fit the learner and assert that StopAfterNBatches halted training
    after epoch 0 with exactly run_n_batches_exp batches executed."""
    # NOTE(review): has_batches appears unused — presumably informational; confirm
    has_batches = len(learn.data.train_ds)//bs
    with CaptureStdout() as cs:
        learn.fit_one_cycle(3, max_lr=1e-2)
    # the metrics header must still have been printed
    for s in ['train_loss', 'valid_loss']:
        assert s in cs.out, f"expecting '{s}' in \n{cs.out}"
    # test that epochs are stopped at epoch 0
    assert "\n0" in cs.out, "expecting epoch0"
    assert "\n1" not in cs.out, "epoch 1 shouldn't run"
    # test that only run_n_batches_exp batches were run
    run_n_batches_got = len(learn.recorder.losses)
    assert run_n_batches_got == run_n_batches_exp, f"should have run only {run_n_batches_exp}, but got {run_n_batches_got}"
def test_stop_after_n_batches():
    """StopAfterNBatches must truncate training both when installed globally
    (via ``defaults.extra_callbacks``) and when attached to a single learner.
    """
    this_tests(StopAfterNBatches)
    # this should normally give us 10 batches for train_ds
    train_length = 20
    bs = 2
    # but we only want to run 2
    run_n_batches = 2
    print()

    # 1. global assignment: the callback is installed for every learner created
    defaults_extra_callbacks_bak = defaults.extra_callbacks
    defaults.extra_callbacks = [StopAfterNBatches(n_batches=run_n_batches)]
    try:
        learn = fake_learner(train_length=train_length, batch_size=bs)
        stop_after_n_batches_run_n_check(learn, bs, run_n_batches)
    finally:
        # restore even if the checks above fail, so the global default does
        # not leak into other tests in the session
        defaults.extra_callbacks = defaults_extra_callbacks_bak

    # 2. dynamic assignment: the callback is attached to one learner only
    learn = fake_learner(train_length=train_length, batch_size=bs)
    learn.callbacks.append(StopAfterNBatches(n_batches=run_n_batches))
    stop_after_n_batches_run_n_check(learn, bs, run_n_batches)
| 1.960938 | 2 |
examples/2D/flight_conditions/aerodynamic_pdf.py | SzymonSzyszko/AeroPy | 1 | 12759813 | <reponame>SzymonSzyszko/AeroPy<filename>examples/2D/flight_conditions/aerodynamic_pdf.py
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import interpolate
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
import aeropy.xfoil_module as xf
from aeropy.aero_module import Reynolds
from aeropy.geometry.airfoil import CST, create_x
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import simps
from weather.scraper.flight_conditions import properties, Airframe
def expected(data, airFrame):
    """Return the probability-weighted average lift-to-drag ratio.

    Computes E[L/D] over the (angle of attack, velocity) flight envelope:

        E[L/D] = (integral of (L/D) * p dalpha dV) / (integral of p dalpha dV)

    where p is the airframe's estimated probability density.

    Parameters
    ----------
    data : tuple of ndarray
        ``(alpha, V, lift_to_drag)``: 2-D meshgrid-style arrays where each
        row ``alpha[i]`` spans the angles of attack and ``V[:, 0]`` spans
        the velocities.
    airFrame : object
        Must expose ``pdf.score_samples`` returning log-densities for
        stacked ``(alpha, V)`` sample points (e.g. a fitted KDE).

    Returns
    -------
    float
        The expected lift-to-drag ratio.
    """
    # `scipy.integrate.simps` was removed in SciPy 1.14; use its
    # replacement `simpson`, falling back to `simps` on older SciPy.
    try:
        from scipy.integrate import simpson as _simpson
    except ImportError:  # SciPy < 1.6
        from scipy.integrate import simps as _simpson
    alpha, V, lift_to_drag = data
    pdf = airFrame.pdf.score_samples(np.vstack([alpha.ravel(), V.ravel()]).T)
    pdf = np.exp(pdf.reshape(lift_to_drag.shape))
    # Inner integral over alpha (row by row), outer integral over V.
    numerator_list = []
    denominator_list = []
    for i in range(len(lift_to_drag)):
        numerator_list.append(_simpson(lift_to_drag[i] * pdf[i], x=alpha[i]))
        denominator_list.append(_simpson(pdf[i], x=alpha[i]))
    numerator = _simpson(numerator_list, x=V[:, 0])
    denominator = _simpson(denominator_list, x=V[:, 0])
    return numerator / denominator
# Load the pre-fitted airframe flight-condition model and the CST airfoil
# shape database (context managers ensure the files are closed promptly).
with open('C172.p', 'rb') as f:
    C172 = pickle.load(f)
with open('../2D/fitting.p', 'rb') as f:
    airfoil_database = pickle.load(f)

# CST coefficients (upper/lower surfaces) and trailing-edge thicknesses.
Al_database = np.array(airfoil_database['Al'])
Au_database = np.array(airfoil_database['Au'])
dl_database = np.array(airfoil_database['dl'])
du_database = np.array(airfoil_database['du'])

airfoil = 'from_database_3'
altitude = 10000
chord = 1

# Flight-condition samples (angle of attack, velocity) from the airframe model.
[AOAs, velocities] = C172.samples.T
AOAs = AOAs[0]
velocities = velocities[0]

# Resume from previously stored results (the loop below continues at index 1145).
with open('aerodynamics_3.p', 'rb') as f:
    data = pickle.load(f)

for j in range(1145, len(Au_database)):
    data['L/D'].append([])
    print(j, airfoil_database['names'][j])
    Au = Au_database[j, :]
    Al = Al_database[j, :]
    x = create_x(1., distribution='linear')
    y = CST(x, chord, deltasz=[du_database[j], dl_database[j]],
            Al=Al, Au=Au)
    # Write the airfoil geometry to the XFOIL input file.
    xf.create_input(x, y['u'], y['l'], airfoil, different_x_upper_lower=False)
    for i in range(len(AOAs)):
        AOA = AOAs[i]
        V = velocities[i]
        try:
            Data = xf.find_coefficients(airfoil, AOA,
                                        Reynolds=Reynolds(10000, V, chord),
                                        iteration=100, NACA=False,
                                        delete=True)
            lift_drag_ratio = Data['CL']/Data['CD']
        except Exception:
            # XFOIL did not converge / returned no coefficients. The bare
            # `except:` was narrowed so KeyboardInterrupt still stops the run.
            lift_drag_ratio = None
        # Retry with slightly perturbed conditions and average the two runs.
        increment = 0.1
        conv_counter = 0
        while lift_drag_ratio is None and conv_counter < 3:
            print(increment)
            Data_f = xf.find_coefficients(airfoil, AOA*(1+increment),
                                          Reynolds=Reynolds(10000, V*(1+increment), chord),
                                          iteration=100, NACA=False,
                                          delete=True)
            Data_b = xf.find_coefficients(airfoil, AOA*(1-increment),
                                          Reynolds=Reynolds(10000, V*(1-increment), chord),
                                          iteration=100, NACA=False,
                                          delete=True)
            print(Data_f['CL'], Data_f['CD'])
            print(Data_b['CL'], Data_b['CD'])
            try:
                lift_drag_ratio = .5*(Data_f['CL']/Data_f['CD'] +
                                      Data_b['CL']/Data_b['CD'])
            except(TypeError):
                increment += 0.1
            conv_counter += 1
        print(airfoil_database['names'][j], AOA, V, lift_drag_ratio)
        data['L/D'][-1].append(lift_drag_ratio)
        # Give up on this airfoil after too many non-converged points.
        if data['L/D'][-1].count(None) > 3:
            break
    # Checkpoint results after every airfoil so progress survives a crash.
    with open('aerodynamics_3.p', 'wb') as f:
        pickle.dump(data, f)
| 2.734375 | 3 |
InstatntMessengerBot.py | adrian88szymanski/Automate_the_Boring_Stuff_with_Python_by_Sweigart | 1 | 12759814 | #!/usr/bin/env python3
"""Sends out a message to a selected group of Google Hangouts contacts."""
import time
import pyautogui
def auto_message(name, message):
    """Search for a friend on Google Hangouts and send them a message.

    Makes up to 3 attempts to type the message into the chat box; gives up
    and presses Esc if all attempts fail.
    """
    print("Make sure the Google Hangout 'Conversations' page is visible and "
          "your cursor is not currently on the page.")
    time.sleep(3)
    search_bar = pyautogui.locateOnScreen('search.png')
    pyautogui.click(search_bar)
    pyautogui.typewrite(name)
    time.sleep(1)
    online_select = pyautogui.locateOnScreen('online-friend.png')
    if online_select is None:
        print('Friend not found or currently offline.')
        return
    pyautogui.doubleClick(online_select)
    attempts = 3
    while attempts > 0:
        message_box = pyautogui.locateOnScreen('message.png')
        pyautogui.click(message_box)
        pyautogui.typewrite(message)
        # If the message box can no longer be found, the message was entered.
        if pyautogui.locateOnScreen('message.png') is None:
            pyautogui.press('enter')
            pyautogui.press('esc')
            print('Message sent to {}'.format(name))
            break
        if attempts == 1:
            print('Unable to send message to {}.'.format(name))
            pyautogui.press('esc')
        else:
            print('Sending message to {} failed. Another {} attempts will '
                  'be made before moving on.'.format(name, attempts))
        # BUG FIX: always consume an attempt. Previously `attempts` was not
        # decremented on the final failed try, so the loop never terminated.
        attempts -= 1
# Prompt for a comma-separated list of recipients and the message text,
# then dispatch the message to each contact in turn.
print('Enter the contacts you wish to send a message to (e.g. Bob, Bill):')
recipients = [name.strip() for name in input().split(',')]
print('Enter the message you wish to send out to them:')
outgoing = input()
for recipient in recipients:
    auto_message(recipient, outgoing)
dependency/installer.py | evetness/Starter | 1 | 12759815 | from dependency.status import Status
from subprocess import run, Popen, PIPE
class Installer:
    """Installer class which chooses from the package manager,
    then installs a package.
    """
    def __init__(self):
        # Status printer used to announce each install/add action.
        self._stat = Status()

    def _apt(self, pkg):
        """Installs the required package with the apt package manager.

        :param pkg: str package name
        """
        self._stat.status(pkg, 'install')
        run(['sudo', 'apt', 'install', pkg])

    def _snap(self, pkg, oth=None):
        """Installs the required package with the snap manager.

        :param pkg: str package name
        :param oth: str OPTIONAL 'classic' to pass the --classic flag
        """
        if oth == 'classic':
            self._stat.status(pkg, 'install')
            run(['sudo', 'snap', 'install', pkg, '--' + oth])
        else:
            self._stat.status(pkg, 'install')
            run(['sudo', 'snap', 'install', pkg])

    def _deb(self, oth):
        """Checks if gDebi is installed, and installs
        the required package with the gDebi manager.

        :param oth: str .deb package link
        """
        installer = Popen('dpkg -l gdebi', shell=True, stdout=PIPE)
        installer.wait()
        if installer.returncode == 1:
            # BUG FIX: `install` expects a [manager, package] list; the old
            # call `self.install('gdebi')` passed a bare string, so
            # packages[0] was the letter 'g' and gdebi was never installed.
            self.install(['apt', 'gdebi'])
        pkg_name = oth.rsplit('/', 1)[-1]
        self._stat.status(pkg_name, 'install')
        run(['wget', oth])
        run(['sudo', 'gdebi', pkg_name, '-n'])
        run(['rm', '-rf', pkg_name])

    def _repo(self, pkg, oth):
        """Adds the required repository and installs the package.

        :param pkg: str package name
        :param oth: str repository (e.g. 'ppa:owner/name')
        """
        name = oth.rsplit(':', 1)[-1]
        self._stat.status(name, 'add')
        run(['sudo', 'add-apt-repository', '-y', oth])
        self.update()
        self.install(['apt', pkg])

    def update(self):
        """Method that updates the system package lists."""
        self._stat.status('system', 'update')
        run(['sudo', 'apt', 'update', '-y'])

    def install(self, packages):
        """Calls the required package manager method.

        :param packages: list of [manager, package, optional extra], e.g.
            ['apt', 'git'], ['snap', 'code', 'classic'],
            ['deb', None, '<url>'], ['repo', 'pkg', 'ppa:...']
        """
        mgr = packages[0].lower()
        pkg = packages[1].lower() if len(packages) >= 2 else None
        oth = packages[2] if len(packages) == 3 else None
        if mgr == 'apt':
            self._apt(pkg)
        elif mgr == 'snap':
            self._snap(pkg, oth)
        elif mgr == 'deb':
            self._deb(oth)
        elif mgr == 'repo':
            self._repo(pkg, oth)
| 2.765625 | 3 |
MMLL/models/POM3/Kmeans/Kmeans.py | Musketeer-H2020/MMLL-Robust | 0 | 12759816 | <reponame>Musketeer-H2020/MMLL-Robust
# -*- coding: utf-8 -*-
'''
Kmeans model under POM3.
'''
__author__ = "<NAME>"
__date__ = "January 2021"
# Code to ensure reproducibility in the results
#from numpy.random import seed
#seed(1)
import numpy as np
from math import floor
from MMLL.models.POM3.CommonML.POM3_CommonML import POM3_CommonML_Master, POM3_CommonML_Worker
from MMLL.models.Common_to_models import Common_to_models
class Kmeans_model(Common_to_models):
    """
    This class contains the Kmeans model.
    """

    def __init__(self, logger):
        """
        Create a :class:`Kmeans_model` instance.

        Parameters
        ----------
        logger: :class:`mylogging.Logger`
            Logging object instance.
        """
        self.logger = logger
        self.is_trained = False                           # True once the federated training finishes
        self.supported_formats = ['pkl', 'onnx', 'pmml']  # Export formats the model supports
        self.name = 'Kmeans'
        self.centroids = None                             # ndarray (num_centroids, num_features); set after training

    def predict(self, X_b):
        """
        Uses the Kmeans model to predict new outputs given the inputs.

        Parameters
        ----------
        X_b: ndarray
            Array containing the input patterns, shape (num_observations, num_features).

        Returns
        -------
        preds: ndarray
            Index of the closest centroid for every observation, shape (num_observations,).
        """
        # Euclidean distances between every centroid and every observation,
        # shape (num_centroids, num_observations). abs() guards against tiny
        # negative values from floating-point cancellation. The previous code
        # had a duplicated assignment (`dists = dists = ...`) and computed
        # min/mean distances that were never used; both removed.
        dists = np.sqrt(np.abs(-2 * np.dot(self.centroids, X_b.T) + np.sum(X_b**2, axis=1) + np.sum(self.centroids**2, axis=1)[:, np.newaxis]))
        preds = np.argmin(dists, axis=0)  # Closest centroid per observation
        return preds
class Kmeans_Master(POM3_CommonML_Master):
    """
    This class implements Kmeans, run at Master node. It inherits from
    :class:`POM3_CommonML_Master`.

    The master never sees plaintext centroids: workers exchange centroids
    encrypted in each other's domains and the master only re-encrypts
    (transforms) them between worker domains.
    """
    def __init__(self, comms, logger, verbose=False, NC=None, Nmaxiter=None, tolerance=None):
        """
        Create a :class:`Kmeans_Master` instance.

        Parameters
        ----------
        comms: :class:`Comms_master`
            Object providing communication functionalities.
        logger: :class:`mylogging.Logger`
            Logging object instance.
        verbose: boolean
            Indicates whether to print messages on screen or not.
        NC: int
            Number of clusters.
        Nmaxiter: int
            Maximum number of iterations.
        tolerance: float
            Minimum tolerance for continuing training.
        """
        self.num_centroids = int(NC)
        self.Nmaxiter = int(Nmaxiter)
        self.tolerance = tolerance
        super().__init__(comms, logger, verbose)            # Initialize common class for POM3
        self.name = 'POM3_Kmeans_Master'                    # Name of the class
        #self.Init_Environment()                            # Send initialization messages common to all algorithms
        self.iter = -1                                      # Number of iterations (-1 = centroids not initialized yet)
        self.mean_dist = np.inf                             # Mean distance to centroids from the previous iteration
        self.is_trained = False                             # Flag to know if the model has been trained
    def train_Master_(self):
        """
        Main loop controlling the training of the algorithm.

        Parameters
        ----------
        None
        """
        self.iter = -1
        self.mean_dist = np.inf
        self.is_trained = False
        self.Init_Environment()
        # Broadcast-based setup phase, driven by the state machine below.
        self.state_dict['CN'] = 'START_TRAIN'
        while self.state_dict['CN'] != 'TRAINING_READY':
            self.Update_State_Master()
            self.TakeAction_Master()
            self.CheckNewPacket_Master()
        # Now communications should work sequentially (not sending a message to next worker until the actual one replied)
        self.display(self.name + ': Initialization ready, starting sequential communications')
        # Encrypted all-zero accumulators in the first worker's key domain.
        zero_counts = np.zeros((self.num_centroids, 1))
        zero_centroids = np.zeros((self.num_centroids, self.num_features))
        encrypted_zero_centroids = np.asarray(self.encrypt_list(zero_centroids, self.public_keys[self.workers_addresses[0]]))
        while self.iter != self.Nmaxiter:
            #added
            # At the start of each pass, the centroids accumulated in the
            # previous ring pass become this iteration's reference centroids.
            if self.iter!=-1:
                encrypted_iteration_centroids = encrypted_centroids
            encrypted_centroids = encrypted_zero_centroids
            new_mean_dist = 0
            counts = zero_counts
            #end added
            # One full ring pass over all workers.
            for index_worker, worker in enumerate(self.workers_addresses):
                if self.iter==-1:
                    action = 'SEND_CENTROIDS' # Initialize centroids
                    data = {'accumulated_centroids': encrypted_centroids}
                    packet = {'to':'MLModel','action': action, 'data': data}
                else:
                    # Get updated centroids from each worker
                    action = 'COMPUTE_LOCAL_CENTROIDS'
                    data = {'iteration_centroids': encrypted_iteration_centroids, 'accumulated_centroids': encrypted_centroids, 'counts': counts, 'mean_dist': new_mean_dist}
                    packet = {'to':'MLModel', 'action': action, 'data': data}
                # Send message to specific worker and wait until receiving reply
                packet = self.send_worker_and_wait_receive(packet, worker)
                encrypted_centroids = packet['data']['accumulated_centroids']
                # Transform encrypted centroids to the encrypted domain of the next worker
                encrypted_centroids = self.transform_encrypted_domain_workers(encrypted_centroids, worker, self.workers_addresses[(index_worker+1)%self.Nworkers])
                if packet['action'] == 'UPDATE_CENTROIDS':
                    counts = packet['data']['counts']
                    new_mean_dist = packet['data']['mean_dist']
                    encrypted_iteration_centroids = packet['data']['iteration_centroids']
                    encrypted_iteration_centroids = self.transform_encrypted_domain_workers(encrypted_iteration_centroids, worker, self.workers_addresses[(index_worker+1)%self.Nworkers], verbose=False)
            self.iter += 1
            # Report progress after a full update pass.
            if packet['action']=='UPDATE_CENTROIDS':
                self.display(self.name + ': Average distance to closest centroid: %0.4f, iteration %d' %(new_mean_dist, self.iter))
            # Check for termination at the end of each iteration according to the tolerance
            if self.iter == self.Nmaxiter:
                self.display(self.name + ': Stopping training, maximum number of iterations reached!')
                break
            elif self.tolerance >= 0:
                if np.abs(self.mean_dist-new_mean_dist) < self.tolerance:
                    self.display(self.name + ': Stopping training, minimum tolerance reached!')
                    break
                else:
                    self.mean_dist = new_mean_dist
        # Send final model to workers (one more ring pass so every worker can
        # decrypt and store the final centroids in its own domain).
        action = 'SEND_FINAL_MODEL'
        for index_worker, worker in enumerate(self.workers_addresses):
            data = {'centroids': encrypted_centroids}
            packet = {'to':'MLModel', 'action': action, 'data': data}
            # Send message to specific worker and wait until receiving reply
            packet = self.send_worker_and_wait_receive(packet, worker)
            encrypted_centroids = packet['data']['centroids']
            encrypted_centroids = self.transform_encrypted_domain_workers(encrypted_centroids, worker, self.workers_addresses[(index_worker+1)%self.Nworkers])
        self.is_trained = True
        self.display(self.name + ': Training is done')
    def Update_State_Master(self):
        '''
        Function to control the state of the execution.

        State transitions (setup phase only):
        START_TRAIN -> SET_NUM_CENTROIDS -> SEND_NUM_FEATURES ->
        CHECK_NUM_FEATURES -> (pseudo random sequence exchange) -> TRAINING_READY

        Parameters
        ----------
        None
        '''
        if self.state_dict['CN'] == 'START_TRAIN':
            self.state_dict['CN'] = 'SET_NUM_CENTROIDS'
        if self.checkAllStates('ACK_SET_NUM_CENTROIDS', self.state_dict):
            for worker in self.workers_addresses:
                self.state_dict[worker] = ''
            self.state_dict['CN'] = 'SEND_NUM_FEATURES'
        if self.checkAllStates('SET_NUM_FEATURES', self.state_dict):
            for worker in self.workers_addresses:
                self.state_dict[worker] = ''
            self.state_dict['CN'] = 'CHECK_NUM_FEATURES'
        if self.checkAllStates('SEND_ENCRYPTED_PSEUDO_RANDOM_SEQUENCE', self.state_dict):
            for worker in self.workers_addresses:
                self.state_dict[worker] = ''
            self.state_dict['CN'] = 'TRAINING_READY'
    def TakeAction_Master(self):
        """
        Function to take actions according to the state.

        Parameters
        ----------
        None
        """
        # NOTE(review): 'MLmodel' (lowercase m) differs from the 'MLModel'
        # used in train_Master_ — confirm which spelling the receiving side
        # expects before changing either.
        to = 'MLmodel'
        # Send the number of centroids to all workers
        if self.state_dict['CN'] == 'SET_NUM_CENTROIDS':
            action = 'SET_NUM_CENTROIDS'
            data = {'num_centroids': self.num_centroids}
            packet = {'to': to,'action': action, 'data': data}
            self.comms.broadcast(packet, self.workers_addresses)
            self.display(self.name + ': Sent ' + action + ' to all workers')
            self.state_dict['CN'] = 'WAIT'
        # Ask the number of features to all workers
        if self.state_dict['CN'] == 'SEND_NUM_FEATURES':
            action = 'SEND_NUM_FEATURES'
            packet = {'to': to,'action': action}
            self.comms.broadcast(packet, self.workers_addresses)
            self.display(self.name + ': Sent ' + action + ' to all workers')
            self.state_dict['CN'] = 'WAIT_NUM_FEATURES'
        # Check that all workers have the same number of features
        if self.state_dict['CN'] == 'CHECK_NUM_FEATURES':
            if not all(x==self.list_num_features[0] for x in self.list_num_features):
                self.display(self.name + ': Workers have different number of features, terminating POM3 execution')
                self.state_dict['CN'] = 'END'
                return
            self.num_features = self.list_num_features[0]
            self.display(self.name + ': Storing number of features')
            self.state_dict['CN'] = 'SEND_ENCRYPTED_PSEUDO_RANDOM_SEQUENCE'
        # Ask encrypted pseudo random sequence to all workers
        if self.state_dict['CN'] == 'SEND_ENCRYPTED_PSEUDO_RANDOM_SEQUENCE':
            action = 'SEND_ENCRYPTED_PSEUDO_RANDOM_SEQUENCE'
            packet = {'to': to,'action': action}
            self.comms.broadcast(packet, self.workers_addresses)
            self.display(self.name + ': Sent ' + action + ' to all workers')
            self.state_dict['CN'] = 'WAIT_ENCRYPTED_PSEUDO_RANDOM_SEQUENCE'
#===============================================================
# Worker
#===============================================================
class Kmeans_Worker(POM3_CommonML_Worker):
    '''
    Class implementing Kmeans, run at Worker node. It inherits from
    :class:`POM3_CommonML_Worker`.

    Workers hold the training data; the master only ever sees centroids
    encrypted in some worker's key domain.
    '''
    def __init__(self, master_address, comms, logger, verbose=False, Xtr_b=None):
        """
        Create a :class:`Kmeans_Worker` instance.

        Parameters
        ----------
        master_address: string
            Identifier of the master instance.
        comms: :class:`Comms_worker`
            Object providing communication functionalities.
        logger: :class:`mylogging.Logger`
            Logging object instance.
        verbose: boolean
            Indicates whether to print messages on screen or not.
        Xtr_b: ndarray
            Array containing the inputs for training, shape (num_observations, num_features).
        """
        self.Xtr_b = Xtr_b
        super().__init__(master_address, comms, logger, verbose)    # Initialize common class for POM3
        self.name = 'POM3_Kmeans_Worker'                            # Name of the class
        self.num_features = Xtr_b.shape[1]                          # Number of features
        self.model = Kmeans_model(logger)                           # Model
        self.is_trained = False                                     # Flag to know if the model has been trained
    def ProcessReceivedPacket_Worker(self, packet):
        """
        Process the received packet at worker.

        Dispatches on ``packet['action']``. NOTE(review): the branches are
        independent ``if`` statements and `packet` is reassigned inside each
        branch; with the current action names no later branch re-matches the
        reassigned packet, but keep this in mind before adding new actions.

        Parameters
        ----------
        packet: dictionary
            Packet received from the master.
        """
        if packet['action'] == 'SET_NUM_CENTROIDS':
            self.display(self.name + ' %s: Storing number of centroids' %self.worker_address)
            self.num_centroids = packet['data']['num_centroids'] # Store the number of centroids
            # Check maximum number of possible centroids
            if self.num_centroids > self.Xtr_b.shape[0]:
                self.display(self.name + ' %s: Number of clusters exceeds number of training samples. Terminating training' %self.worker_address)
                action = 'EXCEEDED_NUM_CENTROIDS'
                packet = {'action': action}
            else:
                action = 'ACK_SET_NUM_CENTROIDS'
                packet = {'action': action}
            self.comms.send(packet, self.master_address)
            self.display(self.name + ' %s: Sent %s to master' %(self.worker_address, action))
        if packet['action'] == 'SEND_NUM_FEATURES':
            self.display(self.name + ' %s: Sending number of features' %self.worker_address)
            action = 'SET_NUM_FEATURES'
            data = {'num_features': self.num_features}
            packet = {'action': action, 'data': data}
            self.comms.send(packet, self.master_address)
            self.display(self.name + ' %s: Sent %s to master' %(self.worker_address, action))
        if packet['action'] == 'SEND_ENCRYPTED_PSEUDO_RANDOM_SEQUENCE':
            # Review: include here the code to calculate the length of the sequence to generate (we need to know number of centroids in advance)
            # Generate random sequence for encrypting
            self.r_values = self.generate_sequence_Rvalues(self.num_centroids*self.num_features)
            # Generate pseudo random sequence (the same for all workers)
            Xi = self.generate_sequence_Xi(self.num_centroids*self.num_features)
            # Encrypt pseudo random sequence using sequence r_values
            encrypted_Xi = self.encrypt_flattened_list(Xi)
            action = 'SEND_ENCRYPTED_PSEUDO_RANDOM_SEQUENCE'
            data = {'encrypted_Xi': encrypted_Xi}
            packet = {'action': action, 'data': data}
            self.comms.send(packet, self.master_address)
            self.display(self.name + ' %s: Sent %s to master' %(self.worker_address, action))
        if packet['action'] == 'SEND_CENTROIDS':
            # First ring pass: contribute this worker's share of the initial centroids.
            self.display(self.name + ' %s: Initializing centroids' %self.worker_address)
            encrypted_accumulated_centroids = packet['data']['accumulated_centroids']
            accumulated_centroids = np.asarray(self.decrypt_list(encrypted_accumulated_centroids))
            # Random point initialization (data leakage in POM1)
            # Suffle randomly the observations in the training set
            # np.random.shuffle(self.Xtr_b)
            # centroids_local = self.Xtr_b[:self.num_centroids, :] # Take the first K observations, this avoids selecting the same point twice
            # accumulated_centroids += centroids_local/self.num_workers
            # Naive sharding initialization (no leakage in POM1)
            centroids = self.naive_sharding(self.Xtr_b, self.num_centroids)
            accumulated_centroids += centroids/self.num_workers
            # Encrypt centroids before sending them to the master
            encrypted_accumulated_centroids = np.asarray(self.encrypt_list_rvalues(list(accumulated_centroids)))
            action = 'INIT_CENTROIDS'
            data = {'accumulated_centroids': encrypted_accumulated_centroids}
            packet = {'action': action, 'data': data}
            self.comms.send(packet, self.master_address)
            self.display(self.name + ' %s: Sent %s to master' %(self.worker_address, action))
        if packet['action'] == 'COMPUTE_LOCAL_CENTROIDS':
            # Regular training pass: fold this worker's local update into the
            # running (weighted) average of centroids, counts and mean distance.
            self.display(self.name + ' %s: Updating centroids' %self.worker_address)
            old_counts = packet['data']['counts']
            old_mean_dist = packet['data']['mean_dist']
            encrypted_accumulated_centroids = packet['data']['accumulated_centroids']
            encrypted_iteration_centroids = packet['data']['iteration_centroids']
            # Unencrypt received centroids
            accumulated_centroids = np.asarray(self.decrypt_list(encrypted_accumulated_centroids))
            iteration_centroids = np.asarray(self.decrypt_list(encrypted_iteration_centroids))
            # Calculate the vector with euclidean distances between all observations and the defined centroids
            dists = np.sqrt(np.abs(-2 * np.dot(iteration_centroids, self.Xtr_b.T) + np.sum(self.Xtr_b**2, axis=1) + np.sum(iteration_centroids**2, axis=1)[:, np.newaxis])) # Matrix of euclidean distances between all observations in training set and centroids. Shape of vector (num_centroids x num_observations_X)
            min_dists = np.min(dists, axis=0) # Array of distances of every observation to the closest centroid
            mean_dist = np.mean(min_dists) # Average distance of all observations to all centroids (scalar)
            cluster_allocs = np.argmin(dists, axis=0) # Identification of closest centroid for every observation
            counts = np.bincount(cluster_allocs, minlength=self.num_centroids).reshape(-1,1) # Count the number of observations in each cluster (shape (num_centroids, )
            # Compute local centroids
            clusters = []
            centroids = accumulated_centroids.copy()
            for i in range(self.num_centroids):
                clusters.append(self.Xtr_b[cluster_allocs==i])
                if counts[i]>0:
                    centroids[i,:] = (1/len(clusters[i]))*np.sum(clusters[i], axis=0)
            # Update accumulated centroids, counts and mean_dist
            new_counts = counts + old_counts
            new_mean_dist = np.sum(counts)/np.sum(new_counts)*mean_dist + np.sum(old_counts)/np.sum(new_counts)*old_mean_dist
            # Check empty clusters
            if np.any(new_counts==0):
                # Per-centroid weighted update, skipping clusters with no
                # observations anywhere (avoids division by zero).
                new_centroids = accumulated_centroids.copy()
                for i in range(self.num_centroids):
                    if new_counts[i]>0:
                        new_centroids[i,:] = counts[i]/new_counts[i]*centroids[i,:] + old_counts[i]/new_counts[i]*accumulated_centroids[i,:]
            else:
                # Average centroids taking into account the number of observations of the training set in each worker with respect to the total, including the training observations of all workers
                new_centroids = (counts/new_counts)*centroids + (old_counts/new_counts)*accumulated_centroids # Array broadcasting
            # Encrypt centroids before sending them to the master
            encrypted_accumulated_centroids = np.asarray(self.encrypt_list_rvalues(new_centroids))
            encrypted_iteration_centroids = np.asarray(self.encrypt_list_rvalues(iteration_centroids))
            action = 'UPDATE_CENTROIDS'
            data = {'iteration_centroids': encrypted_iteration_centroids, 'accumulated_centroids': encrypted_accumulated_centroids, 'counts': new_counts, 'mean_dist': new_mean_dist}
            packet = {'action': action, 'data': data}
            self.comms.send(packet, self.master_address)
            self.display(self.name + ' %s: Sent %s to master' %(self.worker_address, action))
        if packet['action'] == 'SEND_FINAL_MODEL':
            # Final ring pass: decrypt and store the trained centroids, then
            # re-encrypt them so the master can forward to the next worker.
            self.display(self.name + ' %s: Receiving final model' %self.worker_address)
            encrypted_centroids = packet['data']['centroids']
            self.model.centroids = np.asarray(self.decrypt_list(encrypted_centroids))
            self.model.is_trained = True
            self.is_trained = True
            self.display(self.name + ' %s: Final model stored' %self.worker_address)
            encrypted_centroids = np.asarray(self.encrypt_list_rvalues(self.model.centroids))
            action = 'UPDATE_CENTROIDS_FINAL_MODEL'
            data = {'centroids': encrypted_centroids}
            packet = {'action': action, 'data': data}
            self.comms.send(packet, self.master_address)
            self.display(self.name + ' %s: Sent %s to master' %(self.worker_address, action))
    def naive_sharding(self, ds, k):
        """
        Create cluster centroids using deterministic naive sharding algorithm.

        NOTE(review): ``ds.sort(axis=0)`` sorts each column independently
        (it does not keep rows together). This matches the published naive
        sharding recipe, but confirm it is the intended behavior before
        changing it.

        Parameters
        ----------
        ds : numpy array
            The dataset to be used for centroid initialization.
        k : int
            The desired number of clusters for which centroids are required.

        Returns
        -------
        centroids : numpy array
            Collection of k centroids as a numpy array.
        """
        n = ds.shape[1]
        m = ds.shape[0]
        centroids = np.zeros((k,n))
        # Sum all elements of each row, add as col to original dataset, sort
        composite = np.sum(ds, axis=1)
        composite = np.expand_dims(composite, axis=1)
        ds = np.append(composite, ds, axis=1)
        ds.sort(axis=0)
        # Step value for dataset sharding
        step = floor(m/k)
        # Vectorize mean ufunc for numpy array
        vfunc = np.vectorize(self._get_mean)
        # Divide matrix rows equally by k-1 (so that there are k matrix shards)
        # Sum columns of shards, get means; these columnar means are centroids
        for j in range(k):
            if j == k-1:
                centroids[j:] = vfunc(np.sum(ds[j*step:,1:], axis=0), step)
            else:
                centroids[j:] = vfunc(np.sum(ds[j*step:(j+1)*step,1:], axis=0), step)
        return centroids
    def _get_mean(self, sums, step):
        """
        Vectorizable ufunc for getting means of summed shard columns.

        Parameters
        ----------
        sums : float
            The summed shard columns.
        step : int
            The number of instances per shard.

        Returns
        -------
        sums/step (means) : numpy array
            The means of the shard columns.
        """
        return sums/step
| 2.546875 | 3 |
backend/util/request/models/order_item/__init__.py | willrp/willorders-ws | 0 | 12759817 | <reponame>willrp/willorders-ws
from .order_item_request import OrderItemRequest
from .order_item_schema import OrderItemSchema
| 1.023438 | 1 |
problem solving/circular-array-rotation.py | avnoor-488/hackerrank-solutions | 1 | 12759818 | '''
problem:-
<NAME> knows of an operation called a right circular rotation on an array of integers. One rotation operation moves the last array element to the first position and shifts all remaining elements right one. To test Sherlock's abilities, Watson provides Sherlock with an array of integers. Sherlock is to perform the rotation operation a number of times then determine the value of the element at a given position.
For each array, perform a number of right circular rotations and return the value of the element at a given index.
For example, array a=[3,4,5], number of rotations, k=2 and indices to check, m=[1,2].
First we perform the two rotations:
[3,4,5] -> [5,3,4] -> [4,5,3]
Now return the values from the zero-based indices 1 and 2 as indicated in the m array.
a[1]=5
a[2]=3
Function Description:-
Complete the circularArrayRotation function in the editor below. It should return an array of integers representing the values at the specified indices.
circularArrayRotation has the following parameter(s):
a: an array of integers to rotate
k: an integer, the rotation count
queries: an array of integers, the indices to report
Input Format:-
The first line contains 3 space-separated integers, n, k, and q, the number of elements in the integer array, the rotation count and the number of queries.
The second line contains n space-separated integers, where each integer i describes array element a[i].
Each of the q subsequent lines contains a single integer denoting m, the index of the element to return from a.
Output Format:-
For each query, print the value of the element at index m of the rotated array on a new line.
Sample Input 0:-
3 2 3
1 2 3
0
1
2
Sample Output 0:-
2
3
1
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the circularArrayRotation function below.
def circularArrayRotation(a, k, queries):
    """Return the values at `queries` after k right circular rotations of `a`.

    Instead of physically rotating the list (O(n*k) with pop/insert, and it
    mutated the caller's list), map each queried index back to its source:
    after k right rotations, index m holds the element originally at
    (m - k) mod n.

    :param a: list of values to (conceptually) rotate
    :param k: number of right circular rotations (may exceed len(a))
    :param queries: indices to look up in the rotated array
    :return: list of the values at the queried indices
    """
    n = len(a)
    return [a[(m - k) % n] for m in queries]
if __name__ == '__main__':
    # Input format: "n k q", then the n array values, then q query indices.
    nkq = input().split()
    n = int(nkq[0])
    k = int(nkq[1])
    q = int(nkq[2])
    a = list(map(int, input().rstrip().split()))
    queries = [int(input()) for _ in range(q)]
    result = circularArrayRotation(a, k, queries)
    # Context manager guarantees the output file is closed even on error
    # (the original opened/closed it manually).
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        fptr.write('\n'.join(map(str, result)))
        fptr.write('\n')
tools/augment.py | yl305237731/yolo_v1-pytorch | 7 | 12759819 | <gh_stars>1-10
import random
import cv2
import math
import numpy as np
from skimage.util import random_noise
from skimage import exposure
class DataAugmentation:
def __init__(self, rotation_rate=0.5, max_rotation_angle=15, crop_rate=0.3, shift_rate=0.3, change_light_rate=0.3,
add_noise_rate=0.3):
self.rotation_rate = rotation_rate
self.max_rotation_angle = max_rotation_angle
self.crop_rate = crop_rate
self.shift_rate = shift_rate
self.change_light_rate = change_light_rate
self.add_noise_rate = add_noise_rate
def noise(self, img):
return random_noise(img, mode='gaussian', clip=True) * 255
def light(self, img):
flag = random.uniform(0.5, 1.5)
return exposure.adjust_gamma(img, flag)
def rotate(self, img, bboxs, angle=5, scale=1.):
w = img.shape[1]
h = img.shape[0]
rangle = np.deg2rad(angle)
nw = (abs(np.sin(rangle) * h) + abs(np.cos(rangle) * w)) * scale
nh = (abs(np.cos(rangle) * h) + abs(np.sin(rangle) * w)) * scale
rot_mat = cv2.getRotationMatrix2D((nw * 0.5, nh * 0.5), angle, scale)
rot_move = np.dot(rot_mat, np.array([(nw - w) * 0.5, (nh - h) * 0.5, 0]))
rot_mat[0, 2] += rot_move[0]
rot_mat[1, 2] += rot_move[1]
rot_img = cv2.warpAffine(img, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4)
rot_bboxes = list()
for bbox in bboxs:
xmin = bbox[0]
ymin = bbox[1]
xmax = bbox[2]
ymax = bbox[3]
point1 = np.dot(rot_mat, np.array([(xmin + xmax) / 2, ymin, 1]))
point2 = np.dot(rot_mat, np.array([xmax, (ymin + ymax) / 2, 1]))
point3 = np.dot(rot_mat, np.array([(xmin + xmax) / 2, ymax, 1]))
point4 = np.dot(rot_mat, np.array([xmin, (ymin + ymax) / 2, 1]))
concat = np.vstack((point1, point2, point3, point4))
concat = concat.astype(np.int32)
rx, ry, rw, rh = cv2.boundingRect(concat)
rx_min = rx
ry_min = ry
rx_max = rx + rw
ry_max = ry + rh
rot_bboxes.append([rx_min, ry_min, rx_max, ry_max])
return rot_img, rot_bboxes
def crop(self, img, bboxs):
w = img.shape[1]
h = img.shape[0]
x_min = w
x_max = 0
y_min = h
y_max = 0
for bbox in bboxs:
x_min = min(x_min, bbox[0])
y_min = min(y_min, bbox[1])
x_max = max(x_max, bbox[2])
y_max = max(y_max, bbox[3])
d_to_left = x_min
d_to_right = w - x_max
d_to_top = y_min
d_to_bottom = h - y_max
crop_x_min = int(x_min - random.uniform(d_to_left // 2, d_to_left))
crop_y_min = int(y_min - random.uniform(d_to_top // 2, d_to_top))
crop_x_max = int(x_max + random.uniform(d_to_right // 2, d_to_right))
crop_y_max = int(y_max + random.uniform(d_to_bottom // 2, d_to_bottom))
crop_x_min = max(0, crop_x_min)
crop_y_min = max(0, crop_y_min)
crop_x_max = min(w, crop_x_max)
crop_y_max = min(h, crop_y_max)
crop_img = img[crop_y_min:crop_y_max, crop_x_min:crop_x_max]
crop_bboxes = list()
for bbox in bboxs:
crop_bboxes.append([bbox[0] - crop_x_min, bbox[1] - crop_y_min, bbox[2] - crop_x_min, bbox[3] - crop_y_min])
return crop_img, crop_bboxes
def shift(self, img, bboxs):
w = img.shape[1]
h = img.shape[0]
x_min = w
x_max = 0
y_min = h
y_max = 0
for bbox in bboxs:
x_min = min(x_min, bbox[0])
y_min = min(y_min, bbox[1])
x_max = max(x_max, bbox[2])
y_max = max(y_max, bbox[3])
d_to_left = x_min
d_to_right = w - x_max
d_to_top = y_min
d_to_bottom = h - y_max
x = random.uniform(-(d_to_left - 1) / 3, (d_to_right - 1) / 3)
y = random.uniform(-(d_to_top - 1) / 3, (d_to_bottom - 1) / 3)
M = np.float32([[1, 0, x], [0, 1, y]])
shift_img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
shift_bboxes = list()
for bbox in bboxs:
shift_bboxes.append([int(bbox[0] + x), int(bbox[1] + y), int(bbox[2] + x), int(bbox[3] + y)])
return shift_img, shift_bboxes
def augment(self, img, bboxs, crop=True, rotate=True, shift=True, light=True, noise=True):
    """Randomly apply the enabled augmentations, in a fixed order.

    Each step fires only when its flag is set AND a uniform draw falls
    below the corresponding configured rate on `self`.

    Returns:
        (possibly augmented image, possibly transformed boxes)
    """
    if crop and random.random() < self.crop_rate:
        img, bboxs = self.crop(img, bboxs)
    if rotate and random.random() < self.rotation_rate:
        # Random angle within the configured range, with a mild zoom-out
        # so rotated content stays in frame.
        theta = random.uniform(-self.max_rotation_angle, self.max_rotation_angle)
        zoom = random.uniform(0.7, 0.8)
        img, bboxs = self.rotate(img, bboxs, theta, zoom)
    if shift and random.random() < self.shift_rate:
        img, bboxs = self.shift(img, bboxs)
    if light and random.random() < self.change_light_rate:
        img = self.light(img)
    if noise and random.random() < self.add_noise_rate:
        img = self.noise(img)
    return img, bboxs
2015/20/code.py | tut-tuuut/advent-of-code-shiny-giggle | 5 | 12759820 | import math
import time
import numpy
import scipy.stats as stats
import utils as u
# Easy part : calculate number of gifts from house number
# part 1 -'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,_
def get_number_of_gifts(house_number):
    """Return the number of gifts delivered to *house_number*.

    Elf i delivers 10*i gifts to every multiple of i, so the total is
    10 times the sum of divisors (sigma) of the house number.
    (The original assigned an unused `result = 0` local; removed.)
    """
    return 10 * sigma(house_number)
# store a little bunch of sigmas: for every prime number under 100
# sigma(x) = x + 1 (a prime number only divides itself and 1)
# Memoization cache shared by sigma(); maps n -> sum of divisors of n.
# NOTE(review): assumes u.PRIME_NUMBERS lists primes (under 100, per the
# comment above) -- confirm against the utils module.
sigmas = {prime: prime + 1 for prime in u.PRIME_NUMBERS}
def sigma(number):
    """Sum of divisors of *number*, memoized in the module-level `sigmas`.

    Uses three shortcuts before falling back to a full divisor scan:
    cached values, the multiplicativity of sigma, and primality.
    """
    # sigma(1) = 1; the generic paths below would misreport 1 as prime
    # (the original returned 2, breaking get_number_of_gifts(1) == 10).
    if number == 1:
        return 1
    # smartness #1: if we know it already, return it
    if number in sigmas:
        return sigmas[number]
    # preparation for smartness #3: prime numbers are nice for sigma
    is_probably_prime = True
    # smartness #2: sigma is multiplicative:
    # gcd(x, y) == 1  =>  sigma(x * y) = sigma(x) * sigma(y).
    # The upper bound must INCLUDE number // 2 (the largest possible proper
    # divisor besides number itself); the original range(2, number // 2)
    # excluded it, which made sigma(4) come out as 5 instead of 7.
    for x in range(2, number // 2 + 1):
        if number % x != 0:
            continue
        # found a divisor, so number is composite
        is_probably_prime = False
        y = number // x
        # only usable when the factors are coprime
        if math.gcd(x, y) > 1:
            continue
        sigmas[number] = sigma(x) * sigma(y)
        return sigmas[number]
    # smartness #3: no divisor found, so number is prime: sigma = n + 1
    if is_probably_prime:
        sigmas[number] = number + 1
        return sigmas[number]
    # no smartness worked (e.g. prime powers): sum all divisors directly
    sigmas[number] = sum(
        divisor for divisor in range(1, number + 1) if number % divisor == 0
    )
    return sigmas[number]
# Sanity checks against the known gift counts for small house numbers.
u.assert_equals(get_number_of_gifts(1), 10)
u.assert_equals(get_number_of_gifts(2), 30)
u.assert_equals(get_number_of_gifts(3), 40)
u.assert_equals(get_number_of_gifts(4), 70)
u.assert_equals(get_number_of_gifts(5), 60)
u.assert_equals(get_number_of_gifts(6), 120)
u.assert_equals(get_number_of_gifts(7), 80)
u.assert_equals(get_number_of_gifts(8), 150)
u.assert_equals(get_number_of_gifts(9), 130)

# Puzzle input: minimum number of gifts the target house must receive.
TARGET = 36000000

# Part 1: scan house numbers in order until one reaches TARGET gifts.
top_gifts = 0
house_number = 0
init_time = time.time()
for i in range(2, 1000000):
    # progress heartbeat every 1000 houses (the scan is slow)
    if i % 1000 == 0:
        print(f"------ {i} - {time.time() - init_time} -------")
    nb_of_gifts = get_number_of_gifts(i)
    # track the running maximum so progress is visible
    if nb_of_gifts > top_gifts:
        top_gifts = nb_of_gifts
        house_number = i
        print(f"{nb_of_gifts} gifts in house {i}")
    if nb_of_gifts > TARGET:
        u.answer_part_1(i)
        break

# 36902400 gifts in house 831600
# [PART 1] 831600
# the code need to run for 883 seconds for that ><
autoaim/camera.py | tccoin/FanTongVision | 4 | 12759821 | # -*- coding: utf-8 -*-
import cv2
import numpy as np
import sys
import os
from autoaim import helpers
class Camera():
    """Thin wrapper around cv2.VideoCapture for live cameras and video files."""

    def __init__(self, source):
        self.source = source
        self.capture = cv2.VideoCapture(source)
        # An int source is a live camera index; anything else (e.g. a file
        # path) is a pre-recorded video.  BUG FIX: the original only set this
        # flag when source was an int, so snapshot() crashed with
        # AttributeError for video-file sources.
        self.__camera = type(source) is int

    def snapshot(self, start, stop, interval, save_to, width=1024, height=768):
        '''
        start: "hour:minute:second"
        stop : "hour:minute:second"
        interval: 1000(ms)
        save_to: url
        '''
        capture = self.capture
        if self.__camera:
            # Live camera: grab frames in real time over the requested span.
            capture.set(cv2.CAP_PROP_FPS, 30)
            capture.set(3, width)   # CAP_PROP_FRAME_WIDTH
            capture.set(4, height)  # CAP_PROP_FRAME_HEIGHT
            start = self.__parse_time(start)
            stop = self.__parse_time(stop)
            for i in range(int((stop - start) * 1000 / interval)):
                success, img = capture.read()
                if success:
                    helpers.showoff(img, timeout=interval, update=True)
                    cv2.imwrite(save_to + str(i) + '.jpeg', img)
        else:
            # Video file: seek directly to every interval-th frame instead
            # of reading in real time.
            fps = round(capture.get(cv2.CAP_PROP_FPS))
            start = self.__parse_time(start) * fps
            stop = self.__parse_time(stop) * fps
            step = int(interval / 1000 * fps)
            for i in range(start, stop, step):
                capture.set(cv2.CAP_PROP_POS_FRAMES, i)
                success, img = capture.read()
                if success:
                    helpers.showoff(img, timeout=interval, update=True)
                    cv2.imwrite(save_to + str(i) + '.jpeg', img)

    def release(self):
        """Release the underlying capture device."""
        self.capture.release()

    def __parse_time(self, timestamp):
        """Convert an "HH:MM:SS" string to a total number of seconds.

        (Parameter renamed from `str`, which shadowed the builtin.)
        """
        parts = np.array([int(x) for x in timestamp.split(':')])
        weights = np.array([3600, 60, 1])
        return parts.dot(weights).item(0)
if __name__ == '__main__':
    # Smoke test: capture one minute from the default camera, one frame
    # every 200 ms, into data/capture/.
    cam = Camera(0)
    cam.snapshot('00:00:00', '00:01:00', 200, 'data/capture/')
People/Rob/Meeting_03/my_calendar.py | rmorgan10/ExpertPythonProgramming | 2 | 12759822 | <gh_stars>1-10
# The outer shell for PyCalendar
from calendar_functions import Display, Calendar, Spot
import curses
import datetime
import os
# Module-level state shared with the curses event loop below.
# Create a calendar
calendar = Calendar()
# Create a display
display = Display(calendar)
def main_wrapper(screen, display):
    """Curses event loop for the calendar UI.

    Highlights the currently selected spot (blinking), moves the selection
    with the arrow keys, activates a spot with Enter, and quits on 'q'.
    Events are saved via display.action_quit() on exit.
    """
    screen.border(0)
    display.main(screen)
    # Start with today's day-number spot selected.
    current_spot = display.spots[str(display.date.day)]
    while True:
        # set the current spot to blink
        screen.addstr(current_spot.curses_locs[0][0],
                      current_spot.curses_locs[0][1],
                      current_spot.name,
                      current_spot.fmt + curses.A_BLINK)
        # save the previous spot information so it can be redrawn
        # without the blink attribute after the selection moves
        prev_spot = Spot(current_spot.name,
                         current_spot.curses_locs,
                         current_spot.choose,
                         current_spot.up,
                         current_spot.down,
                         current_spot.left,
                         current_spot.right,
                         current_spot.fmt)
        # stay in this loop till the user presses 'q'
        ch = screen.getch()
        if ch == ord('q'):
            break
        # key codes -- down: 258  up: 259  left: 260  right: 261
        elif ch == 258:
            if current_spot.down is not None:
                current_spot = current_spot.down
                screen.addstr(prev_spot.curses_locs[0][0],
                              prev_spot.curses_locs[0][1],
                              prev_spot.name,
                              prev_spot.fmt)
        elif ch == 259:
            if current_spot.up is not None:
                current_spot = current_spot.up
                screen.addstr(prev_spot.curses_locs[0][0],
                              prev_spot.curses_locs[0][1],
                              prev_spot.name,
                              prev_spot.fmt)
        elif ch == 260:
            if current_spot.left is not None:
                current_spot = current_spot.left
                screen.addstr(prev_spot.curses_locs[0][0],
                              prev_spot.curses_locs[0][1],
                              prev_spot.name,
                              prev_spot.fmt)
        elif ch == 261:
            if current_spot.right is not None:
                current_spot = current_spot.right
                screen.addstr(prev_spot.curses_locs[0][0],
                              prev_spot.curses_locs[0][1],
                              prev_spot.name,
                              prev_spot.fmt)
        # enter button pressed
        elif ch == 10:
            if current_spot.choose is not None:
                # NOTE(review): exec on a stored string -- confirm `choose`
                # values are trusted method-call strings (e.g. "action_x()").
                exec("display." + current_spot.choose)
            else:
                pass
        # Update the date to reflect the new spot
        if current_spot.name.strip().isdigit():
            display.update_date(datetime.date(display.date.year,
                                              display.date.month,
                                              int(current_spot.name.strip())))
        display.refresh()
    # After ending, save events
    display.action_quit()
    return


curses.wrapper(main_wrapper, display)
| 3.171875 | 3 |
lesson8/practice.py | yoyo929/learn-python | 0 | 12759823 | # def display_message():
# print("本章学习了定义函数")
# for i in range(10):
# display_message()
# def favourite_book(title):
# print(f"One of my favorite books is {title}.")
# favourite_book("Alice in Wonderlan")
# def make_shirt(a = 'T', b = 'I love Python'):
# return f"订购的shirt的尺码为{a}\n打印的内容为{b}"
# print(make_shirt())
# 8-6
# def city_country(city, country):
# a = f"{city}, {country}"
# return a.title()
# print(city_country("santiago", "chile"))
# 8-7
# def make_album(singer, album, count=None):
# if count != None:
# a = {"name": singer, "album": album, "count": count}
# else:
# a = {"name": singer, "album": album}
# return a
# print(make_album("d", "song1", 4))
# print(make_album("b", "song2"))
# print(make_album("c", "song3"))
# def make_album(singer, album, count=None):
# if count != None:
# a = {"name": singer, "album": album, "count": count}
# return a
# a = {"name": singer, "album": album}
# return a
# 8-8
def make_album(singer, album, count=None):
    """Return a dict describing an album.

    The track count is included only when it is truthy (so 0 and None
    are both omitted, matching the exercise's behavior).
    """
    record = dict(name=singer, album=album)
    if count:
        record["count"] = count
    return record
# Interactive driver: keep collecting singer/album pairs until the user
# types "quit" for either prompt.
while True:
    b = input("请输入singer: ")
    if b == "quit":
        break
    c = input("请输入专辑的名称:")
    if c == "quit":
        break
    print(make_album(b, c))
| 3.8125 | 4 |
wsServer.py | Nouzan/Exchange-API | 0 | 12759824 | <filename>wsServer.py<gh_stars>0
import asyncio
import websockets
import random
async def hello(websocket, path):
    """Handle one websocket client.

    If the first message is 'KEEP', enter an endless keep-alive loop that
    periodically sends bursts of 'KEEP' frames (this branch never returns).
    Otherwise, echo a greeting for the received name.
    """
    name = await websocket.recv()
    if name == 'KEEP':
        while True:
            # random idle period, then a burst of 10 KEEP frames 0.5s apart
            await asyncio.sleep(random.randint(5, 10))
            for i in range(10):
                await asyncio.sleep(0.5)
                await websocket.send('KEEP')
                print(f'> KEEP')
    print(f'< {name}')
    greeting = f'Hello {name}!'
    await websocket.send(greeting)
    print(f"> {greeting}")
# Serve on localhost:8123 and run the event loop forever.
startServer = websockets.serve(hello, 'localhost', 8123)
asyncio.get_event_loop().run_until_complete(startServer)
asyncio.get_event_loop().run_forever()
| 3.0625 | 3 |
master_node_scripts/fill_tables.py | ashd97/simple-scalable-ml-backend | 0 | 12759825 | import psycopg2
from psycopg2 import sql, extras
# If there is no venv, run schrodinger_virtualenv.py schrodinger.ve to install pycopg2
# In win powershell as admin:
# >Set-Location -Path "C:\Program Files\Schrodinger2020-3"
# >Set-ExecutionPolicy RemoteSigned
# >schrodinger.ve\Scripts\activate
# or source schrodinger.ve/bin/activate on unix
# Set-Location -Path "C:\Program Files\Schrodinger2020-3\myscripts_sequential"
# This file creates and populates DB, pipeline.py is worker test
# CREATE DATABASE sh_db0
def single_connection_query(check_sql, fetch=True, queue_concurrent=False, dbname='sh_db0_dev', values=[], user='shworker', password='<PASSWORD>'):
    """Open one connection, run one statement, and return the requested result.

    Args:
        check_sql: SQL to execute.  When *values* is non-empty it must be an
            INSERT template compatible with psycopg2.extras.execute_values.
        fetch: True -> return cursor.fetchall(); "rowcount" -> return the
            affected-row count; anything else -> return None.
        queue_concurrent: run the statement at SERIALIZABLE isolation.
        dbname, user, password: connection parameters (host is localhost).
        values: rows to bulk-insert via execute_values (insert path).

    Returns:
        Fetched rows, the rowcount, or None (inserts always return None).
    """
    conn = psycopg2.connect(dbname=dbname, user=user,
                            password=password, host='localhost')
    try:
        cursor = conn.cursor()
        if queue_concurrent:
            # BUG FIX: the original referenced a bare `extensions` name
            # (NameError); qualify it through the psycopg2 package.
            conn.set_isolation_level(
                psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
        if len(values) > 0:
            # Bulk insert.  BUG FIX: the original executed the module-global
            # `insert_query` here instead of the statement it was given.
            extras.execute_values(
                cursor, check_sql, values, template=None, page_size=100
            )
            conn.commit()
        else:
            cursor.execute(check_sql)
            conn.commit()
            if isinstance(fetch, bool):
                if fetch is True:
                    return cursor.fetchall()
            elif fetch == "rowcount":
                return cursor.rowcount
    finally:
        # Always release the connection (the original leaked it).
        conn.close()
'''
query = "select * from files.files;"
res = single_connection_query(query, True, dbname="sh_db0_dev", user='shworker', password='<PASSWORD>')
print("Fetched!", len(res))
res = [list(r) for r in res]
print("converted")
# Test fill DB
ids = [rec[0] for rec in res]
import random
random.shuffle(ids)
to_insert = []
for idx, item in enumerate(res):
item1 = item
item1[0] = ids[idx]
to_insert.append(item1)
'''
# Read SMILES strings (one per line) and queue them for a bulk insert
# into files.files.
inserts = []
ids = 1602
with open("/home/ubuntu/pres_sub/preserved_substructure_6","r") as f:
    for line in f:
        line = line.strip()
        # skip blank/too-short lines
        if len(line) > 3:
            # (smiles, num_conformers, docking_score, start_ts, stop_ts,
            #  status, priority)
            inserts.append((line, None,None,None,None,0,1))
            ids += 1
to_insert = inserts
insert_query = 'insert into files.files (smiles,num_conformers,docking_score,start_ts,stop_ts,status,priority) values %s'
single_connection_query(insert_query, fetch=False, dbname='sh_db0_dev', values = to_insert)
# NOTE(review): `ids` starts at 1602 and counts queued rows on top of it;
# it is only printed, never used as a database key -- confirm intent.
print(ids)
| 2.625 | 3 |
src/model.py | mbeckmann987/pedestrian-detection | 2 | 12759826 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
""" model.py: A custom model for CityPersons. """
import numpy as np
import torch
import torch.utils.data
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from engine import train_one_epoch, evaluate
import utils
import transforms as T
import data
def get_model():
    """Return a COCO-pretrained Faster R-CNN ready for CityPersons fine-tuning.

    The backbone is ResNet-50 + FPN; only the box-classification head is
    replaced, sized for two classes (pedestrian + background).
    """
    # Start from the torchvision detector pre-trained on COCO.
    detector = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
    # 1 class (person) + background.
    num_classes = 2
    # Width of the existing classifier input.
    head_in_features = detector.roi_heads.box_predictor.cls_score.in_features
    # Swap in a fresh prediction head for our class count.
    detector.roi_heads.box_predictor = FastRCNNPredictor(head_in_features, num_classes)
    return detector
def get_transform(train):
    """Build the data-transform pipeline.

    Always converts PIL images to tensors; for training, additionally adds
    a random horizontal flip (p=0.5) of both image and ground truth.
    """
    steps = [T.ToTensor()]
    if train:
        steps.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(steps)
def save_model(model, path="./models/entire_model.pt"):
    """Serialize the entire model object to *path* and log the location."""
    torch.save(model, path)
    print(f'Model saved to {path}')
def load_model(path="./models/entire_model.pt"):
    """Load a model saved with save_model, mapping to CPU when CUDA is absent."""
    if not torch.cuda.is_available():
        return torch.load(path, map_location=torch.device('cpu'))
    return torch.load(path)
def convert(img, img_raw):
    '''
    Converts the image from dataset back to the raw format:
    * rescales it from [0,1] back to [0,255] range;
    * flips the channels back to [height,width,3] format;
    * converts from tensor to numpy array;
    * converts from numpy array to PIL Image;
    * checks if the image was augmented - flipped horizontally
    '''
    # NOTE(review): PIL's `Image` is used below but never imported in this
    # file -- confirm `from PIL import Image` exists at module level.
    img = Image.fromarray(img.mul(255).permute(1, 2, 0).byte().numpy())
    img = np.array(img)
    print('img shape: %d x %d x %d' % img.shape)
    img = Image.fromarray(np.uint8(img)).convert('RGB')
    # Compare a horizontally flipped copy pixel-for-pixel against the raw
    # image: if every pixel matches, the dataset transform flipped it.
    img_flipped = np.array(img.transpose(Image.FLIP_LEFT_RIGHT))
    img_raw = np.array(img_raw)
    img_was_flipped = np.sum(img_flipped.flatten() == img_raw.flatten()) == img_flipped.shape[0] * img_flipped.shape[1] * img_flipped.shape[2]
    print('Image was flipped: %r' % img_was_flipped)
    return img
## testing on images from Hambrug
# NOTE(review): this script references several names that are not defined
# in this file (`anno_dict`, `train`, `num_epochs`, `imgs`, `save`,
# `convert_back`, `Image`, `plt`, `patches`) -- it appears to have been
# pasted from a notebook; confirm the missing definitions/imports before
# running.
if __name__ == "__main__":
    img_path = './datasets/citypersons/hamburg/'
    anno_path = './datasets/citypersons/CityPersons/annotations/'
    # split dataset into train and test
    dataset = data.HamburgDataset(img_path, anno_dict, get_transform(train=True))
    dataset_test = data.HamburgDataset(img_path, anno_dict, get_transform(train=False))
    # permute the indices (fixed seed for a reproducible split)
    torch.manual_seed(1)
    indices = torch.randperm(len(dataset)).tolist()
    # train: 248 - 50 examples
    # test: 50 examples
    dataset = torch.utils.data.Subset(dataset, indices[:-50])
    dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])
    if train:
        # define training and validation data loaders
        data_loader = torch.utils.data.DataLoader(
            dataset, batch_size=2, shuffle=True, num_workers=4,
            collate_fn=utils.collate_fn)
        data_loader_test = torch.utils.data.DataLoader(
            dataset_test, batch_size=1, shuffle=False, num_workers=4,
            collate_fn=utils.collate_fn)
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        print(device)
        model = get_model()
        model.to(device)
        # construct an optimizer over the trainable parameters only
        params = [p for p in model.parameters() if p.requires_grad]
        optimizer = torch.optim.SGD(params, lr=0.005,
                                    momentum=0.9, weight_decay=0.0005)
        # and a learning rate scheduler which decreases the learning rate by
        # 10x every 3 epochs
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                       step_size=3,
                                                       gamma=0.1)
        # Train, evaluating at the end of every epoch.
        for epoch in range(num_epochs):
            # train for one epoch, printing every 10 iterations
            train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
            # update the learning rate
            lr_scheduler.step()
            # evaluate on the test dataset
            evaluate(model, data_loader_test, device=device)
        # NOTE(review): `save` is undefined -- presumably save_model(model).
        save(model)
    else:
        model = load_model()
    ## error analysis
    # raw image
    img_raw = Image.open(img_path + imgs[0])
    anno_raw = anno_dict[imgs[0]]
    # same image from the dataset
    idx = indices.index(0)
    img, anno = dataset[idx]
    # NOTE(review): `convert_back` is undefined -- presumably convert().
    img = convert_back(img, img_raw)
    # put the model in evaluation mode
    model.eval()
    with torch.no_grad():
        prediction = model([img.to(device)])[0]
    preds = prediction['boxes']  # predicted bboxes
    preds = preds.cpu().data.numpy()  # to numpy array
    scores = prediction['scores']  # scores of predicted bboxes
    scores = scores.cpu().data.numpy()
    # keep only bboxes where score > threshold:
    threshold = .3
    highs = list(np.where(scores > threshold)[0])
    # transform the bboxes from tensor to list and back to [x, y, w, h] format
    bboxes_x0x1y0y1 = []
    for high in highs:
        bboxes_x0x1y0y1.append(list(preds[high]))
    bboxes = []
    for bbox in bboxes_x0x1y0y1:
        bbox = list(bbox)
        x0, y0 = bbox[0], bbox[1]
        x1, y1 = bbox[2], bbox[3]
        bboxes.append([x0, y0, x1 - x0, y1 - y0])
    # draw the predicted bounding boxes
    # TODO: add ground truth bboxes in green
    plt.rcParams['figure.figsize'] = [12, 8]
    fig, ax = plt.subplots()
    ax.imshow(img);
    for bbox in bboxes:
        rect = patches.Rectangle(
            (bbox[0], bbox[1]), bbox[2], bbox[3],
            linewidth=1, edgecolor='r', facecolor='none')
        ax.add_patch(rect)
    plt.title(img_name)
    plt.show()
| 2.734375 | 3 |
python/py-functionals/reduce-function.py | feliposz/hackerrank-solutions | 0 | 12759827 |
def product(fracs):
    """Return (numerator, denominator) of the product of *fracs*.

    *fracs* is an iterable of fractions.Fraction values.  An empty iterable
    yields (1, 1): the start value is int 1, and Python ints also expose
    .numerator / .denominator.
    """
    # BUG FIX: the original relied on `reduce` being a builtin (it is not
    # in Python 3); import it locally so the function is self-contained.
    from functools import reduce
    t = reduce(lambda x, y: x * y, fracs, 1)
    return t.numerator, t.denominator
| 2.59375 | 3 |
domain/crypstyx.py | zloyuser/sanic-ccxt | 2 | 12759828 | <filename>domain/crypstyx.py
import aiohttp
import base64
import hashlib
import hmac
import json
from datetime import datetime
from domain.errors import InvalidSymbol
from domain.models import *
class CrypstyxSecurity:
    """Builds HMAC-signed "amx" Authorization headers for the Crypstyx API."""
    _secret: str
    _app: str
    _nonce: int

    def __init__(self, params: dict):
        self._key = params['apiKey']
        self._secret = params['secret']
        self._nonce = 0

    def header(self, method: str, url: str, data=''):
        """Return the Authorization header value for one request.

        Signature string: key + METHOD + lowercased url + unix timestamp +
        nonce + base64(md5(body)), HMAC-SHA256'd with the base64-decoded
        secret.
        """
        now = int(datetime.utcnow().timestamp())
        body_digest_b64 = base64.b64encode(CrypstyxSecurity.md5(data))
        payload = self._key + method.upper() + url.lower() + str(now)
        payload += str(self._nonce) + body_digest_b64.decode()
        raw_mac = hmac.new(base64.b64decode(self._secret), payload.encode(), hashlib.sha256).digest()
        mac = base64.b64encode(raw_mac).decode()
        return "amx {}:{}:{}:{}".format(self._key, mac, self._nonce, now)

    @staticmethod
    def md5(data):
        """Raw MD5 digest (bytes) of a text payload."""
        return hashlib.md5(data.encode()).digest()
class CrypstyxProxy(ExchangeProxy):
    """ExchangeProxy implementation for the Crypstyx exchange.

    Only currency/pair discovery and OHLCV candles are implemented; the
    remaining ExchangeProxy operations are stubs.

    NOTE(review): __init__ assigns `self.features = {...}`, which shadows
    the `features()` method on the instance, and `self._features` is never
    set -- so calling `features()` on the class would raise AttributeError.
    Confirm whether the attribute should be `self._features`.
    """
    _security: CrypstyxSecurity
    _features: Dict[str, bool]
    _timeframes: Dict[str, str]
    _symbols: List[str]
    _currencies: Dict[str, Currency]
    _pairs: Dict[str, int]

    def __init__(self, params: dict):
        super().__init__('crypstyx')
        self._security = CrypstyxSecurity(params)
        # Capability flags (ccxt-style).
        self.features = {
            "fetchCurrencies": True,
            "fetchMarkets": False,
            "fetchOHLCV": True,
            "fetchTicker": False,
            "fetchTrades": False,
            "fetchBalance": False,
            "fetchOrders": False,
            "fetchOpenOrders": False,
            "fetchClosedOrders": False,
            "fetchOrder": False,
            "createOrder": False,
            "cancelOrder": False,
        }
        # Maps common timeframe notation to the API's chartType values.
        self._timeframes = {
            '1m': 'Minute1',
            '5m': 'Minute5',
            '15m': 'Minute15',
            '30m': 'Minute30',
            '1h': 'Hour1',
            '6h': 'Hour6',
            '12h': 'Hour12',
            '1d': 'Day1',
        }
        # Lazily populated by __load() on first use.
        self._symbols = []
        self._currencies = {}
        self._pairs = {}
        self._nonce = 0

    def features(self) -> dict:
        # NOTE(review): see class docstring -- `_features` is never assigned.
        return self._features

    async def symbols(self):
        """Return the list of known trading symbols (loads them on demand)."""
        await self.__load()
        return self._symbols

    async def currencies(self):
        """Return the known currencies keyed by code (loads on demand)."""
        await self.__load()
        return self._currencies

    async def markets(self):
        # Not supported by this exchange adapter.
        pass

    async def market(self, symbol: Symbol):
        # Not supported by this exchange adapter.
        pass

    async def ticker(self, symbol: Symbol):
        # Not supported by this exchange adapter.
        pass

    async def ohlcv(self, symbol: Symbol, timeframe: str = '1m', since: int = None, limit: int = None) -> List[dict]:
        """Fetch OHLCV candles for *symbol*.

        Unknown timeframes fall back to the first configured one; *limit*
        defaults to 100 candles ending now.  Returns a list of dicts with
        keys t/o/h/l/c/v (t in milliseconds since epoch).

        Raises:
            InvalidSymbol: if the symbol is not listed on the exchange.
        """
        await self.__load()
        if str(symbol) not in self._symbols:
            raise InvalidSymbol(symbol)
        if timeframe not in self._timeframes:
            timeframe = list(self._timeframes.keys())[0]
        limit = int(limit) if limit else 100
        now = datetime.utcnow()
        url = 'https://crypstyx.com/api/trade/graphdata'
        data = {
            "pairId": self._pairs[str(symbol)],
            "endDateTime": now.strftime("%Y-%m-%dT%H:%M:%SZ"),
            "depth": limit,
            "chartType": self._timeframes[timeframe],
        }
        headers = {
            "Accept": "application/json",
            "Content-Type": "application/json"
        }
        # Output dict keys: time/open/high/low/close/volume.
        k = ['t', 'o', 'h', 'l', 'c', 'v']
        ohlcv = []
        async with aiohttp.ClientSession() as session:
            async with session.post(url, data=json.dumps(data), headers=headers) as resp:
                payload = json.loads(await resp.text())
                for item in payload:
                    _time = datetime.strptime(item['dateTime'], "%Y-%m-%dT%H:%M:%S")
                    ohlcv.append(dict(zip(k, [
                        int(_time.timestamp() * 1000),
                        item['open'],
                        item['high'],
                        item['low'],
                        item['close'],
                        item['volume'],
                    ])))
        return ohlcv

    async def trades(self, symbol: Symbol, since: int = None, limit: int = None):
        # Not supported by this exchange adapter.
        pass

    async def wallet(self) -> Wallet:
        """Fetch the authenticated wallet/ticker payload (signed request)."""
        url = 'https://api.crypstyx.com/api/tickers/1'
        headers = {
            "Accept": "application/json",
            "Authorization": self._security.header('GET', url)
        }
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=headers) as resp:
                return json.loads(await resp.text())

    async def balance(self, base: str) -> Balance:
        # Not supported by this exchange adapter.
        pass

    async def get_orders(self, symbol: Symbol, status: str = None, since: int = None, limit: int = None):
        # Not supported by this exchange adapter.
        pass

    async def get_order(self, symbol: Symbol, _id: str):
        # Not supported by this exchange adapter.
        pass

    async def create_order(self, symbol: Symbol, type: str, side: str, amount: float, price: float = None):
        # Not supported by this exchange adapter.
        pass

    async def cancel_order(self, symbol: Symbol, _id: str):
        # Not supported by this exchange adapter.
        pass

    async def close(self):
        # Nothing to clean up; sessions are created per request.
        pass

    async def __load(self):
        """Populate symbols/currencies/pair-ids from the exchange (once)."""
        if len(self._symbols) != 0:
            return
        url = 'https://crypstyx.com/api/trade/currencypairs'
        async with aiohttp.ClientSession() as session:
            async with session.post(url) as resp:
                payload = json.loads(await resp.text())
                for currency in payload:
                    base = currency['firstCurrency']
                    self._currencies[base['code']] = Currency(base['id'], base['code'], base['scale'])
                    for pair in currency['pairs']:
                        quote = pair['secondCurrency']
                        symbol = str(Symbol(base['code'], quote['code']))
                        self._symbols.append(symbol)
                        self._pairs[symbol] = pair['id']
| 2.359375 | 2 |
nets/unet.py | wanghuajing/unet-pytorch | 0 | 12759829 | <filename>nets/unet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from nets.vgg import VGG16
class unetUp(nn.Module):
    """U-Net decoder stage: upsample the deep features 2x, concatenate the
    skip connection, then refine with two 3x3 convolutions."""

    def __init__(self, in_size, out_size):
        super(unetUp, self).__init__()
        # in_size = skip channels + upsampled deep channels.
        self.conv1 = nn.Conv2d(in_size, out_size, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(out_size, out_size, kernel_size=3, padding=1)
        self.up = nn.UpsamplingBilinear2d(scale_factor=2)

    def forward(self, inputs1, inputs2):
        # inputs1: skip features; inputs2: deeper features at half resolution.
        merged = torch.cat([inputs1, self.up(inputs2)], 1)
        return self.conv2(self.conv1(merged))
class Unet(nn.Module):
    """U-Net segmentation network with a VGG16 encoder.

    The encoder is sliced into its five stages by fixed layer indices of
    torchvision's VGG16 `features` sequential; the decoder upsamples with
    unetUp stages fed by the matching skip connections.
    """

    def __init__(self, num_classes=21, in_channels=3, pretrained=False):
        super(Unet, self).__init__()
        self.vgg = VGG16(pretrained=pretrained, in_channels=in_channels)
        in_filters = [192, 384, 768, 1024]
        out_filters = [64, 128, 256, 512]
        # Decoder stages, deepest first; comments give output resolution.
        self.up_concat4 = unetUp(in_filters[3], out_filters[3])  # 64,64,512
        self.up_concat3 = unetUp(in_filters[2], out_filters[2])  # 128,128,256
        self.up_concat2 = unetUp(in_filters[1], out_filters[1])  # 256,256,128
        self.up_concat1 = unetUp(in_filters[0], out_filters[0])  # 512,512,64
        # Final 1x1 conv projecting to per-pixel class scores (no concat).
        self.final = nn.Conv2d(out_filters[0], num_classes, 1)

    def forward(self, inputs):
        # Run the VGG feature extractor stage by stage, keeping each
        # stage's output as a skip connection.
        feat1 = self.vgg.features[ :4 ](inputs)
        feat2 = self.vgg.features[4 :9 ](feat1)
        feat3 = self.vgg.features[9 :16](feat2)
        feat4 = self.vgg.features[16:23](feat3)
        feat5 = self.vgg.features[23:-1](feat4)
        # Decode with skip connections, shallowest last.
        up4 = self.up_concat4(feat4, feat5)
        up3 = self.up_concat3(feat3, up4)
        up2 = self.up_concat2(feat2, up3)
        up1 = self.up_concat1(feat1, up2)
        return self.final(up1)

    def _initialize_weights(self, *stages):
        """Kaiming-init conv weights and reset batch-norm scale/bias."""
        for module_group in stages:
            for layer in module_group.modules():
                if isinstance(layer, nn.Conv2d):
                    nn.init.kaiming_normal_(layer.weight)
                    if layer.bias is not None:
                        layer.bias.data.zero_()
                elif isinstance(layer, nn.BatchNorm2d):
                    layer.weight.data.fill_(1)
                    layer.bias.data.zero_()
| 2.375 | 2 |
utils/progress_bar.py | gongbudaizhe/bilib | 0 | 12759830 | <gh_stars>0
import sys
import time
if __name__ == "__main__":
# show the progress
for progress in range(100):
i = progress / 5
# the general syntax for a format place holder is
# %[flags][width][.precision]type
# reference: http://www.python-course.eu/python3_formatted_output.php
#
# for example, %-20s
# flag "-" means that the output is left adjusted
# width "20" means that the output width is 20
# precision is optional and we don't have it
# type "s" means that the input is string
sys.stdout.write("Processing progress: [%-20s] %d%% \r" %('='*i+'>',progress))
sys.stdout.flush()
time.sleep(1)
| 2.734375 | 3 |
tourify/shortcuts.py | landsurveyorsunited/tour-builder | 0 | 12759831 | #Copyright 2010 Google Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
def slugify(inStr):
    """Slugify *inStr*: drop common stop-words (case-sensitive), strip
    punctuation, lower-case, and hyphenate the remaining words."""
    # The original module never imported re (NameError at call time).
    import re
    removelist = ["a", "an", "as", "at", "before", "but", "by", "for","from","is", "in", "into", "like", "of", "off", "on", "onto","per","since", "than", "the", "this", "that", "to", "up", "via","with"];
    # BUG FIX: the original substituted into the ORIGINAL string on every
    # iteration (re.sub(..., inStr)), so only the last stop-word ("with")
    # was ever actually removed.  Accumulate the removals instead.
    aslug = inStr
    for a in removelist:
        aslug = re.sub(r'\b'+a+r'\b','',aslug)
    aslug = re.sub('[^\w\s-]', '', aslug).strip().lower()
    aslug = re.sub('\s+', '-', aslug)
    return aslug
| 2.734375 | 3 |
RunBedau.py | LapoFrati/GeneticAlgorithms | 0 | 12759832 | <filename>RunBedau.py
from Bedau.Population import Population
from Bedau.Log import Log
def main():
    """Run a single Bedau-style evolution experiment with fixed settings."""
    print("Start")
    plotting = False
    # Experiment configuration, passed straight through to Population.
    settings = dict(
        world_size=128,
        pop_size=1000,
        mutation_rate=1.,
        meta_mutation=0.66,
        meta_mutation_range=0.0025,  # from paper
        resource_freq=1,
        iterations=1000,
        plotting=plotting,
        progress=True,
    )
    pop_log = Population(**settings).evolve()
    if plotting:
        pop_log.plot_world()
        pop_log.plot_stats()
    print("End")


if __name__ == '__main__':
    main()
| 2.65625 | 3 |
reportTem.py | Universoar/gxnu-yzdx-autoreport | 6 | 12759833 | import requests
def reportTemperature(cookie, token):
    """POST a daily health/temperature report (fixed 36.8) for the session
    identified by *cookie*/*token*; return the raw HTTP response."""
    endpoint = 'http://yiban.gxnu.edu.cn/v4/affairs/health-report/create'
    request_headers = {
        'Host': 'yiban.gxnu.edu.cn',
        'Content-Type': 'application/json;charset=utf-8',
        'X-Requested-With': 'XMLHttpRequest',
        'X-Access-Token': token,
        'Origin': 'http://yiban.gxnu.edu.cn',
        'Cookie': cookie,
    }
    payload = {'data': {'temperature': '36.8', 'remark': ''}}
    return requests.post(url=endpoint, headers=request_headers, json=payload)
| 3.015625 | 3 |
DRACO/inv_changed.py | RahulSajnani/DRACO-Weakly-Supervised-Dense-Reconstruction-And-Canonicalization-of-Objects | 3 | 12759834 | # Code adapted from https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py
from __future__ import division
from pytorch3d.ops.knn import knn_points
import torch
import torch.nn.functional as F
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import helper_functions
FLOAT_EPS = np.finfo(np.float).eps
pixel_coords = None
import kornia
from scipy.spatial.transform import Rotation as R
def preprocess_depth_output_2_point_cloud_all(depth_maps, masks, intrinsics):
    '''
    Pre process data for pose network
    Function mean subtracts the point cloud to bring it to origin and downsamples it to 2048 points

    Processes every view: back-projects each depth map to camera-frame
    points, keeps only points inside the mask, mean-centers them,
    downsamples to 2048 points, and also returns a randomly rotated copy
    (fixed random_state, so the rotations are deterministic).
    '''
    batch_size, num_views, height, width = depth_maps.size()
    # Convert network sigmoid output to metric depth.
    depth_maps = helper_functions.sigmoid_2_depth(depth_maps)
    point_cloud_list_all_views = []
    rotated_point_cloud_list_all_views = []
    for view in range(num_views):
        # NOTE(review): unsqueeze(0) turns [B, H, W] into [1, B, H, W];
        # pixel2cam expects [B, 1, H, W] -- confirm this should not be
        # unsqueeze(1).
        src_camera_coords = pixel2cam(depth_maps[:, view].unsqueeze(0), intrinsics.inverse())
        src_camera_coords = src_camera_coords.reshape(batch_size, 3, height*width) # [B 3 H*W]
        # Deterministic random rotations (one per batch element).
        if torch.cuda.is_available():
            random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).cuda().float() # [B 3 3]
        else:
            random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).float() # [B 3 3]
        point_cloud_list = []
        rotated_point_cloud_list = []
        masks_batch = masks[:, view]
        for i in range(batch_size):
            src_camera_coords_view = src_camera_coords[i] # [3 H*W]
            mask = masks_batch[i] # [H W]
            mask = mask.reshape(1, -1).squeeze() # [H*W]
            # Extracting the points only within mask region
            src_camera_coords_view = src_camera_coords_view[:, (mask == 1.0)]
            # Mean center value
            src_camera_coords_view = src_camera_coords_view - src_camera_coords_view.mean(axis = 1).unsqueeze(1).repeat(1, src_camera_coords_view.size(1)) #[3 masksize]
            # Downsample to 2048 points
            src_camera_coords_view = torch.nn.functional.interpolate(src_camera_coords_view.unsqueeze(0), size = 2048).squeeze(0)
            point_cloud_list.append(src_camera_coords_view)
        src_camera_coords_downsampled = torch.stack(point_cloud_list) # [B 3 2048]
        rot_src_camera_coords = random_rotation @ src_camera_coords_downsampled # [B 3 2048]
        point_cloud_list_all_views.append(src_camera_coords_downsampled)
        rotated_point_cloud_list_all_views.append(rot_src_camera_coords)
    camera_point_clouds_downsampled = torch.stack(point_cloud_list_all_views, dim = 1) # [B views 2048]
    rotated_camera_point_clouds_downsampled = torch.stack(rotated_point_cloud_list_all_views, dim = 1) # [B views 2048]
    return camera_point_clouds_downsampled, rotated_camera_point_clouds_downsampled
def preprocess_depth_output_2_point_cloud(depth_maps, masks_batch, intrinsics):
    '''
    Pre process data for pose network
    Function mean subtracts the point cloud to bring it to origin and downsamples it to 2048 points

    Single-view variant of preprocess_depth_output_2_point_cloud_all: uses
    view 0 only, masks, mean-centers, downsamples to 2048 points, and also
    returns a deterministically random-rotated copy.
    '''
    batch_size, _, height, width = depth_maps.size()
    # Convert network sigmoid output to metric depth.
    depth_maps = helper_functions.sigmoid_2_depth(depth_maps)
    # NOTE(review): unsqueeze(0) turns [B, H, W] into [1, B, H, W];
    # pixel2cam expects [B, 1, H, W] -- confirm this should not be
    # unsqueeze(1).
    src_camera_coords = pixel2cam(depth_maps[:, 0].unsqueeze(0), intrinsics.inverse())
    src_camera_coords = src_camera_coords.reshape(batch_size, 3, height*width) # [B 3 H*W]
    # Deterministic random rotations (fixed random_state).
    if torch.cuda.is_available():
        random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).cuda().float() # [B 3 3]
    else:
        random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).float() # [B 3 3]
    point_cloud_list = []
    rotated_point_cloud_list = []
    for i in range(batch_size):
        src_camera_coords_view = src_camera_coords[i] # [3 H*W]
        mask = masks_batch[i] # [H W]
        mask = mask.reshape(1, -1).squeeze() # [H*W]
        # Extracting the points only within mask region
        src_camera_coords_view = src_camera_coords_view[:, (mask == 1.0)]
        # mean center value
        src_camera_coords_view = src_camera_coords_view - src_camera_coords_view.mean(axis = 1).unsqueeze(1).repeat(1, src_camera_coords_view.size(1)) #[3 masksize]
        # Downsample to 2048 points
        src_camera_coords_view = torch.nn.functional.interpolate(src_camera_coords_view.unsqueeze(0), size = 2048).squeeze(0)
        point_cloud_list.append(src_camera_coords_view)
    src_camera_coords_downsampled = torch.stack(point_cloud_list) # [B 3 2048]
    rot_src_camera_coords = random_rotation @ src_camera_coords_downsampled # [B 3 2048]
    return src_camera_coords_downsampled, rot_src_camera_coords
def depth_decode(depth_image):
    """Decode a 16-bit depth map packed into an RGB image.

    Channel layout (per the original encoding): R holds the 8 least
    significant bits, G the 8 most significant bits; B is a redundant 8-bit
    preview and is ignored.

    Args:
        depth_image: (H, W, 3) uint8 array.

    Returns:
        (H, W) float64 array of decoded depth values in [0, 65535],
        matching the original's output dtype.

    The original built a binary string per pixel inside a double Python
    loop (O(H*W) interpreter work); this vectorized form computes
    G*256 + R directly.
    """
    high = depth_image[:, :, 1].astype(np.float64)  # G: most significant byte
    low = depth_image[:, :, 0].astype(np.float64)   # R: least significant byte
    return high * 256.0 + low
def set_id_grid(depth):
    """(Re)build the cached [1, 3, H, W] homogeneous pixel-coordinate grid.

    Stored in the module-level `pixel_coords` and reused by pixel2cam.
    Channel 0 is the column index (x), channel 1 the row index (y),
    channel 2 all ones; dtype/device follow *depth* via type_as.
    """
    global pixel_coords
    b, _, h, w = depth.size()
    i_range = torch.arange(0, h).view(1, h, 1).expand(1,h,w).type_as(depth)  # [1, H, W]
    j_range = torch.arange(0, w).view(1, 1, w).expand(1,h,w).type_as(depth)  # [1, H, W]
    ones = torch.ones(1,h,w).type_as(depth)
    pixel_coords = torch.stack((j_range, i_range, ones), dim=1).type_as(depth) # [1, 3, H, W]
    # NOTE(review): Tensor.to() is not in-place -- this line has no effect
    # unless its result is assigned.  type_as above already matches the
    # device in practice; confirm intent.
    pixel_coords.to(depth.device)
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr):
    """Project camera-frame 3-D points to normalized pixel coordinates.

    Args:
        cam_coords: [B, 3, H, W] points in the camera frame.
        proj_c2p_rot: optional [B, 3, 3] rotation part of the projection.
        proj_c2p_tr: optional [B, 3, 1] translation part of the projection.

    Returns:
        [B, H, W, 2] grid of (x, y) coordinates normalized to [-1, 1],
        suitable for `torch.nn.functional.grid_sample`.
    """
    b, _, h, w = cam_coords.size()
    flat = cam_coords.reshape(b, 3, -1)  # [B, 3, H*W]

    projected = flat if proj_c2p_rot is None else proj_c2p_rot.float() @ flat
    if proj_c2p_tr is not None:
        projected = projected + proj_c2p_tr.float()  # [B, 3, H*W]

    x = projected[:, 0]
    y = projected[:, 1]
    z = projected[:, 2].clamp(min=1e-4)  # guard against division by ~0 depth

    # Map to [-1, 1]: -1 on the extreme left/top, 1 at x = w-1 / y = h-1.
    x_norm = 2 * (x / z) / (w - 1) - 1  # [B, H*W]
    y_norm = 2 * (y / z) / (h - 1) - 1  # [B, H*W]

    grid = torch.stack([x_norm, y_norm], dim=2)  # [B, H*W, 2]
    return grid.reshape(b, h, w, 2)
def pixel2cam(depth, intrinsics_inv):
    """Back-project a depth map to 3-D camera-frame coordinates.

    Args:
        depth: [B, 1, H, W] depth map.
        intrinsics_inv: [B, 3, 3] inverse camera intrinsics.

    Returns:
        [B, 3, H, W] camera coordinates (pixel rays scaled by depth).
    """
    global pixel_coords
    b, _, h, w = depth.size()
    # Bug fix: the cached grid was only regenerated when it was too *short*;
    # a cached grid that is too *narrow* would make the slice below silently
    # produce the wrong width. Regenerate in both cases.
    if (pixel_coords is None) or pixel_coords.size(2) < h or pixel_coords.size(3) < w:
        set_id_grid(depth)
    pixel_coords = pixel_coords.to(depth.device)
    # Crop the cached grid to this image size and flatten: [B, 3, H*W].
    current_pixel_coords = pixel_coords[:, :, :h, :w].expand(b, 3, h, w).reshape(b, 3, -1)
    cam_coords = (intrinsics_inv.float() @ current_pixel_coords.float())
    cam_coords = cam_coords.reshape(b, 3, h, w)
    # Clamp depth away from zero to keep downstream projection stable.
    return cam_coords * depth.clamp(min=1e-1)
def quat2mat(quat):
    """Convert (possibly non-unit) quaternions to rotation matrices.

    Args:
        quat: [B, 4] quaternions in (x, y, z, w) order.

    Returns:
        [B, 3, 3] rotation matrices.
    """
    x, y, z, w = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
    B = quat.size(0)

    # Bug fix: normalize by the Euclidean norm sqrt(w^2+x^2+y^2+z^2).
    # The original divided each component by the *squared* norm, which only
    # yields a valid rotation matrix when the quaternion is already unit
    # length; for any other magnitude the result was not orthonormal.
    n = (w.pow(2) + x.pow(2) + y.pow(2) + z.pow(2)).sqrt()
    x = x / n
    y = y / n
    z = z / n
    w = w / n

    w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z

    # Standard unit-quaternion -> rotation-matrix formula (row-major).
    rotMat = torch.stack([1 - 2*y2 - 2*z2, 2*xy - 2*wz, 2*wy + 2*xz,
                          2*wz + 2*xy, 1 - 2*x2 - 2*z2, 2*yz - 2*wx,
                          2*xz - 2*wy, 2*wx + 2*yz, 1 - 2*x2 - 2*y2], dim=1).reshape(B, 3, 3)
    return rotMat
def pose_vec2mat(vec):
    """Convert a pose vector to a [B, 4, 4] transformation matrix.

    Accepts either a [B, 4, 4] matrix (multiview blender dataset; returned
    unchanged) or a [B, 7] vector of translation + (x, y, z, w) quaternion
    (multiview nocs dataset).
    """
    if vec.dim() == 3:
        # Already a full [B, 4, 4] pose matrix.
        return vec

    # [B, 7]: first three entries are the translation, the rest a quaternion.
    batch = vec.size(0)
    trans = vec[:, :3].unsqueeze(-1)          # [B, 3, 1]
    rot_mat = quat2mat(vec[:, 3:])            # [B, 3, 3]

    # Axis-flip matrix for the dataset's coordinate convention.
    flip = torch.eye(4)
    flip[0, 0] = -1.0
    flip[1, 1] = -1.0

    # Dataset-specific +0.5 offset on the translation.
    top = torch.cat([rot_mat, trans + 0.5], dim=2)  # [B, 3, 4]
    bottom = torch.tensor([[0, 0, 0, 1]]).unsqueeze(0).expand(1, 1, 4).type_as(top).repeat(batch, 1, 1)
    mat = torch.cat([top, bottom], dim=1)           # [B, 4, 4]
    return mat @ flip.type_as(mat)
def inverse_warp(tgt_image, depth, intrinsics, src_pose, tgt_pose):
    """Warp `tgt_image` into the source view using the source-view depth.

    Args:
        tgt_image: [B, 3, H, W] image to sample from.
        depth: [B, 1, H, W] depth map in the source view.
        intrinsics: [B, 3, 3] camera intrinsics.
        src_pose, tgt_pose: poses accepted by `pose_vec2mat`.

    Returns:
        (projected_img, valid_points): the warped image and a [B, H, W]
        boolean mask of pixels that landed inside the target image.
    """
    # 3-D points in the source camera frame.
    cam_points = pixel2cam(depth, intrinsics.inverse())

    # Relative transform source-cam -> target-cam, then into pixel space.
    src_mat = pose_vec2mat(src_pose)
    tgt_mat = pose_vec2mat(tgt_pose)
    src_to_tgt = tgt_mat.inverse() @ src_mat
    proj = intrinsics @ src_to_tgt[:, :3, :]     # [B, 3, 3] @ [B, 3, 4]
    rot, tr = proj[:, :, :3], proj[:, :, -1:]

    grid = cam2pixel(cam_points, rot, tr)
    warped = F.grid_sample(tgt_image.type_as(grid), grid,
                           padding_mode='zeros', align_corners=False)
    in_bounds = grid.abs().max(dim=-1)[0] <= 1
    return warped, in_bounds
def inverse_warp_2(tgt_image, depth, intrinsics, src_pose, tgt_pose):
    '''
    Inverse warp function using Kornia.

    tgt_image:  [B, C, H, W] image to warp.
    depth:      [B, 1, H, W] depth map.
    intrinsics: [B, 3, 3] camera intrinsics (padded to 4x4 below).
    src_pose / tgt_pose: poses accepted by pose_vec2mat.

    Returns the warped image twice; NOTE(review): the second value mirrors
    inverse_warp's (image, mask) return shape but is NOT a validity mask.
    '''
    src_pose_mat = pose_vec2mat(src_pose)
    tgt_pose_mat = pose_vec2mat(tgt_pose)
    b = tgt_image.size(0)
    # Kornia's PinholeCamera expects per-batch height/width tensors.
    h = torch.tensor(tgt_image.size(2)).repeat(b)
    w = torch.tensor(tgt_image.size(3)).repeat(b)
    # Pad the 3x3 intrinsics to the 4x4 form PinholeCamera expects.
    intrinsics = torch.cat([intrinsics.float(), torch.tensor([[0, 0, 0]]).unsqueeze(2).expand(1, 3, 1).type_as(intrinsics).repeat(b, 1, 1).float()], dim = 2)
    intrinsics = torch.cat([intrinsics, torch.tensor([[0, 0, 0, 1]]).expand(1, 1, 4).type_as(intrinsics).repeat(b, 1, 1).float() ], dim = 1)
    pinhole_tgt = kornia.geometry.PinholeCamera(intrinsics, tgt_pose_mat.float(), h, w)
    pinhole_src = kornia.geometry.PinholeCamera(intrinsics, src_pose_mat.float(), h, w)
    image_src = kornia.geometry.depth_warp(pinhole_tgt, pinhole_src, depth.float(), tgt_image.float(), tgt_image.size(2), tgt_image.size(3))
    return image_src, image_src
def project_depth_point_cloud(depth, intrinsics, src_pose, tgt_pose):
    """Lift a depth map to a point cloud and transform it to the target pose.

    Args:
        depth: [B, 1, H, W] depth map in the source view.
        intrinsics: [B, 3, 3] camera intrinsics.
        src_pose, tgt_pose: poses accepted by `pose_vec2mat`.

    Returns:
        (src_camera_coords, projected_coords): both [B, 3, H*W]; the point
        cloud in the source camera frame and the same points expressed in
        the target camera frame.
    """
    cam_points = pixel2cam(depth, intrinsics.inverse())  # [B, 3, H, W]
    b, _, h, w = cam_points.size()
    cam_points = cam_points.reshape(b, 3, h * w)

    # Relative transform source-cam -> target-cam.
    src_to_tgt = pose_vec2mat(tgt_pose).inverse() @ pose_vec2mat(src_pose)

    # Homogeneous coordinates: append a row of ones.
    ones = torch.ones((b, 1, h * w), device=cam_points.device)
    homogeneous = torch.cat([cam_points, ones], dim=1)   # [B, 4, H*W]

    projected = (src_to_tgt.float() @ homogeneous.float())[:, :3, :]  # [B, 3, H*W]
    return cam_points, projected
def NOCS_map_2_point_cloud(nocs_image_tensor, mask):
    '''
    Convert NOCS maps to masked point clouds.

    Input:
        nocs_image_tensor - [B, 3, H, W] - torch tensor
        mask - [B, H, W] - torch tensor (foreground where value > 0.5)
    Returns:
        nocs_point_cloud_list - B element list - [3, masked dims]
        indices_list - B element list - [2, masked dims] (row, col) indices
    '''
    indices_list = []
    nocs_point_cloud_list = []
    B = nocs_image_tensor.shape[0]
    for i in range(B):
        # Pixel indices of the foreground region. The original took a
        # pointless tensor -> numpy -> tensor round trip here; .nonzero()
        # already yields an index tensor, we only move it to the CPU.
        ind = (mask[i, :, :] > 0.5).nonzero().cpu()
        h = ind[:, 0]
        w = ind[:, 1]
        # Gather NOCS values at the masked pixels: [3, mask_size].
        nocs_point_cloud = nocs_image_tensor[i, :, h, w]
        # Detach in place: the point clouds are consumed as constants.
        nocs_point_cloud.detach_()
        nocs_point_cloud_list.append(nocs_point_cloud)
        indices_list.append(torch.stack([h, w]).detach())  # [2, mask_size]
    return nocs_point_cloud_list, indices_list
def get_NOCS_correspondences(nocs_image_tensor_source, mask_source, nocs_image_tensor_target, mask_target):
    '''
    Get pixel correspondences between two views by nearest-neighbour
    matching of their NOCS coordinates.

    Input:
        nocs_image_tensor_source - [B, 3, H, W]
        mask_source - [B, 1, H, W] (channel 0 is used)
        nocs_image_tensor_target - [B, 3, H, W]
        mask_target - [B, 1, H, W]
    Returns:
        (indices_depth_list_source, indices_depth_list_target) - per batch
        element, [2, masked_dim] pixel index tensors; entry k of the target
        list corresponds to entry k of the source list.
    '''
    B, views, H, W = nocs_image_tensor_source.shape
    indices_depth_list_target = []
    indices_depth_list_source = []
    for i in range(B):
        # Masked point clouds + their pixel indices for both views.
        nocs_point_cloud_list_source, indices_list_source = NOCS_map_2_point_cloud(nocs_image_tensor_source[i, :, :, :].unsqueeze(0), mask_source[i, 0, :, :].unsqueeze(0))
        nocs_point_cloud_list_target, indices_list_target = NOCS_map_2_point_cloud(nocs_image_tensor_target[i, :, :, :].unsqueeze(0), mask_target[i, 0, :, :].unsqueeze(0))
        pc_1, ind_1 = nocs_point_cloud_list_source[0], indices_list_source[0]  # [3, mask_size], [2, mask_size]
        pc_2, ind_2 = nocs_point_cloud_list_target[0], indices_list_target[0]  # [3, mask_size]
        # Perform NOCS KNN matching: for each source point, find its nearest
        # target point in NOCS space.
        out = knn_points(pc_1.transpose(0, 1).unsqueeze(0), pc_2.transpose(0, 1).unsqueeze(0))  # [1, masked_dim, 3]
        corresponding_idx = out.idx[0, :, 0]  # [masked_dim]
        # Map the nearest-neighbour index back to target pixel coordinates.
        corresponding_idx = ind_2[:, corresponding_idx]
        indices_depth_list_source.append(ind_1)
        indices_depth_list_target.append(corresponding_idx)
    return indices_depth_list_source, indices_depth_list_target
if __name__ == "__main__":
    # Smoke test: warp an RGB image between two known camera poses using a
    # decoded 16-bit depth map. Pose layout is [tx ty tz qx qy qz qw]
    # (translation then quaternion, matching pose_vec2mat/quat2mat).
    src_pose = torch.tensor([[1663.45703125, 46.258087158203128, -2127.346435546875, 0.008096654899418354, -0.3257482051849365, 0.0027897413820028307, 0.9454177618026733]])
    tgt_pose = torch.tensor([[1889.214599609375, 221.49795532226563, -1699.667724609375, 0.039696164429187778, -0.4065377712249756, 0.01768353208899498, 0.9125999212265015]])
    src_pose_2 = torch.tensor([[2011.62060546875, 374.8108215332031, -1255.8643798828125,0.06847226619720459, -0.48349833488464358, 0.03797297552227974, 0.8718366026878357]])
    # Decode the 16-bit depth encoded in the PNG's R/G channels.
    depth = Image.open('./test-images/depth.png')
    depth = np.array(depth)
    depth = depth_decode(depth)
    depth = torch.tensor(depth).unsqueeze(0).unsqueeze(1).float()
    # print(depth)
    # plt.imshow(depth[0][0])
    # plt.show()
    tgt_image = cv2.imread('./test-images/rgb.png')
    tgt_image = torch.tensor(tgt_image).unsqueeze(0).permute(0, 3, 1, 2).float() / 255.0
    # Pinhole intrinsics: fx = fy = 617.1, principal point (320, 240).
    intrinsics = torch.tensor([
        [617.1,0.0,320.0],
        [0.0,617.1,240.0],
        [0.0,0.0,1.0],
    ])
    scale_factor = 1
    src_pose[0, :3] = src_pose[0, :3] / scale_factor
    tgt_pose[0, :3] = tgt_pose[0, :3] / scale_factor
    src_pose_2[0, :3] = src_pose_2[0, :3] / scale_factor
    # Flip the x translation and the qy/qz quaternion components, presumably
    # to convert the dataset's coordinate convention -- TODO confirm against
    # the data exporter.
    x_factor = -1
    src_pose[0, 0] = src_pose[0, 0] * x_factor
    tgt_pose[0, 0] = tgt_pose[0, 0] * x_factor
    src_pose_2[0, 0] = src_pose_2[0, 0] * x_factor
    src_pose[0, 4:6] = src_pose[0, 4:6] * -1
    tgt_pose[0, 4:6] = tgt_pose[0, 4:6] * -1
    src_pose_2[0, 4:6] = src_pose_2[0, 4:6] * -1
    intrinsics = intrinsics.unsqueeze(0)
    # inverse_warp returns (warped_image, valid_mask); show the image only.
    warp=inverse_warp(tgt_image, depth, intrinsics, tgt_pose, src_pose)
    warp=warp[0].permute(0,2,3,1)
    plt.imshow(warp[0])
    plt.show()
| 2.390625 | 2 |
mirrormanager2/login_forms.py | Devyani-Divs/mirrormanager2 | 1 | 12759835 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
MirrorManager2 login forms.
'''
# # pylint cannot import flask extension correctly
# pylint: disable=E0611,F0401
# # The forms here don't have specific methods, they just inherit them.
# pylint: disable=R0903
# # We apparently use old style super in our __init__
# pylint: disable=E1002
# # Couple of our forms do not even have __init__
# pylint: disable=W0232
from flask.ext import wtf
import wtforms
def same_password(form, field):
    ''' WTForms validator: ensure `field` holds the same data as the form's
    password field; raise a ValidationError otherwise.
    '''
    if form.password.data != field.data:
        raise wtf.ValidationError('Both password fields should be equal')
class LostPasswordForm(wtf.Form):
    """ Form to ask for a password change (only the username is needed). """
    # NOTE: wtforms.TextField is the legacy name; wtforms >= 2 calls this
    # StringField (TextField was removed in wtforms 3).
    username = wtforms.TextField(
        'username <span class="error">*</span>',
        [wtforms.validators.Required()]
    )
class ResetPasswordForm(wtf.Form):
    """ Form to reset one's password in the local database. """
    password = wtforms.PasswordField(
        'Password <span class="error">*</span>',
        [wtforms.validators.Required()]
    )
    # Must match `password` (enforced by the same_password validator).
    confirm_password = wtforms.PasswordField(
        'Confirm password <span class="error">*</span>',
        [wtforms.validators.Required(), same_password]
    )
class LoginForm(wtf.Form):
    """ Form to login via the local database (username + password). """
    username = wtforms.TextField(
        'username <span class="error">*</span>',
        [wtforms.validators.Required()]
    )
    password = wtforms.PasswordField(
        'Password <span class="error">*</span>',
        [wtforms.validators.Required()]
    )
class NewUserForm(wtf.Form):
    """ Form to add a new user to the local database. """
    user_name = wtforms.TextField(
        'username <span class="error">*</span>',
        [wtforms.validators.Required()]
    )
    # Optional human-readable name.
    display_name = wtforms.TextField(
        'Full name',
        [wtforms.validators.Optional()]
    )
    email_address = wtforms.TextField(
        'Email address <span class="error">*</span>',
        [wtforms.validators.Required(), wtforms.validators.Email()]
    )
    password = wtforms.PasswordField(
        'Password <span class="error">*</span>',
        [wtforms.validators.Required()]
    )
    # Must match `password` (enforced by the same_password validator).
    confirm_password = wtforms.PasswordField(
        'Confirm password <span class="error">*</span>',
        [wtforms.validators.Required(), same_password]
    )
| 1.90625 | 2 |
desafio/desafio053.py | henriquekirchheck/Curso-em-video-Python | 0 | 12759836 | <gh_stars>0
# Crie um programa que leia uma frase qualquer e diga se ela é um palíndromo,
# desconsiderando os espaços.
# (Read a sentence and report whether it is a palindrome, ignoring spaces.)
frase = str(input('\nDigite uma frase: '))
# Normalize: lowercase and drop all whitespace before comparing.
# (The original named this `min`, shadowing the builtin, and rebuilt the
# normalized string a second time just to reverse it.)
normalizado = ''.join(frase.lower().split())
invertido = normalizado[::-1]
if(normalizado == invertido):
    print('A frase é um palíndromo')
else:
    print('A frase não é um palíndromo')
scripts/report_posts.py | agustibr/metrics | 0 | 12759837 | from glob import glob
import re
import operator
import os
import textwrap
import util
WEEKLY_METRICS_VERSION = "0.1"
ORG_WEEKLY_METRICS_VERSION = "0.1"
MONTHLY_METRICS_VERSION = "0.1"
ORG_MONTHLY_METRICS_VERSION = "0.1"
PATH_TO_METRICS_POSTS = "_posts"
PATH_TO_GRAPHS = "graphs"
WEEKLY_PROJECT_POST = """\
---
layout: weekly-metrics-v{version}
title: Metrics report for {owner}/{repo} | {reportID}
permalink: /{owner}/{repo}/{link}/
owner: {owner}
repo: {repo}
reportID: {reportID}
datestampThisWeek: {datestampThisWeek}
datestampLastWeek: {datestampLastWeek}
---
"""
MONTHLY_PROJECT_POST = """\
---
layout: monthly-metrics-v{version}
title: Metrics report for {owner}/{repo} | {reportID} | {datestampThisMonth}
permalink: /{owner}/{repo}/{link}/
owner: {owner}
repo: {repo}
reportID: {reportID}
datestampThisMonth: {datestampThisMonth}
datestampLastMonth: {datestampLastMonth}
---
"""
# {{% for item in site.data["{owner_in_data}"]["{repo_in_data}"]["{reportID}"]["data"] %}}
WEEKLY_ORG_POST = """\
---
layout: org-weekly-metrics-v{version}
title: TwiterOSS Metrics Report for {owner} | {reportID}
permalink: /{owner}/{link}/
org: {owner}
reportID: {reportID}
datestampThisWeek: {datestampThisWeek}
datestampLastWeek: {datestampLastWeek}
---
"""
# {{% for item in site.data["{owner_in_data}"]["{reportID}"]["data"] %}}
MONTHLY_ORG_POST = """\
---
layout: org-monthly-metrics-v{version}
title: TwiterOSS Metrics Report for {owner} | {reportID}
permalink: /{owner}/{link}/
org: {owner}
reportID: {reportID}
datestampThisMonth: {datestampThisMonth}
datestampLastMonth: {datestampLastMonth}
---
"""
# {{% for item in site.data["{owner_in_data}"]["{reportID}"]["data"] %}}
def add_table_of_metrics(post_text, REPORT_JSON, data_source, ID, add_breakdown=False):
    """Append an HTML metrics table (latest / previous / diff) to post_text.

    Args:
        post_text: HTML accumulated so far; the table is appended to it.
        REPORT_JSON: report dict with a 'data' mapping of metric -> values
            ('latest', 'previous', 'diff', optionally 'diff_breakdown').
        data_source: unused (see note below).
        ID: report kind ('WEEKLY'/'MONTHLY'); used to build breakdown links.
        add_breakdown: when True, append a collapsible per-repo breakdown
            of each metric's diff.

    Returns:
        The extended post_text string.
    """
    # data_source is not used in the function
    # It can be used to create a jekyll loop like below but is being avoided
    # {{% for item in data_source %}}
    post_text += textwrap.dedent("""
<table class="table table-condensed" style="border-collapse:collapse;">
<thead>
<tr>
<th>Metric</th>
<th>Latest</th>
<th>Previous</th>
<th colspan="2" style="text-align: center;">Difference</th>
</tr>
</thead>
<tbody>
""")
    for metric in REPORT_JSON['data']:
        color = util.get_metrics_color(metric, REPORT_JSON['data'][metric]['diff'])
        # Percentage change, guarding against division by a zero previous
        # value ('∞' when growing from zero, '0.0' when both are zero).
        if REPORT_JSON['data'][metric]['previous'] != 0:
            percentage_change = str(round(REPORT_JSON['data'][metric]['diff']/REPORT_JSON['data'][metric]['previous']*100, 2))
        elif REPORT_JSON['data'][metric]['latest'] != 0:
            percentage_change = '∞'
        else:
            percentage_change = '0.0'
        post_text += """
<tr data-toggle="collapse" data-target="#col-{5}" class="accordion-toggle" style="cursor: pointer;">
<td>{0:}</td>
<td>{1:,}</td>
<td>{2:,}</td>
<td style="color: {4}" >{3:,}</td>
<td style="color: {4}" >{6}%</td>
</tr>
""".format(util.get_metrics_name(metric),
           REPORT_JSON['data'][metric]['latest'],
           REPORT_JSON['data'][metric]['previous'],
           REPORT_JSON['data'][metric]['diff'],
           color,
           metric,
           percentage_change)
        # Add diff breakdown
        if add_breakdown and len(REPORT_JSON['data'][metric]['diff_breakdown'].items()):
            post_text += """
<td class="hiddenRow" colspan="2"></td>
<td class="hiddenRow" colspan="3" style="padding: 0" ><div class="accordian-body collapse" id="col-{0}">
""".format(metric)
            # Largest contributors first.
            items = list(REPORT_JSON['data'][metric]['diff_breakdown'].items())
            items.sort(key=operator.itemgetter(1), reverse=True)
            for item, value in items:
                href = "/metrics/{}/{}/{}".format(REPORT_JSON['name'], item, ID)
                post_text += """<a target="_blank" href="{2}">{0} : {1}</a><br>""".format(item, value, href)
            post_text += """</div> </td>"""
    post_text += textwrap.dedent("""
</tbody>
</table>
""")
    return post_text
def add_augur_metrics(post_text, REPORT_JSON, AUGUR_METRICS, ID, is_project=False):
    """Append CHAOSS/Augur metrics (currently only bus factor) to a post.

    Args:
        post_text: HTML accumulated so far.
        REPORT_JSON: report dict; for projects must contain 'nameWithOwner'.
        AUGUR_METRICS: Augur data keyed by metric name, then repo name.
        ID: 'WEEKLY' or 'MONTHLY' (only used by the commented-out graphs).
        is_project: True for a repo report; the org branch is a no-op.

    Returns:
        The extended post_text string.
    """
    if is_project:
        nameWithOwner = REPORT_JSON["nameWithOwner"]
        """
        Bus Factor
        """
        # Fall back to "N/A" when Augur has no data for this repo.
        try:
            bus_factor = AUGUR_METRICS["bus_factor"][nameWithOwner]
        except KeyError:
            bus_factor = {"best": "N/A", "worst": "N/A"}
        post_text += '<br>\n<h4><a target="_blank" href="https://chaoss.community/">CHAOSS</a> Metrics</h4>' + '\n'
        post_text += textwrap.dedent(f"""
<table class="table table-condensed" style="border-collapse:collapse;">
<tbody>
<td>Bus Factor</td>
<td>Best: {bus_factor["best"]}</td>
<td>Worst: {bus_factor["worst"]}</td>
</tbody>
</table>
""")
        graph_text = ""
        # """
        # Timeseries of new watchers
        # """
        # if ID == 'WEEKLY':
        #     graph_path = f"{PATH_TO_GRAPHS}/{nameWithOwner}/timeseries_new_watchers_per_week.svg"
        # elif ID == 'MONTHLY':
        #     graph_path = f"{PATH_TO_GRAPHS}/{nameWithOwner}/timeseries_new_watchers_per_month.svg"
        # if os.path.exists(graph_path):
        #     graph_text += f'\t<object class="cell" type="image/svg+xml" data="/metrics/{graph_path}">\n'
        #     graph_text += '\t\tYour browser does not support SVG\n'
        #     graph_text += '\t</object>\n'
        # Add more chaoss graphs here
        # After all the graphs
        if graph_text:
            post_text += '<div class="row">\n'
            post_text += graph_text
            post_text += '</div>\n'
    else:  # ORG
        # No org-level Augur metrics yet.
        pass
    return post_text
def add_highlights(post_text, REPORT_JSON, ID):
    """Append a 'Highlights' list of repos crossing milestone numbers.

    Each highlight is a (repo, number, metric) triple; entries whose number
    contains more zeros (i.e. rounder milestones) are listed first. Returns
    post_text unchanged when there are no highlights.
    """
    org = REPORT_JSON["name"]
    highlights = REPORT_JSON["highlights"]
    if not highlights:
        return post_text

    # Rounder numbers (more zeros) read as bigger milestones - sort them
    # first. In-place sort, matching the original's side effect.
    highlights.sort(key=lambda item: str(item[1]).count('0'), reverse=True)

    post_text += '<br>\n<h4>Highlights</h4>' + '\n'
    post_text += '<ul>' + '\n'
    for repo, number, metric in highlights:
        post_text += '\t' + f'<li><a href="/metrics/{org}/{repo}/{ID}">{repo}</a>'
        post_text += f' crossed {number:,} {util.get_metrics_name(metric)}</li>' + '\n'
    post_text += '</ul>' + '\n'
    return post_text
def add_github_metrics_graphs(post_text, REPORT_JSON, ID):
    """Append the binary-treemap graph section for an org's report.

    Embeds every `treemap_<id>_*.svg` found under the org's graph directory
    as an inline <object>, and returns the extended post text.
    """
    org = REPORT_JSON["name"]
    svg_paths = glob(PATH_TO_GRAPHS + "/" + org + f"/treemap_{ID.lower()}_*.svg")

    post_text += '<div class="graph-container">\n'
    post_text += '<br>\n<h4>Binary Treemap graphs</h4>\n'
    post_text += '<div class="row">\n'
    for svg in svg_paths:
        post_text += f'\t<object class="cell" type="image/svg+xml" data="/metrics/{svg}">\n'
        post_text += '\t\tYour browser does not support SVG\n'
        post_text += '\t</object>\n'
    post_text += '</div>\n'
    post_text += '</div>\n'
    return post_text
def _create_post(REPORT_JSON, AUGUR_METRICS, latest=False, is_project=True):
    """
    Write one markdown post (Jekyll front matter + HTML body) for a report.

    latest: If True, create a post with permalink /owner/repo/{ID}
    project: If False, it means the post is for an org, else for a project
    """
    # Report kind ('WEEKLY' or 'MONTHLY') is the prefix of the report ID.
    ID = REPORT_JSON['reportID'].split('-')[0]
    if is_project:
        org, repo = REPORT_JSON['nameWithOwner'].split("/")
    else:  # org
        org, repo = REPORT_JSON['name'], ''
    # Create directory for the post, if it does not exist
    path_to_post = os.path.join(PATH_TO_METRICS_POSTS, org, repo)
    os.makedirs(path_to_post, exist_ok=True)
    if latest:
        """
        Create file for latest report
        """
        # Delete already existing latest posts
        # ({{ }} in the template become literal regex braces after .format).
        re_latest_report = re.compile(r"\d{{4}}-\d{{2}}-\d{{2}}-{}-LATEST.md".format(ID))
        for filename in os.listdir(path_to_post):
            if re_latest_report.match(filename):
                print("LOG: Removing existing latest post", os.path.join(path_to_post, filename))
                os.unlink(os.path.join(path_to_post, filename))
        # Create latest report file in _posts as well
        post_file = "{}/{}-{}-LATEST.md".format(path_to_post, REPORT_JSON["datestamp"]["latest"], ID)
    else:
        """
        Create file for dated report
        """
        # This is a weird filename for sure. But I think I have an explanation for it -
        # posts need to start with %Y-%m-%d and the later is sent to page.title variable
        # Without the later date, title did not make much sense.
        post_file = "{}/{}-{}.md".format(path_to_post, REPORT_JSON["datestamp"]["latest"], REPORT_JSON["reportID"])
    # Permalink slug: the stable ID for latest posts, the full report ID otherwise.
    if latest:
        link = ID
    else:
        link = REPORT_JSON["reportID"]
    if ID == "WEEKLY":
        if is_project:
            """
            WEEKLY - PROJECT
            """
            data_source = 'site.data["{owner_in_data}"]["{repo_in_data}"]["{reportID}"]["data"]'
            post_text = add_table_of_metrics(WEEKLY_PROJECT_POST, REPORT_JSON, data_source, 'WEEKLY')
            # post_text = add_augur_metrics(post_text, REPORT_JSON, AUGUR_METRICS, 'WEEKLY', is_project=True)
        else:
            """
            WEEKLY - ORG
            """
            data_source = 'site.data["{owner_in_data}"]["{reportID}"]["data"]'
            post_text = add_table_of_metrics(WEEKLY_ORG_POST, REPORT_JSON, data_source, 'WEEKLY', add_breakdown=True)
            post_text = add_highlights(post_text, REPORT_JSON, 'WEEKLY')
            post_text = add_github_metrics_graphs(post_text, REPORT_JSON, 'WEEKLY')
        post_text = post_text.format(
            version=WEEKLY_METRICS_VERSION,
            owner=org,
            owner_in_data=org.replace('.', ''),  # Dots confused jekyll
            repo=repo,
            repo_in_data=repo.replace('.', ''),
            reportID=REPORT_JSON["reportID"],
            datestampThisWeek=REPORT_JSON["datestamp"]["latest"],
            datestampLastWeek=REPORT_JSON["datestamp"]["previous"],
            link=link)
    elif ID == "MONTHLY":
        if is_project:
            """
            MONTHLY - PROJECT
            """
            data_source = 'site.data["{owner_in_data}"]["{repo_in_data}"]["{reportID}"]["data"]'
            post_text = add_table_of_metrics(MONTHLY_PROJECT_POST, REPORT_JSON, data_source, 'MONTHLY')
            # post_text = add_augur_metrics(post_text, REPORT_JSON, AUGUR_METRICS, 'MONTHLY', is_project=True)
        else:
            """
            MONTHLY - ORG
            """
            data_source = 'site.data["{owner_in_data}"]["{reportID}"]["data"]'
            post_text = add_table_of_metrics(MONTHLY_ORG_POST, REPORT_JSON, data_source, 'MONTHLY', add_breakdown=True)
            post_text = add_highlights(post_text, REPORT_JSON, 'MONTHLY')
            post_text = add_github_metrics_graphs(post_text, REPORT_JSON, 'MONTHLY')
        post_text = post_text.format(
            version=MONTHLY_METRICS_VERSION,
            owner=org,
            owner_in_data=org.replace('.', ''),  # Dots confused jekyll
            repo=repo,
            repo_in_data=repo.replace('.', ''),
            reportID=REPORT_JSON["reportID"],
            datestampThisMonth=REPORT_JSON["datestamp"]["latest"],
            datestampLastMonth=REPORT_JSON["datestamp"]["previous"],
            link=link)
    with open(post_file, "w+") as f:
        f.write(post_text)
    if latest:
        print("LOG: Created the latest POST", post_file)
    else:
        print("LOG: Created the POST", post_file)
def create_posts(REPORT_JSON, AUGUR_METRICS, is_project=True):
    """Create both the dated post and the LATEST post for a report."""
    for latest in (False, True):
        _create_post(REPORT_JSON, AUGUR_METRICS, latest=latest, is_project=is_project)
| 2.15625 | 2 |
import h5py
import numpy as np

# Repack Keras VGG16 weights (HDF5) into the flat naming scheme used by Owl:
# conv2d_<k>_{w,b} for the 13 conv layers, fc{1,2,3}_{w,b} for the dense ones.
fname = "/home/stark/.keras/models/vgg16_weights_tf_dim_ordering_tf_kernels.h5"
dfname = 'vgg16_owl.hdf5'

f = h5py.File(fname, 'r')
data_file = h5py.File(dfname, 'w')

# Conv layers: VGG16 has 5 blocks with 2-3 conv layers each (13 in total).
k = 1
for i in range(1, 6):  # 5 blocks in total
    for j in range(1, 4):
        # Blocks 1 and 2 only contain two conv layers each.
        if (j == 3 and (i == 1 or i == 2)): continue
        node_name_origin = 'block' + str(i) + '_conv' + str(j)
        # Bug fix: Dataset.value was deprecated and removed in h5py 3.x;
        # `dataset[()]` is the supported way to read the full contents.
        conv_w = f[node_name_origin][node_name_origin + '_W_1:0'][()].tolist()
        conv_b = f[node_name_origin][node_name_origin + '_b_1:0'][()].tolist()
        node_name = 'conv2d_' + str(k)
        k += 1
        data_file.create_dataset(node_name + '_w', data=conv_w)
        data_file.create_dataset(node_name + '_b', data=conv_b)
assert(k == 14)

# Fully-connected layers fc1 / fc2.
for i in range(1, 3):
    node_name = 'fc' + str(i)
    fc_w = f[node_name][node_name + '_W_1:0'][()].tolist()
    fc_b = f[node_name][node_name + '_b_1:0'][()].tolist()
    data_file.create_dataset(node_name + '_w', data=fc_w)
    data_file.create_dataset(node_name + '_b', data=fc_b)

# Prediction node, stored as fc3 since it is also a fully-connected layer.
node_name = 'predictions'
p_w = f[node_name][node_name + '_W_1:0'][()].tolist()
p_b = f[node_name][node_name + '_b_1:0'][()].tolist()
data_file.create_dataset('fc3_w', data=p_w)
data_file.create_dataset('fc3_b', data=p_b)

data_file.close()
f.close()

# Read back for inspection:
# f = h5py.File(dfname)
panaroo/post_run_alignment_gen.py | AMARTELKE/Pangenome-with-Panaroo | 116 | 12759839 | import shutil
import tempfile
import os
import networkx as nx
from .generate_output import *
from .isvalid import *
from .__init__ import __version__
def get_options():
    """Parse command-line options for the post-run alignment generator."""
    import argparse

    description = 'Generate multiple sequence alignments after running Panaroo'
    parser = argparse.ArgumentParser(description=description,
                                     prog='generate_panaroo_msa')

    io_opts = parser.add_argument_group('Input/output')
    # Validated up front: must be an existing Panaroo output directory.
    io_opts.add_argument("-o",
                         "--out_dir",
                         dest="output_dir",
                         required=True,
                         help="location of the Panaroo output directory",
                         type=lambda x: is_valid_folder(parser, x))

    # alignment
    core = parser.add_argument_group('Gene alignment')
    core.add_argument(
        "-a",
        "--alignment",
        dest="aln",
        help=("Output alignments of core genes or all genes. Options are" +
              " 'core' and 'pan'. Default: 'None'"),
        type=str,
        choices={'core', 'pan'},
        default='core')
    core.add_argument(
        "--aligner",
        dest="alr",
        help=
        "Specify an aligner. Options:'prank', 'clustal', and default: 'mafft'",
        type=str,
        choices={'prank', 'clustal', 'mafft'},
        default="mafft")
    # Fraction of samples a gene must appear in to count as core.
    core.add_argument("--core_threshold",
                      dest="core",
                      help="Core-genome sample threshold (default=0.95)",
                      type=float,
                      default=0.95)

    # Other options
    parser.add_argument("-t",
                        "--threads",
                        dest="n_cpu",
                        help="number of threads to use (default=1)",
                        type=int,
                        default=1)
    parser.add_argument("--verbose",
                        dest="verbose",
                        help="print additional output",
                        action='store_true',
                        default=False)
    parser.add_argument('--version',
                        action='version',
                        version='%(prog)s ' + __version__)

    args = parser.parse_args()
    return (args)
def main():
    """Entry point: regenerate core/pan-genome MSAs from a Panaroo run."""
    args = get_options()
    # make sure trailing forward slash is present
    args.output_dir = os.path.join(args.output_dir, "")
    # Create temporary directory (inside the output dir, removed at the end)
    temp_dir = os.path.join(tempfile.mkdtemp(dir=args.output_dir), "")
    # Load isolate names: first CSV column of gene_data.csv, de-duplicated
    # while preserving first-seen order; the header line is skipped.
    seen = set()
    isolate_names = []
    with open(args.output_dir + "gene_data.csv", 'r') as infile:
        next(infile)
        for line in infile:
            iso = line.split(",")[0]
            if iso not in seen:
                isolate_names.append(iso)
                seen.add(iso)
    # Load graph
    G = nx.read_gml(args.output_dir + "final_graph.gml")
    # Write out core/pan-genome alignments
    if args.aln == "pan":
        if args.verbose: print("generating pan genome MSAs...")
        generate_pan_genome_alignment(G, temp_dir, args.output_dir, args.n_cpu,
                                      args.alr, isolate_names)
        core_nodes = get_core_gene_nodes(G, args.core, len(isolate_names))
        concatenate_core_genome_alignments(core_nodes, args.output_dir)
    elif args.aln == "core":
        if args.verbose: print("generating core genome MSAs...")
        generate_core_genome_alignment(G, temp_dir, args.output_dir,
                                       args.n_cpu, args.alr, isolate_names,
                                       args.core, len(isolate_names))
    # remove temporary directory
    shutil.rmtree(temp_dir)
    return
if __name__ == '__main__':
main()
| 2.59375 | 3 |
NEST-14.0-FPGA/pynest/nest/tests/test_errors.py | OpenHEC/SNN-simulator-on-PYNQcluster | 45 | 12759840 | # -*- coding: utf-8 -*-
#
# test_errors.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for error handling
"""
import unittest
import nest
@nest.check_stack
class ErrorTestCase(unittest.TestCase):
    """Tests if errors are handled correctly"""

    def test_Raise(self):
        """Error raising"""
        # Helper that raises an arbitrary exception class with a message.
        def raise_custom_exception(exc, msg):
            raise exc(msg)
        message = "test"
        exception = nest.NESTError
        self.assertRaisesRegex(
            exception, message, raise_custom_exception, exception, message)

    def test_StackUnderFlow(self):
        """Stack underflow"""
        nest.ResetKernel()
        # Popping from an emptied SLI stack must raise StackUnderflow.
        self.assertRaisesRegex(
            nest.NESTError, "StackUnderflow", nest.sli_run, 'clear ;')

    def test_DivisionByZero(self):
        """Division by zero"""
        nest.ResetKernel()
        self.assertRaisesRegex(
            nest.NESTError, "DivisionByZero", nest.sli_run, '1 0 div')

    def test_UnknownNode(self):
        """Unknown node"""
        nest.ResetKernel()
        # Node 99 does not exist in a freshly reset kernel.
        self.assertRaisesRegex(
            nest.NESTError, "UnknownNode", nest.Connect, (99, ), (99, ))

    def test_UnknownModel(self):
        """Unknown model name"""
        nest.ResetKernel()
        self.assertRaisesRegex(
            nest.NESTError, "UnknownModelName", nest.Create, -1)
def suite():
    """Build the test suite for this module."""
    # makeSuite() was deprecated in Python 3.11 and removed in 3.13;
    # TestLoader is the supported replacement and uses the same default
    # 'test' method-name prefix that was being passed explicitly.
    return unittest.TestLoader().loadTestsFromTestCase(ErrorTestCase)
def run():
    """Execute the suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())
if __name__ == "__main__":
run()
| 2.265625 | 2 |
examples/applications/gaussian/gaussian_relu_rbm_training.py | anukaal/learnergy | 39 | 12759841 | <filename>examples/applications/gaussian/gaussian_relu_rbm_training.py
import torch
import torchvision

from learnergy.models.gaussian import GaussianReluRBM

# Creating training and testing dataset
# (MNIST; downloaded on first use, images converted to tensors by ToTensor)
train = torchvision.datasets.MNIST(
    root='./data', train=True, download=True, transform=torchvision.transforms.Compose([
        torchvision.transforms.ToTensor()
    ]))
test = torchvision.datasets.MNIST(
    root='./data', train=False, download=True, transform=torchvision.transforms.Compose([
        torchvision.transforms.ToTensor()
    ]))

# Creating a GaussianReluRBM
# 784 visible units (28x28 pixels flattened) and 256 hidden units; steps=1
# means single-step contrastive divergence.
model = GaussianReluRBM(n_visible=784, n_hidden=256, steps=1, learning_rate=0.001,
                        momentum=0.9, decay=0, temperature=1, use_gpu=False)

# Training a GaussianReluRBM (returns per-fit mse and pseudo-likelihood)
mse, pl = model.fit(train, batch_size=128, epochs=5)

# Reconstructing test set (rec_mse = reconstruction error, v = reconstructions)
rec_mse, v = model.reconstruct(test)

# Saving model
torch.save(model, 'model.pth')

# Checking the model's history
print(model.history)
| 2.75 | 3 |
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test84.py | YangHao666666/hawq | 450 | 12759842 | <filename>tools/bin/pythonSrc/pychecker-0.8.18/test_input/test84.py
'this crashed pychecker from calendar.py in Python 2.2'
class X:
    'Regression input: this pattern crashed pychecker (from calendar.py).'
    def test(self, item):
        # Intentionally bogus: calling list.__getslice__() with no arguments.
        # This file exists to reproduce a pychecker crash - do not "fix" it.
        return [e for e in item].__getslice__()
# this crashed in 2.2, but not 2.3
def f(a):
    # Conditional list comprehension assigned to an attribute; a historical
    # pychecker crash case (Python 2.2). Left as-is on purpose.
    a.a = [x for x in range(2) if x > 1]
| 2.203125 | 2 |
visualization/fig1/thresholding.py | kosekei/TDA | 0 | 12759843 | <filename>visualization/fig1/thresholding.py
import numpy as np
import pywt,cv2,sys, subprocess
import matplotlib.pyplot as plt

# Load the interpolated scan, reorient it, and write one binary image per
# threshold value.
filename = "data/D-051-1_interpolated.txt"
imArray1 = np.loadtxt(filename)
imArray2 = imArray1.T
# Bug fix: the module is imported as `np`, so the original `numpy.flip(...)`
# raised NameError. axis=None flips over all axes.
imArray3 = np.flip(imArray2, axis=None)
for i in [-10, -14.5, -15, -15.5, -16.5]:
    # Binarize: 0 below the threshold, 1 at/above it; scale to 0/255 on write.
    imArray4 = np.where(imArray3 < i, 0, 1)
    cv2.imwrite("data/D-051-1_thr_"+ str(i) + " .png", imArray4*255)
PyBullet_experiments/experiments/ger_sac_minitaur.py | AnonymousLaBER/LaBER | 3 | 12759844 | import pybullet_envs
from stable_baselines3 import SAC_GER
# Train SAC_GER on the PyBullet Minitaur locomotion task for 3M timesteps;
# TensorBoard logs are written under results/long_SAC_GER_MinitaurBullet/.
model = SAC_GER('MlpPolicy', 'MinitaurBulletEnv-v0', verbose=1, tensorboard_log="results/long_SAC_GER_MinitaurBullet/")
model.learn(total_timesteps=3000000)
| 1.46875 | 1 |
main/rearrange-array-such-that-even-positioned-are-greater-than-odd/rearrange-array-such-that-even-positioned-are-greater-than-odd-wrong-interpretation.py | EliahKagan/old-practice-snapshot | 0 | 12759845 | #!/usr/bin/env python3
def round_down_to_even(value):
    """Return the largest even integer <= value (clears the lowest bit)."""
    return (value >> 1) << 1
# For each test case: read (and ignore) n, read the array, then swap each
# adjacent pair so every even index holds the smaller of the pair - i.e.
# each odd-positioned element ends up >= its left neighbour.
for _ in range(int(input())):
    input() # don't need n
    a = list(map(int, input().split()))
    # A trailing unpaired element (odd length) is left untouched.
    for i in range(0, round_down_to_even(len(a)), 2):
        if a[i] > a[i + 1]:
            a[i], a[i + 1] = a[i + 1], a[i]
    print(*a)
scripts/download_as_csv.py | raunakdaga/eAbsentee | 6 | 12759846 | """
This script is used to SSH into the PythonAnywhere host, and download all user information from our MySQL database into a .CSV. What this script was used for, I do not know.
- <NAME> 1/17/21
"""
import csv
import os
import sshtunnel
from os import environ, path
from datetime import datetime
from sqlalchemy import create_engine, Column, String, DateTime, asc
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from dotenv import load_dotenv

# Connection settings come from a local .env file.
load_dotenv()

sshtunnel.SSH_TIMEOUT = 5.0
sshtunnel.TUNNEL_TIMEOUT = 5.0

# MySQL is reached through the SSH tunnel, hence localhost.
host = '127.0.0.1'
user = os.environ["PA_USER"]
# FIX: the committed file contained a redacted "<PASSWORD>" placeholder here,
# which is not valid Python.  Restored to an environment lookup.
# TODO(review): confirm the actual env-var name used for the DB password.
passwd = os.environ["PA_PASSWORD"]
db = os.environ["PA_DB"]
with sshtunnel.SSHTunnelForwarder(
    ('ssh.pythonanywhere.com'),
    # FIX: the committed file had a redacted "<PASSWORD>" placeholder here
    # (invalid Python).  The "SSH_PWD" key literal was preserved, so the
    # os.environ lookup is restored around it.
    ssh_password=os.environ["SSH_PWD"],
    ssh_username=os.environ["PA_USER"],
    remote_bind_address=(str(os.environ["PA_USER"] + '.mysql.pythonanywhere-services.com'), 3306)
) as server:
    print('Server connected via SSH')
    # The tunnel binds a free local port; build the SQLAlchemy URL from it.
    port = str(server.local_bind_port)
    conn_addr = 'mysql://' + user + ':' + passwd + '@' + host + ':' + port + '/' + db
    engine = create_engine(conn_addr, pool_recycle=280)
    Base = declarative_base()
    Base.metadata.bind = engine
    Session = sessionmaker(bind=engine)
    session = Session()
    class User(Base):
        """Data model for voters and their information."""
        __tablename__ = 'users'
        # Primary key: unique id assigned to each submitted application.
        application_id = Column(String(64), primary_key=True)
        name = Column(String(128), index=False, nullable=False)
        county = Column(String(128), index=False, nullable=False)
        # Defaults to the UTC time the row was created.
        submission_time = Column(DateTime, nullable=False, default=datetime.utcnow)
        email = Column(String(128), index=False)
        phonenumber = Column(String(128), index=False)
        full_address = Column(String(256), index=False, nullable=False)
        # IP address the submission came from.
        ip = Column(String(128), index=False, nullable=False)
        group_code = Column(String(128), index=False)
        # Coordinates are stored as strings, not floats.
        lat = Column(String(32), index=False)
        long = Column(String(32), index=False)
        def __repr__(self):
            # Human-readable identifier for debugging/logging.
            return '<Voter {}>'.format(self.name)
        def get_address(self):
            return self.full_address
        def get_lat(self):
            return self.lat
        def get_long(self):
            return self.long
filename = 'INSERT'
with open(filename, 'w', encoding='cp1252', newline='') as csvfile:
csvreader = csv.writer(csvfile)
query = session.query(User).order_by(asc(User.submission_time)).all()
for user in query:
row = [user.application_id, user.name, user.county, str(user.submission_time), user.email, user.phonenumber, user.full_address, user.ip, user.group_code, user.lat, user.long]
csvreader.writerow(row)
| 3.140625 | 3 |
clair_singularity/util.py | dtrudg/clair-singularity | 2 | 12759847 | <gh_stars>1-10
import hashlib
import json
import sys
def sha256(fname):
    """Return the hex-encoded SHA-256 digest of the file at *fname*.

    Reads in 64 KiB chunks so arbitrarily large files can be hashed
    without loading them fully into memory.
    """
    digest = hashlib.sha256()
    with open(fname, "rb") as handle:
        while True:
            chunk = handle.read(65536)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def pretty_json(obj):
    """Render *obj* as deterministic, human-readable JSON.

    Keys are sorted and output is indented by two spaces, with compact
    separators.
    """
    return json.dumps(obj, indent=2, sort_keys=True, separators=(',', ':'))
def err_and_exit(e, code=1):
    """Write *e* to STDERR and terminate the process with exit status *code*."""
    print(str(e), file=sys.stderr, end='')
    sys.exit(code)
# http://code.activestate.com/recipes/576655-wait-for-network-service-to-appear/
#
#
# Copyright (c) 2017 ActiveState Software Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
def wait_net_service(server, port, timeout=None):
    """ Wait for network service to appear
        @param timeout: in seconds, if None or 0 wait forever
        @return: True of False, if timeout is None may return only True or
                 throw unhandled network exception
    """
    import socket
    s = socket.socket()
    if timeout:
        from time import time as now
        # time module is needed to calc timeout shared between two exceptions
        end = now() + timeout
    while True:
        try:
            if timeout:
                next_timeout = end - now()
                if next_timeout < 0:
                    # Deadline passed without ever connecting.
                    return False
                else:
                    s.settimeout(next_timeout)
            s.connect((server, port))
        except (socket.timeout, socket.error):
            # Service not reachable yet -- keep retrying.
            # NOTE(review): with timeout=None this loop spins without
            # sleeping on connection-refused; confirm that is acceptable.
            pass
        else:
            s.close()
            return True
| 2.921875 | 3 |
test.py | pscly/bisai1 | 0 | 12759848 | # coding: utf-8
# 作者:Pscly
# 创建日期:
# 用意:
# a = [1,2,3,4,5]
# a.pop(0)
# a.append(4)
# print(a)
# for i, j in enumerate(a,1):
# print(i, j)
# print(a[:-1])
a = '123456'
print(a.split('x'))
| 3.546875 | 4 |
src/twitter_utils/tweets_between.py | namuan/twitter-tools | 1 | 12759849 | """
Downloads tweets between two dates.
"""
from __future__ import annotations
import datetime
import sys
from argparse import ArgumentParser, Namespace, RawDescriptionHelpFormatter
from pathlib import Path
from py_executable_checklist.workflow import run_workflow
from twitter_utils import setup_logging
from twitter_utils.browser_session import BrowserSession
from twitter_utils.workflows.workflow_steps import (
CloseBrowserSession,
CreateBrowserSession,
GetAllTweetsBetweenDateRange,
WriteTweetsToDirectory,
)
def parse_args(args: list[str]) -> Namespace:
    """Parse command-line options for the tweet-download script.

    Required: query (-q), date range (-s/-u, ISO dates) and output
    directory (-o).  Optional: browser (-b, default firefox) and
    verbosity (-v).
    """
    cli = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)
    cli.add_argument(
        "-q", "--query", required=True, type=str,
        help="A twitter handle or a hash tag. Use '#' to start a hash tag.",
    )
    cli.add_argument(
        "-s", "--since", required=True, type=datetime.date.fromisoformat,
        help="Search from this date. Format YYYY-MM-DD",
    )
    cli.add_argument(
        "-u", "--until", required=True, type=datetime.date.fromisoformat,
        help="Search until this date. Format YYYY-MM-DD",
    )
    cli.add_argument(
        "-b", "--browser", required=False, type=str, default="firefox",
        help="Browser to use for web scraping. Default: firefox",
    )
    cli.add_argument(
        "-o", "--output-directory", required=True, type=Path,
        help="Directory to save tweets to",
    )
    cli.add_argument(
        "-v", "--verbose", action="store_true", default=False, dest="verbose",
        help="Display context variables at each step",
    )
    return cli.parse_args(args=args)
def workflow_steps() -> list:
    """Return the ordered list of workflow step classes executed per run."""
    steps = [CreateBrowserSession, GetAllTweetsBetweenDateRange]
    steps += [WriteTweetsToDirectory, CloseBrowserSession]
    return steps
def tweets_between_workflow(context: dict) -> None:
    """Run the tweet-download workflow, using *context* as shared state."""
    steps = workflow_steps()
    run_workflow(context, steps)
def main() -> None:  # pragma: no cover
    """CLI entry point: parse argv, attach a browser session, run the workflow."""
    setup_logging()
    parsed_args = parse_args(sys.argv[1:])
    context = vars(parsed_args)
    context["browser_session"] = BrowserSession(parsed_args.browser)
    tweets_between_workflow(context)


if __name__ == "__main__":  # pragma: no cover
    main()
| 3.046875 | 3 |
validate_schema.py | clarafennessyhdruk/datasets | 0 | 12759850 | <filename>validate_schema.py
#!/usr/bin/env python
# usage: validate_schema.py
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2019-2020 <NAME> All Rights Reserved."
__email__ = "<EMAIL>"
__license__ = "Apache 2"
import os
import re
import json
import requests
import copy
from jsonschema import validate, Draft7Validator, FormatChecker, draft7_format_checker
# Remote JSON Schema used to validate each data model.
DATASET_SCHEMA = 'https://raw.githubusercontent.com/HDRUK/schemata/master/schema/dataset/dataset.schema.json'
# Local dump of all data models to process.
DATASETS_JSON = 'datasets.json'

# Reporting sections ("levels") and the metadata attributes in each.
# FIX: REPORTING_ATTRIBUTES and REPORTING_LEVELS were previously defined
# twice, verbatim; the redundant second definitions have been removed.
REPORTING_ATTRIBUTES = {
    "A: Summary": ['identifier', 'title', 'abstract', 'publisher', 'contactPoint', 'accessRights', 'group'],
    "B: Business": ["description", "releaseDate", "accessRequestCost", "accessRequestDuration", "dataController",
                    "dataProcessor", "license", "usageRestriction", "derivedDatasets", "linkedDataset"],
    "C: Coverage & Detail": ["geographicCoverage", "periodicity", "datasetEndDate", "datasetStartDate",
                             "jurisdiction", "populationType", "statisticalPopulation", "ageBand",
                             "physicalSampleAvailability", "keywords"],
    "D: Format & Structure": ["conformsTo", "controlledVocabulary", "language", "format", "fileSize"],
    "E: Attribution": ["creator", "citations", "doi"],
    "F: Technical Metadata": ["dataClassesCount", "tableName", "tableDescription", "columnName", "columnDescription",
                              "dataType", "sensitive"],
    "G: Other Metadata": ["usageRestriction", "purpose", "source", "setting", "accessEnvironment",
                          "linkageOpportunity", "disambiguatingDescription"],
}

# Sections included when reporting (section G is intentionally excluded).
REPORTING_LEVELS = ["A: Summary", "B: Business", "C: Coverage & Detail",
                    "D: Format & Structure", "E: Attribution", "F: Technical Metadata"]

# Minimum string lengths for a technical-metadata name / description to
# count as "filled".
TM_NAME_LEN = 2
TM_DESC_LEN = 6
def get_json(json_uri):
    """Load JSON from a dict (pass-through), a local file path, or an HTTP(S) URL.

    @param json_uri: dict, path to a JSON file, or URL starting with 'http'
    @return: the parsed JSON object
    @raise ValueError: if *json_uri* is none of the above
    """
    if isinstance(json_uri, dict):
        return json_uri
    elif os.path.isfile(json_uri):
        with open(json_uri, 'r') as json_file:
            return json.load(json_file)
    elif json_uri.startswith('http'):
        return requests.get(json_uri).json()
    else:
        # FIX: was a bare `raise Exception` with no message, which gave the
        # caller no context at all.  ValueError subclasses Exception, so
        # existing handlers still catch it.
        raise ValueError("Cannot load JSON from {!r}: not a dict, file path, or URL".format(json_uri))
def export_json(data, filename, indent=2):
    """Serialise *data* to *filename* as indented JSON."""
    with open(filename, 'w') as out:
        json.dump(data, out, indent=indent)
def validate_schema(schema, json):
    """Validate *json* against *schema* and return a list of error records.

    Each record has 'attribute' and 'message' keys; errors with context
    also get a 'suberrors' string (only the last sub-error is kept).
    NOTE(review): the parameter `json` shadows the stdlib json module
    inside this function; rename if the module is ever needed here.
    """
    schema = get_json(schema)
    json = get_json(json)
    v = Draft7Validator(schema, format_checker=draft7_format_checker)
    # Sort so errors for the same attribute are reported together.
    errors = sorted(v.iter_errors(json), key=lambda e: e.path)
    print(json['id'], ": Number of validation errors = ", len(errors))
    data = []
    for error in errors:
        err = {}
        if len(list(error.path)):
            # Error is anchored at a specific attribute of the instance.
            err['attribute'] = list(error.path)[0]
            print(err['attribute'], error.message, sep=": ")
            err['message'] = ": ".join([err['attribute'], error.message])
            for suberror in sorted(error.context, key=lambda e: e.schema_path):
                print(" ", list(suberror.schema_path)[1], ": ", suberror.message)
                # NOTE(review): overwritten each iteration -- only the last
                # sub-error survives; confirm this is intended.
                err['suberrors'] = " " + list(suberror.schema_path)[1] + ": " + suberror.message
        else:
            # Top-level error (e.g. a missing required property): recover
            # the attribute name from the quoted token in the message.
            print(error.message)
            err['attribute'] = re.findall(r"(.*?)'", error.message)[1]
            err['message'] = error.message
        data.append(err)
    return data
def validate_attribute_schema(schema, data_model):
    """ validate each attribute against JSON schema
    @param schema: JSON validation schema
    @param data_model: uploaded data model
    @return: dictionary with all schema errors
    """
    schema = get_json(schema)
    v = Draft7Validator(schema, format_checker=draft7_format_checker)
    # Sort so errors for the same attribute are grouped together.
    errors = sorted(v.iter_errors(data_model), key=lambda e: e.path)
    print(data_model['id'], ": Number of validation errors = ", len(errors))
    # Maps attribute name -> list of error messages for that attribute.
    err = {}
    for error in errors:
        if len(list(error.path)):
            # Error anchored at a specific attribute of the instance.
            attribute = list(error.path)[0]
            err.setdefault(attribute, []).append(error.message)
            print(attribute, error.message, sep=": ")
            # err['attribute'] = list(error.path)[0]
            # err['message'] = ": ".join([err['attribute'], error.message])
            # for suberror in sorted(error.context, key=lambda e: e.schema_path):
            #     print("    ", list(suberror.schema_path)[1], ": ", suberror.message)
            #     err['suberrors'] = "    " + list(suberror.schema_path)[1] + ": " + suberror.message
        else:
            # Top-level error: recover the attribute name from the quoted
            # token in the message text.
            print(error.message)
            attribute = re.findall(r"(.*?)'", error.message)[1]
            err.setdefault(attribute,[]).append(error.message)
            # err['attribute'] = re.findall(r"(.*?)'", error.message)[1]
            # err['message'] = error.message
    return err
def generate_baseline_from_sections(metadata_sections, metadata_levels=None):
    '''
    generate the baseline schema from METADATA_SECTIONS, a dictionary of dictionaries
    @param metadata_sections: reporting levels and attributes
    @param metadata_levels: list of reporting levels
    @return: dictionary with reporting levels and reporting attributes
    '''
    # NOTE(review): this definition is shadowed by a later redefinition of
    # the same name (with different default arguments) further down this
    # module, so only the later one is in effect at runtime.  Consider
    # removing one of the two.
    baseline_dict = {}
    raw_attributes = generate_attribute_list(metadata_sections, metadata_levels=metadata_levels, add_id=True)
    baseline_dict = {attribute: None for attribute in raw_attributes}
    return baseline_dict
def generate_attribute_list(metadata_sections=REPORTING_ATTRIBUTES, metadata_levels=REPORTING_LEVELS, add_id=True):
    '''
    Flatten the reporting sections into a single list of attribute names.

    @param metadata_sections: reporting levels and attributes
    @param metadata_levels: list of reporting levels; if falsy, every
           section's attributes are included
    @param add_id: prepend 'id' to the resulting list
    @return: list of all reporting attributes
    '''
    if metadata_levels:
        attributes = [attr
                      for level in metadata_levels
                      for attr in metadata_sections.get(level, [])]
    else:
        attributes = [attr
                      for section in metadata_sections.values()
                      for attr in section]
    if add_id:
        attributes.insert(0, 'id')
    return attributes
def import_dm_tm(datamodel_uri):
    """
    Load the data-models document and attach technical-metadata summaries.

    Every model that declares at least one data class gets a
    'technicalMetaDataValidation' entry computed from its dataClasses.

    @param datamodel_uri: dataset URI, file path, or dict
    @return: all datasets as a list of JSON/dicts
    """
    data_models = get_json(datamodel_uri)
    models_with_metadata = 0
    for model in data_models['dataModels']:
        if model.get('dataClassesCount', 0) > 0:
            model['technicalMetaDataValidation'] = process_technical_metadata(model.get('dataClasses', []))
            models_with_metadata += 1
    return data_models
def process_technical_metadata(data_classes):
    """
    Summarise technical-metadata completeness for a data-model's classes.

    Counts, per table (data class) and in aggregate, how many names,
    descriptions and data types meet the minimum lengths TM_NAME_LEN /
    TM_DESC_LEN.

    @param data_classes: uploaded data-classes for a data-model
    @return: dictionary containing technical metadata
    """
    technical_md = {}
    technical_md['tableCount'] = len(data_classes)
    technical_md['tableNames'] = 0
    technical_md['tableDescriptions'] = 0
    technical_md['columnCount'] = 0
    technical_md['columnNames'] = 0
    technical_md['columnDescriptions'] = 0
    technical_md['dataTypes'] = 0
    technical_md['sensitive'] = 0
    technical_md['tables'] = []
    for dc in data_classes:
        table_md = {}
        table_md['table'] = dc.get('label', dc.get('id', '0'))
        table_md['columnCount'] = len(dc.get('dataElements', []))
        table_md['columnNames'] = 0
        table_md['columnDescriptions'] = 0
        table_md['dataTypes'] = 0
        if len(str(dc.get('label', ''))) >= TM_NAME_LEN:
            table_md['tableName'] = 1
            technical_md['tableNames'] += 1
        else:
            # FIX: was `table_md['tableNames'] = 0`, which set a different
            # key than the if-branch ('tableNames' vs 'tableName'), leaving
            # the per-table record inconsistent.
            table_md['tableName'] = 0
        if len(str(dc.get('description', ''))) >= TM_DESC_LEN:
            table_md['tableDescription'] = 1
            technical_md['tableDescriptions'] += 1
        else:
            table_md['tableDescription'] = 0
        for de in dc.get('dataElements', []):
            technical_md['columnCount'] += 1
            if len(str(de.get('label', ''))) >= TM_NAME_LEN:
                table_md['columnNames'] += 1
                technical_md['columnNames'] += 1
            if len(str(de.get('description', ''))) >= TM_DESC_LEN:
                table_md['columnDescriptions'] += 1
                technical_md['columnDescriptions'] += 1
            if len(list(de.get('dataType', []))) > 0:
                table_md['dataTypes'] += 1
                technical_md['dataTypes'] += 1
        technical_md['tables'].append(table_md)
    return technical_md
def check_attribute_completeness(dm, metadata_sections=REPORTING_ATTRIBUTES, reporting_levels=REPORTING_LEVELS):
    """
    Count populated (non-None) data-model attributes per reporting level.

    'dataClassesCount' counts as populated only when non-zero; every other
    attribute counts whenever it is not None.

    @param dm: data-model
    @param metadata_sections: reporting attributes and levels
    @param reporting_levels: reporting levels
    @return: dictionary with completeness for each attribute and level
    """
    reporting_dict = init_reporting_dict(metadata_sections=metadata_sections,
                                         reporting_levels=reporting_levels,
                                         txt='filled_attributes')
    total_populated = 0
    for level in reporting_levels:
        level_total = 0
        for attr in reporting_dict[level]:
            if attr in ('filled_attributes', 'total_attributes'):
                continue  # bookkeeping keys, not real attributes
            if attr == "dataClassesCount":
                filled = 0 if dm.get(attr, 0) == 0 else 1
            else:
                filled = 1 if dm.get(attr, None) is not None else 0
            reporting_dict[level][attr] = filled
            level_total += filled
            total_populated += filled
        reporting_dict[level]['filled_attributes'] = level_total
    reporting_dict['filled_attributes'] = total_populated
    return reporting_dict
def check_dm_completeness(data_models):
    """
    Score every data model for attribute completeness.

    Each model is deep-copied, reduced to the baseline attribute set and
    scored via check_attribute_completeness().

    @param data_models: document holding all uploaded data models
    @return: list of per-model dictionaries (id/publisher/title + scores)
    """
    schema = generate_baseline_from_sections(REPORTING_ATTRIBUTES, REPORTING_LEVELS, True)
    data = []
    for data_model in data_models['dataModels']:
        dm = copy.deepcopy(data_model)
        print("Processing:", dm['id'])
        entry = {
            'id': dm.get('id', None),
            'publisher': dm.get('publisher', None),
            'title': dm.get('title', None),
        }
        compute_tech_md_completeness(dm)
        # Drop anything outside the baseline schema before scoring.
        for attribute in (set(dm.keys()) - set(schema.keys())):
            dm.pop(attribute, None)
        merged = copy.deepcopy(schema)
        merged.update(dm)
        entry.update(check_attribute_completeness(merged))
        data.append(entry)
    return data
def check_attribute_validation(data_models, metadata_sections=REPORTING_ATTRIBUTES, reporting_levels=REPORTING_LEVELS):
    """
    Validate each data-model against the JSON schema and count errors.

    Level "F: Technical Metadata" error flags come from the counters set by
    compute_tech_md_validation(), not from the JSON schema; all other levels
    are flagged from the per-attribute errors returned by
    validate_attribute_schema().

    @param data_models: data-models for validation
    @param metadata_sections: reporting levels and attributes
    @param reporting_levels: reporting attributes
    @return: list of per-model dictionaries with error flags per attribute/level
    """
    schema = get_json(DATASET_SCHEMA)
    validation_attributes = set(generate_attribute_list(metadata_sections, reporting_levels))
    data = []
    for dm in data_models['dataModels']:
        # FIX: removed a dead `total_errors, level_errors = 0, 0` here;
        # both counters are (re)initialised below before first use.
        dm_validate = copy.deepcopy(dm)
        compute_tech_md_validation(dm_validate)
        # Restrict to the attributes we report on before schema validation.
        for attribute in (set(dm_validate.keys()) - validation_attributes):
            dm_validate.pop(attribute, None)
        errors = validate_attribute_schema(schema, dm_validate)
        d = {
            'id': dm.get('id', None),
            'publisher': dm.get('publisher', None),
            'title': dm.get('title', None)
        }
        reporting_dict = init_reporting_dict(metadata_sections=metadata_sections,
                                             reporting_levels=reporting_levels,
                                             txt='attributes_with_errors')
        total_errors = 0
        for level in reporting_levels:
            level_errors = 0
            if "F: Technical Metadata" == level:
                for k in reporting_dict[level].keys():
                    if 'dataClassesCount' == k:
                        # Flag an error when the model has one data class or fewer.
                        i = dm_validate.get(k, 0)
                        reporting_dict[level][k] = int(1 - (i > 1))
                    elif 'attributes_with_errors' == k:
                        continue
                    elif 'total_attributes' == k:
                        continue
                    else:
                        reporting_dict[level][k] = dm_validate.get(k, 0)
                    level_errors += reporting_dict[level][k]
                    total_errors += reporting_dict[level][k]
            else:
                for k in reporting_dict[level].keys():
                    if 'attributes_with_errors' == k:
                        continue
                    elif 'total_attributes' == k:
                        continue
                    else:
                        if k in errors:
                            # FIX: dropped an unused debug assignment
                            # (`zzz_debug = errors[k]`) that had no effect.
                            reporting_dict[level][k] = 1
                            level_errors += 1
                            total_errors += 1
            reporting_dict[level]['attributes_with_errors'] = level_errors
        d.update(reporting_dict)
        d['attributes_with_errors'] = total_errors
        data.append(d)
    return data
def generate_baseline_from_sections(metadata_sections=REPORTING_ATTRIBUTES, metadata_levels=REPORTING_LEVELS, add_id=True):
    '''
    Build a baseline schema mapping every reporting attribute to None.

    @param metadata_sections: reporting levels and attributes
    @param metadata_levels: reporting attributes
    @param add_id: add ID field to levels
    @return: dictionary including all attributes
    '''
    attributes = generate_attribute_list(metadata_sections, metadata_levels, add_id)
    return {attribute: None for attribute in attributes}
def compute_tech_md_completeness(data_model):
    """
    Derive 0/1 completeness flags from the technical-metadata counters.

    Mutates *data_model* in place; a no-op for models without data classes.
    'sensitive' cannot be derived from the counters, so it is set to None.

    @param data_model: uploaded data-model
    """
    if data_model.get('dataClassesCount', 0) < 1:
        return
    counters = data_model.get('technicalMetaDataValidation', {})
    flag_to_counter = {
        'tableName': 'tableNames',
        'tableDescription': 'tableDescriptions',
        'columnName': 'columnNames',
        'columnDescription': 'columnDescriptions',
        'dataType': 'dataTypes',
    }
    for flag, counter in flag_to_counter.items():
        data_model[flag] = 1 if counters.get(counter, 0) > 0 else 0
    data_model['sensitive'] = None
def init_reporting_dict(metadata_sections = REPORTING_ATTRIBUTES, reporting_levels = REPORTING_LEVELS, txt='attribute_reporting'):
    """
    Build a zeroed dictionary mirroring the reporting levels and attributes.

    Each level maps its attributes to 0 plus two bookkeeping keys: *txt*
    (the per-level aggregate) and 'total_attributes'.  The top level
    carries the same two keys for the whole structure.

    @param metadata_sections: reporting levels and attributes
    @param reporting_levels: reporting attributes
    @param txt: name for aggregation field
    @return: reporting attribute dictionary
    """
    reporting_dict = {}
    attribute_count = 0
    for level in reporting_levels:
        attrs = metadata_sections[level]
        level_dict = dict.fromkeys(attrs, 0)
        level_dict[txt] = 0
        level_dict['total_attributes'] = len(attrs)
        attribute_count += len(attrs)
        reporting_dict[level] = level_dict
    reporting_dict[txt] = 0
    reporting_dict['total_attributes'] = attribute_count
    return reporting_dict
def compute_tech_md_validation(data_model):
    """
    Flag technical-metadata attributes whose counters fall short of 100%.

    For each attribute, 0 means every table/column passed the length check
    and 1 means at least one did not.  'sensitive' is always flagged.
    Mutates *data_model* in place; a no-op for models without data classes.

    @param data_model: uploaded data-model
    """
    if data_model.get('dataClassesCount', 0) < 1:
        return
    counters = data_model.get('technicalMetaDataValidation', {})
    tables = counters.get('tableCount', 0)
    columns = counters.get('columnCount', 0)
    checks = [
        ('tableName', 'tableNames', tables),
        ('tableDescription', 'tableDescriptions', tables),
        ('columnName', 'columnNames', columns),
        ('columnDescription', 'columnDescriptions', columns),
        ('dataType', 'dataTypes', columns),
    ]
    for flag, counter, expected in checks:
        data_model[flag] = 0 if counters.get(counter, 0) == expected else 1
    data_model['sensitive'] = 1
def flatten_reporting_dict(data_models):
    """
    Flatten one level of nesting so the reports can be written as .csv rows.

    Nested keys are emitted as "<outer>, <inner>" (the original's
    short-key variant was disabled via a commented-out counter increment,
    so every nested key uses the long form).  *headers* collects the union
    of all flattened keys in first-seen order.

    @param data_models: list of (possibly nested) per-model dictionaries
    @return: (flat dictionaries, ordered header list)
    """
    headers = []
    data = []
    for dm in data_models:
        flat_dm = {}
        for key, value in dm.items():
            if isinstance(value, dict):
                for inner_key, inner_value in value.items():
                    flat_key = f"{key}, {inner_key}"
                    flat_dm[flat_key] = inner_value
                    if flat_key not in headers:
                        headers.append(flat_key)
            else:
                flat_dm[key] = value
                if key not in headers:
                    headers.append(key)
        data.append(flat_dm)
    return data, headers
def main():
    """Load the datasets, then emit completeness and schema-error reports."""
    data_models = import_dm_tm(DATASETS_JSON)

    # Metadata completeness report.
    completeness = check_dm_completeness(data_models)
    export_json(completeness, 'reports/attribute_completeness.json')
    flatten_reporting_dict(completeness)

    # Schema-validation error report.
    errors = check_attribute_validation(data_models)
    export_json(errors, 'reports/attribute_errors.json')


if __name__ == "__main__":
    main()
| 1.726563 | 2 |