max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
data extraction/pipeline/other_journals.py | 1v4n-poi/SuperalloyDigger | 0 | 12767951 | <reponame>1v4n-poi/SuperalloyDigger
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 09:13:02 2020
@author: Weiren
"""
import os
import xlwt
from Phrase_parse import PhraseParse
from Relation_extraciton_orig import RelationExtraciton
from T_pre_processor import TPreProcessor
from file_io import File_IO as FI
from get_all_attributes import AllAttributes
from get_full_text import FilterText
from log_wp import LogWp
from pre_processor import PreProcessor
from sentence_positioner import SentencePositioner
class OtherJ():
    """Extraction pipeline for 'other' journals: collect full texts, locate
    property-bearing sentences, extract (alloy, property, value) triples and
    write them to Excel."""

    def __init__(self):
        pass

    def mkdir(self, file_name):
        """(Re)create directory *file_name* under the CWD, emptying it first.

        Any existing directory of that name (and its contents) is removed,
        then a fresh empty directory is created.
        """
        pathd = os.path.join(os.getcwd(), file_name)
        if os.path.exists(pathd):
            # Walk bottom-up so files are removed before their parent dirs.
            for root, dirs, files in os.walk(pathd, topdown=False):
                for name in files:
                    os.remove(os.path.join(root, name))
                for name in dirs:
                    os.rmdir(os.path.join(root, name))
            os.rmdir(pathd)
        os.mkdir(pathd)

    def relation_extraction(self, C_path, origin_text_path, prop_name,
                            triple_path, out_path, m_path):
        """Run the sentence-location and triple-extraction pipeline.

        C_path           : configuration path handed to all sub-components
        origin_text_path : directory with the raw article texts
        prop_name        : name of the target property to extract
        triple_path      : output .xls file receiving the triples
        out_path         : output path handed to AllAttributes
        m_path           : working directory (gets "full_text/" and "sent.xls")
        """
        log_wp = LogWp()
        # The path to the folder where the full-text text is stored
        text_path = os.path.join(m_path, "full_text")
        # Locate the obtained target corpus
        TS_path = os.path.join(m_path, "sent.xls")
        # Filter to get the full text
        FT = FilterText(origin_text_path, text_path)
        txt_name, dois = FT.process()
        # Get the target corpus: one entry of located sentences per file.
        all_x = []
        txt_name2 = []
        # Hoisted: the original re-ran os.listdir() on every iteration.
        file_names = os.listdir(text_path)
        for fname in file_names:
            n_path = os.path.join(text_path, fname)
            with open(n_path, 'r', encoding='utf-8') as file:
                data = file.read()
            pre_processor = PreProcessor(data, C_path)
            filter_data = pre_processor.pre_processor()
            processor = TPreProcessor(filter_data, prop_name, C_path)
            filter_data = processor.processor()
            positioner = SentencePositioner(filter_data, prop_name, C_path)
            target_sents = positioner.target_sent()
            all_x.append(str(target_sents))
            txt_name2.append(n_path)
        FI_out = FI(all_x, TS_path, txt_name2)
        FI_out.out_to_excel()
        # Extraction of triples
        data = FI_out.data_from_excel()
        xls = xlwt.Workbook()
        sht2 = xls.add_sheet("triple_extracion")  # sheet name kept as-is
        triple_lines = 0   # next free row for the doi/triple columns (0, 2-4)
        file_index = 0     # index into `dois` (one entry per document)
        num_of_lines = 0   # next free row for the sentence column (1)
        for item in data:
            doi = dois[file_index].replace("doi:", "")
            sht2.write(triple_lines, 0, doi)
            if item != []:
                for sent in item:
                    # Fix: reset per sentence; the original accumulated
                    # across sentences and rewrote/over-advanced rows.
                    out_unit = []
                    processor = TPreProcessor(sent, prop_name, C_path)
                    filter_data = processor.processor()
                    parse = PhraseParse(filter_data, prop_name, C_path)
                    sub_order, sub_id, object_list = parse.alloy_sub_search()
                    RE = RelationExtraciton(prop_name, filter_data, sub_order,
                                            sub_id, object_list, C_path)
                    all_outcome = RE.triple_extraction()
                    if not all_outcome:
                        sht2.write(triple_lines, 2, 'no target triples')
                        sht2.write(triple_lines, 3, 'None')
                        sht2.write(triple_lines, 4, 'None')
                        sht2.write(num_of_lines, 1, 'no target sentence')
                        num_of_lines += 1
                        triple_lines += 1
                        # Fix: skip to the next sentence instead of falling
                        # through and advancing the counters a second time.
                        continue
                    for v in all_outcome.values():
                        out_unit.append(v)
                    # Repeat the sentence once per extracted triple.
                    for n in range(len(out_unit)):
                        sht2.write(num_of_lines + n, 1, sent)
                    num_of_lines += len(out_unit)
                    # Each triple is assumed to be a 3-element sequence
                    # (subject, relation, object) -- TODO confirm against
                    # RelationExtraciton.triple_extraction().
                    for s in range(len(out_unit)):
                        sht2.write(triple_lines + s, 2, out_unit[s][0])
                        sht2.write(triple_lines + s, 3, out_unit[s][1])
                        sht2.write(triple_lines + s, 4, out_unit[s][2])
                    triple_lines += len(out_unit)
                file_index += 1
            else:
                # Document without any located sentence: placeholder row.
                sht2.write(triple_lines, 2, 'no target triples')
                sht2.write(triple_lines, 3, 'None')
                sht2.write(triple_lines, 4, 'None')
                sht2.write(num_of_lines, 1, 'no target sentence')
                num_of_lines += 1
                triple_lines += 1
                file_index += 1
        log_wp.excel_save(xls, triple_path)
        attributes = AllAttributes(prop_name, txt_name, text_path, triple_path,
                                   out_path, C_path, dois)
        attributes.get_toexcel()
| 1.835938 | 2 |
course_access_groups/filters.py | appsembler/course-cccess-groups | 4 | 12767952 | """
DRF ViewSet filters.
"""
import django_filters
from django.contrib.auth import get_user_model
from .openedx_modules import CourseOverview
class UserFilter(django_filters.FilterSet):
    """Query-string filters for User list endpoints.

    - ``email_exact``: case-insensitive exact match on ``email``.
    - ``group``: users whose membership points at the given group id.
    - ``no_group``: when true, users with no membership row at all.
    """
    email_exact = django_filters.CharFilter('email', lookup_expr='iexact')
    group = django_filters.NumberFilter('membership__group_id')
    no_group = django_filters.BooleanFilter('membership__id', lookup_expr='isnull')

    class Meta:
        model = get_user_model()
        fields = ['email_exact', 'group', 'no_group']
class CourseOverviewFilter(django_filters.FilterSet):
    """Query-string filters for CourseOverview list endpoints.

    - ``group``: courses linked to the given access group id.
    - ``no_group``: when true, courses with no group link.
    - ``is_public``: ``exclude=True`` inverts the ``isnull`` lookup, so
      ``is_public=true`` presumably keeps courses that DO have a
      ``public_course`` relation -- verify against the PublicCourse model.
    """
    group = django_filters.NumberFilter('group_courses__group_id')
    no_group = django_filters.BooleanFilter('group_courses', lookup_expr='isnull')
    is_public = django_filters.BooleanFilter('public_course', lookup_expr='isnull', exclude=True)

    class Meta:
        model = CourseOverview
        fields = ['group', 'no_group', 'is_public']
| 2.125 | 2 |
rosegameLibraryDev/rg_sprite.py | ConnectingWithCode/cwc2022_SOLUTION | 0 | 12767953 | <reponame>ConnectingWithCode/cwc2022_SOLUTION
import pygame
import rosegame as rg
def setup(game):
    """One-time init: load the fighter sprite at its start position."""
    print("Setup stuff")
    # (40, 300) is the fighter's starting screen position.
    game.my_fighter = rg.Sprite(game, "images/fighter.png", 40, 300)
def loop(game):
    """Per-frame update: move the fighter with the arrow keys, then draw it.

    Each held arrow key contributes a fixed 5-pixel step; multiple keys may
    be held at once (e.g. diagonal movement).
    """
    key_steps = (
        (pygame.K_UP, 0, -5),
        (pygame.K_DOWN, 0, 5),
        (pygame.K_LEFT, -5, 0),
        (pygame.K_RIGHT, 5, 0),
    )
    for key, dx, dy in key_steps:
        if game.is_key_pressed(key):
            game.my_fighter.move(dx, dy)
    game.my_fighter.draw()
# Start the game: rosegame calls setup() once, then loop() every frame.
rg.init(setup, loop)
| 3.09375 | 3 |
test.py | dzoladz/mailman3-docker | 1 | 12767954 | #!/usr/bin/env python3
import sys
from pathlib import Path
from build import VARIANTS
DOCKER_TEST="""version: '2'
services:
mailman-core:
image: core-{variant}
mailman-web:
image: web-{variant}
environment:
- SECRET_KEY=<KEY>
"""
def test_setup(variant):
Path('docker-test.yaml').write_text(
DOCKER_TEST.format(variant=variant))
def usage():
print('usage: python test.py (stable|rolling)')
if __name__ == '__main__':
    # Expect exactly one positional argument naming the variant to test.
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)
    variant = sys.argv[1]
    # Reject anything not declared in build.VARIANTS.
    if variant not in VARIANTS:
        usage()
        sys.exit(1)
    test_setup(variant)
| 2.1875 | 2 |
py/plot_rcdistancecomparison.py | jobovy/apogee-maps | 1 | 12767955 | import sys
import numpy
from scipy import special
import statsmodels.api as sm
from galpy.util import bovy_plot
import define_rcsample
def plot_rcdistancecomparison(plotfilename):
    """Plot the fractional difference between H- and Ks-band RC distances.

    Saves a conditional scatter plot (with a LOWESS trend line) of
    (RC_DIST_H - RC_DIST) / RC_DIST versus RC_DIST to *plotfilename*.
    """
    # Get the sample
    rcdata= define_rcsample.get_rcsample()
    # Now plot the difference
    bovy_plot.bovy_print()
    # 1- and 2-sigma contour levels
    levels= special.erf(numpy.arange(1,3)/numpy.sqrt(2.))
    bovy_plot.scatterplot(rcdata['RC_DIST'],
                          (rcdata['RC_DIST_H']-rcdata['RC_DIST'])/rcdata['RC_DIST'],
                          conditional=True,
                          levels=levels,
                          linestyle='none',color='k',marker=',',
                          xrange=[0.,7.49],yrange=[-0.075,0.075],
                          xlabel=r'$M_{K_s}\!-\!\mathrm{based\ distance\,(kpc)}$',
                          ylabel=r'$\mathrm{Fractional\ difference\ of}\ M_H\ \mathrm{vs.}\ M_{K_s}$',
                          onedhistx=True,bins=31)
    # Zero-difference reference line
    bovy_plot.bovy_plot([0.,10.],[0.,0.],'--',lw=2.,color='0.75',overplot=True)
    # Plot lowess
    lowess= sm.nonparametric.lowess
    z= lowess((rcdata['RC_DIST_H']-rcdata['RC_DIST'])/rcdata['RC_DIST'],
              rcdata['RC_DIST'],frac=.3)
    bovy_plot.bovy_plot(z[:,0],z[:,1],'w--',lw=2.,overplot=True)
    bovy_plot.bovy_end_print(plotfilename)
    return None


if __name__ == '__main__':
    # Usage: python plot_rcdistancecomparison.py <output plot filename>
    plot_rcdistancecomparison(sys.argv[1])
| 2.5 | 2 |
snippets/is_equal_as_previous.py | w13b3/do_not_use | 0 | 12767956 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# is_equal_as_previous.py
"""
is_equal_as_previous is made to be called multiple times
is_equal_as_previous saves the previous value and compares it with the current value
thus is_equal_as_previous can detect changes in sequences
when is_equal_as_previous is called for the first time it returns True
when is_equal_as_previous is called again with the same value as parameter it returns True
when is_equal_as_previous is called again with another value as parameter it returns False
when is_equal_as_previous is called again with the same value as the previous time it returns True
after is_equal_as_previous is called it has two attributes: current and previous
is_equal_as_previous.current returns the latest given value
is_equal_as_previous.previous returns the value before the latest given value
is_equal_as_previous can also be used just to compare the current with the previous
when the keyword-only parameter just_check is set to True, the stored previous value is kept unchanged
"""
# avg. of 1_000_000: 0.322166
def is_equal_as_previous1(current: object, *, just_check: bool = False) -> bool:
    """Return True when *current* equals the previously supplied value.

    The very first call always returns True.  After every call the function
    carries two attributes: ``current`` (the latest value) and ``previous``
    (the value supplied before it).  With ``just_check=True`` the stored
    comparison value is left untouched for the next call.
    """
    self = is_equal_as_previous1       # the function object holds the state
    if not hasattr(self, '_memory'):
        # First call: nothing to compare against yet, seed the memory so
        # the comparison below trivially succeeds.
        self._memory = current
    result = bool(current == self._memory)
    self.previous = self._memory       # expose is_equal_as_previous1.previous
    self.current = current             # expose is_equal_as_previous1.current
    if not just_check:
        self._memory = current         # remember for the next invocation
    return result


# monkey patch: expose variant 1 under the generic name
is_equal_as_previous = is_equal_as_previous1
# avg. of 1_000_000: 0.342118
def is_equal_as_previous2(current: object, *, just_check: bool = False) -> bool:
    """ check if the given current value is the same as the previous received value """
    self = is_equal_as_previous2
    __memory = current  # set memory as current to prevent UnboundLocalError
    try:
        __memory = self.prev_cur[1]  # take and assign the second of the list
        self.prev_cur = [__memory, current]  # create a new list
        return bool(__memory == current)  # equality test
    except AttributeError:
        # First call ever: prev_cur does not exist yet.
        self.prev_cur = [current, current]  # fill the prev_cur list
        self.previous = self.current = current  # first call set attributes
        return self(current)  # first call is always True
    finally:
        # NOTE(review): unlike variant 1, with just_check=True this skips
        # updating the previous/current attributes while prev_cur was
        # already replaced above -- confirm whether that asymmetry with
        # is_equal_as_previous1 is intentional.
        if not bool(just_check):  # keep the previous if just_check is set to True
            self.previous, self.current = __memory, current
# avg. of 1_000_000: 1.250343
def is_equal_as_previous3(current: object, *, just_check: bool = False) -> bool:
    """ check if the given current value is the same as the previous received value """
    self = is_equal_as_previous3  # self refers to this function

    def get_previous() -> object:
        """ get the memory attribute that represents the previous value """
        if not hasattr(self, '__memory'):
            set_previous(current)  # take current from main func. and set it as memory
        return self.__memory

    def set_previous(replacement: object):
        """ set a memory attribute that represents the previous value """
        self.__memory = replacement

    def equal_as_previous(current_value: object, *, only_check: bool = False) -> bool:
        """ test the given value against the previously given value """
        # get_previous() seeds the memory on the first-ever call, which is
        # why the first call always compares equal.
        previous_value = get_previous()
        _equality_test = bool(previous_value == current_value)
        # only checking doesn't set previous or re-assign the attributes
        if not bool(only_check):
            set_previous(current_value)
            self.previous, self.current = previous_value, current_value
        # assure that even the first call has the attributes
        if any(not hasattr(self, _) for _ in ('previous', 'current')):
            self.previous, self.current = previous_value, current_value
        return _equality_test  # -> bool

    return equal_as_previous(current, only_check=just_check)
if __name__ == '__main__':
    # Demo: exercise variant 1; switch the assignment to try the others.
    eq = is_equal_as_previous1
    # eq = is_equal_as_previous2
    # eq = is_equal_as_previous3

    print("the function is not called and thus has no attributes")
    try:
        eq.previous
    except AttributeError:
        print("is_same_as_prev.previous doesn't yet exist")
    try:
        eq.current
    except AttributeError:
        print("is_same_as_prev.current doesn't yet exist")

    print("call is_same_as_prev and print the attributes and the result")
    first: bool = eq(True)
    assert first == True, "the first call of is_same_as_prev is always True"
    print(f"previous: {eq.previous}, current: {eq.current} -> {first}")
    second: bool = eq(False)
    print(f"previous: {eq.previous}, current: {eq.current} -> {second}")
    third: bool = eq(False)
    print(f"previous: {eq.previous}, current: {eq.current} -> {third}")

    print("keep the previous a.k.a. just check")
    four: bool = eq(..., just_check=True)
    print(f"previous: {eq.previous}, current: {eq.current} -> {four}")
    five: bool = eq(None, just_check=True)
    print(f"previous: {eq.previous}, current: {eq.current} -> {five}")

    print("endless loop:")
    import time
    from itertools import cycle
    # NOTE(review): this loop never terminates (interrupt with Ctrl-C),
    # so the timeit section below is unreachable as written.
    for cur in cycle([1, 1, 2, 2, 3]):
        result = eq(cur)
        print(f"current: {cur} is same as prev.: {eq.previous}", end="\n" if result else " ")
        if not result:
            print(f"-> {eq.previous} == {eq.current}")
        time.sleep(1)

    import timeit
    # setup = "from __main__ import is_equal_as_previous1, is_equal_as_previous2, is_equal_as_previous3"
    # print('is_equal_as_previous1', timeit.timeit("is_equal_as_previous1(None)", setup, number=1_000_000))
    # print('is_equal_as_previous2', timeit.timeit("is_equal_as_previous2(None)", setup, number=1_000_000))
    # print('is_equal_as_previous3', timeit.timeit("is_equal_as_previous3(None)", setup, number=1_000_000))
| 4.09375 | 4 |
vmapp/users/tests.py | codeasap-pl/vmapp | 0 | 12767957 | from django.test import TestCase
from django.db.utils import IntegrityError
from django.core.validators import ValidationError
from users.models import User
from domains.models import Domain
class TestUsers(TestCase):
    """Model-level tests for users.models.User: creation constraints,
    username/password validation, MB<->bytes quota handling and password
    hashing.  ("<PASSWORD>" / "<KEY>" literals are dataset redactions of
    the original fixture strings.)"""

    def test_create_invalid(self):
        # A User with no fields violates NOT NULL constraints.
        with self.assertRaises(IntegrityError):
            User.objects.create()

    def test_create_invalid_domain(self):
        # An unsaved Domain (no primary key) cannot be used as an FK target.
        domain = Domain(domain="this-is-never-saved")
        with self.assertRaises(ValueError):
            User.objects.create(
                domain=domain,
                username="test-create",
                password="<PASSWORD>",
            )

    def test_username_missing(self):
        domain = Domain.objects.create(domain="username-missing")
        with self.assertRaises(ValidationError):
            user = User(
                domain=domain,
                username="",
                password="<PASSWORD>",
            )
            user.full_clean()

    def test_username_too_long(self):
        domain = Domain.objects.create(domain="username-too-long.localhost")
        with self.assertRaises(ValidationError):
            user = User(
                domain=domain,
                username="a" * 128,
                password="<PASSWORD>",
            )
            user.full_clean()

    def test_create_valid(self):
        domain = Domain.objects.create(domain="create-valid.localhost")
        user = User(
            domain=domain,
            username="test-create-valid",
            password="<PASSWORD>",
            quota=12345,
            is_enabled=True,
        )
        user.full_clean()
        user.save()
        self.assertTrue(user.id, "saved")

    def test_quota(self):
        domain = Domain.objects.create(domain="test-quota.localhost")
        user = User(
            domain=domain,
            username="test-quota",
            password="<PASSWORD>",
        )
        # Negative and non-numeric quotas must fail validation.
        user.quota = -10
        with self.assertRaises(ValidationError):
            user.full_clean()
        user.quota = "abcd"
        with self.assertRaises(ValidationError):
            user.full_clean()
        # Quota is given in megabytes; saving converts it to bytes.
        user.quota = 100
        user.save()
        self.assertEqual(user.quota, 100 * 1024 * 1024, "Quota in bytes")

    def test_from_db(self):
        domain = Domain.objects.create(domain="test-from-db.localhost")
        user = User.objects.create(
            domain=domain,
            username="test-from-db",
            password="<PASSWORD>",
            quota=100,
        )
        self.assertEqual(user.quota, 100 * 1024 * 1024, "Quota in bytes")
        # Re-loading converts back to megabytes.
        user = User.objects.get(pk=user.id)
        self.assertEqual(user.quota, 100, "Quota in megabytes")

    def test_is_enabled(self):
        domain = Domain.objects.create(domain="test-is-enabled.localhost")
        user = User.objects.create(
            domain=domain,
            username="test-is-enabled",
            password="<PASSWORD>",
            quota=123456,
        )
        self.assertTrue(user.is_enabled, "Default: is_enabled")
        user.is_enabled = False
        user.save()
        user.refresh_from_db()
        self.assertFalse(user.is_enabled, "Default: is_enabled")

    def test_password_validation(self):
        domain = Domain.objects.create(domain="test-password.localhost")
        # too short
        with self.assertRaises(ValidationError):
            user = User(
                domain=domain,
                username="test-password-validation",
                password="a" * 7,
            )
            user.full_clean()
        # too long
        with self.assertRaises(ValidationError):
            user = User(
                domain=domain,
                username="test-password-validation",
                password="a" * 257,
            )
            user.full_clean()

    def test_password(self):
        password = "<PASSWORD>"
        domain = Domain.objects.create(domain="test-password.localhost")
        user = User.objects.create(
            domain=domain,
            username="test-password",
            password=password,
            quota=123456,
        )
        user.refresh_from_db()
        # Fix: assertNotIn(member, container) -- the plaintext must not
        # appear inside the stored hash (the original arguments were
        # reversed, making the assertion trivially true).
        self.assertNotIn(password, user.password, "Hashed")
        self.assertTrue(
            user.password.startswith("{SHA512-CRYPT}$6$"),
            "Crypted, salted, SHA-512"
        )

    def test_hashed_password(self):
        password = "<PASSWORD>"
        domain = Domain.objects.create(domain="test-hashed-password.localhost")
        user = User.objects.create(
            domain=domain,
            username="test-hashed-password",
            password=password,
            quota=123456,
        )
        user.refresh_from_db()
        # Fix: argument order (see test_password).
        self.assertNotIn(password, user.password, "Hashed")
        hashed = user.password
        self.assertTrue(
            hashed.startswith("{SHA512-CRYPT}$6$"),
            "Crypted, salted, SHA-512"
        )
        # UPDATE RECORD, password should not be changed.
        user.quota = 123
        user.save()
        user.refresh_from_db()
        self.assertEqual(user.password, hashed, "Hashed passw does not change")
        # Change password.
        new_password = password + password
        # Fix: the original line was corrupted by the dataset redaction
        # ("user.password = <PASSWORD>", a syntax error); the surrounding
        # assertions show the new plaintext is assigned here.
        user.password = new_password
        user.save()
        user.refresh_from_db()
        self.assertNotEqual(user.password, hashed, "Hashed changed OK")
        self.assertNotIn(new_password, user.password, "Updated and hashed")
        self.assertTrue(
            user.password.startswith("{SHA512-CRYPT}$6$"),
            "Crypted, salted, SHA-512"
        )

    def test_str(self):
        domain = Domain.objects.create(domain="test-str.localhost")
        user = User.objects.create(
            domain=domain,
            username="test-str",
            password="<PASSWORD>",
        )
        # NOTE(review): "<EMAIL>" is a redaction artifact; presumably the
        # original expected "test-str@test-str.localhost" -- confirm against
        # User.email() before relying on this literal.
        expected = "test-str<EMAIL>"
        self.assertEqual(user.email(), expected, "email()")
        self.assertEqual(str(user), expected, "__str__()")
        self.assertEqual(str(user), user.email(), "__str__() calls email()")

    def test_unique_violation(self):
        domain = Domain.objects.create(domain="test-unique.localhost")
        values = dict(
            domain=domain,
            username="test-unique",
            password="<PASSWORD>",
        )
        User.objects.create(**values)
        # Same (domain, username) pair must be rejected.
        with self.assertRaises(IntegrityError):
            User.objects.create(**values)
| 2.5625 | 3 |
Util/postprocessing/urca-tools/plot_fconv_slopes.py | sailoridy/MAESTRO | 17 | 12767958 | #!/usr/bin/env python
"""
Given the output of fconv_slopes, plot the thermodynamic
gradients corresponding to an initial model.
<NAME>
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import argparse
# Command-line interface (parsed at import time).
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=str,
                    help='Name of file containing thermodynamic gradients to plot.')
parser.add_argument('-f', '--format', type=str, default='png',
                    help='Format of the desired output files. Can be, e.g. "png" or "eps". Defaults to "png".')
parser.add_argument('-rup', '--radius_upper', type=float,
                    help='Upper bound for the plotted radius.')
parser.add_argument('-o', '--outname', type=str, help='Base name of output file to use (w/o extension).')
args = parser.parse_args()
class ConvectiveGradients(object):
    """Holds and plots radius + actual/adiabatic/Ledoux gradient columns.

    The input file is whitespace-delimited with four columns in the order
    r, actual, adiabatic, ledoux (as written by fconv_slopes).
    """

    def __init__(self, infile=None):
        if infile:
            self.r, self.actual, self.adiabatic, self.ledoux = np.loadtxt(infile, unpack=True)
            self.infile = infile
        else:
            # Empty container; caller may fill the arrays manually.
            self.r = []
            self.actual = []
            self.adiabatic = []
            self.ledoux = []
            self.infile = ''

    def plot(self, fmt=None, rup=None, outname=None, show=False):
        """Plot (actual - convective) gradients vs radius and save to file.

        fmt     : 'png' saves at 300 dpi; anything else saves eps.
        rup     : optional upper bound on plotted radius.
        outname : output filename; defaults to infile + extension.
        show    : also display the figure interactively.
        """
        fig = plt.figure()
        ax = fig.add_subplot(111)
        idxup = -1
        if rup:
            ax.set_xlim([0, rup])
            # Get the lowest index where radius > rup
            idxup = np.where(self.r > rup)[0][0]
        ax.set_xlabel('$\mathrm{r (cm)}$')
        # ax2 = ax.twinx()
        # ax.plot(self.r[:idxup], self.adiabatic[:idxup], color='blue', linestyle='-', label='adiabatic')
        # ax.plot(self.r[:idxup], self.actual[:idxup], color='green', linestyle='--', label='actual')
        # ax.plot(self.r[:idxup], self.ledoux[:idxup], color='red', linestyle=':', label='ledoux')
        dadiabatic = self.actual[:idxup]-self.adiabatic[:idxup]
        neg_idx, pos_idx = self.get_signed_indices(dadiabatic)
        # ax2.plot(self.r[:idxup][neg_idx], dadiabatic[neg_idx], color='black', marker='v', markersize=8,
        #          linestyle='-', label='actual-adiabatic (-)')
        # ax2.plot(self.r[:idxup][pos_idx], dadiabatic[pos_idx], color='black', marker='^', markersize=8,
        #          linestyle='-', label='actual-adiabatic (+)')
        dledoux = self.actual[:idxup]-self.ledoux[:idxup]
        # NOTE(review): this recomputes the signs of dadiabatic, not dledoux;
        # harmless while the marker plots below stay commented out.
        neg_idx, pos_idx = self.get_signed_indices(dadiabatic)
        # ax2.plot(self.r[:idxup][neg_idx], dledoux[neg_idx], color='magenta', marker='v', markersize=8,
        #          linestyle=':', label='actual-ledoux (-)')
        # ax2.plot(self.r[:idxup][pos_idx], dledoux[pos_idx], color='magenta', marker='^', markersize=8,
        #          linestyle=':', label='actual-ledoux (+)')
        ax.plot(self.r[:idxup], dadiabatic, color='blue', linestyle='-', label='adiabatic $\mathrm{\\nabla_{conv}}$')
        ax.plot(self.r[:idxup], dledoux, color='red', linestyle='-.', label='ledoux $\mathrm{\\nabla_{conv}}$')
        # Symmetric-log y-scale: linear inside +/- 0.5*mlin, log outside.
        mx = max(np.amax(dadiabatic), np.amax(dledoux))
        mn = min(np.amin(dadiabatic), np.amin(dledoux))
        mlin = min(abs(mx), abs(mn))
        # NOTE(review): `linthreshy` was renamed `linthresh` in newer
        # matplotlib releases -- confirm the pinned matplotlib version.
        plt.yscale('symlog', linthreshy=0.5*mlin)
        ax.set_ylabel('$\mathrm{\\nabla_{actual} - \\nabla_{conv}}$')
        plt.legend()
        if fmt=='png':
            if not outname:
                outname = self.infile + '.png'
            plt.savefig(outname, dpi=300)
        else:
            if not outname:
                outname = self.infile + '.eps'
            plt.savefig(outname)
        if show:
            plt.show()
        plt.close(fig)

    def get_signed_indices(self, dvec):
        # Index arrays of the strictly negative / strictly positive entries
        # (zeros belong to neither set).
        neg_idx = np.where(dvec < 0.0)
        pos_idx = np.where(dvec > 0.0)
        return neg_idx, pos_idx


if __name__=='__main__':
    cg = ConvectiveGradients(args.infile)
    cg.plot(args.format, args.radius_upper, args.outname)
| 2.96875 | 3 |
chrome/test/enterprise/e2e/policy/default_search_provider/default_search_provider.py | sarang-apps/darshan_browser | 0 | 12767959 | <filename>chrome/test/enterprise/e2e/policy/default_search_provider/default_search_provider.py<gh_stars>0
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from chrome_ent_test.infra.core import environment, before_all, test
from infra import ChromeEnterpriseTestCase
@environment(file="../policy_test.asset.textpb")
class DefaultSearchProviderTest(ChromeEnterpriseTestCase):
    """Test the DefaultSearchProviderEnabled,
    DefaultSearchProviderName,
    DefaultSearchProviderSearchURL
    https://cloud.google.com/docs/chrome-enterprise/policies/?policy=DefaultSearchProviderEnabled
    https://cloud.google.com/docs/chrome-enterprise/policies/?policy=DefaultSearchProviderName
    https://cloud.google.com/docs/chrome-enterprise/policies/?policy=DefaultSearchProviderSearchURL
    """

    @before_all
    def setup(self):
        # Provision the client VM once for every test in this class.
        self.InstallChrome('client2012')
        self.EnableUITest('client2012')

    def _get_search_url(self, instance_name):
        """Run the webdriver helper on *instance_name*; return its output,
        which is expected to contain the URL the omnibox search resolved to."""
        local_dir = os.path.dirname(os.path.abspath(__file__))
        output = self.RunUITest(
            instance_name,
            os.path.join(local_dir, 'default_search_provider_webdriver.py'))
        return output

    @test
    def test_default_search_provider_bing(self):
        # Set the policy on the DC, force a group-policy refresh on the
        # client, then verify searches go to Bing.
        self.SetPolicy('win2012-dc', 'DefaultSearchProviderEnabled', 1, 'DWORD')
        self.SetPolicy('win2012-dc', 'DefaultSearchProviderName', 'Bing', 'String')
        self.SetPolicy('win2012-dc', 'DefaultSearchProviderSearchURL',
                       '"https://www.bing.com/search?q={searchTerms}"', 'String')
        self.RunCommand('client2012', 'gpupdate /force')
        output = self._get_search_url('client2012')
        self.assertIn('www.bing.com', output)

    @test
    def test_default_search_provider_yahoo(self):
        self.SetPolicy('win2012-dc', 'DefaultSearchProviderEnabled', 1, 'DWORD')
        self.SetPolicy('win2012-dc', 'DefaultSearchProviderName', 'Yahoo', 'String')
        self.SetPolicy('win2012-dc', 'DefaultSearchProviderSearchURL',
                       '"https://search.yahoo.com/search?p={searchTerms}"',
                       'String')
        self.RunCommand('client2012', 'gpupdate /force')
        output = self._get_search_url('client2012')
        self.assertIn('search.yahoo.com', output)

    @test
    def test_default_search_provider_disabled(self):
        # With the provider disabled, typed text is treated as a URL.
        self.SetPolicy('win2012-dc', 'DefaultSearchProviderEnabled', 0, 'DWORD')
        self.RunCommand('client2012', 'gpupdate /force')
        output = self._get_search_url('client2012')
        self.assertIn('http://anything', output)
| 2.09375 | 2 |
src/jig/gitutils/tests/test_branches.py | Lightslayer/jig | 0 | 12767960 | from os import unlink
from os.path import join, isfile
from contextlib import contextmanager
from functools import partial
from itertools import chain, combinations
from git import Repo, Head, Commit
from mock import patch, MagicMock
from jig.tests.testcase import JigTestCase
from jig.exc import (
GitRevListMissing, GitRevListFormatError, GitWorkingDirectoryDirty,
TrackingBranchMissing)
from jig.gitutils.branches import (
parse_rev_range, prepare_working_directory,
_prepare_against_staged_index, _prepare_with_rev_range, Tracked)
@contextmanager
def assert_git_status_unchanged(repository):
    """
    Make sure that the working directory remains in the same rough state.

    :param string repository: Git repo
    """
    def long_status(repository):
        output = Repo(repository).git.status('--long')

        # Skip the first line, it tells us the branch
        return output.splitlines()[1:]

    before = long_status(repository)

    yield

    # Compare `git status` output captured before/after the wrapped block.
    after = long_status(repository)

    assert before == after, "Working directory status has changed"
class PrepareTestCase(JigTestCase):
    """
    Base test class for private functions that prepare the working directory.

    Subclasses implement ``prepare_context_manager`` returning the context
    manager under test.
    """
    def setUp(self):
        super(PrepareTestCase, self).setUp()

        self.reset_gitrepo()

    def reset_gitrepo(self):
        # Drop the cached repo dir; presumably JigTestCase.gitrepodir is a
        # lazily re-created attribute, so this forces a fresh fixture repo
        # on the next access -- TODO confirm in JigTestCase.
        del self.gitrepodir

        # Four single-file commits to give the tests some history.
        self.commits = [
            self.commit(self.gitrepodir, 'a.txt', 'a'),
            self.commit(self.gitrepodir, 'b.txt', 'b'),
            self.commit(self.gitrepodir, 'c.txt', 'c'),
            self.commit(self.gitrepodir, 'd.txt', 'd')
        ]

    @property
    def repo(self):
        # Fresh GitPython handle on the fixture repository.
        return Repo(self.gitrepodir)

    def diff_head(self):
        # Diff of the index against HEAD (i.e. what is currently staged).
        return self.repo.index.diff('HEAD')

    @contextmanager
    def prepare(self):
        # Run the subject context manager while asserting that `git status`
        # is identical before and after.
        with assert_git_status_unchanged(self.gitrepodir):
            with self.prepare_context_manager() as subject:
                yield subject
class TestParseRevRange(JigTestCase):
    """
    Git utils revision range parser.
    """
    def setUp(self):
        super(TestParseRevRange, self).setUp()

        self.gitrepo, self.gitrepodir, _ = self.repo_from_fixture('repo01')

    def assertIsRevRange(self, rev_range):
        # Both endpoints of a parsed range must resolve to git Commits.
        self.assertIsInstance(rev_range.a, Commit)
        self.assertIsInstance(rev_range.b, Commit)

    def test_bad_format(self):
        """
        If the revision range doesn't match the expected format.
        """
        with self.assertRaises(GitRevListFormatError):
            parse_rev_range(self.gitrepodir, 'A-B')

    def test_bad_format_missing_rev(self):
        """
        If the format is correct but the revisions are missing.
        """
        with self.assertRaises(GitRevListFormatError):
            parse_rev_range(self.gitrepodir, '..B')

        with self.assertRaises(GitRevListFormatError):
            parse_rev_range(self.gitrepodir, 'A..')

    def test_bad_revs(self):
        """
        If the format is good but the revisions do not exist.
        """
        with self.assertRaises(GitRevListMissing):
            parse_rev_range(self.gitrepodir, 'FOO..BAR')

    def test_good_revs(self):
        """
        The revisions to exist.
        """
        self.assertIsRevRange(parse_rev_range(self.gitrepodir, 'HEAD^1..HEAD'))

    def test_local_branch(self):
        """
        A branch that is newly created can be referenced.
        """
        self.gitrepo.create_head('feature-branch')

        self.assertIsRevRange(
            parse_rev_range(self.gitrepodir, 'HEAD^1..feature-branch')
        )

    def test_out_of_range(self):
        """
        A revision is out of range.
        """
        # The fixture has nowhere near 1000 commits.
        with self.assertRaises(GitRevListMissing):
            parse_rev_range(self.gitrepodir, 'HEAD~1000..HEAD')
class TestPrepareAgainstStagedIndex(PrepareTestCase):
    """
    Prepare the working directory against the staged index.

    The yielded ``stash`` is None when nothing needed stashing; otherwise it
    identifies the stash that holds the temporarily removed changes.
    """
    def prepare_context_manager(self):
        # Subject under test, consumed by PrepareTestCase.prepare().
        return _prepare_against_staged_index(self.repo)

    def test_working_directory_clean(self):
        """
        Working directory is clean.
        """
        with self.prepare() as stash:
            self.assertIsNone(stash)

    def test_untracked(self):
        """
        Untracked file present.
        """
        self.create_file(self.gitrepodir, 'e.txt', 'e')

        expected_untracked = self.repo.untracked_files

        with self.prepare() as stash:
            self.assertIsNone(stash)

            # Fixed comment: untracked files survive preparation unchanged
            # (the assertion compares against the pre-prepare snapshot).
            self.assertEqual(expected_untracked, self.repo.untracked_files)

    def test_staged(self):
        """
        Staged file.
        """
        self.stage(self.gitrepodir, 'a.txt', 'aa')

        before = self.diff_head()

        with self.prepare() as stash:
            self.assertIsNone(stash)

            # The staged changes are not stashed
            self.assertEqual(before, self.diff_head())

    def test_modified(self):
        """
        Modified file.
        """
        self.modify_file(self.gitrepodir, 'a.txt', 'aa')

        with self.prepare() as stash:
            self.assertIsNotNone(stash)

            # The modifications are stashed
            self.assertEqual([], self.repo.index.diff(None))

    def test_stageremoved(self):
        """
        Staged removal of a file.
        """
        self.stage_remove(self.gitrepodir, 'a.txt')

        with self.prepare() as stash:
            self.assertIsNone(stash)

    def test_fsremoved(self):
        """
        Non-staged removal of a file.
        """
        unlink(join(self.gitrepodir, 'a.txt'))

        with self.prepare() as stash:
            self.assertIsNotNone(stash)

            # The file is temporarily restored
            self.assertTrue(isfile(join(self.gitrepodir, 'a.txt')))

    def test_combinations(self):
        """
        Combine all variations of modification, creation, or removal.
        """
        # Each mutator flags (via should_stash) whether applying it alone
        # would require the prepare step to create a stash.
        def modified():
            self.modify_file(self.gitrepodir, 'a.txt', 'aa')
        modified.should_stash = True

        def staged():
            self.stage(self.gitrepodir, 'b.txt', 'bb')
        staged.should_stash = False

        def indexremoved():
            self.stage_remove(self.gitrepodir, 'c.txt')
        indexremoved.should_stash = False

        def fsremoved():
            unlink(join(self.gitrepodir, 'd.txt'))
        fsremoved.should_stash = True

        def untracked():
            self.create_file(self.gitrepodir, 'e.txt', 'e')
        untracked.should_stash = False

        mutation = partial(
            combinations,
            (modified, staged, indexremoved, fsremoved, untracked)
        )

        # Every combination of 2..5 simultaneous mutations.
        # (Idiom fix: generator instead of list(map(...)).)
        options = chain.from_iterable(mutation(size) for size in (2, 3, 4, 5))

        for option in options:
            # Apply each mutation to the Git repository.
            # (Idiom fix: explicit loop instead of map() used for side
            # effects.)
            for mutate in option:
                mutate()

            with self.prepare() as stash:
                # A stash is expected iff any applied mutation requires one.
                should_stash = any(x.should_stash for x in option)

                if should_stash:
                    self.assertIsNotNone(stash)
                else:
                    self.assertIsNone(stash)

            self.reset_gitrepo()
class TestPrepareWithRevRange(PrepareTestCase):
    """
    With a given rev range test that we can checkout the repository.
    """
    def prepare_context_manager(self):
        # Parse self.rev_range (set by each test method before calling
        # prepare()) against the fixture repo.
        rev_range_parsed = parse_rev_range(
            self.repo.working_dir,
            self.rev_range
        )

        return _prepare_with_rev_range(self.repo, rev_range_parsed)

    def test_dirty_working_directory(self):
        """
        Dirty working directory will raise an exception.
        """
        self.rev_range = 'HEAD~3..HEAD~0'

        # Force the working directory to be dirty
        self.modify_file(self.gitrepodir, 'a.txt', 'aa')

        with self.assertRaises(GitWorkingDirectoryDirty):
            self.prepare().__enter__()

    def test_yields_git_named_head(self):
        """
        The object that is yielded is a :py:class:`git.Head`.
        """
        self.rev_range = 'HEAD~1..HEAD~0'

        with self.prepare() as head:
            self.assertIsInstance(head, Head)

    def test_yields_git_detached_head(self):
        """
        If detached HEAD, object that is yielded is a :py:class:`git.Commit`.
        """
        self.rev_range = 'HEAD~1..HEAD~0'

        # Detach the head by checking out the commit hash
        Repo(self.gitrepodir).git.checkout(self.commits[-1].hexsha)

        with self.prepare() as head:
            self.assertIsInstance(head, Commit)

    def test_detached_head_right_side_of_rev_range(self):
        """
        The head object points to the right side of the rev range.
        """
        self.rev_range = 'HEAD~2..HEAD~1'

        # HEAD~1 is going to be our second to last commit
        expected = self.commits[-2]

        with self.prepare():
            # The symbolic ref for HEAD should now be our expected commit
            self.assertEqual(
                Repo(self.gitrepodir).head.commit,
                expected
            )

    def test_returns_to_master(self):
        """
        After exiting the context manager, we should be back on master.
        """
        self.rev_range = 'HEAD~2..HEAD~1'

        with self.prepare():
            pass

        self.assertEqual(
            Repo(self.gitrepodir).head.reference.path,
            'refs/heads/master'
        )

    def test_returns_to_detached_head(self):
        """
        From a detached head upon exiting we should be back where we started.
        """
        self.rev_range = 'HEAD~2..HEAD~1'

        # Detach the head by checking out the commit hash
        Repo(self.gitrepodir).git.checkout(self.commits[-2].hexsha)

        # HEAD~1 is going to be our third to last commit
        # (relative to the detached HEAD at commits[-2])
        expected = self.commits[-3]
        with self.prepare():
            self.assertEqual(
                Repo(self.gitrepodir).head.commit,
                expected
            )

        # And we are back to our detached head we started with
        self.assertEqual(
            Repo(self.gitrepodir).head.commit,
            self.commits[-2]
        )
class TestPrepareWorkingDirectory(JigTestCase):
    """
    Make the working directory suitable for running Jig.

    Verifies that :func:`prepare_working_directory` dispatches to the
    right private helper depending on whether a rev range is supplied.
    """
    def setUp(self):
        super(TestPrepareWorkingDirectory, self).setUp()
        self.gitrepo, self.gitrepodir, _ = self.repo_from_fixture('repo01')
    def test_no_rev_range(self):
        """
        Should prepare against the staged index if no rev range.
        """
        prepare_function = \
            'jig.gitutils.branches._prepare_against_staged_index'
        with patch(prepare_function) as p:
            p.return_value = MagicMock()
            with prepare_working_directory(self.gitrepodir):
                pass
        # The mocked helper's context manager must have been entered.
        self.assertTrue(p.return_value.__enter__.called)
    def test_rev_range(self):
        """
        Should checkout the Git repo at the end of the rev range.
        """
        prepare_function = \
            'jig.gitutils.branches._prepare_with_rev_range'
        with patch(prepare_function) as p:
            p.return_value = MagicMock()
            rev_range_parsed = parse_rev_range(
                self.gitrepodir, 'HEAD~1..HEAD~0'
            )
            with prepare_working_directory(self.gitrepodir, rev_range_parsed):
                pass
        # The rev-range helper's context manager must have been entered.
        self.assertTrue(p.return_value.__enter__.called)
class TestTracked(JigTestCase):
    """
    Git repositories can be tracked for CI mode.

    ``Tracked`` maintains a bookkeeping branch (default name
    'jig-ci-last-run') that records the last commit Jig processed.
    """
    def setUp(self):
        super(TestTracked, self).setUp()
        # Three known commits so tests can address history by index.
        self.commits = [
            self.commit(self.gitrepodir, 'a.txt', 'a'),
            self.commit(self.gitrepodir, 'b.txt', 'b'),
            self.commit(self.gitrepodir, 'c.txt', 'c'),
        ]
    def test_tracking_branch_does_not_exist(self):
        """
        Tracking branch does not exist.
        """
        tracked = Tracked(self.gitrepodir)
        self.assertFalse(tracked.exists)
    def test_tracking_branch_exists(self):
        """
        Tracking branch exists.
        """
        # 'jig-ci-last-run' is the default tracking branch name.
        tracking_branch = Repo(self.gitrepodir).create_head('jig-ci-last-run')
        tracking_branch.commit = 'HEAD'
        tracked = Tracked(self.gitrepodir)
        self.assertTrue(tracked.exists)
    def test_tracking_branch_by_a_different_name(self):
        """
        Can check existence by a different name than the default.
        """
        name = 'different-tracking-name'
        tracking_branch = Repo(self.gitrepodir).create_head(name)
        tracking_branch.commit = 'HEAD'
        tracked = Tracked(self.gitrepodir, name)
        self.assertTrue(tracked.exists)
    def test_update_defaults_to_head(self):
        """
        Updating the tracking branch defaults to current HEAD.
        """
        tracked = Tracked(self.gitrepodir)
        reference = tracked.update()
        self.assertEqual(
            reference.commit,
            self.commits[-1]
        )
    def test_non_existent_reference(self):
        """
        Without a tracking branch trying to get a references to it raises.
        """
        tracked = Tracked(self.gitrepodir)
        with self.assertRaises(TrackingBranchMissing):
            tracked.reference
    def test_tracking_branch_reference(self):
        """
        With a tracking branch we can get a reference to it.
        """
        tracking_branch = Repo(self.gitrepodir).create_head('jig-ci-last-run')
        tracking_branch.commit = 'HEAD~2'
        tracked = Tracked(self.gitrepodir)
        # HEAD~2 of three commits is the very first one.
        self.assertEqual(
            tracked.reference.commit,
            self.commits[0]
        )
    def test_update_takes_commit_hash(self):
        """
        Updating the tracking branch can be done with a commit hash.
        """
        tracked = Tracked(self.gitrepodir)
        tracked.update(self.commits[0].hexsha)
        self.assertEqual(
            tracked.reference.commit,
            self.commits[0]
        )
    def test_update_moves_head_forward(self):
        """
        The tracking branch reference can be moved forward.
        """
        tracking_branch = Repo(self.gitrepodir).create_head('jig-ci-last-run')
        tracking_branch.commit = 'HEAD~2'
        tracked = Tracked(self.gitrepodir)
        # update() without arguments advances the branch to current HEAD.
        tracked.update()
        self.assertEqual(
            tracked.reference.commit,
            self.commits[-1]
        )
| 2.265625 | 2 |
src/components/crackChecker/Update.py | Naruita/tinyApps | 1 | 12767961 | <reponame>Naruita/tinyApps
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Content retrieving modules
from urllib.request import urlopen
from json import loads
# Date and Time implemention tools
from datetime import datetime
from time import sleep
"""Updates the program to the latest stage in the API or else, till the API response fails again."""
# Owned
__author__ = "<NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
# Checks when the program was last updated.
def last_time_check():
    """Print the timestamp stored in assets/LastUpdatedOn.txt, if any.

    Uses a context manager so the file handle is closed even if reading
    fails (the original leaked the handle on error).
    """
    with open("./assets/LastUpdatedOn.txt", "r") as date:
        DateAndTime = date.read()
    # An empty file means the program never completed an update.
    if len(DateAndTime) != 0:
        print("Last Updated On " + DateAndTime)
# Checks at which page the program last received a throwback error.
def last_page_check():
    """Return the page number to resume fetching from.

    Reads assets/LastPage.txt and resumes at the saved page + 1, or at
    page 0 when nothing has been saved yet. The file is opened with a
    context manager so the handle cannot leak.
    """
    with open("./assets/LastPage.txt", "r") as page:
        pageNo = page.read()
    if len(pageNo) != 0:
        startAt = int(pageNo) + 1
    else:
        startAt = 0
    return startAt
# Updates the last time the program was run before it quit.
def last_time_update():
    """Write the current date/time (dd/mm/YYYY HH:MM:SS) to
    assets/LastUpdatedOn.txt, replacing any previous value."""
    now = datetime.now()
    DateAndTime = now.strftime("%d/%m/%Y %H:%M:%S")
    # ``with`` guarantees the file is flushed and closed.
    with open("assets/LastUpdatedOn.txt", "w") as date:
        date.write(DateAndTime)
# Updated the last page the program has retrieved till.
def last_page_update(lastUpdatedTill):
    """Persist the last successfully fetched page number.

    :param lastUpdatedTill: page number (int or str) to record in
        assets/LastPage.txt for the next resume.
    """
    with open("assets/LastPage.txt", "w") as page:
        page.write(str(lastUpdatedTill))
# The entire process of updating.
def update(link):
    """Fetch one JSON page from *link* and append it to assets/Data.txt.

    Returns False when the request/parse/write fails so the caller can
    stop and resume later; returns True on success (the original
    returned None, which the caller's ``is False`` check still accepts).
    """
    try:
        url = urlopen(link)
        data = loads(url.read())
        strdata = str(data)
        path = "assets/Data.txt"
        # ``with`` closes the file; the original additionally called
        # f.close() redundantly inside the with-block.
        with open(path, "a", encoding="utf-8") as f:
            f.write(strdata)
    except Exception:
        # Narrowed from a bare ``except`` so Ctrl-C still interrupts.
        print(
            "API RESPONSE ERROR...\nTry again in a while. We will continue from the page we left off."
        )
        input("Press any key to exit ")
        return False
    return True
# Main
if __name__ == "__main__":
    last_time_check()
    startAt = last_page_check()
    print(
        "Update Process Started...\nThis might take a while. We will notify you once it is done."
    )
    lastUpdatedTill = 0
    # Walk the API pages (0..599), resuming where the last run stopped.
    for i in range(startAt, 600):
        i = str(i)
        url = "https://api.crackwatch.com/api/games?&is_cracked=true&page=" + i
        if update(url) is False:
            break
        print("Page " + i + " has been added")
        lastUpdatedTill = i
    # NOTE(review): the success message prints even when the loop broke on
    # an API error above — confirm this is intended.
    print("Data has been successfully updated!")
    last_page_update(lastUpdatedTill)
    last_time_update()
    print("-" * 10)
    input(
        "The data retrieved has been stored in Data.txt, press any key to safely exit."
    )
| 3.03125 | 3 |
python_end/main_controller.py | divyansh9711/python-socket.io | 0 | 12767962 | import obs,parenty,socket_controller,threading
def callObs():
    # Runs the OBS control loop; executed on a worker thread below.
    obs.main()
# Prepare the socket communication file before OBS starts using it.
socket_controller.init_comm_file()
# Run OBS handling in the background so the main thread stays free.
t = threading.Thread(target = callObs, args=())
t.start()
| 2.21875 | 2 |
dbaas/maintenance/admin/database_maintenance_task.py | didindinn/database-as-a-service | 303 | 12767963 | <reponame>didindinn/database-as-a-service<filename>dbaas/maintenance/admin/database_maintenance_task.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.utils.html import format_html
from ..models import DatabaseMaintenanceTask
class DatabaseMaintenanceTaskAdmin(admin.ModelAdmin):
    """Read-only Django admin listing for database maintenance tasks.

    Subclasses must implement :meth:`maintenance_action` to describe the
    concrete action performed.
    """
    list_select_related = None
    search_fields = ("database__name", "task__id", "task__task_id")
    list_filter = [
        "database__team", "status",
    ]
    exclude = ("task", "can_do_retry")
    actions = None
    list_display = (
        "database", "database_team", "current_step", "friendly_status",
        "maintenance_action", "link_task", "started_at", "finished_at"
    )
    readonly_fields = (
        "database", "link_task", "started_at", "finished_at",
        "current_step", "status", "maintenance_action"
    )
    ordering = ["-started_at"]

    def friendly_status(self, maintenance_task):
        # Map each status to its HTML badge; unknown statuses render empty.
        badges = {
            DatabaseMaintenanceTask.WAITING:
                '<span class="label label-warning">Waiting</span>',
            DatabaseMaintenanceTask.RUNNING:
                '<span class="label label-success">Running</span>',
            DatabaseMaintenanceTask.ERROR:
                '<span class="label label-important">Error</span>',
            DatabaseMaintenanceTask.SUCCESS:
                '<span class="label label-info">Success</span>',
            DatabaseMaintenanceTask.ROLLBACK:
                '<span class="label label-info">Rollback</span>',
        }
        return format_html(badges.get(maintenance_task.status, ''))
    friendly_status.short_description = "Status"

    def database_team(self, maintenance_task):
        # Shown as a dedicated "Team" column in the change list.
        return maintenance_task.database.team.name
    database_team.short_description = "Team"

    def link_task(self, maintenance_task):
        # Link to the associated notification task's admin change page.
        change_url = reverse(
            'admin:notification_taskhistory_change',
            args=[maintenance_task.task.id]
        )
        return format_html(
            "<a href={}>{}</a>".format(change_url, maintenance_task.task.id)
        )
    link_task.short_description = "Task"

    def has_delete_permission(self, request, obj=None):
        # Maintenance history is immutable from the admin.
        return False

    def has_add_permission(self, request, obj=None):
        # Tasks are created by the system, never by hand.
        return False

    def maintenance_action(self, maintenance_task):
        # Abstract: concrete admins describe their maintenance action.
        raise NotImplementedError()
    maintenance_action.short_description = "Action"
| 1.875 | 2 |
env/model/network.py | Shiduo-zh/pybulletSim | 0 | 12767964 | <reponame>Shiduo-zh/pybulletSim
from tkinter import E
from env.model.mlp import *
from env.model.conv2d import *
from env.model.transformer import *
from env.model.utils import*
import torch
import torch.nn as nn
class localTransformer(nn.Module):
    """Proprioception + depth-vision policy network.

    A proprioceptive vector and stacked depth images are encoded into
    tokens, mixed by two transformer layers, and projected to an action
    vector plus a scalar critic value.
    """
    def __init__(self, prop_size=93, hidden_size=256, output_size=128,
                 input_channels=4, output_channnels=128,
                 action_size=12, spatial_size=4, batch_size=31):
        """
        params:
            prop_size: dimension of the proprioceptive state vector
            hidden_size: hidden units of the MLPs (state encoder and
                projection head)
            output_size: embedding width of the proprioceptive token
            input_channels: channels of the stacked depth input
            output_channnels: channels emitted by the conv encoder; also
                the transformer token width
            action_size: dimension of the action output
            spatial_size: side length N of the N x N visual token grid
            batch_size: kept for interface parity (not used internally)
        """
        super(localTransformer, self).__init__()
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0), np.sqrt(2))
        self.spatial_size = spatial_size
        self.input_channels = input_channels
        self.visual_channels = output_channnels
        self.output_size = action_size
        self.batch_size = batch_size
        # proprioceptive state encoder
        self.prop_encoder = MlpModel(prop_size, hidden_size, output_size)
        self.linear = nn.Linear(output_size, output_size)
        # depth visual information encoder (16x16 patches -> token grid)
        self.visual_encoder = Conv2dModel(input_channels, [output_channnels], [(16, 16)], [(16, 16)])
        # 2 transformer layers
        self.translayer1 = transformerEncoder(input_dim=output_channnels,
                                              dim_k=output_channnels,
                                              dim_v=output_channnels)
        self.translayer2 = transformerEncoder(input_dim=output_channnels,
                                              dim_k=output_channnels,
                                              dim_v=output_channnels)
        # projection head: [prop_feature ; vision_feature] -> action
        self.projection_head = MlpModel(input_size=2 * output_channnels,
                                        hidden_sizes=hidden_size,
                                        output_size=action_size)
        self.critic_linear = nn.Sequential(init_(nn.Linear(action_size, 1)), nn.ReLU())

    def forward(self, props, visions):
        """Return ``(value, action)`` for a batch of observations.

        NOTE(review): assumes depth input reshapes to (B, C, 64, 64) and
        proprioception to (B, 93) — confirm against the environment.
        """
        visions = visions.reshape((-1, self.input_channels, 64, 64))
        props = props.reshape((-1, 93))
        if (not torch.is_tensor(visions) or not torch.is_tensor(props)):
            props = torch.from_numpy(props).to(torch.float32)
            visions = torch.from_numpy(visions).to(torch.float32)
        E_prop = self.prop_encoder(props)        # (B, output_size)
        E_vision = self.visual_encoder(visions)  # (B, C, N, N)
        t_prop = self.linear(E_prop)
        batch_size = t_prop.shape[0]
        # Build the token sequence [prop, vision(0,0), vision(0,1), ...]
        # per sample along a new sequence axis. Bug fix: the original
        # concatenated along dim 0 and then .view()-ed, which mixes rows
        # from different samples whenever batch_size > 1 (the result is
        # identical for batch_size == 1).
        tokens = [t_prop.unsqueeze(1)]
        for i in range(self.spatial_size):
            for j in range(self.spatial_size):
                tokens.append(E_vision[:, :, i, j].unsqueeze(1))
        T0 = torch.cat(tokens, dim=1)            # (B, N^2 + 1, C)
        T1 = self.translayer1(T0)
        T2 = self.translayer2(T1)
        # T2 holds (N^2 + 1) tokens of width C per sample
        vision_output = T2[:, 1:, :]
        prop_feature = T2[:, 0, :]
        vision_feature = torch.sum(vision_output, dim=1) / self.spatial_size ** 2
        # Bug fix: concatenate per sample along the feature axis; the
        # original used the default dim=0 followed by .view(batch, -1),
        # which paired features from different samples for batch > 1.
        features = torch.cat((prop_feature, vision_feature), dim=1)
        if batch_size == 1:
            # Preserve the original 1-D feature vector for single samples.
            features = features.view(-1)
        action = self.projection_head(features)
        value = self.critic_linear(action)
        return value, action
| 2.265625 | 2 |
src/conference_stats.py | tjbreshears/VBelo | 0 | 12767965 | import csv
import pandas as pd
import matplotlib.pyplot as plt
# Load the played games exported by the Elo pipeline.
games = []
with open("outputs/games_output.csv", 'r') as data:
    for line in csv.DictReader(data):
        games.append(line)
# Load the team table (name, conference, elo, ...).
teams = []
with open("inputs/VBelo - teams.csv", 'r') as data:
    for line in csv.DictReader(data):
        teams.append(line)
# Elo values arrive as strings from csv; make them numeric.
for i in range(len(teams)):
    teams[i]['elo'] = int(teams[i]['elo'])
# Conferences whose games are excluded from the tally.
not_tracked = ['D-III','NAIA','NCCAA','n/a']
# Counter table: one row per tracked conference, all counters at zero.
start_data = {'Conference':['Big West','Carolinas','EIVA','Independent','MIVA','MPSF','SIAC'],
        'Non-Conf Matches':[0,0,0,0,0,0,0],'Non-Conf Wins':[0,0,0,0,0,0,0],'Non-Conf Losses':[0,0,0,0,0,0,0],
        'Non-Conf Home Matches':[0,0,0,0,0,0,0],'Non-Conf Home Wins':[0,0,0,0,0,0,0],'Non-Conf Home Losses':[0,0,0,0,0,0,0],
        'Non-Conf Away Matches':[0,0,0,0,0,0,0],'Non-Conf Away Wins':[0,0,0,0,0,0,0],'Non-Conf Away Losses':[0,0,0,0,0,0,0],
        'Non-Conf Neutral Matches':[0,0,0,0,0,0,0],'Non-Conf Neutral Wins':[0,0,0,0,0,0,0],'Non-Conf Neutral Losses':[0,0,0,0,0,0,0],
            }
df = pd.DataFrame(start_data)
df=df.set_index('Conference')
def stats (year):
    """Accumulate non-conference results for *year* into the module df
    and write them to outputs/conference_stats_2022.csv."""
    for i in range(len(games)):
        # Only finished games (r_t1 filled) from the requested season.
        if games[i]['season'] == year and games[i]['r_t1'] != '':
            t1_c = ''
            t2_c = ''
            # Resolve each team's conference from the teams table.
            for x in range(len(teams)):
                if games[i]['t1'] == teams[x]['short_name']:
                    t1_c = teams[x]['conference']
                if games[i]['t2'] == teams[x]['short_name']:
                    t2_c = teams[x]['conference']
            # Count only cross-conference match-ups.
            if t1_c != t2_c:
                # NOTE(review): for non-neutral games only away totals are
                # tracked for t1 and only home totals for t2 — presumably
                # t1 is always the visiting team in the source data; confirm.
                if t1_c not in not_tracked:
                    df.loc[t1_c,'Non-Conf Matches'] += 1
                    df.loc[t1_c,'Non-Conf Wins'] += int(games[i]['r_t1'])
                    if games[i]['r_t1'] == '0':
                        df.loc[t1_c,'Non-Conf Losses'] += 1
                    if games[i]['n'] == '0':
                        df.loc[t1_c,'Non-Conf Away Matches'] += 1
                        df.loc[t1_c,'Non-Conf Away Wins'] += int(games[i]['r_t1'])
                        if games[i]['r_t1'] == '0':
                            df.loc[t1_c,'Non-Conf Away Losses'] += 1
                    if games[i]['n'] == '1':
                        df.loc[t1_c,'Non-Conf Neutral Matches'] += 1
                        df.loc[t1_c,'Non-Conf Neutral Wins'] += int(games[i]['r_t1'])
                        if games[i]['r_t1'] == '0':
                            df.loc[t1_c,'Non-Conf Neutral Losses'] += 1
                if t2_c not in not_tracked:
                    df.loc[t2_c,'Non-Conf Matches'] += 1
                    df.loc[t2_c,'Non-Conf Wins'] += int(games[i]['r_t2'])
                    if games[i]['r_t2'] == '0':
                        df.loc[t2_c,'Non-Conf Losses'] += 1
                    if games[i]['n'] == '0':
                        df.loc[t2_c,'Non-Conf Home Matches'] += 1
                        df.loc[t2_c,'Non-Conf Home Wins'] += int(games[i]['r_t2'])
                        if games[i]['r_t2'] == '0':
                            df.loc[t2_c,'Non-Conf Home Losses'] += 1
                    if games[i]['n'] == '1':
                        df.loc[t2_c,'Non-Conf Neutral Matches'] += 1
                        df.loc[t2_c,'Non-Conf Neutral Wins'] += int(games[i]['r_t2'])
                        if games[i]['r_t2'] == '0':
                            df.loc[t2_c,'Non-Conf Neutral Losses'] += 1
    # Export the accumulated table once the whole season is processed.
    pd.set_option('display.max_columns', None)
    df.to_csv('outputs/conference_stats_2022.csv')
stats('2022')
| 2.84375 | 3 |
src/dandd/__main__.py | Peilonrayz/warhammer | 0 | 12767966 | <gh_stars>0
import fractions
from typing import Callable
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
from dice_stats import Dice, Range, from_results
from other import main
# Rage
# Unarmored Defense
# Great Weapon Master
# Reckless Attack
# It would be cool if this could be changed to something like
# (DND(Dice.from_dice(20)) + 2).attack(ac, damage, critical_damage)
class DND:
    """Resolve a D&D attack roll into a damage distribution.

    Wraps an attack-roll :class:`Dice` plus a modifier function; ``attack``
    combines natural-1 miss, critical, and AC-gated hit outcomes.
    """
    # Distribution of the d20 attack roll.
    _attack: Dice
    # Applied to the roll before comparing against AC (e.g. +to-hit).
    _modifiers: Callable[[Dice], Dice]
    def __init__(self, attack, modifiers=lambda dice: dice):
        self._attack = attack
        self._modifiers = modifiers
    def miss(self, dice):
        # Default miss outcome: no damage. ``dice`` is unused but kept so
        # the signature matches the miss callbacks built in ``attack``.
        return Dice.from_empty()
    def nat_attack(self, damage, critical_damage, miss, crit=20):
        """Split the natural roll: nat 1 -> miss, >= ``crit`` -> critical,
        everything else -> ``damage`` (after applying modifiers)."""
        return self._attack.apply_chances(
            {
                Range.from_range(f"[,1]"): miss,
                Range.from_range(f"[{crit},]"): critical_damage,
            },
            damage,
            apply=self._modifiers,
        )
    def mod_attack(self, ac, damage, miss):
        # Build the AC comparison: modified roll >= ac -> damage, else miss.
        def inner(dice):
            return dice.as_chance(fractions.Fraction(1, 1)).chances(
                {Range.from_range(f"[{ac},]"): damage}, miss
            )
        return inner
    def crit_attack(self, damage):
        # A critical always deals ``damage`` regardless of the roll.
        def inner(dice):
            return damage
        return inner
    def attack(
        self, ac, damage, critical, next_attack=None, next_critical_attack=None, crit=20
    ):
        """Full attack resolution; ``next_attack``/``next_critical_attack``
        chain the distribution of the follow-up attack(s) onto each outcome."""
        miss = self.miss
        if next_critical_attack is not None:
            critical += next_critical_attack
        elif next_attack is not None:
            critical += next_attack
        if next_attack is not None:
            # A miss still lets the follow-up attacks happen.
            miss = lambda _: next_attack
            damage += next_attack
        return self.nat_attack(
            self.mod_attack(ac, damage, miss(None)),
            self.crit_attack(critical),
            miss,
            crit,
        )
class Weapon:
    """Base class for a weapon's damage distribution.

    Subclasses set ``die`` (number of damage dice) and ``damage`` (the
    single-die distribution). Effects registered via the fluent methods
    are applied per-die (pre) or to the summed roll (post).
    """
    die: int
    damage: Dice
    def __init__(self):
        # pre_effects transform the single-die distribution; post_effects
        # transform the summed damage roll.
        self.pre_effects = []
        self.post_effects = []
    def gwf(self):
        # Great Weapon Fighting: reroll 1s and 2s once per die.
        self.pre_effects.append(lambda dice: dice.reroll([1, 2]))
        return self
    def sa(self):
        # Take the better of two damage rolls.
        self.post_effects.append(lambda dice: Dice.max(*2 * [dice]))
        return self
    def add_damage(self, damage):
        # Flat bonus added after the dice are summed.
        self.post_effects.append(lambda dice: dice + damage)
        return self
    def brutal_critical(self, amount):
        # Extra dice rolled only on a critical hit.
        self.critical_dice = amount
        return self
    @property
    def critical_dice(self):
        # Defaults to ``die`` (a crit doubles the dice) until the setter runs.
        return getattr(self, "_critical_dice", self.die)
    @critical_dice.setter
    def critical_dice(self, value):
        # NOTE: reads the getter, so assigning N adds N on top of the
        # current value (die + N on first assignment). Intentional for
        # stacking brutal-critical dice.
        self._critical_dice = self.critical_dice + value
    @property
    def attack(self):
        # Normal hit: die x (modified single die), then post effects.
        damage = self.damage
        for effect in self.pre_effects:
            damage = effect(damage)
        damage = self.die * damage
        for effect in self.post_effects:
            damage = effect(damage)
        return damage
    @property
    def critical(self):
        # Critical hit: (die + critical_dice) dice, then post effects.
        damage = self.damage
        for effect in self.pre_effects:
            damage = effect(damage)
        damage = (self.die + self.critical_dice) * damage
        for effect in self.post_effects:
            damage = effect(damage)
        return damage
class Greatsword(Weapon):
    # 2d6 slashing.
    die = 2
    damage = Dice.from_dice(6)
class Greataxe(Weapon):
    # 1d12 slashing.
    die = 1
    damage = Dice.from_dice(12)
def attack(acs, attack, attack_damage, weapon, *, crit=20, mul=2):
    """Yield ``(ac, damage distribution)`` for each armour class in *acs*.

    One attack is resolved per AC and scaled by *mul* attacks per round;
    ``crit`` is the minimum natural roll for a critical hit.
    """
    attacker = DND(attack, lambda dice: dice + attack_damage)
    for armour_class in acs:
        per_hit = attacker.attack(
            armour_class, weapon.attack, weapon.critical, crit=crit
        )
        yield armour_class, mul * per_hit
def attack_gwm(acs, attack, attack_damage, weapon, *, crit=20):
    """Yield (ac, damage distribution) for a chained three-attack round.

    The attacks are built back-to-front so that earlier hits/crits feed
    the distributions of the follow-up attacks.
    """
    dnd = DND(attack, lambda dice: dice + attack_damage)
    for ac in acs:
        # Distribution of a single stand-alone attack.
        base_damages = dnd.attack(ac, weapon.attack, weapon.critical, crit=crit,)
        # Second/last attack: a crit grants an extra base attack.
        last_attack = dnd.attack(
            ac,
            weapon.attack,
            weapon.critical,
            next_critical_attack=base_damages,
            crit=crit,
        )
        # First attack: always followed by the rest of the chain; a crit
        # grants two extra base attacks.
        first_attack = dnd.attack(
            ac,
            weapon.attack,
            weapon.critical,
            next_attack=last_attack,
            next_critical_attack=base_damages + base_damages,
            crit=crit,
        )
        yield ac, first_attack
# Matplotlib "tab" palette used to colour successive result surfaces.
COLOURS = [
    "tab:blue",
    "tab:orange",
    "tab:green",
    "tab:red",
    "tab:purple",
    "tab:cyan",
    "tab:pink",
    "tab:olive",
    "tab:gray",
    "tab:brown",
]
if __name__ == "__main__":
main()
fig, (ax_1, ax_2) = plt.subplots(
2, 1, figsize=(8, 8), subplot_kw={"projection": "3d"}
)
for name, weapon, ax in [("Axe", Greataxe, ax_1), ("Sword", Greatsword, ax_2)]:
ACS = range(16, 21, 2)
results = [
attack_gwm(
ACS,
Dice.max(*2 * [Dice.from_dice(20)]),
10,
weapon().brutal_critical(3).add_damage(7 + 4),
crit=20,
),
attack_gwm(
ACS,
Dice.max(*2 * [Dice.from_dice(20)]),
7,
weapon().brutal_critical(3).gwf().add_damage(5 + 4),
crit=19,
),
attack_gwm(
ACS,
Dice.max(*2 * [Dice.from_dice(20)]),
10,
weapon().brutal_critical(3).gwf().add_damage(7 + 4),
crit=20,
),
attack_gwm(
ACS,
Dice.max(*2 * [Dice.from_dice(20)]),
10,
weapon().brutal_critical(3).add_damage(7 + 4),
crit=20,
),
]
labels = [
f"Barb +2Str +UA",
f"Barb Champion +2Str +UA",
f"Barb +2Str +UA +GWF",
f"Barb +2Str +UA +SA",
f"Barb +2Str +UA +GWF +SA",
f"Barb Champion +2Str +UA +SA",
]
for result, colour in zip(from_results(results, only_positive=True), COLOURS):
ax.plot_wireframe(*result, color=colour, alpha=0.5, cstride=0)
ax.set_xlabel("Damage")
ax.set_ylabel("Enemy AC")
ax.set_zlabel("Chance")
ax.set_title(f"Galrog {name.title()} Upgrades")
ax.legend(labels)
plt.show()
| 2.921875 | 3 |
components/isceobj/StripmapProc/runCoherence.py | vincentschut/isce2 | 1,133 | 12767967 | <reponame>vincentschut/isce2
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import logging
import operator
import isceobj
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
from mroipac.correlation.correlation import Correlation
logger = logging.getLogger('isce.insar.runCoherence')
## mapping from algorithm method to Correlation instance method name
## mapping from algorithm method to Correlation instance method name
CORRELATION_METHOD = {
    'phase_gradient' : operator.methodcaller('calculateEffectiveCorrelation'),
    'cchz_wave' : operator.methodcaller('calculateCorrelation')
}
def runCoherence(self, method="phase_gradient"):
logger.info("Calculating Coherence")
# Initialize the amplitude
# resampAmpImage = self.insar.resampAmpImage
# ampImage = isceobj.createAmpImage()
# IU.copyAttributes(resampAmpImage, ampImage)
# ampImage.setAccessMode('read')
# ampImage.createImage()
#ampImage = self.insar.getResampOnlyAmp().copy(access_mode='read')
# Initialize the flattened inteferogram
topoflatIntFilename = self.insar.topophaseFlatFilename
intImage = isceobj.createIntImage()
#widthInt = self.insar.resampIntImage.getWidth()
widthInt = self.insar.topophaseFlatFilename.getWidth()
intImage.setFilename(topoflatIntFilename)
intImage.setWidth(widthInt)
intImage.setAccessMode('read')
intImage.createImage()
# Create the coherence image
cohFilename = topoflatIntFilename.replace('.flat', '.cor')
cohImage = isceobj.createOffsetImage()
cohImage.setFilename(cohFilename)
cohImage.setWidth(widthInt)
cohImage.setAccessMode('write')
cohImage.createImage()
cor = Correlation()
cor.configure()
cor.wireInputPort(name='interferogram', object=intImage)
#cor.wireInputPort(name='amplitude', object=ampImage)
cor.wireOutputPort(name='correlation', object=cohImage)
cohImage.finalizeImage()
intImage.finalizeImage()
#ampImage.finalizeImage()
try:
CORRELATION_METHOD[method](cor)
except KeyError:
print("Unrecognized correlation method")
sys.exit(1)
pass
return None
| 1.171875 | 1 |
bot.py | fopina/tgbotplug-plugins | 1 | 12767968 | #!/usr/bin/env python
# coding=utf-8
import tgbot
from plugins.echo import EchoPlugin
from plugins.random_choice import RandomPlugin
from plugins.google import GooglePlugin
from plugins.simsimi import SimsimiPlugin
import argparse
from requests.packages import urllib3
urllib3.disable_warnings()
def main():
    # Python 2 module (print statements below) — keep py2 syntax.
    args = build_parser().parse_args()
    # Wire the bot with its command plugins; SimsimiPlugin answers any
    # message that is not a recognized command.
    tg = tgbot.TGBot(
        args.token,
        plugins=[
            EchoPlugin(),
            GooglePlugin(),
            RandomPlugin(),
        ],
        no_command=SimsimiPlugin(),
        db_url=args.db_url
    )
    if args.list:
        tg.print_commands()
        return
    if args.create_db:
        tg.setup_db()
        print 'DB created'
        return
    # Webhook mode when requested, otherwise long polling.
    if args.webhook is None:
        tg.run(polling_time=args.polling)
    else:
        tg.run_web(args.webhook[0], host='0.0.0.0', port=int(args.webhook[1]))
def build_parser():
    """Build the command-line parser for the bot runner."""
    arg_parser = argparse.ArgumentParser(description='Run TestBot')
    arg_parser.add_argument(
        '--polling', '-p', dest='polling', type=float, default=2,
        help='interval (in seconds) to check for message updates')
    arg_parser.add_argument(
        '--db_url', '-d', dest='db_url', default='sqlite:///testbot.sqlite3',
        help='URL for database (default is sqlite:///testbot.sqlite3)')
    arg_parser.add_argument(
        '--list', '-l', dest='list',
        action='store_const', const=True, default=False,
        help='list commands')
    arg_parser.add_argument(
        '--webhook', '-w', dest='webhook',
        nargs=2, metavar=('hook_url', 'port'),
        help='use webhooks (instead of polling) - requires bottle')
    arg_parser.add_argument(
        '--create_db', dest='create_db',
        action='store_const', const=True, default=False,
        help='setup database')
    arg_parser.add_argument(
        '--token', '-t', dest='token',
        help='token provided by @BotFather')
    return arg_parser
# Script entry point.
if __name__ == '__main__':
    main()
| 2.296875 | 2 |
samples/DataScience/pos-tagging-neural-nets-keras/pos_tagging_neural_nets_keras.py | g-guichard/IT-Blog | 19 | 12767969 | from __future__ import print_function
import random
import nltk
from nltk.corpus import treebank
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report
from keras.layers import Dense, Dropout, Activation
from keras.models import Sequential
from keras.utils import np_utils, plot_model
from keras.wrappers.scikit_learn import KerasClassifier
import matplotlib.pyplot as plt
CUSTOM_SEED = 42
def add_basic_features(sentence_terms, index):
    """ Compute some very basic word features.

        :param sentence_terms: [w1, w2, ...]
        :type sentence_terms: list
        :param index: the index of the word
        :type index: int
        :return: dict containing features
        :rtype: dict
    """
    word = sentence_terms[index]
    last_index = len(sentence_terms) - 1
    features = {
        'nb_terms': len(sentence_terms),
        'term': word,
        'is_first': index == 0,
        'is_last': index == last_index,
        'is_capitalized': word[0].upper() == word[0],
        'is_all_caps': word.upper() == word,
        'is_all_lower': word.lower() == word,
        'prefix-1': word[0],
        'suffix-1': word[-1],
    }
    # Character n-gram affixes of length 2 and 3.
    for size in (2, 3):
        features['prefix-%d' % size] = word[:size]
        features['suffix-%d' % size] = word[-size:]
    # Immediate left/right context ('' at sentence boundaries).
    features['prev_word'] = sentence_terms[index - 1] if index != 0 else ''
    features['next_word'] = sentence_terms[index + 1] if index != last_index else ''
    return features
def untag(tagged_sentence):
    """
    Strip the POS tags from a tagged sentence.

    :param tagged_sentence: a POS tagged sentence, [(term, tag), ...]
    :type tagged_sentence: list
    :return: the terms only, tags removed
    :rtype: list of strings
    """
    return [term for (term, _tag) in tagged_sentence]
def transform_to_dataset(tagged_sentences):
    """
    Split tagged sentences to X and y datasets and append some basic features.

    :param tagged_sentences: a list of POS tagged sentences
    :type tagged_sentences: list of list of tuples (term_i, tag_i)
    :return: (X, y) where X holds one feature dict per token and y the tags
    """
    X, y = [], []
    for tagged in tagged_sentences:
        # The feature extractor works on the bare terms.
        words = untag(tagged)
        for position, (_term, tag) in enumerate(tagged):
            X.append(add_basic_features(words, position))
            y.append(tag)
    return X, y
def build_model(input_dim, hidden_neurons, output_dim):
    """
    Construct, compile and return a Keras model which will be used to fit/predict
    """
    model = Sequential()
    # Two identical hidden blocks: dense -> relu -> dropout.
    model.add(Dense(hidden_neurons, input_dim=input_dim))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(hidden_neurons))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    # Softmax output over the POS tag classes.
    model.add(Dense(output_dim, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):
    """ Plot model loss and accuracy through epochs. """
    green = '#72C29B'
    orange = '#FFA577'
    # xkcd style for hand-drawn looking axes.
    with plt.xkcd():
        fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))
        # Top panel: loss curves (training vs validation).
        ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,
                 label='training')
        ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,
                 linewidth=5, label='validation')
        ax1.set_xlabel('# epoch')
        ax1.set_ylabel('loss')
        ax1.tick_params('y')
        ax1.legend(loc='upper right', shadow=False)
        ax1.set_title('Model loss through #epochs', fontweight='bold')
        # Bottom panel: accuracy curves.
        ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,
                 label='training')
        ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,
                 linewidth=5, label='validation')
        ax2.set_xlabel('# epoch')
        ax2.set_ylabel('accuracy')
        ax2.tick_params('y')
        ax2.legend(loc='lower right', shadow=False)
        ax2.set_title('Model accuracy through #epochs', fontweight='bold')
        plt.tight_layout()
        plt.show()
if __name__ == '__main__':
    nb_samples = 100
    # Ensure reproducibility
    np.random.seed(CUSTOM_SEED)
    sentences = treebank.tagged_sents(tagset='universal')[:nb_samples]
    print('a random sentence: \n-> {}'.format(random.choice(sentences)))
    tags = set([tag for sentence in treebank.tagged_sents() for _, tag in sentence])
    print('nb_tags: {}\ntags: {}'.format(len(tags), tags))
    # We use approximately 60% of the tagged sentences for training,
    # 20% as the validation set and 20% to evaluate our model.
    train_test_cutoff = int(.80 * len(sentences))
    training_sentences = sentences[:train_test_cutoff]
    testing_sentences = sentences[train_test_cutoff:]
    train_val_cutoff = int(.25 * len(training_sentences))
    validation_sentences = training_sentences[:train_val_cutoff]
    training_sentences = training_sentences[train_val_cutoff:]
    # For training, validation and testing sentences, we split the
    # attributes into X (input variables) and y (output variables).
    X_train, y_train = transform_to_dataset(training_sentences)
    X_test, y_test = transform_to_dataset(testing_sentences)
    X_val, y_val = transform_to_dataset(validation_sentences)
    # Fit our DictVectorizer with our set of features
    dict_vectorizer = DictVectorizer(sparse=False)
    dict_vectorizer.fit(X_train + X_test + X_val)
    # Convert dict features to vectors
    X_train_vect = dict_vectorizer.transform(X_train)
    X_test_vect = dict_vectorizer.transform(X_test)
    X_val_vect = dict_vectorizer.transform(X_val)
    # Fit LabelEncoder with our list of classes
    label_encoder = LabelEncoder()
    label_encoder.fit(y_train + y_test + y_val)
    # Encode class values as integers
    y_train_enc = label_encoder.transform(y_train)
    y_test_enc = label_encoder.transform(y_test)
    y_val_enc = label_encoder.transform(y_val)
    # Convert integers to dummy variables (one hot encoded)
    y_train_dummy = np_utils.to_categorical(y_train_enc)
    y_test_dummy = np_utils.to_categorical(y_test_enc)
    y_val_dummy = np_utils.to_categorical(y_val_enc)
    # Set model parameters
    model_params = {
        'build_fn': build_model,
        'input_dim': X_train_vect.shape[1],
        'hidden_neurons': 512,
        'output_dim': y_train_dummy.shape[1],
        'epochs': 5,
        'batch_size': 256,
        'verbose': 1,
        'validation_data': (X_val_vect, y_val_dummy),
        'shuffle': True
    }
    # Create a new sklearn classifier
    clf = KerasClassifier(**model_params)
    # Finally, fit our classifier
    hist = clf.fit(X_train_vect, y_train_dummy)
    # Plot model performance
    # NOTE(review): newer Keras records 'accuracy'/'val_accuracy' instead
    # of 'acc'/'val_acc' — confirm against the pinned Keras version.
    plot_model_performance(
        train_loss=hist.history.get('loss', []),
        train_acc=hist.history.get('acc', []),
        train_val_loss=hist.history.get('val_loss', []),
        train_val_acc=hist.history.get('val_acc', [])
    )
    # Evaluate model accuracy
    score = clf.score(X_test_vect, y_test_dummy, verbose=0)
    print('model accuracy: {}'.format(score))
    # Compute classification report
    y_preds = clf.predict(X_test_vect)
    # Our target names are our label encoded targets
    target_names = label_encoder.classes_
    # Compute classification report
    classif_report = classification_report(
        y_true=y_test_enc, y_pred=y_preds,
        target_names=target_names
    )
    print(classif_report)
    # Visualize model architecture
    # NOTE(review): writes to relative 'tmp/' while the model below goes
    # to absolute '/tmp/' — confirm which path is intended.
    plot_model(clf.model, to_file='tmp/model_structure.png', show_shapes=True)
    # Finally save model
    clf.model.save('/tmp/keras_mlp.h5')
| 2.859375 | 3 |
Export.py | NicoRicardi/CCParser | 0 | 12767970 | import pandas as pd
import numpy as np
from .QCBase import VarNames
class Exporter(object):
""" Export class which writes parsed data to a certain format"""
valid_formats = ["pdf", "xlsx", "txt", "csv", "dataframe"]
    def __init__(self, data=None):
        # Parsed-results payload; optional because the export helpers
        # also accept a container argument explicitly.
        self.data = data
# for later: add pandas independent functions to export arrays to file
def arrays_to_dframe(self, **kwargs):
""" Using keyworded arguments, expects arrays """
try:
df = pd.DataFrame(kwargs)
except ValueError: #if arrays do not have the same length
d = {}
for key, value in kwargs.items():
d[key] = pd.Series(value)
df = pd.DataFrame(d)
return df
def ExcitedStateSummary(self, results, fname="es_smry", fmt="csv",
ground_state=False):
""" Exports energy related excited state quantities to file
Parameters
----------
results : CCParser.ParseContainer
Parsing container that holds parsed values.
fname : string
Filename prefix.
fmt : string
Output format ('csv', 'xlsx'/'xls' or 'df' for pandas.DataFrame).
ground_state : bool
Whether to include an empty line in the table for the ground state.
"""
if fmt not in Exporter.valid_formats:
raise ValueError("File format '{0:}' not recognized or supported!".format(fmt))
if False in getattr(results, VarNames.has_converged).data:
raise ValueError("Not converged state detected!")
d = {}
# (1) Excitation energies (default minimum)
#if hasattr(results, VarNames.exc_energy_rel):
d[VarNames.exc_energy_rel] = getattr(results, VarNames.exc_energy_rel).data
n_states = len(d[VarNames.exc_energy_rel])
# (2) Oscillator strengths
if hasattr(results, VarNames.osc_str):
d[VarNames.osc_str] = getattr(results, VarNames.osc_str).data
# (3) Amplitudes
if hasattr(results, VarNames.amplitudes):
ampl = getattr(results, VarNames.amplitudes)
pieces = [a.to_dataframe() for a in ampl]
key = [x for x in range(1,len(pieces)+1)]
amp_df = pd.concat(pieces, keys=key, names=["State", "Row ID"])
# prepare MultiIndex (there has to be a better way to do that...)
arrays = [[x for x in range(1, n_states+1)],
[0 for x in range(n_states)]]
tuples = list(zip(*arrays))# asterisk unpacks
df1 = pd.DataFrame(d)
df1.index = pd.MultiIndex.from_tuples(tuples, names=["State", "Row ID"])
df = pd.concat([df1, amp_df], axis=1)
# add row to MultiIndex, see https://stackoverflow.com/q/24917700
if ground_state:
df.loc[(0,0),:] = np.nan
df.sort_index(level=0, inplace=True)
# EXPORT TO FILE or dataframe
fout = fname + "." + fmt
if fmt == "csv":
df.to_csv(fout, encoding="utf-8")
elif fmt == ("xlsx" or "xls"):
writer = pd.ExcelWriter(fout)
df.to_excel(writer, "Sheet1")
writer.save()
elif fmt.lower() == ("dataframe" or "df"):
return df
def ReducedWeights(self, results, nbsfA, extern=None, fmt="print",
fname="AmplAnl", silent=False):
""" Calculate reduced weights based on fragment information.
The reduced weight for a single excitation :math:`i \\rightarrow a` is defined as
:math:`v_{i}^{a} = 0.5\\cdot(c_{i,A}^{2} + c_{a,A}^{2})\\cdot w_{i}^{a}`, with
c and w being the molecular orbital coefficient and transition weight,
respectively.
The MO coefficients from the output first have to be transformed to an
orthonormal basis.
Parameters
----------
results : CCParser.ParseContainer
Container object which contains excited state amplitudes
nbsfA : int
Number of basis functions on System A (assumes system A comes first!)
extern : CCParser.ParseContainer
Optional second container which contains orthonormalisation matrix and/or MO coefficients
fmt : string
Output format. Available are "print", "dataframe", "xlsx" or "csv"
fname : string
Output file name (basename only).
silent : bool
Whether to ignore lengthy printouts.
"""
# consistency
has_extern = True if extern != None else False
if False in getattr(results, VarNames.has_converged).data:
raise ValueError("Not converged state detected!")
if not has_extern and not hasattr(results, VarNames.orthonorm_matrix):
raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
elif has_extern and not hasattr(extern, VarNames.orthonorm_matrix):
raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
elif not has_extern and not hasattr(results, VarNames.mo_coefficients):
raise AttributeError("Could not find MO coefficients! Were they parsed?")
elif has_extern and not hasattr(extern, VarNames.mo_coefficients):
raise AttributeError("Could not find MO coefficients! Were they parsed?")
elif not hasattr(results, VarNames.amplitudes):
raise AttributeError("Could not find amplitudes! Were they parsed?")
elif not hasattr(results, VarNames.n_bas):
raise AttributeError("Could not find number of basis functions! Was it parsed?")
else:
# (1) Orthonormalization matrix, hardcoded last
X = getattr(results, VarNames.orthonorm_matrix).get_last() if not \
has_extern else getattr(extern, VarNames.orthonorm_matrix).get_last()
X_inv = np.linalg.inv(X)
# (2) MO coeffiecients, hardcoded last
C = getattr(results, VarNames.mo_coefficients).get_last() if not \
has_extern else getattr(extern, VarNames.mo_coefficients).get_last()
C_prime = C * X_inv # Szabo, Ostlund, page 142
max_mo = C.shape[0]
# (3) Amplitudes
ampl = getattr(results, VarNames.amplitudes)
n_states = len(ampl)
# (4) Number of basis functions
nbsf = getattr(results, VarNames.n_bas).get_last()
# (4) Output variables
sum_weights = [0 for i in range(n_states)]
sum_redweights = [0 for i in range(n_states)]
# --------------
sos_A = [0 for a in range(C_prime.shape[0])]
sos_B = [0 for a in range(C_prime.shape[0])]
for c, vect in enumerate(C_prime):
for n in range(nbsf):
if n < nbsfA:
sos_A[c] += vect[0,n]**2
else:
sos_B[c] += vect[0,n]**2
for i,a in enumerate(ampl):#state
for t in range(len(a.occ)):#transition
if max(a.virt[t]) > max_mo:
if not silent:
print("State {0:>2d}: Omitting transition with weight \
{1:.1%} due to missing MO coefficients.".format(i+1, a.weights[t]))
continue
if len(a.occ[t]) == 1:#single amplitudes
rw = 0.5*(sos_A[a.occ[t][0]-1] + sos_A[a.virt[t][0]-1]) * a.weights[t]
elif len(a.occ[t]) == 2:#double amplitudes
rw = 0.25*(sos_A[a.occ[t][0]-1] + sos_A[a.occ[t][1]-1] +
sos_A[a.virt[t][0]-1] + sos_A[a.virt[t][1]-1]
)*a.weights[t]
else:
raise IndexError("Currently no more than double \
amplitudes are supported!")
sum_weights[i] += a.weights[t]
sum_redweights[i] += rw
#----------------
# Export as
fout = fname + "." + fmt
d = {"State": [i+1 for i in range(n_states)],
"sum_weight" : sum_weights,
"sum_red_weight" : sum_redweights}
df = pd.DataFrame(d)
df = df.assign(diff=df["sum_weight"]-df["sum_red_weight"],
ratio=df["sum_red_weight"]/df["sum_weight"])
if fmt == "print":
print("State | Sum(W) | Sum(P) | Sum(W) - Sum(P) | ratio P/W |\n",50*"-")
for i in range(n_states):
print(" S{0:>2d} | {1:.3f} | {2:.3f} | {3:15.3f} | {4:.1%}".format(
i+1, sum_weights[i], sum_redweights[i], sum_weights[i] -
sum_redweights[i], sum_redweights[i]/sum_weights[i]))
elif fmt == "dataframe":
return df
elif fmt == "csv":
df.to_csv(fout, encoding="utf-8")
elif fmt == "xlsx" or fmt == "xls":
writer = pd.ExcelWriter(fout)
df.to_excel(writer, "Sheet1")
writer.save()
else:
raise ValueError("Output format not supported!")
def MO_Molden(self, results, atom_basis, fname="molecular_orbitals",
tmp_5d=True):
""" Writes molecular orbitals to a molden file.
Expects molecular geometry in Angstrom.
More information on the molden format at
http://www.cmbi.ru.nl/molden/molden_format.html
Parameters
----------
results : CCParser.ParseContainer
Container object which holds MO coefficients.
exponents : dict
Dictionary mapping GTO exponents/coefficients to atoms. Expected
format of dictionary entry is list of strings.
fname : string
Output file name.
"""
from .QCBase import PeriodicTable
import re
C = results.C.get_last()
xyz = results.xyz.get_last()
en = results.mo_energies.get_last()
PeTa = PeriodicTable()
#TODO: Permutator needed in case of different formats (Molcas, Gaussian)
with open(fname+".molden", "w") as out:
out.write("[Molden Format]\n")
# write XYZ
out.write("[Atoms] (Angs)\n")
for i,atom in enumerate(xyz):
num = PeTa.get_atomic_num(atom[0])
out.write("{0:>3}{1:7d}{2:5d}".format(atom[0], i+1, num))
out.write("".join("{0:16.8f}".format(c) for c in atom[1:])+"\n")
# write basis exponents
out.write("[GTO]\n")
for n in range(len(xyz)):
# atom sequence number, 0
out.write("{0:d}{1:5d}\n".format(n+1, 0))
symb = xyz[n][0].upper()
#a = atom.upper()
basis = atom_basis[symb]
for coeff in basis:
# shell label, number of primitives, 1.00
if re.search(r"[SDPF]", coeff[0]):
out.write("{0:}{1:6d}{2:12.6f}\n".format(
coeff[0], int(coeff[1]), float(coeff[2])))
# exponent, contraction coefficient
else:
out.write("{0:18.8e}{1:18.8e}\n".format(
float(coeff[0]), float(coeff[1])))
out.write("\n")
for imo in range(C.shape[0]):#assumes counting from MO 1 !!
out.write("[MO]\nSym=X\n")
if imo < en.n_occ:#occupied
out.write("Ene={0:12.6f}\n".format(en.occ[imo]))
out.write("Spin=alpha\n")
out.write("Occup=1\n")
else:#virtual
out.write("Ene={0:12.6f}\n".format(en.virt[imo]))
out.write("Spin=alpha\n")
out.write("Occup=0\n")
for i in range(C.shape[1]):
out.write("{0:6d}{1: 22.12e}\n".format(i+1,C[imo, i]))
if tmp_5d:
out.write("[5D]\n")
print("MOs written to Molden file.")
| 2.90625 | 3 |
m/commands/accumulate.py | minersoft/miner | 1 | 12767971 | #
# Copyright <NAME>, 2012-2014
#
import miner_globals
import m.common as common
from base import *
def p_accumulate_coals(p):
'''command :
| ACCUMULATE'''
p[0] = DefaultAccumulateCommand()
def p_accumulate_command(p):
'''command : ACCUMULATE id_list BY expression'''
p[0] = AccumulateCommand(p[2], p[4])
class AccumulateCommand(TypicalCommand):
NAME = "ACCUMULATE BY"
SHORT_HELP = "ACCUMULATE id [,...] BY accumulatorClass - accumulates coal records"
LONG_HELP = """ACCUMULATE id [,...] BY accumulatorClass
Performs custom accumulation logic.
"""
def __init__(self, accumulatorVariables, accumulatorClass):
TypicalCommand.__init__(self)
self.myAccumulatorVariables = accumulatorVariables
self.myAccumulatorClass = accumulatorClass
self.yieldVal = "(accumulated,)" if len(self.myAccumulatorVariables)==1 else "accumulated"
def getStart(self):
return """
import types
_acc = %s
if isinstance(_acc,types.ClassType):
accumulator = _acc()
else:
accumulator = _acc
""" % self.myAccumulatorClass
def getBody(self):
return """
for accumulated in accumulator.accumulate(%s):
yield %s
""" % (", ".join(self.myAccumulatorVariables), self.yieldVal)
def getEnd(self):
return """
for accumulated in accumulator.finish():
yield %s
""" % self.yieldVal
def getRequiredVariables(self):
return []
def getVariableNames(self):
return self.myAccumulatorVariables
class DefaultAccumulateCommand(TypicalCommand):
NAME = "ACCUMULATE"
SHORT_HELP = "ACCUMULATE|<empty> - accumulates coal records"
LONG_HELP = """ACCUMULATE
| <empty> |
Performs context dependent accumulation.
ACCUMULATE id [,...] BY accumulatorClass[(params)]
Performs custom accumulation
"""
def setParent(self, parent):
GeneratorBase.setParent(self, parent)
self.accumulatorTuple = miner_globals.getAccumulator(self.myParent.getVariableNames())
def getStart(self):
if not self.accumulatorTuple:
raise common.CompilationError("Invalid input for accumulation")
return """ accumulator = %s()\n""" % self.accumulatorTuple[1]
def getBody(self):
return """
for accumulated in accumulator.accumulate(%s):
yield (accumulated, )
""" % self.accumulatorTuple[0]
def getEnd(self):
return """
for accumulated in accumulator.finish():
yield (accumulated, )
"""
def getRequiredVariables(self):
return [self.accumulatorTuple[0]]
def getVariableNames(self):
return [self.accumulatorTuple[0]]
miner_globals.addHelpClass(AccumulateCommand)
miner_globals.addHelpClass(DefaultAccumulateCommand)
miner_globals.addKeyWord(command="ACCUMULATE")
miner_globals.addKeyWord(keyword="BY")
| 2.46875 | 2 |
2016/day10.py | andypymont/adventofcode | 0 | 12767972 | """
2016 Day 10
https://adventofcode.com/2016/day/10
"""
from dataclasses import dataclass
from typing import Dict, List, Sequence, Tuple
import re
import aocd # type: ignore
# Matches "value 5 goes to bot 2" -> (value, target genre, target number)
re_value = re.compile(r"value (\d+) goes to (\w+) (\d+)")
# Matches "bot 2 gives low to bot 1 and high to output 0"
# -> (bot no, low genre, low no, high genre, high no)
re_robot = re.compile(r"bot (\d+) gives low to (\w+) (\d+) and high to (\w+) (\d+)")
@dataclass(frozen=True)
class Target:
    """
    Target within the system to deliver numbers to - defined by its genre (bot or output) and its
    reference number.
    """

    # either "bot" or "output" (dispatched on in Environment.deliver)
    genre: str
    # reference number of the bot or output slot
    number: int
@dataclass
class Robot:
    """
    A bot that knows its two delivery targets. It buffers incoming numbers
    until it holds two of them, then hands the low and high value back to
    the environment for delivery to the corresponding targets.
    """

    environ: "Environment"
    holding: List[int]
    give_low: Target
    give_high: Target

    def __init__(self, environ: "Environment", match_groups: Sequence[str]):
        lo_genre, lo_number, hi_genre, hi_number = match_groups
        self.environ = environ
        self.holding = []
        self.give_low = Target(lo_genre, int(lo_number))
        self.give_high = Target(hi_genre, int(hi_number))

    def add(self, value: int) -> None:
        """
        Give a number to this robot. On receiving its second number, the
        robot dispatches environ.deliver(..) for the low and high values
        it holds, towards their respective targets.
        """
        self.holding.append(value)
        if len(self.holding) < 2:
            return
        low, high = sorted(self.holding)
        self.environ.deliver(self.give_low, low)
        self.environ.deliver(self.give_high, high)
@dataclass
class Environment:
    """
    An interconnected system of robots and outputs.
    """

    robots: Dict[int, Robot]
    outputs: Dict[int, int]

    def __init__(self, instructions: str):
        self.robots = {}
        self.outputs = {}
        # first create every robot so deliveries can chain immediately
        for groups in re_robot.findall(instructions):
            self.robots[int(groups[0])] = Robot(self, groups[1:])
        # then seed the system with the initial values
        for value, genre, number in re_value.findall(instructions):
            self.deliver(Target(genre, int(number)), int(value))

    def deliver(self, target: Target, value: int) -> None:
        """
        Deliver a number to its target: either stored into this
        Environment's outputs, or passed to the relevant robot's add(..).
        """
        if target.genre == "bot":
            self.robots[target.number].add(value)
        elif target.genre == "output":
            self.outputs[target.number] = value

    def find_robot_holding(self, search: Tuple[int, int]) -> int:
        """
        Return the ID of the robot that ends up holding the given pair of
        values, or -1 if no robot does.
        """
        wanted = set(search)
        return next(
            (number for number, bot in self.robots.items()
             if set(bot.holding) == wanted),
            -1,
        )
def main() -> None:
    """
    Solve both puzzle parts against the real input and print the answers.
    """
    env = Environment(aocd.get_data(year=2016, day=10))
    part1 = env.find_robot_holding((61, 17))
    part2 = env.outputs[0] * env.outputs[1] * env.outputs[2]
    print(f"Part 1: {part1}")
    print(f"Part 2: {part2}")


if __name__ == "__main__":
    main()
| 3.640625 | 4 |
test/scorer_t.py | kariminf/MultiNetSum | 1 | 12767973 | <reponame>kariminf/MultiNetSum
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 <NAME> <<EMAIL>>
#
# ---- AUTHORS ----
# 2018 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#just a code to learn and test (a ==> b)
# NOTE(review): Python 2 script (bare `print` statements) using the
# TensorFlow 1.x graph API (tf.placeholder / tf.Session).

import tensorflow as tf
import sys
sys.path.insert(0, "..")
from ml2extrasum.scoring.scorer import Scorer

# Truth table for logical implication (a ==> b): inputs ...
X = [
    [1.0, 1.0],
    [0.0, 0.0],
    [1.0, 0.0],
    [0.0, 1.0]
]
# ... and expected outputs, row by row.
R = [
    [1.0],
    [1.0],
    [0.0],
    [1.0]
]

LEARNING_RATE = 0.05

x = tf.placeholder(tf.float32, shape=[None, 2], name="inputs")
r = tf.placeholder(tf.float32, shape=[None ,1], name="result")

# Small MLP: 2 inputs -> 4 tanh units -> 1 tanh unit, output in (-1, 1).
s1 = Scorer("scorer1").add_input(x).add_layer(4,tf.nn.tanh).add_layer(1,tf.nn.tanh)

out = s1.get_output()

# Rescale tanh output from (-1, 1) to (0, 1) so it can be read as a
# probability: output = (out + 1) * 0.5
output = tf.add(out, 1)
output = tf.multiply(output, 0.5)

# Binary cross-entropy loss against the target r.
cost = - tf.reduce_mean( (r * tf.log(output)) + (1 - r) * tf.log(1.0 - output) )

train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cost)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

# Train on the full truth table; prints iteration, loss and raw outputs.
for i in range(20000):
    _, cst, o = sess.run([train_step, cost, out], feed_dict={x: X, r: R})
    print i, cst, o

# Single sanity-check prediction: 1 ==> 0 should be (close to) 0.
tX = [[1.0, 0.0]]
tR = [[0.0]]
print("Predicted: ", sess.run(output,feed_dict={x: tX}), " Expected: ", tR)
| 2.4375 | 2 |
srp/plots.py | jfemiani/srp-boxes | 0 | 12767974 | <reponame>jfemiani/srp-boxes
import pathlib
import pickle

import matplotlib.pyplot as plt
import numpy as np
def plot_rgb(stack):
    """Display the first three channels of *stack* as an RGB image."""
    rgb = stack[:3].transpose(1, 2, 0)
    plt.imshow(rgb.clip(0, 1))
    plt.xlim(0, stack.shape[1])
    plt.ylim(0, stack.shape[2])
def plot_lidar(stack, alpha=1):
    """Overlay channels 4:7 of *stack* as a translucent pseudo-color image.

    Channel values are squashed through a logistic sigmoid into (0, 1) and
    the per-pixel alpha is scaled by the strongest channel, so empty areas
    stay transparent.

    Parameters
    ----------
    stack : array of shape (C, H, W) with at least 7 channels.
        assumes channels 4:7 hold the lidar pseudo-color planes — TODO confirm
    alpha : float
        Global opacity multiplier in [0, 1].
    """
    pseudo = stack[4:7].transpose(1, 2, 0)
    # BUG FIX: `sigmoid` was undefined in this module; inline the logistic
    # function (also removes the no-op `pseudo = pseudo` line).
    pseudo = 1.0 / (1.0 + np.exp(-pseudo))
    # alpha channel follows the strongest pseudo-color channel
    alpha_ = pseudo.max(2)[..., None] * alpha
    pseudo = np.concatenate([pseudo, alpha_], axis=2)
    plt.imshow(pseudo)
    plt.xlim(0, stack.shape[1])
    plt.ylim(0, stack.shape[2])
def _set_grid_spacing(minor, major):
    """Draw solid major and dashed minor grid lines on the current axes.

    Parameters
    ----------
    minor, major : float
        Tick spacing (data units) for the minor and major grids.
    """
    from matplotlib.ticker import MultipleLocator
    # BUG FIX: was a bare `gca()` — NameError, only `plt` is imported here.
    ax = plt.gca()
    ax.xaxis.set_minor_locator(MultipleLocator(minor))
    ax.xaxis.set_major_locator(MultipleLocator(major))
    ax.yaxis.set_minor_locator(MultipleLocator(minor))
    ax.yaxis.set_major_locator(MultipleLocator(major))
    ax.grid(which='major')
    ax.grid(which='minor', linestyle='--')
def load_stack(path):
    """Load a pickled sample and return its rgb + volumetric channels stacked.

    Parameters
    ----------
    path : str or pathlib.Path
        Path to a pickle file whose payload exposes ``rgb`` and
        ``volumetric`` array attributes.

    Returns
    -------
    numpy.ndarray with the rgb channels followed by the volumetric ones.

    NOTE(review): pickle.load must only be fed trusted files.
    """
    # BUG FIX: was `isinstance(sample, pathlib.PosixPath)` — `sample` is
    # undefined (NameError) and PosixPath does not exist on Windows;
    # check the actual argument against the portable Path base class.
    if isinstance(path, pathlib.Path):
        path = path.as_posix()
    with open(path, 'rb') as handle:
        p = pickle.load(handle)
    return np.concatenate((p.rgb, p.volumetric))
def plot3(stack, major=None, minor=None):
    """Show rgb, mixed and lidar views of *stack* side by side.

    Parameters
    ----------
    stack : (C, H, W) array with rgb in channels 0:3 and lidar in 4:7.
    major, minor : float, optional
        Grid spacings; default to half the image width and a quarter of
        the major spacing respectively.
    """
    if major is None:
        major = stack.shape[1] // 2
    if minor is None:
        minor = major // 4
    # BUG FIX: `subplot`/`title` were bare names (NameError: only `plt` is
    # imported) and `set_grid_spacing` does not exist — the helper in this
    # module is `_set_grid_spacing`.
    plt.subplot(131)
    plot_rgb(stack)
    _set_grid_spacing(minor, major)
    plt.title('rgb')
    plt.subplot(132)
    plot_rgb(stack)
    plot_lidar(stack)
    _set_grid_spacing(minor, major)
    plt.title('mixed')
    plt.subplot(133, facecolor='black')
    plot_lidar(stack)
    _set_grid_spacing(minor, major)
    plt.title('lidar')
pydcop/infrastructure/agents.py | rpgoldman/pyDcop | 0 | 12767975 | <reponame>rpgoldman/pyDcop<filename>pydcop/infrastructure/agents.py
# BSD-3-Clause License
#
# Copyright 2017 Orange
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Base 'Agent' classes.
An Agent instance is a stand-alone autonomous object. It hosts computations,
which send messages to each other.
Each agent has its own thread, which is used to handle messages as they are
dispatched to computations hosted on this agent.
"""
import logging
import sys
import threading
import traceback
import random
from functools import partial
from importlib import import_module
from threading import Thread
from time import perf_counter, sleep
from typing import Dict, List, Optional, Union, Callable, Tuple, Mapping
from collections import defaultdict
from pydcop.algorithms import AlgorithmDef, ComputationDef, load_algorithm_module
from pydcop.dcop.objects import AgentDef, create_binary_variables
from pydcop.dcop.objects import BinaryVariable
from pydcop.dcop.relations import Constraint
from pydcop.infrastructure.Events import event_bus
from pydcop.infrastructure.communication import Messaging, \
CommunicationLayer, UnreachableAgent
from pydcop.infrastructure.computations import MessagePassingComputation, \
build_computation
from pydcop.infrastructure.discovery import Discovery, UnknownComputation, \
UnknownAgent, _is_technical
from pydcop.infrastructure.ui import UiServer
from pydcop.reparation import create_computation_hosted_constraint, \
create_agent_capacity_constraint, create_agent_hosting_constraint, \
create_agent_comp_comm_constraint
from pydcop.computations_graph import constraints_hypergraph as chg
class AgentException(Exception):
    """Base exception for errors raised by :class:`Agent` operations."""
    pass
class Agent(object):
"""
Object representing an agent.
An agent communicates with other agents though messages, using a
`CommunicationLayer`
An agent hosts message passing computations and run these computations on
its own thread.
Notes
-----
An agent does not necessarily need to known it's own definition (see
agent_def argument) but is needs it for some use like replication in
resilient DCOP.
Parameters
----------
name: str
name of the agent
comm: CommunicationLayer
object used to send and receive messages
agent_def: AgentDef
definition of this agent, optional
ui_port: int
the port on which to run the ui-server. If not given, no ui-server is
started.
delay: int
An optional delay between message delivery, in second. This delay
only applies to algorithm's messages and is useful when you want to
observe (for example with the GUI) the behavior of the algorithm at
runtime.
daemon: boolean
indicates if the agent should use a daemon thread (defaults to False)
See Also
--------
MessagePassingComputation, CommunicationLayer
"""
    def __init__(self, name,
                 comm: CommunicationLayer,
                 agent_def: AgentDef = None,
                 ui_port: int = None,
                 delay: float = None,
                 daemon: bool = False):
        # See the class docstring for parameter documentation.
        self._name = name
        self.agent_def = agent_def
        self.logger = logging.getLogger('pydcop.agent.' + name)
        self.agt_metrics = AgentMetrics()

        # Setup communication and discovery.
        # NOTE(review): the wiring order matters — Discovery needs the
        # comm address, and its discovery computation sends through the
        # Messaging created just after it.
        self._comm = comm
        self.discovery = Discovery(self._name, self.address)
        self._comm.discovery = self.discovery
        self._messaging = Messaging(name, comm, delay=delay)
        self.discovery.discovery_computation.message_sender = \
            self._messaging.post_msg

        # Ui server: only created in _on_start, and only if a port was given.
        self._ui_port = ui_port
        self._ui_server = None

        # The agent's own thread; started by start(), runs _run.
        self.t = Thread(target=self._run, name='thread_' + name)
        self.t.daemon = daemon
        self._stopping = threading.Event()
        self._shutdown = threading.Event()
        self._running = False
        # _idle means that we have finished to handle all incoming messages
        self._idle = False

        # Hosted computations, keyed by computation name.
        self._computations = {}  # type: Dict[str, MessagePassingComputation]

        # Cumulated time (seconds) spent doing actual work, for metrics.
        self.t_active = 0
        # time when run the first non-technical computation is run
        self._run_t = None
        # time when starting the agent
        self._start_t = None

        # Tasks that must run periodically as {callable: (period, last_run)}
        self._periodic_cb = {}  # type: Dict[Callable, Tuple[float, float]]

        # List of paused computations, any computation whose name is in this
        # list will not receive any message.
        self.paused_computations = []
@property
def communication(self) -> CommunicationLayer:
"""
The communication used by this agent.
Returns
-------
CommunicationLayer
The communication used by this agent.
"""
return self._comm
    def add_computation(self, computation: MessagePassingComputation,
                        comp_name=None, publish=True):
        """
        Add a computation to the agent.

        The computation will run on this agent thread and receives messages
        through his Messaging and CommunicationLayer.

        Parameters
        ----------
        computation: a MessagePassingComputation
            the computation to be added
        comp_name: str
            an optional name for the computation, if not given
            computation.name will be used.
        publish: bool
            True (default) is the computation must be published on the
            discovery service.
        """
        comp_name = computation.name if comp_name is None else comp_name
        self.logger.debug('Add computation %s - %s ',
                          comp_name, self._messaging)
        # Wire the computation's outgoing messages through this agent.
        computation.message_sender = self._messaging.post_msg
        computation.periodic_action_handler = self
        self._computations[comp_name] = computation
        self.discovery.register_computation(comp_name, self.name, self.address,
                                            publish=publish)
        # start lookup for agent hosting a neighbor computation
        if hasattr(computation, 'computation_def') and \
                computation.computation_def is not None:
            for n in computation.computation_def.node.neighbors:
                self.discovery.subscribe_computation(n)

        # Wrap the computation's callbacks so that this agent is notified
        # of value selections, cycle changes and termination.
        # NOTE(review): only computations exposing these hooks get wrapped.
        if hasattr(computation, '_on_value_selection'):
            computation._on_value_selection = notify_wrap(
                computation._on_value_selection,
                partial(self._on_computation_value_changed, computation.name))
        if hasattr(computation, '_on_new_cycle'):
            computation._on_new_cycle = notify_wrap(
                computation._on_new_cycle,
                partial(self._on_computation_new_cycle, computation.name))
        computation.finished = notify_wrap(
            computation.finished,
            partial(self._on_computation_finished, computation.name))

        event_bus.send("agents.add_computation." + self.name,
                       (self.name, computation))
def remove_computation(self, computation: str) -> None:
"""
Removes a computation from the agent.
Parameters
----------
computation: str
the name of the computation
Raises
------
UnknownComputation
If there is no computation with this name on this agent
"""
try:
comp = self._computations.pop(computation)
except KeyError:
self.logger.error(
'Removing unknown computation %s - current commutations : %s',
computation, self._computations)
raise UnknownComputation(computation)
if comp.is_running:
comp.stop()
self.logger.debug('Removing computation %s', comp)
self.discovery.unregister_computation(computation, self.name)
event_bus.send("agents.rem_computation." + self.name,
(self.name, computation))
def computations(self, include_technical=False) -> \
List[MessagePassingComputation]:
"""
Computations hosted on this agent.
Parameters
----------
include_technical: bool
If True, technical computations (like discovery, etc.) are
included in the list.
Returns
-------
List[MessagePassingComputation]
A list of computations hosted on this agents. This list is a copy
and can be safely modified.
"""
if include_technical:
return list(self._computations.values())
else:
return [c for c in self._computations.values()
if not c.name.startswith('_')]
def computation(self, name: str) -> MessagePassingComputation:
"""
Get a computation hosted by this agent.
Parameters
----------
name: str
The name of the computation.
Returns
-------
The Message passing corresponding to the given name.
Raises
------
UnknownComputation
if the agent has no computation with this name.
See Also
--------
add_computation
"""
try:
return self._computations[name]
except KeyError:
self.logger.error('unknown computation %s', name)
raise UnknownComputation('unknown computation ' + name)
@property
def address(self):
"""
The address this agent can be reached at.
The type of the address depends on the instance and type of the
CommunicationLayer used by this agent.
Returns
-------
The address this agent can be reached at.
"""
return self._comm.address
def start(self, run_computations=False):
"""
Starts the agent.
One started, an agent will dispatch any received message to the
corresponding target computation.
Notes
-----
Each agent has it's own thread, this will start the agent's thread,
run the _on_start callback and waits for message. Incoming message are
added to a queue and handled by calling the _handle_message callback.
The agent (and its thread) will stop once stop() has been called and
he has finished handling the current message, if any.
See Also
--------
_on_start(), stop()
"""
if self.is_running:
raise AgentException('Cannot start agent {}, already running '
.format(self.name))
self.logger.info('Starting agent %s ', self.name)
self._running = True
self.run_computations = run_computations
self._start_t = perf_counter()
self.t.start()
def run(self, computations: Optional[Union[str, List[str]]] = None):
"""
Run computations hosted on this agent.
Notes
-----
Attempting to start an already running computation is harmless : it
will be logged but will not raise an exception.
The first time this method is called, timestamp is stored, which is used
as a reference when computing metrics.
Parameters
----------
computations: Optional[Union[str, List[str]]]
An optional computation name or list of computation names. If None,
all computations hosted on this agent are started.
Raises
------
AgentException
If the agent was not started (using agt.start()) before calling
run().
UnknownComputation
If some of the computations are not hosted on this agent. All
computations really hosted on the agent are started before raising
this Exception.
"""
if not self.is_running:
raise AgentException('Cannot start computation on agent %s which '
'is not started', self.name)
if computations is None:
self.logger.info('Starting all computations')
else:
if isinstance(computations, str):
computations = [computations]
else:
# avoid modifying caller's variable
computations = computations[:]
self.logger.info('Starting computations %s', computations)
if self._run_t is None:
# We start counter time only when the first computation is run,
# to avoid counting idle time when we wait for orders.
self._run_t = perf_counter()
on_start_t = perf_counter()
for c in list(self._computations.values()):
if computations is None:
if c.is_running:
self.logger.debug(f'Do not start computation {c.name}, already '
'running')
else:
c.start()
elif c.name in computations:
if c.is_running:
self.logger.debug(f'Do not start computation {c.name}, already '
'running')
else:
c.start()
computations.remove(c.name)
# add the time spent in on_start to the active time of the agent.
self.t_active += perf_counter() - on_start_t
if computations:
raise UnknownComputation('Could not start unknown computation %s',
computations)
@property
def start_time(self) -> float:
"""
float:
timestamp for the first run computation call. This timestamp is
used as a reference when computing various time-related metrics.
"""
return self._run_t
def clean_shutdown(self):
"""
Perform a clean shutdown of the agent.
All pending messages are handled before stopping the agent thread.
This method returns immediately, use `join` to wait until the agent's
thread has stopped.
"""
self.logger.debug('Clean shutdown requested')
self._shutdown.set()
self._messaging.shutdown()
def stop(self):
"""
Stops the agent
A computation cannot be interrupted while it handle a message,
as a consequence the agent (and its thread) will stop once it he has
finished handling the current message, if any.
"""
self.logger.debug('Stop requested on %s', self.name)
self._stopping.set()
def pause_computations(self, computations: Union[str, Optional[List[str]]]):
"""
Pauses computations.
Parameters
----------
computations: Union[str, Optional[List[str]]]
The name of the computation to pause, or a list of computations
names. If None, all hosted computation will be paused.
Raises
------
AgentException
If the agent was not started (using agt.start()) before calling
pause_computations().
UnknownComputation
If some of the computations are not hosted on this agent. All
computations really hosted on the agent are paused before raising
this exception.
"""
if not self.is_running:
raise AgentException('Cannot pause computations on agent %s which '
'is not started')
if computations is None:
self.logger.info('Pausing all computations')
else:
if isinstance(computations, str):
computations = [computations]
else:
computations = computations[:]
self.logger.info('Pausing computations %s', computations)
for c in self._computations.values():
if computations is None:
if c.is_paused:
self.logger.warning('Cannot pause computation %s, already '
'paused', c.name)
else:
c.pause(True)
elif c.name in computations:
if c.is_paused:
self.logger.warning('Cannot pause computation %s, already '
'paused', c.name)
else:
c.pause(True)
computations.remove(c.name)
if computations:
raise UnknownComputation('Could not pause unknown computation %s',
computations)
def unpause_computations(self,
computations: Union[str, Optional[List[str]]]):
"""
Un-pause (i.e. resume) computations
Parameters
----------
computations: Optional[List[str]]
TThe name of the computation to resume, or a list of computations
names. If None, all hosted computations will be resumed.
Raises
------
AgentException
If the agent was not started (using agt.start()) before calling
unpause_computations().
UnknownComputation
If some of the computations are not hosted on this agent. All
computations really hosted on the agent are resumed before raising
this exception.
"""
if not self.is_running:
raise AgentException('Cannot resume computations on agent %s which '
'is not started')
if computations is None:
self.logger.info('Resuming all computations')
else:
if isinstance(computations, str):
computations = [computations]
else:
computations = computations[:]
self.logger.info('Resuming computations %s', computations)
for c in self._computations.values():
if computations is None:
if not c.is_paused:
self.logger.warning('Do not resume computation %s, not '
'paused', c.name)
else:
c.pause(False)
elif c.name in computations:
if not c.is_paused:
self.logger.warning('Do not resume computation %s, not '
'paused', c.name)
else:
c.pause(False)
computations.remove(c.name)
if computations:
raise UnknownComputation('Could not resume unknown computation %s',
computations)
@property
def name(self):
"""
str:
The name of the agent.
"""
return self._name
@property
def is_stopping(self) -> bool:
"""
bool:
True if the agent is currently stopping (i.e. handling its last
message).
"""
return self._stopping.is_set()
@property
def is_running(self):
"""
bool:
True if the agent is currently running.
"""
return self._running
def join(self):
self.t.join()
def _on_start(self):
"""
This method is called when the agent starts.
Notes
-----
This method is meant to be overwritten in subclasses that might need to
perform some operations on startup. Do NOT forget to call
`super()._on_start()` ! When `super()._on_start()` return `False`,
you must also return `False` !
This method is always run in the agent's thread, even though the
`start()` method is called from an other thread.
Returns
-------
status: boolean
True if all went well, False otherwise
"""
self.logger.debug('on_start for {}'.format(self.name))
if self._ui_port:
event_bus.enabled = True
self._ui_server = UiServer(self, self._ui_port)
self.add_computation(self._ui_server, publish=False)
self._ui_server.start()
else:
self.logger.debug('No ui server for %s', self.name)
self._computations[self.discovery.discovery_computation.name] = \
self.discovery.discovery_computation
while True:
# Check _stopping: do not prevent agent form stopping !
if self._stopping.is_set():
return False
try:
self.discovery.register_computation(
self.discovery.discovery_computation.name,
self.name, self.address)
except UnreachableAgent:
self.logger.warning("Could not reach directory, will retry "
"later")
sleep(1)
else:
break
self.discovery.register_agent(self.name, self.address)
self.discovery.discovery_computation.start()
return True
    def _on_stop(self):
        """
        This method is called when the agent has stopped.
        It is meant to be overwritten in subclasses that might need to
        perform some operations on stop, however, when overwriting it,
        you MUST call `super()._on_stop()`.
        Notes
        -----
        This method always run in the agent's thread. Messages can still be
        sent in this message, but no new message will be received (as the
        agent's thread has stopped)
        """
        self.logger.debug('on_stop for %s with computations %s ',
                          self.name, self.computations())
        # Unregister computations and agent from discovery.
        # This will also unregister any discovery callbacks this agent may still
        # have.
        for comp in self.computations():
            comp.stop()
            # Only non-technical computations (see _is_technical) are
            # unregistered from discovery.
            if not _is_technical(comp.name):
                try:
                    self.discovery.unregister_computation(comp.name)
                except UnreachableAgent:
                    # when stopping the agent, the orchestrator / directory might have
                    # already left.
                    pass
        if self._ui_server:
            self._ui_server.stop()
        try:
            # Wait a bit to make sure that the stopped message can reach the
            # orchestrator before unregistration.
            sleep(0.5)
            self.discovery.unregister_agent(self.name)
        except UnreachableAgent:
            # when stopping the agent, the orchestrator / directory might have
            # already left.
            pass
def _on_computation_value_changed(self, computation: str, value,
cost, cycle):
"""Called when a computation selects a new value """
pass
def _on_computation_new_cycle(self, computation, *args, **kwargs):
"""Called when a computation starts a new cycle"""
pass
def _on_computation_finished(self, computation: str,
*args, **kwargs):
"""
Called when a computation finishes.
This method is meant to be overwritten in sub-classes.
Parameters
----------
computation: str
name of the computation that just ended.
"""
pass
def _handle_message(self, sender_name: str, dest_name: str, msg, t):
# messages are delivered even to computations which have reached their
# stop condition. It's up the the algorithm to decide if it wants to
# handle the message.
dest = self.computation(dest_name)
dest.on_message(sender_name, msg, t)
def metrics(self):
if self._run_t is None:
activity_ratio = 0
else:
total_t = perf_counter() - self._run_t
activity_ratio = self.t_active / total_t
own_computations = {c.name for c in self.computations(include_technical=True)}
m = {
'count_ext_msg': {k: v
for k, v in self._messaging.count_ext_msg.items()
if k in own_computations},
'size_ext_msg': {k: v
for k, v in self._messaging.size_ext_msg.items()
if k in own_computations},
# 'last_msg_time': self._messaging.last_msg_time,
'activity_ratio': activity_ratio,
'cycles': {c.name: c.cycle_count for c in self.computations()}
}
return m
def messages_count(self, computation: str):
return self._messaging.count_ext_msg[computation]
def messages_size(self, computation: str):
return self._messaging.size_ext_msg[computation]
    def set_periodic_action(self, period: float, cb: Callable):
        """
        Set a periodic action.
        The callback `cb` will be called every `period` seconds. The delay
        is not strict. The handling of a message is never interrupted,
        if it takes longer than `period`, the callback will be delayed and
        will only be called once the task has finished.
        Parameters
        ----------
        period: float
            a period in second
        cb: Callable
            a callback with no argument
        Returns
        -------
        handle:
            An handle that can be used to remove the periodic action.
            This handle is actually the callback object itself.
        """
        assert period is not None
        assert cb is not None
        self.logger.debug("Add periodic action %s - %s ", period, cb)
        # Each entry is a (period, time-of-last-call) pair, consumed by
        # _process_periodic_action.
        self._periodic_cb[cb] = (period, perf_counter())
        return cb
def remove_periodic_action(self, handle):
"""
Remove a periodic action
Parameters
----------
handle:
the handle returned by set_periodic_action
"""
self.logger.debug("Remove action %s ", handle)
self._periodic_cb.pop(handle)
    def _run(self):
        """Main loop of the agent's thread: startup, message pump, teardown.

        Runs `_on_start`, then pulls messages from the messaging layer until
        `_stopping` is set (or `_shutdown` is set and no message is pending),
        finally runs `_on_stop` and shuts the communication layer down.
        """
        self.logger.debug('Running agent ' + self._name)
        full_msg = None
        try:
            self._running = True
            self._on_start()
            if self.run_computations:
                self.run()
            while not self._stopping.is_set():
                # Process messages, if any
                full_msg, t = self._messaging.next_msg(0.05)
                if full_msg is None:
                    self._idle = True
                    if self._shutdown.is_set():
                        self.logger.info("No message during shutdown, "
                                         "stopping agent thread")
                        break
                else:
                    current_t = perf_counter()
                    try:
                        sender, dest, msg, _ = full_msg
                        self._idle = False
                        if not self._stopping.is_set():
                            self._handle_message(sender, dest, msg, t)
                    finally:
                        # Activity time is only accounted once _run_t is set
                        # (presumably when computations start — confirm).
                        if self._run_t is not None:
                            e = perf_counter()
                            msg_duration = e - current_t
                            self.t_active += msg_duration
                            if msg_duration > 1:
                                self.logger.warning(
                                    'Long message handling (%s) : %s',
                                    msg_duration, msg)
                self._process_periodic_action()
        except Exception as e:
            self.logger.error('Thread %s exits With error : %s \n '
                              'Was handling message %s ',
                              self.name, e, full_msg)
            self.logger.error(traceback.format_exc())
            if hasattr(self, 'on_fatal_error'):
                self.on_fatal_error(e)
        except: # catch *all* exceptions
            # Bare except also catches BaseException (e.g. SystemExit),
            # which the clause above does not.
            e = sys.exc_info()[0]
            self.logger.error('Thread exits With un-managed error : %s', e)
            self.logger.error(e)
        finally:
            self._running = False
            self._comm.shutdown()
            self._on_stop()
            self.logger.info('Thread of agent %s stopped', self._name)
    def _process_periodic_action(self):
        """Invoke all periodic callbacks whose period has elapsed."""
        # Process periodic actions, but only once the agent has started
        # (i.e. self._start_t is not None).
        ct = perf_counter()
        if self._start_t is not None:
            for cb, (p, last_t) in list(self._periodic_cb.items()):
                if ct - last_t >= p:
                    # self.logger.debug('periodic cb %s, %s %s ', cb, ct, last_t)
                    # Must update the cb entry BEFORE calling the cb, in case
                    # the cb attempts to modify (e.g. remove) it's own entry by
                    # calling remove_periodic_action
                    self._periodic_cb[cb] = (p, ct)
                    cb()
def is_idle(self):
"""
Indicate if the agent is idle. An idle agent is an agent which has no
pending messages to handle.
:return: True if the agent is idle, False otherwise
"""
return self._idle
def __str__(self):
return 'Agent: ' + self._name
def __repr__(self):
return 'Agent: ' + self._name
def notify_wrap(f, cb):
    """Wrap `f` so that every call invokes `f` and then notifies `cb`.

    The wrapper calls `f(*args, **kwargs)` first, then `cb(*args, **kwargs)`,
    and returns None (both return values are discarded, as in the original).

    Parameters
    ----------
    f: Callable
        the function to wrap.
    cb: Callable
        notification callback, called with the same arguments as `f`,
        after `f` has returned.

    Returns
    -------
    Callable
        the wrapping function, carrying `f`'s metadata.
    """
    from functools import wraps  # local import: file header is outside this edit

    @wraps(f)
    def wrapped(*args, **kwargs):
        f(*args, **kwargs)
        cb(*args, **kwargs)
    return wrapped
class AgentMetrics(object):
    """
    Consolidates per-computation message metrics from event_bus events.

    Subscribes to the 'computations.message_rcv.*' and
    'computations.message_snd.*' topics and keeps, for each computation,
    a (message count, cumulated size) pair.
    """

    def __init__(self):
        self._computation_msg_rcv = defaultdict(lambda: (0, 0))
        self._computation_msg_snd = defaultdict(lambda: (0, 0))
        event_bus.subscribe('computations.message_rcv.*',
                            self._on_computation_msg_rcv)
        event_bus.subscribe('computations.message_snd.*',
                            self._on_computation_msg_snd)

    def computation_msg_rcv(self, computation: str):
        """(count, cumulated size) of received-message events for `computation`."""
        return self._computation_msg_rcv[computation]

    def computation_msg_snd(self, computation: str):
        """(count, cumulated size) of sent-message events for `computation`."""
        return self._computation_msg_snd[computation]

    def _on_computation_msg_rcv(self, _topic: str, msg_event):
        computation, size = msg_event
        count, total = self._computation_msg_rcv[computation]
        self._computation_msg_rcv[computation] = (count + 1, total + size)

    def _on_computation_msg_snd(self, _topic: str, msg_event):
        computation, size = msg_event
        count, total = self._computation_msg_snd[computation]
        self._computation_msg_snd[computation] = (count + 1, total + size)
# DCOP algorithm used for the reparation DCOP (see ResilientAgent.setup_repair).
repair_algo = load_algorithm_module('mgm2')
class RepairComputationRegistration(object):
    """Bookkeeping record for one repair computation hosted on this agent.

    Attributes
    ----------
    computation: MessagePassingComputation
        the repair (binary-variable) computation itself.
    status: str
        current status: 'ready', 'started' or 'finished'.
    candidate: str
        name of the orphaned candidate computation this repair computation
        decides about.
    """

    def __init__(self, computation: MessagePassingComputation,
                 status: str, candidate: str):
        self.computation = computation
        self.status = status
        self.candidate = candidate
class ResilientAgent(Agent):
    """
    An agent that supports resiliency by replicating its computations.
    Parameters
    ----------
    name: str
        name of the agent
    comm: CommunicationLayer
        object used to send and receive messages
    agent_def: AgentDef
        definition of this agent, optional
    ui_port: int
        the port on which to run the ui-server. If not given, no ui-server is
        started.
    replication: str
        name of the replication algorithm
    delay: int
        An optional delay between message delivery, in second. This delay
        only applies to algorithm's messages and is useful when you want to
        observe (for example with the GUI) the behavior of the algorithm at
        runtime.
    """
    def __init__(self, name: str, comm: CommunicationLayer,
                 agent_def: AgentDef, replication: str, ui_port=None,
                 delay: float = None):
        super().__init__(name, comm, agent_def, ui_port=ui_port, delay=delay)
        self.replication_comp = None
        if replication is not None:
            self.logger.debug('deploying replication computation %s',
                              replication)
            # DCOP computations will be added to the replication computation
            # as they are deployed.
            algo_module = import_module('pydcop.replication.{}'
                                        .format(replication))
            self.replication_comp = algo_module.build_replication_computation(
                self, self.discovery)
            # self.add_computation(self.replication_comp)
            # Do not start the computation yet, the agent is not event started
            # NOTE(review): _repair_computations only exists when replication
            # is enabled; all users seen here guard on self.replication_comp
            # — confirm no other access path exists.
            self._repair_computations = \
                {} # type: Dict[str, RepairComputationRegistration]
            # the replication level will be set by the when requested to
            # replicate, by the ReplicateComputationsMessage
            self._replication_level = None
            # Register notification for when all computations have been
            # replicated.
            self.replication_comp.replication_done = notify_wrap(
                self.replication_comp.replication_done,
                self._on_replication_done)
    def _on_start(self):
        """
        Startup hook: also deploys and starts the replication computation.
        See Also
        --------
        Agent._on_start
        Returns
        -------
        status
        """
        self.logger.debug('Resilient agent on_start')
        if not super()._on_start():
            return False
        if self.replication_comp is not None:
            self.add_computation(self.replication_comp)
            self.replication_comp.start()
        return True
    def _on_stop(self):
        # Stop and unregister the replication computation before the base
        # class tear-down.
        if self.replication_comp is not None:
            self.replication_comp.stop()
            self.discovery.unregister_computation(self.replication_comp.name)
        super()._on_stop()
    def add_computation(self, computation: MessagePassingComputation,
                        comp_name=None, publish=True):
        """
        Add a computation to the agent.
        Also registers the computation on the replication computation,
        except for technical ('_'-prefixed) and repair ('B'-prefixed)
        computations.
        See Also
        --------
        Agent.add_computation
        Parameters
        ----------
        computation
        comp_name
        publish
        Returns
        -------
        """
        super().add_computation(computation, comp_name, publish)
        if self.replication_comp is not None \
                and not computation.name.startswith('_') \
                and not computation.name.startswith('B'):
            # FIXME : find a better way to filter out repair computation than
            # looking at the first character (B).
            self.replication_comp.add_computation(computation.computation_def,
                                                  computation.footprint())
    def remove_computation(self, computation: str):
        # Keep the replication computation in sync before removing from the
        # agent itself.
        if self.replication_comp is not None \
                and not computation.startswith('_'):
            self.replication_comp.remove_computation(computation)
        super().remove_computation(computation)
    def replicate(self, k: int):
        """Request the replication of hosted computations at level `k`."""
        if self.replication_comp is not None:
            self._replication_level = k
            self.replication_comp.replicate(k)
    def setup_repair(self, repair_info):
        """Set up the reparation DCOP for the given orphaned computations.

        Parameters
        ----------
        repair_info:
            map {candidate computation name -> (candidate agents, _,
            neighbors info)} describing the orphaned computations that could
            be hosted on this agent.

        Returns
        -------
        candidate_binvars: Dict[Tuple, BinaryVariable]
            the binary variables whose repair computations are hosted here.
        """
        self.logger.info('Setup repair %s', repair_info)
        # create computation for the reparation dcop
        # The reparation dcop uses a dcop algorithm where computations maps to
        # variable (in order to have another dcop distribution problem) and use
        # binary variable for each candidate computation.
        # This agent will host one variable-computation for each
        # binary variable x_i^m indicating if the candidate computation x_i
        # is hosted on this agent a_m. Notice that by construction,
        # the agent already have a replica for all the candidates x_i.
        # The reparation dcop includes several constraints and variables:
        # Variables
        #  * one binary variable for each orphaned computation
        # Constraints
        #  * hosted constraints : one for each candidate computation
        #  * capacity constraint : one for this agent
        #  * hosting costs constraint : one for this agent
        #  * communication constraint
        #
        # For reparation, we use a dcop algorithm where computations maps to
        # variables of the dcop. On this agent, we host the computations
        # corresponding to the variables representing the orphaned computation
        # that could be hosted on this agent (aka candidate computation).
        # Here, we use MGM
        own_name = self.name
        # `orphaned_binvars` is a map that contains binary variables for
        # orphaned computations.
        # Notice that it only contains variables for computations
        # that this agents knows of, i.e. computations that could be hosted
        # here (aka candidate computations) or that depends on computations
        # that could be hosted here.
        # There is one binary variable x_i^m for each pair (x_i, a_m),
        # where x_i is an orphaned computation and a_m is an agent that could
        # host x_i (i.e. has a replica of x_i).
        orphaned_binvars = {}  # type: Dict[Tuple, BinaryVariable]
        # One binary variable x_i^m for each candidate computation x_i that
        # could be hosted on this agent a_m. Computation for these variables
        # will be hosted in this agent. This is a subset of orphaned_binvars.
        candidate_binvars = {}  # type: Dict[Tuple, BinaryVariable]
        # Agent that will host the computation for each binary var.
        # it is a dict { bin var name : agent_name }
        # agt_hosting_binvar = {}  # type: Dict[str, str]
        # `hosted_cs` contains hard constraints ensuring that all candidate
        # computations are hosted:
        hosted_cs = {}  # type: Dict[str, Constraint]
        for candidate_comp, candidate_info in repair_info.items():
            try:
                # This computation is not hosted any more, if we had it in
                # discovery, forget about it but do not publish this
                # information, this agent is not responsible for updatings
                # other's discovery services.
                self.discovery.unregister_computation(candidate_comp,
                                                      publish=False)
            except UnknownComputation:
                pass
            agts, _, neighbors = candidate_info
            # One binary variable for each candidate agent for computation
            # candidate_comp:
            v_binvar = create_binary_variables(
                'B', ([candidate_comp], candidate_info[0]))
            # Set initial values for binary decision variable
            for v in v_binvar.values():
                # NOTE(review): '_intial_value' looks misspelled
                # ('_initial_value'?) — confirm the attribute name expected
                # by BinaryVariable before "fixing" it.
                v._intial_value = 1 if random.random() < 1 / 3 else 0
            orphaned_binvars.update(v_binvar)
            # the variable representing if the computation will be hosted on
            # this agent:
            candidate_binvars[(candidate_comp, own_name)] = \
                v_binvar[(candidate_comp, own_name)]
            # the 'hosted' hard constraint for this candidate variable:
            hosted_cs[candidate_comp] = \
                create_computation_hosted_constraint(candidate_comp, v_binvar)
            self.logger.debug('Hosted hard constraint for computation %s : %r',
                              candidate_comp, hosted_cs[candidate_comp])
            # One binary variable for each pair (x_j, a_n) where x_j is an
            # orphaned neighbors of candidate_comp and a_n is an agent that
            # could host a_n:
            for neighbor in neighbors:
                v_binvar = create_binary_variables(
                    'B', ([neighbor], neighbors[neighbor]))
                orphaned_binvars.update(v_binvar)
        self.logger.debug('Binary variable for reparation %s ',
                          orphaned_binvars)
        # Agent that will host the computation for each binary var.
        # it is a dict { bin var name : agent_name }
        agt_hosting_binvar = {v.name: a
                              for (_, a), v in orphaned_binvars.items()}
        self.logger.debug('Agents hosting the computations for these binary '
                          'variables : %s ', agt_hosting_binvar)
        # The capacity (hard) constraint for this agent. This ensures that the
        # capacity of the current agent will not be overflown by hosting too
        # many candidate computations. This constraints depends on the binary
        # variables for the candidate computations.
        remaining_capacity = self.agent_def.capacity - \
            sum(c.footprint() for c in self.computations())
        self.logger.debug('Remaining capacity on agent %s : %s',
                          self.name, remaining_capacity)
        def footprint_func(c_name: str):
            # We have a replica for these computation, we know its footprint.
            return self.replication_comp.hosted_replicas[c_name][1]
        capacity_c = create_agent_capacity_constraint(
            own_name, remaining_capacity, footprint_func,
            candidate_binvars)
        self.logger.debug('Capacity constraint for agt %s : %r',
                          self.name, capacity_c)
        # Hosting costs constraint for this agent. This soft constraint is
        # used to minimize the hosting costs on this agent ; it depends on
        # the binary variables for the candidate computations.
        hosting_c = create_agent_hosting_constraint(
            own_name, self.agent_def.hosting_cost,
            candidate_binvars)
        self.logger.debug('Hosting cost constraint for agt %s : %r',
                          self.name, hosting_c)
        # The communication constraint. This soft constraints is used to
        # minimize the communication cost on this agent. As communication
        # cost depends on where computation on both side of an edge are
        # hosted, it also depends on the binary variables for orphaned
        # computations that could not be hosted here.
        def comm_func(candidate_comp: str, neighbor_comp: str, agt: str):
            # returns the communication cost between the computation
            # candidate_name hosted on the current agent and it's neighbor
            # computation neigh_comp hosted on agt.
            route_cost = self.agent_def.route(agt)
            comp_def = self.replication_comp.replicas[candidate_comp]
            algo = comp_def.algo.algo
            algo_module = load_algorithm_module(algo)
            communication_load = algo_module.communication_load
            msg_load = 0
            for l in comp_def.node.neighbors:
                if l == neighbor_comp:
                    msg_load += communication_load(comp_def.node, neighbor_comp)
            com_load = msg_load * route_cost
            return com_load
        # Now that we have the variables and constraints, we can create
        # computation instances for each of the variable this agent is
        # responsible for, i.e. the binary variables x_i^m that correspond to
        # the candidate variable x_i (and a_m is the current agent)
        self._repair_computations.clear()
        algo_def = AlgorithmDef.build_with_default_param(
            repair_algo.algorithm_name,
            {'stop_cycle': 20, 'threshold': 0.2},
            mode='min',
            parameters_definitions=repair_algo.algo_params)
        for (comp, agt), candidate_var in candidate_binvars.items():
            self.logger.debug('Building computation for binary variable %s ('
                              'variable %s on %s)', candidate_var, comp, agt)
            comm_c = create_agent_comp_comm_constraint(
                agt, comp, repair_info[comp], comm_func, orphaned_binvars)
            self.logger.debug('Communication constraint for computation %s '
                              'on agt %s : %r', comp, self.name, comm_c)
            constraints = [comm_c, hosting_c, capacity_c, hosted_cs[comp]]
            # constraints.extend(hosted_cs.values())
            self.logger.debug('Got %s Constraints for var %s : %s ',
                              len(constraints), candidate_var, constraints)
            node = chg.VariableComputationNode(candidate_var, constraints)
            comp_def = ComputationDef(node, algo_def)
            computation = repair_algo.build_computation(comp_def)
            self.logger.debug('Computation for %s : %r ',
                              candidate_var, computation)
            # add the computation on this agents and register the neighbors
            self.add_computation(computation, publish=True)
            self._repair_computations[computation.name] = \
                RepairComputationRegistration(computation, 'ready', comp)
            for neighbor_comp in node.neighbors:
                neighbor_agt = agt_hosting_binvar[neighbor_comp]
                try:
                    self.discovery.register_computation(
                        neighbor_comp, neighbor_agt,
                        publish=False)
                except UnknownAgent:
                    # If we don't know this agent yet, we must perform a lookup
                    # and only register the computation once found.
                    # Note the use of partial, to force the capture of
                    # neighbor_comp.
                    def _agt_lookup_done(comp, evt, evt_agt, _):
                        if evt == 'agent_added':
                            self.discovery.register_computation(
                                comp, evt_agt, publish=False)
                    self.discovery.subscribe_agent(
                        neighbor_agt,
                        partial(_agt_lookup_done, neighbor_comp),
                        one_shot=True)
        self.logger.info('Repair setup done one %s, %s computations created, '
                         'inform orchestrator', self.name,
                         len(candidate_binvars))
        return candidate_binvars
    def repair_run(self):
        """Start all repair computations created by `setup_repair`."""
        self.logger.info('Agent runs Repair dcop computations')
        comps = list(self._repair_computations.values())
        for c in comps:
            c.computation.start()
            c.status = 'started'
    def _on_replication_done(self, replica_hosts: Mapping[str, List[str]]):
        """
        Called when all computations have been replicated.
        This method is meant to be overwritten in subclasses.
        Parameters
        ----------
        replica_hosts: a map { computation name -> List of agt name }
            For each active computation hosted by this agent, this map
            contains a list of agents that have been selected to host a
            replica.
        """
        self.logger.info('Replica distribution finished for agent '
                         '%s : %s (level requested : %s)', self.name,
                         replica_hosts, self._replication_level)
        rep_levels = {computation: len(replica_hosts[computation])
                      for computation in replica_hosts}
        # Warn if any computation got fewer replicas than requested.
        if not all([level >= self._replication_level
                    for level in rep_levels.values()]):
            self.logger.warning('Insufficient replication for computations: '
                                '%s ',
                                rep_levels)
    def _on_computation_finished(self, computation: str,
                                 *args, **kwargs):
        """Trigger repair handling when a repair computation finishes."""
        self.logger.debug('Computation %s has finished', computation)
        if self.replication_comp and computation in self._repair_computations:
            self._on_repair_computation_finished(computation)
    def _on_repair_computation_finished(self, computation: str):
        """Handle the end of one repair computation.

        Once ALL repair computations are finished: deploy and start the
        selected candidate computations, report metrics, request
        re-replication and remove the repair computations.
        """
        repair_comp = self._repair_computations[computation]
        repair_comp.status = 'finished'
        # deploy the computation if it was selected during reparation:
        if repair_comp.computation.current_value == 1:
            self.logger.info('Reparation: computation %s selected on %s',
                             repair_comp.candidate, self.name)
            comp_def = self.replication_comp.replicas[repair_comp.candidate]
            self.logger.info('Deploying computation %s locally with '
                             'definition , %r', repair_comp.candidate,
                             comp_def)
            comp = build_computation(comp_def)
            self.add_computation(comp, publish=True)
        else:
            self.logger.info('Reparation: computation %s NOT selected on '
                             '%s', repair_comp.candidate, self.name)
            # Remove replica: it will be re-replicated by its new host.
            self.replication_comp.remove_replica(repair_comp.candidate)
        if all(c.status == 'finished'
               for c in self._repair_computations.values()):
            selected_computations = \
                [c.candidate for c in self._repair_computations.values()
                 if c.computation.current_value == 1]
            self.logger.info('All repair computations have finished, '
                             'selected computation : %s',
                             selected_computations)
            metrics = self.metrics()
            # NOTE(review): leftover debug print — consider logging instead.
            print(f" metrics repair {self.name} - {metrics}")
            repair_metrics = {'count_ext_msg': {}, 'size_ext_msg': {}, 'cycles': {}}
            for c in self._repair_computations.values():
                c_name = c.computation.name
                if c_name in metrics['count_ext_msg']:
                    repair_metrics['count_ext_msg'][c_name] = metrics['count_ext_msg'][c_name]
                else:
                    repair_metrics['count_ext_msg'][c_name] = 0
                if c_name in metrics['size_ext_msg']:
                    repair_metrics['size_ext_msg'][c_name] = metrics['size_ext_msg'][c_name]
                else:
                    repair_metrics['size_ext_msg'][c_name] = 0
                if c_name in metrics['cycles']:
                    repair_metrics['cycles'][c_name] = metrics['cycles'][c_name]
                else:
                    repair_metrics['cycles'][c_name] = 0
            # NOTE(review): leftover debug print — consider logging instead.
            print(f" {self.name} : metrics after repair {repair_metrics}")
            self._on_repair_done(selected_computations, repair_metrics=repair_metrics)
            if selected_computations:
                self.logger.info('Re-replicate newly activated computations '
                                 'on %s : %s , level %s', self.name,
                                 selected_computations,
                                 self._replication_level)
                try:
                    self.replication_comp.replicate(self._replication_level,
                                                    selected_computations)
                except UnknownComputation:
                    # avoid crashing if one of the neighbor comp is not repaired yet
                    pass
                self.logger.info('Starting newly activated computations on '
                                 '%s : %s ', self.name,
                                 selected_computations)
                for selected in selected_computations:
                    self.computation(selected).start()
                    self.computation(selected).pause()
            # Remove / undeploy repair comp once repaired
            for repair_comp in self._repair_computations.values():
                self.remove_computation(repair_comp.computation.name)
            self._repair_computations.clear()
    def _on_repair_done(self, selected_computations: List[str], **kwargs):
        """
        Called when all repair computations have finished.
        This method is meant to be overwritten in subclasses.
        """
        pass
class RepairComputation(MessagePassingComputation):
    """
    Agent-level technical computation for resiliency-related messages.

    Parameters
    ----------
    agent: ResilientAgent
        the agent hosting this computation.
    """

    def __init__(self, agent: ResilientAgent):
        # Bug fix: the original called `self.agent.name` before `self.agent`
        # was assigned, raising AttributeError on construction.
        super().__init__('_resilience_' + agent.name)
        self.agent = agent
        self.logger = logging.getLogger('pydcop.agent.repair.' + agent.name)
        # Message-type -> handler dispatch table (handlers currently disabled).
        self._handlers = {
            # 'replication': self._on_replication,
            # 'setup_repair': self._on_setup_repair,
            # 'repair_run': self._on_repair_run,
        }

    @property
    def type(self):
        return 'replication'

    def on_message(self, var_name, msg, t):
        # Dispatch on the message type; an unknown type raises KeyError.
        self._handlers[msg.type](msg)

    def footprint(self):
        # Technical computation: no footprint.
        return 0

    def replication_done(self, replica_hosts: Dict[str, List[str]]):
        """
        Called when all computations have been replicated.
        The replication algorithm only selects agents to host replicas,
        here we send the actual computations definitions to the agents
        selected to host a replica.
        We also send the obtained replication to the orchestrator.
        Parameters
        ----------
        replica_hosts: a map { computation name -> List of agt name }
            For each active computation hosted by this agent, this map
            contains a list of agents that have been selected to host a
            replica.
        """
        self.logger.info('Replica distribution finished for agent '
                         '%s : %s', self.name, replica_hosts)
        # self.agent.on_replication_done()
        # dist_msg = ComputationReplicatedMessage(self.name, replica_hosts)
        # self.message_sender.post_send_to_orchestrator(dist_msg)
| 1.507813 | 2 |
server.py | JohnStarich/python-pool-performance | 32 | 12767976 | #!/usr/bin/env python3
from gunicorn_server import StandaloneApplication
from flask import Flask
import multiprocessing
# WSGI application instance, served by gunicorn in the __main__ block below.
app = Flask(__name__)
@app.route("/")
def ok():
    """Health-check endpoint: always responds with the plain text 'OK'."""
    return "OK"
if __name__ == "__main__":
    # Serve the Flask app through gunicorn, using the standard
    # worker-count heuristic of (2 * CPU cores) + 1.
    gunicorn_app = StandaloneApplication(app, options={
        'bind': '127.0.0.1:8080',
        'workers': (multiprocessing.cpu_count() * 2) + 1,
    })
    gunicorn_app.run()
| 2.375 | 2 |
src/backend/qai_testbed_backend/usecases/testrunner.py | ads-ad-itcenter/qunomon.forked | 16 | 12767977 | # Copyright © 2019 National Institute of Advanced Industrial Science and Technology (AIST). All rights reserved.
from datetime import datetime, timedelta, timezone
from requests import post, get
from pathlib import Path
import json
from injector import singleton
import os
import shutil
from distutils.dir_util import remove_tree, copy_tree
from sqlalchemy import asc
from qlib.utils.logging import get_logger, log
from reportgenerator import ReportGenerator
from ..across.exception import QAINotFoundException, QAIBadRequestException,\
QAIInvalidRequestException
from ..across.file_checker import FileChecker
from ..controllers.dto.testrunner import PostTestRunnerReq, PostTestRunnerRes, Result, Job, \
GetTestRunnerStatusRes, JobStatus, RunStatus, PostReportGeneratorRes, PostReportGeneratorReq
from ..controllers.dto.testrunner import GetTestRunnerRes
from ..entities.test import TestMapper
from ..entities.test_description import TestDescriptionMapper
from ..entities.ml_component import MLComponentMapper
from ..entities.dowmload import DownloadMapper
from ..entities.setting import SettingMapper
from ..entities.graph import GraphMapper
from ..entities.run import RunMapper
from ..gateways.extensions import sql_db
from ..entities.test_runner import TestRunnerMapper
from sqlalchemy.exc import SQLAlchemyError
logger = get_logger()
@singleton
class TestRunnerService:
    """Launches test-runner jobs for the test descriptions of a ML component."""
    def __init__(self):
        self.ip_entry_point = SettingMapper.query.get('ip_entry_point').value
        self._file_checker = FileChecker()
    @log(logger)
    def post(self, organizer_id: str, ml_component_id: int, request: PostTestRunnerReq) -> PostTestRunnerRes:
        """Request the test runner to execute test descriptions.

        If `request.test_description_ids` is empty, every non-deleted,
        not-yet-executed TD of the ML component is run; otherwise each given
        id is validated (must exist, not deleted, not already executed).
        Inventory files are hash-checked, then the job is posted to the
        test-runner entry point.

        Raises
        ------
        QAINotFoundException, QAIInvalidRequestException
            on validation or downstream errors (see codes in the body).
        """
        test = TestMapper.query.\
            filter(TestMapper.ml_component_id == ml_component_id).\
            filter(MLComponentMapper.org_id == organizer_id).first()
        if test is None:
            raise QAINotFoundException('R14000', 'not found test descriptions')
        td_ids = request.test_description_ids
        if len(td_ids) == 0:
            # Build the TestDescriptionMapper list, excluding:
            # already deleted, already executed (OK or NG)
            tds = TestDescriptionMapper.query. \
                filter(TestDescriptionMapper.test_id == test.id). \
                filter(TestDescriptionMapper.delete_flag == False). \
                all()
            # Drop the already executed (OK or NG) ones.
            # sqlalchemy cannot filter on the related table here,
            # so narrow down on the python side instead.
            tds = [t for t in tds if
                   (t.run is None) or
                   ((t.run is not None) and (t.run.result != 'OK' and t.run.result != 'NG'))]
            if not tds:
                raise QAINotFoundException('R14001', 'all test descriptions are deleted or executed \n'
                                                     'You can\'t re-execute a previously executed TD,'
                                                     ' so create a new one or duplicate it.')
            td_ids = [t.id for t in tds]
        else:
            # When td_ids are given, check each one for deletion and
            # execution status.
            for td_id in td_ids:
                td = TestDescriptionMapper.query.get(td_id)
                if td is None:
                    raise QAINotFoundException('R14001', f'test description[id={td_id}] is not exists.')
                if td.delete_flag:
                    raise QAINotFoundException('R14001', 'test description[id={}, name={}] are deleted.'
                                               .format(td_id, td.name))
                if (td.run_id is not None) and (td.run.result == 'OK' or td.run.result == 'NG'):
                    raise QAINotFoundException('R14001', 'test description[id={}, name={}] are executed.\n'
                                                         'You can\'t re-execute a previously executed TD,'
                                                         ' so create a new one or duplicate it.'
                                               .format(td_id, td.name))
        # Hash check: make sure inventory files have not been modified
        # since they were registered.
        for td_id in td_ids:
            td = TestDescriptionMapper.query.get(td_id)
            for inventory_td_mapper in td.inventories:
                file_check_result = self._file_checker.execute(inventory_td_mapper.inventory.file_path,
                                                               inventory_td_mapper.inventory.file_system_id)
                if not file_check_result['exists']:
                    raise QAINotFoundException('R14002', f'inventory file not found.'
                                                         f'file:{inventory_td_mapper.inventory.file_path}')
                if file_check_result['hash_sha256'] != inventory_td_mapper.inventory.file_hash_sha256:
                    raise QAIInvalidRequestException('R14003', f'inventory file hash is not much.'
                                                               f'file:{inventory_td_mapper.inventory.file_path}')
        res = post(url=self.ip_entry_point + '/' + organizer_id + '/mlComponents/' + str(ml_component_id) + '/job',
                   headers={'content-type': 'application/json'},
                   json={'TestDescriptionIds': td_ids})
        # Response error check.
        if res.status_code != 200:
            raise QAIInvalidRequestException('R19999', 'testrunner error: {}'.format(res.text))
        job_id = res.json()['JobId']
        return PostTestRunnerRes(
            result=Result(code='R12000', message="job launch success."),
            job=Job(id_=str(job_id),
                    start_datetime=datetime.now(timezone(timedelta(hours=+9), 'JST')))
        )
    @log(logger)
    def get_test_runners(self) -> GetTestRunnerRes:
        """Return every registered test runner as template DTOs."""
        test_runners = TestRunnerMapper.query.all()  # fetch all, regardless of organizer_id / ml_component_id
        if test_runners is None:
            raise QAINotFoundException('I54000', 'not found test runners')
        return GetTestRunnerRes(
            result=Result(code='I52000', message="get test runners success."),
            test_runners=[t.to_template_dto() for t in test_runners]
        )
@singleton
class TestRunnerStatusService:
    """Reports the status of the last test-runner job of a ML component."""
    def __init__(self):
        self.ip_entry_point = SettingMapper.query.get('ip_entry_point').value
    @log(logger)
    def get(self, organizer_id: str, ml_component_id: int) -> GetTestRunnerStatusRes:
        """Return job and per-run statuses for the component's current job.

        Raises
        ------
        QAINotFoundException
            when the component has no test descriptions (code R24000).
        """
        test = TestMapper.query.\
            filter(TestMapper.ml_component_id == ml_component_id).\
            filter(MLComponentMapper.org_id == organizer_id).first()
        if test is None:
            raise QAINotFoundException('R24000', 'not found test descriptions')
        # No job has been launched yet: return an 'NA' placeholder status.
        if test.job_id is None:
            return GetTestRunnerStatusRes(
                result=Result(code='R24001', message="job is not found."),
                job_status=JobStatus(id_=0, status='NA', result='NA', result_detail='OK:0 NG:0 ERR:0 NA:0'),
                run_statuses=[])
        return GetTestRunnerStatusRes(
            result=Result(code='R22000', message="get job status success."),
            job_status=test.job.to_dto(),
            run_statuses=[r.to_dto() for r in test.job.runs]
        )
@singleton
class ReportGeneratorService:
    """Service that sets report parameters and generates a PDF report
    from test-description results (graphs + reviewer opinions)."""
    def __init__(self):
        # Dispatch table mapping the request `command` string to a handler.
        self.func_table = {
            "SetParam": self._invoke_set_params,
            "Generate": self._invoke_report_generate
        }
        self.backend_entry_point = SettingMapper.query.get('backend_entry_point').value
        # The mount destination differs between Windows and other platforms.
        if os.name == 'nt':
            mount_dst_path = Path(SettingMapper.query.get('mount_src_path').value)
        else:
            mount_dst_path = Path(SettingMapper.query.get('mount_dst_path').value)
        self.report_home_path = mount_dst_path/'report'
        self._initialize_report_dir(self.report_home_path)
        self.report_generator = ReportGenerator(home_path=str(self.report_home_path)+os.sep)
        self.backend_report_home = mount_dst_path/'backend'/'report_gen'
    def _initialize_report_dir(self, path):
        """Recreate the report working directory and copy the template into it."""
        # Create a fresh work dir (wipes any previous contents).
        if path.exists():
            remove_tree(str(path))
        path.mkdir(parents=True, exist_ok=True)
        # Copy the report template next to the work dir.
        copy_src_dir = Path(__file__).joinpath('../../../report/template')
        copy_dst_dir = path / 'template'
        if copy_dst_dir.exists():
            remove_tree(str(copy_dst_dir))
        copy_tree(src=str(copy_src_dir.resolve()), dst=str(copy_dst_dir.resolve()))
    def _invoke_set_params(self, request: PostReportGeneratorReq, _=None) -> {}:
        """Persist per-graph report settings and the opinion for one TD.

        Only the first entry of request.destination is applied.
        """
        td_id = int(request.destination[0])
        td = TestDescriptionMapper.query.get(td_id)
        if td.run is not None:
            for td_graph in td.run.graphs:
                param_graphs = [g for g in request.params.graphs if g.id_ == td_graph.id]
                if len(param_graphs) > 0:
                    param_graph = param_graphs[0]
                    td_graph.report_required = param_graph.report_required
                    if param_graph.report_required:
                        td_graph.report_index = param_graph.report_index
                        td_graph.report_name = param_graph.report_name
                else:
                    # Graphs not mentioned in the request are excluded.
                    td_graph.report_required = False
        if request.params.opinion is not None:
            td.opinion = request.params.opinion
        sql_db.session.commit()
    def _invoke_report_generate(self, request: PostReportGeneratorReq,
                                test_descriptions: [TestDescriptionMapper] = None) -> {}:
        """Build the report input JSON, run the generator, and publish the PDF.

        Returns a dict containing 'ReportUrl' (download URL of the PDF).
        Raises:
            QAINotFoundException: when no runnable TD exists, a TD has no run,
                or PDF generation fails.
        """
        # --- Pre-processing ---
        # Prepare timestamped in/out work folders (JST timestamp).
        dt_now_jst = datetime.now(timezone(timedelta(hours=9))).strftime('%Y%m%d%H%M%S')
        base_dir = self.backend_report_home / dt_now_jst
        in_dir = base_dir / 'in'
        in_dir.mkdir(parents=True)
        out_dir = base_dir / 'out'
        out_dir.mkdir(parents=True)
        # Build the generator's input JSON.
        in_json = {}
        target_td_ids = []
        # Build target_td_ids excluding: already-deleted TDs,
        # never-run TDs (run is None), and runs that failed (result 'ERR').
        if len(request.destination) == 0:
            # test_descriptions is already filtered of deleted TDs by post().
            target_td_ids = [td.id for td in test_descriptions if td.run and td.run.result != 'ERR']
        else:
            tmp_td_ids = [int(td_id) for td_id in request.destination]
            for td_id in tmp_td_ids:
                td = TestDescriptionMapper.query\
                    .filter(TestDescriptionMapper.id == td_id)\
                    .filter(TestDescriptionMapper.delete_flag == False).first()
                if td.run and td.run.result != 'ERR':
                    target_td_ids.append(td_id)
        if len(target_td_ids) == 0:
            raise QAINotFoundException('D14004', 'these test description is not running')
        # Parallel per-item lists; zipped into index-keyed dicts below.
        file_path_list = []
        type_list = []
        quality_props_list = []
        td_id__list = []
        required_list = []
        report_name = []
        for td_id in target_td_ids:
            td = TestDescriptionMapper.query.get(td_id)
            if td.run_id is None:
                raise QAINotFoundException('D14002', 'test description\'s result is None')
            # Write the reviewer's opinion to a text file and register it.
            if len(td.opinion) != 0:
                opinion_path = in_dir / ('opinion' + str(td_id) + ".txt")
                with open(str(opinion_path), mode='w', encoding='utf-8') as f:
                    f.write(td.opinion)
                file_path_list.append(str(opinion_path))
                type_list.append('text')
                quality_props_list.append(td.quality_dimension_id)
                td_id__list.append(str(td_id))
                required_list.append(True)
                report_name.append('Opinion')
            graphs = GraphMapper.query.\
                filter(GraphMapper.run_id == td.run_id).\
                filter(GraphMapper.report_required == True).\
                order_by(asc(GraphMapper.report_index)).\
                all()
            for graph in graphs:
                file_path_list.append(graph.download.path)
                type_list.append(graph.graph_template.resource_type.type)
                quality_props_list.append(td.quality_dimension_id)
                td_id__list.append(str(td_id))
                required_list.append(graph.report_required)
                report_name.append(graph.report_name)
        in_json['filepath'] = dict(zip(range(len(file_path_list)), file_path_list))
        in_json['type'] = dict(zip(range(len(type_list)), type_list))
        in_json['quality_props'] = dict(zip(range(len(quality_props_list)), quality_props_list))
        in_json['testDescriptionID'] = dict(zip(range(len(td_id__list)), td_id__list))
        in_json['required'] = dict(zip(range(len(required_list)), required_list))
        in_json['name'] = dict(zip(range(len(report_name)), report_name))
        in_json_path = in_dir/'input.json'
        with open(str(in_json_path), 'w', encoding='utf-8') as f:
            json.dump(in_json, f, indent=4, ensure_ascii=False)
        # --- Generate the report ---
        pdf_file_path = self.report_home_path / 'work' / 'report.pdf'
        pdf_file = self.report_generator.report_generate(sql_db, str(in_json_path), str(pdf_file_path))
        if not pdf_file or not Path(pdf_file).exists():
            raise QAINotFoundException('D16000', 'failed report generate')
        # --- Post-processing: copy PDF out and register the download row ---
        res = {}
        try:
            dst_path = out_dir / Path(pdf_file).name
            shutil.copy(src=pdf_file, dst=str(dst_path))
            dl = DownloadMapper(path=pdf_file)
            sql_db.session.add(dl)
            sql_db.session.commit()
            res['ReportUrl'] = self.backend_entry_point + '/download/' + str(dl.id)
        except Exception as e:
            print('Exception: {}'.format(e))
            sql_db.session.rollback()
            raise e
        return res
    @log(logger)
    def post(self, organizer_id: str, ml_component_id: int, request: PostReportGeneratorReq) -> PostReportGeneratorRes:
        """Entry point: validate the request, then dispatch to the handler
        named by request.command ('SetParam' or 'Generate').

        Raises:
            QAINotFoundException: unknown ML component, or every TD deleted.
            QAIBadRequestException: unknown command string.
        """
        test = TestMapper.query.\
            filter(TestMapper.ml_component_id == ml_component_id).\
            filter(MLComponentMapper.org_id == organizer_id).first()
        if test is None:
            raise QAINotFoundException('D14000', 'not found test descriptions')
        if request.command not in self.func_table:
            raise QAIBadRequestException('D10001', 'invaid command')
        # Build the list of TDs with delete_flag == False (deleted excluded).
        mapper = TestDescriptionMapper.query. \
            filter(TestDescriptionMapper.test_id == test.id). \
            filter(TestDescriptionMapper.delete_flag == False). \
            all()
        if not mapper:
            raise QAINotFoundException('D14001', 'test descriptions are all deleted')
        try:
            func = self.func_table[request.command]
            out_params = func(request, mapper)
        except Exception as e:
            print('Exception: {}'.format(e))
            sql_db.session.rollback()
            raise e
        return PostReportGeneratorRes(
            result=Result(code='D12000', message="command invoke success."),
            out_params=out_params
        )
| 1.78125 | 2 |
api/tests/opentrons/protocol_engine/actions/test_action_dispatcher.py | anuwrag/opentrons | 235 | 12767978 | <gh_stars>100-1000
"""Tests for the protocol engine's ActionDispatcher."""
from decoy import Decoy
from opentrons.protocol_engine.actions import (
ActionDispatcher,
ActionHandler,
PlayAction,
)
def test_sink(decoy: Decoy) -> None:
    """It should send all actions to the sink handler."""
    sink_handler = decoy.mock(cls=ActionHandler)
    dispatcher = ActionDispatcher(sink=sink_handler)
    play = PlayAction()
    dispatcher.dispatch(play)
    decoy.verify(sink_handler.handle_action(play))
def test_add_handler(decoy: Decoy) -> None:
    """It should dispatch actions to added handlers before the sink."""
    sink_handler = decoy.mock(cls=ActionHandler)
    first_handler = decoy.mock(cls=ActionHandler)
    second_handler = decoy.mock(cls=ActionHandler)
    dispatcher = ActionDispatcher(sink=sink_handler)
    dispatcher.add_handler(first_handler)
    dispatcher.add_handler(second_handler)
    play = PlayAction()
    dispatcher.dispatch(play)
    decoy.verify(
        first_handler.handle_action(play),
        second_handler.handle_action(play),
        sink_handler.handle_action(play),
    )
e2e_EL_evaluate/utils/num_docs_anno.py | yifding/e2e_EL_evaluate | 5 | 12767979 | <gh_stars>1-10
import os
import argparse
from e2e_EL_evaluate.utils.gen_anno_from_xml import gen_anno_from_xml
def main(args):
    """Print, for each dataset, how many documents have text and how many
    documents carry at least one annotation."""
    for dataset in args.datasets:
        print('dataset: ', dataset)
        name2txt, name2anno = gen_anno_from_xml(args.input_dir, dataset)
        print('num_of_docs_from_txt: ', len(name2txt))
        # A document counts as annotated when its annotation list is non-empty.
        annotated_count = sum(1 for doc in name2anno if name2anno[doc])
        print('num_of_docs_from_anno: ', annotated_count)
def parse_args():
    """Build the CLI parser and return the parsed arguments.

    Note: argparse applies each option's ``type`` callable to string
    defaults too, so ``--datasets`` arrives as an evaluated Python list.
    """
    parser = argparse.ArgumentParser(
        description='process aida data to xml format',
        allow_abbrev=False,
    )
    parser.add_argument(
        '--input_dir',
        type=str,
        default='/scratch365/yding4/e2e_EL_evaluate/data/aida/xml/trans_span2el_span',
        help='Specify the input xml data dir',
    )
    parser.add_argument(
        '--datasets',
        type=eval,
        default="['aida_testa','aida_testb','aida_train']",
        help='datasets to processed',
    )
    return parser.parse_args()
# Script entry point: parse CLI arguments and print annotation counts.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 2.890625 | 3 |
core/scansf.py | faslan1234/socialfish | 2,970 | 12767980 | import nmap
import requests
def nScan(ip):
    """Run a fast ('-F') nmap scan against *ip* and summarize scanned ports.

    Returns, for the first discovered host, a list of dicts with keys
    ``Port``, ``Name``, ``Reason`` and ``State`` (one dict per port across
    all protocols), or ``None`` when no host answered.
    """
    nm = nmap.PortScanner()
    nm.scan(ip, arguments="-F")
    for host in nm.all_hosts():
        po = []
        # Read each port's details from the protocol it actually belongs to.
        # The original code collected all ports first and then indexed them
        # all under the *last* protocol iterated, which raised KeyError or
        # returned wrong data for hosts exposing more than one protocol
        # (e.g. both tcp and udp).
        for proto in nm[host].all_protocols():
            for port in nm[host][proto].keys():
                info = nm[host][proto][port]
                po.append({
                    "Port": str(port),
                    "Name": info['name'],
                    "Reason": info['reason'],
                    "State": info['state'],
                })
        # Preserve original behavior: only the first host is reported.
        return po
app/views.py | ykatzir/qapp-gsuite-react | 0 | 12767981 | from flask import render_template
from app import app
@app.route('/')
@app.route('/index')
def index():
    # Landing page: serves the configuration template at both '/' and '/index'.
    return render_template('config.html')
@app.route('/dashboard')
def dashboard():
    # Dashboard page: renders the dashboard template.
    return render_template('dashboard.html')
| 2.140625 | 2 |
warehouse/classifiers/models.py | fairhopeweb/warehouse | 3,103 | 12767982 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import CheckConstraint, Column, Integer, Text
from warehouse import db
from warehouse.utils.attrs import make_repr
class Classifier(db.ModelBase):
    """A single trove classifier (e.g. ``Programming Language :: Python``)."""

    __tablename__ = "trove_classifiers"
    # SQLAlchemy only honors table-level arguments declared as
    # ``__table_args__`` (a tuple). The original ``__tableargs__`` spelling
    # was silently ignored, so this CHECK constraint was never created.
    __table_args__ = (
        CheckConstraint(
            "classifier not ilike 'private ::%'",
            name="ck_disallow_private_top_level_classifier",
        ),
    )

    __repr__ = make_repr("classifier")

    # Surrogate primary key.
    id = Column(Integer, primary_key=True, nullable=False)
    # The classifier string itself; unique across the table.
    classifier = Column(Text, unique=True)
| 1.921875 | 2 |
flox/visualize.py | Illviljan/flox | 0 | 12767983 | <filename>flox/visualize.py
import random
from itertools import product
import numpy as np
import pandas as pd
def draw_mesh(
    nrow,
    ncol,
    *,
    draw_line_at=None,
    nspaces=0,
    space_at=0,
    pxin=0.3,
    counter=None,
    colors=None,
    randomize=True,
    x0=0,
    append=False,
):
    """Draw an ``nrow`` x ``ncol`` mesh of colored square patches.

    Parameters
    ----------
    draw_line_at : int, optional
        Draw a vertical separator line every ``draw_line_at`` drawn cells.
    nspaces, space_at : int
        Extend the grid by ``nspaces`` columns and skip every
        ``space_at``-th cell (leaves a gap).
    pxin : float
        Inches per cell when sizing a fresh figure.
    counter : MutableMapping, optional
        If given, incremented per color used (color -> count).
    colors : sequence, optional
        Cell colors; defaults to the first four Set2 colors.
    randomize : bool
        Choose colors randomly vs. consume ``colors`` in order.
    x0 : float
        Horizontal offset of the whole mesh (for appending side by side).
    append : bool
        Draw onto the current axes instead of creating a new figure.
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    # Each cell is dx data-units wide/tall.
    dx = 2
    xpts = x0 + np.arange(0, (ncol + nspaces) * dx, dx)
    ypts = np.arange(0, nrow * dx, dx)
    if colors is None:
        colors = mpl.cm.Set2.colors[:4]
    if not append:
        plt.figure()
        ax = plt.axes()
    else:
        ax = plt.gca()
    ax.set_aspect(1)
    ax.set_axis_off()
    if not randomize:
        # Sequential mode consumes colors one by one.
        colors = iter(colors)
    icolor = -1
    for n, (y, x) in enumerate(product(ypts, xpts)):
        if space_at > 0 and (n % space_at) == 0:
            continue
        if randomize:
            fcolor = random.choice(colors)
        else:
            fcolor = next(colors)
        icolor += 1
        if counter is not None:
            counter[fcolor] += 1
        ax.add_patch(
            mpl.patches.Rectangle(
                (x, y - 0.5 * dx),
                dx,
                dx,
                edgecolor="w",
                linewidth=1,
                facecolor=fcolor,
            )
        )
        # Vertical separator between groups of draw_line_at cells.
        if draw_line_at is not None and icolor > 0 and icolor % draw_line_at == 0:
            plt.plot([x, x], [y - 0.75 * dx, y + 0.75 * dx], color="k", lw=2)
    ax.set_xlim((0, max(xpts) + dx))
    ax.set_ylim((-0.75 * dx, max(ypts) + 0.75 * dx))
    if not append:
        plt.gcf().set_size_inches((ncol * pxin, (nrow + 2) * pxin))
def visualize_groups(array, labels, axis=-1, colors=None, cmap=None):
    """
    Visualize group distribution for a 1D array of group labels.

    Draws one colored cell per element, chunk by chunk along ``axis`` of
    ``array`` (a chunked/dask array), with a white cell separating chunks,
    so the alignment of groups with chunk boundaries is visible.

    Parameters
    ----------
    array : array with a ``.chunks`` attribute
        Chunked array whose chunking along ``axis`` is visualized.
    labels : array-like
        1D group labels, one per element along ``axis``.
    axis : int, optional
        Axis whose chunks are drawn (default: last).
    colors : sequence, optional
        One color per unique label; defaults to the tab20 palette.
    cmap : matplotlib colormap, optional
        Used to derive one color per unique label when ``colors`` is None.

    Raises
    ------
    ValueError
        If there are more unique labels than available colors.
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt

    labels = np.asarray(labels)
    assert labels.ndim == 1
    # Only the unique labels are needed here (the codes were unused).
    _, unique_labels = pd.factorize(labels)
    chunks = array.chunks[axis]
    if colors is None:
        if cmap is None:
            colors = list(mpl.cm.tab20.colors)
        else:
            # `else` replaces the redundant `elif cmap is not None`:
            # this branch is reached exactly when cmap was provided.
            colors = [cmap((num - 1) / len(unique_labels)) for num in unique_labels]
        if len(unique_labels) > len(colors):
            raise ValueError("Not enough unique colors")
    plt.figure()
    i0 = 0
    for i in chunks:
        lab = labels[i0 : i0 + i]
        # Trailing white cell visually separates consecutive chunks.
        col = [colors[label] for label in lab] + [(1, 1, 1)]
        draw_mesh(
            1,
            len(lab) + 1,
            colors=col,
            randomize=False,
            append=True,
            x0=i0 * 2.3,  # + (i0 - 1) * 0.025,
        )
        i0 += i
    pxin = 0.8
    plt.gcf().set_size_inches((len(labels) * pxin, 1 * pxin))
| 2.3125 | 2 |
pactman/__init__.py | RobbieClarken/pactman | 0 | 12767984 | """Python methods for interactive with a Pact Mock Service."""
from .mock.consumer import Consumer
from .mock.matchers import EachLike, Like, SomethingLike, Term
from .mock.pact import Pact
from .mock.provider import Provider
__all__ = ('Consumer', 'EachLike', 'Like', 'Pact', 'Provider', 'SomethingLike',
'Term')
| 1.945313 | 2 |
main/online/permissions.py | MahanBi/Back-End | 1 | 12767985 | <reponame>MahanBi/Back-End
from rest_framework import permissions
| 1 | 1 |
soda/core/soda/sodacl/antlr/SodaCLAntlrLexer.py | sodadata/soda-core | 4 | 12767986 | # Generated from /Users/tom/Code/soda-core/soda/core/soda/sodacl/antlr/SodaCLAntlr.g4 by ANTLR 4.9.3
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\28")
buf.write("\u01d3\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\3\2\3\2\3\2\3\2\3")
buf.write("\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3")
buf.write("\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3")
buf.write("\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\5")
buf.write("\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3")
buf.write("\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b")
buf.write("\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f")
buf.write("\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3")
buf.write("\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16")
buf.write("\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17")
buf.write("\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20")
buf.write("\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20")
buf.write("\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21")
buf.write("\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22")
buf.write("\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22")
buf.write("\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23")
buf.write("\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\25\3\25\3\25\3\25")
buf.write("\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27")
buf.write("\3\27\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\32\3\32\3\32")
buf.write("\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34")
buf.write("\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36")
buf.write("\3\36\3\36\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3!\3!\3!\3")
buf.write("!\3\"\3\"\3#\3#\3$\3$\3%\3%\3&\3&\3\'\3\'\3(\3(\3)\3)")
buf.write("\3*\3*\3+\3+\3,\3,\3,\3-\3-\3-\3.\3.\3.\3/\3/\3/\3\60")
buf.write("\3\60\3\61\3\61\3\62\3\62\3\63\3\63\3\63\3\63\6\63\u01b6")
buf.write("\n\63\r\63\16\63\u01b7\3\63\3\63\3\64\3\64\3\64\3\64\6")
buf.write("\64\u01c0\n\64\r\64\16\64\u01c1\3\64\3\64\3\65\3\65\7")
buf.write("\65\u01c8\n\65\f\65\16\65\u01cb\13\65\3\66\6\66\u01ce")
buf.write("\n\66\r\66\16\66\u01cf\3\67\3\67\2\28\3\3\5\4\7\5\t\6")
buf.write("\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20")
buf.write("\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65")
buf.write("\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60")
buf.write("_\61a\62c\63e\64g\65i\66k\67m8\3\2\7\3\2$$\3\2bb\5\2C")
buf.write("\\aac|\b\2\"\"*+..>@]]__\3\2\62;\2\u01d8\2\3\3\2\2\2\2")
buf.write("\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3")
buf.write("\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2")
buf.write("\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2")
buf.write("\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3")
buf.write("\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61")
buf.write("\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2")
buf.write("\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3")
buf.write("\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M")
buf.write("\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2")
buf.write("W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2")
buf.write("\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2")
buf.write("\2\2k\3\2\2\2\2m\3\2\2\2\3o\3\2\2\2\5\177\3\2\2\2\7\u0084")
buf.write("\3\2\2\2\t\u0096\3\2\2\2\13\u009e\3\2\2\2\r\u00b1\3\2")
buf.write("\2\2\17\u00c5\3\2\2\2\21\u00d8\3\2\2\2\23\u00da\3\2\2")
buf.write("\2\25\u00dc\3\2\2\2\27\u00de\3\2\2\2\31\u00e8\3\2\2\2")
buf.write("\33\u00f6\3\2\2\2\35\u0101\3\2\2\2\37\u0108\3\2\2\2!\u011b")
buf.write("\3\2\2\2#\u012c\3\2\2\2%\u013b\3\2\2\2\'\u014b\3\2\2\2")
buf.write(")\u014d\3\2\2\2+\u0151\3\2\2\2-\u0155\3\2\2\2/\u015d\3")
buf.write("\2\2\2\61\u0161\3\2\2\2\63\u0164\3\2\2\2\65\u0169\3\2")
buf.write("\2\2\67\u016e\3\2\2\29\u0173\3\2\2\2;\u017a\3\2\2\2=\u017f")
buf.write("\3\2\2\2?\u0183\3\2\2\2A\u0187\3\2\2\2C\u018b\3\2\2\2")
buf.write("E\u018d\3\2\2\2G\u018f\3\2\2\2I\u0191\3\2\2\2K\u0193\3")
buf.write("\2\2\2M\u0195\3\2\2\2O\u0197\3\2\2\2Q\u0199\3\2\2\2S\u019b")
buf.write("\3\2\2\2U\u019d\3\2\2\2W\u019f\3\2\2\2Y\u01a2\3\2\2\2")
buf.write("[\u01a5\3\2\2\2]\u01a8\3\2\2\2_\u01ab\3\2\2\2a\u01ad\3")
buf.write("\2\2\2c\u01af\3\2\2\2e\u01b1\3\2\2\2g\u01bb\3\2\2\2i\u01c5")
buf.write("\3\2\2\2k\u01cd\3\2\2\2m\u01d1\3\2\2\2op\7h\2\2pq\7t\2")
buf.write("\2qr\7g\2\2rs\7u\2\2st\7j\2\2tu\7p\2\2uv\7g\2\2vw\7u\2")
buf.write("\2wx\7u\2\2xy\7\"\2\2yz\7w\2\2z{\7u\2\2{|\7k\2\2|}\7p")
buf.write("\2\2}~\7i\2\2~\4\3\2\2\2\177\u0080\7y\2\2\u0080\u0081")
buf.write("\7k\2\2\u0081\u0082\7v\2\2\u0082\u0083\7j\2\2\u0083\6")
buf.write("\3\2\2\2\u0084\u0085\7t\2\2\u0085\u0086\7q\2\2\u0086\u0087")
buf.write("\7y\2\2\u0087\u0088\7a\2\2\u0088\u0089\7e\2\2\u0089\u008a")
buf.write("\7q\2\2\u008a\u008b\7w\2\2\u008b\u008c\7p\2\2\u008c\u008d")
buf.write("\7v\2\2\u008d\u008e\7\"\2\2\u008e\u008f\7u\2\2\u008f\u0090")
buf.write("\7c\2\2\u0090\u0091\7o\2\2\u0091\u0092\7g\2\2\u0092\u0093")
buf.write("\7\"\2\2\u0093\u0094\7c\2\2\u0094\u0095\7u\2\2\u0095\b")
buf.write("\3\2\2\2\u0096\u0097\7f\2\2\u0097\u0098\7g\2\2\u0098\u0099")
buf.write("\7h\2\2\u0099\u009a\7c\2\2\u009a\u009b\7w\2\2\u009b\u009c")
buf.write("\7n\2\2\u009c\u009d\7v\2\2\u009d\n\3\2\2\2\u009e\u009f")
buf.write("\7u\2\2\u009f\u00a0\7c\2\2\u00a0\u00a1\7o\2\2\u00a1\u00a2")
buf.write("\7g\2\2\u00a2\u00a3\7\"\2\2\u00a3\u00a4\7f\2\2\u00a4\u00a5")
buf.write("\7c\2\2\u00a5\u00a6\7{\2\2\u00a6\u00a7\7\"\2\2\u00a7\u00a8")
buf.write("\7n\2\2\u00a8\u00a9\7c\2\2\u00a9\u00aa\7u\2\2\u00aa\u00ab")
buf.write("\7v\2\2\u00ab\u00ac\7\"\2\2\u00ac\u00ad\7y\2\2\u00ad\u00ae")
buf.write("\7g\2\2\u00ae\u00af\7g\2\2\u00af\u00b0\7m\2\2\u00b0\f")
buf.write("\3\2\2\2\u00b1\u00b2\7u\2\2\u00b2\u00b3\7c\2\2\u00b3\u00b4")
buf.write("\7o\2\2\u00b4\u00b5\7g\2\2\u00b5\u00b6\7\"\2\2\u00b6\u00b7")
buf.write("\7f\2\2\u00b7\u00b8\7c\2\2\u00b8\u00b9\7{\2\2\u00b9\u00ba")
buf.write("\7\"\2\2\u00ba\u00bb\7n\2\2\u00bb\u00bc\7c\2\2\u00bc\u00bd")
buf.write("\7u\2\2\u00bd\u00be\7v\2\2\u00be\u00bf\7\"\2\2\u00bf\u00c0")
buf.write("\7o\2\2\u00c0\u00c1\7q\2\2\u00c1\u00c2\7p\2\2\u00c2\u00c3")
buf.write("\7v\2\2\u00c3\u00c4\7j\2\2\u00c4\16\3\2\2\2\u00c5\u00c6")
buf.write("\7c\2\2\u00c6\u00c7\7p\2\2\u00c7\u00c8\7q\2\2\u00c8\u00c9")
buf.write("\7o\2\2\u00c9\u00ca\7c\2\2\u00ca\u00cb\7n\2\2\u00cb\u00cc")
buf.write("\7{\2\2\u00cc\u00cd\7\"\2\2\u00cd\u00ce\7u\2\2\u00ce\u00cf")
buf.write("\7e\2\2\u00cf\u00d0\7q\2\2\u00d0\u00d1\7t\2\2\u00d1\u00d2")
buf.write("\7g\2\2\u00d2\u00d3\7\"\2\2\u00d3\u00d4\7h\2\2\u00d4\u00d5")
buf.write("\7q\2\2\u00d5\u00d6\7t\2\2\u00d6\u00d7\7\"\2\2\u00d7\20")
buf.write("\3\2\2\2\u00d8\u00d9\7f\2\2\u00d9\22\3\2\2\2\u00da\u00db")
buf.write("\7j\2\2\u00db\24\3\2\2\2\u00dc\u00dd\7o\2\2\u00dd\26\3")
buf.write("\2\2\2\u00de\u00df\7x\2\2\u00df\u00e0\7c\2\2\u00e0\u00e1")
buf.write("\7n\2\2\u00e1\u00e2\7w\2\2\u00e2\u00e3\7g\2\2\u00e3\u00e4")
buf.write("\7u\2\2\u00e4\u00e5\7\"\2\2\u00e5\u00e6\7k\2\2\u00e6\u00e7")
buf.write("\7p\2\2\u00e7\30\3\2\2\2\u00e8\u00e9\7o\2\2\u00e9\u00ea")
buf.write("\7w\2\2\u00ea\u00eb\7u\2\2\u00eb\u00ec\7v\2\2\u00ec\u00ed")
buf.write("\7\"\2\2\u00ed\u00ee\7g\2\2\u00ee\u00ef\7z\2\2\u00ef\u00f0")
buf.write("\7k\2\2\u00f0\u00f1\7u\2\2\u00f1\u00f2\7v\2\2\u00f2\u00f3")
buf.write("\7\"\2\2\u00f3\u00f4\7k\2\2\u00f4\u00f5\7p\2\2\u00f5\32")
buf.write("\3\2\2\2\u00f6\u00f7\7e\2\2\u00f7\u00f8\7j\2\2\u00f8\u00f9")
buf.write("\7g\2\2\u00f9\u00fa\7e\2\2\u00fa\u00fb\7m\2\2\u00fb\u00fc")
buf.write("\7u\2\2\u00fc\u00fd\7\"\2\2\u00fd\u00fe\7h\2\2\u00fe\u00ff")
buf.write("\7q\2\2\u00ff\u0100\7t\2\2\u0100\34\3\2\2\2\u0101\u0102")
buf.write("\7h\2\2\u0102\u0103\7k\2\2\u0103\u0104\7n\2\2\u0104\u0105")
buf.write("\7v\2\2\u0105\u0106\7g\2\2\u0106\u0107\7t\2\2\u0107\36")
buf.write("\3\2\2\2\u0108\u0109\7e\2\2\u0109\u010a\7q\2\2\u010a\u010b")
buf.write("\7p\2\2\u010b\u010c\7h\2\2\u010c\u010d\7k\2\2\u010d\u010e")
buf.write("\7i\2\2\u010e\u010f\7w\2\2\u010f\u0110\7t\2\2\u0110\u0111")
buf.write("\7c\2\2\u0111\u0112\7v\2\2\u0112\u0113\7k\2\2\u0113\u0114")
buf.write("\7q\2\2\u0114\u0115\7p\2\2\u0115\u0116\7u\2\2\u0116\u0117")
buf.write("\7\"\2\2\u0117\u0118\7h\2\2\u0118\u0119\7q\2\2\u0119\u011a")
buf.write("\7t\2\2\u011a \3\2\2\2\u011b\u011c\7h\2\2\u011c\u011d")
buf.write("\7q\2\2\u011d\u011e\7t\2\2\u011e\u011f\7\"\2\2\u011f\u0120")
buf.write("\7g\2\2\u0120\u0121\7c\2\2\u0121\u0122\7e\2\2\u0122\u0123")
buf.write("\7j\2\2\u0123\u0124\7\"\2\2\u0124\u0125\7f\2\2\u0125\u0126")
buf.write("\7c\2\2\u0126\u0127\7v\2\2\u0127\u0128\7c\2\2\u0128\u0129")
buf.write("\7u\2\2\u0129\u012a\7g\2\2\u012a\u012b\7v\2\2\u012b\"")
buf.write("\3\2\2\2\u012c\u012d\7h\2\2\u012d\u012e\7q\2\2\u012e\u012f")
buf.write("\7t\2\2\u012f\u0130\7\"\2\2\u0130\u0131\7g\2\2\u0131\u0132")
buf.write("\7c\2\2\u0132\u0133\7e\2\2\u0133\u0134\7j\2\2\u0134\u0135")
buf.write("\7\"\2\2\u0135\u0136\7v\2\2\u0136\u0137\7c\2\2\u0137\u0138")
buf.write("\7d\2\2\u0138\u0139\7n\2\2\u0139\u013a\7g\2\2\u013a$\3")
buf.write("\2\2\2\u013b\u013c\7h\2\2\u013c\u013d\7q\2\2\u013d\u013e")
buf.write("\7t\2\2\u013e\u013f\7\"\2\2\u013f\u0140\7g\2\2\u0140\u0141")
buf.write("\7c\2\2\u0141\u0142\7e\2\2\u0142\u0143\7j\2\2\u0143\u0144")
buf.write("\7\"\2\2\u0144\u0145\7e\2\2\u0145\u0146\7q\2\2\u0146\u0147")
buf.write("\7n\2\2\u0147\u0148\7w\2\2\u0148\u0149\7o\2\2\u0149\u014a")
buf.write("\7p\2\2\u014a&\3\2\2\2\u014b\u014c\7\60\2\2\u014c(\3\2")
buf.write("\2\2\u014d\u014e\7h\2\2\u014e\u014f\7q\2\2\u014f\u0150")
buf.write("\7t\2\2\u0150*\3\2\2\2\u0151\u0152\7c\2\2\u0152\u0153")
buf.write("\7p\2\2\u0153\u0154\7f\2\2\u0154,\3\2\2\2\u0155\u0156")
buf.write("\7d\2\2\u0156\u0157\7g\2\2\u0157\u0158\7v\2\2\u0158\u0159")
buf.write("\7y\2\2\u0159\u015a\7g\2\2\u015a\u015b\7g\2\2\u015b\u015c")
buf.write("\7p\2\2\u015c.\3\2\2\2\u015d\u015e\7p\2\2\u015e\u015f")
buf.write("\7q\2\2\u015f\u0160\7v\2\2\u0160\60\3\2\2\2\u0161\u0162")
buf.write("\7k\2\2\u0162\u0163\7p\2\2\u0163\62\3\2\2\2\u0164\u0165")
buf.write("\7y\2\2\u0165\u0166\7c\2\2\u0166\u0167\7t\2\2\u0167\u0168")
buf.write("\7p\2\2\u0168\64\3\2\2\2\u0169\u016a\7h\2\2\u016a\u016b")
buf.write("\7c\2\2\u016b\u016c\7k\2\2\u016c\u016d\7n\2\2\u016d\66")
buf.write("\3\2\2\2\u016e\u016f\7r\2\2\u016f\u0170\7c\2\2\u0170\u0171")
buf.write("\7u\2\2\u0171\u0172\7u\2\2\u01728\3\2\2\2\u0173\u0174")
buf.write("\7e\2\2\u0174\u0175\7j\2\2\u0175\u0176\7c\2\2\u0176\u0177")
buf.write("\7p\2\2\u0177\u0178\7i\2\2\u0178\u0179\7g\2\2\u0179:\3")
buf.write("\2\2\2\u017a\u017b\7n\2\2\u017b\u017c\7c\2\2\u017c\u017d")
buf.write("\7u\2\2\u017d\u017e\7v\2\2\u017e<\3\2\2\2\u017f\u0180")
buf.write("\7c\2\2\u0180\u0181\7x\2\2\u0181\u0182\7i\2\2\u0182>\3")
buf.write("\2\2\2\u0183\u0184\7o\2\2\u0184\u0185\7k\2\2\u0185\u0186")
buf.write("\7p\2\2\u0186@\3\2\2\2\u0187\u0188\7o\2\2\u0188\u0189")
buf.write("\7c\2\2\u0189\u018a\7z\2\2\u018aB\3\2\2\2\u018b\u018c")
buf.write("\7]\2\2\u018cD\3\2\2\2\u018d\u018e\7_\2\2\u018eF\3\2\2")
buf.write("\2\u018f\u0190\7}\2\2\u0190H\3\2\2\2\u0191\u0192\7\177")
buf.write("\2\2\u0192J\3\2\2\2\u0193\u0194\7*\2\2\u0194L\3\2\2\2")
buf.write("\u0195\u0196\7+\2\2\u0196N\3\2\2\2\u0197\u0198\7.\2\2")
buf.write("\u0198P\3\2\2\2\u0199\u019a\7\'\2\2\u019aR\3\2\2\2\u019b")
buf.write("\u019c\7-\2\2\u019cT\3\2\2\2\u019d\u019e\7/\2\2\u019e")
buf.write("V\3\2\2\2\u019f\u01a0\7#\2\2\u01a0\u01a1\7?\2\2\u01a1")
buf.write("X\3\2\2\2\u01a2\u01a3\7>\2\2\u01a3\u01a4\7@\2\2\u01a4")
buf.write("Z\3\2\2\2\u01a5\u01a6\7>\2\2\u01a6\u01a7\7?\2\2\u01a7")
buf.write("\\\3\2\2\2\u01a8\u01a9\7@\2\2\u01a9\u01aa\7?\2\2\u01aa")
buf.write("^\3\2\2\2\u01ab\u01ac\7?\2\2\u01ac`\3\2\2\2\u01ad\u01ae")
buf.write("\7>\2\2\u01aeb\3\2\2\2\u01af\u01b0\7@\2\2\u01b0d\3\2\2")
buf.write("\2\u01b1\u01b5\7$\2\2\u01b2\u01b6\n\2\2\2\u01b3\u01b4")
buf.write("\7^\2\2\u01b4\u01b6\7$\2\2\u01b5\u01b2\3\2\2\2\u01b5\u01b3")
buf.write("\3\2\2\2\u01b6\u01b7\3\2\2\2\u01b7\u01b5\3\2\2\2\u01b7")
buf.write("\u01b8\3\2\2\2\u01b8\u01b9\3\2\2\2\u01b9\u01ba\7$\2\2")
buf.write("\u01baf\3\2\2\2\u01bb\u01bf\7b\2\2\u01bc\u01c0\n\3\2\2")
buf.write("\u01bd\u01be\7^\2\2\u01be\u01c0\7b\2\2\u01bf\u01bc\3\2")
buf.write("\2\2\u01bf\u01bd\3\2\2\2\u01c0\u01c1\3\2\2\2\u01c1\u01bf")
buf.write("\3\2\2\2\u01c1\u01c2\3\2\2\2\u01c2\u01c3\3\2\2\2\u01c3")
buf.write("\u01c4\7b\2\2\u01c4h\3\2\2\2\u01c5\u01c9\t\4\2\2\u01c6")
buf.write("\u01c8\n\5\2\2\u01c7\u01c6\3\2\2\2\u01c8\u01cb\3\2\2\2")
buf.write("\u01c9\u01c7\3\2\2\2\u01c9\u01ca\3\2\2\2\u01caj\3\2\2")
buf.write("\2\u01cb\u01c9\3\2\2\2\u01cc\u01ce\t\6\2\2\u01cd\u01cc")
buf.write("\3\2\2\2\u01ce\u01cf\3\2\2\2\u01cf\u01cd\3\2\2\2\u01cf")
buf.write("\u01d0\3\2\2\2\u01d0l\3\2\2\2\u01d1\u01d2\7\"\2\2\u01d2")
buf.write("n\3\2\2\2\t\2\u01b5\u01b7\u01bf\u01c1\u01c9\u01cf\2")
return buf.getvalue()
class SodaCLAntlrLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
T__15 = 16
T__16 = 17
T__17 = 18
T__18 = 19
FOR = 20
AND = 21
BETWEEN = 22
NOT = 23
IN = 24
WARN = 25
FAIL = 26
PASS = 27
CHANGE = 28
LAST = 29
AVG = 30
MIN = 31
MAX = 32
SQUARE_LEFT = 33
SQUARE_RIGHT = 34
CURLY_LEFT = 35
CURLY_RIGHT = 36
ROUND_LEFT = 37
ROUND_RIGHT = 38
COMMA = 39
PERCENT = 40
PLUS = 41
MINUS = 42
NOT_EQUAL = 43
NOT_EQUAL_SQL = 44
LTE = 45
GTE = 46
EQUAL = 47
LT = 48
GT = 49
IDENTIFIER_DOUBLE_QUOTE = 50
IDENTIFIER_BACKTICK = 51
IDENTIFIER_UNQUOTED = 52
DIGITS = 53
S = 54
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'freshness using'", "'with'", "'row_count same as'", "'default'",
"'same day last week'", "'same day last month'", "'anomaly score for '",
"'d'", "'h'", "'m'", "'values in'", "'must exist in'", "'checks for'",
"'filter'", "'configurations for'", "'for each dataset'", "'for each table'",
"'for each column'", "'.'", "'for'", "'and'", "'between'", "'not'",
"'in'", "'warn'", "'fail'", "'pass'", "'change'", "'last'",
"'avg'", "'min'", "'max'", "'['", "']'", "'{'", "'}'", "'('",
"')'", "','", "'%'", "'+'", "'-'", "'!='", "'<>'", "'<='", "'>='",
"'='", "'<'", "'>'", "' '" ]
symbolicNames = [ "<INVALID>",
"FOR", "AND", "BETWEEN", "NOT", "IN", "WARN", "FAIL", "PASS",
"CHANGE", "LAST", "AVG", "MIN", "MAX", "SQUARE_LEFT", "SQUARE_RIGHT",
"CURLY_LEFT", "CURLY_RIGHT", "ROUND_LEFT", "ROUND_RIGHT", "COMMA",
"PERCENT", "PLUS", "MINUS", "NOT_EQUAL", "NOT_EQUAL_SQL", "LTE",
"GTE", "EQUAL", "LT", "GT", "IDENTIFIER_DOUBLE_QUOTE", "IDENTIFIER_BACKTICK",
"IDENTIFIER_UNQUOTED", "DIGITS", "S" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "T__15", "T__16", "T__17", "T__18", "FOR", "AND",
"BETWEEN", "NOT", "IN", "WARN", "FAIL", "PASS", "CHANGE",
"LAST", "AVG", "MIN", "MAX", "SQUARE_LEFT", "SQUARE_RIGHT",
"CURLY_LEFT", "CURLY_RIGHT", "ROUND_LEFT", "ROUND_RIGHT",
"COMMA", "PERCENT", "PLUS", "MINUS", "NOT_EQUAL", "NOT_EQUAL_SQL",
"LTE", "GTE", "EQUAL", "LT", "GT", "IDENTIFIER_DOUBLE_QUOTE",
"IDENTIFIER_BACKTICK", "IDENTIFIER_UNQUOTED", "DIGITS",
"S" ]
grammarFileName = "SodaCLAntlr.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9.3")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
| 1.367188 | 1 |
baron.py | Alexandra-Baron/belhard_52 | 0 | 12767987 | <filename>baron.py
# Minimal placeholder script: prints the surname "baron".
print('baron')
# surname
networkapi/infrastructure/xml_utils.py | vinicius-marinho/GloboNetworkAPI | 73 | 12767988 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xml.dom import InvalidCharacterErr
from xml.dom.minicompat import StringTypes
from xml.dom.minidom import *
class XMLError(Exception):
    """Error raised while marshalling or unmarshalling XML.

    Carries the underlying ``cause`` exception and a human-readable
    ``message`` (Portuguese, matching the rest of this module).
    """

    def __init__(self, cause, message):
        self.cause = cause
        self.message = message

    def __str__(self):
        text = u'Erro ao criar ou ler o XML: Causa: %s, Mensagem: %s' % (
            self.cause, self.message)
        return text.encode('utf-8', 'replace')
class InvalidNodeNameXMLError(XMLError):
    """Raised when a name cannot be used as an XML tag name."""
    def __init__(self, cause, message):
        XMLError.__init__(self, cause, message)
class InvalidNodeTypeXMLError(XMLError):
    """Raised when a value's type cannot be used as XML tag content."""
    def __init__(self, cause, message):
        XMLError.__init__(self, cause, message)
def _add_text_node(value, node, doc):
    """Append *value* as a text-node child of *node*; no-op when value is None.

    Raises InvalidNodeTypeXMLError when the DOM rejects the value's type.
    (Python 2 code: uses `unicode` and `except E, name` syntax.)
    """
    if value is None:
        return
    if not isinstance(value, StringTypes):
        # Non-string values are converted through unicode().
        text = '%s' % unicode(value)
    else:
        # Escape '%' so later '%'-formatting of the XML cannot break it.
        text = r'%s' % value.replace('%', '%%')
    try:
        textNode = doc.createTextNode(text)
        node.appendChild(textNode)
    except TypeError, t:
        raise InvalidNodeTypeXMLError(
            t, u'Conteúdo de um Nó do XML com tipo de dado inválido: %s ' % value)
def _add_list_node(nodeName, list, parent, doc):
    """Append one <nodeName> child of *parent* per element of *list*.

    Dict elements become nested element trees; other values become text.
    An empty/None list still produces a single empty <nodeName> element.
    """
    if list:
        for value in list:
            node = doc.createElement(nodeName)
            parent.appendChild(node)
            if isinstance(value, dict):
                _add_nodes_to_parent(value, node, doc)
            else:
                _add_text_node(value, node, doc)
    else:
        # Emit an empty element so the tag is always present in the output.
        node = doc.createElement(nodeName)
        parent.appendChild(node)
        _add_text_node('', node, doc)
def _add_nodes_to_parent(map, parent, doc):
    # Recursively turns ``map`` (a dict; shadows the builtin) into child
    # elements of ``parent``: dict values recurse, list values expand into
    # repeated elements, anything else becomes a text node.
    if map is None:
        return
    for key, value in map.iteritems():
        try:
            if isinstance(value, dict):
                node = doc.createElement(key)
                parent.appendChild(node)
                _add_nodes_to_parent(value, node, doc)
            elif isinstance(value, type([])):
                _add_list_node(key, value, parent, doc)
            else:
                node = doc.createElement(key)
                parent.appendChild(node)
                _add_text_node(value, node, doc)
        except InvalidCharacterErr, i:
            # The dict key was not a legal XML tag name.
            raise InvalidNodeNameXMLError(
                i, u'Valor inválido para nome de uma TAG de XML: %s' % key)
def dumps(map, root_name, root_attributes=None):
    """Build an XML string with root element ``root_name`` from ``map``.

    Each key of ``map`` becomes a child node of the root; the key's value
    becomes that node's content. ``root_attributes`` (dict) is rendered as
    attributes on the root element.

    Throws: XMLError, InvalidNodeNameXMLError, InvalidNodeTypeXMLError
    """
    xml = ''
    try:
        implementation = getDOMImplementation()
    # NOTE(review): catching ImportError here looks unusual — confirm this is
    # the failure mode actually raised by getDOMImplementation().
    except ImportError, i:
        raise XMLError(i, u'Erro ao obter o DOMImplementation')
    doc = implementation.createDocument(None, root_name, None)
    try:
        root = doc.documentElement
        if (root_attributes is not None):
            for key, value in root_attributes.iteritems():
                attribute = doc.createAttribute(key)
                attribute.nodeValue = value
                root.setAttributeNode(attribute)
        _add_nodes_to_parent(map, root, doc)
        xml = doc.toxml('UTF-8')
    except InvalidCharacterErr, i:
        raise InvalidNodeNameXMLError(
            i, u'Valor inválido para nome de uma TAG de XML: %s' % root_name)
    finally:
        # Break DOM reference cycles so the document can be collected.
        doc.unlink()
    return xml
def dumps_networkapi(map, version='1.0'):
    # Convenience wrapper: serialize ``map`` under a <networkapi versao=...> root.
    return dumps(map, 'networkapi', {'versao': version})
def _create_childs_map(parent, force_list):
    # Recursively converts a DOM node into plain Python data:
    # - None for an empty element,
    # - a string for text-only content (the '%%' escaping added by
    #   _add_text_node is undone here),
    # - a dict for element children (repeated names, or names listed in
    #   ``force_list``, collapse into lists),
    # - a mixed list (text values + child dict) when both are present.
    if parent is None:
        return None
    if parent.hasChildNodes():
        childs = parent.childNodes
        childs_map = dict()
        childs_values = []
        for i in range(childs.length):
            child = childs.item(i)
            if child.nodeType == Node.ELEMENT_NODE:
                if child.nodeName in childs_map:
                    # Second (or later) sibling with the same name: promote the
                    # existing entry to a list and append.
                    child_value = _create_childs_map(child, force_list)
                    if child_value is not None:
                        value = childs_map[child.nodeName]
                        if not isinstance(value, type([])):
                            value = [value]
                        value.append(child_value)
                        childs_map[child.nodeName] = value
                elif child.nodeName in force_list:
                    # Caller asked for this name to always be a list, even
                    # with a single (or empty) occurrence.
                    child_value = _create_childs_map(child, force_list)
                    if child_value is None:
                        child_value = []
                    else:
                        child_value = [child_value]
                    childs_map[child.nodeName] = child_value
                else:
                    childs_map[child.nodeName] = _create_childs_map(
                        child, force_list)
            elif child.nodeType == Node.TEXT_NODE or child.nodeType == Node.CDATA_SECTION_NODE:
                # Whitespace-only text nodes are ignored.
                if child.data.strip() != '':
                    childs_values.append(child.data.replace('%%', '%'))
        if len(childs_values) == 0 and len(childs_map) == 0:
            return None
        if len(childs_values) != 0 and len(childs_map) != 0:
            # Mixed content: text pieces plus the dict of element children.
            childs_values.append(childs_map)
            return childs_values
        if len(childs_values) != 0:
            # Pure text: unwrap a single value.
            if len(childs_values) == 1:
                return childs_values[0]
            return childs_values
        return childs_map
    elif parent.nodeType == Node.TEXT_NODE or parent.nodeType == Node.CDATA_SECTION_NODE:
        if parent.data.strip() != '':
            return parent.data
        return None
def loads(xml, force_list=None):
    """Parse ``xml`` and return ``(map, attrs_map)`` for the root element.

    ``map`` has the root element's name as key and its converted content as
    value (see _create_childs_map for the conversion rules). Names listed in
    ``force_list`` are always returned as lists. ``attrs_map`` holds the root
    element's attributes.

    Throws: XMLError
    """
    if force_list is None:
        force_list = []
    try:
        doc = parseString(xml)
    except Exception, e:
        raise XMLError(e, u'Falha ao realizar o parse do xml.')
    root = doc.documentElement
    # NOTE: ``map`` shadows the builtin, matching the style of this module.
    map = dict()
    attrs_map = dict()
    if root.hasAttributes():
        attributes = root.attributes
        for i in range(attributes.length):
            attr = attributes.item(i)
            attrs_map[attr.nodeName] = attr.nodeValue
    map[root.nodeName] = _create_childs_map(root, force_list)
    return map, attrs_map
# Manual smoke test (Python 2): exercises loads()/dumps() round-trips on a
# few hand-written XML samples. Deliberately shadows the ``map`` and ``list``
# builtins locally, mirroring the style of the functions above.
if __name__ == '__main__':
    map, attrs_map = loads('<teste/>')
    print map
    list = [{'id': None}, {'id': (2, 6)}]
    map = {'ambiente': list}
    xml = dumps(map, 'networkapi', {'versao': '1.0'})
    print xml
    map, attrs_map = loads(xml)
    print map
    xml = '<?xml version="1.0" encoding="UTF-8"?><networkapi versao="1.0"><!--Comentario--><ambiente><id><!--Comentario--></id></ambiente><ambiente><id>3<teste>geovana</teste>2</id></ambiente></networkapi>'
    map, attrs_map = loads(xml)
    print map
    xml = '<?xml version="1.0" encoding="UTF-8"?><networkapi versao="1.0"><!--Comentario--><ambiente><id><!--Comentario--></id></ambiente><ambiente><id>3 2 5</id></ambiente></networkapi>'
    map, attrs_map = loads(xml)
    print map
    xml = dumps(map, 'networkapi', attrs_map)
    print xml
    xml = dumps(None, 'networkapi', {'versao': '1.0'})
    print xml
    xml = """<?xml version="1.0" encoding="UTF-8"?>
<networkapi versao="1.0">
<equipamento>
<id_tipo_equipamento>1</id_tipo_equipamento>
<id_modelo>teste</id_modelo>
<nome>teste</nome>
<id_grupo>teste</id_grupo>
</equipamento>
</networkapi>"""
    map, attrs_map = loads(xml)
    print map
    print dumps(map, 'networkapi', attrs_map)
    xml = """<?xml version="1.0" encoding="UTF-8"?>
<networkapi versao="1.0">
<equipamento>
<id ></id>
</equipamento>
<equipamento_grupo>
<id></id>
</equipamento_grupo>
</networkapi>
"""
    map, attrs_map = loads(xml)
    print map
    print dumps(map, 'networkapi', attrs_map)
    xml = """<?xml version="1.0" encoding="UTF-8"?>
<networkapi versao="1.0">
<x/>
</networkapi>
"""
    map, attrs_map = loads(xml)
    print map
    print dumps(map, 'networkapi', attrs_map)
| 2.09375 | 2 |
testing/library/tools/initPromLibrary.py | GoogleCloudPlatform/datanucleus-appengine | 7 | 12767989 | <filename>testing/library/tools/initPromLibrary.py
#!/usr/bin/python2.4
import os
import sys
import getopt
import xml.dom.minidom
# Starts parsing the XML file containing the book data entries
#
# Default service endpoints; '-u local' / '-u corp' on the command line
# select between them (see main()).
default_local = "http://localhost:8080/library"
default_corp = "http://gptestshop.prom.corp.google.com/library"
default_uri = default_corp
default_files = ("book.xml", "tech.xml")
class initPromLibrary:
    """Utility to initialize the Prometheus Library.

    We repeatedly use wget to send Add actions to the
    Prometheus Library with lastname, firstname, year
    and title parameters in order to build up a catalog
    of entries to run the automated Selenium tests.
    The catalog name in the xml file is used to specify
    the PromLibrary entityname parameter.
    The catalog is built up from a set of xml files
    currently consisting of book.xml and tech.xml.
    The latter is a small catalog that illustrates all of
    the fields.
    <?xml version="1.0" encoding="utf-8"?>
    <catalog>
    <name>TechBooks</name>
    <book>
    <title>Python in a Nutshell</title>
    <author><NAME></author>
    <year>2003</year>
    </book>
    <book>
    <title>HTML and XHTML</title>
    <author><NAME></author>
    <year>2007</year>
    </book>
    </catalog>
    Each book entry consists of:
    title - the title of the book
    author - the author of the book which gets divided into
    a lastname and firstname
    year - the year of the book
    The uri is specified with the -u option on the command line.
    The default uri is http://gptestshop.prom.corp.google.com/library
    which is the version on the Prometheus corp cluster.
    This is also the uri that is used if -u corp is passed.
    If -u local is used, the uri is http://localhost:8080/library
    or the locally deployed version.
    """
    def handleCatalog(self, catalog, uri):
        """Parse the XML file and get the catalog name
        and call handleBooks to deal with the list of books
        """
        XmlBooks = catalog.getElementsByTagName("book")
        Xmlname = catalog.getElementsByTagName("name")[0]
        name = Xmlname.childNodes[0].data
        self.handleBooks(XmlBooks, name, uri)
    def handleBooks(self, XmlBooks , name, uri):
        """For each book in the list call handleBook
        to extract the entry info.
        """
        for XmlBook in XmlBooks:
            self.handleBook(XmlBook, name, uri)
    def handleBook(self, XmlBook, entityname, uri):
        """Extracts the book data entries from the XML file and
        calls AddBook to send it to the Prometheus Library
        """
        Xmltitle = XmlBook.getElementsByTagName("title")[0]
        XmlAuthor = XmlBook.getElementsByTagName("author")[0]
        Xmldate = XmlBook.getElementsByTagName("year")[0]
        title = Xmltitle.childNodes[0].data
        year = Xmldate.childNodes[0].data
        # Split "First Last" into at most three parts; only the first two are used.
        fullname = XmlAuthor.childNodes[0].data.split(None,2)
        firstname = fullname[0]
        lastname = fullname[1]
        self.AddBook(uri, lastname, firstname, title, year, entityname)
    def AddBook(self, uri, lastname, firstname, title, year, entityname):
        """Add a Book to the Prometheus library by building the
        parameter query string and calling wget. The results
        from wget are redirected to /dev/null. This is used
        rather than --spider because these types of requests
        are not honored by Prometheus.
        """
        # NOTE(review): the values are interpolated unquoted into a shell
        # command via os.system — titles containing shell metacharacters
        # would break or be a command-injection vector; acceptable only for
        # trusted test fixtures.
        querystring = '?'
        for arg in [ "lastname=%s" % lastname, "firstname=%s" % firstname,
                     "title=%s" % title, "year=%s" % year, "action_type=Add",
                     "entity=%s" % entityname ]:
            querystring = "%s&%s" % (querystring, arg)
        postdatastring = '--post-data "%s"' % (querystring)
        cmd = 'wget'
        opt = "-O /dev/null"
        cmd_str = "%s %s %s %s" % (cmd, opt, postdatastring, uri)
        print cmd_str
        os.system(cmd_str)
def main(argv):
    """Perform initialization, parse command line options and
    arguments, and output usage messages.
    If multiple filenames are provided, loop through
    each one and call the handleCatalog to process the XML.
    """
    uri = default_uri
    cmdline_params = argv[1:]
    try:
        optlist, args = getopt.getopt(cmdline_params, 'u:', ['uri='])
    except getopt.GetoptError:
        print 'Usage: initPromLibrary.py [-u uri] [files]'
        print ' Default files = %s %s' % (default_files[0], default_files[1])
        print ' Default uri = %s' % (default_uri)
        print " Shorthand uri's"
        print ' -u corp = %s' % (default_corp)
        print ' -u local = %s' % (default_local)
        sys.exit(2)
    if optlist:
        uri = optlist[0][1]
    # Expand the 'local'/'corp' shorthands into full URIs.
    if uri == 'local':
        uri = default_local
    if uri == 'corp':
        uri = default_corp
    if not args:
        args = default_files
    for arg in args:
        if arg:
            print arg
            # Relative paths are resolved next to this script.
            if arg[0] == '/':
                xmlfilename = arg
            else:
                xmlfilename = os.path.join(os.path.dirname(__file__), arg)
            dom = xml.dom.minidom.parse(xmlfilename)
            cat = initPromLibrary()
            cat.handleCatalog(dom, uri)
if __name__ == '__main__':
    main(sys.argv)
| 3.109375 | 3 |
run_forever.py | kuenishi/machi-py | 1 | 12767990 | <reponame>kuenishi/machi-py
import argparse
import binascii
import random
import time
import numpy
from machi import MachiStore
class Timer:
    """Context manager that measures the wall-clock duration of its body.

    On exit, the elapsed time in seconds is appended to the ``stats``
    object (anything exposing ``append``) given at construction.
    """

    def __init__(self, stats):
        self.stats = stats

    def __enter__(self):
        self.b = time.time()

    def __exit__(self, exc_type, exc_value, traceback):
        elapsed = time.time() - self.b
        self.stats.append(elapsed)
class Stats:
    """Accumulates durations under a label and reports their mean."""

    def __init__(self, name):
        self.stats = []
        self.name = name

    def measure(self):
        """Return a context manager that records its elapsed time here."""
        return Timer(self)

    def append(self, duration):
        self.stats.append(duration)

    def pp(self):
        """Print the label and the mean duration; silent when no samples."""
        if not self.stats:
            return
        mean = sum(self.stats) / len(self.stats)
        print(self.name, mean)
def main():
    """Soak-test a MachiStore forever.

    Loop: append a random 1-10 MiB blob, periodically re-read 10 random
    blobs and verify their CRC32 checksums, and trim old entries once the
    key list grows very large. Every ``--interval`` seconds, print the
    iteration counter and the mean append/sample timings.

    Command-line flags: --maxlen (store segment size), --dir (data
    directory), --interval (report period in seconds, must be > 0).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--maxlen", type=int, help="Max len", default=4096)
    parser.add_argument("--dir", type=str, help="Data Directory", default="/tmp")
    parser.add_argument("--interval", type=int, help="interval", default=1)
    args = parser.parse_args()
    assert args.interval > 0
    print(args)
    machi = MachiStore(maxlen=args.maxlen, dir=args.dir)
    append_stats = Stats("append time (sec)")
    sample_stats = Stats("sample time (sec)")
    timer = time.time()
    # List of (machi_key, crc32) pairs for every blob still in the store.
    keys = []
    counter = 0
    ten_mb = 1024 * 1024 * 10
    one_mb = 1024 * 1024
    while True:
        # Append a random 1-10 MiB blob and remember its key + checksum.
        length = random.randint(one_mb, ten_mb)
        buf = numpy.random.bytes(length)
        crc = binascii.crc32(buf)
        with append_stats.measure():
            key = machi.append(buf)
        keys.append((key, crc))
        if len(keys) > 10:
            # Re-read 10 random blobs and verify their checksums.
            with sample_stats.measure():
                test_keys = random.sample(keys, 10)
                for key, crc0 in test_keys:
                    data = machi.get(*key)
                    crc = binascii.crc32(data)
                    assert crc0 == crc
        if len(keys) > 1024 * 1024:
            # BUG FIX: ``keys`` is a list, so the original
            # ``random.sample(keys.keys(), 1)`` raised AttributeError here.
            # Pick one entry, forget it, and trim it from the store so the
            # verification loop above never reads a trimmed key.
            entry = random.choice(keys)
            keys.remove(entry)
            machi.trim(*entry[0])
        counter += 1
        t = time.time()
        if t > timer + args.interval:
            print("Counter:", counter)
            append_stats.pp()
            sample_stats.pp()
            timer = t
if __name__ == "__main__":
    main()
| 2.75 | 3 |
mtp_cashbook/misc_views.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-cashbook | 4 | 12767991 | <filename>mtp_cashbook/misc_views.py
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.translation import gettext_lazy as _
from django.views.generic import View, TemplateView, FormView
from mtp_common.auth import api_client
from mtp_cashbook import READ_ML_BRIEFING_FLAG
from mtp_cashbook.utils import save_user_flags
class BaseView(View):
    """
    Base class for all cashbook and disbursement views:
    - forces login
    - ensures "read ML briefing" flag is set or redirects to ML briefing confirmation screen
    """
    # Subclasses that must remain reachable before the flag is set can opt
    # out by defining ``requires_reading_ml_briefing = False``.
    @method_decorator(login_required)
    def dispatch(self, request, **kwargs):
        if not request.read_ml_briefing and getattr(self, 'requires_reading_ml_briefing', True):
            return redirect('ml-briefing-confirmation')
        return super().dispatch(request, **kwargs)
class LandingView(BaseView, TemplateView):
    """Home page: links the cashbook and disbursement apps and, for user
    admins, shows the number of outstanding account requests."""
    template_name = 'landing.html'

    def get_context_data(self, **kwargs):
        if self.request.user.has_perm('auth.change_user'):
            # User admins get a count of pending account requests
            # (page_size=1 keeps the API response minimal; only 'count' is used).
            response = api_client.get_api_session(self.request).get('requests/', params={'page_size': 1})
            kwargs['user_request_count'] = response.json().get('count')
        cards = [
            {
                'heading': _('Digital cashbook'),
                'link': reverse_lazy('new-credits'),
                'description': _('Credit money into a prisoner’s account'),
            },
            {
                'heading': _('Digital disbursements'),
                'link': reverse_lazy('disbursements:start'),
                'description': _('Send money out of a prisoner’s account by bank transfer or cheque'),
            },
        ]
        kwargs.update(
            start_page_url=settings.START_PAGE_URL,
            cards=cards,
        )
        return super().get_context_data(**kwargs)
class MLBriefingConfirmationForm(forms.Form):
    """Single yes/no question asking whether the user has read the money
    laundering briefing."""
    read_briefing = forms.ChoiceField(
        label=_('Have you read the money laundering briefing?'),
        required=True,
        choices=(
            ('yes', _('Yes')),
            ('no', _('No')),
        ), error_messages={
            'required': _('Please select ‘yes’ or ‘no’'),
        },
    )

    def clean_read_briefing(self):
        # Normalise the posted 'yes'/'no' choice into a bool. A missing/empty
        # value is passed through unchanged so the required-field validation
        # message still applies.
        read_briefing = self.cleaned_data.get('read_briefing')
        if read_briefing:
            read_briefing = read_briefing == 'yes'
        return read_briefing
class MLBriefingConfirmationView(BaseView, FormView):
    """Asks the user to confirm they read the money laundering briefing.

    Answering 'yes' stores READ_ML_BRIEFING_FLAG against the user and sends
    them home; 'no' redirects to the briefing page itself.
    """
    title = _('Have you read the money laundering briefing?')
    form_class = MLBriefingConfirmationForm
    template_name = 'ml-briefing-confirmation.html'
    success_url = reverse_lazy('home')
    # Must stay reachable before the flag is set (see BaseView.dispatch).
    requires_reading_ml_briefing = False

    def dispatch(self, request, **kwargs):
        # Users who already confirmed skip straight to the success URL.
        if request.read_ml_briefing:
            return redirect(self.get_success_url())
        return super().dispatch(request, **kwargs)

    def form_valid(self, form):
        read_briefing = form.cleaned_data['read_briefing']
        if read_briefing:
            save_user_flags(self.request, READ_ML_BRIEFING_FLAG)
            messages.success(self.request, _('Thank you, please carry on with your work.'))
            return super().form_valid(form)
        else:
            return redirect('ml-briefing')
class MLBriefingView(BaseView, TemplateView):
    """Shows the money laundering briefing to users who answered 'no'."""
    title = _('You need to read the money laundering briefing')
    template_name = 'ml-briefing.html'
    # Must stay reachable before the flag is set (see BaseView.dispatch).
    requires_reading_ml_briefing = False

    def dispatch(self, request, **kwargs):
        # Users who already confirmed have no reason to be here.
        if request.read_ml_briefing:
            return redirect(MLBriefingConfirmationView.success_url)
        return super().dispatch(request, **kwargs)
class PolicyChangeInfo(BaseView, TemplateView):
    """Explains the Nov 2nd 2020 policy changes; title and template depend
    on whether bank transfers are still enabled.

    NOTE: the class-level conditional below is evaluated once at import
    time, so a change to BANK_TRANSFERS_ENABLED only takes effect after a
    process restart.
    """
    if settings.BANK_TRANSFERS_ENABLED:
        title = _('How Nov 2nd policy changes will affect you')
    else:
        title = _('Policy changes made on Nov 2nd 2020 that may affect your work')

    def get_template_names(self):
        if settings.BANK_TRANSFERS_ENABLED:
            return ['policy-change-warning.html']
        else:
            return ['policy-change-info.html']
class FAQView(TemplateView):
    """Static help page.

    Extends TemplateView directly (not BaseView), so no login is required —
    presumably intentional for a public help page; confirm.
    """
    title = _('What do you need help with?')
    template_name = 'faq.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['breadcrumbs_back'] = reverse_lazy('home')
        context['reset_password_url'] = reverse_lazy('reset_password')
        context['sign_up_url'] = reverse_lazy('sign-up')
        return context
| 2.09375 | 2 |
geomloss/examples/sinkhorn_multiscale/plot_transport_blur.py | ismedina/geomloss | 0 | 12767992 | """
4) Sinkhorn vs. blurred Wasserstein distances
==========================================================
Sinkhorn divergences rely on a simple idea:
by **blurring** the transport plan through the addition of
an entropic penalty, we can reduce the effective dimensionality
of the transportation problem and compute **sensible approximations of the
Wasserstein distance at a low computational cost**.
"""
##################################################
# As discussed in previous notebooks, the *vanilla* Sinkhorn loop
# can be symmetrized, de-biased and turned into a genuine
# multiscale algorithm: available through the
# :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer, the **Sinkhorn divergence**
#
# .. math::
# \text{S}_\varepsilon(\alpha,\beta)~=~ \text{OT}_\varepsilon(\alpha,\beta)
# - \tfrac{1}{2}\text{OT}_\varepsilon(\alpha,\alpha)
# - \tfrac{1}{2}\text{OT}_\varepsilon(\beta,\beta),
#
# is a tractable approximation of the Wasserstein distance
# that **retains its key geometric properties** - positivity, convexity,
# metrization of the convergence in law.
#
# **But is it really the best way of smoothing our transportation problem?**
# When "p = 2" and :math:`\text{C}(x,y)=\tfrac{1}{2}\|x-y\|^2`,
# a very sensible alternative to Sinkhorn divergences is the
# **blurred Wasserstein distance**
#
# .. math::
# \text{B}_\varepsilon(\alpha,\beta) ~=~ \text{W}_2(\,k_{\varepsilon/4}\star\alpha,\,k_{\varepsilon/4}\star\beta\,),
#
# where :math:`\text{W}_2` denotes the *true* Wasserstein distance associated to
# our cost function :math:`\text{C}` and
#
# .. math::
# k_{\varepsilon/4}: (x-y) \mapsto \exp(-\|x-y\|^2 / \tfrac{2}{4}\varepsilon)
#
# is a Gaussian kernel of deviation :math:`\sigma = \sqrt{\varepsilon}/2`.
# On top of making explicit our intuitions on **low-frequency Optimal Transport**, this
# simple divergence enjoys a collection of desirable properties:
#
# - It is the **square of a distance** that metrizes the convergence in law.
# - It takes the "correct" values on atomic **Dirac masses**, lifting
# the ground cost function to the space of positive measures:
#
# .. math::
# \text{B}_\varepsilon(\delta_x,\delta_y)~=~\text{C}(x,y)
# ~=~\tfrac{1}{2}\|x-y\|^2~=~\text{S}_\varepsilon(\delta_x,\delta_y).
#
# - It has the same **asymptotic properties** as the Sinkhorn divergence,
# interpolating between the true Wasserstein distance (when :math:`\varepsilon \rightarrow 0`)
# and a degenerate kernel norm (when :math:`\varepsilon \rightarrow +\infty`).
# - Thanks to the joint convexity of the Wasserstein distance,
# :math:`\text{B}_\varepsilon(\alpha,\beta)` is a **decreasing** function of :math:`\varepsilon`:
# as we remove small-scale details, we lower the overall transport cost.
#
# To compare the Sinkhorn and blurred Wasserstein divergences, a simple experiment
# is to **display their values on pairs of 1D measures** for increasing values of
# the temperature :math:`\varepsilon`:
# having generated random samples :math:`\alpha` and :math:`\beta`
# on the unit interval, we can simply compute :math:`\text{S}_\varepsilon(\alpha,\beta)`
# with our :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer
# while the blurred Wasserstein loss :math:`\text{B}_\varepsilon(\alpha,\beta)` can be
# quickly approximated with the **addition of a Gaussian noise** followed
# by a **sorting pass**.
##############################################
# Setup
# ---------------------
# Standard imports:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity # display as density curves
import torch
from geomloss import SamplesLoss
use_cuda = torch.cuda.is_available()
# N.B.: We use float64 numbers to get nice limits when blur -> +infinity
dtype = torch.cuda.DoubleTensor if use_cuda else torch.DoubleTensor
###############################################
# Display routine:
# Fixed grid over [-0.5, 1.5] used to evaluate the kernel density estimate.
t_plot = np.linspace(-0.5, 1.5, 1000)[:, np.newaxis]
def display_samples(ax, x, color, label=None):
    """Displays samples on the unit interval using a density curve."""
    # x is a torch tensor of 1D sample positions; a narrow Gaussian KDE is
    # fitted and rendered as a filled curve on the matplotlib axis ``ax``.
    kde = KernelDensity(kernel="gaussian", bandwidth=0.005).fit(x.data.cpu().numpy())
    dens = np.exp(kde.score_samples(t_plot))
    # Pin the curve to zero at both ends so ax.fill produces a closed shape.
    dens[0] = 0
    dens[-1] = 0
    ax.fill(t_plot, dens, color=color, label=label)
###############################################
# Experiment
# -------------
def rweight():
    """Draw one uniform random weight in [0, 1) as a 1-element tensor."""
    sample = torch.rand(1)
    return sample.type(dtype)
N = 100 if not use_cuda else 10 ** 3  # Number of samples per measure
C = 100 if not use_cuda else 10000  # number of copies for the Gaussian blur
for _ in range(5):  # Repeat the experiment 5 times
    K = 5  # Generate random 1D measures as the superposition of K=5 intervals
    t = torch.linspace(0, 1, N // K).type(dtype).view(-1, 1)
    # Each measure is K random affine copies of the unit grid, shifted to [-0.5, 0.5].
    X_i = torch.cat([rweight() ** 2 * t + rweight() - 0.5 for k in range(K)], dim=0)
    Y_j = torch.cat([rweight() ** 2 * t + rweight() - 0.5 for k in range(K)], dim=0)
    # Compute the limits when blur = 0...
    # (in 1D, the exact W2 cost is the squared difference of sorted samples)
    x_, _ = X_i.sort(dim=0)
    y_, _ = Y_j.sort(dim=0)
    true_wass = (0.5 / len(X_i)) * ((x_ - y_) ** 2).sum()
    true_wass = true_wass.item()
    # and when blur = +infinity:
    mean_diff = 0.5 * ((X_i.mean(0) - Y_j.mean(0)) ** 2).sum()
    mean_diff = mean_diff.item()
    blurs = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0]
    sink, bwass = [], []
    for blur in blurs:
        # Compute the Sinkhorn divergence:
        # N.B.: To be super-precise, we use the well-tested "online" backend
        # with a very large 'scaling' coefficient
        loss = SamplesLoss("sinkhorn", p=2, blur=blur, scaling=0.99, backend="online")
        sink.append(loss(X_i, Y_j).item())
        # Compute the blurred Wasserstein distance:
        # duplicate each sample C times, add Gaussian noise of deviation
        # blur/2, then take the exact 1D (sorted) transport cost.
        x_i = torch.cat([X_i] * C, dim=0)
        y_j = torch.cat([Y_j] * C, dim=0)
        x_i = x_i + 0.5 * blur * torch.randn(x_i.shape).type(dtype)
        y_j = y_j + 0.5 * blur * torch.randn(y_j.shape).type(dtype)
        x_, _ = x_i.sort(dim=0)
        y_, _ = y_j.sort(dim=0)
        wass = (0.5 / len(x_i)) * ((x_ - y_) ** 2).sum()
        bwass.append(wass.item())
    # Fancy display:
    plt.figure(figsize=(12, 5))
    if N < 10 ** 5:
        # Left panel (only for small N): the two sample densities.
        ax = plt.subplot(1, 2, 1)
        display_samples(ax, X_i, (1.0, 0, 0, 0.5), label="$\\alpha$")
        display_samples(ax, Y_j, (0, 0, 1.0, 0.5), label="$\\beta$")
        plt.axis([-0.5, 1.5, -0.1, 5.5])
        plt.ylabel("density")
        ax.legend()
        plt.tight_layout()
    # Right panel: both divergences as functions of the blur scale.
    ax = plt.subplot(1, 2, 2)
    plt.plot([0.01, 10], [true_wass, true_wass], "g", label="True Wasserstein")
    plt.plot(blurs, sink, "r-o", label="Sinkhorn divergence")
    plt.plot(blurs, bwass, "b-o", label="Blurred Wasserstein")
    plt.plot(
        [0.01, 10], [mean_diff, mean_diff], "m", label="Squared difference of means"
    )
    ax.set_xscale("log")
    ax.legend()
    plt.axis([0.01, 10.0, 0.0, 1.5 * bwass[0]])
    plt.xlabel("blur $\\sqrt{\\varepsilon}$")
    plt.tight_layout()
plt.show()
##################################################
# Conclusion
# --------------
#
# In practice, the Sinkhorn and blurred Wasserstein divergences
# are **nearly indistinguishable**. But as far as we can tell *today*,
# these two loss functions have very different properties:
#
# - :math:`\text{B}_\varepsilon` is **easy to define**, compute in 1D and
# **analyze** from geometric or statistical point of views...
# But cannot (?) be computed efficiently in higher dimensions,
# where the true OT problem is nearly intractable.
# - :math:`\text{S}_\varepsilon` is simply available through
# the :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer,
# but has a weird, composite definition and is pretty **hard to**
# **study** rigorously - as evidenced by recent, technical proofs
# of `positivity, definiteness (Feydy et al., 2018) <https://arxiv.org/abs/1810.08278>`_
# and `sample complexity (Genevay et al., 2018) <https://arxiv.org/abs/1810.02733>`_.
#
# **So couldn't we get the best of both worlds?**
# In an ideal world, we'd like to tweak the *efficient* multiscale Sinkhorn algorithm
# to compute the *natural* divergence :math:`\text{B}_\varepsilon`...
# but this may be out of reach. A realistic target could be to **quantify**
# **the difference** between these two objects, thus legitimizing the
# use of the :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer
# as a **cheap proxy** for the intuitive and well-understood *blurred Wasserstein distance*.
#
# In my opinion, investigating the link between these two quantities
# is one of the most interesting questions left open in the field of discrete entropic OT.
# The geometric loss functions implemented in GeomLoss are probably *good enough*
# for most practical purposes,
# but getting a **rigorous understanding** of the multiscale,
# wavelet-like behavior of our algorithms
# as we add small details through an exponential decay of
# the blurring scale :math:`\sqrt{\varepsilon}` would be truly insightful.
# In some sense, couldn't we prove a
# `Hilbert <https://en.wikipedia.org/wiki/Orthonormal_basis>`_-`Plancherel <https://en.wikipedia.org/wiki/Plancherel_theorem>`_
# theorem for the Wasserstein distance?
#
| 2.421875 | 2 |
tests/app.py | vortec/hug_sentry | 5 | 12767993 | <reponame>vortec/hug_sentry
import hug
@hug.get('/fail')
def fail(request, amount: hug.types.number):
    """Endpoint that always raises ZeroDivisionError, used by the test
    suite to trigger an unhandled exception in the middleware under test."""
    amount / 0
@hug.get('/routing_fail/{amount}')
def routing_fail(request, amount: hug.types.number):
    """Endpoint with a URL-path parameter that always raises, used to test
    exception handling on routed (parameterised) URLs."""
    raise Exception("Oh no!")
| 1.875 | 2 |
test/test.py | taoyizhi68/py-data-augmentation | 16 | 12767994 | <reponame>taoyizhi68/py-data-augmentation<gh_stars>10-100
import numpy as np
import matplotlib.pyplot as plt
import skimage as skimage
from skimage import data, io, filters, transform
from pydaag import pydaag
plt.rcParams['figure.figsize'] = (10, 10)
# Load img\1.jpg .. img\5.jpg (Windows-style paths), resized to 256x256 RGB.
images = np.zeros((5, 256, 256, 3))
for i in range(5):
    filename = 'img\\%d.jpg'%(i+1)
    image = skimage.data.imread(filename, as_grey=0)
    image = skimage.transform.resize(image, (256, 256, 3))
    images[i] = image
# Show the originals side by side.
for i in range(5):
    plt.subplot(1, 5, i+1)
    plt.imshow(images[i])
    plt.axis('off')
plt.show()
# Apply random slides, 3-axis rotations, blur and noise, then show results.
images_ = pydaag.data_augmentation(images, x_slide=0.2, y_slide=0.2,
                                   z_rotation=20, y_rotation=20, x_rotation=20,
                                   blur_max_sigma=3, noise_max_sigma=20)
for i in range(5):
    plt.subplot(1, 5, i+1)
    plt.imshow(images_[i])
    plt.axis('off')
plt.show()
plugins/hello.py | Ruthenic/tesm | 0 | 12767995 | def hw(addr,vaddr,inst):
print("Hello, world!")
return addr,vaddr
| 2.109375 | 2 |
apps/courses/migrations/0043_remove_course_tag.py | islandowner-web/IT-MOOC | 9 | 12767996 | <filename>apps/courses/migrations/0043_remove_course_tag.py
# Generated by Django 2.2 on 2019-12-10 22:22
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the legacy ``tag`` field from the Course model.
    dependencies = [
        ('courses', '0042_delete_bannercourse'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='course',
            name='tag',
        ),
    ]
tests/data/okta/adminroles.py | sckevmit/cartography | 2,322 | 12767997 | <reponame>sckevmit/cartography
LIST_ASSIGNED_USER_ROLE_RESPONSE = """
[
{
"id": "IFIFAX2BIRGUSTQ",
"label": "Application Administrator",
"type": "APP_ADMIN",
"status": "ACTIVE",
"created": "2019-02-06T16:17:40.000Z",
"lastUpdated": "2019-02-06T16:17:40.000Z",
"assignmentType": "USER",
"_links": {
"assignee": {
"href": "http://{yourOktaDomain}/api/v1/users/00ur32Vg0fvpyHZeQ0g3"
}
}
},
{
"id": "JBCUYUC7IRCVGS27IFCE2SKO",
"label": "Help Desk Administrator",
"type": "HELP_DESK_ADMIN",
"status": "ACTIVE",
"created": "2019-02-06T16:17:40.000Z",
"lastUpdated": "2019-02-06T16:17:40.000Z",
"assignmentType": "USER",
"_links": {
"assignee": {
"href": "http://{yourOktaDomain}/api/v1/users/00ur32Vg0fvpyHZeQ0g3"
}
}
}
]
"""
LIST_ASSIGNED_GROUP_ROLE_RESPONSE = """
[
{
"id": "IFIFAX2BIRGUSTQ",
"label": "Application Administrator",
"type": "APP_ADMIN",
"status": "ACTIVE",
"created": "2019-02-27T14:48:59.000Z",
"lastUpdated": "2019-02-27T14:48:59.000Z",
"assignmentType": "GROUP",
"_links": {
"assignee": {
"href": "http://{yourOktaDomain}/api/v1/groups/00gsr2IepS8YhHRFf0g3"
}
}
},
{
"id": "JBCUYUC7IRCVGS27IFCE2SKO",
"label": "Help Desk Administrator",
"type": "HELP_DESK_ADMIN",
"status": "ACTIVE",
"created": "2019-02-06T16:17:40.000Z",
"lastUpdated": "2019-02-06T16:17:40.000Z",
"assignmentType": "GROUP",
"_links": {
"assignee": {
"href": "http://{yourOktaDomain}/api/v1/users/00ur32Vg0fvpyHZeQ0g3"
}
}
}
]
"""
| 1.21875 | 1 |
ipodcopier/__init__.py | kristjano/ipodcopier | 0 | 12767998 | <filename>ipodcopier/__init__.py
from .musiccopier import *
| 1.085938 | 1 |
example/parse_joined_gff_inline_dna.py | pflans/edge | 32 | 12767999 | # flake8: noqa
# Converting GFF format that includes DNA as
#
# ##DNA chrI
# ##AGCTAGAT
# ##end-DNA
#
# to GFF with FASTA
import sys
import urllib
import re
# Collected ">name\nSEQ..." FASTA records, emitted after all GFF lines.
fasta = []
curseq = []
started = False
fn = sys.argv[1]
f = open(fn, "r")
def unescape(s):
    # URL-decode, then replace whitespace with underscores so names stay
    # single tokens in the GFF columns.
    return re.sub("\s", "_", urllib.unquote(s))
for l in f.read().split("\n"):
    l = l.strip()
    if l == "":
        continue
    if l.startswith("##"):
        # Directive lines: capture ##DNA ... ##end-DNA blocks as FASTA,
        # pass other directives straight through.
        if l.startswith("##DNA"):
            curseq.append(">%s" % unescape(l[6:]))
            started = True
        elif l.startswith("##end-DNA"):
            fasta.append("\n".join(curseq))
            curseq = []
            started = False
        else:
            if started:
                # Inside a DNA block: '##' prefix stripped, rest is sequence.
                curseq.append(l[2:])
            else:
                print l
    else:
        # Feature lines: drop SGD 'chromosome' features, unescape the
        # seqid column, and wrap malformed attributes as Unknown_Note.
        if l.count("SGD\tchromosome") > 0:
            continue
        t = l.split("\t")
        t[0] = unescape(t[0])
        attrs = t[-1].split(";")
        for i, attr in enumerate(attrs):
            if len(attr.split("=")) != 2:
                attrs[i] = 'Unknown_Note="%s"' % attr.strip()
        t[-1] = ";".join(attrs)
        l = "\t".join(t)
        print l
# Emit the collected sequences in GFF3 '##FASTA' style.
print "##FASTA"
for f in fasta:
    print f
piafedit/extension/ui_api.py | flegac/piaf-edit | 0 | 12768000 | from dataclasses import dataclass
from typing import Callable, Any, Dict, Set
from PyQt5 import QtCore
from PyQt5.QtGui import QKeyEvent, QKeySequence
from piafedit.gui.common.handler.keyboard_handler import KeyboardHandler
@dataclass
class UIAction:
    """A named UI action with an optional keyboard shortcut."""
    # Unique action name (also the registry key in _Actions.by_name).
    name: str
    # Human-readable description for tooltips; may be None.
    tooltip: str
    # Shortcut string in QKeySequence format; may be None.
    shortcut: str
    # Zero-argument callable invoked when the action triggers.
    action: Callable[[], Any]
class _Actions:
    """Registry of UIActions, indexed both by name and by keyboard shortcut."""

    def __init__(self):
        self.by_name: Dict[str, UIAction] = dict()
        self.by_shortcut: Dict[str, UIAction] = dict()
        # NOTE(review): never populated anywhere in this class — candidate
        # for removal once confirmed unused elsewhere.
        self.shortcuts: Set[str] = set()

    def action(self, name: str, tooltip: str = None, shortcut: str = None):
        """Decorator factory: register the decorated callable as a UIAction.

        Registering the same name or shortcut twice silently overwrites the
        previous entry.

        NOTE: the decorator returns the UIAction itself (not ``fn``), so the
        decorated name is rebound to the action object — kept as-is because
        existing callers may rely on it.
        """
        def decorator(fn: Callable[[], Any]):
            # Leftover debug print removed here.
            action = UIAction(name, tooltip, shortcut, fn)
            self.by_name[name] = action
            self.by_shortcut[shortcut] = action
            return action
        return decorator

    def handler(self):
        """Build a keyboard handler that dispatches key presses to this registry."""
        return MyKeyboardHandler(self)
class MyKeyboardHandler(KeyboardHandler):
    """Maps raw Qt key-press events onto the shortcuts registered in _Actions."""
    def __init__(self, actions: _Actions):
        self.actions = actions
    def keyPressEvent(self, ev: QKeyEvent):
        # Local import avoids a circular dependency with the editor module.
        from piafedit.editor_api import P
        modifiers = ev.modifiers()
        # NOTE(review): the prints below look like leftover debug output;
        # consider routing through P.log or removing.
        if modifiers == QtCore.Qt.ShiftModifier:
            print('Shift+Click')
        elif modifiers == QtCore.Qt.ControlModifier:
            print('Control+Click')
        elif modifiers == (QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier):
            print('Control+Shift+Click')
        else:
            print('Click')
        # Combine modifiers + key into a sequence and compare against every
        # registered shortcut; all matching actions are invoked.
        key = QKeySequence(ev.modifiers() | ev.key())
        print('-----', key.toString() )
        for shortcut, action in self.actions.by_shortcut.items():
            sequence = QKeySequence(shortcut)
            print(sequence.toString())
            P.log.debug(sequence)
            if key.matches(sequence):
                action.action()
class Ui:
    """Module-wide facade over a single shared _Actions registry."""
    actions = _Actions()

    @staticmethod
    def action(name: str, tooltip: str = None, shortcut: str = None):
        # Delegates to the shared registry's decorator factory.
        return Ui.actions.action(name, tooltip, shortcut)
| 2.578125 | 3 |
app/core/models.py | YumiBunny/Gusoktor | 0 | 12768001 | <reponame>YumiBunny/Gusoktor
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
import app.core.patch
# The chosen design has trade-offs. On the plus side, it reuses Django's
# authentication system, so most of the machinery comes for free. Each
# loggable entity acts as a user "profile", holding information beyond the
# basic credentials used to log in.
# Additionally, whenever a user is created — from registration or from the
# admin — associated profiles are created for it. (Here lies the drawback:
# creating a user creates both an unemployed-person profile and a company
# profile, so one of them is always unused — an unemployed person is not a
# company — leaving empty records around.)
# On the other hand, a user can be asked whether it is an unemployed person
# or a company, and its "profile" returns either one depending on which was
# populated.
class Desocupado(models.Model):
    """Profile with the extra data of an unemployed user.

    Loggable entities carry a one-to-one link to Django's ``User`` and act
    as its "profile", storing information beyond the basic auth fields.
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Remaining profile fields. Some (name, surname, email) mirror data that
    # already exists on the Django User, but are duplicated here on purpose.
    nombre = models.CharField(max_length=50)
    apellido = models.CharField(max_length=50)
    # Field names use underscores, otherwise Django will fail.
    fecha_de_nacimiento = models.DateField(null=True)
    dni = models.CharField(max_length=20, null=True)
    experiencia_laboral = models.CharField(max_length=70, null=True)
    # Commented out because it needs extra form support that is still pending:
    # profesion = models.ForeignKey('Rubro')
    formacion = models.CharField(max_length=70, null=True)
    habilidades = models.CharField(max_length=70, null=True)
    desocupado = models.BooleanField(default=True)

    def __str__(self):
        # Text representation, e.g. as shown in the admin.
        return f"Desocupado: {self.nombre} {self.apellido} de {self.user.username}"
# Whenever a User is created, automatically create its Desocupado profile.
@receiver(post_save, sender=User)
def update_user_desocupado(sender, instance, created, **kwargs):
    # Only act on creation; existing users already have a profile.
    if created:
        Desocupado.objects.create(user=instance)
        instance.desocupado.save()
class Empresa(models.Model):
    """Profile with the extra data of a company user (also loggable,
    same one-to-one pattern as Desocupado)."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Remaining company fields.
    CUIT = models.CharField(max_length=30)
    # on_delete is mandatory from Django 2.0 on; CASCADE preserves the
    # implicit default behaviour of older Django versions.
    rubro = models.ForeignKey('core.Rubro', null=True, on_delete=models.CASCADE)
    razon_social = models.CharField(max_length=50)

    def __str__(self):
        # Text representation, e.g. as shown in the admin. ": " added so the
        # output matches Desocupado.__str__ instead of "EmpresaAcme de user".
        return "Empresa: " + str(self.razon_social) + " de " + str(self.user.username)

    def contratar_desempleado(self):
        # Placeholder: hiring logic not implemented yet. `self` added so the
        # method can actually be called on an instance.
        pass
# Si se crea un usuario, se crea automáticamente una Empresa
# Whenever a User is created, automatically create its Empresa profile.
@receiver(post_save, sender=User)
def update_user_empresa(sender, instance, created, **kwargs):
    # Only act on creation; existing users already have a profile.
    if created:
        Empresa.objects.create(user=instance)
        instance.empresa.save()
# El resto de las entidades queda a completar.
# Como hice varios cambios comento las relaciones para que no se rompa nada.
class Rubro(models.Model):
    """Line of business / type of work category."""
    tipoDeTrabajo = models.CharField(max_length=30)

    def __str__(self):
        # Was `return self.title`, but this model has no `title` field,
        # which raised AttributeError whenever the object was rendered.
        return self.tipoDeTrabajo
class Empleo(models.Model):
    """Employment relation between a person and a company.

    The relations are commented out until the related models are wired up.
    """
    #persona = models.ForeignKey('Persona')
    #empresa = models.ForeignKey('Empresa')
    #oferta = models.ForeignKey('Oferta')
    inicio_contrato = models.DateField()
    fin_contrato = models.DateField()

    def __str__(self):
        # Was `return self.title`, but this model has no `title` field
        # (AttributeError). Describe the record by its contract period instead.
        return "Empleo %s - %s" % (self.inicio_contrato, self.fin_contrato)
class Oferta(models.Model):
    """Job offer published by a company."""
    #empresa = models.ForeignKey('Empresa')
    # Whether the offer is still open.
    activa = models.BooleanField()
    #necesidad = models.ForeignKey('Rubro')
    posicion_o_cargo = models.CharField(max_length=40, null=True)
    descripcion_del_trabajo = models.CharField(max_length=50, null=True)
    profesion = models.CharField(max_length=30, null=True)
    carga_horaria = models.CharField(max_length=30, null=True)

    def __str__(self):
        # NOTE(review): may be None since the field is nullable — admin would
        # then show "None"; confirm whether a fallback string is wanted.
        return self.posicion_o_cargo
| 1.984375 | 2 |
helper.py | luket4/cs3240-labdemo | 0 | 12768002 | <gh_stars>0
__author__ = 'lat9wj'


def greeting(msg):
    """Print *msg* to standard output."""
    print(msg)
igramscraper/model/comment.py | 0xflotus/instagram-scraper | 1 | 12768003 | from .initializer_model import InitializerModel
import textwrap
class Comment(InitializerModel):
    """Instagram comment model populated from raw API (prop, value) pairs."""

    def init_properties_custom(self, value, prop):
        """Map one raw property onto this comment's attributes.

        :param value: raw value from the API payload
        :param prop: name of the raw property
        """
        if prop == 'id':
            self.identifier = value
        # Properties copied over verbatim under the same name.
        if prop in ('created_at', 'text'):
            setattr(self, prop, value)
        if prop == 'owner':
            # Imported lazily to avoid a circular import with .account.
            from .account import Account
            self.owner = Account(value)
src/olympia/access/migrations/0001_initial.py | dante381/addons-server | 0 | 12768004 | <gh_stars>0
# Generated by Django 2.2.5 on 2019-09-12 13:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import olympia.amo.fields
import olympia.amo.models
class Migration(migrations.Migration):
    # Auto-generated initial schema for the `access` app: a Group model, the
    # GroupUser through table, and the M2M `users` field that ties them
    # together, plus a uniqueness constraint on (group, user).

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Group',
            fields=[
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(default='', max_length=255)),
                ('rules', models.TextField()),
                ('notes', models.TextField(blank=True)),
            ],
            options={
                'db_table': 'groups',
            },
            bases=(olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        migrations.CreateModel(
            name='GroupUser',
            fields=[
                ('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='access.Group')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'groups_users',
            },
        ),
        migrations.AddField(
            model_name='group',
            name='users',
            field=models.ManyToManyField(related_name='groups', through='access.GroupUser', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddConstraint(
            model_name='groupuser',
            constraint=models.UniqueConstraint(fields=('group', 'user'), name='group_id'),
        ),
    ]
| 1.71875 | 2 |
tmp/xml_rpc.py | hanzhichao/requestz | 2 | 12768005 | <gh_stars>1-10
# Minimal XML-RPC client demo. Ported from the Python 2 `xmlrpclib` module
# (which does not exist on Python 3) to its Python 3 name, `xmlrpc.client`.
import xmlrpc.client

__HOST = 'localhost'
__PORT = '8000'

# Connect to the local demo XML-RPC server and exercise a few of its methods.
s = xmlrpc.client.ServerProxy('http://' + __HOST + ':' + __PORT)
# print(s.pow(2, 3))
s.add(2, 3)
s.div(5, 2)
s.system.listMethods()
hokusai/services/deployment.py | eessex/hokusai | 0 | 12768006 | <reponame>eessex/hokusai
import os
import datetime
import json
from tempfile import NamedTemporaryFile
import yaml
from hokusai import CWD
from hokusai.lib.config import HOKUSAI_CONFIG_DIR, config
from hokusai.services.kubectl import Kubectl
from hokusai.services.ecr import ECR, ClientError
from hokusai.lib.common import print_green, print_red, print_yellow, shout, shout_concurrent
from hokusai.services.command_runner import CommandRunner
from hokusai.lib.exceptions import CalledProcessError, HokusaiError
from hokusai.lib.constants import YAML_HEADER
class Deployment(object):
    """Rolls a new image tag out to the project's Kubernetes Deployments,
    running the configured pre/post-deploy hooks and updating the matching
    ECR and git deployment tags."""

    def __init__(self, context, deployment_name=None, namespace=None):
        # `self.cache` holds the parsed Deployment object(s) this instance
        # operates on: either the single named deployment, or every
        # app-layer deployment of the project selected by label.
        self.context = context
        self.namespace = namespace
        self.kctl = Kubectl(self.context, namespace=namespace)
        self.ecr = ECR()
        if deployment_name:
            self.cache = [self.kctl.get_object("deployment %s" % deployment_name)]
        else:
            self.cache = self.kctl.get_objects('deployment', selector="app=%s,layer=application" % config.project_name)

    def update(self, tag, constraint, git_remote, timeout,
               resolve_tag_sha1=True, update_config=False, filename=None):
        """Deploy image `tag` to every cached deployment.

        Steps: optionally resolve `tag` to a git-SHA1 image tag, run the
        pre-deploy hook, patch the deployments (either by re-applying the
        kubernetes spec file when `update_config` is set, or via
        `kubectl patch`), wait for the rollouts (rolling back on timeout),
        run the post-deploy hook, then update the ECR and git deployment
        tags. Raises HokusaiError on failure.
        """
        if not self.ecr.project_repo_exists():
            raise HokusaiError("Project repo does not exist. Aborting.")
        if resolve_tag_sha1:
            tag = self.ecr.find_git_sha1_image_tag(tag)
            if tag is None:
                raise HokusaiError("Could not find a git SHA1 for tag %s. Aborting." % tag)

        if self.namespace is None:
            print_green("Deploying %s to %s..." % (tag, self.context), newline_after=True)
        else:
            print_green("Deploying %s to %s/%s..." % (tag, self.context, self.namespace), newline_after=True)

        if config.pre_deploy is not None:
            print_green("Running pre-deploy hook '%s'..." % config.pre_deploy, newline_after=True)
            return_code = CommandRunner(self.context, namespace=self.namespace).run(tag, config.pre_deploy, constraint=constraint, tty=False)
            if return_code:
                raise HokusaiError("Pre-deploy hook failed with return code %s" % return_code, return_code=return_code)

        # A fresh deploymentTimestamp label changes the pod template, so a new
        # rollout is triggered even when the image itself is unchanged.
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")

        if update_config:
            if filename is None:
                kubernetes_yml = os.path.join(CWD, HOKUSAI_CONFIG_DIR, "%s.yml" % self.context)
            else:
                kubernetes_yml = filename
            print_green("Patching Deployments in spec %s with tag %s" % (kubernetes_yml, tag), newline_after=True)
            # Rewrite every Deployment document in the spec, then apply the
            # patched spec from a temporary file.
            payload = []
            for item in yaml.safe_load_all(open(kubernetes_yml, 'r')):
                if item['kind'] == 'Deployment':
                    item['spec']['template']['metadata']['labels']['deploymentTimestamp'] = deployment_timestamp
                    item['spec']['progressDeadlineSeconds'] = timeout
                    for container in item['spec']['template']['spec']['containers']:
                        if self.ecr.project_repo in container['image']:
                            container['image'] = "%s:%s" % (self.ecr.project_repo, tag)
                payload.append(item)
            f = NamedTemporaryFile(delete=False)
            f.write(YAML_HEADER)
            f.write(yaml.safe_dump_all(payload, default_flow_style=False))
            f.close()
            print_green("Applying patched spec %s..." % f.name, newline_after=True)
            try:
                shout(self.kctl.command("apply -f %s" % f.name), print_output=True)
            finally:
                os.unlink(f.name)
        else:
            # Patch each deployment in place: only containers whose image
            # belongs to the project repo are re-targeted to the new tag.
            for deployment in self.cache:
                containers = [(container['name'], container['image']) for container in deployment['spec']['template']['spec']['containers']]
                deployment_targets = [{"name": name, "image": "%s:%s" % (self.ecr.project_repo, tag)} for name, image in containers if self.ecr.project_repo in image]
                patch = {
                    "spec": {
                        "template": {
                            "metadata": {
                                "labels": {"deploymentTimestamp": deployment_timestamp}
                            },
                            "spec": {
                                "containers": deployment_targets
                            }
                        },
                        "progressDeadlineSeconds": timeout
                    }
                }
                print_green("Patching deployment %s..." % deployment['metadata']['name'], newline_after=True)
                shout(self.kctl.command("patch deployment %s -p '%s'" % (deployment['metadata']['name'], json.dumps(patch))))

        # Wait for every rollout; on any timeout, undo all of them and fail.
        print_green("Waiting for deployment rollouts to complete...")
        rollout_commands = [self.kctl.command("rollout status deployment/%s" % deployment['metadata']['name']) for deployment in self.cache]
        return_codes = shout_concurrent(rollout_commands, print_output=True)
        if any(return_codes):
            print_red("One or more deployment rollouts timed out! Rolling back...", newline_before=True, newline_after=True)
            rollback_commands = [self.kctl.command("rollout undo deployment/%s" % deployment['metadata']['name']) for deployment in self.cache]
            shout_concurrent(rollback_commands, print_output=True)
            raise HokusaiError("Deployment failed!")

        # Everything after the rollout is best-effort: failures are collected
        # and reported at the end but do not roll the deployment back.
        post_deploy_success = True

        if config.post_deploy is not None:
            print_green("Running post-deploy hook '%s'..." % config.post_deploy, newline_after=True)
            return_code = CommandRunner(self.context, namespace=self.namespace).run(tag, config.post_deploy, constraint=constraint, tty=False)
            if return_code:
                print_yellow("WARNING: Running the post-deploy hook failed with return code %s" % return_code, newline_before=True, newline_after=True)
                print_yellow("The tag %s has been rolled out. However, you should run the post-deploy hook '%s' manually, or re-run this deployment." % (tag, config.post_deploy), newline_after=True)
                post_deploy_success = False

        # ECR/git deployment tags are only maintained for the default
        # namespace deploys.
        if self.namespace is None:
            deployment_tag = "%s--%s" % (self.context, datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
            print_green("Updating ECR deployment tags in %s..." % self.ecr.project_repo, newline_after=True)
            try:
                self.ecr.retag(tag, self.context)
                print_green("Updated ECR tag %s -> %s" % (tag, self.context))
                self.ecr.retag(tag, deployment_tag)
                print_green("Updated ECR tag %s -> %s" % (tag, deployment_tag), newline_after=True)
            except (ValueError, ClientError) as e:
                print_yellow("WARNING: Updating ECR deployment tags failed due to the error: '%s'" % str(e), newline_before=True, newline_after=True)
                print_yellow("The tag %s has been rolled out. However, you should create the ECR tags '%s' and '%s' manually, or re-run this deployment." % (tag, deployment_tag, self.context), newline_after=True)
                post_deploy_success = False

            remote = git_remote or config.git_remote
            if remote is not None:
                print_green("Pushing Git deployment tags to %s..." % remote, newline_after=True)
                try:
                    shout("git fetch %s" % remote)
                    shout("git tag -f %s %s" % (self.context, tag), print_output=True)
                    shout("git tag -f %s %s" % (deployment_tag, tag), print_output=True)
                    shout("git push -f --no-verify %s refs/tags/%s" % (remote, self.context), print_output=True)
                    print_green("Updated Git tag %s -> %s" % (tag, self.context))
                    shout("git push -f --no-verify %s refs/tags/%s" % (remote, deployment_tag), print_output=True)
                    print_green("Updated Git tag %s -> %s" % (tag, deployment_tag), newline_after=True)
                except CalledProcessError as e:
                    print_yellow("WARNING: Creating Git deployment tags failed due to the error: '%s'" % str(e), newline_before=True, newline_after=True)
                    print_yellow("The tag %s has been rolled out. However, you should create the Git tags '%s' and '%s' manually, or re-run this deployment." % (tag, deployment_tag, self.context), newline_after=True)
                    post_deploy_success = False

        if post_deploy_success:
            print_green("Deployment succeeded!")
        else:
            raise HokusaiError("One or more post-deploy steps failed!")

    def refresh(self):
        """Restart every cached deployment by bumping the deploymentTimestamp
        label (triggers a rollout without changing the image)."""
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {"deploymentTimestamp": deployment_timestamp}
                        }
                    }
                }
            }
            print_green("Refreshing %s..." % deployment['metadata']['name'], newline_after=True)
            shout(self.kctl.command("patch deployment %s -p '%s'" % (deployment['metadata']['name'], json.dumps(patch))))

        print_green("Waiting for refresh to complete...")
        rollout_commands = [self.kctl.command("rollout status deployment/%s" % deployment['metadata']['name']) for deployment in self.cache]
        return_codes = shout_concurrent(rollout_commands, print_output=True)
        if any(return_codes):
            raise HokusaiError("Refresh failed!")

    @property
    def names(self):
        """Names of all cached deployments."""
        return [deployment['metadata']['name'] for deployment in self.cache]

    @property
    def current_tag(self):
        """The single image tag currently deployed across all cached
        deployments; raises HokusaiError if containers or deployments
        reference different tags."""
        images = []
        for deployment in self.cache:
            containers = deployment['spec']['template']['spec']['containers']
            container_images = [container['image'] for container in containers if self.ecr.project_repo in container['image']]
            if not container_images:
                raise HokusaiError("Deployment has no valid target containers. Aborting.")
            if not all(x == container_images[0] for x in container_images):
                raise HokusaiError("Deployment's containers do not reference the same image tag. Aborting.")
            images.append(container_images[0])
        if not all(y == images[0] for y in images):
            raise HokusaiError("Deployments do not reference the same image tag. Aborting.")
        return images[0].rsplit(':', 1)[1]
| 1.960938 | 2 |
stonesoup/metricgenerator/__init__.py | Red-Portal/Stone-Soup-1 | 157 | 12768007 | # -*- coding: utf-8 -*-
from .base import MetricGenerator
__all__ = ['MetricGenerator']
| 1.078125 | 1 |
def parity_message(num: int) -> str:
    """Return a sentence stating whether *num* is even or odd.

    Grammar fixed: the odd branch used to read "is a odd number".
    """
    if num % 2 == 0:
        return f"{num} is an even number"
    return f"{num} is an odd number"


def main() -> None:
    """Repeatedly prompt for a number and report its parity (Ctrl-C to quit)."""
    while True:
        num = int(input("Enter a number: "))
        print(parity_message(num))


if __name__ == "__main__":
    main()
| 4.25 | 4 |
hood/forms.py | p-koskey/mtaani | 0 | 12768009 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Profile, Neighbourhood, Business, Post
from cloudinary.models import CloudinaryField
class SignupForm(UserCreationForm):
    """Registration form: Django's UserCreationForm plus a required email."""
    email = forms.EmailField(max_length=254)

    class Meta:
        model = User
        # Restored the UserCreationForm password fields; the file contained
        # '<PASSWORD>' placeholders where these names should be.
        fields = ('username', 'email', 'password1', 'password2',)
class UpdateUserForm(forms.ModelForm):
    """Form for editing the username and email of an existing User."""
    email = forms.EmailField(max_length=254)

    class Meta:
        model = User
        fields = ('username', 'email')
class UpdateProfileForm(forms.ModelForm):
    """Form for editing a user's Profile details."""
    class Meta:
        model = Profile
        fields = ['name', 'location', 'profile_picture']
class NeighbourHoodForm(forms.ModelForm):
    """Form for creating/editing a Neighbourhood, with a Cloudinary image."""
    picture = CloudinaryField('image')

    class Meta:
        model = Neighbourhood
        fields = ('picture', 'name', 'location','health','police')
class BusinessForm(forms.ModelForm):
    """Form for creating/editing a Business listing."""
    class Meta:
        model = Business
        fields = ('name', 'email', 'description')
class PostForm(forms.ModelForm):
    """Form for creating/editing a neighbourhood Post."""
    class Meta:
        model = Post
        fields = ('title', 'post')
forcepush/inputs/__init__.py | wouterrvdb/ForcePush | 0 | 12768010 | from forcepush.logic import player
from .input_manager import InputManager
# Package-wide singleton: one InputManager shared by the whole game,
# pre-wired at import time with the player's movement handler.
input_manager = InputManager()
input_manager.register_handler(player.movement_handler)
Python_to_Acces_Web_Data/Scraping_HTML_Data_with_BeautifulSoup/Following_Links_HTML_BeautifulSoup.py | MickBrown88/Programming_for_everybody | 1 | 12768011 | <gh_stars>1-10
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Follow a chain of links: starting at `url`, repeatedly fetch the page and
# hop to the anchor found at position `pos`, `count` times.
url = input('Enter - ')  # e.g. http://py4e-data.dr-chuck.net/known_by_Halley.html
count = int(input('Enter count')) + 1  # e.g. 7
pos = int(input('Enter position'))  # e.g. 18

n = 1
print('Retrieving', url)
while n < count:
    # Fetch the current page exactly once per hop (the original code fetched
    # the first page twice), then follow the pos-th link.
    html = urllib.request.urlopen(url, context=ctx).read()
    soup = BeautifulSoup(html, 'html.parser')
    tags = soup('a')
    url = tags[pos - 1].get('href', None)
    print('Retrieving', url)
    n = n + 1
print(n)
oc_ocdm/graph/entities/bibliographic/resource_embodiment.py | arcangelo7/oc_ocdm | 1 | 12768012 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
from __future__ import annotations
import re
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Optional
from rdflib import URIRef
from oc_ocdm.graph.graph_entity import GraphEntity
from oc_ocdm.decorators import accepts_only
from oc_ocdm.graph.entities.bibliographic_entity import BibliographicEntity
class ResourceEmbodiment(BibliographicEntity):
"""Resource embodiment (short: re): the particular physical or digital format in which a
bibliographic resource was made available by its publisher."""
@accepts_only('re')
def merge(self, other: ResourceEmbodiment) -> None:
super(ResourceEmbodiment, self).merge(other)
media_type: Optional[URIRef] = other.get_media_type()
if media_type is not None:
self.has_media_type(media_type)
starting_page: Optional[str] = other.get_starting_page()
if starting_page is not None:
self.has_starting_page(starting_page)
ending_page: Optional[str] = other.get_ending_page()
if ending_page is not None:
self.has_ending_page(ending_page)
url: Optional[URIRef] = other.get_url()
if url is not None:
self.has_url(url)
# HAS FORMAT
def get_media_type(self) -> Optional[URIRef]:
uri: Optional[URIRef] = self._get_uri_reference(GraphEntity.iri_has_format)
return uri
@accepts_only('thing')
def has_media_type(self, thing_ref: URIRef) -> None:
"""It allows one to specify the IANA media type of the embodiment.
"""
self.remove_media_type()
self.g.add((self.res, GraphEntity.iri_has_format, thing_ref))
def remove_media_type(self) -> None:
self.g.remove((self.res, GraphEntity.iri_has_format, None))
# HAS FIRST PAGE
def get_starting_page(self) -> Optional[str]:
return self._get_literal(GraphEntity.iri_starting_page)
@accepts_only('literal')
def has_starting_page(self, string: str) -> None:
"""The first page of the bibliographic resource according to the current embodiment.
"""
self.remove_starting_page()
if re.search("[-–]+", string) is None:
page_number = string
else:
page_number = re.sub("[-–]+.*$", "", string)
self._create_literal(GraphEntity.iri_starting_page, page_number)
def remove_starting_page(self) -> None:
self.g.remove((self.res, GraphEntity.iri_starting_page, None))
# HAS LAST PAGE
def get_ending_page(self) -> Optional[str]:
return self._get_literal(GraphEntity.iri_ending_page)
@accepts_only('literal')
def has_ending_page(self, string: str) -> None:
"""The last page of the bibliographic resource according to the current embodiment.
"""
self.remove_ending_page()
if re.search("[-–]+", string) is None:
page_number = string
else:
page_number = re.sub("^.*[-–]+", "", string)
self._create_literal(GraphEntity.iri_ending_page, page_number)
def remove_ending_page(self) -> None:
self.g.remove((self.res, GraphEntity.iri_ending_page, None))
# HAS URL
def get_url(self) -> Optional[URIRef]:
uri: Optional[URIRef] = self._get_uri_reference(GraphEntity.iri_has_url)
return uri
@accepts_only('thing')
def has_url(self, thing_ref: URIRef) -> None:
"""The URL at which the embodiment of the bibliographic resource is available.
"""
self.remove_url()
self.g.add((self.res, GraphEntity.iri_has_url, thing_ref))
def remove_url(self) -> None:
self.g.remove((self.res, GraphEntity.iri_has_url, None))
# HAS TYPE
def create_digital_embodiment(self) -> None:
"""It identifies the particular type of the embodiment, either digital or print.
"""
self._create_type(GraphEntity.iri_digital_manifestation)
def create_print_embodiment(self) -> None:
"""It identifies the particular type of the embodiment, either digital or print.
"""
self._create_type(GraphEntity.iri_print_object)
| 2.078125 | 2 |
examples/tonumpy.py | milankl/thermofeel | 31 | 12768013 | <gh_stars>10-100
# (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
import sys
import numpy as np
from grib import decode_grib
def save(message):
    """Reshape one decoded GRIB message onto its (Nj, Ni) grid and write it
    as a compressed-less .npz archive to the path given in sys.argv[2]."""
    lats = message["lats"]
    lons = message["lons"]
    vals = message["values"]

    # All three arrays must describe the same grid points.
    assert lats.size == lons.size
    assert lats.size == vals.size
    print(lats.size)

    grid_shape = (message["Nj"], message["Ni"])
    np.savez(
        sys.argv[2],
        lats=np.reshape(lats, grid_shape),
        lons=np.reshape(lons, grid_shape),
        values=np.reshape(vals, grid_shape),
    )
def main():
    """Decode every message in the GRIB file named by argv[1] and save each."""
    for message in decode_grib(sys.argv[1]):
        save(message)


if __name__ == "__main__":
    sys.exit(main())
| 2.28125 | 2 |
setup.py | cooljoseph1/battlehack20-minimal | 1 | 12768014 | from setuptools import setup, find_packages
from collections import OrderedDict
# Package metadata for the minimal Battlehack20 engine.
# Typo fixed in the long description: "the secure of" -> "the security of".
long_description="""
This is a minimal engine for the Battlehack20 game.
It lacks the security of the original engine, but makes up for it
by running 30 times faster.
Read more at the Battlehack website: https://bh2020.battlecode.org.
"""

setup(name='battlehack20-minimal',
      version="1.0.6",
      description='Battlehack 2020 fancy viewer.',
      author='cooljoseph',
      long_description=long_description,
      author_email='<EMAIL>',
      url="https://bh2020.battlecode.org",
      license='GNU General Public License v3.0',
      packages=find_packages(),
      project_urls=OrderedDict((
          ('Code', 'https://github.com/cooljoseph1/battlehack20-minimal'),
          ('Documentation', 'https://github.com/cooljoseph1/battlehack20-minimal')
      )),
      install_requires=[],
      python_requires='>=3, <3.8',
      zip_safe=False,
      include_package_data=True
      )
| 1.507813 | 2 |
# Initialisierung mit ()
tupel_eins = (1, True, 4.5, "hallo")

# Elemente abfragen wie bei Listen
# (fixed: the variable is `tupel_eins`, not `tuple_eins` — NameError before)
viereinhalb = tupel_eins[2] #4.5
hallo = tupel_eins[-1] #'hallo'
vordere = tupel_eins[:2] #(1, True)

# Elemente verändern, unmöglich !
tupel_eins[0] = "neuer Wert" #TypeError

# Ganzes Tuple austauschen, möglich !
tupel_eins = ("neuer Wert", True, 4.5, "hallo")

# mehrere Werte gleichzeitig abfragen
positions_tupel = (45.65, 198.12)
(x, y) = positions_tupel # x: 45.65, y: 198.12

# unter anderem nützlich für startwerte
(x, y, z) = (0, 100, 200) #x: 0, y: 100, z:200

# Liste zu Tupel umwandeln
liste = [1,2,3]
tupel_aus_liste = tuple(liste)
official/nlp/projects/triviaqa/prediction.py | hjkim-haga/TF-OD-API | 1 | 12768016 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for inference."""
import tensorflow as tf
def split_and_pad(strategy, batch_size, x):
    """Split and pad for inference.

    Distributes `x` across the strategy's replicas: replica i receives the
    i-th contiguous slice of size batch_size // num_replicas. Slices running
    past the end of `x` are padded via zero-padded gather indices, so every
    replica sees a full, equally-sized batch (padding repeats element 0 —
    acceptable for inference where extra rows are discarded downstream).
    """
    per_replica_size = batch_size // strategy.num_replicas_in_sync

    def slice_fn(x, i):
        # Clamp the slice to the actual length of x along axis 0, then pad
        # the index vector back up to per_replica_size.
        begin = min(x.shape[0], i * per_replica_size)
        end = min(x.shape[0], (i + 1) * per_replica_size)
        indices = tf.range(begin, end, dtype=tf.int32)
        return tf.gather(x, tf.pad(indices, [[0, per_replica_size - end + begin]]))

    # pylint: disable=g-long-lambda
    return tf.nest.map_structure(
        lambda x: strategy.experimental_distribute_values_from_function(
            lambda ctx: slice_fn(x, ctx.replica_id_in_sync_group)), x)
    # pylint: enable=g-long-lambda
def decode_logits(top_k, max_size, logits, default):
    """Get the best answer span from begin/end logits.

    Args:
        top_k: number of top begin/end candidate positions to combine.
        max_size: maximum allowed span width (end - begin), inclusive.
        logits: begin/end logits; assumed shape (batch, seq_len, 2) so the
            transpose below yields (batch, 2, seq_len) — TODO confirm with
            the caller.
        default: fallback position returned when no valid span exists.

    Returns:
        Tuple (begin, end, score) per batch element for the highest-scoring
        span satisfying 0 <= end - begin <= max_size.
    """
    logits = tf.transpose(logits, [0, 2, 1])
    values, indices = tf.math.top_k(logits, top_k)
    # width[b, i, j] = end_candidate_j - begin_candidate_i for all pairs.
    width = (
        tf.expand_dims(indices[:, 1, :], -2) -
        tf.expand_dims(indices[:, 0, :], -1))
    mask = tf.logical_and(width >= 0, width <= max_size)
    # Pair score = begin logit + end logit; invalid pairs get a large negative.
    scores = (
        tf.expand_dims(values[:, 0, :], -1) + tf.expand_dims(values[:, 1, :], -2))
    scores = tf.where(mask, scores, -1e8)
    # Pick the argmax over the flattened top_k x top_k candidate grid.
    flat_indices = tf.argmax(tf.reshape(scores, (-1, top_k * top_k)), -1)
    begin = tf.gather(
        indices[:, 0, :], tf.math.floordiv(flat_indices, top_k), batch_dims=1)
    end = tf.gather(
        indices[:, 1, :], tf.math.mod(flat_indices, top_k), batch_dims=1)
    # If no candidate pair was valid, fall back to `default` for both ends.
    reduced_mask = tf.math.reduce_any(mask, [-1, -2])
    return (tf.where(reduced_mask, begin,
                     default), tf.where(reduced_mask, end, default),
            tf.math.reduce_max(scores, [-1, -2]))
@tf.function
def decode_answer(context, begin, end, token_offsets, end_limit):
    """Slice the answer substring out of `context` for token span [begin, end].

    `token_offsets` maps token indices to byte offsets within `context`;
    `end_limit` is the index of the last valid token.
    """
    i = tf.gather(token_offsets, begin, batch_dims=1)
    j = tf.gather(token_offsets, tf.minimum(end + 1, end_limit), batch_dims=1)
    # When the span ends at the final token, take everything up to the end
    # of the context string.
    j = tf.where(end == end_limit, tf.cast(tf.strings.length(context), tf.int64),
                 j)
    return tf.strings.substr(context, i, j - i)
def distributed_logits_fn(model, x):
    """Run an inference-mode forward pass of `model` on every replica of its
    distribution strategy and return the per-replica results."""
    def _forward(batch):
        return model(batch, training=False)

    return model.distribute_strategy.run(_forward, args=(x,))
| 1.96875 | 2 |
setz.py | relet/setzkasten | 0 | 12768017 | #!/usr/bin/env python3
import cv2 as cv
import json
import math
import numpy as np
import os
import sys
from requests.utils import requote_uri
from geojson import FeatureCollection, Feature, Polygon, dumps
# Tiling configuration, loaded from config.json next to this script.
config = json.load(open("config.json","r"))
target = config.get('target')        # output directory for tiles/GeoJSON
tilesize = config.get('tilesize')    # tile edge length in pixels
maxzoom = config.get('maxzoom')
spacing = config.get('spacing')
tile_format = '.webp'
# World bounds in degrees used by the pixel->lat/lon mapping.
LLBOUNDS = [-180.0, 180.0, -180.0, 180.0]
# Optional CLI filter: only re-render sources whose filename contains this.
match = None
if len(sys.argv)>=2:
    match = sys.argv[1]
# pixel coordinates as x,y
# tile coordinates as t,u
def xy_to_latlon(x, y, zoom):
    """Map pixel coordinates at the given zoom level to (lat, lon) degrees,
    using the module-level `tilesize` and `LLBOUNDS`."""
    half_world_px = float(math.pow(2, zoom - 1) * tilesize)
    lat = x / -half_world_px * LLBOUNDS[1]
    lon = y / half_world_px * LLBOUNDS[3]
    return lat, lon
# Main pipeline: place every configured source image on the global pixel
# grid, record its geographic footprint as a GeoJSON feature, and (unless
# filtered out by `match`) burn it into the tile pyramid from its native
# zoom level down to zoom 2.
features = []
prev_x, prev_y, prev_zoom = None, None, None
ymax = -1e10
for source in config.get('sources',[]):
    if len(source)<7:
        continue
    filename, xrel, yrel, imgzoom, title, family, date, location, comment, href = source[:10]
    # auto-place after spacing
    if xrel=="+":
        xrel = prev_x + int((2**imgzoom) * spacing)
        # rescale the previous position into this image's zoom level
        xrel = xrel * (2**(imgzoom-prev_zoom))
        print("CALCULATED NEW X FROM", prev_x, " AS ", xrel)
    if yrel=="+":
        yrel = prev_y + int((2**imgzoom) * spacing)
        yrel = yrel * (2**(imgzoom-prev_zoom))
        print("CALCULATED NEW Y FROM", prev_y, " AS ", yrel)
    print("Processing ",filename)
    source_im = cv.imread(filename, cv.IMREAD_UNCHANGED)
    w,h = source_im.shape[:2]
    # auto-place centered
    if yrel=="=":
        yrel = prev_yc * (2**(imgzoom-prev_zoom)) - int(h/2)
        print("CALCULATED NEW Y FROM CENTER", prev_yc, " AS ", yrel)
    # auto-place right of previous column
    elif yrel==">":
        yrel = (ymax + 1.0/100) * (2**imgzoom)
        print("CALCULATED NEW Y FROM YMAX", ymax, " AS ", yrel, imgzoom)
    else:
        ymax = yrel
    # might be off by a factor off two, to be verified.
    if title:
        print(title)
    print("PIXEL COORDINATES ", xrel, yrel, xrel+w, yrel+h)
    # Record the image's geographic footprint as a GeoJSON polygon feature.
    left, top = xy_to_latlon(xrel, yrel, imgzoom)
    right, bottom = xy_to_latlon(xrel+w, yrel+h, imgzoom)
    poly = Polygon([[(top, left), (top, right), (bottom, right), (bottom, left), (top, left)]])
    feat = Feature(geometry=poly, properties = {
        "title": title,
        "family": family,
        "date": date,
        "loc": location,
        "comment": comment,
        "href": href
    })
    features.append(feat)
    #if imgzoom < maxzoom:
    #    factor = math.pow(2, maxzoom-imgzoom)
    #    source_im = cv.resize(source_im, (0, 0), fx=factor, fy=factor)
    # FIXME: memory issues when blowing up - add maxzoom (and minzoom) to define display range
    # calculate outer borders of previous item to calculate relative positions
    prev_x = xrel + w
    prev_y = yrel + h
    prev_yc = yrel + h/2
    prev_yr = float(yrel + h) / (2**imgzoom)
    if prev_yr > ymax:
        ymax = prev_yr
        print("NEW YMAX ", ymax, "FROM", yrel, h)
    prev_zoom = imgzoom
    # Placement bookkeeping above always runs; tile rendering below can be
    # skipped via the `match` CLI filter.
    if match and not match in filename:
        continue
    zoom = imgzoom
    w = h = 256 # just to pass the first check
    # Render this image into the pyramid, halving it per zoom level.
    while zoom > 1 and w > 2 and h > 2:
        if zoom <= maxzoom:
            # relative zero (center) at the defined zoom level
            x0 = math.floor(tilesize * math.pow(2, zoom-1))
            y0 = math.floor(tilesize * math.pow(2, zoom-1))
            # image coordinates at that zoom level
            xi, yi = x0 + xrel, y0 + yrel
            # image size
            # NOTE: source images should always be transparent png, or overlaps will be covered
            w,h = source_im.shape[:2]
            wt = math.ceil(w / tilesize)
            ht = math.ceil(h / tilesize)
            # first tile to consider
            t0 = math.floor(xi / tilesize)
            u0 = math.floor(yi / tilesize)
            # top left of the considered tile
            xA = t0 * tilesize
            yA = u0 * tilesize
            # offset of the image to the first tile
            off_x = xi - xA
            off_y = yi - yA
            off_t = math.floor(off_x / tilesize)
            off_u = math.floor(off_y / tilesize)
            # CHECK: adjust range to actually cover the location of the translated image
            folders={}
            for tx in range(0, wt+1): # TODO: try t0-t0+wt
                for ty in range(0, ht+1):
                    # read current background tile
                    folder = target+"tiles/"+str(zoom)+"/"+str(u0+ty)
                    tile_url = folder +"/"+str(t0+tx)+tile_format
                    #print("Loading "+tile_url)
                    white_tile = np.zeros([tilesize, tilesize, 4],dtype=np.uint8)
                    #white_tile.fill(255)
                    bg = cv.imread(tile_url, cv.IMREAD_UNCHANGED)
                    if bg is None:
                        # No tile on disk yet: start from a blank transparent tile.
                        bg = white_tile.copy()
                        bg = cv.cvtColor(bg, cv.COLOR_BGR2BGRA)
                    # cut relevant section of source_im
                    from_x = max(0, tx * tilesize - off_x)
                    from_y = max(0, ty * tilesize - off_y)
                    to_x = min(w, (tx+1) * tilesize - off_x)
                    to_y = min(h, (ty+1) * tilesize - off_y)
                    cutout = source_im[from_x:to_x, from_y:to_y]
                    # correct location of background
                    dest_x = max(0, off_x - tx * tilesize)
                    dest_y = max(0, off_y - ty * tilesize)
                    dto_x = dest_x + to_x - from_x
                    dto_y = dest_y + to_y - from_y
                    # paste cutout onto background
                    # TODO: actually paste, not overwrite
                    # eg. overwrite white_tile, then merge with bg
                    try:
                        bg[dest_x:dto_x, dest_y:dto_y] = cutout
                    except:
                        # Shape mismatch at the image edge: skip this tile.
                        continue
                    #print("SOMETHING FAILED")
                    #cv.imshow('BG',bg)
                    #print("CUTOUT SIZE:", (from_x, to_x, from_y, to_y))
                    #print("FROM Y:", (from_y))
                    #print("TO Y:", (to_y))
                    #print("H:", h)
                    #cv.waitKey(1)
                    #sys.exit(1)
                    # then write that tile to file
                    if not folder in folders:
                        #print("Writing ",folder)
                        try:
                            os.makedirs(folder)
                            folders[folder]=True
                        except:
                            pass
                    cv.imwrite(tile_url, bg)
        # Next (coarser) zoom level: halve coordinates and the image itself.
        zoom = zoom - 1
        xrel = math.floor(xrel / 2)
        yrel = math.floor(yrel / 2)
        source_im = cv.resize(source_im, (0, 0), fx=0.5, fy=0.5)
        w = math.floor(w / 2)
        h = math.floor(h / 2)
# Write all image footprints out as a single GeoJSON overlay.
fc = FeatureCollection(features)
fp = open(target+"features.geojson", "w")
fp.write(dumps(fc))
fp.close()
def species_link(s):
    """Return an HTML list item linking the species name s to the setzkasten viewer."""
    encoded = requote_uri(s)
    return '<li><a href="https://setzkasten.relet.net#?{}">{}</a></li>'.format(encoded, s)
# Build an alphabetical index page with one link per feature title.
species_list = map(lambda f: f.properties.get('title'), features)
species_links = "\n".join(map(species_link, sorted(species_list)))
fi = open(target + "species_index.html", "w")
# Bug fix: closing tags were malformed ("<ul>...<html>" instead of "</ul>...</html>").
fi.write("<html><body><ul>{}</ul></body></html>".format(species_links))
fi.close()
| 2.34375 | 2 |
0000_book/grasping_stepbystep.py | takuya-ki/wrs | 0 | 12768018 | import math
import numpy as np
import basis.robot_math as rm
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import robot_sim.end_effectors.gripper.robotiq85.robotiq85 as rtq85
import grasping.annotation.utils as gu
import pickle
# Scene setup: world with camera, a small reference frame, and the bunny mesh
base = wd.World(cam_pos=[.3, .3, .3], lookat_pos=[0, 0, 0])
gm.gen_frame(length=.05, thickness=.0021).attach_to(base)
# object
object_bunny = cm.CollisionModel("objects/bunnysim.stl")
object_bunny.set_rgba([.9, .75, .35, .3])
object_bunny.attach_to(base)
# hnd_s
# contact_pairs, contact_points = gpa.plan_contact_pairs(object_bunny,
#                                                        max_samples=10000,
#                                                        min_dist_between_sampled_contact_points=.014,
#                                                        angle_between_contact_normals=math.radians(160),
#                                                        toggle_sampled_points=True)
# for p in contact_points:
#     gm.gen_sphere(p, radius=.002).attach_to(base)
# base.run()
# pickle.dump(contact_pairs, open( "save.p", "wb" ))
# Load contact pairs planned earlier (by the commented-out block above)
contact_pairs = pickle.load(open( "save.p", "rb" ))
# Visualize each contact pair: a sphere at each contact point plus a short
# arrow along each contact normal, colored per pair index
for i, cp in enumerate(contact_pairs):
    contact_p0, contact_n0 = cp[0]
    contact_p1, contact_n1 = cp[1]
    rgba = rm.get_rgba_from_cmap(i)
    gm.gen_sphere(contact_p0, radius=.002, rgba=rgba).attach_to(base)
    gm.gen_arrow(contact_p0, contact_p0+contact_n0*.01, thickness=.0012, rgba = rgba).attach_to(base)
    # gm.gen_arrow(contact_p0, contact_p0-contact_n0*.1, thickness=.0012, rgba = rgba).attach_to(base)
    gm.gen_sphere(contact_p1, radius=.002, rgba=rgba).attach_to(base)
    # gm.gen_dashstick(contact_p0, contact_p1, thickness=.0012, rgba=rgba).attach_to(base)
    gm.gen_arrow(contact_p1, contact_p1+contact_n1*.01, thickness=.0012, rgba=rgba).attach_to(base)
    # gm.gen_dasharrow(contact_p1, contact_p1+contact_n1*.03, thickness=.0012, rgba=rgba).attach_to(base)
# base.run()
# Grasp synthesis: for each contact pair, generate rotated grasps of the gripper
gripper_s = rtq85.Robotiq85()
contact_offset = .002 # extra jaw opening beyond the contact distance (meters)
grasp_info_list = []
for i, cp in enumerate(contact_pairs):
    print(f"{i} of {len(contact_pairs)} done!")
    contact_p0, contact_n0 = cp[0]
    contact_p1, contact_n1 = cp[1]
    contact_center = (contact_p0 + contact_p1) / 2
    jaw_width = np.linalg.norm(contact_p0 - contact_p1) + contact_offset * 2
    # Skip pairs wider than the gripper's maximum jaw opening
    if jaw_width > gripper_s.jawwidth_rng[1]:
        continue
    hndy = contact_n0  # jaw-closing axis along the first contact normal
    hndz = rm.orthogonal_vector(contact_n0)  # an orthogonal approach axis
    grasp_info_list += gu.define_grasp_with_rotation(gripper_s,
                                                     object_bunny,
                                                     gl_jaw_center_pos=contact_center,
                                                     gl_jaw_center_z=hndz,
                                                     gl_jaw_center_y=hndy,
                                                     jaw_width=jaw_width,
                                                     gl_rotation_ax=hndy,
                                                     rotation_interval=math.radians(30),
                                                     toggle_flip=True)
# Render every grasp returned by the planner at its pose
for grasp_info in grasp_info_list:
    aw_width, gl_jaw_center, hnd_pos, hnd_rotmat = grasp_info
    gripper_s.fix_to(hnd_pos, hnd_rotmat)
    gripper_s.jaw_to(aw_width)
    gripper_s.gen_meshmodel().attach_to(base)
base.run() | 2.0625 | 2 |
slack_logging_handler/__init__.py | danie1cohen/slack_handler | 0 | 12768019 | <filename>slack_logging_handler/__init__.py
from .slack_logging_handler import SlackHandler, color_picker, build_logger
| 1.203125 | 1 |
TimeWrapper_JE/venv/Lib/site-packages/je_time/modules/__init__.py | JE-Chen/je_old_repo | 0 | 12768020 | <filename>TimeWrapper_JE/venv/Lib/site-packages/je_time/modules/__init__.py
from je_time.modules import get_calendar
from je_time.modules import time_calculate
| 1.34375 | 1 |
organizations/migrations/0005_organizationuser_date_created.py | st8st8/django-organizations | 0 | 12768021 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils import timezone
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Adds a date_created timestamp column to OrganizationUser."""

    dependencies = [
        ('organizations', '0004_set_site'),
    ]

    operations = [
        migrations.AddField(
            model_name='organizationuser',
            name='date_created',
            # auto_now_add stamps rows created after this migration; the
            # timezone.now default only backfills existing rows, hence
            # preserve_default=False (the default is not kept in the model).
            field=models.DateTimeField(default=timezone.now, auto_now_add=True),
            preserve_default=False,
        ),
    ]
| 1.726563 | 2 |
magicfolder/picklemsg.py | mgax/MagicFolder | 1 | 12768022 | <reponame>mgax/MagicFolder<filename>magicfolder/picklemsg.py
import cPickle as pickle
CHUNK_SIZE = 64 * 1024 # 64 KB
class Remote(object):
    """Pickle-based message channel over a pair of file-like streams.

    Messages are (msg, payload) tuples. Iterating over a Remote yields
    received messages. (Python 2 code: module uses cPickle and the
    print statement.)
    """

    def __init__(self, in_file, out_file):
        # Incoming messages are unpickled from in_file; outgoing ones are
        # pickled to out_file using protocol 2 (binary).
        self.in_unpickler = pickle.Unpickler(in_file)
        self.out_pickler = pickle.Pickler(out_file, 2) # protocol version 2
        self.out_file = out_file

    def send(self, msg, payload=None):
        """Pickle and flush one (msg, payload) tuple."""
        self.out_pickler.dump( (msg, payload) )
        self.out_file.flush()
        # Clear the pickler memo so earlier objects are not referenced by
        # id in later messages (each message stands alone on the wire).
        self.out_pickler.clear_memo()

    def recv(self):
        """Read one (msg, payload) tuple; print the payload of 'error' messages."""
        msg, payload = self.in_unpickler.load()
        if msg == 'error':
            print "error from remote endpoint\n%s" % payload
        return msg, payload

    def send_file(self, src_file, progress=lambda b: None):
        """Stream src_file as 'file_chunk' messages followed by 'file_end'.

        progress is invoked with the byte count of each chunk sent.
        """
        while True:
            chunk = src_file.read(CHUNK_SIZE)
            if not chunk:
                break
            self.send('file_chunk', chunk)
            progress(len(chunk))
        self.send('file_end')

    def recv_file(self, dst_file, progress=lambda b: None):
        """Write incoming 'file_chunk' payloads to dst_file until 'file_end'.

        progress is invoked with the byte count of each chunk received.
        """
        while True:
            msg, payload = self.recv()
            if msg == 'file_end':
                break
            assert msg == 'file_chunk'
            dst_file.write(payload)
            progress(len(payload))

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: each step yields the next message.
        return self.recv()
| 2.5625 | 3 |
app/http/controllers/CarController.py | craigderington/serviceauto | 0 | 12768023 | """A CarController Module."""
from masonite.request import Request
from masonite.view import View
from masonite.controllers import Controller
from app.Car import Car
class CarController(Controller):
    """Controller exposing list and detail views for Car records."""

    def __init__(self, request: Request):
        """Store the incoming Masonite request for later route-parameter access.

        Arguments:
            request {masonite.request.Request} -- The Masonite Request class.
        """
        self.request = request

    def show(self, view: View, request: Request):
        """Render the listing page with every Car record."""
        return view.render("cars.html", {"cars": Car.all()})

    def single(self, view: View, request: Request):
        """Render the detail page for the car whose id is in the route."""
        car_id = self.request.id
        return view.render("car.html", {"car": Car.find(car_id)})
| 3.265625 | 3 |
bench/echo_server/mailbox/server.py | victorpoluceno/xwing | 12 | 12768024 | import logging
from xwing.mailbox import initialize, spawn, start
initialize()
logging.basicConfig(level='INFO')
async def run_server(mailbox):
    """Echo loop: resend every received message back to its sender.

    Terminates when recv() yields a falsy value (peer gone / shutdown).
    """
    while True:
        incoming = await mailbox.recv()
        if not incoming:
            return
        origin, payload = incoming
        await mailbox.send(origin, payload)
if __name__ == '__main__':
spawn(run_server, name='server')
start()
| 2.390625 | 2 |
example/0_Basic_usage_of_the_library/python_feapder/2_air_spider/2_air_spider.py | RecluseXU/learning_spider | 38 | 12768025 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on 2021-03-11 22:18:46
---------
@summary:
AirSpider : 轻量爬虫,学习成本低。面对一些数据量较少,无需断点续爬,无需分布式采集的需求,可采用此爬虫。
AirSpider不支持去重,因此配置文件中的去重配置无效
---------
@author: Administrator
"""
import feapder
class AirSpiderTest(feapder.AirSpider):
    """Minimal AirSpider example: fetch one URL through a custom download middleware."""

    __custom_setting__ = dict( # settings declared here take priority over those in setting.py
        PROXY_EXTRACT_API="127.0.0.1:10808",
    )

    def start_requests(self):
        # Seed request; download_midware routes it through self.xxx below
        yield feapder.Request("http://httpbin.org/headers", download_midware=self.xxx)

    def parse(self, request, response):
        if response.code != 200: # the framework retries automatically when a download fails or a parse function raises
            raise Exception("非法页面") # default max retry count is 100; configurable in setting.py
        print(response.text)

    def xxx(self, request):
        """
        Custom download middleware: stamp a fixed User-Agent on the request.
        """
        request.headers = {'User-Agent':"lalala"}
        return request
if __name__ == "__main__":
AirSpiderTest().start() | 1.851563 | 2 |
fable/test/tst_separate_files.py | rimmartin/cctbx_project | 0 | 12768026 | <reponame>rimmartin/cctbx_project<filename>fable/test/tst_separate_files.py
from __future__ import division, print_function
import os
op = os.path
def remove_file_if_necessary(file_name):
  """Delete file_name if it exists; raise RuntimeError when removal fails."""
  if os.path.isfile(file_name):
    os.remove(file_name)
  if os.path.exists(file_name):
    from libtbx.str_utils import show_string
    raise RuntimeError(
      "Unable to remove file: %s" % show_string(file_name))
def exercise(
    verbose,
    file_names_cpp,
    number_of_function_files=None,
    separate_files_main_namespace=None,
    separate_files_separate_namespace=None):
  """Convert subroutine_3.f to C++, compile, link, run, and verify the output.

  Args:
    verbose: print progress and the commands being executed.
    file_names_cpp: names of the C++ files to generate; the first entry is
      the top-level file.
    number_of_function_files: forwarded to fable.cout.process.
    separate_files_main_namespace: {file: [procedures]} emitted in the
      main namespace (defaults to empty).
    separate_files_separate_namespace: {file: [procedures]} emitted in
      separate namespaces (defaults to empty).
  """
  # Fixed: the {} defaults were mutable default arguments shared across calls.
  if (separate_files_main_namespace is None):
    separate_files_main_namespace = {}
  if (separate_files_separate_namespace is None):
    separate_files_separate_namespace = {}
  if (verbose): print("next exercise")
  import libtbx.load_env
  test_valid = libtbx.env.under_dist(
    module_name="fable", path="test/valid", test=op.isdir)
  import fable.cout
  # Generate the C++ sources (return value was an unused local before).
  fable.cout.process(
    file_names=[op.join(test_valid, "subroutine_3.f")],
    top_procedures=["prog"],
    namespace="tst_separate_files",
    top_cpp_file_name=file_names_cpp[0],
    number_of_function_files=number_of_function_files,
    separate_files_main_namespace=separate_files_main_namespace,
    separate_files_separate_namespace=separate_files_separate_namespace)
  from fable import simple_compilation
  comp_env = simple_compilation.environment()
  from libtbx import easy_run
  # Compile each generated C++ file to an object file.
  file_names_obj = []
  for file_name_cpp in file_names_cpp:
    obj = comp_env.file_name_obj(file_name_cpp=file_name_cpp)
    remove_file_if_necessary(file_name=obj)
    cmd = comp_env.compilation_command(file_name_cpp=file_name_cpp)
    if (verbose): print(cmd)
    easy_run.call(command=cmd)
    assert op.exists(obj)
    file_names_obj.append(obj)
  # Link the object files into the test executable.
  exe_root = "tst_separate_files"
  exe = comp_env.file_name_exe(exe_root=exe_root)
  remove_file_if_necessary(file_name=exe)
  cmd = comp_env.link_command(file_names_obj=file_names_obj, exe_root=exe_root)
  if (verbose): print(cmd)
  easy_run.call(command=cmd)
  # Run the executable and compare its stdout with the expected output.
  cmd = op.join(".", exe)
  if (verbose): print(cmd)
  assert op.exists(cmd)
  stdout = easy_run.fully_buffered(command=cmd).raise_if_errors().stdout_lines
  text = "\n".join(stdout)
  if (verbose):
    print(text)
  from fable.tst_cout_compile import read_file_names_and_expected_cout
  info = read_file_names_and_expected_cout(test_valid=test_valid).get(
    "subroutine_3.f")[0]
  from libtbx.test_utils import show_diff
  assert not show_diff(text, "\n".join(info.out_lines))
  if (verbose): print()
if (verbose): print()
def run(args):
  """Drive the separate-file code-generation exercises.

  Args:
    args: command-line arguments; must be [] or ["--verbose"].
  """
  assert args in [[], ["--verbose"]]
  verbose = (args == ["--verbose"])
  from libtbx.utils import show_times_at_exit
  show_times_at_exit()
  # Renamed from `all`, which shadowed the builtin of the same name.
  # Flip individual `0 or` toggles to isolate one exercise while debugging.
  run_all = True
  if (0 or run_all):
    exercise(verbose,
      file_names_cpp=["top.cpp", "functions.cpp"],
      number_of_function_files=1)
  if (0 or run_all):
    exercise(verbose,
      file_names_cpp=["top.cpp", "subs.cpp"],
      separate_files_separate_namespace={"subs": ["sub1", "sub2"]})
  if (0 or run_all):
    exercise(verbose,
      file_names_cpp=["top.cpp", "subs.cpp", "functions.cpp"],
      number_of_function_files=1,
      separate_files_separate_namespace={"subs": ["sub1", "sub2"]})
  if (0 or run_all):
    exercise(verbose,
      file_names_cpp=["top.cpp", "subs.cpp"],
      separate_files_main_namespace={"subs": ["sub1", "sub2"]})
  if (0 or run_all):
    exercise(verbose,
      file_names_cpp=["top.cpp", "subs.cpp", "functions.cpp"],
      number_of_function_files=1,
      separate_files_main_namespace={"subs": ["sub1", "sub2"]})
  print("OK")
if (__name__ == "__main__"):
  import sys
  # Forward command-line flags (only "--verbose" is accepted) to run().
  run(args=sys.argv[1:])
| 2.21875 | 2 |
corehq/apps/ota/tests/__init__.py | dslowikowski/commcare-hq | 1 | 12768027 | from .digest_restore import * | 1.125 | 1 |
snuba/datasets/storages/transactions.py | fpacifici/snuba | 0 | 12768028 | from snuba.clickhouse.columns import (
UUID,
Array,
ColumnSet,
DateTime,
Float,
IPv4,
IPv6,
Nested,
)
from snuba.clickhouse.columns import SchemaModifiers as Modifiers
from snuba.clickhouse.columns import String, UInt
from snuba.clusters.storage_sets import StorageSetKey
from snuba.datasets.schemas.tables import WritableTableSchema
from snuba.datasets.storage import WritableTableStorage
from snuba.datasets.storages import StorageKey
from snuba.datasets.storages.transaction_column_processor import (
TransactionColumnProcessor,
)
from snuba.datasets.table_storage import KafkaStreamLoader
from snuba.datasets.transactions_processor import TransactionsMessageProcessor
from snuba.query.processors.arrayjoin_keyvalue_optimizer import (
ArrayJoinKeyValueOptimizer,
)
from snuba.query.processors.mapping_optimizer import MappingOptimizer
from snuba.query.processors.prewhere import PrewhereProcessor
from snuba.web.split import TimeSplitQueryStrategy
# ClickHouse column layout of the transactions table. Modifiers mark
# nullability; readonly columns are not written by the consumer (presumably
# materialized server-side — see insert_allow_materialized_columns below).
columns = ColumnSet(
    [
        ("project_id", UInt(64)),
        ("event_id", UUID()),
        ("trace_id", UUID()),
        ("span_id", UInt(64)),
        ("transaction_name", String()),
        ("transaction_hash", UInt(64, Modifiers(readonly=True))),
        ("transaction_op", String()),
        ("transaction_status", UInt(8)),
        ("start_ts", DateTime()),
        ("start_ms", UInt(16)),
        ("finish_ts", DateTime()),
        ("finish_ms", UInt(16)),
        ("duration", UInt(32)),
        ("platform", String()),
        ("environment", String(Modifiers(nullable=True))),
        ("release", String(Modifiers(nullable=True))),
        ("dist", String(Modifiers(nullable=True))),
        ("ip_address_v4", IPv4(Modifiers(nullable=True))),
        ("ip_address_v6", IPv6(Modifiers(nullable=True))),
        ("user", String()),
        ("user_hash", UInt(64, Modifiers(readonly=True))),
        ("user_id", String(Modifiers(nullable=True))),
        ("user_name", String(Modifiers(nullable=True))),
        ("user_email", String(Modifiers(nullable=True))),
        ("sdk_name", String()),
        ("sdk_version", String()),
        ("http_method", String(Modifiers(nullable=True))),
        ("http_referer", String(Modifiers(nullable=True))),
        # tags/contexts/measurements are key-value Nested columns; the
        # _flattened / _hash_map companions support optimized lookups.
        ("tags", Nested([("key", String()), ("value", String())])),
        ("_tags_flattened", String()),
        ("_tags_hash_map", Array(UInt(64), Modifiers(readonly=True))),
        ("contexts", Nested([("key", String()), ("value", String())])),
        ("_contexts_flattened", String()),
        ("measurements", Nested([("key", String()), ("value", Float(64))]),),
        # Kafka bookkeeping and retention/deletion markers
        ("partition", UInt(16)),
        ("offset", UInt(64)),
        ("message_timestamp", DateTime()),
        ("retention_days", UInt(16)),
        ("deleted", UInt(8)),
    ]
)
# Writable schema mapping the column set onto the local/distributed tables.
schema = WritableTableSchema(
    columns=columns,
    local_table_name="transactions_local",
    dist_table_name="transactions_dist",
    storage_set_key=StorageSetKey.TRANSACTIONS,
    mandatory_conditions=[],
    # Columns eligible for promotion into the PREWHERE clause
    prewhere_candidates=["event_id", "transaction_name", "transaction", "title"],
)
# Writable storage: query processors applied to reads, and a Kafka stream
# loader feeding writes from the "events" topic.
storage = WritableTableStorage(
    storage_key=StorageKey.TRANSACTIONS,
    storage_set_key=StorageSetKey.TRANSACTIONS,
    schema=schema,
    query_processors=[
        # Rewrites tags[...] lookups against the _tags_hash_map column
        MappingOptimizer("tags", "_tags_hash_map", "tags_hash_map_enabled"),
        TransactionColumnProcessor(),
        # Optimizes arrayJoin over the key/value Nested columns
        ArrayJoinKeyValueOptimizer("tags"),
        ArrayJoinKeyValueOptimizer("measurements"),
        PrewhereProcessor(),
    ],
    stream_loader=KafkaStreamLoader(
        processor=TransactionsMessageProcessor(), default_topic="events",
    ),
    # Long time ranges are split on the finish timestamp
    query_splitters=[TimeSplitQueryStrategy(timestamp_col="finish_ts")],
    writer_options={"insert_allow_materialized_columns": 1},
)
| 1.484375 | 1 |
.tox/scenario/lib/python2.7/site-packages/oslo_utils/tests/test_excutils.py | bdrich/neutron-lbaas | 0 | 12768029 | # Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import time
import mock
from oslotest import base as test_base
from oslotest import moxstubout
from oslo_utils import excutils
from oslo_utils import timeutils
mox = moxstubout.mox
class Fail1(excutils.CausedByException):
    """First exception type used to exercise cause chaining in tests."""
    pass
class Fail2(excutils.CausedByException):
    """Second exception type used as the cause in chaining tests."""
    pass
class CausedByTest(test_base.BaseTestCase):
    """Tests for excutils.raise_with_cause / CausedByException chaining."""

    def test_caused_by_explicit(self):
        """An explicitly passed cause= is attached and shown by pformat()."""
        e = self.assertRaises(Fail1,
                              excutils.raise_with_cause,
                              Fail1, "I was broken",
                              cause=Fail2("I have been broken"))
        self.assertIsInstance(e.cause, Fail2)
        e_p = e.pformat()
        self.assertIn("I have been broken", e_p)
        self.assertIn("Fail2", e_p)

    def test_caused_by_implicit(self):
        """The currently handled exception is picked up as the implicit cause."""

        def raises_chained():
            try:
                raise Fail2("I have been broken")
            except Fail2:
                excutils.raise_with_cause(Fail1, "I was broken")

        e = self.assertRaises(Fail1, raises_chained)
        self.assertIsInstance(e.cause, Fail2)
        e_p = e.pformat()
        self.assertIn("I have been broken", e_p)
        self.assertIn("Fail2", e_p)
class SaveAndReraiseTest(test_base.BaseTestCase):
    """Tests for the excutils.save_and_reraise_exception context manager."""

    def test_save_and_reraise_exception(self):
        """The original exception is re-raised when the body succeeds."""
        e = None
        msg = 'foo'
        try:
            try:
                raise Exception(msg)
            except Exception:
                with excutils.save_and_reraise_exception():
                    pass
        except Exception as _e:
            e = _e
        self.assertEqual(str(e), msg)

    @mock.patch('logging.getLogger')
    def test_save_and_reraise_exception_dropped(self, get_logger_mock):
        """A second exception in the body replaces the first, which is logged."""
        logger = get_logger_mock()
        e = None
        msg = 'second exception'
        try:
            try:
                raise Exception('dropped')
            except Exception:
                with excutils.save_and_reraise_exception():
                    raise Exception(msg)
        except Exception as _e:
            e = _e
        self.assertEqual(str(e), msg)
        self.assertTrue(logger.error.called)

    def test_save_and_reraise_exception_no_reraise(self):
        """Test that suppressing the reraise works."""
        try:
            raise Exception('foo')
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                ctxt.reraise = False

    @mock.patch('logging.getLogger')
    def test_save_and_reraise_exception_dropped_no_reraise(self,
                                                           get_logger_mock):
        """With reraise=False, the dropped exception is not logged as an error."""
        logger = get_logger_mock()
        e = None
        msg = 'second exception'
        try:
            try:
                raise Exception('dropped')
            except Exception:
                with excutils.save_and_reraise_exception(reraise=False):
                    raise Exception(msg)
        except Exception as _e:
            e = _e
        self.assertEqual(str(e), msg)
        self.assertFalse(logger.error.called)

    def test_save_and_reraise_exception_provided_logger(self):
        """A caller-supplied logger receives the dropped-exception error."""
        fake_logger = mock.MagicMock()
        try:
            try:
                raise Exception('foo')
            except Exception:
                with excutils.save_and_reraise_exception(logger=fake_logger):
                    raise Exception('second exception')
        except Exception:
            pass
        self.assertTrue(fake_logger.error.called)
class ForeverRetryUncaughtExceptionsTest(test_base.BaseTestCase):
    """Tests for the forever_retry_uncaught_exceptions decorator.

    Each test scripts, via mox, the exact sequence of raised exceptions,
    fake timestamps (timeutils.now) and expected logging.exception calls.
    Per these expectations, the decorator logs a repeated exception at
    most once per minute, or when the exception message changes.
    """

    def setUp(self):
        super(ForeverRetryUncaughtExceptionsTest, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = moxfixture.mox
        self.stubs = moxfixture.stubs

    @excutils.forever_retry_uncaught_exceptions
    def exception_generator(self):
        # Raises whatever the (mocked) exception_to_raise supplies until it
        # returns None; the decorator retries the function after each raise.
        exc = self.exception_to_raise()
        while exc is not None:
            raise exc
            exc = self.exception_to_raise()

    def exception_to_raise(self):
        # Stubbed out per-test via mox to script the exception sequence.
        return None

    def my_time_sleep(self, arg):
        # Replaces time.sleep so the retry loop runs without delay.
        pass

    def exc_retrier_common_start(self):
        """Stub out sleep, logging, the clock, and the exception source."""
        self.stubs.Set(time, 'sleep', self.my_time_sleep)
        self.mox.StubOutWithMock(logging, 'exception')
        self.mox.StubOutWithMock(timeutils, 'now',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(self, 'exception_to_raise')

    def exc_retrier_sequence(self, exc_id=None,
                             exc_count=None, before_timestamp_calls=(),
                             after_timestamp_calls=()):
        """Script one raised exception and the clock/log calls around it.

        exc_count == 0 means no log line is expected for this exception.
        """
        self.exception_to_raise().AndReturn(
            Exception('unexpected %d' % exc_id))
        # Timestamp calls that happen before the logging is possibly triggered.
        for timestamp in before_timestamp_calls:
            timeutils.now().AndReturn(timestamp)
        if exc_count != 0:
            logging.exception(mox.In(
                'Unexpected exception occurred %d time(s)' % exc_count))
        # Timestamp calls that happen after the logging is possibly triggered.
        for timestamp in after_timestamp_calls:
            timeutils.now().AndReturn(timestamp)

    def exc_retrier_common_end(self):
        """Terminate the scripted sequence and run the decorated generator."""
        self.exception_to_raise().AndReturn(None)
        self.mox.ReplayAll()
        self.exception_generator()
        self.addCleanup(self.stubs.UnsetAll)

    def test_exc_retrier_1exc_gives_1log(self):
        """A single exception is logged once."""
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1, exc_count=1,
                                  after_timestamp_calls=[0])
        self.exc_retrier_common_end()

    def test_exc_retrier_same_10exc_1min_gives_1log(self):
        """Identical exceptions within the same minute produce one log."""
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  after_timestamp_calls=[0], exc_count=1)
        # By design, the following exception don't get logged because they
        # are within the same minute.
        for i in range(2, 11):
            self.exc_retrier_sequence(exc_id=1,
                                      before_timestamp_calls=[i],
                                      exc_count=0)
        self.exc_retrier_common_end()

    def test_exc_retrier_same_2exc_2min_gives_2logs(self):
        """Identical exceptions more than a minute apart are logged again."""
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  after_timestamp_calls=[0], exc_count=1)
        self.exc_retrier_sequence(exc_id=1,
                                  before_timestamp_calls=[65], exc_count=1,
                                  after_timestamp_calls=[65, 66])
        self.exc_retrier_common_end()

    def test_exc_retrier_same_10exc_2min_gives_2logs(self):
        """Suppressed repeats are counted in the next periodic log line."""
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  after_timestamp_calls=[0], exc_count=1)
        for ts in [12, 23, 34, 45]:
            self.exc_retrier_sequence(exc_id=1,
                                      before_timestamp_calls=[ts],
                                      exc_count=0)
        # The previous 4 exceptions are counted here
        self.exc_retrier_sequence(exc_id=1,
                                  before_timestamp_calls=[106],
                                  exc_count=5,
                                  after_timestamp_calls=[106, 107])
        # Again, the following are not logged due to being within
        # the same minute
        for ts in [117, 128, 139, 150]:
            self.exc_retrier_sequence(exc_id=1,
                                      before_timestamp_calls=[ts],
                                      exc_count=0)
        self.exc_retrier_common_end()

    def test_exc_retrier_mixed_4exc_1min_gives_2logs(self):
        """A changed exception message triggers an immediate new log."""
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  # The stop watch will be started,
                                  # which will consume one timestamp call.
                                  after_timestamp_calls=[0], exc_count=1)
        # By design, this second 'unexpected 1' exception is not counted. This
        # is likely a rare thing and is a sacrifice for code simplicity.
        self.exc_retrier_sequence(exc_id=1, exc_count=0,
                                  # Since the exception will be the same
                                  # the expiry method will be called, which
                                  # uses up a timestamp call.
                                  before_timestamp_calls=[5])
        self.exc_retrier_sequence(exc_id=2, exc_count=1,
                                  # The watch should get reset, which uses
                                  # up two timestamp calls.
                                  after_timestamp_calls=[10, 20])
        # Again, trailing exceptions within a minute are not counted.
        self.exc_retrier_sequence(exc_id=2, exc_count=0,
                                  # Since the exception will be the same
                                  # the expiry method will be called, which
                                  # uses up a timestamp call.
                                  before_timestamp_calls=[25])
        self.exc_retrier_common_end()

    def test_exc_retrier_mixed_4exc_2min_gives_2logs(self):
        """Message change plus minute expiry still yields one log per change."""
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  # The stop watch will now be started.
                                  after_timestamp_calls=[0], exc_count=1)
        # Again, this second exception of the same type is not counted
        # for the sake of code simplicity.
        self.exc_retrier_sequence(exc_id=1,
                                  before_timestamp_calls=[10], exc_count=0)
        # The difference between this and the previous case is the log
        # is also triggered by more than a minute expiring.
        self.exc_retrier_sequence(exc_id=2, exc_count=1,
                                  # The stop watch will now be restarted.
                                  after_timestamp_calls=[100, 105])
        self.exc_retrier_sequence(exc_id=2,
                                  before_timestamp_calls=[110], exc_count=0)
        self.exc_retrier_common_end()

    def test_exc_retrier_mixed_4exc_2min_gives_3logs(self):
        """Repeats past the minute boundary are counted before a new message."""
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  # The stop watch will now be started.
                                  after_timestamp_calls=[0], exc_count=1)
        # This time the second 'unexpected 1' exception is counted due
        # to the same exception occurring same when the minute expires.
        self.exc_retrier_sequence(exc_id=1,
                                  before_timestamp_calls=[10], exc_count=0)
        self.exc_retrier_sequence(exc_id=1,
                                  before_timestamp_calls=[100],
                                  after_timestamp_calls=[100, 105],
                                  exc_count=2)
        self.exc_retrier_sequence(exc_id=2, exc_count=1,
                                  after_timestamp_calls=[110, 111])
        self.exc_retrier_common_end()
| 2.25 | 2 |
scripts/ui/common.py | Hiwatts/facebook360_dep | 221 | 12768030 | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Common functions used across the UI tabs.
The UI shares several common functions across its tabs. Unlike dep_util, this file
contains functions that specifically reference elements in the tab. This means, if
further extension of the UI is pursued, this file should be reserved for common
functions that are *explicitly* tied to the UI and dep_util for functions that could
be used in contexts outside the UI.
"""
import collections
import datetime
import glob
import os
import shutil
import subprocess
import sys
from PyQt5 import QtCore, QtWidgets
dir_scripts = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir_root = os.path.dirname(dir_scripts)
sys.path.append(dir_root)
sys.path.append(os.path.join(dir_scripts, "aws"))
sys.path.append(os.path.join(dir_scripts, "render"))
sys.path.append(os.path.join(dir_scripts, "util"))
import dep_util
import glog_check as glog
import scripts.render.config as config
from log_reader import LogReader
from scripts.aws.create import (
get_staging_info,
get_render_pid,
has_render_flag,
run_ssh_command,
)
from scripts.aws.util import AWSUtil
from scripts.render.network import LAN
from scripts.util.system_util import (
get_flags,
get_flags_from_flagfile,
image_type_paths,
run_command,
)
from slider_image_thresholds import SliderWidget
script_dir = os.path.dirname(os.path.realpath(__file__))
scripts_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
dep_dir = os.path.join(scripts_dir, os.pardir)
dep_bin_dir = os.path.join(dep_dir, "build", "bin")
dep_res_dir = os.path.join(dep_dir, "res")
dep_flags_dir = os.path.join(dep_res_dir, "flags")
os.makedirs(dep_flags_dir, exist_ok=True)
source_root = os.path.join(dep_dir, "source")
depth_est_src = os.path.join(source_root, "depth_estimation")
render_src = os.path.join(source_root, "render")
render_scripts = os.path.join(scripts_dir, "render")
type_color_var = "color_variance"
type_fg_mask = "fg_mask"
threshold_sliders = {
# attr: type, printed name, slider index, max value, default value
"noise": [type_color_var, "Noise variance", 1, 1.5e-3, 4e-5],
"detail": [type_color_var, "Detail variance", 2, 2e-2, 1e-3],
"blur": [type_fg_mask, "Blur radius", 1, 20, 2],
"closing": [type_fg_mask, "Closing size", 2, 20, 4],
"thresh": [type_fg_mask, "Threshold", 3, 1, 3e-2],
}
def init(parent):
    """Sets up all the UI global internals (logs, data, and flags) and any
    tab specific components.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    # Suppress change-callbacks while the tab's widgets are being populated
    parent.is_refreshing_data = True
    parent.initialize_paths()
    parent.set_default_top_level_paths()
    parent.setup_logs()
    parent.setup_data()
    parent.setup_flags()
    # Optional per-tab hooks: only invoked when the tab defines them
    if "retrieve_missing_flagfiles" in dir(parent):
        parent.retrieve_missing_flagfiles()
    if "add_default_flags" in dir(parent):
        parent.add_default_flags()
    if "setup_thresholds" in dir(parent):
        parent.setup_thresholds()
    if "add_data_type_validators" in dir(parent):
        parent.add_data_type_validators()
    if "setup_farm" in dir(parent):
        parent.setup_farm()
    if "update_run_button_text" in dir(parent):
        parent.update_run_button_text()
    parent.is_refreshing_data = False
def setup_aws_config(parent):
    """Sets up the configuration of the Kubernetes cluster.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    if parent.parent.is_aws:
        # Restore the last-used cluster size and EC2 instance type from the
        # saved aws/create flagfile into the corresponding widgets
        create_flagfile = os.path.join(
            parent.path_flags, parent.app_name_to_flagfile[parent.app_aws_create]
        )
        if os.path.exists(create_flagfile):
            create_flags = get_flags_from_flagfile(create_flagfile)
            if "cluster_size" in create_flags:
                spin_num_workers = getattr(
                    parent.dlg, f"spin_{parent.tag}_farm_num_workers", None
                )
                spin_num_workers.setValue(int(create_flags["cluster_size"]))
            if "instance_type" in create_flags:
                dd_ec2 = getattr(parent.dlg, f"dd_{parent.tag}_farm_ec2", None)
                dd_ec2.setCurrentText(create_flags["instance_type"])
def setup_farm(parent):
    """Sets up the UI to interact with a LAN cluster.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    initialize_farm_groupbox(parent)
    # Broadcast on the master's subnet: replace the last octet with 255
    subnet_prefix, _last_octet = parent.parent.ui_flags.master.rsplit(".", 1)
    parent.lan = LAN(f"{subnet_prefix}.255")
def get_tooltip(parent, app_name):
    """Gets the help tooltip display of a binary.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        app_name (str): Name of the binary.

    Returns:
        str: Help from the binary.
    """
    # Python entry points live under scripts_dir; compiled binaries under
    # dep_bin_dir. (Renamed from `dir`, which shadowed the builtin.)
    app_dir = scripts_dir if app_name.endswith(".py") else dep_bin_dir
    tooltip = dep_util.get_tooltip(os.path.join(app_dir, app_name))
    if not tooltip:
        parent.log_reader.log_warning(f"Cannot get tooltip for: {app_name}")
    return tooltip
def initialize_paths(parent):
    """Initializes paths for scripts and flags depending on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tag = parent.tag
    parent.app_name_to_flagfile = {}
    # Tabs that run render.py; depth/export additionally use AWS helpers
    if tag in ["bg", "depth", "export"]:
        parent.app_name = "render/render.py"
    if tag in ["depth", "export"]:
        parent.app_aws_clean = "aws/clean.py"
        parent.app_aws_create = "aws/create.py"
        parent.app_name_to_flagfile[parent.app_aws_clean] = "clean.flags"
    # Per-tab app name and flagfile basename
    if tag == "calibrate":
        parent.app_name = "Calibration"
        parent.flagfile_basename = "calibration.flags"
    elif tag == "bg":
        parent.flagfile_basename = "render_background.flags"
    elif tag == "depth":
        parent.flagfile_basename = "render_depth.flags"
        parent.app_name_to_flagfile[parent.app_aws_create] = "aws_create_video.flags"
    elif tag == "export":
        parent.flagfile_basename = "render_export.flags"
        parent.app_name_to_flagfile[parent.app_aws_create] = "aws_create_export.flags"
        parent.app_aws_download_meshes = "aws/download_meshes.py"
        parent.app_name_to_flagfile[
            parent.app_aws_download_meshes
        ] = "download_meshes.flags"
    parent.app_name_to_flagfile[parent.app_name] = parent.flagfile_basename
    parent.tooltip = get_tooltip(parent, parent.app_name)
    # Initial UI state flags shared by all tabs
    parent.is_refreshing_data = False
    parent.is_process_killed = False
    parent.threshs_tooltip = "Click and drag to pan, scroll to zoom in and out"
    parent.script_dir = script_dir
def setup_logs(parent):
    """Sets up logging system for dialog on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Returns:
        LogReader: Reader configured for the current tab.
    """
    tag = parent.tag
    qt_text_edit = getattr(parent.dlg, f"text_{tag}_log", None)
    qt_tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
    tab_idx = qt_tab_widget.count() - 1 # log is always the last tab
    # Unique, timestamped log file per tab instance
    ts = dep_util.get_timestamp("%Y%m%d%H%M%S.%f")
    name = parent.__class__.__name__
    log_file = os.path.join(parent.path_logs, f"{name}_{ts}")
    log_reader = LogReader(qt_text_edit, parent, log_file)
    log_reader.set_tab_widget(qt_tab_widget, tab_idx)
    return log_reader
def setup_flagfile_tab(parent):
    """Sets up the flags according to the corresponding flagfile on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    editor = getattr(parent.dlg, f"text_{parent.tag}_flagfile_edit", None)
    save_button = getattr(parent.dlg, f"btn_{parent.tag}_flagfile_save", None)
    editor.textChanged.connect(parent.on_changed_flagfile_edit)
    save_button.clicked.connect(parent.save_flag_file)
    # Save stays disabled until the flagfile text is actually edited
    save_button.setEnabled(False)
def setup_file_explorer(parent):
    """Creates the file explorer rooted on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    parent.fs_tree = parent.dlg.tree_file_explorer
    parent.fs_model, parent.fs_tree = dep_util.setup_file_explorer(
        parent.fs_tree, parent.path_project
    )
    # Clicking any entry previews it in the shared preview pane
    parent.fs_tree.clicked.connect(lambda: preview_file(parent))
def preview_file(parent):
    """Displays the file and its label on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    dlg = parent.dlg
    # Displayed paths are made relative to the project root
    path_prefix = f"{parent.path_project}/"
    dep_util.preview_file(
        parent.fs_model,
        parent.fs_tree,
        dlg.label_preview_image,
        dlg.label_preview_path,
        path_prefix,
    )
def switch_ui_elements_for_processing(parent, gb, state):
    """Switches element interaction when processing on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        state (str): Identifier of the callback state.
    """
    # Buttons
    parent.update_buttons(gb)
    # Switch all other sections, except the file explorer
    dlg = parent.dlg
    for gbi in dlg.findChildren(QtWidgets.QGroupBox):
        if gbi != gb and not gbi.objectName().endswith("_file_explorer"):
            gbi.setEnabled(state)
    # Switch current group box elements (checkboxes, dropdowns, values, labels)
    prefixes = ["cb_", "dd_", "val_", "label_"]
    dep_util.switch_objects_prefix(gb, prefixes, state)
    # Switch tabs that are not image preview or log
    for w in dlg.findChildren(QtWidgets.QWidget):
        name = w.objectName()
        ignore = name.endswith("_preview") or name.endswith("_log")
        if name.startswith("tab_") and not ignore:
            w.setEnabled(state)
    # Switch other sections (the step tabs of every other section of the UI)
    for s in parent.parent.sections:
        if s != parent:
            dep_util.set_tab_enabled(parent.dlg.w_steps, s.tag, state)
def cancel_process(parent):
    """Stops a running process on the specified tab.

    Local processes are killed via the log reader. A cloud render runs detached
    on an AWS staging instance, so it is additionally killed over SSH.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    running_render = False  # Render has to be explicitly killed since it runs detached
    if parent.is_farm and parent.is_aws:
        # Detect whether one of the tracked processes is a cloud render job
        processes = parent.log_reader.get_processes()
        for process in processes:
            if process == "run_aws_create" or process.startswith("run_export"):
                running_render = True
        if running_render:
            aws_util = AWSUtil(
                parent.path_aws_credentials, region_name=parent.parent.aws_util.region_name
            )
            _, ip_staging = get_staging_info(aws_util, parent.path_aws_ip_file)
            if ip_staging:
                # Kill the detached render process on the staging host over SSH
                render_pid = get_render_pid(parent.path_aws_key_fn, ip_staging)
                if render_pid is not None:
                    run_ssh_command(
                        parent.path_aws_key_fn, ip_staging, f"kill -9 {render_pid}"
                    )
    parent.log_reader.kill_all_processes()
    # Mark the kill so post-process callbacks (e.g. S3 sync) can skip work
    parent.is_process_killed = True
    if "reset_run_button_text" in dir(parent):
        parent.reset_run_button_text()
def is_cloud_running_process(parent):
    """Checks if a render process is being run on the cloud.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Returns:
        bool: True if a render with the tab's corresponding flag is currently
        running on the AWS staging instance, False otherwise (including when
        not on AWS/farm, no SSH key, no staging IP, or an unsupported tab).
    """
    key_fn = parent.path_aws_key_fn
    if not parent.is_aws or not parent.is_farm or not os.path.isfile(key_fn):
        return False
    aws_util = AWSUtil(
        parent.path_aws_credentials, region_name=parent.parent.aws_util.region_name
    )
    # Do not spin up an instance just to check status
    _, ip_staging = get_staging_info(
        aws_util, parent.path_aws_ip_file, start_instance=False
    )
    if not ip_staging:
        return False
    tag = parent.tag
    # Only the depth and export tabs correspond to cloud render jobs
    if tag not in ["depth", "export"]:
        return False
    # Depth runs have run_depth_estimation=true; export runs have it false
    flag = "run_depth_estimation"
    value = tag == "depth"
    return has_render_flag(key_fn, ip_staging, flag, value)
def sync_with_s3(parent, gb, subdirs):
    """Synchronizes data from the local directory to S3.

    Per-frame outputs are first tarred locally, then only the resulting .tar
    files are synced to the S3 project root. All commands are chained into a
    single shell invocation run as one tracked background process.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        subdirs (list[str]): Local paths to be synced.
    """
    run_silently = not parent.parent.ui_flags.verbose
    cmds = []
    parent.log_reader.log_notice("Syncing frames with S3...")
    for subdir in subdirs:
        local = os.path.join(config.DOCKER_INPUT_ROOT, subdir)
        remote = os.path.join(parent.parent.ui_flags.project_root, subdir)
        # "_levels" directories contain one subdirectory per pyramid level
        if "_levels" in subdir:
            local_dirs = [
                os.path.join(local, f"level_{level}")
                for level in range(len(config.WIDTHS))
            ]
        else:
            local_dirs = [local]
        # Tar frames so the sync transfers one archive per frame
        tar_app_path = os.path.join(scripts_dir, "util", "tar_frame.py")
        for local_dir in local_dirs:
            frames = dep_util.get_frame_list(local_dir)
            if not frames:
                if not run_silently:
                    print(glog.yellow(f"No frames found for S3 syncing in {local_dir}"))
                continue
            for frame in frames:
                cmds.append(f"python3.7 {tar_app_path} --src={local_dir} --frame={frame}")
        # Only the tarred archives are uploaded
        cmds.append(f"aws s3 sync {local} {remote} --exclude '*' --include '*.tar'")
    p_id = f"sync_results_s3_{parent.tag}"
    cmd_and = " && ".join(cmds)
    cmd = f'/bin/sh -c "{cmd_and}"'
    start_process(parent, cmd, gb, p_id, run_silently)
def on_process_finished(parent, p_id):
    """Callback event handler for a process completing on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        p_id (str): PID of completed process.
    """
    # "run*" processes are the main pipeline runs; remove all tracked processes
    # for them, otherwise only the single completed helper process
    if not p_id or p_id.startswith("run"):
        parent.log_reader.remove_processes()
    else:
        parent.log_reader.remove_process(p_id)
    parent.refresh_data()
    # Post-run bookkeeping, skipped for export sub-steps
    if p_id.startswith("run") and "_export_" not in p_id:
        if "update_frame_names" in dir(parent):
            parent.update_frame_names()
        # Do not sync partial results if the process was killed by the user
        if "sync_with_s3" in dir(parent) and not parent.is_process_killed:
            if parent.parent.is_aws:
                parent.sync_with_s3()
    if len(parent.log_reader.get_processes()) == 0:
        # Re-enable UI elements
        switch_ui_elements_for_processing(parent, parent.log_reader.gb, True)
    # We may have data to enable other tabs
    if p_id.startswith("run"):
        [s.refresh_data() for s in parent.parent.sections if s != parent]
        if "update_run_button_text" in dir(parent):
            parent.update_run_button_text()
    # Reset the kill flag for the next run
    parent.is_process_killed = False
def populate_dropdown(parent, gb, dd):
    """Populates a dropdown on the specified tab.

    The dropdown's object name encodes which files it lists: "dd_<tab>_<tag>"
    is resolved via parent.get_files(tag). The previous selection is restored
    if it is still present after repopulating.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        dd (QtWidgets.QComboBox): Dropdown UI element.
    """
    project = parent.parent.path_project
    t = dep_util.remove_prefix(gb.objectName(), "gb_")
    # Remember the current selection so it can be re-applied below
    dd_prev_text = dd.currentText() if dd.count() > 0 else ""
    tag = dep_util.remove_prefix(dd.objectName(), f"dd_{t}_")
    ps = parent.get_files(tag)
    # Entries are displayed relative to the project root
    dep_util.populate_dropdown(dd, ps, f"{project}/")
    dep_util.update_qt_dropdown(dd, dd_prev_text, add_if_missing=False)
def populate_dropdowns(parent, gb, dd_first=None):
    """Populates the dropdowns on the specified tab.

    Dropdowns listed in dd_first are populated before the rest, which matters
    when later dropdowns depend on earlier selections.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        dd_first (list[QtWidgets.QGroupBox], optional): Dropdowns to populate first.
    """
    priority = dd_first if dd_first else []
    remaining = [
        dd for dd in gb.findChildren(QtWidgets.QComboBox) if dd not in priority
    ]
    for dd in priority + remaining:
        populate_dropdown(parent, gb, dd)
def refresh_data(parent):
    """Updates UI elements to be in sync with data on disk on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tag = parent.tag
    dlg = parent.dlg
    tab = getattr(dlg, f"t_{tag}", None)
    # Tabs after calibration need the calibrated rig file
    if tag in ["bg", "depth", "export"]:
        parent.path_rig_json = get_calibrated_rig_json(parent)
    if tag == "depth":
        parent.update_bg_checkbox()
    # This locks the dropdown callbacks while we re-populate them
    parent.is_refreshing_data = True
    for gb in tab.findChildren(QtWidgets.QGroupBox):
        gb.setEnabled(True)
        parent.populate_dropdowns(gb)
        parent.update_buttons(gb)
    if "flagfile_fn" in dir(parent):
        sync_data_and_flagfile(parent, parent.flagfile_fn)
    parent.disable_tab_if_no_data()
    # Unlock dropdown callbacks now that repopulation is complete
    parent.is_refreshing_data = False
def update_flagfile_edit(parent, flagfile_fn, switch_to_flag_tab=False):
    """Updates the edit box for the flagfile on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flagfile_fn (str): Name of the flagfile.
        switch_to_flag_tab (bool, optional): Whether or not to switch tabs after updating.
    """
    if not os.path.isfile(flagfile_fn):
        return
    tag = parent.tag
    dlg = parent.dlg
    text = getattr(dlg, f"text_{tag}_flagfile_edit", None)
    preview = getattr(dlg, f"w_{tag}_preview", None)
    # Use a context manager so the file handle is closed deterministically
    # (previously `open(...).read()` leaked the handle until GC)
    with open(flagfile_fn) as f:
        text.setPlainText(f.read())
    if switch_to_flag_tab:
        # Index 1 is the flagfile tab of the preview widget
        preview.setCurrentIndex(1)
def update_data_or_flags(
    parent, flagfile_fn, flagfile_from_data, switch_to_flag_tab=False
):
    """Updates the flagfile from the UI elements or vice versa on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flagfile_fn (str): Name of the flagfile.
        flagfile_from_data (bool): Whether to load the flagfile from the data (True) or
            vice versa (False).
        switch_to_flag_tab (bool, optional): Whether or not to switch tabs after updating.
    """
    if not flagfile_fn:
        return
    flags = get_flags_from_flagfile(flagfile_fn)
    # Direction of the sync: UI -> flags or flags -> UI
    if flagfile_from_data:
        parent.update_flags_from_data(flags)
    else:
        parent.update_data_from_flags(flags)
    if flagfile_from_data:
        # Overwrite flag file (sorted for a stable, diffable on-disk order)
        sorted_flags = collections.OrderedDict(sorted(flags.items()))
        dep_util.write_flagfile(flagfile_fn, sorted_flags)
        # Refresh flagfile edit window
        parent.update_flagfile_edit(flagfile_fn, switch_to_flag_tab)
def sync_data_and_flagfile(
    parent, flagfile_fn, set_label=True, switch_to_flag_tab=False
):
    """Synchronizes displayed UI elements and contents of the flagfile.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flagfile_fn (str): Name of the flagfile.
        set_label (bool, optional): Whether or not to update the flagfile label in the UI.
            NOTE(review): this parameter is currently unused — the label is
            always updated; confirm whether callers rely on it.
        switch_to_flag_tab (bool, optional): Whether or not to switch tabs after updating.
    """
    tag = parent.tag
    dlg = parent.dlg
    label = getattr(dlg, f"label_{tag}_flagfile_path", None)
    flagfile = os.path.basename(flagfile_fn)
    label.setText(flagfile)
    # flag file to data first, then data to flag file for missing info
    flagfile_from_data = False
    parent.update_data_or_flags(flagfile_fn, flagfile_from_data, switch_to_flag_tab)
    parent.update_data_or_flags(flagfile_fn, not flagfile_from_data, switch_to_flag_tab)
def disable_tab_if_no_data(parent, btn_run):
    """Prevents navigation to the tab if the required data is not present on the specified tab.

    A disabled Run button indicates missing inputs, so the whole tab is locked.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        btn_run (QtWidgets.QPushButton): UI button for tab switch.
    """
    if btn_run.isEnabled():
        return
    dep_util.set_tab_enabled(parent.dlg.w_steps, parent.tag, enabled=False)
def setup_project(parent, mkdirs=False):
    """Retrieves any missing flagfiles and sets the default flags on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        mkdirs (bool, optional): Whether or not to make the defined directories.
            NOTE(review): this parameter is currently unused in this function —
            confirm whether it is consumed by an override elsewhere.
    """
    # Lock dropdown callbacks while the initial refresh runs
    parent.is_refreshing_data = True
    parent.log_reader.log_header()
    parent.refresh_data()
    parent.is_refreshing_data = False
def save_flag_file(parent, flagfile_fn):
    """Saves flagfile from the UI to disk on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flagfile_fn (str): Name of the flagfile.
    """
    if not os.path.isfile(flagfile_fn):
        return
    tag = parent.tag
    dlg = parent.dlg
    text_edit = getattr(dlg, f"text_{tag}_flagfile_edit", None)
    btn_save = getattr(dlg, f"btn_{tag}_flagfile_save", None)
    # The context manager closes the file; the explicit close() the original
    # called inside the with-block was redundant and has been removed
    with open(flagfile_fn, "w") as f:
        f.write(text_edit.toPlainText())
    # Disable save button until the text is edited again
    btn_save.setEnabled(False)
    # Update corresponding groupbox from the newly saved flags
    flagfile_from_data = False  # flagfile to data
    parent.update_data_or_flags(flagfile_fn, flagfile_from_data)
def update_flagfile(parent, flagfile_fn):
    """Updates the flagfile on disk from the current UI state on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flagfile_fn (str): Name of the flagfile.
    """
    # Push the current UI values into the flagfile (data -> flags direction)
    parent.update_data_or_flags(flagfile_fn, flagfile_from_data=True)
def retrieve_missing_flagfiles(parent):
    """Copies the missing flagfiles to project for local modification on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Raises:
        ValueError: If the tab has no known source flagfile. (The original
            code fell through to a NameError on `ff_base` in this case.)
    """
    tag = parent.tag
    if tag == "calibrate":
        ff_base = "calibration.flags"
    elif tag in ["bg", "depth", "export"]:
        ff_base = "render.flags"
    else:
        raise ValueError(f"No default flagfile known for tab '{tag}'")
    # Pairs of (source name in dep_flags_dir, absolute destination path)
    ffs_expected = [[ff_base, parent.flagfile_fn]]
    if tag in ["depth", "export"]:
        ff_aws_create = os.path.join(
            parent.path_flags, parent.app_name_to_flagfile[parent.app_aws_create]
        )
        ffs_expected.append(["aws_create.flags", ff_aws_create])
    for ff_src_rel, ff_dst_abs in ffs_expected:
        if not os.path.isfile(ff_dst_abs):
            # Copy the template flagfile into the project and sync it to the UI
            ff_src_abs = os.path.join(dep_flags_dir, ff_src_rel)
            os.makedirs(os.path.dirname(ff_dst_abs), exist_ok=True)
            shutil.copyfile(ff_src_abs, ff_dst_abs)
            update_flagfile(parent, ff_dst_abs)
def add_default_flags(parent):
    """Retrieves the default flags to the local flagfile on the specified tab from
    either the source or scripts binaries.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    # Maps source file path -> set of flag names whose defaults we want
    default_flags = {}
    tag = parent.tag
    if tag in ["bg", "depth"]:
        default_flags.update(
            {
                os.path.join(depth_est_src, "DerpCLI.cpp"): {
                    "max_depth_m",
                    "min_depth_m",
                    "resolution",
                    "var_high_thresh",
                    "var_noise_floor",
                }
            }
        )
    if tag == "depth":
        default_flags.update(
            {
                os.path.join(render_scripts, "setup.py"): {"do_temporal_filter"},
                os.path.join(depth_est_src, "TemporalBilateralFilter.cpp"): {
                    "time_radius"
                },
                os.path.join(render_src, "GenerateForegroundMasks.cpp"): {
                    "blur_radius",
                    "morph_closing_size",
                    "threshold",
                },
            }
        )
    elif tag == "export":
        default_flags.update(
            {
                os.path.join(render_src, "SimpleMeshRenderer.cpp"): {"width"},
                os.path.join(render_src, "ConvertToBinary.cpp"): {"output_formats"},
            }
        )
    flagfile_fn = os.path.join(parent.path_flags, parent.flagfile_basename)
    flags = get_flags_from_flagfile(flagfile_fn)
    for source in default_flags:
        if os.path.isfile(source):
            source_flags = get_flags(source)
        else:
            # BUG FIX: this branch was a bare `source_flags` expression, which
            # raised NameError (or reused a stale value from the previous
            # iteration) when the source file was missing
            source_flags = []
        desired_flags = default_flags[source]
        for source_flag in source_flags:
            flag_name = source_flag["name"]
            # Only add the default flag if not already present in current flags
            if flag_name in desired_flags:
                if flag_name not in flags or flags[flag_name] == "":
                    flags[flag_name] = source_flag["default"]
    # Add run flags (which pipeline stages each tab executes)
    if tag == "bg":
        flags["run_generate_foreground_masks"] = False
        flags["run_precompute_resizes"] = True
        flags["run_depth_estimation"] = True
        flags["run_convert_to_binary"] = False
        flags["run_fusion"] = False
        flags["run_simple_mesh_renderer"] = False
        flags["use_foreground_masks"] = False
    elif tag == "depth":
        flags["run_depth_estimation"] = True
        flags["run_precompute_resizes"] = True
        flags["run_precompute_resizes_foreground"] = True
        flags["run_convert_to_binary"] = False
        flags["run_fusion"] = False
        flags["run_simple_mesh_renderer"] = False
    elif tag == "export":
        flags["run_generate_foreground_masks"] = False
        flags["run_precompute_resizes"] = False
        flags["run_precompute_resizes_foreground"] = False
        flags["run_depth_estimation"] = False
    # Overwrite flag file (sorted for a stable on-disk order)
    sorted_flags = collections.OrderedDict(sorted(flags.items()))
    dep_util.write_flagfile(flagfile_fn, sorted_flags)
def get_calibrated_rig_json(parent):
    """Finds calibrated rig in the project.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Returns:
        str: Name of the calibrated rig (assumes the rig contains "_calibrated.json"),
        or "" when zero or more than one candidate is found.
    """
    can_log = "log_reader" in dir(parent)
    rigs = dep_util.get_files_ext(parent.path_rigs, "json", "calibrated")
    if not rigs:
        if can_log:
            parent.log_reader.log_warning(f"No rig files found in {parent.path_rigs}")
        return ""
    if len(rigs) > 1:
        # Ambiguous: refuse to guess which calibrated rig to use
        if can_log:
            joined = "\n".join(rigs)
            parent.log_reader.log_warning(
                f"Too many rig files found in {parent.path_rigs}:\n{joined}"
            )
        return ""
    return rigs[0]
def update_run_button_text(parent, btn):
    """Updates the text of the Run button depending on the existence of a process
    running on the cloud.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        btn (QtWidgets.QPushButton): The Run button to re-label.
    """
    # "Re-attach" signals that pressing the button resumes watching an
    # already-running cloud job instead of starting a new one
    btn.setText("Re-attach" if is_cloud_running_process(parent) else "Run")
def update_buttons(parent, gb, ignore=None):
    """Enables buttons and dropdowns according to whether or not data is present on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        ignore (list[QtWidgets.QGroupBox], optional): Buttons to not update.

    Returns:
        tuple[bool, bool, bool]: Whether or not the UI is currently running a
            process, whether all dropdowns have a selection, and whether all
            line edits have a value.
    """
    if not ignore:
        ignore = []
    # A dropdown counts as filled if it has a selection or is explicitly ignored
    has_all_dropdowns = all(
        bool(dd.currentText()) or dd in ignore
        for dd in gb.findChildren(QtWidgets.QComboBox)
    )
    # Unnamed line edits (objectName == "") are internal Qt widgets; skip them
    has_all_values = all(
        not v.objectName() or bool(v.text()) or v in ignore
        for v in gb.findChildren(QtWidgets.QLineEdit)
    )
    is_running = parent.log_reader.is_running()
    for btn in gb.findChildren(QtWidgets.QPushButton):
        btn_name = btn.objectName()
        if btn in ignore:
            continue
        if btn_name.endswith("_run"):
            btn.setEnabled(not is_running and has_all_dropdowns and has_all_values)
        elif btn_name.endswith("_cancel"):
            btn.setEnabled(is_running)
        elif btn_name.endswith("_threshs"):
            btn.setEnabled(not is_running and has_all_dropdowns)
        elif btn_name.endswith(("_view", "_download_meshes")):
            btn.setEnabled(not is_running)
    # BUG FIX: the original returned `is_running` twice and never returned
    # `has_all_values`, despite computing it and documenting a three-part tuple
    return is_running, has_all_dropdowns, has_all_values
def on_changed_dropdown(parent, gb, dd):
    """Callback event handler for changed dropdown on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        dd (QtWidgets.QComboBox): Dropdown UI element.
    """
    if not parent.is_refreshing_data:
        name = dd.objectName()
        # farm_ec2 dropdowns are not used in flagfile
        if not name.endswith("_farm_ec2"):
            parent.update_flagfile(parent.flagfile_fn)
        # Check if we need to update the threshold image
        if name.endswith(("_camera", "_frame_bg", "_first")):
            # Check if we are already in a threshold tab, else default to color variance
            tag = parent.tag
            tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
            tab_idx = tab_widget.currentIndex()
            # Renamed from `type` to avoid shadowing the builtin
            if tab_widget.widget(tab_idx).objectName().endswith("_fg_mask"):
                threshs_type = type_fg_mask
            else:
                threshs_type = type_color_var
            if "run_thresholds" in dir(parent):
                parent.run_thresholds(threshs_type)
def on_changed_line_edit(parent, gb, le):
    """Callback event handler for changed line edit on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        le (_): Ignore
    """
    # Ignore programmatic edits made while the UI is being repopulated
    if parent.is_refreshing_data:
        return
    parent.update_buttons(gb)
    parent.update_flagfile(parent.flagfile_fn)
def setup_groupbox(gb, callbacks):
    """Sets up callbacks for any groupboxes on the specified tab.

    Args:
        gb (QtWidgets.QGroupBox): Group box for the tab.
        callbacks (dict[QtWidgets.QGroupBox, func : QEvent -> _]): Callbacks for the UI elements.
    """
    # Only checkable group boxes emit `toggled`; connect only those we have
    # a registered callback for
    if gb in callbacks and gb.isCheckable():
        gb.toggled.connect(callbacks[gb])
def setup_checkboxes(gb, callbacks):
    """Sets up callbacks for any checkboxes on the specified tab.

    Args:
        gb (QtWidgets.QGroupBox): Group box for the tab.
        callbacks (dict[QtWidgets.QGroupBox, func : QEvent -> _]): Callbacks for the UI elements.
    """
    for checkbox in gb.findChildren(QtWidgets.QCheckBox):
        # Connect only checkboxes that have a registered callback
        if checkbox in callbacks:
            checkbox.stateChanged.connect(callbacks[checkbox])
def setup_dropdowns(parent, gb):
    """Sets up callbacks for any dropdowns on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QComboBox): Group box for the tab.
    """
    if "on_changed_dropdown" in dir(parent):
        for dd in gb.findChildren(QtWidgets.QComboBox):
            # `gb` and `dd` are bound as lambda defaults to avoid the
            # late-binding closure pitfall inside the loop.
            # NOTE(review): both currentTextChanged (fires on programmatic
            # changes) and activated (fires on user selection, even if
            # unchanged) are connected, so a user edit can invoke the
            # callback twice — confirm this is intentional.
            dd.currentTextChanged.connect(
                lambda state, y=gb, z=dd: parent.on_changed_dropdown(y, z)
            )
            dd.activated.connect(
                lambda state, y=gb, z=dd: parent.on_changed_dropdown(y, z)
            )
def setup_lineedits(parent, gb):
    """Sets up callbacks for any line edits on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
    """
    if "on_changed_line_edit" not in dir(parent):
        return
    for line_edit in gb.findChildren(QtWidgets.QLineEdit):
        # Bind gb/line_edit as defaults to avoid the late-binding closure pitfall
        line_edit.textChanged.connect(
            lambda state, y=gb, z=line_edit: parent.on_changed_line_edit(y, z)
        )
def setup_buttons(parent, gb, callbacks):
    """Sets up callbacks for any buttons on the specified tab.

    Buttons present in `callbacks` use the supplied handler; otherwise a
    default handler is chosen from the button's object-name suffix.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        callbacks (dict[QtWidgets.QPushButton, func : QEvent -> _]): Callbacks for the UI elements.
    """

    def _default_callback(name):
        # Resolve the conventional handler for a button name suffix
        if name.endswith("_refresh"):
            return parent.refresh
        if name.endswith("_run"):
            return parent.run_process
        if name.endswith("_cancel"):
            return parent.cancel_process
        if name.endswith("_threshs"):
            return parent.run_thresholds
        if name.endswith("_logs"):
            return parent.get_logs
        parent.log_reader.log_error(f"Cannot setup button {name}")
        return None

    for btn in gb.findChildren(QtWidgets.QPushButton):
        if btn in callbacks:
            callback = callbacks[btn]
        else:
            callback = _default_callback(btn.objectName())
        if callback:
            btn.clicked.connect(callback)
def on_changed_preview(parent):
    """Callback event handler for changed image previews on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tag = parent.tag
    tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
    tab_idx = tab_widget.currentIndex()
    tab_name = tab_widget.widget(tab_idx).objectName()
    # Only threshold tabs need re-rendering when switched to
    if "_threshs_" in tab_name:
        # Renamed from `type` to avoid shadowing the builtin
        if tab_name.endswith("_fg_mask"):
            threshs_type = type_fg_mask
        else:
            threshs_type = type_color_var
        if not parent.is_refreshing_data:
            parent.run_thresholds(threshs_type)
def setup_preview(parent):
    """Creates preview window in the UI and connects a callback on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tag = parent.tag
    dlg = parent.dlg
    text_log = getattr(dlg, f"text_{tag}_log", None)
    clear_button = getattr(dlg, f"btn_{tag}_log_clear", None)
    clear_button.clicked.connect(lambda: text_log.clear())
    preview = getattr(dlg, f"w_{tag}_preview", None)
    # Start on the first preview tab
    preview.setCurrentIndex(0)
    if "on_changed_preview" in dir(parent):
        preview.currentChanged.connect(parent.on_changed_preview)
def setup_data(parent, callbacks=None):
    """Sets up callbacks and initial UI element statuses on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        callbacks (dict[QtWidgets.QGroupBox, func : QEvent -> _]): Callbacks for the UI elements.
    """
    callbacks = callbacks if callbacks else {}
    tab = getattr(parent.dlg, f"t_{parent.tag}", None)
    # Wire every widget family within each group box of the tab
    for gb in tab.findChildren(QtWidgets.QGroupBox):
        setup_groupbox(gb, callbacks)
        setup_checkboxes(gb, callbacks)
        setup_dropdowns(parent, gb)
        setup_lineedits(parent, gb)
        setup_buttons(parent, gb, callbacks)
    # Preview tabs
    setup_preview(parent)
def update_noise_detail(parent, noise, detail):
    """Updates noise/detail thresholds interaction on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        noise (float): Noise threshold.
        detail (float): Detail threshold.

    NOTE(review): `noise` and `detail` are not read here — the flagfile sync
    presumably pulls the current slider values directly from the UI; confirm
    whether the parameters are kept only for callback-signature compatibility.
    """
    # Modify flagfile
    parent.update_data_or_flags(
        parent.flagfile_fn, flagfile_from_data=True, switch_to_flag_tab=False
    )
    # Update flagfile edit window
    parent.update_flagfile_edit(parent.flagfile_fn, switch_to_flag_tab=False)
def update_fg_masks_thresholds(parent, blur, closing, thresh):
    """Updates thresholds and display for the foreground masking on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        blur (int, optional): Gaussian blur radius.
        closing (int, optional): Closure (for sealing holes).
        thresh (int, optional): Threshold applied to segment foreground and background

    NOTE(review): `blur`, `closing`, and `thresh` are not read here — the
    flagfile sync presumably pulls slider values directly from the UI; confirm
    whether the parameters exist only for callback-signature compatibility.
    """
    # Modify flagfile
    parent.update_data_or_flags(
        parent.flagfile_fn, flagfile_from_data=True, switch_to_flag_tab=False
    )
    # Update flagfile edit window
    parent.update_flagfile_edit(parent.flagfile_fn, switch_to_flag_tab=False)
def log_missing_image(parent, path_color, cam_id, frame):
    """Prints a warning if an image cannot be located.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        path_color (str): Path to the directory with color images.
        cam_id (str): Name of the camera.
        frame (str): Name of the frame (0-padded, six digits).
    """
    message = f"Cannot find frame {cam_id}/{frame} in {path_color}"
    parent.log_reader.log_warning(message)
def update_thresholds_color_variance(parent, path_color, labels=None):
    """Updates the displayed thresholds for color variance on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        path_color (str): Path to the directory with color images.
        labels (list[str], optional): Labels used to filter UI elements to update.
    """
    labels = labels if labels is not None else ("_frame_bg", "_first")
    dlg = parent.dlg
    # Read the selected frame/camera from the matching dropdowns.
    # NOTE(review): `frame` and `cam_id` are unbound (NameError below) if no
    # dropdown matches — presumably every tab has both; confirm.
    for dd in parent.dlg.findChildren(QtWidgets.QComboBox):
        name = dd.objectName()
        if name.endswith(labels):
            frame = dd.currentText()
        elif name.endswith("_camera"):
            cam_id = dd.currentText()
    image_path = dep_util.get_level_image_path(path_color, cam_id, frame)
    if not image_path:
        log_missing_image(parent, path_color, cam_id, frame)
        return
    tag = parent.tag
    w_image = getattr(dlg, f"w_{tag}_threshs_image_{type_color_var}", None)
    # Foreground masks are generated at the finest level of the pyramid
    res = max(config.WIDTHS)
    w_image.color_var.set_image(image_path, res)
    # Current slider values drive the threshold overlay
    noise = float(parent.slider_noise.get_label_text())
    detail = float(parent.slider_detail.get_label_text())
    project = parent.parent.path_project
    # Show the image path relative to the project root
    fn = dep_util.remove_prefix(image_path, f"{project}/")
    getattr(dlg, f"label_{tag}_threshs_filename_{type_color_var}", None).setText(fn)
    # Force update
    w_image.update_thresholds(noise=noise, detail=detail)
def update_thresholds_fg_mask(parent, paths_color):
    """Updates thresholds and display for the foreground masking using values from UI
    on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        paths_color (list[str]): Paths to the directory with color images
            ([0] = background frames, [1] = foreground/first frames).
    """
    dlg = parent.dlg
    # frames[0] = background frame, frames[1] = first (foreground) frame
    frames = [None] * 2
    # NOTE(review): `cam_id` is unbound below if no "_camera" dropdown exists —
    # presumably every tab has one; confirm.
    for dd in parent.dlg.findChildren(QtWidgets.QComboBox):
        name = dd.objectName()
        if name.endswith("_frame_bg"):
            frames[0] = dd.currentText()
        elif name.endswith("_first"):
            frames[1] = dd.currentText()
        elif name.endswith("_camera"):
            cam_id = dd.currentText()
    bg_image_path = dep_util.get_level_image_path(paths_color[0], cam_id, frames[0])
    if not bg_image_path:
        log_missing_image(parent, paths_color[0], cam_id, frames[0])
        return
    fg_image_path = dep_util.get_level_image_path(paths_color[1], cam_id, frames[1])
    if not fg_image_path:
        log_missing_image(parent, paths_color[1], cam_id, frames[1])
        return
    tag = parent.tag
    w_image = getattr(dlg, f"w_{tag}_threshs_image_{type_fg_mask}", None)
    # Foreground masks are generated at the finest level of the pyramid
    res = max(config.WIDTHS)
    w_image.fg_mask.set_images(bg_image_path, fg_image_path, res)
    # Current slider values drive the mask preview
    blur = float(parent.slider_blur.get_label_text())
    closing = float(parent.slider_closing.get_label_text())
    thresh = float(parent.slider_thresh.get_label_text())
    project = parent.parent.path_project
    # Show both image paths relative to the project root
    fn_bg = dep_util.remove_prefix(bg_image_path, f"{project}/")
    fn_fg = dep_util.remove_prefix(fg_image_path, f"{project}/")
    getattr(dlg, f"label_{tag}_threshs_filename_{type_fg_mask}", None).setText(
        f"{fn_bg} vs {fn_fg}"
    )
    # Force update
    w_image.update_thresholds(blur=blur, closing=closing, thresh=thresh)
def run_thresholds_after_wait(parent, type):
    """Computes the threshold and displays after a delay on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        type (Union[ColorVariance, ForegroundMask]): Instance where thresholds
            can be run. (The parameter name shadows the builtin `type`, but is
            part of the public callback signature and so is kept.)
    """
    # Apply flag file values in case it had unsaved changes
    parent.save_flag_file()
    tag = parent.tag
    dlg = parent.dlg
    label = getattr(dlg, f"label_{tag}_threshs_tooltip_{type}", None)
    label.setToolTip(parent.threshs_tooltip)
    # Reset zoom before redrawing the threshold image
    getattr(dlg, f"w_{tag}_threshs_image_{type}", None).set_zoom_level(0)
    if type == type_color_var:
        parent.setup_thresholds_color_variance()
        parent.update_thresholds_color_variance()
    elif type == type_fg_mask:
        parent.setup_thresholds_fg_masks()
        parent.update_thresholds_fg_mask()
def run_thresholds(parent, type):
    """Runs thresholding based on values in the UI and update UI display on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        type (Union[ColorVariance, ForegroundMask]): Instance where thresholds are run.
            (The parameter name shadows the builtin `type`, but is part of the
            public callback signature and so is kept.)
    """
    tag = parent.tag
    tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
    dep_util.switch_tab(tab_widget, f"_threshs_{type}")
    # HACK: if we try to draw on a widget too quickly after switching tabs the resulting image
    # does not span all the way to the width of the widget. We can wait a few milliseconds to
    # let the UI "settle"
    # The timer is kept on `parent` so it is not garbage-collected before firing
    parent.timer = QtCore.QTimer(parent.parent)
    parent.timer.timeout.connect(lambda: parent.run_thresholds_after_wait(type))
    parent.timer.setSingleShot(True)
    parent.timer.start(10)  # 10ms
def output_has_images(output_dirs):
    """Whether or not outputs already have results.

    Args:
        output_dirs (list[str]): List of directories where outputs will be saved.

    Returns:
        bool: True if at least one of the output directories contains a valid file.
    """
    return any(dep_util.get_first_file_path(d) for d in output_dirs)
def run_process_check_existing_output(parent, gb, app_name, flagfile_fn, p_id):
    """Runs a process after archiving existing outputs if recompute is requested.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        app_name (str): Name of the binary being executed.
        flagfile_fn (str): Name of the flagfile.
        p_id (str): PID name of the process to be run.
    """
    tag = parent.tag
    cb_recompute = getattr(parent.dlg, f"cb_{tag}_recompute", None)
    # BUG FIX: `needs_rename` was left unbound (NameError at the call below)
    # when the tab has no recompute checkbox; default to False in that case
    needs_rename = cb_recompute is not None and cb_recompute.isChecked()
    if needs_rename:
        # Rename current output directories using timestamp and create new empty ones
        ts = dep_util.get_timestamp()
        for d in parent.output_dirs:
            if not os.path.isdir(d):
                continue
            d_dst = f"{d}_{ts}"
            parent.log_reader.log_notice(
                f"Saving copy of {d} to {d_dst} before re-computing"
            )
            shutil.move(d, d_dst)
            os.makedirs(d, exist_ok=True)
    # overwrite=True only when outputs were just archived away
    run_process(parent, gb, app_name, flagfile_fn, p_id, not needs_rename)
def start_process(parent, cmd, gb, p_id, run_silently=False):
    """Runs a terminal process and disables UI element interaction.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        cmd (str): Command to run in the terminal.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        p_id (str): PID name of the process being started.
        run_silently (bool, optional): Whether to suppress echoing the command
            to the log before running it.
    """
    if not run_silently:
        parent.log_reader.log(f"CMD: {cmd}")
    # Remember which group box launched the process so it can be re-enabled
    # when the process finishes
    parent.log_reader.gb = gb
    parent.log_reader.setup_process(p_id)
    parent.log_reader.start_process(p_id, cmd)
    # Switch to log tab
    tag = parent.tag
    tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
    dep_util.switch_tab(tab_widget, "_log")
    # Disable UI elements
    parent.switch_ui_elements_for_processing(False)
def run_process(
    parent, gb, app_name=None, flagfile_fn=None, p_id="run", overwrite=False
):
    """Runs an application on the terminal, using the associated flagfile.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        app_name (str, optional): Name of the binary being executed.
        flagfile_fn (str, optional): Name of the flagfile to supply to the binary. this
            will default to the flagfile associated with the binary if unspecified.
        p_id (str, optional): PID name of the process being started.
        overwrite (bool, optional): Whether or not to overwrite the local flagfile on disk.
    """
    # Apply flag file values in case it had unsaved changes
    parent.save_flag_file()
    if not app_name:
        app_name = parent.app_name
    is_py_script = app_name.endswith(".py")
    # Python scripts live with the UI scripts; everything else is a built binary.
    # (Renamed from `dir` to avoid shadowing the builtin.)
    app_dir = scripts_dir if is_py_script else dep_bin_dir
    app_path = os.path.join(app_dir, app_name)
    if not os.path.isfile(app_path):
        parent.log_reader.log_warning(f"App doesn't exist: {app_path}")
        return
    if not flagfile_fn:
        flagfile_fn = parent.flagfile_fn
    # If results already exist and overwrite is not forced, detour through the
    # archive-and-rerun path (which calls back into this function)
    if output_has_images(parent.output_dirs) and not overwrite:
        run_process_check_existing_output(parent, gb, app_name, flagfile_fn, p_id)
        return
    cmd = f'{app_path} --flagfile="{flagfile_fn}"'
    if is_py_script:
        cmd = f"python3.7 -u {cmd}"
    start_process(parent, cmd, gb, p_id)
def update_thresholds(parent, gb, type):
    """Updates the displayed thresholds for either color variance or foreground masks.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        type (Union[ColorVariance, ForegroundMask]): Instance where thresholds
            can be run. (The parameter name shadows the builtin `type`, but is
            part of the public callback signature and so is kept.)
    """
    # Read the current slider values and forward them to the matching updater
    if type == type_color_var:
        noise = parent.slider_noise.get_label_text()
        detail = parent.slider_detail.get_label_text()
        parent.update_noise_detail(noise, detail)
    elif type == type_fg_mask:
        blur = parent.slider_blur.get_label_text()
        closing = parent.slider_closing.get_label_text()
        thresh = parent.slider_thresh.get_label_text()
        parent.update_fg_masks_thresholds(blur, closing, thresh)
    # Update buttons
    parent.update_buttons(gb)
def on_state_changed_partial_360(parent):
    """Callback event handler for changed "partial coverage" checkbox on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    # Ignore programmatic toggles made while the UI is being repopulated
    if parent.is_refreshing_data:
        return
    parent.update_flagfile(parent.flagfile_fn)
def on_state_changed_recompute(parent):
    """Callback event handler for changed "recompute" checkbox on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    # Ignore programmatic toggles made while the UI is being repopulated
    if parent.is_refreshing_data:
        return
    parent.update_flagfile(parent.flagfile_fn)
def on_state_changed_use_bg(parent, gb):
    """Callback event handler for changed "use background" checkbox on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
    """
    # Ignore programmatic toggles made while the UI is being repopulated
    if parent.is_refreshing_data:
        return
    parent.update_buttons(gb)
    parent.update_flagfile(parent.flagfile_fn)
def on_state_changed_farm(parent, state):
    """Callback event handler for a changed "AWS" checkbox.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        state (int): Qt check state; any value > 0 means "farm enabled".
    """
    parent.is_farm = state > 0
    if parent.is_refreshing_data:
        return
    # Not every tab defines these refresh hooks; call them only if present.
    if "update_frame_range_dropdowns" in dir(parent):
        parent.update_frame_range_dropdowns()
    if "update_run_button_text" in dir(parent):
        parent.update_run_button_text()
    parent.update_flagfile(parent.flagfile_fn)
def setup_thresholds(parent, types):
    """Sets up the threshold slider widgets on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        types (list[Union[ColorVariance, ForegroundMask]]): Instances where
            thresholds can be run. (The original docstring documented a
            nonexistent ``type`` parameter.)
    """
    tag = parent.tag
    dlg = parent.dlg
    for attr in threshold_sliders:
        # Renamed locals to avoid shadowing the builtins `type` and `max`.
        slider_type, printed, num, max_value, default = threshold_sliders[attr]
        if slider_type in types:
            name = getattr(dlg, f"label_{tag}_threshs_{num}_name_{slider_type}", None)
            hs = getattr(dlg, f"hs_{tag}_threshs_{num}_{slider_type}", None)
            label = getattr(dlg, f"label_{tag}_threshs_{num}_{slider_type}", None)
            slider = SliderWidget(slider_type, attr, name, printed, hs, label, max_value, default)
            setattr(parent, f"slider_{attr}", slider)
    for slider_type in types:
        # Wire each threshold preview image to its viewer widget.
        w_image = getattr(dlg, f"w_{tag}_threshs_image_{slider_type}", None)
        w_viewer = getattr(dlg, f"w_{tag}_image_viewer_{slider_type}", None)
        w_image.set_image_viewer(w_viewer)
def setup_thresholds_color_variance(parent):
    """Sets up the color variance threshold sliders on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    parent.slider_noise.setup(callback=parent.on_changed_slider)
    parent.slider_detail.setup(callback=parent.on_changed_slider)
def setup_thresholds_fg_masks(parent):
    """Sets up the foreground-mask threshold sliders on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    parent.slider_blur.setup(callback=parent.on_changed_slider)
    parent.slider_closing.setup(callback=parent.on_changed_slider)
    parent.slider_thresh.setup(callback=parent.on_changed_slider)
def update_data_from_flags(
    parent,
    flags,
    dropdowns=None,
    values=None,
    checkboxes=None,
    labels=None,
    prefix=None,
):
    """Updates UI elements from the flags on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flags (dict[str, _]): Flags and their corresponding values.
        dropdowns (list[QtWidgets.QComboBox], optional): Dropdowns in the tab.
        values (dict[QtWidgets.QLineEdit, _], optional): Map from UI elements to values.
        checkboxes (list[QtWidgets.QCheckBox], optional): Checkboxes in the tab.
        labels (list[QtWidgets.QLabel], optional): Labels in the tab.
        prefix (str, optional): Prefix to append to values in the population of tab values.

    Note:
        Each of dropdowns/values/checkboxes/labels is iterated as
        (flag_key, widget) pairs below, so callers appear to pass iterables
        of 2-tuples despite the dict wording above — TODO confirm.
    """
    if not dropdowns:
        dropdowns = []
    if not values:
        values = []
    if not checkboxes:
        checkboxes = []
    if not labels:
        labels = []
    flagfile = parent.flagfile_basename
    if not prefix:
        # Default prefix is the project root, so flag paths become absolute.
        prefix = f"{parent.parent.path_project}/"
    for key, dd in dropdowns:
        error = dep_util.update_qt_dropdown_from_flags(flags, key, prefix, dd)
        if error:
            parent.log_reader.log_warning(f"{flagfile}: {error}")
    for key, val in values:
        dep_util.update_qt_lineedit_from_flags(flags, key, prefix, val)
    for key, cb in checkboxes:
        error = dep_util.update_qt_checkbox_from_flags(flags, key, prefix, cb)
        if error:
            parent.log_reader.log_warning(f"{flagfile}: {error}")
    for key, label in labels:
        dep_util.update_qt_label_from_flags(flags, key, prefix, label)
def get_notation(parent, attr):
    """Gets the display format string for a slider attribute.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element
            (only used to log on invalid input).
        attr (str): Name of the attribute.

    Returns:
        str: Format string corresponding to the display notation.

    Raises:
        ValueError: If attr is not a recognized slider attribute. (The
            original logged the error and then crashed with
            UnboundLocalError on the return statement.)
    """
    if attr in ["noise", "detail", "thresh"]:
        return "{:.3e}"
    if attr in ["blur", "closing"]:
        return "{:d}"
    parent.log_reader.log_error(f"Invalid slider attr: {attr}")
    raise ValueError(f"Invalid slider attr: {attr}")
def on_changed_slider(parent, slider, value):
    """Callback event handler for changes to a slider UI element.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        slider (QtWidgets.QSlider): Slider UI element.
        value (int/float): Value of the slider element.
    """
    # Renamed local to avoid shadowing the builtin `type`.
    slider_type = slider.type
    attr = slider.attr
    notation = get_notation(parent, attr)
    if notation == "{:d}":
        value = int(value)
    slider.set_label(value, notation)
    w_image = getattr(parent.dlg, f"w_{parent.tag}_threshs_image_{slider_type}", None)
    if w_image.update_thresholds(**{attr: value}):
        # Propagate the new threshold to the flagfile
        parent.update_thresholds(slider_type)
def initialize_farm_groupbox(parent):
    """Sets up the farm render box for the project path, i.e. AWS is displayed if
    rendering on an S3 project path and LAN if on a SMB drive.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tag = parent.tag
    dlg = parent.dlg
    gb_farm = getattr(dlg, f"gb_{tag}_farm", None)
    grid_s3 = getattr(dlg, f"w_{tag}_farm_s3", None)
    grid_lan = getattr(dlg, f"w_{tag}_farm_lan", None)
    parent.is_aws = parent.parent.is_aws
    parent.is_lan = parent.parent.is_lan
    # Show only the farm UI matching the project location (S3 vs LAN);
    # hide the whole group box when neither applies.
    if not parent.is_aws and not parent.is_lan:
        gb_farm.hide()
    elif parent.is_aws:
        grid_lan.hide()
    elif parent.is_lan:
        grid_s3.hide()
    parent.ec2_instance_types_cpu = []
    parent.ec2_instance_types_gpu = []
    if parent.is_aws:
        # Get list of EC2 instances
        client = parent.parent.aws_util.session.client("ec2")
        # NOTE(review): relies on botocore's private _service_model API to
        # enumerate instance types — may break across botocore versions.
        ts = client._service_model.shape_for("InstanceType").enum
        ts = [t for t in ts if not t.startswith(config.EC2_UNSUPPORTED_TYPES)]
        # "c*" families are offered as CPU instances, "p*"/"g*" as GPU.
        parent.ec2_instance_types_cpu = [t for t in ts if t.startswith("c")]
        parent.ec2_instance_types_gpu = [t for t in ts if t.startswith(("p", "g"))]
    # Check if flagfile has farm attributes
    flagfile_fn = os.path.join(parent.path_flags, parent.flagfile_basename)
    flags = get_flags_from_flagfile(flagfile_fn)
    parent.is_farm = False
    for farm_attr in ["master", "workers", "cloud"]:
        # NOTE(review): raises KeyError if a farm flag is absent from the
        # flagfile — presumably the flagfile always defines all three; confirm.
        if flags[farm_attr] != "":
            parent.is_farm = True
            break
    call_force_refreshing(parent, gb_farm.setChecked, parent.is_farm)
def show_resources(parent):
    """Displays resources used in the container.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element
            (unused; kept for a uniform callback signature).

    Returns:
        str: Resources (memory and CPU) being used.
    """
    # One-shot batch-mode `top`; output is captured and returned.
    return run_command("top -b -n 1")
def show_aws_resources(parent):
    """Displays resources used across the AWS cluster.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Returns:
        str: Resources (memory and CPU) being used in the farm, one running
            instance per line. (Fixed the original "src:" typo.)
    """
    return "\n".join(parent.parent.aws_util.ec2_get_running_instances())
def get_aws_workers():
    """Get names of the instances in the AWS farm.

    Returns:
        list[str]: Instance IDs of EC2 instances in the farm, one per line
            as read from the workers file (lines keep their newlines).
    """
    with open(config.DOCKER_AWS_WORKERS) as f:
        return f.readlines()
def set_aws_workers(workers):
    """Sets names of the instances in the AWS farm.

    Args:
        workers (list[str]): Instance IDs of EC2 instances in the farm.
    """
    with open(config.DOCKER_AWS_WORKERS, "w") as f:
        # NOTE(review): accesses worker.id, so `workers` appears to be a list
        # of instance objects rather than list[str] as documented — confirm.
        # Also, writelines() inserts no newlines while get_aws_workers()
        # returns readlines() output; verify ids carry trailing newlines.
        f.writelines([worker.id for worker in workers])
def popup_ec2_dashboard_url(parent):
    """Shows a popup with the EC2 dashboard URL for the configured region.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    region = parent.parent.aws_util.region_name
    if region:
        url = f"https://{region}.console.aws.amazon.com/ec2#Instances"
    else:
        # No region configured: fall back to the region-less console URL.
        url = "https://console.aws.amazon.com/ec2#Instances"
    dep_util.popup_message(parent.parent, url, "EC2 Dashboard")
def popup_logs_locations(parent):
    """Displays the path to local logs in a popup on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    logs = [parent.log_reader.log_file]
    logs_workers = glob.iglob(f"{parent.path_logs}/Worker-*", recursive=False)
    for log in logs_workers:
        # Only include worker logs modified after this app session started.
        ts_log = datetime.datetime.fromtimestamp(os.path.getmtime(log))
        if ts_log > parent.parent.ts_start:
            logs.append(log)
    project = parent.parent.path_project
    # Display paths relative to the project root for readability.
    logs = [dep_util.remove_prefix(l, f"{project}/") for l in logs]
    dep_util.popup_message(parent.parent, "\n".join(logs), "Logs")
def run_process_aws(parent, gb, p_id=None):
    """Runs the process to create a cluster on AWS and perform the render job.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        p_id (str, optional): Identifier for the spawned process; defaults
            to "run_aws_create".
    """
    flags = {}
    flags["key_dir"] = os.path.dirname(parent.path_aws_key_fn)
    # Key name is the key file's basename without its extension.
    flags["key_name"] = os.path.splitext(os.path.basename(parent.path_aws_key_fn))[0]
    flags["csv_path"] = parent.path_aws_credentials
    flags["ec2_file"] = parent.path_aws_ip_file
    spin_num_workers = getattr(parent.dlg, f"spin_{parent.tag}_farm_num_workers", None)
    flags["cluster_size"] = int(spin_num_workers.value())
    flags["region"] = parent.parent.aws_util.region_name
    dd_ec2 = getattr(parent.dlg, f"dd_{parent.tag}_farm_ec2", None)
    flags["instance_type"] = dd_ec2.currentText()
    flags["tag"] = parent.tag
    # Overwrite flag file
    app_name = parent.app_aws_create
    flagfile_fn = os.path.join(parent.path_flags, parent.app_name_to_flagfile[app_name])
    dep_util.write_flagfile(flagfile_fn, flags)
    if not p_id:
        p_id = "run_aws_create"
    run_process(parent, gb, app_name, flagfile_fn, p_id)
def on_download_meshes(parent, gb):
    """Downloads meshes from S3. This is a no-op if not an S3 project.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
    """
    if not parent.parent.is_aws:
        return
    subdir = image_type_paths["video_bin"]
    flags = {}
    flags["csv_path"] = parent.path_aws_credentials
    # Mirror the S3 subdirectory into the container's input root.
    flags["local_dir"] = os.path.join(config.DOCKER_INPUT_ROOT, subdir)
    flags["s3_dir"] = os.path.join(parent.parent.ui_flags.project_root, subdir)
    flags["verbose"] = parent.parent.ui_flags.verbose
    flags["watch"] = True  # NOTE: watchdog sometimes gets stale file handles in Windows
    # Overwrite flag file
    app_name = parent.app_aws_download_meshes
    flagfile_fn = os.path.join(parent.path_flags, parent.app_name_to_flagfile[app_name])
    dep_util.write_flagfile(flagfile_fn, flags)
    p_id = "download_meshes"
    run_process(parent, gb, app_name, flagfile_fn, p_id)
def on_terminate_cluster(parent, gb):
    """Terminates a running AWS cluster. This is a no-op if no cluster is up.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
    """
    key_path = parent.path_aws_key_fn
    flags = {
        "key_dir": os.path.dirname(key_path),
        "key_name": os.path.splitext(os.path.basename(key_path))[0],
        "csv_path": parent.path_aws_credentials,
        "ec2_file": parent.path_aws_ip_file,
        "region": parent.parent.aws_util.region_name,
    }
    app_name = parent.app_aws_clean
    # Overwrite flag file before launching the cleanup app
    flagfile_fn = os.path.join(
        parent.path_flags, parent.app_name_to_flagfile[app_name]
    )
    dep_util.write_flagfile(flagfile_fn, flags)
    run_process(parent, gb, app_name, flagfile_fn, "terminate_cluster")
def get_workers(parent):
    """Finds workers in a LAN farm.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Returns:
        list[str]: IPs of workers in the local farm (empty when running
            purely on localhost).
    """
    if parent.parent.ui_flags.master == config.LOCALHOST:
        return []
    return parent.lan.scan()
def call_force_refreshing(parent, fun, *args):
    """Invokes a callable with parent.is_refreshing_data forced on.

    Temporarily raises the is_refreshing_data flag (if not already set) so
    that UI callbacks triggered by `fun` treat the change as a programmatic
    refresh instead of user input.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        fun (callable): Function to invoke.
        *args: Positional arguments forwarded to fun.
    """
    already_refreshing = parent.is_refreshing_data
    if not already_refreshing:
        parent.is_refreshing_data = True
    try:
        fun(*args)
    finally:
        # Restore the flag even if fun raises, so the UI is not stuck in
        # "refreshing" state (the original leaked the flag on error).
        if not already_refreshing:
            parent.is_refreshing_data = False
| 1.703125 | 2 |
sugarpidisplay/sugarpiconfig/__init__.py | szpaku80/SugarPiDisplay | 1 | 12768031 |
from flask import Flask
app = Flask(__name__)
# SECURITY: hard-coded secret key committed to source — session cookies
# signed with it are forgeable by anyone who reads the repo. Load it from
# an environment variable or instance config instead.
app.secret_key = 'my secret key 123'
from .views import *  # registers the app's routes on import (side effect)
| 1.359375 | 1 |
cate/util/safe.py | TomBlock/cate | 0 | 12768032 | # The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from typing import Dict, Any, Callable
__author__ = "<NAME> (Brockmann Consult GmbH)"
def _get_safe_globals_accessor() -> Callable[[], dict]:
    """Build and return a zero-argument accessor for the sandbox globals.

    The whitelist below deliberately excludes anything that can reach the
    file system, the import machinery, or object internals (``open``,
    ``__import__``, ``eval``, ``exec``, ``vars``, ``globals``, ``setattr``,
    ``delattr``, ...).
    """
    safe_builtin_names = [
        "abs",
        "all",
        "any",
        "ascii",
        "bin",
        "bool",
        "bytearray",
        "bytes",
        "callable",
        "chr",
        "complex",
        "dict",
        "divmod",
        "enumerate",
        "filter",
        "float",
        "format",
        "frozenset",
        "getattr",
        "hasattr",
        "hash",
        "hex",
        "int",
        "isinstance",
        "issubclass",
        "iter",
        "len",
        "list",
        "map",
        "max",
        "min",
        "next",
        "object",
        "oct",
        "ord",
        "pow",
        "range",
        "repr",
        "reversed",
        "round",
        "set",
        "slice",
        "sorted",
        "str",
        "sum",
        "tuple",
        "type",
        "zip",
    ]
    safe_module_names = [
        'cmath',
        'math',
    ]
    builtins = sys.modules['builtins']
    safe_globals = {}
    for name in safe_builtin_names:
        safe_globals[name] = builtins.__dict__[name]
    for name in safe_module_names:
        safe_globals[name] = __import__(name)
    # Block both the implicit and explicit routes to the real builtins.
    safe_globals['__builtins__'] = None
    safe_globals['builtins'] = None
    def _get_safe_globals_closure():
        """Return a global environment for safe expression evaluation."""
        # Fresh shallow copy per call so evaluated code cannot poison the
        # template for subsequent evaluations.
        return dict(safe_globals)
    return _get_safe_globals_closure
get_safe_globals = _get_safe_globals_accessor()
def safe_eval(expression: str, local_namespace: Dict[str, Any] = None):
    """Evaluate *expression* as a Python expression in a restricted environment.

    The expression is evaluated against a sandboxed global namespace that
    exposes only a whitelist of safe builtins (e.g. ``abs``, ``min``,
    ``max``) plus the ``math`` and ``cmath`` modules; it has no access to
    the caller's environment. Syntax errors are reported as exceptions.

    :param expression: A Python expression.
    :param local_namespace: The local namespace in which **expression** is evaluated.
    :return: The result of the evaluated expression.
    """
    namespace = local_namespace or {}
    return eval(expression, get_safe_globals(), namespace)
| 1.554688 | 2 |
fw/tiny2040/keypad.py | RyuSLunK/t9-macropad-circuitpython | 22 | 12768033 | <gh_stars>10-100
import digitalio
import board
from digitalio import DigitalInOut
from digitalio import Direction
import adafruit_matrixkeypad
class Keypad():
    """Thin wrapper around a matrix keypad driven by adafruit_matrixkeypad.

    Args:
        keys: nested sequence mapping (row, col) positions to key values
            (4 rows x 3 cols to match the pin lists below).
    """
    def __init__(self, keys):
        # Column pins GP3-GP5, row pins GP29/GP28/GP27/GP26 — board-specific
        # wiring for the Tiny 2040; confirm against the hardware schematic.
        cols = [DigitalInOut(x) for x in (board.GP3, board.GP4, board.GP5)]
        rows = [DigitalInOut(x) for x in (board.GP29_A3, board.GP28_A2, board.GP27_A1, board.GP26_A0)]
        self.keypad = adafruit_matrixkeypad.Matrix_Keypad(rows, cols, keys)
    @property
    def pressed_keys(self):
        """Key values currently pressed (delegates to the matrix driver)."""
        return self.keypad.pressed_keys
ntext.py | mc2pw/ntext | 0 | 12768034 | # This file is licensed under the terms of the MIT license.
# See the LICENSE file in the root of this repository for complete details.
"""Data structure for representing string diagrams"""
from collections import namedtuple
from copy import deepcopy
__all__ = [
'Ntext',
'Composition',
'NotComposable',
'NotCompatible',
'ncell',
'zcell',
'icell',
]
# Immutable base record for Ntext; fields are documented on the Ntext
# subclass defined below.
_Ntext = namedtuple('Ntext', [
    'cells',
    'dim',
    'source',
    'target',
    'smap',
    'tmap',
])
class Ntext(_Ntext):
    """Represents a string diagram.

    Attributes:
        cells (dict): indicates how cells connect to each other.
        dim (int): the dimension.
        source (Ntext): the source diagram.
        target (Ntext): the target diagram.
        smap (dict): indicates how the source maps to the cells.
        tmap (dict): indicates how the target maps to the cells.
    """
    __slots__ = ()
    def adapt(self, n, perm):
        """Applies a permutation to the cells of the n-source.

        This is useful to allow composition of ntexts when the target of one
        and source of the other coincide only up to permutation.

        Args:
            n: the n-source is the source (of the source) n times.
            perm: the permutation represented as a list of non repeated
                indices.
        """
        return adapt(self, n, perm)
    def compatible(self, target):
        """This diagram can be a source for the given target.

        The source and the target satisfy the globular conditions. This means
        that the source of the source is the same as source of the target and
        the target of the source is the same as the target of the target.

        Args:
            target: the target against which compatibility is checked.

        Returns:
            bool
        """
        return compatible(self, target)
    def composable(self, ntext, n):
        """This diagram can be n-composed with the given diagram.

        Args:
            ntext: the candidate right-hand factor.
            n: the composition dimension.

        Returns:
            bool
        """
        return composable(self, ntext, n, None)
class Composition:
    """Incrementally builds up a composition of diagrams."""
    def __init__(self, ntext):
        # Start from the identity cell on the source, so successive
        # compose() calls accumulate onto it.
        self._ntext = icell(ntext.source)
    def ntext(self):
        """Return a deep copy of the diagram composed so far."""
        return deepcopy(self._ntext)
    def compose(self, factor, n):
        """n-compose the accumulated diagram with *factor*.

        Raises:
            NotComposable: if the target of the accumulated diagram is not
                the source of *factor*.
        """
        ntext, ok = compose(self._ntext, factor, n, None)
        if not ok:
            raise NotComposable('Target of self is not source of factor.')
        self._ntext = ntext
class Error(Exception):
    """Base class for ntext exceptions; catch this to handle any ntext error."""
class NotComposable(Error):
    """Raised when trying to compose cells.

    Composing cells requires the target of the first cell
    to be the same as the source of the second cell.
    """
class NotCompatible(Error):
    """Raised when trying to create a cell.

    Creating a cell with a source and target requires the source and target to
    satisfy the globular conditions. See `Ntext.compatible`.
    """
# A cell's boundary: `source` and `target` are maps from cell name to a
# list of indices into the ambient diagram's cell lists.
NtextCell = namedtuple('NtextCell', ['source', 'target'])
# Reference into a cell list: `index` is the position, `is_new` records
# whether the slot was freshly allocated during stitching.
_ref = namedtuple('_ref', ['index', 'is_new'])
def ncell(name, source, target):
    """Create a cell named *name* with the given boundary diagrams.

    Raises:
        NotCompatible: if *source* and *target* violate the globular
            conditions (see `compatible`).
    """
    if compatible(source, target):
        return _ncell(name, source, target)
    raise NotCompatible('Source and target are not compatible.')
def _ncell(name, source, target):
    """Build an (n+1)-cell named *name* with the given boundary.

    Compatibility is assumed to have been checked by ncell(); works on deep
    copies so the inputs are never mutated.
    """
    def apply_offset(ct, o):
        # Shift every boundary index by the per-name offsets in `o`.
        for m, p in ct.items():
            ct[m] = [j+o[m] for j in p]
    # Assumes name is an unused name.
    source = deepcopy(source)
    target = deepcopy(target)
    dim = source.dim
    ntext = dict()
    smap = dict()
    tmap = dict()
    ncsource = dict()
    nctarget = dict()
    ntext[name] = [NtextCell(ncsource, nctarget)]
    if dim > 0:
        offset = dict()
        # Source cells are copied first; target cells are appended after,
        # so their indices must be shifted by the offsets recorded below.
        for n, cells in source.cells.items():
            ntext[n] = [deepcopy(c) for c in cells]
            smap[n] = list(range(len(cells)))
            ncsource[n] = smap[n][:]
        for n, cells in target.cells.items():
            if n not in ntext:
                ntext[n] = []
            off = len(ntext[n])
            offset[n] = off
            ntext[n].extend(deepcopy(c) for c in cells)
            tmap[n] = list(range(off, len(cells)+off))
            nctarget[n] = tmap[n][:]
        for n, cells in ntext.items():
            # NOTE(review): offset[n] raises KeyError for names occurring
            # only in the source — presumably every name also occurs in the
            # target here; confirm, or guard with offset.get(n, len(cells)).
            for i in range(offset[n], len(cells)):
                # NOTE(review): iterating cells[i] treats it as a container
                # of cells, yet elsewhere entries appear to be single
                # NtextCell records — verify the intended element type.
                for cell in cells[i]:
                    apply_offset(cell.source, offset)
                    apply_offset(cell.target, offset)
    else:
        # 0-dimensional boundary: cells are bare counts, not lists.
        sn = source.cells
        ntext[sn] = 1
        smap[sn] = [0]
        ncsource[sn] = [0]
        tn = target.cells
        if tn in ntext:
            # Source and target share the same 0-cell name: second occurrence.
            ntext[tn] += 1
            tmap[tn] = [1]
            nctarget[tn] = [1]
        else:
            ntext[tn] = 1
            tmap[tn] = [0]
            nctarget[tn] = [0]
    return Ntext(ntext, dim+1, source, target, smap, tmap)
def zcell(name):
    """Create a 0-dimensional cell that carries just *name*."""
    return Ntext(cells=name, dim=0, source=None, target=None, smap=None, tmap=None)
def icell(ntext):
    """Return the identity cell on *ntext* (one dimension higher)."""
    dim = ntext.dim + 1
    if ntext.dim == 0:
        # 0-dimensional case: a single cell named by ntext.cells.
        cells = {ntext.cells: 1}
        smap = {ntext.cells: [0]}
    else:
        cells = deepcopy(ntext.cells)
        smap = dict()
        for n, p in cells.items():
            # NOTE(review): stores a range object here while other maps use
            # lists — fine for iteration and deepcopy, but confirm no caller
            # relies on list-only operations (append, item assignment).
            smap[n] = range(len(p))
    source = deepcopy(ntext)
    target = deepcopy(ntext)
    tmap = deepcopy(smap)
    return Ntext(cells, dim, source, target, smap, tmap)
def pre_stitch(dst, src, mapping):
    """Extend *dst* with placeholder slots for every cell of *src*.

    `dst` and `src` map cell names either to an int count (0-dimensional
    case) or to a list of cells. For each (name, index) of src not already
    present in *mapping*, a fresh slot is appended to dst[name] and recorded
    in mapping as a _ref with is_new=True.
    """
    for name, maps in src.items():
        if isinstance(maps, int):
            # 0-dimensional case: cells are stored as a bare count.
            length = maps
            cells = dst.get(name, 0)
            clength = cells
        else:
            length = len(maps)
            cells = dst.get(name) or []
            clength = len(cells)
            if not cells:
                dst[name] = cells
        for i in range(length):
            ni = (name, i)
            if ni not in mapping:
                # Unmapped source cell: allocate the next free index in dst.
                mapping[ni] = _ref(clength, True)
                clength += 1
        if isinstance(maps, int):
            dst[name] = clength
        else:
            # Pad the destination list so the allocated indices exist; they
            # are filled in later by stitch().
            cells.extend(None for i in range(len(cells), clength))
def stitch(dst, src, mapping):
    """Merge the cells of *src* into *dst*, rewriting indices via *mapping*."""
    pre_stitch(dst, src, mapping)
    # dst and src are ntext maps.
    for name, maps in src.items():
        if isinstance(maps, int):
            # 0-dimensional entries carry no boundary data to copy.
            continue
        cells = dst[name]
        for i, cell in enumerate(maps):
            j, is_new = mapping[(name, i)]
            if is_new:
                assert cells[j] is None
                source = dict()
                target = dict()
                # Translate the cell's boundary indices from src's
                # numbering into dst's numbering.
                for t, ct in [(source, cell.source), (target, cell.target)]:
                    for n, p in ct.items():
                        t[n] = [mapping[(n, k)].index for k in p]
                cells[j] = NtextCell(source, target)
def compatible(source, target):
    """Return True when *source* and *target* satisfy the globular conditions.

    Their sources must coincide and their targets must coincide; compatible
    diagrams necessarily share the same dimension.
    """
    is_compatible = (source.source == target.source
                     and source.target == target.target)
    if is_compatible:
        assert source.dim == target.dim
    return is_compatible
def compose(dst, src, dim, mapping):
    """dim-compose *src* onto *dst*.

    NOTE: mutates dst's cell map in place via stitch().

    Args:
        dst: diagram whose dim-target face must equal src's dim-source face.
        src: the diagram to append.
        dim: composition dimension (0 is vertical composition).
        mapping: optional dict populated with (name, index) -> _ref entries
            relating src's cells to the composed diagram's cells.

    Returns:
        (Ntext, bool): the composed diagram and True, or (None, False) when
        the diagrams are not composable.
    """
    mapping = dict() if mapping is None else mapping
    c = composable(dst, src, dim, mapping)
    if not c:
        return None, c
    ntext = dst.cells
    stitch(ntext, src.cells, mapping)
    if dim > 0:  # dim 0 is vertical composition.
        dim -= 1
        r = [None]*2
        # Compose the source faces and target faces recursively.
        for k, d, s, dm, sm in [
            (0, dst.source, src.source, dst.smap, src.smap),
            (1, dst.target, src.target, dst.tmap, src.tmap),
        ]:
            cm = dict()
            r[k], c = compose(d, s, dim, cm)
            assert c
            # BUGFIX: iterate (name, positions) pairs; iterating the dict
            # directly yields only keys and failed to unpack.
            for n, p in sm.items():
                if n not in dm:
                    dm[n] = []
                # Extend the face map with indices of freshly added cells.
                dm[n].extend(
                    j for j, is_new in (cm[(n, i)] for i in p)
                    if is_new
                )
        source, target = r
    else:
        source = dst.source
        target = deepcopy(src.target)
    smap = dst.smap
    tmap = dict()
    # The composite's target map is src's target map, renumbered.
    for n, p in src.tmap.items():
        tmap[n] = [mapping[(n, i)].index for i in p]
    return Ntext(ntext, dst.dim, source, target, smap, tmap), True
def composable(dst, src, dim, mapping):
    """Check whether *src* can be dim-composed after *dst*.

    The dim-target face of dst must equal the dim-source face of src. When
    *mapping* is supplied, it is populated with (name, index) -> _ref
    entries identifying src's source cells with dst's target cells
    (is_new=False).
    """
    if dst.dim != src.dim:
        return False
    target, tmap = get_face(dst, dim, 1)
    source, smap = get_face(src, dim, -1)
    if mapping is not None:
        for name, pos in smap.items():
            t = tmap[name]
            for i, k in enumerate(pos):
                mapping[(name, k)] = _ref(t[i], False)
    return source == target
def permute(ntext, mapping, perm):
    """Apply *perm* in place to both the cell lists and the mapping.

    This reorders one structure and the other consistently, so the pair
    stays in sync (equivalent to applying the permutation to one and its
    inverse to the other).
    """
    for name, order in perm.items():
        ntext[name] = [ntext[name][i] for i in order]
        mapping[name] = [mapping[name][i] for i in order]
def adapt(cell, dim, perm):
    """Apply *perm* to the cells of cell's dim-source, in place."""
    # Permute the dim-source. dim 0 is vertical composition.
    if dim > 0:
        dim -= 1
        # Recurse into both faces so the permutation is applied consistently
        # on the shared boundary.
        adapt(cell.source, dim, perm)
        adapt(cell.target, dim, perm)
    else:
        permute(cell.source, cell.smap, perm)
def get_face(cell, dim, side):
    """Return (face, map) for cell's dim-source (side < 0) or dim-target.

    The returned map composes the intermediate source maps, so it relates
    the face's cells directly to *cell*'s cells.
    """
    if dim > 0:
        dim -= 1
        sc, scmap = get_face(cell.source, dim, side)
        smap = dict()
        # Compose this level's source map with the deeper face's map.
        for n, p in cell.smap.items():
            smap[n] = [p[i] for i in scmap[n]]
        return sc, smap
    if side < 0:
        return cell.source, cell.smap
    return cell.target, cell.tmap
| 3.046875 | 3 |
cogs/MarriageCommandsCog.py | saschavhd/DiscordAssistant | 0 | 12768035 | <filename>cogs/MarriageCommandsCog.py<gh_stars>0
from datetime import datetime
import discord
from discord.ext import commands
import re
from utils.database import db
from utils.menu import Menu
from utils.page import Page
class MarriageCommandsCog(commands.Cog):
    """Guild commands letting members marry, inspect, and divorce each other.

    Marriage state is stored per guild in the Mongo 'Users' collection under
    servers.<guild_id>.married_to / servers.<guild_id>.marriage_date.
    """
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    @commands.guild_only()
    async def marriage(self, ctx):
        """Reports who the invoker is married to and for how many days."""
        user = db['Users'].find_one({'_id': ctx.author.id})
        try:
            married_to = user['servers'][str(ctx.guild.id)]['married_to']
        except KeyError:
            await ctx.send("You are not married to anyone.")
        else:
            marriage_date = user['servers'][str(ctx.guild.id)]['marriage_date']
            # Whole days elapsed since the stored (UTC) marriage timestamp.
            days_married = int((datetime.utcnow() - marriage_date).total_seconds()/(3600*24))
            await ctx.send(f"You have been married to <@{married_to}> for {days_married} days! :heart:")

    @commands.command()
    @commands.guild_only()
    async def propose(self, ctx, member: discord.Member):
        """Proposes to *member*; records the marriage on a 'yes' reply."""
        try:
            is_proposer_married = db['Users'].find_one(
                {'_id': ctx.author.id})['servers'][str(ctx.guild.id)]['married_to']
        except KeyError:
            is_proposer_married = False
        try:
            is_proposed_married = db['Users'].find_one(
                {'_id': member.id})['servers'][str(ctx.guild.id)]['married_to']
        except KeyError:
            is_proposed_married = False
        if is_proposer_married or is_proposed_married:
            await ctx.send("Cannot propose to this person because either you or they are already married!")
            return
        def proposal_response_check(message):
            '''Check if:
            - Message by proposed member
            - Message in proposal channel
            - Message content is either "yes" or "no"
            '''
            # NOTE(review): re.search matches substrings, so e.g. "nope"
            # also counts as "no" — confirm this is intended.
            return (
                message.author == member and
                message.channel == ctx.channel and
                re.search("(yes|no)", message.content, re.IGNORECASE)
            )
        proposal_page = Page (
            title=f"{ctx.author.mention} has proposed to {member.mention}! :heart:",
            content=f"{member.mention}, how do you respond? (yes/no)",
            footer="Type your answer in the chat below!"
        )
        menu = Menu (
            bot=self.bot,
            interactors=[member.id],
            pages=[proposal_page],
            channel=ctx.channel,
            input=proposal_response_check,
            show_buttons=False,
            remove_message_after=True
        )
        input_tuple = await menu.display()
        try:
            # `input` shadows the builtin here; it is the matched Message.
            input, _ = input_tuple
            answer = input.content.lower()
        except TypeError:  # No response was given (tuple is empty)
            return
        await menu.stop()
        if answer == "no":
            await ctx.send("Ouch that must sting... :broken_heart:")
        else:
            # Record the marriage symmetrically on both users.
            date = datetime.utcnow()
            db['Users'].update_one({'_id': ctx.author.id},
                {'$set': {
                    f'servers.{ctx.guild.id}.married_to': member.id,
                    f'servers.{ctx.guild.id}.marriage_date': date
                    }
                }
            )
            db['Users'].update_one({'_id': member.id},
                {'$set': {
                    f'servers.{ctx.guild.id}.married_to': ctx.author.id,
                    f'servers.{ctx.guild.id}.marriage_date': date
                    }
                }
            )
            await ctx.send(f"Congratulations {member.name} & {ctx.author.name} are officially married! :heart:")

    @commands.command()
    @commands.guild_only()
    async def divorce(self, ctx):
        """Ends the invoker's marriage after a yes/no confirmation prompt."""
        try:
            married_to = db['Users'].find_one(
                {'_id': ctx.author.id})['servers'][str(ctx.guild.id)]['married_to']
        except KeyError:
            married_to = None
        if not married_to:
            await ctx.send("You are not married... Therefore you cannot divorce anyone :s")
            return
        def confirmation_message_check(message):
            '''Check if:
            - Message by command invoker
            - Message in command invoked channel
            - Message content is either "yes" or "no"
            '''
            return (
                message.author == ctx.author and
                message.channel == ctx.channel and
                re.search("(yes|no)", message.content, re.IGNORECASE)
            )
        confirmation_page = Page (
            title=f"Are you sure you want to divorce <@{married_to}>?",
            content="Confirm your decision below. (yes/no)"
        )
        menu = Menu (
            bot=self.bot,
            interactors=[ctx.author.id],
            pages=[confirmation_page],
            channel=ctx.channel,
            input=confirmation_message_check,
            remove_message_after=True
        )
        input_tuple = await menu.display()
        try:
            input, _ = input_tuple
        except TypeError:
            # No response was given before the menu timed out.
            return
        answer = input.content.lower()
        if answer == "no":
            return
        else:
            # Remove the marriage fields from both spouses in one query.
            db['Users'].update_many({'_id': {'$in': [ctx.author.id, married_to]}},
                {'$unset': {
                    f'servers.{ctx.guild.id}.married_to': "",
                    f'servers.{ctx.guild.id}.marriage_date': ""
                    }
                }
            )
            await ctx.send(f"{ctx.author.mention} and <@{married_to}> are now officially divorced! :o")
        await menu.stop()
def setup(bot):
    """Entry point used by discord.py's load_extension to register the cog."""
    bot.add_cog(MarriageCommandsCog(bot))
| 2.921875 | 3 |
mysql/mysql_print_metadata-values.py | MichaelCurrin/python-2016 | 0 | 12768036 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 8 14:10:03 2016
@author: michaelcurrin
Open a connection to a database
Print the tables and column data metadata
Print the actual values for the some tables, for a defined table limit and row count.
Close the database.
"""
# library to connect to MySQL
import MySQLdb
# file for authorising login
import sql_config
def getRows(statement, cursor=None):
    """
    Execute a query and return all result rows.

    Args
        statement: string. e.g. 'SELECT * FROM tablenameXYZ'
        cursor: optional DB-API cursor to execute against; when omitted,
            falls back to the module-level `cur` for backward
            compatibility (the caller must have bound it globally).

    Returns
        output: list of row data from query results
    """
    c = cur if cursor is None else cursor
    c.execute(statement)
    return list(c.fetchall())
def printTable(selectedTable, limit=100):
    """
    Print a table's header (column names) and up to `limit` rows.

    NOTE(review): relies on module-level `tablesColumnsDict` and (through
    getRows) `cur` being defined before the call; as written, main() binds
    them only as locals — confirm they are made global before calling this.
    """
    print "ACTUAL VALUES FOR %s" % selectedTable
    print
    # Print header
    print tablesColumnsDict[selectedTable]
    # Print row values
    query = "SELECT * FROM %s LIMIT %i;" % (selectedTable, limit)
    for rows in getRows(query):
        print rows
        # e.g. (808L, datetime.datetime(2015, 11, 1, 0, 0), 'Week 4', 'WinStuff',
        # Actual', 600L, 5L, Decimal('0.0373'), 3000L, 134L, 3L)
    print "\n"
def main():
dbCredentials = sql_config.dbCredentials
db = MySQLdb.connect(
host=dbCredentials["host"],
user=dbCredentials["user"],
passwd=dbCredentials["passwd"],
db=dbCredentials["dbName"],
)
cur = db.cursor()
# list of SQL queries to execute
metadata_scripts = [ # get list tables and row counts for selected DB
"""
SELECT
TABLE_NAME
,TABLE_TYPE
,TABLE_ROWS
FROM information_schema.tables
WHERE TABLE_SCHEMA = '%s';
"""
% dbCredentials["dbName"],
# get list of tables and their columns for selected DB
"""
SELECT
TABLE_NAME
,COLUMN_NAME
,COLUMN_DEFAULT
,IS_NULLABLE
,DATA_TYPE
,COLUMN_TYPE
FROM information_schema.columns
WHERE TABLE_SCHEMA = '%s';
"""
% dbCredentials["dbName"],
]
# Fill a list of tables and views in current DB
tableNames = []
tableNameData = GetRows(metadata_scripts[0])
for rows in tableNameData:
# get value in first column (table name)
tableNames.append(rows[0])
# TEST
print "TABLE NAMES\n"
for items in tableNames:
print items
print ""
# Fill a dictionary containing table name as key as column name as values in a list
tablesColumnsDict = {}
tablesColumnsData = GetRows(metadata_scripts[1])
for rows in tablesColumnsData:
# Get value in first column (table name)
col1, col2 = rows[:2]
# Create empty list for table if it does not exist
if col1 not in tablesColumnsDict.keys():
tablesColumnsDict[col1] = []
# Set TABLE_NAME as key and COLUMN_NAME as value
tablesColumnsDict[col1].append(col2)
print "TABLES AND COLUMNS\n"
for keys in sorted(tablesColumnsDict.keys()):
print keys
for values in tablesColumnsDict[keys]:
print "\t%s" % values
print ""
print ""
# This works to call global variable tablesColumnsDict
# without adding to each method
# Print X rows for first Y tables
maxTables = 2
rowLimit = 5
count = 0
for tables in sorted(tablesColumnsDict.keys()):
if count == maxTables:
break
else:
PrintTable(tables, rowLimit)
count += 1
# close the database connection
db.close()
if __name__ == "__main__":
main()
| 3.515625 | 4 |
apps/site/api/renderers/__init__.py | LocalGround/localground | 9 | 12768037 | <filename>apps/site/api/renderers/__init__.py<gh_stars>1-10
from localground.apps.site.api.renderers.csv_renderer import CSVRenderer
from localground.apps.site.api.renderers.browsable_api_renderer import BrowsableAPIRenderer
from localground.apps.site.api.renderers.geojson_renderer import GeoJSONRenderer
from localground.apps.site.api.renderers.kml_renderer import KMLRenderer
from localground.apps.site.api.renderers.zip_renderer import ZIPRenderer
| 1.210938 | 1 |
unit/test_get_systemd_units_info.py | tarantool/ansible-cartridge | 17 | 12768038 | <reponame>tarantool/ansible-cartridge
import sys
import unittest
import module_utils.helpers as helpers
sys.modules['ansible.module_utils.helpers'] = helpers
from library.cartridge_get_systemd_units_info import get_systemd_units_info
def call_get_systemd_units_info(app_name, instance_vars, tnt_version):
    """Helper wrapping the module arguments into the expected params dict."""
    params = {
        'app_name': app_name,
        'instance_vars': instance_vars,
        'tnt_version': tnt_version,
    }
    return get_systemd_units_info(params)
class TestGetInstanceInfo(unittest.TestCase):
    """Checks the facts produced by get_systemd_units_info for all four
    combinations of cartridge_multiversion on/off and Tarantool CE vs EE.

    Patterns exercised by the expected facts below:
      * CE (an explicit tnt_version) uses the system '/usr/bin/tarantool';
        EE (empty tnt_version) uses the tarantool binary shipped with the
        application distribution.
      * multiversion=True resolves entrypoints/binaries under the
        per-instance directories; multiversion=False resolves them under
        dist_dir.
      * memtx/vinyl/wal/log paths are None whenever the corresponding
        *_dir_parent variable is None.
    """
    def setUp(self):
        # Show the full dict diff on assertEqual failures.
        self.maxDiff = None
    def test_multiversion_tnt_ce(self):
        # multiversion + CE: per-instance entrypoints, system tarantool.
        app_name = 'myapp'
        tnt_version = '3.0'
        instance_vars = {
            'cartridge_multiversion': True,
            'cartridge_run_dir': 'some/run/dir',
            'cartridge_data_dir': 'some/data/dir',
            'cartridge_memtx_dir_parent': 'some/memtx/dir',
            'cartridge_vinyl_dir_parent': 'some/vinyl/dir',
            'cartridge_wal_dir_parent': 'some/wal/dir',
            'cartridge_log_dir_parent': 'some/log/dir',
            'dist_dir': '/some/dist/dir',
            'cartridge_app_instances_dir': 'some/instances/dir',
        }
        res = call_get_systemd_units_info(app_name, instance_vars, tnt_version)
        self.assertFalse(res.failed)
        # '%i' is the systemd template-unit instance specifier.
        self.assertEqual(res.fact, {
            'stateboard_name': 'myapp-stateboard',
            'app_unit_file': 'myapp@.service',
            'stateboard_unit_file': 'myapp-stateboard.service',
            'instance_work_dir': 'some/data/dir/myapp.%i',
            'stateboard_work_dir': 'some/data/dir/myapp-stateboard',
            'instance_memtx_dir': 'some/memtx/dir/myapp.%i',
            'stateboard_memtx_dir': 'some/memtx/dir/myapp-stateboard',
            'instance_vinyl_dir': 'some/vinyl/dir/myapp.%i',
            'stateboard_vinyl_dir': 'some/vinyl/dir/myapp-stateboard',
            'instance_wal_dir': 'some/wal/dir/myapp.%i',
            'stateboard_wal_dir': 'some/wal/dir/myapp-stateboard',
            'instance_log_file': 'some/log/dir/myapp.%i.log',
            'stateboard_log_file': 'some/log/dir/myapp-stateboard.log',
            'instance_pid_file': 'some/run/dir/myapp.%i.pid',
            'stateboard_pid_file': 'some/run/dir/myapp-stateboard.pid',
            'instance_console_sock': 'some/run/dir/myapp.%i.control',
            'stateboard_console_sock': 'some/run/dir/myapp-stateboard.control',
            'instance_entrypoint': 'some/instances/dir/myapp.%i/init.lua',
            'stateboard_entrypoint': 'some/instances/dir/myapp-stateboard/stateboard.init.lua',
            'instance_tarantool_binary': '/usr/bin/tarantool',
            'stateboard_tarantool_binary': '/usr/bin/tarantool',
        })
    def test_multiversion_tnt_ee(self):
        # multiversion + EE (empty tnt_version): tarantool binary comes
        # from the instance dirs; unset *_dir_parent vars yield None paths.
        app_name = 'myapp'
        tnt_version = ''
        instance_vars = {
            'cartridge_multiversion': True,
            'cartridge_run_dir': 'some/run/dir',
            'cartridge_data_dir': 'some/data/dir',
            'cartridge_memtx_dir_parent': None,
            'cartridge_vinyl_dir_parent': None,
            'cartridge_wal_dir_parent': None,
            'cartridge_log_dir_parent': None,
            'dist_dir': '/some/dist/dir',
            'cartridge_app_instances_dir': 'some/instances/dir',
        }
        res = call_get_systemd_units_info(app_name, instance_vars, tnt_version)
        self.assertFalse(res.failed)
        self.assertEqual(res.fact, {
            'stateboard_name': 'myapp-stateboard',
            'app_unit_file': 'myapp@.service',
            'stateboard_unit_file': 'myapp-stateboard.service',
            'instance_work_dir': 'some/data/dir/myapp.%i',
            'stateboard_work_dir': 'some/data/dir/myapp-stateboard',
            'instance_memtx_dir': None,
            'stateboard_memtx_dir': None,
            'instance_vinyl_dir': None,
            'stateboard_vinyl_dir': None,
            'instance_wal_dir': None,
            'stateboard_wal_dir': None,
            'instance_log_file': None,
            'stateboard_log_file': None,
            'instance_pid_file': 'some/run/dir/myapp.%i.pid',
            'stateboard_pid_file': 'some/run/dir/myapp-stateboard.pid',
            'instance_console_sock': 'some/run/dir/myapp.%i.control',
            'stateboard_console_sock': 'some/run/dir/myapp-stateboard.control',
            'instance_entrypoint': 'some/instances/dir/myapp.%i/init.lua',
            'stateboard_entrypoint': 'some/instances/dir/myapp-stateboard/stateboard.init.lua',
            'instance_tarantool_binary': 'some/instances/dir/myapp.%i/tarantool',
            'stateboard_tarantool_binary': 'some/instances/dir/myapp-stateboard/tarantool',
        })
    def test_not_multiversion_tnt_ce(self):
        # No multiversion + CE: entrypoints live in dist_dir, system tarantool.
        app_name = 'myapp'
        tnt_version = '3.0'
        instance_vars = {
            'cartridge_multiversion': False,
            'cartridge_run_dir': 'some/run/dir',
            'cartridge_data_dir': 'some/data/dir',
            'cartridge_memtx_dir_parent': 'some/memtx/dir',
            'cartridge_vinyl_dir_parent': 'some/vinyl/dir',
            'cartridge_wal_dir_parent': 'some/wal/dir',
            'cartridge_log_dir_parent': 'some/log/dir',
            'dist_dir': 'some/dist/dir',
            'cartridge_app_instances_dir': 'some/instances/dir',
        }
        res = call_get_systemd_units_info(app_name, instance_vars, tnt_version)
        self.assertFalse(res.failed)
        self.assertEqual(res.fact, {
            'stateboard_name': 'myapp-stateboard',
            'app_unit_file': 'myapp@.service',
            'stateboard_unit_file': 'myapp-stateboard.service',
            'instance_work_dir': 'some/data/dir/myapp.%i',
            'stateboard_work_dir': 'some/data/dir/myapp-stateboard',
            'instance_memtx_dir': 'some/memtx/dir/myapp.%i',
            'stateboard_memtx_dir': 'some/memtx/dir/myapp-stateboard',
            'instance_vinyl_dir': 'some/vinyl/dir/myapp.%i',
            'stateboard_vinyl_dir': 'some/vinyl/dir/myapp-stateboard',
            'instance_wal_dir': 'some/wal/dir/myapp.%i',
            'stateboard_wal_dir': 'some/wal/dir/myapp-stateboard',
            'instance_log_file': 'some/log/dir/myapp.%i.log',
            'stateboard_log_file': 'some/log/dir/myapp-stateboard.log',
            'instance_pid_file': 'some/run/dir/myapp.%i.pid',
            'stateboard_pid_file': 'some/run/dir/myapp-stateboard.pid',
            'instance_console_sock': 'some/run/dir/myapp.%i.control',
            'stateboard_console_sock': 'some/run/dir/myapp-stateboard.control',
            'instance_entrypoint': 'some/dist/dir/init.lua',
            'stateboard_entrypoint': 'some/dist/dir/stateboard.init.lua',
            'instance_tarantool_binary': '/usr/bin/tarantool',
            'stateboard_tarantool_binary': '/usr/bin/tarantool',
        })
    def test_not_multiversion_tnt_ee(self):
        # No multiversion + EE: entrypoints and tarantool both in dist_dir.
        app_name = 'myapp'
        tnt_version = ''
        instance_vars = {
            'cartridge_multiversion': False,
            'cartridge_run_dir': 'some/run/dir',
            'cartridge_data_dir': 'some/data/dir',
            'cartridge_memtx_dir_parent': None,
            'cartridge_vinyl_dir_parent': None,
            'cartridge_wal_dir_parent': None,
            'cartridge_log_dir_parent': None,
            'dist_dir': 'some/dist/dir',
            'cartridge_app_instances_dir': 'some/instances/dir',
        }
        res = call_get_systemd_units_info(app_name, instance_vars, tnt_version)
        self.assertFalse(res.failed)
        self.assertEqual(res.fact, {
            'stateboard_name': 'myapp-stateboard',
            'app_unit_file': 'myapp@.service',
            'stateboard_unit_file': 'myapp-stateboard.service',
            'instance_work_dir': 'some/data/dir/myapp.%i',
            'stateboard_work_dir': 'some/data/dir/myapp-stateboard',
            'instance_memtx_dir': None,
            'stateboard_memtx_dir': None,
            'instance_vinyl_dir': None,
            'stateboard_vinyl_dir': None,
            'instance_wal_dir': None,
            'stateboard_wal_dir': None,
            'instance_log_file': None,
            'stateboard_log_file': None,
            'instance_pid_file': 'some/run/dir/myapp.%i.pid',
            'stateboard_pid_file': 'some/run/dir/myapp-stateboard.pid',
            'instance_console_sock': 'some/run/dir/myapp.%i.control',
            'stateboard_console_sock': 'some/run/dir/myapp-stateboard.control',
            'instance_entrypoint': 'some/dist/dir/init.lua',
            'stateboard_entrypoint': 'some/dist/dir/stateboard.init.lua',
            'instance_tarantool_binary': 'some/dist/dir/tarantool',
            'stateboard_tarantool_binary': 'some/dist/dir/tarantool',
        })
| 2.125 | 2 |
basic_modules.py | Starry316/VolGradNet | 7 | 12768039 | <reponame>Starry316/VolGradNet
"""
Common basic modules
"""
import torch.nn as nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
    """2-D convolution followed by a leaky-ReLU activation.

    Defaults (kernel 3, stride 2, padding 1) halve the spatial size.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=2, padding=1):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride=stride, padding=padding)

    def forward(self, x):
        # Convolve, then apply the default leaky ReLU.
        return F.leaky_relu(self.conv(x))
class DeConvBlock(nn.Module):
    """Transposed (up-sampling) convolution followed by ReLU.

    Defaults (kernel 4, stride 2, padding 1) double the spatial size.
    """

    def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1):
        super().__init__()
        self.de_conv = nn.ConvTranspose2d(in_channels, out_channels,
                                          kernel_size, stride=stride,
                                          padding=padding)

    def forward(self, x):
        # Up-sample, then apply ReLU.
        return F.relu(self.de_conv(x))
class ResidualBlock(nn.Module):
    """Two convolutions with an additive skip connection.

    The channel count and spatial size are preserved so the input can be
    added to the branch output directly.
    """

    def __init__(self, channels, kernel_size=3, padding=1):
        super().__init__()
        # First conv (with leaky ReLU via ConvBlock), then a plain conv.
        self.conv_1 = ConvBlock(channels, channels, kernel_size, stride=1, padding=padding)
        self.conv_2 = nn.Conv2d(channels, channels, kernel_size, stride=1, padding=padding)

    def forward(self, x):
        residual = self.conv_2(self.conv_1(x))
        return x + residual
class ProcessingBlock(nn.Module):
    """Basic NGPT processing unit: a 1x1 expansion to twice the output
    width followed by a 3x3 projection, each with leaky ReLU.
    """

    def __init__(self, in_channels, out_channels, padding=0):
        super().__init__()
        # 1x1 "expand" convolution doubling the channel budget.
        self.conv1 = nn.Conv2d(in_channels, out_channels * 2, 1, stride=1, padding=padding)
        # 3x3 convolution projecting down to the requested width.
        self.conv2 = nn.Conv2d(out_channels * 2, out_channels, 3, stride=1, padding=1)

    def forward(self, x):
        expanded = F.leaky_relu(self.conv1(x))
        return F.leaky_relu(self.conv2(expanded))
covidtrackerapp.py | Sivakumar001/covid19-tracker | 2 | 12768040 | <filename>covidtrackerapp.py<gh_stars>1-10
"""
COVID TRACKER
A Mini Project made on 10 April 2021 by <NAME>(212218104154).
this project is used to observe the daily count of corona virus
cases happening all around the world sorted by every country.
this applicationreturns all the numerical count of cases,
recovery, deaths and many more.
"""
from tkinter import ttk
from tkinter import *
import time
import requests
from bs4 import BeautifulSoup
class Scraper:
    """Scrape worldometers.info for global and per-country COVID-19 data.

    The page is fetched once in ``__init__``; every other method parses
    the cached BeautifulSoup document.  ``show_specific_continent``
    relies on state populated by ``list_of_countries``, so that method
    must be called first.
    """

    def __init__(self):
        # Download the page and keep the parsed soup plus the main
        # per-country table for the other methods.
        response = requests.get('https://www.worldometers.info/coronavirus/')
        self.soup = BeautifulSoup(response.text, 'html.parser')
        response.close()
        class_name = 'table table-bordered table-hover main_table_countries'
        id_name = 'main_table_countries_today'
        self.country_table = self.soup.find(
            'table', class_=class_name, id=id_name)

    def header_list(self):
        """Return the column header labels of the country table.

        Fixes two defects of the original implementation: a local
        variable is used instead of rebinding ``self.header_list``
        (which shadowed this method and broke any second call), and the
        non-breaking space (U+00A0) is stripped -- the old code searched
        for the literal text '/xa0', which never occurs.
        """
        headers = self.country_table.findAll('th')
        # '\xa0' is the HTML &nbsp; character embedded in header cells.
        return [header.text.replace('\xa0', '') for header in headers]

    def show_total_world_cases(self):
        """Return the worldwide [cases, deaths, recovered] counter texts."""
        self.total_world_cases = self.soup.findAll(
            'div', class_='maincounter-number')
        total_covid_count = []
        for values in self.total_world_cases:
            # The counters are wrapped in newlines; strip them off.
            total_covid_count.append((values.text).strip('\n'))
        return total_covid_count

    def list_of_countries(self):
        """Return the country names and cache the table rows.

        Must be called before ``show_specific_continent`` because it
        populates ``self.total_country_rows`` and ``self.country_list``.
        """
        self.total_country_rows = self.country_table.tbody.findAll('tr')
        self.country_list = []
        # The first 8 rows are world/continent aggregates; skip them.
        for i in range(8, len(self.total_country_rows)):
            self.country_list.append(
                self.total_country_rows[i].findAll('td')[1].text)
        return self.country_list

    def show_specific_continent(self, user_input):
        """Return the statistic cells (as strings) for one country row.

        :param user_input: a country name previously returned by
            ``list_of_countries``.
        """
        total_data = self.total_country_rows[self.country_list.index(
            user_input)].findAll('td')
        specific_country_data = []
        # Columns 2..18 hold the numeric statistics for the country.
        for j in range(2, 19):
            specific_country_data.append(
                f"{total_data[j].string}")
        return specific_country_data
class Tk_Screen:
    """Tkinter front-end: shows a start screen, runs the scraper behind a
    progress bar, then displays world totals and per-country statistics.
    """
    def __init__(self, master):
        '''Creating an Application for visualising the processed data'''
        # Configure the fixed-size 600x600 root window.
        self.master = master
        self.master.geometry('600x600+400+100')
        self.master.title('covid tracker')
        self.master.config(bg='grey')
        self.master.resizable(False, False)
        # frame for mainscreen
        Label(self.master, text='CORONAVIRUS TRACKER',
              width='37', height='2',
              font=('timesnewroman bold', 20), bg='grey').place(x=0, y=0)
        self.frame1 = Frame(self.master, width=400, height=200, bg='grey',
                            relief='groove', bd='2')
        self.frame1.place(x=100, y=200)
        Label(self.frame1, text='An internet connection is required to run this script',
              bg='grey').place(x=60, y=10)
        Label(self.frame1, text='click ok to start the script',
              bg='grey', font='10').place(x=80, y=50)
        self.progress = ttk.Progressbar(self.frame1, orient='horizontal',
                                        length=240, mode='determinate')
        # 'ok' kicks off the scrape; the progress bar is purely cosmetic.
        Button(self.frame1, text='ok', activebackground='green', width='10',
               command=lambda: Tk_Screen.start_progress(self)).place(x=150, y=100)
        self.progress.place(x=75, y=140)
    def start_progress(self):
        # function for webscraping progress
        self.progress['value'] = 30
        self.master.update_idletasks()
        # Blocking network fetch happens inside Scraper.__init__.
        self.example = Scraper()
        # NOTE(review): a bound method is never None, so this branch is
        # unreachable -- the intended "no internet" check never fires.
        if self.example.__init__ is None:
            # NOTE(review): Tk instances have no 'messagebox' attribute and
            # 'from tkinter import *' does not export the messagebox
            # submodule; reaching this line would raise AttributeError.
            self.master.messagebox.showerror(
                'error', 'connect to the internet')
            self.master.destroy()
        else:
            # Animate the bar to 100%, then swap in the data screens.
            self.progress['value'] = 65
            self.master.update_idletasks()
            time.sleep(1)
            self.progress['value'] = 100
            self.master.update_idletasks()
            time.sleep(1)
            self.frame1.destroy()
            Tk_Screen.display_total_count(self)
            Tk_Screen.create_display_components(self)
    def display_total_count(self):
        # display the total covid count
        self.frame2 = Frame(self.master, width=440, height=120,
                            bg='grey', bd='1', relief='raised')
        self.frame2.place(x=80, y=70)
        Label(self.frame2, text='Total cases: ', bg='grey').place(x=110, y=10)
        Label(self.frame2, text='Deaths: ', bg='grey').place(x=110, y=30)
        Label(self.frame2, text='Recovered: ', bg='grey').place(x=110, y=50)
        # Indexes 0/1/2 follow the order returned by show_total_world_cases.
        Label(self.frame2,
              text=f'{self.example.show_total_world_cases()[0]}',
              bg='grey').place(x=240, y=10)
        Label(self.frame2,
              text=f'{self.example.show_total_world_cases()[1]}',
              bg='grey').place(x=240, y=30)
        Label(self.frame2,
              text=f'{self.example.show_total_world_cases()[2]}',
              bg='grey').place(x=240, y=50)
    def create_display_components(self):
        # selection of various countries in the combobox
        self.frame3 = Frame(self.master, width=400, bg='grey',
                            height=380, bd=1, relief='groove')
        self.frame3.place(x=100, y=210)
        # creating combobox
        self.cmbo = ttk.Combobox(
            self.frame3, values=self.example.list_of_countries(),
            state='readonly')
        self.cmbo.current(0)
        self.cmbo.place(x=130, y=10)
        # creating select button
        Button(self.frame3, text='select',
               command=lambda: Tk_Screen.show_numbers(self)).place(x=170, y=40)
        # creating display labels
        self.label_listed_values = [Label(self.frame3, width='10', bg='grey')
                                    for i in range(11)]
        # Header labels come from columns 2..12 of the scraped table.
        label_list = self.example.header_list()
        y_pos = 50
        for i in range(2, 13):
            y_pos += 25
            Label(self.frame3, text=f'{label_list[i]}', bg='grey').place(
                x=40, y=y_pos)
            self.label_listed_values[i - 2].place(x=280, y=y_pos)
    def show_numbers(self):
        # display the values for the selected country
        value = self.example.show_specific_continent(self.cmbo.get())
        for i in range(11):
            self.label_listed_values[i].config(text='{}'.format(value[i]))
if __name__ == '__main__':
    # Entry point: build the Tk root window, hand it to the application
    # controller and start the event loop.
    mainscreen = Tk()
    window_object = Tk_Screen(mainscreen)
    mainscreen.mainloop()
| 3.46875 | 3 |
neuropype/gui/connectNodesDlg.py | ablot/Pinceau | 0 | 12768041 | <filename>neuropype/gui/connectNodesDlg.py<gh_stars>0
#!/usr/bin/env python
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import ui_connectNodes
class connectNodesDlg(QDialog, ui_connectNodes.Ui_connectDialog):
    """PyQt4 dialog for wiring one node's output to another node's input.

    Python 2 / PyQt4 code (uses dict.has_key and QStringList).  The
    dialog reads the node tree from its parent main window (self.mw /
    self.parent().tree) and calls tree.Connect() on demand.
    """
    def __init__(self, parent=None):
        super(connectNodesDlg, self).__init__(parent)
        self.setupUi(self)
        # Parent main window; owns the node tree being edited.
        self.mw = parent
        self.updateComboBox()
        # Old-style PyQt4 signal wiring: refresh the dependent combo
        # boxes whenever a selection changes, and hook up the buttons.
        self.connect(self.showGroupsCheckBox, SIGNAL("stateChanged(int)"),
                     self.updateComboBox)
        self.connect(self.inNodeComboBox, SIGNAL("activated(QString)"),
                     self.updateInputComboBox)
        self.connect(self.outNodeComboBox, SIGNAL("activated(QString)"),
                     self.updateOutputComboBox)
        self.connect(self.connectPushButton, SIGNAL("clicked()"),
                     self.connectNodes)
        self.connect(self.closePushButton, SIGNAL("clicked()"), self.close)
        self.connect(self.inputComboBox, SIGNAL("activated(QString)"),
                     self.updateLabel)
    def connectNodes(self):
        # Collect the current selections and perform the connection.
        p = {}
        p['inNodeName'] = str(self.inNodeComboBox.currentText())
        p['outNodeName'] = str(self.outNodeComboBox.currentText())
        p['inputName'] = str(self.inputComboBox.currentText())
        p['outputName'] = str(self.outputComboBox.currentText())
        p['force'] = bool(self.forceCheckBox.isChecked())
        self.parent().tree.Connect(p['inNodeName'], p['inputName'],
                                   p['outNodeName'],p['outputName'],
                                   force = p['force'])
        # Notify the main window and refresh the "connected to" label.
        self.parent()._treeChanged()
        self.updateLabel(p['inputName'])
    def updateComboBox(self, index = 0):
        # Repopulate both node combo boxes from the tree (sorted), then
        # cascade the refresh to the connector combo boxes.
        self.inNodeComboBox.clear()
        self.outNodeComboBox.clear()
        listNodes = list(self.mw.tree.list_nodes())
        listNodes.sort()
        self.inNodeComboBox.addItems(QStringList(listNodes))
        self.outNodeComboBox.addItems(QStringList(listNodes))
        #setting connector comboBox
        self.updateInputComboBox(self.inNodeComboBox.currentText())
        self.updateOutputComboBox(self.outNodeComboBox.currentText())
    def updateInputComboBox(self, text):
        # Show either the node's input groups or its raw inputs,
        # depending on the "show groups" checkbox.
        node = getattr(self.parent().tree, str(text))
        groups = self.showGroupsCheckBox.isChecked()
        if groups:
            inputs = node._inputGroups.keys()
        else:
            inputs = node.inputs.keys()
        inputs.sort()
        self.inputComboBox.clear()
        self.inputComboBox.addItems(QStringList(inputs))
        self.updateLabel(str(self.inputComboBox.currentText()), node)
    def updateOutputComboBox(self, text):
        # Same as updateInputComboBox, but for the output side.
        node = getattr(self.parent().tree, str(text))
        groups = self.showGroupsCheckBox.isChecked()
        if groups:
            outputs = node._outputGroups.keys()
        else:
            outputs = node.outputs.keys()
        outputs.sort()
        self.outputComboBox.clear()
        self.outputComboBox.addItems(QStringList(outputs))
    def updateLabel(self, inputName, node = None):
        # Refresh the "Connected to ..." label for the selected input.
        inputName = str(inputName)
        if node is None:
            node = getattr(self.parent().tree,
                           str(self.inNodeComboBox.currentText()))
        if self.showGroupsCheckBox.isChecked():
            insource = node._inGrConnection
        else:
            insource = node.inputs
        if not insource.has_key(inputName):
            raise ValueError('Unknown input %s'%inputName)
        # NOTE(review): the 'else None' branch below is unreachable --
        # the has_key check above already raised for missing keys.
        connected = insource[inputName] if insource.has_key(inputName) else None
        if connected is None:
            self.connectedToLabel.setText('Not connected')
        else:
            self.connectedToLabel.setText('Connected to %s of %s'%(connected[1],
                                                                   connected[0]))
| 2.25 | 2 |
tests/test_prepare.py | benoitc/pyuv | 1 | 12768042 |
from common import unittest2
import pyuv
class PrepareTest(unittest2.TestCase):
    """Exercises pyuv.Prepare: the callback must fire exactly once."""

    def test_prepare1(self):
        self.prepare_cb_called = 0

        def on_prepare(handle):
            # Count the invocation, then tear the handle down so the
            # loop has nothing left to run and exits.
            self.prepare_cb_called += 1
            handle.stop()
            handle.close()

        loop = pyuv.Loop.default_loop()
        handle = pyuv.Prepare(loop)
        handle.start(on_prepare)
        loop.run()
        self.assertEqual(self.prepare_cb_called, 1)
if __name__ == '__main__':
    # Run the suite with per-test verbose output.
    unittest2.main(verbosity=2)
| 2.8125 | 3 |
proliantutils/hpssa/manager.py | anta-nok/proliantutils | 0 | 12768043 | <filename>proliantutils/hpssa/manager.py
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import time
import jsonschema
from jsonschema import exceptions as json_schema_exc
from proliantutils import exception
from proliantutils.hpssa import constants
from proliantutils.hpssa import disk_allocator
from proliantutils.hpssa import objects
# Directory containing this module; the RAID request schema ships alongside.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# JSON schema used by validate() to check requested RAID configurations.
RAID_CONFIG_SCHEMA = os.path.join(CURRENT_DIR, "raid_config_schema.json")
def _update_physical_disk_details(raid_config, server):
"""Adds the physical disk details to the RAID configuration passed."""
raid_config['physical_disks'] = []
physical_drives = server.get_physical_drives()
for physical_drive in physical_drives:
physical_drive_dict = physical_drive.get_physical_drive_dict()
raid_config['physical_disks'].append(physical_drive_dict)
def validate(raid_config):
    """Validates the RAID configuration provided.

    This method validates the RAID configuration provided against
    a JSON schema, then checks that each logical disk specifies at
    least the minimum number of physical disks its RAID level needs.

    :param raid_config: The RAID configuration to be validated.
    :raises: InvalidInputError, if validation of the input fails.
    """
    # Use a context manager so the schema file handle is always closed
    # (the previous code leaked it when json.load() raised).
    with open(RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
        raid_config_schema = json.load(raid_schema_fobj)
    try:
        jsonschema.validate(raid_config, raid_config_schema)
    except json_schema_exc.ValidationError as e:
        raise exception.InvalidInputError(e.message)
    for logical_disk in raid_config['logical_disks']:
        # If user has provided 'number_of_physical_disks' or
        # 'physical_disks', validate that they have mentioned at least
        # minimum number of physical disks required for that RAID level.
        raid_level = logical_disk['raid_level']
        min_disks_reqd = constants.RAID_LEVEL_MIN_DISKS[raid_level]
        no_of_disks_specified = None
        if 'number_of_physical_disks' in logical_disk:
            no_of_disks_specified = logical_disk['number_of_physical_disks']
        elif 'physical_disks' in logical_disk:
            no_of_disks_specified = len(logical_disk['physical_disks'])
        if (no_of_disks_specified and
                no_of_disks_specified < min_disks_reqd):
            msg = ("RAID level %(raid_level)s requires at least %(number)s "
                   "disks." % {'raid_level': raid_level,
                               'number': min_disks_reqd})
            raise exception.InvalidInputError(msg)
def _select_controllers_by(server, select_condition, msg):
"""Filters out the hpssa controllers based on the condition.
This method updates the server with only the controller which satisfies
the condition. The controllers which doesn't satisfies the selection
condition will be removed from the list.
:param server: The object containing all the supported hpssa controllers
details.
:param select_condition: A lambda function to select the controllers based
on requirement.
:param msg: A String which describes the controller selection.
:raises exception.HPSSAOperationError, if all the controller are in HBA
mode.
"""
all_controllers = server.controllers
supported_controllers = [c for c in all_controllers if select_condition(c)]
if not supported_controllers:
reason = ("None of the available SSA controllers %(controllers)s "
"have %(msg)s"
% {'controllers': ', '.join([c.id for c in all_controllers]),
'msg': msg})
raise exception.HPSSAOperationError(reason=reason)
server.controllers = supported_controllers
def create_configuration(raid_config):
    """Create a RAID configuration on this server.

    This method creates the given RAID configuration on the
    server based on the input passed.
    :param raid_config: The dictionary containing the requested
        RAID configuration. This data structure should be as follows:
        raid_config = {'logical_disks': [{'raid_level': 1, 'size_gb': 100},
                                         <info-for-logical-disk-2>
                                        ]}
    :returns: the current raid configuration. This is same as raid_config
        with some extra properties like root_device_hint, volume_name,
        controller, physical_disks, etc filled for each logical disk
        after its creation.
    :raises exception.InvalidInputError, if input is invalid.
    :raises exception.HPSSAOperationError, if all the controllers are in HBA
        mode.
    """
    server = objects.Server()
    # Controllers running in HBA mode cannot host RAID volumes; drop them.
    select_controllers = lambda x: not x.properties.get('HBA Mode Enabled',
                                                        False)
    _select_controllers_by(server, select_controllers, 'RAID enabled')
    # Fail fast on malformed requests before touching any hardware.
    validate(raid_config)
    # Make sure we create the large disks first. This is avoid the
    # situation that we avoid giving large disks to smaller requests.
    # For example, consider this:
    #   - two logical disks - LD1(50), LD(100)
    #   - have 4 physical disks - PD1(50), PD2(50), PD3(100), PD4(100)
    #
    # In this case, for RAID1 configuration, if we were to consider
    # LD1 first and allocate PD3 and PD4 for it, then allocation would
    # fail. So follow a particular order for allocation.
    #
    # Also make sure we create the MAX logical_disks the last to make sure
    # we allot only the remaining space available.
    logical_disks_sorted = (
        sorted((x for x in raid_config['logical_disks']
                if x['size_gb'] != "MAX"),
               reverse=True,
               key=lambda x: x['size_gb']) +
        [x for x in raid_config['logical_disks'] if x['size_gb'] == "MAX"])
    if any(logical_disk['share_physical_disks']
           for logical_disk in logical_disks_sorted
           if 'share_physical_disks' in logical_disk):
        logical_disks_sorted = _sort_shared_logical_disks(logical_disks_sorted)
    # We figure out the new disk created by recording the wwns
    # before and after the create, and then figuring out the
    # newly found wwn from it.
    wwns_before_create = set([x.wwn for x in
                              server.get_logical_drives()])
    for logical_disk in logical_disks_sorted:
        # Let the allocator pick physical disks unless the caller
        # pinned them explicitly.
        if 'physical_disks' not in logical_disk:
            disk_allocator.allocate_disks(logical_disk, server,
                                          raid_config)
        controller_id = logical_disk['controller']
        controller = server.get_controller_by_id(controller_id)
        if not controller:
            msg = ("Unable to find controller named '%(controller)s'."
                   " The available controllers are '%(ctrl_list)s'." %
                   {'controller': controller_id,
                    'ctrl_list': ', '.join(
                        [c.id for c in server.controllers])})
            raise exception.InvalidInputError(reason=msg)
        # Verify every explicitly requested physical disk exists on
        # the chosen controller before attempting the create.
        if 'physical_disks' in logical_disk:
            for physical_disk in logical_disk['physical_disks']:
                disk_obj = controller.get_physical_drive_by_id(physical_disk)
                if not disk_obj:
                    msg = ("Unable to find physical disk '%(physical_disk)s' "
                           "on '%(controller)s'" %
                           {'physical_disk': physical_disk,
                            'controller': controller_id})
                    raise exception.InvalidInputError(msg)
        controller.create_logical_drive(logical_disk)
        # Now find the new logical drive created.
        server.refresh()
        wwns_after_create = set([x.wwn for x in
                                 server.get_logical_drives()])
        new_wwn = wwns_after_create - wwns_before_create
        if not new_wwn:
            reason = ("Newly created logical disk with raid_level "
                      "'%(raid_level)s' and size %(size_gb)s GB not "
                      "found." % {'raid_level': logical_disk['raid_level'],
                                  'size_gb': logical_disk['size_gb']})
            raise exception.HPSSAOperationError(reason=reason)
        new_logical_disk = server.get_logical_drive_by_wwn(new_wwn.pop())
        new_log_drive_properties = new_logical_disk.get_logical_drive_dict()
        # Fold the hardware-assigned properties (wwn, volume name, ...)
        # back into the caller's logical disk entry.
        logical_disk.update(new_log_drive_properties)
        wwns_before_create = wwns_after_create.copy()
    _update_physical_disk_details(raid_config, server)
    return raid_config
def _sort_shared_logical_disks(logical_disks):
"""Sort the logical disks based on the following conditions.
When the share_physical_disks is True make sure we create the volume
which needs more disks first. This avoids the situation of insufficient
disks for some logical volume request.
For example,
- two logical disk with number of disks - LD1(3), LD2(4)
- have 4 physical disks
In this case, if we consider LD1 first then LD2 will fail since not
enough disks available to create LD2. So follow a order for allocation
when share_physical_disks is True.
Also RAID1 can share only when there is logical volume with only 2 disks.
So make sure we create RAID 1 first when share_physical_disks is True.
And RAID 1+0 can share only when the logical volume with even number of
disks.
:param logical_disks: 'logical_disks' to be sorted for shared logical
disks.
:returns: the logical disks sorted based the above conditions.
"""
is_shared = (lambda x: True if ('share_physical_disks' in x and
x['share_physical_disks']) else False)
num_of_disks = (lambda x: x['number_of_physical_disks']
if 'number_of_physical_disks' in x else
constants.RAID_LEVEL_MIN_DISKS[x['raid_level']])
# Separate logical disks based on share_physical_disks value.
# 'logical_disks_shared' when share_physical_disks is True and
# 'logical_disks_nonshared' when share_physical_disks is False
logical_disks_shared = []
logical_disks_nonshared = []
for x in logical_disks:
target = (logical_disks_shared if is_shared(x)
else logical_disks_nonshared)
target.append(x)
# Separete logical disks with raid 1 from the 'logical_disks_shared' into
# 'logical_disks_shared_raid1' and remaining as
# 'logical_disks_shared_excl_raid1'.
logical_disks_shared_raid1 = []
logical_disks_shared_excl_raid1 = []
for x in logical_disks_shared:
target = (logical_disks_shared_raid1 if x['raid_level'] == '1'
else logical_disks_shared_excl_raid1)
target.append(x)
# Sort the 'logical_disks_shared' in reverse order based on
# 'number_of_physical_disks' attribute, if provided, otherwise minimum
# disks required to create the logical volume.
logical_disks_shared = sorted(logical_disks_shared_excl_raid1,
reverse=True,
key=num_of_disks)
# Move RAID 1+0 to first in 'logical_disks_shared' when number of physical
# disks needed to create logical volume cannot be shared with odd number of
# disks and disks higher than that of RAID 1+0.
check = True
for x in logical_disks_shared:
if x['raid_level'] == "1+0":
x_num = num_of_disks(x)
for y in logical_disks_shared:
if y['raid_level'] != "1+0":
y_num = num_of_disks(y)
if x_num < y_num:
check = (True if y_num % 2 == 0 else False)
if check:
break
if not check:
logical_disks_shared.remove(x)
logical_disks_shared.insert(0, x)
check = True
# Final 'logical_disks_sorted' list should have non shared logical disks
# first, followed by shared logical disks with RAID 1, and finally by the
# shared logical disks sorted based on number of disks and RAID 1+0
# condition.
logical_disks_sorted = (logical_disks_nonshared +
logical_disks_shared_raid1 +
logical_disks_shared)
return logical_disks_sorted
def delete_configuration():
    """Delete a RAID configuration on this server.

    :returns: the current RAID configuration after deleting all
        the logical disks.
    """
    server = objects.Server()
    # Only controllers with RAID enabled (not in HBA mode) can be touched.
    _select_controllers_by(server,
                           lambda ctrl: not ctrl.properties.get(
                               'HBA Mode Enabled', False),
                           'RAID enabled')
    for controller in server.controllers:
        # Trigger delete only if there is some RAID array, otherwise
        # hpssacli/ssacli will fail saying "no logical drives found.".
        if controller.raid_arrays:
            controller.delete_all_logical_drives()
    return get_configuration()
def get_configuration():
    """Get the current RAID configuration.

    Reads the RAID configuration from the server and returns it as a
    dictionary of the form:
        raid_config = {
            'logical_disks': [{
                'size_gb': 100,
                'raid_level': 1,
                'physical_disks': [
                    '5I:0:1',
                    '5I:0:2'],
                'controller': 'Smart array controller'
                },
            ]
        }

    :returns: the dictionary described above, with a 'physical_disks'
        entry added by _update_physical_disk_details.
    """
    server = objects.Server()
    raid_config = {
        'logical_disks': [drive.get_logical_drive_dict()
                          for drive in server.get_logical_drives()],
    }
    _update_physical_disk_details(raid_config, server)
    return raid_config
def has_erase_completed():
    """Return True when no physical drive has a sanitize erase running.

    :returns: False if any drive still reports 'Erase In Progress',
        True otherwise.
    """
    server = objects.Server()
    drives = server.get_physical_drives()
    # A single drive still erasing means the overall erase is not done.
    return not any(drive.erase_status == 'Erase In Progress'
                   for drive in drives)
def erase_devices():
    """Erase all the drives on this server.

    This method performs sanitize erase on all the supported physical drives
    in this server. This erase cannot be performed on logical drives.
    NOTE(review): despite the ':raises' note below, this body never raises
    HPSSAException itself; controllers lacking sanitize support are only
    reported in the returned summary message.
    :returns: a dictionary of controllers with drives and the erase status.
    :raises exception.HPSSAException, if none of the drives support
        sanitize erase.
    """
    server = objects.Server()
    for controller in server.controllers:
        # Only unassigned drives whose erase_status is 'OK' are eligible.
        drives = [x for x in controller.unassigned_physical_drives
                  if (x.get_physical_drive_dict().get('erase_status', '')
                      == 'OK')]
        if drives:
            controller.erase_devices(drives)
    # Poll every 5 minutes until no drive reports an erase in progress.
    while not has_erase_completed():
        time.sleep(300)
    server.refresh()
    status = {}
    for controller in server.controllers:
        # Per-drive final status, keyed by drive id.
        drive_status = {x.id: x.erase_status
                        for x in controller.unassigned_physical_drives}
        sanitize_supported = controller.properties.get(
            'Sanitize Erase Supported', 'False')
        if sanitize_supported == 'False':
            msg = ("Drives overwritten with zeros because sanitize erase "
                   "is not supported on the controller.")
        else:
            msg = ("Sanitize Erase performed on the disks attached to "
                   "the controller.")
        drive_status.update({'Summary': msg})
        status[controller.id] = drive_status
    return status
| 1.859375 | 2 |
fr-test.py | 0dj0bz/hn-vis | 0 | 12768044 | <gh_stars>0
import sys
import argparse
import uuid
import json
from multiprocessing import Process
import matplotlib.pyplot as plt
import math
from random import random
from numpy import arange
# Cube geometry, apparently intended for a wireframe rendering.
# NOTE(review): 12 vertex entries are listed but `edges` below only
# references indices 0..7, so entries 8-11 are unused here.  Also
# vertices[0] duplicates vertices[4] ((1, -1, 1)); for a unit cube one
# of them is probably meant to be (1, -1, -1) -- verify.
vertices = (
    (1, -1, 1),
    (1, 1, -1),
    (-1, 1, -1),
    (-1, -1, -1),
    (1, -1, 1),
    (1, 1, 1),
    (-1, -1, 1),
    (-1, 1, 1),
    (1, -1, -1),
    (-1, -1, -1),
    (1, -1, 1),
    (1, -1, -1)
    )

# Index pairs into `vertices` forming the cube's 12 edges.
edges = (
    (0,1),
    (0,3),
    (0,4),
    (2,1),
    (2,3),
    (2,7),
    (6,3),
    (6,4),
    (6,7),
    (5,1),
    (5,4),
    (5,7)
    )
#-----------------------------------------------------------------------------
class Edge:
    """A graph edge connecting two Node objects.

    :param edgeId: explicit id; when None, a deterministic id of the
        form 'E_<src.nodeId>_<dst.nodeId>' is derived (src/dst must then
        be provided).
    :param parentGraph: accepted for interface compatibility but unused
        here (vestigial -- Graph.addEdge passes None).
    :param weight: edge weight, defaults to 1.0.
    :param isDirected: whether the edge is directed.
    :param properties: optional dict of arbitrary edge properties.
    """
    def __init__(self, edgeId=None, parentGraph=None, srcNode=None,
                 dstNode=None, weight=1.0, isDirected=False, label=None,
                 properties=None):
        # A mutable default ({}) would be shared across every Edge
        # instance; use None and create a fresh dict per edge instead.
        if properties is None:
            properties = {}
        if edgeId is None:
            self.edgeId = 'E_' + str(srcNode.nodeId) + '_' + str(dstNode.nodeId)
        else:
            self.edgeId = edgeId
        self.src = srcNode
        self.dst = dstNode
        self.label = label
        self.weight = weight
        self.isDirected = isDirected
        self.properties = properties

    def __del__(self):
        # No-op finalizer kept for interface compatibility.
        return
#-----------------------------------------------------------------------------
class Node:
    """A graph node with 2-D layout coordinates.

    :param nodeId: explicit id; when None an id of the form 'N_<uuid4>'
        is generated.
    :param parentGraph: accepted for interface compatibility but unused
        here (vestigial -- Graph.addNode passes None).
    :param label: optional display label.
    :param properties: optional dict of arbitrary node properties.
    """
    def __init__(self, nodeId=None, parentGraph=None, label=None,
                 properties=None):
        # A mutable default ({}) would be shared across every Node
        # instance; use None and create a fresh dict per node instead.
        if properties is None:
            properties = {}
        # Position (x, y) and displacement (dx, dy) used by the
        # force-directed layout in Graph.fruchterman_reingold.
        self.coords = {'x': 0, 'y': 0, 'dx': 0, 'dy': 0}
        if nodeId is None:
            self.nodeId = 'N_' + str(uuid.uuid4())
        else:
            self.nodeId = nodeId
        self.label = label
        self.properties = properties

    def __del__(self):
        # No-op finalizer kept for interface compatibility.
        return
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
class Graph:
    def __init__(self, graphId=None):
        """Create an empty graph; auto-generate a 'G_<uuid4>' id if none given."""
        # self.graph = networkit.graph.Graph()
        if (graphId is None):
            self.graphId = 'G_' + str(uuid.uuid4())
        else:
            self.graphId = graphId
        # self.graph.setName(self.graphId)
        # Registries mapping id -> {'node'/'edge': obj, 'properties': dict}.
        self.nodes = {}
        self.edges = {}
        return
    def __del__(self):
        # Drop the registries so their contents can be collected.
        self.nodes = None
        self.edges = None
        # self.graph = None
        return
def addNode(self, nodeId=None, label=None, properties={}):
n = Node(nodeId=nodeId, parentGraph=None, label=label,
properties=properties)
nDict = {n.nodeId : {'node': n, 'properties': properties}}
self.nodes.update(nDict)
# if (self.graph.numberOfNodes() != len(self.nodes.keys())):
# print("graph / dict out of balance")
return n
def addEdge(self, src=None, dst=None, isDirected=False, weight=1.0,
label=None, properties={}):
e = Edge(parentGraph=None, srcNode=src, dstNode=dst,
weight=weight, isDirected=isDirected, label=label,
properties=properties)
eDict = {e.edgeId : {'edge':e, 'properties':properties}}
self.edges.update(eDict)
return e
# attractive force
def f_a(self, d,k):
return d*d/k
# repulsive force
def f_r(self, d,k):
return k*k/d
def fruchterman_reingold(self, iteration=50):
W = 1
L = 1
area = W*L
k = math.sqrt(area/len(self.nodes)) #nx.number_of_nodes(G))
# initial position
for v in iter(self.nodes): #nx.nodes_iter(G):
# G.node[v]['x'] = W*random()
# G.node[v]['y'] = L*random()
self.nodes[v]['node'].coords['x'] = W * random()
self.nodes[v]['node'].coords['y'] = L *random()
t = W/10
dt = t/(iteration+1)
print("area:{0}".format(area))
print("k:{0}".format(k))
print("t:{0}, dt:{1}".format(t,dt))
for i in range(iteration):
print("iter {0}".format(i))
pos = {}
# for v in G.nodes_iter():
# pos[v] = [G.node[v]['x'],G.node[v]['y']]
for v in iter(self.nodes):
pos[v] = [self.nodes[v]['node'].coords['x'], self.nodes[v]['node'].coords['y']]
# plt.close()
# plt.ylim([-0.1,1.1])
# plt.xlim([-0.1,1.1])
# plt.axis('off')
# TODO: resume work here
# nx.draw_networkx(G,pos=pos,node_size=10,width=0.1,with_labels=False)
# plt.savefig("fig/{0}.png".format(i))
# calculate repulsive forces
for v in iter(self.nodes):
self.nodes[v]['node'].coords['dx'] = 0
self.nodes[v]['node'].coords['dy'] = 0
for u in iter(self.nodes):
if v != u:
dx = self.nodes[v]['node'].coords['x'] - self.nodes[u]['node'].coords['x']
dy = self.nodes[v]['node'].coords['y'] - self.nodes[u]['node'].coords['y']
delta = math.sqrt(dx*dx+dy*dy)
if delta != 0:
d = self.f_r(delta,k)/delta
self.nodes[v]['node'].coords['dx'] += dx*d
self.nodes[v]['node'].coords['dy'] += dy*d
# calculate attractive forces
for e in iter(self.edges):
v = self.edges[e]['edge'].dst
u = self.edges[e]['edge'].src
dx = v.coords['x'] - u.coords['x']
dy = v.coords['y'] - u.coords['y']
delta = math.sqrt(dx*dx+dy*dy)
if delta != 0:
d = self.f_a(delta,k)/delta
ddx = dx*d
ddy = dy*d
v.coords['dx'] += -ddx
u.coords['dx'] += +ddx
v.coords['dy'] += -ddy
u.coords['dy'] += +ddy
# limit the maximum displacement to the temperature t
# and then prevent from being displace outside frame
for v in iter(self.nodes):
dx = self.nodes[v]['node'].coords['dx']
dy = self.nodes[v]['node'].coords['dy']
disp = math.sqrt(dx*dx+dy*dy)
if disp != 0:
# cnt += 1
d = min(disp,t)/disp
x = self.nodes[v]['node'].coords['x'] + dx*d
y = self.nodes[v]['node'].coords['y'] + dy*d
x = min(W,max(0,x)) - W/2
y = min(L,max(0,y)) - L/2
self.nodes[v]['node'].coords['x'] = min(math.sqrt(W*W/4-y*y),max(-math.sqrt(W*W/4-y*y),x)) + W/2
self.nodes[v]['node'].coords['y'] = min(math.sqrt(L*L/4-x*x),max(-math.sqrt(L*L/4-x*x),y)) + L/2
# cooling
t -= dt
pos = {}
for v in iter(self.nodes):
pos[v] = [self.nodes[v]['node'].coords['x'],self.nodes[v]['node'].coords['y']]
# plt.close()
# plt.ylim([-0.1,1.1])
# plt.xlim([-0.1,1.1])
# plt.axis('off')
# nx.draw_networkx(G,pos=pos,node_size=10,width=0.1,with_labels=False)
# plt.savefig("fig/{0}.png".format(i+1))
return pos
if __name__ == "__main__":
    # Demo: build an 11-node ring graph and run the force-directed layout.
    # `edges` was previously referenced without ever being defined
    # (NameError at runtime); define a sample edge list so the demo runs.
    edges = [(i, (i + 1) % 11) for i in range(11)]
    g = Graph()
    print(edges[0][0], edges[0][1])
    for i in range(0, 11):
        g.addNode(str(i))
    print(g.nodes[str(0)])
    for j in range(0, 11):
        g.addEdge(src=g.nodes[str(edges[j][0])]['node'], dst=g.nodes[str(edges[j][1])]['node'])
    fg = g.fruchterman_reingold()
    for e in edges:
        print('edge: ', e[0], '->', e[1], ': ', fg[str(e[0])], ', ', fg[str(e[1])])
| 2.296875 | 2 |
# notes/c20170904h2/visualise.py | tommilligan/dailyprogrammer | 0 | 12768045
#!/usr/bin/env python
"""
Generate visualisations for the main script
"""
from io import BytesIO
import sys
from PIL import Image, ImageDraw
import dailyprogrammer.challenges.c20170904e2 as challenge
from dailyprogrammer.utils.logging import moduleLogger
logger = moduleLogger(__name__)
# Sentinel slope value used to mark vertical hull lines (see main()).
INF = float('inf')
def getY(line, x):
    """Return the integer y coordinate of *line* (slope, intercept) at *x*."""
    slope, intercept = line
    return int(slope * x + intercept)
def main(circles, scale=50, units=10, offset=(0, 0)):
    """Render circles, their convex-hull tangent lines, and the minimum
    bounding box, writing the PNG bytes to stdout.

    :param circles: iterable of (x, y, r) tuples
    :param scale: pixels per drawing unit
    :param units: width/height of the square canvas, in units
    :param offset: (dx, dy) translation applied to every circle centre so
        that all coordinates end up positive
    """
    # Translate circles so they all fall in the positive quadrant.
    circles = list((x + offset[0], y + offset[1], r) for x, y, r in circles)
    dimensions = (units * scale, ) * 2
    im = Image.new("RGB", dimensions, (255, 255, 255))
    draw = ImageDraw.Draw(im)
    # Draw circles (logger.warn is a deprecated alias of logger.warning).
    logger.warning("Drawing circles")
    for circle in circles:
        x, y, r = circle
        coords = [i * scale for i in (x - r, y - r, x + r, y + r)]
        draw.ellipse(coords, outline=(127, 127, 127))
    # Draw the hull's tangent lines across the whole canvas.
    logger.warning("Drawing tangent lines")
    hull = challenge.convexHullDisks(circles)
    for line in hull:
        m, c = line
        c = c * scale
        xs = [-im.width, im.width]
        if abs(m) != INF:
            points = list((x, getY((m, c), x)) for x in xs)
        else:
            # Vertical line: constant x at the scaled intercept.
            points = list((c, y) for y in xs)
        draw.line(points, fill=(255, 0, 0), width=1)
    # Draw the smallest bounding box as a closed polygon.
    logger.warning("Drawing box")
    box = challenge.minimumBounding(circles)
    for i in range(len(box)):
        a = box[i]
        b = box[(i + 1) % len(box)]  # wrap around to close the polygon
        points = list(tuple(p * scale for p in point) for point in (a, b))
        draw.line(points, fill=(0, 0, 255), width=1)
    # Image rows grow downwards; flip so y points mathematically up.
    im = im.transpose(Image.FLIP_TOP_BOTTOM)
    # Write the encoded PNG to stdout.
    imagefile = BytesIO()
    im.save(imagefile, "PNG")
    imagedata = imagefile.getvalue()
    sys.stdout.buffer.write(imagedata)
if __name__ == "__main__":
    # challenge input (disabled); note the third keyword was a typo —
    # main() takes `units`, not `main`:
    #main([(1,1,2), (2,2,0.5), (-1,-3,2), (5,2,1)],
    #     scale=25,
    #     units=20,
    #     offset=(9, 10))
    # demo
    main([(3, 3, 1), (3, 6, 1), (4, 7, 1.5), (8, 6, 0.5), (7, 5, 0.75), (5, 4.5, 2), (5.5, 3.5, 0.125)])
| 3.265625 | 3 |
# src/color_selector.py | khushi-411/air-handwriting-detection-and-recognition | 1 | 12768046
import cv2
def color_detector():
    """Open an OpenCV window with six HSV trackbars (upper/lower bounds)
    for interactively selecting a colour range."""
    def on_change(value):
        # OpenCV requires a callback for each trackbar; no action needed.
        print("")

    window = "Color detectors"
    cv2.namedWindow(window)
    # (trackbar name, initial value, maximum value)
    sliders = (
        ("Upper Hue", 153, 180),
        ("Upper Saturation", 255, 255),
        ("Upper Value", 255, 255),
        ("Lower Hue", 64, 180),
        ("Lower Saturation", 72, 255),
        ("Lower Value", 49, 255),
    )
    for name, initial, maximum in sliders:
        cv2.createTrackbar(name, window, initial, maximum, on_change)
| 3.453125 | 3 |
# python_utils/__about__.py | mgorny/python-utils | 0 | 12768047
__package_name__ = 'python-utils'
# Package metadata consumed by the build/packaging tooling.
__version__ = '2.5.0'
# NOTE(review): '<NAME>' / '<EMAIL>' look like anonymisation placeholders —
# restore the real author details before releasing.
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
__description__ = (
    'Python Utils is a module with some convenient utilities not included '
    'with the standard Python install')
__url__ = 'https://github.com/WoLpH/python-utils'
| 1.117188 | 1 |
prokat/engine/migrations/0001_initial.py | mackevich/prokat | 0 | 12768048 | <gh_stars>0
# Generated by Django 2.1.2 on 2018-10-23 13:58
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    """Initial schema for the `engine` app.

    Creates two models:
    - Category: an MPTT tree (lft/rght/tree_id/level are MPTT bookkeeping
      columns) with a self-referential nullable `parent` link.
    - Goods: sellable items, each belonging to one Category.

    Auto-generated by Django's `makemigrations`; avoid hand-editing once
    this migration has been applied anywhere.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
                ('description', models.CharField(max_length=300)),
                ('created', models.DateTimeField(auto_now=True)),
                ('ranking', models.DecimalField(decimal_places=0, default=0, max_digits=100)),
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='engine.Category')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Goods',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('description', models.CharField(max_length=300)),
                ('price', models.FloatField(default=0.0)),
                ('created', models.DateTimeField(auto_now=True)),
                ('ranking', models.DecimalField(decimal_places=0, default=0, max_digits=100)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='engine.Category')),
            ],
        ),
    ]
| 1.75 | 2 |
tests/test_init.py | ryanmcfarland/flask-kdb | 0 | 12768049 | <reponame>ryanmcfarland/flask-kdb<filename>tests/test_init.py
import pytest
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from flask import Flask
from flask_kdb import FlaskKDB
# Based heavily on - https://github.com/jidn/flask-obscure/blob/master/tests/test_init.py
# Connection details of the kdb+ process the tests talk to.
# NOTE(review): these tests appear to assume a kdb+ server is already
# listening on localhost:5000 — they will fail without one; confirm.
defaults = {
    "host":"localhost",
    "port":5000
}
def make_app(host=None, port=None):
    """Build a Flask app; set KDB_HOST/KDB_PORT only when both are given."""
    app = Flask(__name__)
    if host and port:
        app.config.update(KDB_HOST=host, KDB_PORT=port)
    return app
def test_kdb_from_config():
    # KDB settings placed in app.config at construction time should be
    # picked up and used for the synchronous query.
    app = make_app(host=defaults['host'], port=defaults['port'])
    kdb = FlaskKDB(app)
    assert kdb.sendSync("5+5") == 10
def test_kdb_initapp():
    # The deferred init_app() pattern should behave the same as passing
    # the app to the FlaskKDB constructor.
    app = make_app(host=defaults['host'], port=defaults['port'])
    kdb = FlaskKDB()
    kdb.init_app(app)
    assert kdb.sendSync("4+4") == 8
# Below test uses the same kdb socket when sending queries
# test.q output:
#"23:14:52.788 | Opening | 5"
#"23:14:52.791 | sync | 5 | 3+2"
#"23:14:52.793 | sync | 5 | 3+3"
#"23:14:52.794 | Closing | 5"
def test_multi_apps():
    """One FlaskKDB extension registered on two apps serves both.

    Per the comment above, both apps are expected to reuse the same kdb
    socket. (The final assert previously had dataset-separator junk fused
    onto the line, making it a syntax error; removed.)
    """
    app1 = make_app(host=defaults['host'], port=defaults['port'])
    app2 = make_app(host=defaults['host'], port=defaults['port'])
    kdb = FlaskKDB()
    kdb.init_app(app1)
    kdb.init_app(app2)

    @app1.route("/<qry>")
    def index1(qry):
        # Forward the URL fragment to kdb as a synchronous query.
        return str(kdb.sendSync(qry))

    @app2.route("/<qry>")
    def index2(qry):
        return str(kdb.sendSync(qry))

    with app1.test_client() as go:
        rv = go.get("/3+2")
        assert 200 == rv.status_code
        assert 5 == int(rv.data)
    with app2.test_client() as go:
        rv = go.get("/3+3")
        assert 200 == rv.status_code
        assert 6 == int(rv.data)
src/ast_toolbox/mcts/AdaptiveStressTestingRandomSeed.py | hdelecki/AdaptiveStressTestingToolbox | 29 | 12768050 | <gh_stars>10-100
import copy
import gym
import ast_toolbox.mcts.RNGWrapper as RNG
from ast_toolbox.mcts.AdaptiveStressTesting import AdaptiveStressTest
class AdaptiveStressTestRS(AdaptiveStressTest):
    """AST variant for MCTS whose actions are random-seed generator states.

    Parameters
    ----------
    kwargs :
        Forwarded to `ast_toolbox.mcts.AdaptiveStressTesting.AdaptiveStressTest`.
    """

    def __init__(self, **kwargs):
        super(AdaptiveStressTestRS, self).__init__(**kwargs)
        self.rsg = RNG.RSG(self.params.rsg_length, self.params.init_seed)
        self.initial_rsg = copy.deepcopy(self.rsg)

    def reset_rsg(self):
        """Restore the random-seed generator to its initial state."""
        self.rsg = copy.deepcopy(self.initial_rsg)

    def _next_seed_action(self):
        """Advance the RSG and wrap a snapshot of it as an AST action."""
        self.rsg.next()
        snapshot = copy.deepcopy(self.rsg)
        return ASTRSAction(action=snapshot, env=self.env)

    def random_action(self):
        """Sample a rollout action.

        Returns
        ----------
        action : :py:class:`ast_toolbox.mcts.AdaptiveStressTestingRandomSeed.ASTRSAction`
            The sampled action.
        """
        return self._next_seed_action()

    def explore_action(self, s, tree):
        """Sample an exploration action (`s` and `tree` are unused here).

        Returns
        ----------
        action : :py:class:`ast_toolbox.mcts.AdaptiveStressTestingRandomSeed.ASTRSAction`
            The sampled action.
        """
        return self._next_seed_action()
class ASTRSAction:
    """An AST action wrapping a random-seed generator state.

    Parameters
    ----------
    action :
        The random seed (an RSG state snapshot).
    env : :py:class:`ast_toolbox.envs.go_explore_ast_env.GoExploreASTEnv`
        The environment.
    """

    def __init__(self, action, env):
        self.env = env
        self.action = action

    def __hash__(self):
        """Hash by the wrapped seed so equal actions hash equally.

        Returns
        ----------
        hash : int
            The hashing result.
        """
        return hash(self.action)

    def __eq__(self, other):
        """Compare actions by their wrapped seeds.

        Returns NotImplemented for non-ASTRSAction operands so Python can
        fall back to the other operand's comparison; previously this raised
        AttributeError when `other` had no `.action`.

        Returns
        ----------
        is_equal : bool
            Whether the two actions are equal.
        """
        if not isinstance(other, ASTRSAction):
            return NotImplemented
        return self.action == other.action

    def get(self):
        """Resolve the stored seed into a concrete environment action.

        Returns
        ----------
        action :
            The true action used in the env.
        """
        rng_state = self.action.state
        # TODO: a better approach to make use of random seeds of length > 1
        action_seed = int(rng_state[0])
        if isinstance(self.env.action_space, gym.spaces.Space):
            # env.action_space may create a new space object on each access,
            # so seed right before sampling to make the sample deterministic
            # for a given seed.
            action_space = self.env.action_space
            action_space.seed(action_seed)
            true_action = action_space.sample()
        else:
            true_action = action_seed
        return true_action
| 2.515625 | 3 |