max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
src/core/utils.py | saiblo/saiblo-local-judger | 4 | 12772151 | <gh_stars>1-10
def bytes2int(data: bytes) -> int:
    """Decode *data* as a big-endian, two's-complement signed integer."""
    value = int.from_bytes(data, "big", signed=True)
    return value
def int2bytes(x: int, length: int = 4) -> bytes:
    """Encode *x* as a big-endian, two's-complement signed integer.

    :param x: value to encode; must fit in *length* bytes, otherwise
        ``OverflowError`` is raised by ``int.to_bytes``.
    :param length: width of the result in bytes. Defaults to 4, which
        was previously hard-coded; the parameter generalizes the helper
        while staying backward compatible with existing callers.
    :return: big-endian signed byte representation of *x*.
    """
    return x.to_bytes(length, byteorder="big", signed=True)
| 2.71875 | 3 |
Python32/sounds.py | andersonsilvade/python_C | 0 | 12772152 | import pygame.mixer
sounds = pygame.mixer
sounds.init()
def espera_tocar(canal):
    """Block until *canal* (a pygame mixer Channel) finishes playing.

    The original implementation spun in a tight ``while/pass`` loop,
    pinning a CPU core for the whole duration of the clip; sleeping
    briefly between polls makes the wait essentially free while the
    observable behavior (return once the channel is idle) is unchanged.
    """
    import time  # local import keeps this self-contained snippet runnable as-is
    while canal.get_busy():
        time.sleep(0.01)
# Play four effects back to back; espera_tocar() blocks on the Channel
# object returned by play(), so each clip finishes before the next starts.
# The .wav files are expected in the current working directory.
s= sounds.Sound("heartbeat.wav")
espera_tocar(s.play())
s2 = sounds.Sound("buzz.wav")
espera_tocar(s2.play())
s3 = sounds.Sound("ohno.wav")
espera_tocar(s3.play())
s4 = sounds.Sound("carhorn.wav")
espera_tocar(s4.play())
| 2.40625 | 2 |
setup.py | iNouvellie/social-network-rusa | 0 | 12772153 | <reponame>iNouvellie/social-network-rusa<filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
"""
@author: <NAME>
@contact: https://vk.com/python273
@license Apache License, Version 2.0, see LICENSE file
Copyright (C) 2017
"""
# Package metadata for distutils.
# NOTE(review): the surrounding repo is "social-network-rusa" but this
# setup() describes the upstream vk_api package -- it looks like a
# vendored/copied setup.py; confirm whether the name/version are intended.
setup(
    name='vk_api',
    version='8.3.1',
    author='python273',
    author_email='<EMAIL>',
    url='https://github.com/python273/vk_api',
    description='Module for writing scripts for vk.com (vkontakte)',
    download_url='https://github.com/python273/vk_api/archive/master.zip',
    license='Apache License, Version 2.0, see LICENSE file',
    packages=['vk_api', 'jconfig'],
    # enum34 backports enum to Python < 3.4 (consistent with the 2.7
    # classifier below).
    install_requires=['requests', 'enum34'],
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Programming Language :: Python :: Implementation :: CPython',
    ]
)
| 1.398438 | 1 |
runner/job/run.py | perrette/runner | 2 | 12772154 | """Run model ensemble
The canonical form of `job run` is:
job run [OPTIONS] -- EXECUTABLE [OPTIONS]
where `EXECUTABLE` is your model executable or a command, followed by its
arguments. Note the `--` that separates `job run` arguments `OPTIONS` from the
executable. When there is no ambiguity in the command-line arguments (as seen
by python's argparse) it may be dropped. `job run` options determine in which
manner to run the model, which parameter values to vary (the ensemble), and how
to communicate these parameter values to the model.
"""
examples="""
Examples
--------
job run -p a=2,3,4 b=0,1 -o out --shell -- echo --a {a} --b {b} --out {}
--a 2 --b 0 --out out/0
--a 2 --b 1 --out out/1
--a 3 --b 0 --out out/2
--a 3 --b 1 --out out/3
--a 4 --b 0 --out out/4
--a 4 --b 1 --out out/5
The command above runs an ensemble of 6 model versions, by calling `echo --a {a}
--b {b} --out {}` where `{a}`, `{b}` and `{}` are formatted at runtime with
parameter and run directory values, as displayed in the output above. Parameters can also be provided as a file:
job run -p a=2,3,4 b=0,1 -o out --file-name "params.txt" --file-type "linesep" --line-sep " " --shell cat {}/params.txt
a 2
b 0
a 2
b 1
a 3
b 0
a 3
b 1
a 4
b 0
a 4
b 1
Where UNIX `cat` command displays file content into the terminal. File types
that involve grouping, such as namelist, require a group prefix with a `.`
separator in the parameter name:
job run -p g1.a=0,1 g2.b=2. -o out --file-name "params.txt" --file-type "namelist" --shell cat {}/params.txt
&g1
a = 0
/
&g2
b = 2.0
/
&g1
a = 1
/
&g2
b = 2.0
/
"""
import argparse
import tempfile
import numpy as np
from runner.param import MultiParam, DiscreteParam
from runner.model import Model
#from runner.xparams import XParams
from runner.xrun import XParams, XRun, XPARAM
from runner.job.model import interface
from runner.job.config import ParserIO, program
import os
EXPCONFIG = 'experiment.json'
EXPDIR = 'out'
# run
# ---
def parse_slurm_array_indices(a):
    """Expand a slurm ``sbatch --array``-style spec into a list of ints.

    *a* is a comma-separated list of items, each either a single index
    (``"3"``) or an inclusive range ``"START-STOP"`` with an optional
    ``":STEP"`` suffix, e.g. ``"0,2,4"`` or ``"0-4:2"``.
    """
    indices = []
    for token in a.split(","):
        if "-" not in token:
            indices.append(int(token))
            continue
        span, _, step_txt = token.partition(":")
        step = int(step_txt) if step_txt else 1
        first, last = (int(part) for part in span.split("-"))
        # STOP is inclusive in sbatch syntax, hence the +1.
        indices.extend(range(first, last + 1, step))
    return indices
def _typechecker(type):
def check(string):
try:
type(string) # just a check
except Exception as error:
print('ERROR:', str(error))
raise
return string
submit = argparse.ArgumentParser(add_help=False)
grp = submit.add_argument_group("simulation modes")
#grp.add_argument('--batch-script', help='')
#x = grp.add_mutually_exclusive_group()
grp.add_argument('--max-workers', type=int,
help="number of workers for parallel processing (need to be allocated, e.g. via sbatch) -- default to the number of runs")
grp.add_argument('-t', '--timeout', type=float, default=31536000, help='timeout in seconds (default to %(default)s)')
grp.add_argument('--shell', action='store_true',
help='print output to terminal instead of log file, run sequentially, mostly useful for testing/debugging')
grp.add_argument('--echo', action='store_true',
help='display commands instead of running them (but does setup output directory). Alias for --shell --force echo [model args ...]')
#grp.add_argument('-b', '--array', action='store_true',
# help='submit using sbatch --array (faster!), EXPERIMENTAL)')
grp.add_argument('-f', '--force', action='store_true',
help='perform run even if params.txt already exists directory')
folders = argparse.ArgumentParser(add_help=False)
grp = folders.add_argument_group("simulation settings")
grp.add_argument('-o','--out-dir', default=EXPDIR, dest='expdir',
help='experiment directory \
(params.txt and logs/ will be created, as well as individual model output directories')
grp.add_argument('-a','--auto-dir', action='store_true',
help='run directory named according to parameter values instead of run `id`')
params_parser = argparse.ArgumentParser(add_help=False)
x = params_parser.add_mutually_exclusive_group()
x.add_argument('-p', '--params',
type=DiscreteParam.parse,
help="""Param values to combine.
SPEC specifies discrete parameter values
as a comma-separated list `VALUE[,VALUE...]`
or a range `START:STOP:N`.""",
metavar="NAME=SPEC",
nargs='*')
x.add_argument('-i','--params-file', help='ensemble parameters file')
x.add_argument('--continue', dest="continue_simu", action='store_true',
help=argparse.SUPPRESS)
#help='load params.txt from simulation directory')
params_parser.add_argument('-j','--id', type=_typechecker(parse_slurm_array_indices), dest='runid',
metavar="I,J...,START-STOP:STEP,...",
help='select one or several ensemble members (0-based !), \
slurm sbatch --array syntax, e.g. `0,2,4` or `0-4:2` \
or a combination of these, `0,2,4,5` <==> `0-4:2,5`')
params_parser.add_argument('--include-default',
action='store_true',
help='also run default model version (with no parameters)')
#grp = output_parser.add_argument_group("model output",
# description='model output variables')
#grp.add_argument("-v", "--output-variables", nargs='+', default=[],
# help='list of state variables to include in output.txt')
#
#grp.add_argument('-l', '--likelihood',
# type=ScipyParam.parse,
# help='distribution, to compute weights',
# metavar="NAME=DIST",
# default = [],
# nargs='+')
parser = argparse.ArgumentParser(parents=[interface.parser, params_parser, folders, submit], epilog=examples, description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
runio = interface.join(ParserIO(folders)) # interface + folder: saveit
@program(parser)
def main(o):
    """Entry point for ``job run``: build the parameter ensemble described
    by the parsed argparse namespace *o* and execute the model over it.
    """
    # --echo is an alias for "--shell --force" with `echo` prepended to
    # the model command, so commands are displayed instead of executed.
    if o.echo:
        o.model = ['echo'] + o.model
        o.shell = True
        o.force = True
    model = Model(interface.get(o))
    pfile = os.path.join(o.expdir, XPARAM)
    # --continue reuses the params.txt already present in the experiment dir.
    if o.continue_simu:
        o.params_file = pfile
        o.force = True
    # Resolve the ensemble: explicit file > -p factorial product > empty
    # (default model only).
    if o.params_file:
        xparams = XParams.read(o.params_file)
    elif o.params:
        prior = MultiParam(o.params)
        xparams = prior.product()  # only product allowed as direct input
        #update = {p.name:p.value for p in o.params}
    else:
        xparams = XParams(np.empty((0,0)), names=[])
        o.include_default = True
    xrun = XRun(model, xparams, expdir=o.expdir, autodir=o.auto_dir, max_workers=o.max_workers, timeout=o.timeout)
    # create dir, write params.txt file, as well as experiment configuration
    try:
        if not o.continue_simu:
            xrun.setup(force=o.force)
    except RuntimeError as error:
        print("ERROR :: "+str(error))
        print("Use -f/--force to bypass this check")
        parser.exit(1)
    #write_config(vars(o), os.path.join(o.expdir, EXPCONFIG), parser=experiment)
    # Persist the resolved configuration alongside the experiment output.
    runio.dump(o, open(os.path.join(o.expdir, EXPCONFIG),'w'))
    # Select the subset of ensemble members to run (all by default);
    # `None` stands for the default (parameter-free) model version.
    if o.runid:
        indices = parse_slurm_array_indices(o.runid)
    else:
        indices = np.arange(xparams.size)
    if o.include_default:
        indices = list(indices) + [None]
    # test: run everything serially
    if o.shell:
        for i in indices:
            xrun[i].run(background=False)
    # the default
    else:
        xrun.run(indices=indices)
    return
# Register this sub-command on the shared `job` command-line front end.
main.register('run', help='run model (single version or ensemble)')

if __name__ == '__main__':
    main()
| 2.3125 | 2 |
7_snmp/code/discover.py | lluxury/P_U_S_A | 0 | 12772155 | <gh_stars>0
#!/usr/bin/env python
from processing import Process, Queue, Pool
import time
import subprocess
from IPy import IP
import sys
from snmp import Snmp
q = Queue()
oq = Queue()
#ips = IP("10.0.1.0/24")
ips = ["172.16.31.10", "172.16.17.32", "172.16.31.10","192.168.3.11",
"192.168.1.1"]
num_workers = 10
class HostRecord(object):
    """Record for Hosts discovered on the network."""

    def __init__(self, ip=None, mac=None, snmp_response=None):
        # All fields default to None so a record can be filled in
        # incrementally as discovery progresses.
        self.ip = ip
        self.mac = mac
        self.snmp_response = snmp_response

    def __repr__(self):
        fields = (self.ip, self.mac, self.snmp_response)
        return "[Host Record('%s','%s','%s')]" % fields
def f(i,q,oq):
while True:
time.sleep(.1)
if q.empty():
sys.exit()
print "Process Number: %s Exit" % i
ip = q.get()
print "Process Number: %s" % i
ret = subprocess.call("ping -c 1 %s" % ip,
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
if ret == 0:
print "%s: is alive" % ip
oq.put(ip)
else:
print "Process Number: %s didn't find a response for %s " % (i, ip)
pass
def snmp_query(i,out):
    """SNMP worker *i*: take live IPs off queue *out* and query each one,
    wrapping the result in a HostRecord.

    (Python 2 code, companion to the ping worker ``f`` above.)
    """
    while True:
        time.sleep(.1)
        if out.empty():
            sys.exit()
        print "Process Number: %s" % i
        ipaddr = out.get()
        s = Snmp()
        h = HostRecord()
        h.ip = ipaddr
        h.snmp_response = s.query()
        print h
        # NOTE(review): this return ends the worker after a single address,
        # so the while-loop runs at most once per process -- possibly
        # unintended; confirm before changing.
        return h
# NOTE(review): try/finally means the workers are spawned even if
# putmany() raises -- "finally" is probably not what was intended here;
# confirm before changing.
try:
    q.putmany(ips)
finally:
    # Fan out ping workers and SNMP workers as separate processes.
    for i in range(num_workers):
        p = Process(target=f, args=[i,q,oq])
        p.start()
    for i in range(num_workers):
        pp = Process(target=snmp_query, args=[i,oq])
        pp.start()
    print "main process joins on queue"
    # NOTE(review): only the *last* ping worker is joined; the other
    # 2*num_workers-1 processes are left to finish on their own.
    p.join()
    #while not oq.empty():
    #    print "Validated", oq.get()
    print "Main Program finished"
| 2.515625 | 3 |
app.py | dpjmv/spotify-mode-percentage | 0 | 12772156 | <filename>app.py
import spotipy
import sys
import spotipy.util as util
import os
def connectToSpotify(username):
    """
    Authenticate *username* against the Spotify Web API (prompting for an
    OAuth token if needed) and exit the program if no token is obtained.

    :param username:
        Username of the user to interact with.
    :returns:
        A spotipy.Spotify object that can then be used to interact
        with the Spotify Web API
    """
    # Scopes needed to read (and potentially modify) the user's playlists.
    scopes = 'playlist-read-private playlist-read-collaborative ' + \
        'playlist-modify-public playlist-modify-private'
    token = util.prompt_for_user_token(username, scopes)
    if not token:
        print(f"No token for user {username}")
        sys.exit()
    return spotipy.Spotify(auth=token)
def getTrackIds(sp, username, playlist, offset=0):
    """Return the ids of the tracks contained in a playlist.

    Pages through the playlist 100 tracks at a time (the API page size),
    recursing with an increased *offset* until every page is fetched.

    :param sp: spotipy.Spotify client used for the requests.
    :param username: owner of the playlist.
    :param playlist: playlist dict; only its "id" key is used.
    :param offset: index of the first track to fetch (used internally
        while recursing through subsequent pages).
    :returns: flat list of all track id strings in the playlist.
    """
    limit = 100
    fields = "items(track(id)), total"
    page = sp.user_playlist_tracks(username,
        playlist["id"], fields, limit=limit, offset=offset)
    ids = [item["track"]["id"] for item in page["items"]]
    # More tracks beyond this page? Fetch them and append in order.
    if page["total"] > limit + offset:
        ids.extend(getTrackIds(sp, username, playlist, offset + limit))
    return ids
def getPlaylists(sp, username, offset=0):
    """Retrieve all the playlists a user has.

    Pages through the account 50 playlists at a time (the API page
    size), recursing with an increased *offset* until all are fetched.

    :param sp: spotipy.Spotify client used for the requests.
    :param username: user whose playlists are retrieved.
    :param offset: index of the first playlist to fetch (used internally
        while recursing).
    :returns: list of playlist dicts, in API order.
    """
    limit = 50
    page = sp.user_playlists(username, limit, offset)
    playlists = list(page["items"])
    # Keep fetching while the account reports more playlists than seen so far.
    if page["total"] > limit + offset:
        playlists.extend(getPlaylists(sp, username, offset + limit))
    return playlists
def getPlaylistsByName(sp, username, playlist_names):
    """Return the user's playlists whose name appears in *playlist_names*."""
    return [pl for pl in getPlaylists(sp, username)
            if pl["name"] in playlist_names]
def getAudioFeatures(sp, tracks):
    """Fetch audio features for every id in *tracks*, batching requests
    50 ids at a time (the Spotify API limit per call).

    Returns the feature dicts concatenated in the same order as *tracks*.
    """
    features = []
    batch_start = 0
    while len(features) < len(tracks):
        batch = tracks[batch_start:batch_start + 50]
        features += sp.audio_features(batch)
        batch_start += 50
    return features
def main():
    """Compute the major/minor mode split of the configured playlists and
    print the percentages."""
    # Playlists to analyse; they have to be among your saved playlists.
    playlist_names = ["Ω"]
    # Username comes from argv[1], falling back to the environment.
    username = os.getenv("SPFY_MODE_USERNAME")
    if len(sys.argv) > 1:
        username = sys.argv[1]
    elif username:
        pass
    else:
        print(f"Usage: {sys.argv[0]} username")
        sys.exit()
    # Connect to spotify
    sp = connectToSpotify(username)
    # Gather all track ids from the matching playlists.
    playlists = getPlaylistsByName(sp, username, playlist_names)
    tracks = []
    for playlist in playlists:
        tracks = tracks + getTrackIds(sp, username, playlist)
    track_with_features = getAudioFeatures(sp, tracks)
    # Tally modes: Spotify encodes minor as 0 and major as 1.
    count_by_mode = {"minor": 0, "major": 0}
    for track in track_with_features:
        mode = track["mode"]
        if mode == 0:
            count_by_mode["minor"] += 1
        elif mode == 1:
            count_by_mode["major"] += 1
    # NOTE(review): raises ZeroDivisionError when no matching playlist /
    # tracks were found -- confirm whether that case should be handled.
    coefficient = 100 / len(tracks)
    perc_major = count_by_mode["major"] * coefficient
    perc_minor = count_by_mode["minor"] * coefficient
    print(f"Reparition of modes in playlists {playlist_names}:")
    print(f"- Major: {round(perc_major)}%")
    print(f"- Minor: {round(perc_minor)}%")
if __name__ == "__main__":
    # Guard the entry point: the bare `main()` call ran the whole Spotify
    # workflow (auth prompt, API calls) as a side effect of importing this
    # module. Script behavior is unchanged.
    main()
t_10_brief_tour_of_the_standard_library/t_10_8_dates_and_times/main.py | naokiur/Python-tutorial | 0 | 12772157 | from datetime import date
# Demonstrate datetime.date: today's date, custom strftime formatting,
# and date subtraction (which yields a timedelta).
now = date.today()
print(now)
print(now.strftime("%m-%d-%y. %d %b %Y is a %A on the %d day of %B"))
past = date(2003, 12, 2)
print(past)
print(past.strftime("%m-%d-%y. %d %b %Y is a %A on the %d day of %B"))
birthday = date(1964, 7, 31)
age = now - birthday  # timedelta between the two dates
print(age.days)
| 4 | 4 |
intern/bale_mkt.py | zaqwes8811/smart-vocabulary-cards | 0 | 12772158 | # type-mkt: interfaces
"""
TODO(zaqwes): Move from here
Словарь макетов:
String - абстрактная строка
Int(int) - целое число
! Комментарии расставлять как показано
под именами классов и интерфейсов. Это используется при разборе
макета
Делемма:
Для Python тип не важен, для Java важен, как быть с макетом?
Ровнять по Java?
Так же по другому происходит комментирование.
class Name !! no comments
/*(? это предположение)* /!! в длинных комментах
"""
"""
Dict:
ресурс - файл, сокет, ресурс по ссылке, cgi-скрипт, etc.
Use cases:
@use case
@use case
@use case
Notes:
Данные можно получать и прямо по ссылкам, и по готовому тексту
Так как макет интерфейсный, то по всем java классам и интерфейсам
должны генерится отдельные файлы
Thinks:
Есть ли в Java деструкторы или финалайзеры?
Есть ли смысл в разделении захвата ресурса и преобразовании? Пока
не используется преобразование по частям. А если использовать по частям?
Наследование интерфейсов
http:#www.javainception.ru/index.php/interfeisi/interfeisi/lmnozhestvennoe-nasledovanier-v-java.html
http:#www.javaportal.ru/java/articles/mnj.html - in depth
Перенести навигационную часть в dals or not? В dals конкретные запросы, или карты тоже,
там сейчас есть dir_walker.py,
и он вполне ценен и, кажется, на месте. Наверное стоит. Но пример множественного наследования
можно оставить.
Что если объединить обработку и навигацию? Появляется избыточность, если обработка данных не нужна
Что если разделить? Возможно будет возникать повторное открытие ресурсов
А что если использовать множественное наследование интерфейсов?
Troubles:
Возможно требуется двойной (и более) захват ресурса, что может быть накладно.
Это особенно сильно проявляется при делении на получение карты ресурса и его обработку.
Может таки объединить классы, тогда в алгоритм может быть таким:
1. Захват вершины
2. Получение карты ресурса
3. "Ходим" по карте и что-то делаем
4. Отпускаем ресурс
Usage:
Java:
interface IAll extend/*(? это предположение)* / strange_things.ExtractorTextFromResource, strange_things.ExtracitorResourcePlan
class MixedClass2 implements strange_things.ExtractorTextFromResource, strange_things.ExtracitorResourcePlan
"""
interface strange_things.ExtracitorResourcePlan
""" Как бы навигационная часть """
# Спорный вызов. Как он будет использоваться при множественном наследовании?
# Захватывает точку входа в ресурс (например, файл базы данных)
strange_things.ErrorContainer captureRoot(String urlRoot)
# Error _freeRoot(); # должен вызывать в деструкторе
# @param url информация о том, как можно добраться до вершины ресурса
# @return карта, по которой можно передвигаться
strange_things.GraphToText getPlan(String url, strange_things.ErrorContainer err)
strange_things.GraphToText getPlanFromText(String url, strange_things.ErrorContainer err)
# @return просто список "адресов"
# Обобщить дробилку с рекурсией! Она использовалась для разбора страницы Курсеры
List<String> getListAddresses(String url, strange_things.ErrorContainer err)
# Выделяем адреса без подключения, у нас есть текст, который доставерно
# отражает план ресурса
List<String> extractListAddressesFromText(String text, strange_things.ErrorContainer err)
interface strange_things.ExtractorTextFromResource
""" Соединяется с ресурсом и преобразовывает его в текст.
Thinks:
Не забывать освободить ресурсы(о реализации), но как быть при множ. наследовании
"""
# Выделяет весь текст из ресурса по некоторому адресу
# @param url в переменной все данные, чтобы ресурс мог быть открыт
String extract(String url, strange_things.ErrorContainer err)
# Получить из ресурса сразу список единиц контента - предложений, например
List<String> contentItemsToList(String url, strange_things.ErrorContainer err)
interface strange_things.TextToText
# @param text зашумленнй текст, например html код
# @return чистый текст
# ! не возможно разрешить перегрузку
String testToText(String text, strange_things.ErrorContainer err)
# Получить сразу список единиц контента
List<String> contentItemsToList(String text, strange_things.ErrorContainer err)
interface strange_things.ErrorContainer
"""
Thinks:
Как я понял в Java нельзя передать примитив по ссылке, исключениями для обработки ошибок
пользоваться не хочу - исключения для исключительных ситуаций.
Можно как-то вернуть tuple, но похоже с python это не склеить
"""
String what()
int getErrCode()
| 2.734375 | 3 |
tests/communities/test_cli.py | lhenze/invenio-communities | 0 | 12772159 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2021 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Tests for the CLI."""
from faker import Faker
from invenio_communities.fixtures.demo import create_fake_community
from invenio_communities.fixtures.tasks import create_demo_community
def test_fake_demo_community_creation(
    app, db, location, es_clear, community_type_record
):
    """Assert that demo community creation works without failing."""
    # The arguments are pytest fixtures that stand up a running Invenio
    # app with a clean DB and search index; the test only checks that
    # building and storing a faked community raises no exception.
    faker = Faker()
    create_demo_community(create_fake_community(faker))
| 1.742188 | 2 |
microblog/accounts/migrations/0008_auto_20200929_2146.py | ShrayankM/Django-MicroBlog | 0 | 12772160 | <reponame>ShrayankM/Django-MicroBlog
# Generated by Django 3.1.1 on 2020-09-29 16:16
from django.db import migrations
import smartfields.fields
class Migration(migrations.Migration):
    # Auto-generated by Django (see header comment): switches the
    # ``profile_pic`` field of ``userprofile`` to a smartfields
    # ImageField. Generated migrations should not be edited by hand
    # beyond comments.

    dependencies = [
        ('accounts', '0007_auto_20200927_0603'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='profile_pic',
            field=smartfields.fields.ImageField(blank=True, null=True, upload_to='profilepics/'),
        ),
    ]
| 1.65625 | 2 |
user_interface/test/test_dakota_class.py | ukaea/ALC_UQ | 2 | 12772161 | from dakota_class import DakotaClass
from exceptions import *
import unittest
import xarray as xr
import numpy as np
import os
class TestDakotaClass(unittest.TestCase):
    """Unit tests for DakotaClass: template defaults, settings and
    variable injection, input-file writing, and validation failures."""

    # Try and create an instance of the dakota class
    def test_create_dakota_template(self):
        """A fresh template carries the expected default attributes."""
        my_dakota = DakotaClass()
        self.assertEqual( my_dakota.dakota.get_attribute('evaluation_concurrency'), 1 )
        self.assertEqual( my_dakota.dakota.get_attribute('response_functions'), 1 )

    def test_add_run_settings(self):
        """update_settings copies dataset attrs onto the dakota object."""
        attrs = { 'sample_type':'sampling', 'seed':54 }
        new_settings = xr.Dataset(attrs=attrs)
        my_dakota = DakotaClass()
        my_dakota.update_settings(new_settings)
        self.assertEqual( my_dakota.dakota.get_attribute('sample_type').strip(), 'sampling' )
        self.assertEqual( my_dakota.dakota.get_attribute('seed'), 54 )

    def test_add_common_variable(self):
        """A 'normal' variable stores its means and std deviations."""
        attrs = { 'type':'normal' }
        means = [ 1.0,2.0,3.0,4.0 ]
        sds = [ 0.1,0.2,0.3,0.4 ]
        means = xr.DataArray( data=means, dims='T' )
        sds = xr.DataArray( data=sds, dims='T' )
        test_var = xr.Dataset( {'means':means, 'std_deviations':sds }, attrs=attrs )
        my_dakota = DakotaClass()
        my_dakota.add_variable('test_var', test_var)
        self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('means'), means ) )
        self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('std_deviations'), sds ) )

    def test_add_lognormal_variable(self):
        """A 'lognormal' variable stores its means and std deviations."""
        attrs = { 'type':'lognormal' }
        means = [ 1.0,2.0,3.0,4.0 ]
        sds = [ 0.1,0.2,0.3,0.4 ]
        means = xr.DataArray( data=means, dims='T' )
        sds = xr.DataArray( data=sds, dims='T' )
        test_var = xr.Dataset( {'means':means, 'std_deviations':sds }, attrs=attrs )
        my_dakota = DakotaClass()
        my_dakota.add_variable('test_var', test_var)
        self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('means'), means ) )
        self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('std_deviations'), sds ) )

    def test_add_scan_variable(self):
        """A 'scan' variable stores its bounds unchanged."""
        attrs = { 'type':'scan' }
        lower = [ 0.1,0.2,0.3,0.4 ]
        upper = [ 1.0,2.0,3.0,4.0 ]
        partitions = [ 2,3,4,5 ]
        lower = xr.DataArray( data=lower, dims='T' )
        upper = xr.DataArray( data=upper, dims='T' )
        partitions = xr.DataArray( data=partitions, dims='T' )
        test_var = xr.Dataset( {'lower_bounds':lower, 'upper_bounds':upper, 'partitions':partitions }, attrs=attrs )
        my_dakota = DakotaClass()
        my_dakota.add_variable('test_var', test_var)
        self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('lower_bounds'), lower ) )
        self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('upper_bounds'), upper ) )

    def test_add_correlated_scan_variable(self):
        """A correlated scan (equal partitions) is normalized to [0, 1]."""
        attrs = { 'type':'scan_correlated' }
        lower = [ 0.1,0.2,0.3,0.4 ]
        upper = [ 1.0,2.0,3.0,4.0 ]
        partitions = [ 4,4,4,4 ]
        lower = xr.DataArray( data=lower, dims='T' )
        upper = xr.DataArray( data=upper, dims='T' )
        partitions = xr.DataArray( data=partitions, dims='T' )
        test_var = xr.Dataset( {'lower_bounds':lower, 'upper_bounds':upper, 'partitions':partitions }, attrs=attrs )
        my_dakota = DakotaClass()
        my_dakota.add_variable('test_var', test_var)
        self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('lower_bounds'), [0.0] ) )
        self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('upper_bounds'), [1.0] ) )

    def test_write_dakote_file(self):  # (sic: "dakote" -- renaming would alter the public test id)
        """write_input_file creates the requested file on disk."""
        my_dakota = DakotaClass()
        my_dakota.write_input_file('test_dakota.dat')
        self.assertTrue( os.path.isfile('test_dakota.dat') )
        os.remove('test_dakota.dat')

    ######################################################
    # FAILURE TESTS
    ######################################################

    def test_add_variable_not_dataset(self):
        """A plain dict (not an xarray Dataset) is rejected."""
        means = [ 1.0,2.0,3.0,4.0 ]
        sds = [ 0.1,0.2,0.3,0.4 ]
        test_var = {'means':means, 'std_deviations':sds }
        my_dakota = DakotaClass()
        with self.assertRaises(DatasetError):
            my_dakota.add_variable('test_var', test_var)

    def test_add_variable_with_no_type(self):
        """A Dataset without a 'type' attribute is rejected."""
        means = [ 1.0,2.0,3.0,4.0 ]
        sds = [ 0.1,0.2,0.3,0.4 ]
        means = xr.DataArray( data=means, dims='T' )
        sds = xr.DataArray( data=sds, dims='T' )
        test_var = xr.Dataset( {'means':means, 'std_deviations':sds } )
        my_dakota = DakotaClass()
        with self.assertRaises(DatasetError):
            my_dakota.add_variable('test_var', test_var)

    def test_add_variable_unknown_type(self):
        """An unrecognized 'type' attribute is rejected."""
        attrs = { 'type':'unknown' }
        means = [ 1.0,2.0,3.0,4.0 ]
        sds = [ 0.1,0.2,0.3,0.4 ]
        test_var = xr.Dataset( {'means':means, 'std_deviations':sds }, attrs=attrs )
        my_dakota = DakotaClass()
        with self.assertRaises(DatasetError):
            my_dakota.add_variable('test_var', test_var)

    def test_add_variable_missing_data(self):
        """A 'normal' variable without std_deviations is rejected."""
        attrs = { 'type':'normal' }
        means = [ 1.0,2.0,3.0,4.0 ]
        test_var = xr.Dataset( {'means':means}, attrs=attrs )
        my_dakota = DakotaClass()
        with self.assertRaises(DatasetError):
            my_dakota.add_variable('test_var', test_var)

    def test_add_variable_incompatible_data(self):
        """Mismatched lengths of means and std_deviations are rejected."""
        attrs = { 'type':'normal' }
        means = [ 1.0,2.0,3.0,4.0 ]
        sds = [ 0.1,0.2,0.3,0.4,0.5 ]
        test_var = xr.Dataset( {'means':means, 'std_deviations':sds}, attrs=attrs )
        my_dakota = DakotaClass()
        with self.assertRaises(DatasetError):
            my_dakota.add_variable('test_var', test_var)

    def test_add_variable_with_nans(self):
        """NaN entries in the data are rejected."""
        attrs = { 'type':'normal' }
        means = [ 1.0,2.0,np.nan,4.0 ]
        sds = [ 0.1,0.2,0.3,0.4 ]
        test_var = xr.Dataset( {'means':means, 'std_deviations':sds}, attrs=attrs )
        my_dakota = DakotaClass()
        with self.assertRaises(DatasetError):
            my_dakota.add_variable('test_var', test_var)

    def test_add_correlated_scan_variable_with_inconsistent_partitions(self):
        """Correlated scans require identical partition counts."""
        attrs = { 'type':'scan_correlated' }
        lower = [ 0.1,0.2,0.3,0.4 ]
        upper = [ 1.0,2.0,3.0,4.0 ]
        partitions = [ 4,5,4,4 ]
        lower = xr.DataArray( data=lower, dims='T' )
        upper = xr.DataArray( data=upper, dims='T' )
        partitions = xr.DataArray( data=partitions, dims='T' )
        test_var = xr.Dataset( {'lower_bounds':lower, 'upper_bounds':upper, 'partitions':partitions }, attrs=attrs )
        my_dakota = DakotaClass()
        with self.assertRaises(DatasetError):
            my_dakota.add_variable('test_var', test_var)
| 2.609375 | 3 |
zope/MimeTypeItem.py | nuxeo-cps/zope2--PortalTransforms | 0 | 12772162 | <reponame>nuxeo-cps/zope2--PortalTransforms
from Acquisition import Implicit
from OFS.SimpleItem import Item
from AccessControl import ClassSecurityInfo
from Globals import Persistent, InitializeClass
try:
from Products.CMFCore.permissions import ManagePortal
except ImportError: # BBB: CMF 1.4
from Products.CMFCore.CMFCorePermissions import ManagePortal
from Products.PortalTransforms.interfaces import imimetype
from Products.PortalTransforms.MimeTypeItem import mimetype
__revision__ = '$Id$'
class MimeTypeItem(mimetype, Persistent, Implicit, Item):
    """ A mimetype object to be managed inside the mimetypes tool """
    security = ClassSecurityInfo()
    __implements__ = (imimetype,)

    # Attributes inherited from `mimetype`; declared readable by anyone.
    security.declarePublic('name')
    security.declarePublic('major')
    security.declarePublic('minor')
    security.declarePublic('normalized')

    security.declareProtected(ManagePortal, 'edit')
    def edit(self, name, mimetypes, extensions, icon_path, binary=0,
             REQUEST=None):
        """edit this mime type"""
        # if mimetypes and extensions are string instead of lists, split them on new lines
        # (the type(u'') check keeps this Python 2 compatible).
        if type(mimetypes) in (type(''), type(u'')):
            mimetypes = [mts.strip() for mts in mimetypes.split('\n') if mts.strip()]
        if type(extensions) in (type(''), type(u'')):
            extensions = [mts.strip() for mts in extensions.split('\n') if mts.strip()]
        # `id` and `__name__` are the Zope/OFS identifiers of this item.
        self.__name__ = self.id = name
        self.mimetypes = mimetypes
        self.extensions = extensions
        self.binary = binary
        self.icon_path = icon_path
        # When invoked through the web, redirect back to the management view.
        if REQUEST is not None:
            REQUEST['RESPONSE'].redirect(self.absolute_url()+'/manage_main')

# Set up Zope security declarations on the class.
InitializeClass(MimeTypeItem)
| 1.8125 | 2 |
testplan/exporters/testing/xml/__init__.py | Morgan-Stanley/Testplan | 0 | 12772163 | <gh_stars>0
"""
XML Export logic for test reports.
"""
import os
import pathlib
import shutil
import socket
from collections import Counter
from typing import Generator, List, Dict, Union
from lxml import etree
from lxml.etree import Element
from lxml.builder import E # pylint: disable=no-name-in-module
from testplan.common.config import ConfigOption
from testplan.common.exporters import ExporterConfig
from testplan.common.utils.path import unique_name
from testplan.common.utils.strings import slugify
from testplan.report import (
TestReport,
TestCaseReport,
TestGroupReport,
ReportCategories,
Status,
)
from testplan.report.testing.base import Report
from ..base import Exporter
class BaseRenderer:
"""
Base renderer, renders a test group report with the following structure:
.. code-block:: python
TestGroupReport(name=..., category='<test-category>')
TestGroupReport(name=..., category='testsuite')
TestCaseReport(name=...) (failing)
RawAssertion (dict form)
TestCaseReport(name=...) (passing)
TestCaseReport(name=...) (passing)
"""
def render(self, source: TestGroupReport) -> Element:
"""
Renders each suite separately and groups them within `testsuites` tag.
:param source: Testplan report
:return: testsuites element
"""
testsuites = []
counter = Counter({})
for index, suite_report in enumerate(source):
counter += suite_report.counter
suite_elem = self.render_testsuite(index, source, suite_report)
testsuites.append(suite_elem)
return E.testsuites(
*testsuites,
tests=str(counter["total"]),
errors=str(counter["error"]),
failures=str(counter["failed"])
)
def get_testcase_reports(
self,
testsuite_report: Report,
) -> Generator[TestCaseReport, None, None]:
"""
Generator function to yield testcases from a suite report recursively.
:param testsuite_report: Testplan report
:return: generator to produce all testcases
"""
for child in testsuite_report:
if isinstance(child, TestCaseReport):
yield child
elif isinstance(child, TestGroupReport):
# Recurse - yield each of the testcases in this group.
for testcase in self.get_testcase_reports(child):
yield testcase
else:
raise TypeError("Unsupported report type: {}".format(child))
def render_testsuite(
self, index, test_report, testsuite_report
) -> Element:
"""
Renders a single testsuite with its testcases within a `testsuite` tag.
:param index: index of the testsuite as item in Testplan report
:param test_report: Testplan report
:param testsuite_report: testsuite level report
:return: testsuite element
"""
cases = [
self.render_testcase(
test_report, testsuite_report, testcase_report
)
for testcase_report in self.get_testcase_reports(testsuite_report)
]
return E.testsuite(
*cases,
hostname=socket.gethostname(),
id=str(index),
package="{}:{}".format(test_report.name, testsuite_report.name),
name=testsuite_report.name,
errors=str(testsuite_report.counter["error"]),
failures=str(testsuite_report.counter["failed"]),
tests=str(testsuite_report.counter["total"])
)
def render_testcase(
self,
test_report: TestReport,
testsuite_report: TestGroupReport,
testcase_report: TestCaseReport,
) -> Element:
"""
Renders a testcase with errors & failures within a `testcase` tag.
:param test_report: Testplan report
:param testsuite_report: testsuite level report
:param testcase_report: testcase level report
:return: testcase element
"""
# the xsd for junit only allows errors OR failures not both
if testcase_report.status == Status.ERROR:
details = self.render_testcase_errors(testcase_report)
elif testcase_report.status == Status.FAILED:
details = self.render_testcase_failures(testcase_report)
else:
details = []
return E.testcase(
*details,
name=testcase_report.name,
classname="{}:{}:{}".format(
test_report.name, testsuite_report.name, testcase_report.name
),
time=str(testcase_report.timer["run"].elapsed)
if "run" in testcase_report.timer
else "0"
)
def render_testcase_errors(
self,
testcase_report: TestCaseReport,
) -> List[Element]:
"""
Creates an `error` tag holding information via testcase report logs.
:param testcase_report: testcase level report
:return: error element
"""
return [
E.error(message=log["message"])
for log in testcase_report.logs
if log["levelname"] == "ERROR"
]
    def render_testcase_failures(
        self,
        testcase_report: TestCaseReport,
    ) -> List[Element]:
        """
        Create a ``failure`` element for every failing assertion of the
        testcase.

        :param testcase_report: testcase level report
        :return: list of ``failure`` elements, one per failing assertion
        """
        # Depth does not matter, we just need entries in flat form;
        # flattened_entries yields (depth, entry) pairs, so zip(*...) and
        # index [1] selects just the entry dicts.
        flat_dicts = list(zip(*testcase_report.flattened_entries(depth=0)))[1]

        failed_assertions = [
            entry
            for entry in flat_dicts
            # Only get failing assertions
            if entry["meta_type"] == "assertion" and not entry["passed"] and
            # Groups have no use in XML output
            not entry["type"] in ("Group", "Summary")
        ]
        failures = []
        for entry in failed_assertions:
            failure = E.failure(
                message=entry["description"] or entry["type"], type="assertion"
            )
            # Raw assertion content is free-form text, so wrap it in CDATA
            # to keep the XML well-formed.
            if entry["type"] == "RawAssertion":
                failure.text = etree.CDATA(entry["content"])
            failures.append(failure)
        return failures
class MultiTestRenderer(BaseRenderer):
    """
    Renderer for MultiTest source reports, which have the structure:

    .. code-block:: python

      TestGroupReport(name=..., category='multitest')
          TestGroupReport(name=..., category='testsuite')
              TestCaseReport(name=...)
                  Assertion entry (dict)
                  Assertion entry (dict)
              TestGroupReport(name='...', category='parametrization')
                  TestCaseReport(name=...)
                      Assertion entry (dict)
                  TestCaseReport(name=...)
                      Assertion entry (dict)

    Testcases nested inside parametrization groups are flattened, so the
    final XML contains them as siblings of plain testcases.
    """

    def get_testcase_reports(
        self, testsuite_report: Union[TestCaseReport, TestGroupReport]
    ) -> List[TestCaseReport]:
        """
        Collect every testcase-level report from a testsuite, flattening
        parametrization groups into their individual testcases.

        :param testsuite_report: testsuite-level report to traverse
        :raises TypeError: on a child that is neither a testcase report nor
            a parametrization group
        :return: flat list of testcase reports
        """
        collected = []
        for entry in testsuite_report:
            if isinstance(entry, TestCaseReport):
                collected.append(entry)
                continue
            is_param_group = (
                isinstance(entry, TestGroupReport)
                and entry.category == ReportCategories.PARAMETRIZATION
            )
            if not is_param_group:
                raise TypeError("Unsupported report type: {}".format(entry))
            collected.extend(entry.entries)
        return collected
class XMLExporterConfig(ExporterConfig):
    """
    Configuration object for
    :py:class:`<~testplan.exporters.testing.xml.XMLExporter>`.
    """
    @classmethod
    def get_options(cls):
        # Single required option: target directory for the generated XML.
        return {ConfigOption("xml_dir"): str}
class XMLExporter(Exporter):
    """
    Exporter subclass for handling XML. Produces one XML file per each child
    of TestPlanReport (e.g. Multitest reports).

    :param xml_dir: Directory for saving xml reports.
    """

    CONFIG: XMLExporterConfig = XMLExporterConfig

    # Category-specific renderers; anything unlisted falls back to BaseRenderer.
    renderer_map: Dict[ReportCategories, BaseRenderer] = {
        ReportCategories.MULTITEST: MultiTestRenderer
    }

    def __init__(self, name="XML exporter", **options):
        super(XMLExporter, self).__init__(name=name, **options)

    def export(self, source: TestReport) -> str:
        """
        Create one XML file per child report (e.g. per MultiTest) inside the
        configured ``xml_dir``, recreating the directory from scratch.

        :param source: top-level Testplan report
        :return: path of the directory containing the XML files
        """
        xml_dir = pathlib.Path(self.cfg.xml_dir).resolve()

        # Start from a clean directory so no stale reports linger.
        if xml_dir.exists():
            shutil.rmtree(xml_dir)
        xml_dir.mkdir(parents=True, exist_ok=True)

        files = set(os.listdir(xml_dir))
        for child_report in source:
            filename = "{}.xml".format(slugify(child_report.name))
            filename = unique_name(filename, files)
            files.add(filename)
            file_path = xml_dir / filename

            # NOTE(review): a report carrying an ``xml_string`` attribute is
            # assumed to originate from an already JUnit-compatible XML file,
            # so its contents are written out verbatim instead of being
            # re-rendered -- confirm no other producer sets this attribute.
            if hasattr(child_report, "xml_string"):
                # Bug fix: write explicitly as UTF-8.  The rendered files in
                # the else-branch declare a UTF-8 encoding; relying on the
                # platform default encoding here could corrupt non-ASCII
                # content on non-UTF-8 locales.
                with open(file_path, "w", encoding="utf-8") as xml_target:
                    xml_target.write(child_report.xml_string)
            else:
                renderer = self.renderer_map.get(
                    child_report.category, BaseRenderer
                )()
                element = etree.ElementTree(renderer.render(child_report))
                element.write(
                    str(file_path),
                    pretty_print=True,
                    xml_declaration=True,
                    encoding="utf-8",
                )

        self.logger.exporter_info(
            "%s XML files created at %s", len(source), xml_dir
        )
        return str(xml_dir)
| 2.40625 | 2 |
docs/_static/demos/io/DownloadMmtfFilesDemo.ipynb.py | sbliven/mmtf-pyspark | 59 | 12772164 |
# coding: utf-8
# # Download Mmtf Files Demo
#
# Example of downloading a list of PDB entries from [RCSB]("http://mmtf.rcsb.org")
#
# ## Imports
# In[9]:
from pyspark import SparkConf, SparkContext
from mmtfPyspark.io import mmtfReader
from mmtfPyspark.structureViewer import view_structure
# ## Configure Spark
# In[10]:
conf = SparkConf().setMaster("local[*]") .setAppName("DownloadMMTFFiles")
sc = SparkContext(conf = conf)
# ## Download a list of PDB entries using MMTF web services
# In[11]:
pdbIds = ['1AQ1','1B38','1B39','1BUH']
pdb = mmtfReader.download_mmtf_files(pdbIds, sc)
# ## Count the number of entires downloaded
# In[12]:
count = pdb.count()
print(f'number of entries downloaded : {count}')
# ## Visualize Structures
# In[13]:
structures = pdb.keys().collect()
view_structure(structures, style = 'line')
# ## Terminate Spark
# In[14]:
sc.stop()
| 2.34375 | 2 |
myo.py | hassanyf/myo-emg-python | 4 | 12772165 | <filename>myo.py
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 7 15:21:26 2018

@author: hassanyf and nabeelyousfi

Train a small dense neural network to classify gestures from 8-channel
Myo EMG readings stored in ``dataset.csv`` (8 feature columns + 1 label).
"""
import pandas as pd
import keras

# Load the EMG dataset: columns 0-7 are the 8 EMG channels, column 8 the label.
dataset = pd.read_csv('dataset.csv')
X = dataset.iloc[:, [0,1,2,3,4,5,6,7]].values
y = dataset.iloc[:, 8].values

# Encode the categorical gesture labels as integers, then as one-hot vectors.
from sklearn.preprocessing import LabelEncoder
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
from keras.utils import to_categorical
y_binary = to_categorical(y)

# Splitting the dataset into the Training set and Test set.
# Bug fix: sklearn.cross_validation was removed in scikit-learn 0.20;
# model_selection is the supported home of train_test_split.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_binary, test_size = 0.2, random_state = 0)

# Standardise features (fit on the training set only to avoid leakage).
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense

# Initialising the ANN
classifier = Sequential()

# Input layer + first hidden layer (8 EMG channels in, 6 units).
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 8))

# Adding the second hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))

# Output layer: 4 mutually exclusive classes, so use softmax.
# Bug fix: sigmoid does not produce a probability distribution over the
# classes and mismatches categorical_crossentropy for single-label data.
classifier.add(Dense(units = 4, kernel_initializer = 'uniform', activation = 'softmax'))

# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)

# Part 3 - Making predictions and evaluating the model

# Predicting the Test set results; threshold at 0.5 to binarise.
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)

# Confusion matrix over the argmax class of truth vs prediction.
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test.argmax(axis=1), y_pred.argmax(axis=1))
| 3.09375 | 3 |
src/explainer/task.py | saromanov/explainer | 0 | 12772166 | from typing import List
from parse import parse_explain
from db import explain, rows_count
from parse import Analyzer
from serializer import Serializer
class Task(Serializer):
    ''' Defines a query as a benchmarking task: an SQL statement to
    EXPLAIN-analyze a fixed number of times.
    '''
    def __init__(self, parent_title, title, query, *args, **kwargs):
        '''
        :param parent_title: title of the owning group/suite
        :param title: human-readable title of this task
        :param query: SQL statement to analyze
        :keyword rows_count: initial row count (refreshed by run() when a
            ``table`` is given)
        :keyword table: optional table name whose row count is recorded
        :keyword times: how many times to EXPLAIN the query (default 10)
        '''
        self.query = query
        self.rows_count = kwargs.get('rows_count', 0)
        self.table = kwargs.get('table')
        self._parent_title = parent_title
        self._title = title
        self._times = kwargs.get('times', 10)

    def title(self) -> str:
        return self._title

    def parent_title(self) -> str:
        return self._parent_title

    def __str__(self) -> str:
        return 'Title: {0}\nQuery: {1}\n Times:{2}'.format(self._title, self.query, self._times)

    def run(self, session) -> List[Analyzer]:
        '''Run EXPLAIN ``times`` times and parse each resulting plan.

        Refreshes ``rows_count`` from the database when a table is set.
        '''
        if self.table:
            # Bug fix: this previously assigned ``self._rows_count``,
            # leaving the public ``rows_count`` attribute set in __init__
            # permanently stale.
            self.rows_count = rows_count(session, self.table)
        return [parse_explain(self._title, explain(session, self.query)) for x in range(self._times)]
| 2.765625 | 3 |
scripts/webscraping/youtube/youtube_first_page_titles.py | iluxonchik/python-general-repo | 0 | 12772167 | from urllib.request import urlopen
from bs4 import BeautifulSoup
import sys, io

# Windows console uses the cp437 encoding, which only supports 256 characters,
# which means that some unicode chars can't be rendered, so one quick fix is
# to escape those chars and print their actual code instead of rendering them.
# backslashreplace = replace with backslashed escape sequences (escape unsupported chars)
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,'cp437','backslashreplace')

# Expect exactly one argument: the search query (quote it if it has spaces).
if (len(sys.argv) != 2):
    print("Usage: python youtube_first_page_titles.py <search query>")
    sys.exit(-1)

# Spaces become '+' for the URL query string.
query = sys.argv[1].replace(" ", "+")
html = urlopen("https://www.youtube.com/results?search_query=" + query)
bsObj = BeautifulSoup(html, "html.parser")
# Video result links on the page carry the yt-uix-tile-link class.
titles = bsObj.findAll("a", {"class":"yt-uix-tile-link"})
for title in titles:
    print(title['title'])
src/main/migrations/0017_auto_20170625_1622.py | shashankmohabia/gymkhana-master | 1 | 12772168 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-25 10:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a mandatory ``mentor`` FK to Club and adjust the on-delete
    behaviour of the Club gallery and Society mentor foreign keys."""

    dependencies = [
        ('oauth', '0002_auto_20170612_1258'),
        ('main', '0016_auto_20170625_1315'),
    ]

    operations = [
        # New required FK; default=3 back-fills existing rows with the
        # UserProfile of pk 3, after which the default is dropped
        # (preserve_default=False).
        migrations.AddField(
            model_name='club',
            name='mentor',
            field=models.ForeignKey(default=3, on_delete=django.db.models.deletion.CASCADE, related_name='cmentor', to='oauth.UserProfile'),
            preserve_default=False,
        ),
        # Gallery becomes optional and survives gallery deletion (SET_NULL).
        migrations.AlterField(
            model_name='club',
            name='gallery',
            field=models.ForeignKey(blank=True, help_text='Select a gallery to link to this club.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='photologue.Gallery'),
        ),
        migrations.AlterField(
            model_name='society',
            name='mentor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='smentor', to='oauth.UserProfile'),
        ),
    ]
| 1.710938 | 2 |
photos/admin.py | martinmandina/Photo-Gallery | 0 | 12772169 | <reponame>martinmandina/Photo-Gallery
from django.contrib import admin

from .models import Category,Location,Image,Photo

# Expose the gallery models in the Django admin, registering them in the
# same order as before: Location, Category, Image, Photo.
for model in (Location, Category, Image, Photo):
    admin.site.register(model)
| 1.40625 | 1 |
src/semantics/autotype_inferencer.py | RodroVMS/cool-compiler-2022 | 0 | 12772170 | <reponame>RodroVMS/cool-compiler-2022
from parsing.parsing_rules import p_param
import semantics.visitor as visitor
from parsing.ast import ArithmeticNode, AssignNode, AttrDeclarationNode, BlocksNode, CaseNode, CaseOptionNode, ClassDeclarationNode, ComparerNode, ComplementNode, ConditionalNode, EqualsNode, IsVoidNode, LetNode, LoopNode, MethodCallNode, MethodDeclarationNode, Node, NotNode, ProgramNode, VarDeclarationNode, VariableNode
from semantics.tools import Context, ErrorType, Scope, TypeBag, conforms, equal, join, join_list, smart_add
class AutotypeInferencer:
    """
    AST visitor that re-visits expressions to narrow previously inferred
    AUTO_TYPE bags, updating each node's ``inferenced_type`` and recording
    conformance errors in ``self.errors``.
    """

    def __init__(self, context: Context, errors) -> None:
        self.context = context
        self.current_type = None  # type whose features are being visited
        self.errors = errors      # shared list of (position, message) pairs

    @visitor.on('node')
    def visit(self, node, scope):
        pass

    @visitor.when(ProgramNode)
    def visit(self, node: ProgramNode, scope: Scope):
        for declaration in node.declarations:
            self.visit(declaration, scope.next_child())
        scope.reset()

    @visitor.when(ClassDeclarationNode)
    def visit(self, node, scope):
        self.current_type = self.context.get_type(node.id, unpacked=True)
        for feature in node.features:
            self.visit(feature, scope)

    @visitor.when(AttrDeclarationNode)
    def visit(self, node, scope):
        if not node.expr:
            return
        node_infered = node.inferenced_type
        expr_infered = node.expr.inferenced_type.clone()

        self.visit(node.expr, scope)
        new_expr_infered = node.expr.inferenced_type

        # Nothing changed since the previous pass: no need to re-check.
        if equal(expr_infered, new_expr_infered):
            return
        new_clone_inferred = new_expr_infered.clone()
        if not conforms(new_expr_infered, node_infered):
            self.add_error(node, f"Type Error: In class '{self.current_type.name}' attribute '{node.id}' expression type({new_clone_inferred.name}) does not conforms to declared type ({node_infered.name}).")
            # NOTE(review): unlike other visits, no ErrorType is assigned on
            # failure here -- confirm whether that is intentional.

    @visitor.when(MethodDeclarationNode)
    def visit(self, node, scopex: Scope):
        scope = scopex.next_child()
        ret_type_infered = node.body.inferenced_type.clone()
        self.visit(node.body, scope)
        new_type_infered = node.body.inferenced_type

        if equal(ret_type_infered, new_type_infered):
            return

        current_method = self.current_type.get_method(node.id)
        ret_type_decl = current_method.return_type.swap_self_type(self.current_type)
        new_clone_infered = new_type_infered.clone()
        if not conforms(new_type_infered, ret_type_decl):
            self.add_error(node, f"Type Error: In Class \'{self.current_type.name}\' method \'{current_method.name}\' return expression type({new_clone_infered.name}) does not conforms to declared return type ({ret_type_decl.name})")
            ret_type_expr = ErrorType()
            node.inferenced_type = ret_type_expr
        # Undo the SELF_TYPE substitution performed above.
        ret_type_decl.swap_self_type(self.current_type, back = True)

    @visitor.when(BlocksNode)
    def visit(self, node, scope):
        for expr in node.expr_list:
            self.visit(expr, scope)
        # A block evaluates to its last expression.
        node.inferenced_type = node.expr_list[-1].inferenced_type

    @visitor.when(ConditionalNode)
    def visit(self, node, scope):
        condition_infered = node.condition.inferenced_type.clone()
        then_infered = node.then_body.inferenced_type.clone()
        else_infered = node.else_body.inferenced_type.clone()

        self.visit(node.condition, scope)
        self.visit(node.then_body, scope)
        self.visit(node.else_body, scope)

        new_condition_infered = node.condition.inferenced_type
        new_then_infered = node.then_body.inferenced_type
        new_else_infered = node.else_body.inferenced_type

        if not equal(condition_infered, new_condition_infered):
            self.add_error(node, f"Type Error: If's condition type({new_condition_infered.name}) does not conforms to Bool type.")

        if equal(then_infered, new_then_infered) and equal(else_infered, new_else_infered):
            return
        # The conditional's type is the join of both branch types.
        joined_type = join(new_then_infered, new_else_infered)
        node.inferenced_type = joined_type

    @visitor.when(CaseNode)
    def visit(self, node, scope: Scope):
        self.visit(node.case_expr, scope)

        type_list = []
        change = False
        for option in node.options:
            child = scope.next_child()
            option_infered = option.inferenced_type.clone()
            self.visit(option, child)
            new_option_infered = option.inferenced_type
            type_list.append(new_option_infered)
            change = change or not equal(option_infered, new_option_infered)

        # Only recompute the join when at least one branch type changed.
        if change:
            joined_type = join_list(type_list)
            node.inferenced_type = joined_type

    @visitor.when(CaseOptionNode)
    def visit(self, node, scope: Scope):
        self.visit(node.expr, scope)

    @visitor.when(LoopNode)
    def visit(self, node, scope):
        condition_infered = node.condition.inferenced_type.clone()
        self.visit(node.condition, scope)
        new_cond_infered = node.condition.inferenced_type

        if not equal(condition_infered, new_cond_infered):
            self.add_error(node, f"Type Error: Loop's condition type({new_cond_infered.name}) does not conforms to Bool type.")
        self.visit(node.body, scope)

    @visitor.when(LetNode)
    def visit(self, node, scope):
        child = scope.next_child()
        for var in node.var_decl_list:
            self.visit(var, child)
        self.visit(node.in_expr, child)
        node.inferenced_type = node.in_expr.inferenced_type

    @visitor.when(VarDeclarationNode)
    def visit(self, node, scope: Scope):
        if not node.expr:
            return
        expr_infered = node.expr.inferenced_type.clone()
        self.visit(node.expr, scope)
        new_expr_inferred = node.expr.inferenced_type
        if equal(expr_infered, new_expr_inferred):
            return
        node_infered = node.inferenced_type
        new_clone_infered = new_expr_inferred.clone()
        if not conforms(new_expr_inferred, node_infered):
            self.add_error(node, f"Semantic Error: Variable \'{node.id}\' expression type({new_clone_infered.name}) does not conforms to declared type({node_infered.name}).")

    @visitor.when(AssignNode)
    def visit(self, node, scope: Scope):
        # Skip undefined targets (already reported), except the forbidden
        # assignment to 'self' which still needs its expression visited.
        if not node.defined and node.id != "self":
            return
        expr_infered = node.expr.inferenced_type.clone()
        self.visit(node.expr, scope)
        new_expr_infered = node.expr.inferenced_type
        if equal(expr_infered, new_expr_infered):
            return
        var_type = scope.find_variable(node.id).type
        new_clone_infered = new_expr_infered.clone()
        if not conforms(new_expr_infered, var_type):
            self.add_error(node, f"Type Error: Cannot assign new value to variable '{node.id}'. Expression type({new_clone_infered.name}) does not conforms to declared type ({var_type.name}).")
            var_type = ErrorType()
        node.inferenced_type = var_type

    @visitor.when(MethodCallNode)
    def visit(self, node, scope):
        caller = node.inferenced_caller
        if node.type and node.expr:
            # Static dispatch (expr@Type.method): the expression must
            # conform to the explicitly named caller type.
            bridge_infered = node.expr.inferenced_type.clone()
            self.visit(node.expr, scope)
            bridge = node.expr.inferenced_type
            if not equal(bridge_infered, bridge):
                bridge_clone = bridge.clone()
                if not conforms(bridge, caller):
                    self.add_error(node, f"Semantic Error: Cannot effect dispatch because expression type({bridge_clone.name}) does not conforms to caller type({caller.name}).")
                    caller = ErrorType()
        elif node.expr:
            self.visit(node.expr, scope)
            caller = node.expr.inferenced_type

        if len(caller.type_set) > 1:
            # Ambiguous caller: narrow the bag to types declaring a matching
            # method and require a single remaining head.
            methods_by_name = self.context.get_method_by_name(node.id, len(node.args))
            types = [typex for _, typex in methods_by_name]
            conforms(caller, TypeBag(set(types), types))
            if len(caller.heads) > 1:
                error = f"Semantic Error: Method \"{node.id}\" found in {len(caller.heads)} unrelated types:\n"
                error += " -Found in: "
                error += ", ".join(typex.name for typex in caller.heads)
                self.add_error(node, error)
                caller = ErrorType()
            elif len(caller.heads) == 0:
                self.add_error(node, f"There is no method called {node.id} which takes {len(node.args)} paramters.")
                caller = ErrorType()

        if len(caller.heads) == 1:
            caller_type = caller.heads[0]
            method = caller_type.get_method(node.id)

            if len(node.args) != len(method.param_types):
                # Bug fix: the message previously swapped the two counts.
                self.add_error(node, f"Semantic Error: Method '{node.id}' from class '{caller_type.name}' takes {len(method.param_types)} arguments but {len(node.args)} were given.")
                node.inferenced_type = ErrorType()

            decl_return_type = method.return_type.clone()
            decl_return_type.swap_self_type(caller_type)
            type_set = set()
            heads = []
            type_set = smart_add(type_set, heads, decl_return_type)

            # Bug fix: zip instead of range(len(node.args)) so a surplus
            # argument no longer raises IndexError on param_types.
            for arg, p_type in zip(node.args, method.param_types):
                arg_infered = arg.inferenced_type.clone()
                self.visit(arg, scope)
                new_arg_infered = arg.inferenced_type
                new_clone_infered = new_arg_infered.clone()
                if not conforms(new_arg_infered, p_type):
                    # Bug fix: previously reported on the nonexistent
                    # attribute ``node.arg`` (AttributeError).
                    self.add_error(arg, f"Type Error: Argument expression type ({new_clone_infered.name}) does not conforms parameter declared type({p_type.name})")
            node.inferenced_type = TypeBag(type_set, heads)
        else:
            node.inferenced_type = ErrorType()
        node.inferenced_caller = caller

    @visitor.when(ArithmeticNode)
    def visit(self, node, scope):
        left_infered = node.left.inferenced_type
        right_infered = node.right.inferenced_type
        self.visit(node.left, scope)
        self.visit(node.right, scope)
        new_left = node.left.inferenced_type
        new_right = node.right.inferenced_type

        int_type = self.context.get_type("Int")
        # NOTE(review): conformance is checked against the *previous*
        # inferred types here, not the refreshed ones -- confirm this
        # asymmetry with the other visits is intentional.
        if not equal(left_infered, new_left):
            left_clone = new_left.clone()
            if not conforms(left_infered, int_type):
                self.add_error(node.left, f"Type Error: Arithmetic Error: Left member type({left_clone.name}) does not conforms to Int type.")
        if not equal(right_infered, new_right):
            # Bug fix: previously cloned ``new_left`` here (copy-paste),
            # producing the wrong type name in the error message.
            right_clone = new_right.clone()
            if not conforms(right_infered, int_type):
                self.add_error(node.right, f"Type Error: Arithmetic Error: Right member type({right_clone.name}) does not conforms to Int type.")

    @visitor.when(ComparerNode)
    def visit(self, node, scope):
        left_infered = node.left.inferenced_type
        right_infered = node.right.inferenced_type
        self.visit(node.left, scope)
        self.visit(node.right, scope)
        new_left = node.left.inferenced_type
        new_right = node.right.inferenced_type

        # Clones keep the cross-conformance checks from mutating the bags.
        left_clone = new_left.clone()
        right_clone = new_right.clone()
        if equal(left_infered, new_left) and equal(right_infered, new_right):
            return
        if not conforms(left_clone, new_right) and not conforms(right_clone, new_left):
            self.add_error(node, f"Type Error: Left expression type({new_left.name}) does not conforms to right expression type({new_right.name})")

    @visitor.when(VariableNode)
    def visit(self, node, scope: Scope):
        if node.defined:
            node.inferenced_type = scope.find_variable(node.value).type

    @visitor.when(NotNode)
    def visit(self, node, scope):
        expr_infered = node.expr.inferenced_type
        self.visit(node.expr, scope)
        new_expr = node.expr.inferenced_type
        expr_clone = new_expr.clone()
        bool_type = self.context.get_type("Bool")

        if equal(expr_infered, new_expr):
            return
        if not conforms(new_expr, bool_type):
            # Bug fix: the error was attributed to the nonexistent attribute
            # ``node.value`` and its message lacked a closing parenthesis.
            self.add_error(node.expr, f"Type Error: Not's expression type({expr_clone.name}) does not conforms to Bool type")

    @visitor.when(ComplementNode)
    def visit(self, node, scope):
        expr_infered = node.expr.inferenced_type
        self.visit(node.expr, scope)
        new_expr = node.expr.inferenced_type
        expr_clone = new_expr.clone()
        int_type = self.context.get_type("Int")

        if equal(expr_infered, new_expr):
            return
        if not conforms(new_expr, int_type):
            # Bug fix: same ``node.value`` issue as NotNode, and the message
            # wrongly said "Not's" for a complement expression.
            self.add_error(node.expr, f"Type Error: Complement's expression type({expr_clone.name}) does not conforms to Int type")

    @visitor.when(IsVoidNode)
    def visit(self, node, scope):
        self.visit(node.expr, scope)

    def add_error(self, node: Node, text: str):
        """Append *text* to ``self.errors``, tagged with node's position."""
        line, col = node.get_position() if node else (0, 0)
        self.errors.append(((line, col), f"({line}, {col}) - " + text))
# TODO: the .clone() calls taken before re-visiting (old inferred types) may
#       be removable; they do not appear to be strictly necessary.
# TODO: to avoid cloning in every visit for every possible case, update
#       .inferenced_type in place instead.
# TODO (open questions):
#   - What to do when expr_type does not conform to decl_type: currently the
#     expression becomes ErrorType when its bag empties; for an AUTO_TYPE
#     bag, should a condition be attached to it instead?  What would be
#     gained by leaving it unchanged?
#   - Should TypeBag carry an explicit "is autotype" flag identifying which
#     bag is which?
#   - When Auto1 conforms to Auto2, and Auto1 can only be Int or Object
#     while Auto2 can be anything: how and when should Auto2 be narrowed?
#     One option: once no further changes are made to Auto1 or Auto2.
# TODO: AUTO_TYPE checks for later:
#   - Redefined methods whose parameters and return type are autotypes.
#   - The use of SELF_TYPE inside auto types and how it propagates.
agrspy/envspy-histaqi/discards/monurl.py | soonyenju/agrspy | 2 | 12772171 | # coding: utf-8
import json
import time
from datetime import datetime
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
from sylogger import logger
def main():
    # Demo entry point: scrape Jiangsu province back to November 2013.
    run(stop_date = "20131101", provs = ['江苏'])
def run(stop_date = "20181101", provs = []):
    """
    Scrape monthly historical AQI tables for every city listed in
    ``url.json`` (optionally filtered to the ``provs`` provinces), walking
    backwards in time until ``stop_date`` (YYYYMMDD), and dump the result
    to ``aqi.json``.
    """
    stop_date = datetime.strptime(stop_date, r"%Y%m%d")
    histaqi_data = {}
    with open("url.json", "r", encoding='utf-8') as f:
        base_urls = json.load(f)
    # Keep only the requested provinces, when a filter was given.
    if provs:
        keys = list(base_urls.keys())
        for key in keys:
            if key not in provs: base_urls.pop(key)
    for prov_name, city_urls in base_urls.items():
        histaqi_data[prov_name] = {}
        for city_name, city_url in city_urls.items():
            try:
                print(city_name, city_url)
                response = connect(city_url)
                html = response.text
                soup = BeautifulSoup(html, "lxml")
                boxs = soup.select("div.box.p")
                box = boxs[0]  # the page has a single "box p" container
                lis = box.select("li > a")
                city_data = {}
                for li in lis:
                    city_mon_url = urljoin(city_url, li["href"])
                    mon_name = li["title"]
                    print(mon_name)
                    cur_date = datetime.strptime(mon_name[0:4] + mon_name[5:7], r"%Y%m")
                    # Month links are listed newest-first; stop once the
                    # requested cut-off date is passed.
                    if (cur_date - stop_date).days < 0:
                        break
                    else:
                        vals = read(city_mon_url)
                        city_data[mon_name] = vals
                        print(mon_name + " is done")
                histaqi_data[prov_name][city_name] = city_data
            except Exception as identifier:
                # Log, checkpoint what we have so far, and move on to the
                # next city rather than aborting the whole crawl.
                logger('logging.log', msg_type='error', msg=identifier)
                # Bug fix: write checkpoints as UTF-8 explicitly --
                # ensure_ascii=False emits raw Chinese characters, which
                # break under a non-UTF-8 default locale encoding.
                with open(datetime.now().strftime(r"%Y%m%d") + ".json", "w", encoding="utf-8") as f:
                    json.dump(histaqi_data, f, ensure_ascii = False, indent = 4)
                continue
    # Bug fix: same explicit UTF-8 encoding for the final dump.
    with open("aqi.json", "w", encoding="utf-8") as f:
        json.dump(histaqi_data, f, ensure_ascii = False, indent = 4)
# https://www.cnblogs.com/kongzhagen/p/6472746.html
# https://www.biaodianfu.com/python-requests-retry.html
# https://blog.csdn.net/xie_0723/article/details/52790786
def read(city_mon_url):
    """
    Fetch one month's AQI page and return the plain-value table cells --
    those carrying neither attributes nor child elements -- in document
    order, stripped of surrounding whitespace.
    """
    response = connect(city_mon_url)
    soup = BeautifulSoup(response.text, "lxml")
    values = []
    for cell in soup.select("div.api_month_list td"):
        if cell.attrs or cell.findChildren():
            continue
        values.append(cell.get_text().strip())
    return values
def connect(url, timeout = 500, max_retries = 30, encoding = "gbk"):
    """
    GET ``url`` through a session with HTTP-adapter retries, plus one extra
    manual retry after a short sleep; decode the response as ``encoding``
    (the scraped site serves GBK).
    """
    headers = {'user-agent': 'my-app/0.0.1'}
    session = requests.session()
    adapter = requests.adapters.HTTPAdapter(max_retries = max_retries)
    session.mount('https://', adapter)
    session.mount('http://', adapter)
    try:
        response = session.get(url, headers = headers, timeout = timeout)
    except Exception as identifier:
        # Last-chance retry after a pause; a second failure propagates.
        print(identifier)
        time.sleep(5)
        response = session.get(url, headers = headers, timeout = timeout)
    response.encoding = encoding
    return response
# Script entry point.
if __name__ == "__main__":
    main()
| 2.9375 | 3 |
env/WritePolicyEnv.py | jeanqasaur/jeeves | 253 | 12772172 | <reponame>jeanqasaur/jeeves
import JeevesLib
# import fast.AST
# from collections import defaultdict
class WritePolicyEnv:
  """Tracks the primary (writer) context of each label and allows new
  write policies to be attached to a label on behalf of a new writer."""
  def __init__(self):
    # Maps label -> context of the label's primary writer.
    self.writers = {}

  def mapPrimaryContext(self, ivar, ctxt):
    """Record ``ctxt`` as the primary writer context of label ``ivar``."""
    self.writers[ivar] = ctxt

  # This function associates a new set of write policies with a label.
  def addWritePolicy(self, label, policy, newWriter):
    """Return a fresh label restricted by ``policy`` for both the original
    writer and ``newWriter``, or ``label`` unchanged when it has no
    recorded writer."""
    # If the label is associated with a writer, then associate it with the
    # new write policies.
    # Bug fix: dict.has_key() was removed in Python 3; ``in`` works on both.
    if label in self.writers:
      ictxt = self.writers[label]

      # Make a new label mapped to the same writer.
      newLabel = JeevesLib.mkLabel(label.name)
      self.mapPrimaryContext(newLabel, ictxt)

      # Associate the new policies with this new label.
      JeevesLib.restrict(newLabel
        , lambda oc:
            JeevesLib.jand(lambda: label
              , lambda: JeevesLib.jand(
                  lambda: policy(ictxt)(oc)
                , lambda: policy(newWriter)(oc))))
      return newLabel
    # Otherwise return the label as is.
    else:
      return label
| 2.765625 | 3 |
app/core/tests/utils.py | NhatHox23/nhat-recipe-backend | 0 | 12772173 | <reponame>NhatHox23/nhat-recipe-backend<filename>app/core/tests/utils.py
from django.contrib.auth import get_user_model
from recipe.models import Tag
def sample_user(email='<EMAIL>', password='<PASSWORD>'):
    """Create and return a user for tests via the active user model."""
    return get_user_model().objects.create_user(email=email, password=password)
def sample_tag(user=None, name="Unit Test"):
    """Create and return a tag for tests, backed by a fresh sample user
    when none is supplied."""
    owner = user or sample_user()
    return Tag.objects.create(user=owner, name=name)
| 2.328125 | 2 |
funcs_app.py | ds-suyog/python_flask_func_app | 0 | 12772174 | <filename>funcs_app.py
from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/')
def functions():
    # GET: render the blank input form.
    return render_template('my-form.html')
@app.route('/', methods=['POST'])
def my_form_post():
    """
    Handle the form: on "submit", evaluate whichever of the four fields
    were filled in (factorial, fibonacci, armstrong, palindrome) and show
    the results page; on "back", return to the blank form.
    """
    action = request.form["submit"]
    if action == "back":
        return render_template('my-form.html')
    if action == "submit":
        import funcs

        def compute(field, func, convert):
            # An empty field yields empty input/result strings, mirroring
            # the blank form.
            raw = request.form[field]
            if raw == '':
                return '', ''
            value = convert(raw)
            return value, func(value)

        fact_input, result_fact = compute('factorial', funcs.fact, int)
        _, result_fibo = compute('fibonacci', funcs.fibo, int)
        _, result_arms = compute('armstrong', funcs.isarmstrong, int)
        _, result_palin = compute('palindrome', funcs.ispalin, str)
        return render_template('display.html', fact_in = fact_input, r_fact = result_fact, r_fibo = result_fibo, r_arms = result_arms, r_palin = result_palin)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
lib/timetype.py | nicciniamh/timelywall | 1 | 12772175 | <reponame>nicciniamh/timelywall<filename>lib/timetype.py<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Timely Wallpaper Changer
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import debug, datetime, ephem, math
twilightDegrees = {'civil': -6, 'nautical': -12, 'astronomical': -18}
def getTimeType(latitude, longitude, altitude, twilight='civil', morningends=0,eveningstarts=0):
	'''
	Get the time-of-day type (night, morning, daytime, evening) for the
	given location.

	Latitude and longitude can be in decimal or deg:min:sec notation, as
	accepted by ephem; western longitudes and southern latitudes are
	negative.  ``altitude`` is elevation in metres (falls back to 0 if not
	convertible to int).

	Twilight is specified as civil (-6 degrees), nautical (-12 degrees) or
	astronomical (-18 degrees).  With civil twilight:
	  - night:   the sun is below -6 degrees on the horizon
	  - morning: before noon, sun between -6 and 0 degrees (or below
	             ``morningends`` when given)
	  - evening: after noon, sun between 0 and -6 degrees (or below
	             ``eveningstarts`` when given)
	  - daytime: otherwise
	Please see the ephem (PyPi) documentation (http://rhodesmill.org/pyephem/)
	for full specifications of coordinates.

	:raises ValueError: when ``twilight`` is not a recognised name
	'''
	if not twilight.lower() in twilightDegrees:
		raise ValueError('Twilight must be one of',', '.join(list(twilightDegrees.keys())))

	debug.debug('Calculating for {}/{} elevation {}'.format(latitude,longitude,altitude))
	debug.debug(u'{} twilight is {}° below horizon'.format(twilight,abs(twilightDegrees[twilight.lower()])))
	debug.debug(u'Morning ends {}° above horizon, evening starts {}° above horizon'.format(morningends,eveningstarts))
	sun = ephem.Sun()
	observer = ephem.Observer()
	observer.lat = latitude
	observer.lon = longitude
	try:
		observer.elevation = int(altitude)
	except (TypeError, ValueError):
		# Bug fix: a bare ``except`` also swallowed KeyboardInterrupt etc.;
		# only conversion failures should fall back to sea level.
		observer.elevation = 0
	now = datetime.datetime.now()
	utc = datetime.datetime.utcnow()
	hour = now.hour
	observer.date = utc
	debug.debug('The local time is {:02}:{:02}:{:02} ({:02}:{:02}:{:02} UTC)'.format(now.hour,now.minute,now.second,
		utc.hour,utc.minute,utc.second))
	sun.compute(observer)
	current_sun_alt = sun.alt
	elevation = current_sun_alt*180/math.pi  # radians -> degrees
	debug.debug('Elevation is: ',elevation,current_sun_alt)
	if elevation < twilightDegrees[twilight.lower()]:
		return 'night'
	if hour < 12:
		if elevation < 0:
			return 'morning'
		if morningends and elevation < morningends:
			return 'morning'
		return 'daytime'
	else:
		# hour >= 12 necessarily holds here; ``elif hour >= 12`` was redundant.
		if elevation < 0:
			return 'evening'
		if eveningstarts and elevation < eveningstarts:
			return 'evening'
		return 'daytime'
if __name__ == "__main__":
debug.setdebug(1)
print getTimeType(-123.9429,45.6134,42)
| 2.453125 | 2 |
umls/semantic_types.py | OHNLP/clinical-problem-standardization | 5 | 12772176 | '''
Utilities for processing semantic types
'''
import pkg_resources

# Each row of SemanticTypes_2018AB.txt is "ABBREV|TUI|name"; build lookup
# tables in both directions between abbreviation and type identifier (TUI).
lines = list(map(lambda x: x.split('|'), open(pkg_resources.resource_filename(__name__, 'SemanticTypes_2018AB.txt')).readlines()))
abbreviation_to_id = {x[0]: x[1] for x in lines}
id_to_abbreviation = {x[1]: x[0] for x in lines}

# SemGroups_2018.txt rows map a semantic-type id (column 2) to its group
# name (column 0).
groups = {line[2]:line[0] for line in
    list(map(lambda x: x.split('|'), open(pkg_resources.resource_filename(__name__, 'SemGroups_2018.txt')).readlines()))
}

# Sorted vocabularies with a "NONE" sentinel at index 0.
group_names = ["NONE"] + list(sorted(list(set(groups.values()))))
names = ["NONE"] + list(sorted(list(set(abbreviation_to_id.keys()))))
def get_sem_type(concepts):
    """Concatenate the semantic types of every concept into one flat list,
    preserving input order."""
    return [semtype for concept in concepts for semtype in concept.semtypes]
def get_semantic_group_from_concept(concepts):
    """Map each concept's semantic types to their UMLS semantic groups.

    Types with no known abbreviation are silently skipped.
    """
    return [groups[abbreviation_to_id[semtype]]
            for concept in concepts
            for semtype in concept.semtypes
            if semtype in abbreviation_to_id]
| 2.921875 | 3 |
AWSPythonCode/PurgeS3Bucket.py | wesleywh/EMI_Servers | 0 | 12772177 | <filename>AWSPythonCode/PurgeS3Bucket.py
import argparse
import boto3
# Install boto3: pip install boto3 --target=C:\path\to\dir
# Parse the purge target from the command line.
# Fix: corrected the typo "compeltely" in the user-facing --bucket help text.
parser = argparse.ArgumentParser(description='Deletes everything in a bucket including versions.')
parser.add_argument('--bucket', dest='bucket', help='The S3 bucket name to completely purge.')
parser.add_argument('--profile', dest='profile', help='The AWS profile to use.')
parser.add_argument('--region', dest='region', help='The AWS region of this s3 bucket.')
args = parser.parse_args()

# Resolve credentials via the named profile and open the bucket in its region.
session = boto3.session.Session(profile_name=args.profile)
s3 = session.resource('s3', region_name=args.region)
bucket = s3.Bucket(args.bucket)

# Delete the current objects first, then every historical version and delete
# marker, leaving the bucket completely empty.
bucket.objects.all().delete()
bucket.object_versions.delete()
ch6/tablePrinter.py | ecmartz/python-practice | 0 | 12772178 | #!/usr/bin/python3.4
# 3x4 sample table; printTable transposes it, so each inner list becomes one
# column of the printed output.
tableData = [['apples','oranges','cherries','bananas'],
             ['Alice','Bob','Carol','David'],
             ['dogs','cats','moose','goose']]
# Per the hint
# NOTE(review): module-level width accumulator mutated inside printTable; it is
# sized to tableData, so printTable would IndexError on tables with more rows
# and stale widths persist across calls -- confirm before reuse.
colWidth = [0] * len(tableData)
# Who knew you had to transpose this list of lists
def matrixTranspose( matrix ):
    """Return the transpose of a rectangular list-of-lists (rows become columns)."""
    if not matrix:
        return []
    width = len(matrix[0])
    transposed = []
    for col in range(width):
        transposed.append([row[col] for row in matrix])
    return transposed
def printTable(argData):
    """Print argData transposed, right-justifying every cell to the width of
    the longest string anywhere in the table.

    Fix: the original mutated the module-level ``colWidth`` list, which is
    sized to ``tableData`` -- passing a table with more rows raised
    IndexError, and widths accumulated across calls.  Widths are now computed
    locally on every call.
    """
    # Widest cell in the whole table; every column is padded to this width
    # (matches the original, which used max(colWidth) for all columns).
    maxCol = max((len(item) for row in argData for item in row), default=0)
    # zip(*argData) yields the rows of the transposed table.
    for row in zip(*argData):
        for cell in row:
            print(cell.rjust(maxCol), end='')
        print()
if __name__ == '__main__':
    # Demo: print the sample table defined above.
    printTable(tableData)
app/models.py | faylau/microblog | 0 | 12772179 | <reponame>faylau/microblog
#coding=utf-8
from hashlib import md5
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, SmallInteger, DateTime, \
ForeignKey, create_engine
from sqlalchemy.orm import relationship, scoped_session, sessionmaker
from app import app
from database import Base, engine
# NOTE(review): import-time debug output (Python 2 print statement); consider
# removing it or routing it through logging before production use.
print app.config
# Role constants stored in the User.role column.
ROLE_USER = 0
ROLE_ADMIN = 1
# SQLALCHEMY_DATABASE_URI = 'sqlite:///E:\GitHub\microblog\data-dev.db'
# engine = create_engine(SQLALCHEMY_DATABASE_URI, convert_unicode=True, echo=True)
# engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'],
# convert_unicode=True,
# echo=True)
# db_session = scoped_session(sessionmaker(autocommit=False,
# autoflush=False,
# bind=engine))
#
# Base = declarative_base()
# Base.query = db_session.query_property()
def init_db():
    """Create every table declared on ``Base`` in the database bound to ``engine``."""
    Base.metadata.create_all(bind=engine)
def drop_db():
    """Drop every table declared on ``Base`` from the database bound to ``engine``."""
    Base.metadata.drop_all(bind=engine)
class User(Base):
    """A registered user account (table ``t_users``).

    Fix: :meth:`avatar` built Gravatar URLs without the ``/`` separator after
    the ``avatar`` path segment ("...avatarHASH"), producing broken image
    links.
    """
    __tablename__ = 't_users'
    id = Column(Integer, primary_key=True)
    username = Column(String(40), index=True, unique=True, nullable=False)
    nickname = Column(String(40), index=True, unique=True, nullable=False)
    password = Column(String(20), nullable=False)
    email = Column(String(120), index=True, unique=True)
    role = Column(SmallInteger, default=ROLE_USER)
    # One-to-many: a user's posts; each Post gains a ``.author`` backref.
    posts = relationship('Post', backref='author', lazy='dynamic')
    about_me = Column(String(140))
    last_seen = Column(DateTime)

    def __repr__(self):
        return '<User %r>' % (self.username)

    def avatar(self, size):
        """Return the Gravatar image URL for this user's email at *size* px."""
        # The '/' between 'avatar' and the email hash is required by Gravatar.
        return 'http://www.gravatar.com/avatar/' + md5(self.email).hexdigest() + '?d=mm&s=' + str(size)

    @staticmethod
    def make_unique_nickname(nickname):
        """Return *nickname*, or *nickname* suffixed with the lowest integer
        >= 2 that makes it unique among existing users."""
        if User.query.filter_by(nickname=nickname).first() is None:
            return nickname
        version = 2
        while True:
            new_nickname = nickname + str(version)
            if User.query.filter_by(nickname=new_nickname).first() is None:
                return new_nickname
            version += 1

    # Flask-Login hooks (is_authenticated / is_active / is_anonymous / get_id)
    # were sketched here in comments; restore them when Flask-Login is wired up.
class Post(Base):
    """A microblog post authored by a :class:`User` (table ``t_posts``)."""
    __tablename__ = 't_posts'
    id = Column(Integer, primary_key=True)
    # Post body, tweet-style 140-character limit.
    body = Column(String(140))
    timestamp = Column(DateTime)
    # FK to the authoring user's row; exposed as ``post.author`` via the
    # backref declared on User.posts.
    user_id = Column(Integer, ForeignKey('t_users.id'))
    def __repr__(self):
        return '<Post %s>' % self.body
return '<Post %s>' % self.body
if __name__ == "__main__":
# init_db()
# drop_db()
# u1 = User(username='admin', nickname=u'系统管理员', password='<PASSWORD>',
# email='<EMAIL>', about_me='', role=ROLE_ADMIN)
# u2 = User(username='andy', nickname=u'D调的华丽', password='<PASSWORD>',
# email='<EMAIL>', about_me='', role=ROLE_USER)
# db_session.add(u1)
# db_session.add(u2)
# db_session.commit()
pass
| 2.390625 | 2 |
src/pkgcore/repository/prototype.py | thesamesam/pkgcore | 0 | 12772180 | """
base repository template
"""
__all__ = (
"CategoryIterValLazyDict", "PackageMapping", "VersionMapping", "tree"
)
import os
from snakeoil.klass import jit_attr
from snakeoil.mappings import DictMixin, LazyValDict
from snakeoil.osutils import pjoin
from snakeoil.sequences import iflatten_instance
from ..ebuild.atom import atom
from ..operations import repo
from ..restrictions import boolean, packages, restriction, values
from ..restrictions.util import collect_package_restrictions
class IterValLazyDict(LazyValDict):
    """Lazy mapping whose string form lists its contents and whose cached
    entries can be invalidated one key at a time."""

    __slots__ = ()

    def __str__(self):
        return str(list(self))

    def force_regen(self, key):
        """Drop *key* from the value cache, or from the key tuple when it was
        never materialized."""
        if key in self._vals:
            del self._vals[key]
            return
        self._keys = tuple(existing for existing in self._keys if existing != key)
class CategoryIterValLazyDict(IterValLazyDict):
    """Lazy category mapping supporting forced insertion/removal of keys."""

    __slots__ = ()

    def force_add(self, key):
        """Insert *key* into the key set if it is not already known."""
        if key in self:
            return
        self._keys = tuple(set(self._keys) | {key})

    def force_remove(self, key):
        """Remove *key* from the key set if present."""
        if key in self:
            self._keys = tuple(existing for existing in self._keys if existing != key)

    __iter__ = IterValLazyDict.keys

    def __contains__(self, key):
        # With a live keys function the keys must be materialized first.
        if self._keys_func is None:
            return key in self._keys
        return key in list(self.keys())
class PackageMapping(DictMixin):
    """Mapping of category -> packages, filled lazily via *pull_vals* and
    cached per category."""

    def __init__(self, parent_mapping, pull_vals):
        self._cache = {}
        self._parent = parent_mapping
        self._pull_vals = pull_vals

    def __getitem__(self, key):
        cached = self._cache.get(key)
        if cached is not None:
            return cached
        if key not in self._parent:
            raise KeyError(key)
        vals = self._pull_vals(key)
        self._cache[key] = vals
        return vals

    def keys(self):
        return self._parent.keys()

    def __contains__(self, key):
        return key in self._cache or key in self._parent

    def force_regen(self, cat):
        """Forget any cached packages for *cat*."""
        self._cache.pop(cat, None)
class VersionMapping(DictMixin):
    """Mapping of (category, package) -> versions, filled lazily via
    *pull_vals* and cached per key."""

    def __init__(self, parent_mapping, pull_vals):
        self._cache = {}
        self._parent = parent_mapping
        self._pull_vals = pull_vals

    def __getitem__(self, key):
        cached = self._cache.get(key)
        if cached is not None:
            return cached
        if not key[1] in self._parent.get(key[0], ()):
            raise KeyError(key)
        vals = self._pull_vals(key)
        self._cache[key] = vals
        return vals

    def keys(self):
        for category, pkgs in self._parent.items():
            for pkg in pkgs:
                yield (category, pkg)

    def force_regen(self, key, val):
        """Pin *key* to *val* in the cache, or drop it when *val* is falsy."""
        if val:
            self._cache[key] = val
        else:
            self._cache.pop(key, None)
class tree:
    """Template for all repository variants.

    Args:
        frozen (bool): controls whether the repository is mutable or immutable

    Attributes:
        raw_repo: if wrapping a repo, set raw_repo per instance to it
        livefs (bool): set it to True if it's a repository representing a livefs
        package_class: callable to generate a package instance, must override
        configured (bool): if a repo is unusable for merging/unmerging
            without being configured, set it to False
        frozen_settable (bool): controls whether frozen is able to be set
            on initialization
        operations_kls: callable to generate a repo operations instance
        categories (dict): available categories in the repo
        packages (dict): mapping of packages to categories in the repo
        versions (dict): mapping of versions to packages in the repo
        frozen (bool): repository mutability status
        lock: TODO
    """
    raw_repo = None
    is_supported = True
    livefs = False
    package_class = None
    configured = True
    frozen_settable = True
    operations_kls = repo.operations
    pkg_masks = frozenset()
    def __init__(self, frozen=False):
        """Set up the lazy category/package/version mappings."""
        self.categories = CategoryIterValLazyDict(
            self._get_categories, self._get_categories)
        self.packages = PackageMapping(self.categories, self._get_packages)
        self.versions = VersionMapping(self.packages, self._get_versions)
        if self.frozen_settable:
            self.frozen = frozen
        self.lock = None
    def configure(self, *args):
        """Return a configured form of the repository."""
        raise NotImplementedError(self, "configure")
    def _get_categories(self, *args):
        """this must return a list, or sequence"""
        raise NotImplementedError(self, "_get_categories")
    def _get_packages(self, category):
        """this must return a list, or sequence"""
        raise NotImplementedError(self, "_get_packages")
    def _get_versions(self, package):
        """this must return a list, or sequence"""
        raise NotImplementedError(self, "_get_versions")
    def __getitem__(self, cpv):
        """Return a package instance for *cpv*; KeyError if not in the repo."""
        cpv_inst = self.package_class(*cpv)
        if cpv_inst.fullver not in self.versions[(cpv_inst.category, cpv_inst.package)]:
            raise KeyError(cpv)
        return cpv_inst
    def __setitem__(self, *vals):
        # Repositories are not mutable via mapping syntax.
        raise AttributeError
    def __delitem__(self, cpv):
        # Repositories are not mutable via mapping syntax.
        raise AttributeError
    def __iter__(self):
        """Filtered iterator over all the repo's packages.

        All packages with metadata issues are skipped.
        """
        return self.itermatch(packages.AlwaysTrue)
    def __len__(self):
        # Total number of versions across all (category, package) pairs.
        return sum(len(v) for v in self.versions.values())
    def __contains__(self, obj):
        """Determine if a path or a package is in a repo."""
        if isinstance(obj, str):
            path = os.path.normpath(obj)
            try:
                repo_path = os.path.realpath(getattr(self, 'location'))
            except AttributeError:
                return False
            # existing relative path
            if not path.startswith(os.sep) and os.path.exists(pjoin(repo_path, path)):
                return True
            # existing full path
            fullpath = os.path.realpath(os.path.abspath(path))
            if fullpath.startswith(repo_path) and os.path.exists(fullpath):
                return True
            return False
        else:
            for pkg in self.itermatch(obj):
                return True
            return False
    def has_match(self, atom, **kwds):
        """Return True if at least one package matches *atom*."""
        kwds.pop("sorter", None)
        kwds.pop("yield_none", None)
        for pkg in self.itermatch(atom, **kwds):
            return True
        return False
    def match(self, atom, **kwds):
        """Return all packages matching *atom* as a list."""
        return list(self.itermatch(atom, **kwds))
    def itermatch(self, restrict, sorter=None, pkg_filter=None, versioned=True,
                  raw_pkg_cls=None, pkg_cls=None, force=None, yield_none=False):
        """Generator that yields packages match a restriction.

        :type restrict: :obj:`pkgcore.restrictions.packages.PackageRestriction`
            instance.
        :param restrict: restriction to search via
        :param sorter: callable to do sorting during searching-
            if sorting the results, use this instead of sorting externally.
        :param pkg_filter: callable to do package filtering
        :param versioned: boolean controlling returning versioned or unversioned pkgs
        :param raw_pkg_cls: custom package class to use for generating raw pkg instances
        :param pkg_cls: custom package class to override raw pkg instances with
        :param yield_none: if True then itermatch will yield None for every
            non-matching package. This is meant for use in combination with
            C{twisted.task.cooperate} or other async uses where itermatch
            should not wait many (wallclock) seconds between yielding
            packages. If you override this method you should yield
            None in long-running loops, strictly calling it for every package
            is not necessary.
        """
        if not isinstance(restrict, restriction.base):
            raise TypeError(
                f"restrict must be a pkgcore.restriction.restrictions.base instance: "
                f"got {restrict!r}")
        if sorter is None:
            sorter = iter
        if pkg_filter is None:
            pkg_filter = iter
        if raw_pkg_cls is None:
            if versioned:
                raw_pkg_cls = self.package_class
            else:
                raw_pkg_cls = lambda *args: args
        if isinstance(restrict, atom):
            candidates = [(restrict.category, restrict.package)]
        else:
            candidates = self._identify_candidates(restrict, sorter)
        if force is None:
            match = restrict.match
        elif force:
            match = restrict.force_True
        else:
            match = restrict.force_False
        return self._internal_match(
            candidates, match, raw_pkg_cls=raw_pkg_cls, pkg_cls=pkg_cls,
            yield_none=yield_none, sorter=sorter, pkg_filter=pkg_filter,
            versioned=versioned)
    def _internal_gen_candidates(self, candidates, sorter, raw_pkg_cls, pkg_filter, versioned):
        # Expand (category, package) pairs into (optionally versioned)
        # package instances, filtered and sorted.
        for cp in sorter(candidates):
            if versioned:
                pkgs = (raw_pkg_cls(cp[0], cp[1], ver) for ver in self.versions.get(cp, ()))
            else:
                if self.versions.get(cp, ()):
                    pkgs = (raw_pkg_cls(cp[0], cp[1]),)
                else:
                    pkgs = ()
            pkgs = iter(pkgs)
            yield from sorter(pkg_filter(pkgs))
    def _internal_match(self, candidates, match_func, pkg_cls, yield_none=False, **kwargs):
        # Apply the restriction (and optional wrapper class) to each candidate.
        for pkg in self._internal_gen_candidates(candidates, **kwargs):
            if pkg_cls is not None:
                pkg = pkg_cls(pkg)
            if match_func(pkg):
                yield pkg
            elif yield_none:
                yield None
    def _identify_candidates(self, restrict, sorter):
        """Narrow the (category, package) search space implied by *restrict*."""
        # full expansion
        if not isinstance(restrict, boolean.base) or isinstance(restrict, atom):
            return self._fast_identify_candidates(restrict, sorter)
        dsolutions = [
            ([c.restriction
              for c in collect_package_restrictions(x, ("category",))],
             [p.restriction
              for p in collect_package_restrictions(x, ("package",))])
            for x in restrict.iter_dnf_solutions(True)]
        # see if any solution state isn't dependent on cat/pkg in anyway.
        # if so, search whole search space.
        for x in dsolutions:
            if not x[0] and not x[1]:
                if sorter is iter:
                    return self.versions
                return (
                    (c, p)
                    for c in sorter(self.categories)
                    for p in sorter(self.packages.get(c, ())))
        # simple cases first.
        # if one specifies categories, and one doesn't
        cat_specified = bool(dsolutions[0][0])
        pkg_specified = bool(dsolutions[0][1])
        pgetter = self.packages.get
        if any(True for x in dsolutions[1:] if bool(x[0]) != cat_specified):
            if any(True for x in dsolutions[1:] if bool(x[1]) != pkg_specified):
                # merde. so we've got a mix- some specify cats, some
                # don't, some specify pkgs, some don't.
                # this may be optimizable
                return self.versions
            # ok. so... one doesn't specify a category, but they all
            # specify packages (or don't)
            pr = values.OrRestriction(
                *tuple(iflatten_instance(
                    (x[1] for x in dsolutions if x[1]), values.base)))
            return (
                (c, p)
                for c in sorter(self.categories)
                for p in sorter(pgetter(c, [])) if pr.match(p))
        elif any(True for x in dsolutions[1:] if bool(x[1]) != pkg_specified):
            # one (or more) don't specify pkgs, but they all specify cats.
            cr = values.OrRestriction(
                *tuple(iflatten_instance(
                    (x[0] for x in dsolutions), values.base)))
            cats_iter = (c for c in sorter(self.categories) if cr.match(c))
            return (
                (c, p)
                for c in cats_iter for p in sorter(pgetter(c, [])))
        return self._fast_identify_candidates(restrict, sorter)
    def _fast_identify_candidates(self, restrict, sorter):
        """Candidate narrowing for non-boolean (or atom) restrictions, using
        exact category/package matches where possible."""
        pkg_restrict = set()
        cat_restrict = set()
        cat_exact = set()
        pkg_exact = set()
        for x in collect_package_restrictions(restrict,
                                              ("category", "package",)):
            if x.attr == "category":
                cat_restrict.add(x.restriction)
            elif x.attr == "package":
                pkg_restrict.add(x.restriction)
        for e, s in ((pkg_exact, pkg_restrict), (cat_exact, cat_restrict)):
            l = [x for x in s
                 if isinstance(x, values.StrExactMatch) and not x.negate]
            s.difference_update(l)
            e.update(x.exact for x in l)
        del l
        if restrict.negate:
            cat_exact = pkg_exact = ()
        if cat_exact:
            if not cat_restrict and len(cat_exact) == 1:
                # Cannot use pop here, cat_exact is reused below.
                c = next(iter(cat_exact))
                if not pkg_restrict and len(pkg_exact) == 1:
                    cp = (c, pkg_exact.pop())
                    if cp in self.versions:
                        return [cp]
                    return []
                cats_iter = [c]
            else:
                cat_restrict.add(values.ContainmentMatch(frozenset(cat_exact)))
                cats_iter = sorter(self._cat_filter(cat_restrict))
        elif cat_restrict:
            cats_iter = self._cat_filter(
                cat_restrict, negate=restrict.negate)
        else:
            cats_iter = sorter(self.categories)
        if pkg_exact:
            if not pkg_restrict:
                if sorter is iter:
                    pkg_exact = tuple(pkg_exact)
                else:
                    pkg_exact = sorter(pkg_exact)
                return (
                    (c, p)
                    for c in cats_iter for p in pkg_exact)
            else:
                pkg_restrict.add(values.ContainmentMatch(frozenset(pkg_exact)))
        if pkg_restrict:
            return self._package_filter(
                cats_iter, pkg_restrict, negate=restrict.negate)
        elif not cat_restrict:
            if sorter is iter and not cat_exact:
                return self.versions
            else:
                return (
                    (c, p) for c in
                    cats_iter for p in sorter(self.packages.get(c, ())))
        return (
            (c, p)
            for c in cats_iter for p in sorter(self.packages.get(c, ())))
    def _cat_filter(self, cat_restricts, negate=False):
        """Yield categories matching (or, when *negate*, failing) any restriction."""
        sentinel = not negate
        cats = [x.match for x in cat_restricts]
        for x in self.categories:
            for match in cats:
                if match(x) == sentinel:
                    yield x
                    break
    def _package_filter(self, cats_iter, pkg_restricts, negate=False):
        """Yield (category, package) pairs whose package matches (or, when
        *negate*, fails) any restriction."""
        sentinel = not negate
        restricts = [x.match for x in pkg_restricts]
        pkgs_dict = self.packages
        for cat in cats_iter:
            for pkg in pkgs_dict.get(cat, ()):
                for match in restricts:
                    if match(pkg) == sentinel:
                        yield (cat, pkg)
                        break
    def notify_remove_package(self, pkg):
        """internal function

        notify the repository that a pkg it provides is being removed
        """
        ver_key = (pkg.category, pkg.package)
        l = [x for x in self.versions[ver_key] if x != pkg.fullver]
        if not l:
            # dead package
            wipe = list(self.packages[pkg.category]) == [pkg.package]
            self.packages.force_regen(pkg.category)
            if wipe:
                self.categories.force_regen(pkg.category)
        self.versions.force_regen(ver_key, tuple(l))
    def notify_add_package(self, pkg):
        """internal function

        notify the repository that a pkg is being added to it
        """
        ver_key = (pkg.category, pkg.package)
        s = set(self.versions.get(ver_key, ()))
        s.add(pkg.fullver)
        if pkg.category not in self.categories:
            self.categories.force_add(pkg.category)
        self.packages.force_regen(pkg.category)
        self.versions.force_regen(ver_key, tuple(s))
    @property
    def operations(self):
        # Fresh operations instance per access.
        return self.get_operations()
    def get_operations(self, observer=None):
        """Return an operations instance bound to this repository."""
        return self.operations_kls(self)
    def __bool__(self):
        # True when the repo contains at least one version.
        try:
            next(iter(self.versions))
            return True
        except StopIteration:
            return False
    def __str__(self):
        if self.aliases:
            return str(self.aliases[0])
        return repr(self)
    @property
    def aliases(self):
        """Known names for this repo (repo_id and/or location), best first."""
        potentials = (getattr(self, key, None) for key in ('repo_id', 'location'))
        return tuple(x for x in potentials if x is not None)
    @jit_attr
    def masked(self):
        """Base package mask restriction."""
        return packages.OrRestriction(*self.pkg_masks)
| 1.65625 | 2 |
ASTStructure.py | PauloBarrantes/Niark | 1 | 12772181 | class Niark:
def __init__(self):
self.statements = []
def addStatement(self, statement):
self.statements.insert(0,statement)
def printObject(self, tabs):
for x in range (0, len(self.statements)):
self.statements[x].printObject(tabs)
#############################################################
# Begin simple instruction section
#############################################################
class VariableAssignation:
    """AST node for ``name = value``."""

    id = 'VARIABLE ASSIGNATION'

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def printObject(self, tabs):
        print(tabs, self.id, self.name, self.value)
class ArrayAssignation:
    """AST node for ``name[index] = value``."""

    id = 'ARRAY ASSIGNATION'

    def __init__(self, name, index, value):
        self.name = name
        self.index = index
        self.value = value

    def printObject(self, tabs):
        if type(self.index) is not Arithmetic:
            print(tabs, self.id, self.name, self.index, self.value)
            return
        # An arithmetic index prints itself on its own lines.
        print(tabs, self.id, self.name)
        self.index.printObject(tabs)
        print(tabs, self.value)
class VariableDeclaration:
    """AST node wrapping a Variable being declared.

    Fix: the original passed ``self.variable.printObject(tabs)`` (which
    returns None) as an argument to ``print``, so every declaration emitted a
    stray "None" and the child's line appeared before the declaration header.
    """

    id = 'VARIABLE DECLARATION'

    def __init__(self, variable):
        self.variable = variable

    def printObject(self, tabs):
        print(tabs, self.id)
        self.variable.printObject(tabs)
class ArrayDeclaration:
    """AST node wrapping an Array being declared.

    Fix: the original passed ``self.array.printObject(tabs)`` (which returns
    None) as an argument to ``print``, emitting a stray "None" on every
    declaration.
    """

    id = 'ARRAY DECLARATION'

    def __init__(self, array):
        self.array = array

    def printObject(self, tabs):
        print(tabs, self.id)
        self.array.printObject(tabs)
class Instruction:
    """Generic single-operand instruction node (id plus one value)."""

    def __init__(self, id, value):
        self.id = id
        self.value = value

    def printObject(self, tabs):
        if type(self.value) is not Arithmetic:
            print(tabs, self.id, self.value)
            return
        # An arithmetic operand prints itself on its own lines.
        print(tabs, self.id)
        self.value.printObject(tabs)
class FunctionCall:
    """AST node for invoking a function with its parameters."""

    id = 'FUNCTION CALL'

    def __init__(self, name, parameters):
        self.name = name
        self.parameters = parameters

    def printObject(self, tabs):
        if type(self.parameters) is not Arithmetic:
            print(tabs, self.id, self.name, self.parameters)
            return
        # Arithmetic parameter lists print themselves on their own lines.
        print(tabs, self.id, self.name)
        self.parameters.printObject(tabs)
class IncDec:
    """AST node for an increment/decrement operation on a variable."""

    id = 'INCDEC'

    def __init__(self, operator, variable):
        self.operator = operator
        self.variable = variable

    def printObject(self, tabs):
        print(tabs, self.id, self.operator, self.variable)
#############################################################
# End simple instruction section
#############################################################
#############################################################
# Begin conditions, arithmetics and incdec
#############################################################
class Condition:
    """AST node for a binary comparison: ``term1 <operator> term2``."""

    id = 'CONDITION'

    def __init__(self, term1, operator, term2):
        self.term1 = term1
        self.term2 = term2
        self.operator = operator

    def printObject(self, tabs):
        if type(self.term1) is Arithmetic:
            # An arithmetic left-hand side prints itself on its own lines.
            print(tabs, self.id)
            self.term1.printObject(tabs)
            print(tabs, self.operator, self.term2)
            return
        if type(self.term2) is Arithmetic:
            print(tabs, self.id, self.term1, self.operator)
            self.term2.printObject(tabs)
            return
        if type(self.term2) is FunctionCall:
            # Note: only the call itself is echoed in this case.
            self.term2.printObject(tabs)
            return
        print(tabs, self.id, self.term1, self.operator, self.term2)
class Arithmetic:
    """AST node for a binary arithmetic expression: ``term1 <operator> term2``."""

    id = 'ARITHMETIC'

    def __init__(self, term1, operator, term2):
        self.term1 = term1
        self.term2 = term2
        self.operator = operator

    def printObject(self, tabs):
        both_calls = type(self.term1) is FunctionCall and type(self.term2) is FunctionCall
        if not both_calls:
            print(tabs, self.id, self.term1, self.operator, self.term2)
            return
        # Two function-call operands each print themselves on their own lines.
        print(tabs, self.id)
        self.term1.printObject(tabs)
        print(tabs, self.operator)
        self.term2.printObject(tabs)
#############################################################
# End parameters, conditions and arithmetics
#############################################################
#############################################################
# Begin complex instruction section
#############################################################
class If:
    """AST node for an if-statement without an else branch."""

    id = 'IF'

    def __init__(self, conditions, instructionList):
        self.conditions = conditions
        # Alias (not copy) the list so addInstruction mutates the shared list.
        self.instructions = instructionList.instructions

    def addInstruction(self, instruction):
        self.instructions.insert(0, instruction)

    def printObject(self, tabs):
        print(tabs, self.id)
        if self.conditions is not None:
            self.conditions.printObject(tabs + " ")
        if self.instructions is None:
            print('No instructions in the IF section')
            return
        for instruction in self.instructions:
            if instruction is not None:
                instruction.printObject(tabs + " ")
class IfAndElse:
    """AST node for an if/else statement: one condition, two instruction lists."""

    id = 'IFANDELSE'
    id1 = 'IF'
    id2 = 'ELSE'

    def __init__(self, conditions, instructionListIf, instructionListElse):
        self.conditions = conditions
        # Both branches alias (not copy) their instruction lists.
        self.instructionsIf = instructionListIf.instructions
        self.instructionsElse = instructionListElse.instructions

    def _printBranch(self, instructions, tabs, empty_message):
        # Print every non-None instruction, or a notice when the list is absent.
        if instructions is None:
            print(empty_message)
            return
        for instruction in instructions:
            if instruction is not None:
                instruction.printObject(tabs + " ")

    def printObject(self, tabs):
        print(tabs, self.id1)
        if self.conditions is not None:
            self.conditions.printObject(tabs)
        self._printBranch(self.instructionsIf, tabs, 'No instructions in the IF section')
        print(tabs, self.id2)
        self._printBranch(self.instructionsElse, tabs, 'No instructions in the ELSE section')
class For:
    """AST node for a for-loop: declaration, condition, step, and a body."""

    id = 'FOR'

    def __init__(self, declaration, conditions, incdec, instructionList):
        # The declaration runs once, so it is kept as its own attribute rather
        # than stored among the repeated loop-body instructions.
        self.declaration = declaration
        self.conditions = conditions
        self.incdec = incdec
        self.instructions = list(instructionList.instructions)

    def printObject(self, tabs):
        print(tabs, self.id)
        self.conditions.printObject(tabs)
        self.incdec.printObject(tabs)
        if self.instructions is not None:
            for instruction in self.instructions:
                if instruction is not None:
                    instruction.printObject(tabs + " ")
#############################################################
# End complex instruction section
#############################################################
class Method:
    """AST node for a function/method definition with its body."""

    id = 'METHOD'

    def __init__(self, functionDomain, returnType, name, parameter, instructionList):
        self.functionDomain = functionDomain
        self.returnType = returnType
        self.name = name
        self.parameter = parameter
        # Alias (not copy) the body instruction list.
        self.instructions = instructionList.instructions

    def printObject(self, tabs):
        # The header line intentionally omits the leading tabs.
        print(self.id, self.functionDomain, self.returnType, self.name, self.parameter)
        if self.instructions is not None:
            for instruction in self.instructions:
                instruction.printObject(tabs + " ")
#############################################################
# Begin variables section
#############################################################
class Variable:
    """AST leaf for a scalar variable together with its initial value."""

    id = 'VARIABLE'

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def printObject(self, tabs):
        print(tabs, self.id, self.name, self.value)
class Array:
    """AST leaf for an array declared with a fixed size."""

    id = 'ARRAY'

    def __init__(self, name, size):
        self.name = name
        self.size = size

    def printObject(self, tabs):
        print(tabs, self.id, self.name, self.size)
class InstructionList:
    """Ordered list of instructions, built by prepending one instruction to an
    optional tail list."""

    id = 'INSTRUCTION LIST'

    def __init__(self, instruction, instructionList):
        self.instructions = [instruction]
        if instructionList is not None:
            self.instructions.extend(instructionList.instructions)

    def printObject(self, tabs):
        if self.instructions is not None:
            for instruction in self.instructions:
                instruction.printObject(tabs)
#############################################################
# End variables section
#############################################################
| 3.546875 | 4 |
qsr_lib/src/qsrlib_qsrs/qsr_rcc2_rectangle_bounding_boxes_2d.py | yianni/rtd-dbg | 0 | 12772182 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from qsrlib_qsrs.qsr_rcc_abstractclass import QSR_RCC_Abstractclass
class QSR_RCC2_Rectangle_Bounding_Boxes_2D(QSR_RCC_Abstractclass):
    """RCC2 relations over 2D rectangular bounding boxes.

    Relations:
        'dc': bb1 is disconnected from bb2
        'c':  bb1 is connected to bb2
    """

    _unique_id = "rcc2"
    """str: Unique identifier name of the QSR."""

    _all_possible_relations = ("dc", "c")
    """tuple: All possible relations of the QSR."""

    def __init__(self):
        super(QSR_RCC2_Rectangle_Bounding_Boxes_2D, self).__init__()

    def _convert_to_requested_rcc_type(self, qsr):
        """Collapse any finer RCC relation to RCC2: everything but 'dc' is 'c'."""
        if qsr == "dc":
            return "dc"
        return "c"
| 2.765625 | 3 |
tests.live/Python/test_live.py | Bradben/iqsharp | 115 | 12772183 | #!/bin/env python
# -*- coding: utf-8 -*-
##
# test_live.py: Tests Azure Quantum functionality Live.
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
## IMPORTS ##
import pytest
import warnings
## TESTS ##
def connect():
    """Connect to the Azure Quantum workspace using environment credentials."""
    import qsharp.azure
    return qsharp.azure.connect(credential="environment")
def has_completed(job) -> bool:
    """Check if the job has reached a terminal state."""
    return job.status in ("Succeeded", "Failed", "Cancelled")
def wait_until_completed(job):
    """Poll the job's status with exponential backoff until it reaches a
    terminal state; raise TimeoutError after roughly 30 seconds of waiting."""
    import time
    import qsharp.azure

    max_poll_wait_secs = 5
    timeout_secs = 30
    poll_wait = 0.2
    total_time = 0.

    while not has_completed(job):
        if total_time >= timeout_secs:
            raise TimeoutError(f"The wait time has exceeded {timeout_secs} seconds.")
        time.sleep(poll_wait)
        total_time += poll_wait
        job = qsharp.azure.status(job.id)
        # Back off by 1.5x per poll, capping once the cap has been crossed.
        poll_wait = max_poll_wait_secs if poll_wait >= max_poll_wait_secs else poll_wait * 1.5
def test_ionq_targets():
    """
    Tests that we can fetch targets from the service,
    and that the workspace includes the targets we need for submission
    """
    targets = connect()
    assert len(targets) > 2

    ids = [target.id for target in targets]
    assert 'ionq.simulator' in ids
    assert 'ionq.qpu' in ids
def test_ionq_submit():
    """
    Test that the SampleQrng operation can be submitted successfully on the ionq.simulator
    """
    import time
    import qsharp
    from Microsoft.Quantum.Tests import SampleQrng
    # Make sure we can simulate locally:
    count = 3
    result = SampleQrng.simulate(count=count, name='andres')
    assert len(result) == count
    import qsharp.azure
    connect()
    # Select the IonQ simulator as the active target and verify the selection.
    t = qsharp.azure.target("ionq.simulator")
    assert isinstance(t, qsharp.azure.AzureTarget)
    assert t.id == "ionq.simulator"
    # Submit the operation and confirm a job id was assigned.
    job = qsharp.azure.submit(SampleQrng, count=count, name="andres")
    assert isinstance(job, qsharp.azure.AzureJob)
    assert not job.id == ''
    print("Submitted job: ", job.id)
    try:
        wait_until_completed(job)
    except TimeoutError:
        warnings.warn("IonQ execution exceeded timeout. Skipping fetching results.")
    else:
        job = qsharp.azure.status()
        assert isinstance(job, qsharp.azure.AzureJob)
        assert job.status == "Succeeded"
        # Each of the 3 qubits is uniformly random, so all 8 bitstrings are
        # expected with probability 1/8 on the (exact) simulator.
        histogram = {
            '[0,0,0]': 0.125,
            '[0,0,1]': 0.125,
            '[0,1,0]': 0.125,
            '[0,1,1]': 0.125,
            '[1,0,0]': 0.125,
            '[1,0,1]': 0.125,
            '[1,1,0]': 0.125,
            '[1,1,1]': 0.125
        }
        retrieved_histogram = qsharp.azure.output()
        assert isinstance(retrieved_histogram, dict)
        assert histogram == retrieved_histogram
def test_honeywell_targets():
    """
    Tests that we can fetch targets from the service,
    and that the workspace includes the targets we need for submission
    """
    targets = connect()
    assert len(targets) > 2

    ids = [target.id for target in targets]
    assert 'honeywell.hqs-lt-s1' in ids
    assert 'honeywell.hqs-lt-s1-apival' in ids
def test_honeywell_submit():
    """
    Test that the RunTeleport operation can be submitted successfully on the honeywell apival target
    """
    import qsharp
    from Microsoft.Quantum.Tests import RunTeleport

    # Make sure we can simulate locally:
    expected = True
    result = RunTeleport.simulate(doPlus=expected)
    # Fix: the original wrote ``assert result == 0 if expected else 1``,
    # which parses as ``(result == 0) if expected else 1`` -- the else branch
    # asserted the constant 1 and always passed.  Parenthesized so the
    # expected value (0 for plus, 1 otherwise) is actually compared.
    assert result == (0 if expected else 1)

    import qsharp.azure
    connect()

    # Select the Honeywell API-validation target and verify the selection.
    t = qsharp.azure.target("honeywell.hqs-lt-s1-apival")
    assert isinstance(t, qsharp.azure.AzureTarget)
    assert t.id == "honeywell.hqs-lt-s1-apival"

    # Submit the operation and confirm a job id was assigned.
    job = qsharp.azure.submit(RunTeleport, doPlus=expected)
    assert isinstance(job, qsharp.azure.AzureJob)
    assert not job.id == ''
    print("Submitted job: ", job.id)

    try:
        wait_until_completed(job)
    except TimeoutError:
        warnings.warn("Honeywell execution exceeded timeout. Skipping fetching results.")
    else:
        job = qsharp.azure.status()
        assert isinstance(job, qsharp.azure.AzureJob)
        if job.status == "Succeeded":
            retrieved_histogram = qsharp.azure.output()
            assert isinstance(retrieved_histogram, dict)
            assert '0' in retrieved_histogram
| 2.234375 | 2 |
addresses/hinsdale.py | johncoleman83/attom_python_client | 0 | 12772184 | #!/usr/bin/env python3
"""
ATTOM API
https://api.developer.attomdata.com
"""
HINSDALE = "HINSDALE, IL"
MADISON_HINSDALE = {}
HOMES = {
"216 S MADISON ST": HINSDALE,
"607 S ADAMS ST": HINSDALE,
"428 MINNEOLA ST": HINSDALE,
"600 S BRUNER ST": HINSDALE,
"637 S BRUNER ST": HINSDALE,
"142 S STOUGH ST": HINSDALE,
"106 S BRUNER ST": HINSDALE,
"37 S STOUGH ST": HINSDALE,
"840 S ADAMS ST": HINSDALE,
"618 S QUINCY ST": HINSDALE,
"904 S STOUGH ST": HINSDALE,
"222 CENTER ST": HINSDALE,
"602 S ADAMS ST": HINSDALE,
"18 E NORTH ST": HINSDALE,
"818 S MADISON ST": HINSDALE,
"427 N MADISON ST": HINSDALE,
"317 E CHICAGO AVE": HINSDALE,
"2 S BRUNER ST": HINSDALE,
"133 S QUINCY ST": HINSDALE,
"410 S MADISON ST": HINSDALE,
"113 MAUMELL ST": HINSDALE,
"138 E MAPLE ST": HINSDALE,
"819 W 8TH ST": HINSDALE,
"519 E 1ST ST": HINSDALE,
"733 N ELM ST": HINSDALE,
"603 JEFFERSON ST": HINSDALE,
"729 JEFFERSON ST": HINSDALE,
"731 TOWN PL": HINSDALE,
"233 S QUINCY ST": HINSDALE,
"238 S MADISON ST": HINSDALE,
"718 W 4TH ST": HINSDALE,
"209 S MADISON ST": HINSDALE,
"415 N COUNTY LINE RD": HINSDALE,
"111 S STOUGH ST": HINSDALE,
"818 W HINSDALE AVE": HINSDALE,
"712 S STOUGH ST": HINSDALE,
"650 S THURLOW ST": HINSDALE,
"134 MAUMELL ST": HINSDALE,
"508 HIGHLAND RD": HINSDALE,
"411 S STOUGH ST": HINSDALE,
"431 S QUINCY ST": HINSDALE,
"442 S QUINCY ST": HINSDALE,
}
| 1.945313 | 2 |
wordle_assistant.py | ckatkinson/WordleAssistant | 0 | 12772185 | #!/usr/bin/python3
from typing import List, Optional

from src.scoring import word_scores
from src.word_importer import import_word_list
from src.wordle_filter import filter_from_word_info
class WordleAssistant:
    """Interactively collects one guess/color pair and narrows the word list.

    On construction the user is prompted for the guessed word and the color
    feedback returned by Wordle; the supplied word list is then filtered and
    re-scored so that :meth:`suggestions` can rank the remaining candidates.
    """

    def __init__(self, word_list: Optional[List[str]] = None) -> None:
        """Prompt for a guess and its colors, then filter and score *word_list*.

        BUG FIX: the original default was ``word_list=import_word_list()``,
        which is evaluated once at class-definition time (the classic
        call-in-default-argument pitfall).  Bind it lazily instead.
        """
        if word_list is None:
            word_list = import_word_list()
        self.guess = input("Input the 5-letter word that you guessed: \n").strip(" '\"")
        self.color = input(
            "Input the colors given back by wordle (for example bygyy): \n"
        ).strip(" '\"")
        # Remaining candidates consistent with the guess/color feedback.
        self.word_list = filter_from_word_info(self.guess, self.color, word_list)
        # (word, score) pairs, best candidates first.
        self.scored_words = word_scores(self.word_list)

    def suggestions(self, number: int = 10) -> List[str]:
        """Return the top *number* candidate words, best first."""
        return [w for w, _ in self.scored_words[:number]]
# This is the 'interactive' part of the program. Is this (ie the while loop) a good idea? Probably not, but I'm still learning.
# Initial suggestions:
def main():
    """Entry point: print initial suggestions, then loop on user guesses."""
    # Initial suggestions computed from the full, unfiltered word list.
    words = import_word_list()
    ranked = word_scores(words)
    print("\nHere are some suggested guesses: ")
    print([word for word, _ in ranked[:10]])

    assistant = WordleAssistant()
    print(assistant.suggestions())

    # Keep prompting until the user opts out with 'n'.
    while input("Would you like to keep going ([y]/n)? ") != "n":
        assistant = WordleAssistant(
            filter_from_word_info(
                assistant.guess, assistant.color, assistant.word_list))
        print("\nHere are some suggested guesses: ")
        print(assistant.suggestions())


if __name__ == "__main__":
    main()
| 3.84375 | 4 |
pyhp/cmd.py | cr/pyHP49 | 1 | 12772186 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import protocol
import hpstr
#http://www.hpcalc.org/details.php?id=5910
# HP object prologue -> (short descriptor, verbose description).
# Reference: http://www.hpcalc.org/details.php?id=5910
objtypes = {
    0x3329: ("DOREAL", "real (%) 153."),
    0x7729: ("DOCMP", "complex (C%) (3.,4.)"),
    0x2C2A: ("DOCSTR", "string ($) 'Hello'"),
    0xE829: ("DOARRY", "array ( [] ) [3. 4.]"),
    0x742A: ("DOLIST", "list ( {} ) {3 4}"),
    0x482E: ("DOIDNT", "global name (id) 'MYPROG'"),
    0x6D2E: ("DOLAM", "local name (lam) 'j'"),
    0x9D2D: ("DOCOL", "program ( :: ; ) :: %1 %2 x+ ;"),
    0xB82A: ("DOSYMB", "algebraic (alg) '1+2*3^4'"),
    # BUG FIX: the original entry was missing the comma between the two
    # strings, producing a 1-tuple of the concatenated text and breaking
    # objtype(0x4E2A, 1) with an IndexError.
    0x4E2A: ("DOHSTR/XS", "user binary integer (hxs) #1234567890123456h"),
    0x1E2B: ("DOGROB", "grob"),
    0xFC2A: ("DOTAG", "tagged :Price:153.95"),
    0xDA2A: ("DOEXT", "unit 365.2422_d"),
    0x922E: ("DOROMP", "xlib (romptr) XLIB F0 BA"),
    0x1129: ("DOBINT", "bint ~ FFFFFh"),
    0x962A: ("DORRP", "dir (rrp) DIR ... END"),
    0x5529: ("DOEREL", "long real (%%) 1.23456789012345E12345"),
    0x9D29: ("DOECMP", "long complex (C%%) (3E0,4E0)"),
    0x0A2A: ("DOLNKARRY", "linked array ( l[] )"),
    0xBF29: ("DOCHAR", "character"),
    0xCC2D: ("DOCODE", "code object"),
    0x402B: ("DOLIB", "library"),
    0x622B: ("DOBAK", "backup object"),
    0x882B: ("DOEXT0", "library data (aka EXT0)"),
    0xAA2B: ("DOEXT1", "or DOACPTR access pointer (aka Extended Ptr, and EXT1)"),
    0xCC2B: ("DOEXT2", "font (erroneously called EXT2 by Vger)"),
    0xFE26: ("DOMINIFONT", "MiniFont"),
    0xEE2B: ("DOEXT3", "ext3 note: was dispatch type DF in HP48"),
    0x102C: ("DOEXT4", "ext4"),
    0x1426: ("DOINT", "integer (ZINT)"),
    0x3A26: ("DOLNGREAL", "infinite-precision real (not yet implemented)"),
    0x6026: ("DOLNGCMP", "infinite-precision complex (not yet implemented)"),
    0x8626: ("DOMATRIX", "symbolic matrix"),
    0xAC26: ("DOFLASHP", "Flash Pointer (FPTR n n; FPTR2 ^name)"),
    0xD526: ("DOAPLET", "Aplet (not yet implemented)")
}


def objtype(integer, verbose=0):
    """Translates object type number into a string representation.

    The default verbose=0 gives a compact descriptor, =1 a verbose description.
    Raises KeyError for an unknown object type number.
    """
    return objtypes[integer][verbose]
def version():
    """Return the server version string reported by the calculator."""
    protocol.cmd("V")
    packet = protocol.readpacket()
    return hpstr.tostr(packet)
def meminfo():
    """Return the number of free bytes in calculator memory."""
    protocol.cmd("M")
    packet = protocol.readpacket()
    # The last byte of the packet is dropped before decoding the decimal count.
    return int(hpstr.tostr(packet[:-1]))
def ls():
    """Return a list of [name, objtype, size, flags] entries for each object
    in the current directory on the calculator."""
    protocol.cmd("L")
    raw = protocol.readpacket()
    entries = []
    pos = 0
    end = len(raw)
    while pos < end:
        # Record layout: <len><name:len><type:2 BE><size:3 LE><flags:2 BE>
        name_len = raw[pos]
        pos += 1
        name = hpstr.tostr(raw[pos:pos + name_len])
        pos += name_len
        obj_type = raw[pos] * 256 + raw[pos + 1]
        pos += 2
        # Size field is halved — presumably stored in nibbles; TODO confirm.
        size = (raw[pos] + raw[pos + 1] * 256 + raw[pos + 2] * 65536) / 2.0
        pos += 3
        flags = raw[pos] * 256 + raw[pos + 1]
        pos += 2
        entries.append([name, obj_type, size, flags])
    return entries
def get( remotefile ):
    """Reads remotefile from current directory and returns it as byte array.

    Thin delegation to protocol.get; currently only binary mode is supported.
    """
    return protocol.get( remotefile )
def put( remotefile, data ):
    """Writes data to remotefile in current directory and returns status.

    Thin delegation to protocol.put; currently only binary mode is supported.
    """
    return protocol.put( remotefile, data )
| 2.078125 | 2 |
tests/node/test_node_async.py | vladr11/graphql-relay-py | 0 | 12772187 | from typing import NamedTuple
from pytest import mark
from graphql import (
graphql,
GraphQLField,
GraphQLID,
GraphQLNonNull,
GraphQLObjectType,
GraphQLSchema,
GraphQLString,
)
from graphql_relay import node_definitions
class User(NamedTuple):
    """Minimal user record backing the Node interface in these tests."""
    id: str
    name: str
# Two fixed users act as the node-resolvable dataset for these tests.
# NOTE(review): "<NAME>" looks like an anonymization placeholder; the asserts
# below compare against it verbatim, so the tests stay internally consistent.
user_data = [User(id="1", name="<NAME>"), User(id="2", name="<NAME>")]

# Forward declaration: the type resolver passed to node_definitions closes
# over user_type, which is only assigned below.
user_type: GraphQLObjectType

# node_definitions returns more than two items; only the interface and the
# node field are needed here, hence the [:2] slice.
node_interface, node_field = node_definitions(
    lambda id_, _info: next(filter(lambda obj: obj.id == id_, user_data), None),
    lambda _obj, _info, _type: user_type,
)[:2]

user_type = GraphQLObjectType(
    "User",
    lambda: {
        "id": GraphQLField(GraphQLNonNull(GraphQLID)),
        "name": GraphQLField(GraphQLString),
    },
    interfaces=[node_interface],
)

query_type = GraphQLObjectType("Query", lambda: {"node": node_field})

schema = GraphQLSchema(query=query_type, types=[user_type])
def describe_node_interface_and_fields_with_async_object_fetcher():
    """Async node-field resolution tests (pytest-describe style grouping)."""

    @mark.asyncio
    async def gets_the_correct_id_for_users():
        # Resolving node(id: "1") must return the matching user's id.
        source = """
        {
          node(id: "1") {
            id
          }
        }
        """
        assert await graphql(schema, source) == ({"node": {"id": "1"}}, None)

    @mark.asyncio
    async def gets_the_correct_name_for_users():
        # The inline fragment on User must expose the name field as well.
        source = """
        {
          node(id: "1") {
            id
            ... on User {
              name
            }
          }
        }
        """
        assert await graphql(schema, source) == (
            {"node": {"id": "1", "name": "<NAME>"}},
            None,
        )
| 2.34375 | 2 |
mars/lib/nvutils.py | sighingnow/mars | 0 | 12772188 | # -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import uuid
from collections import namedtuple
from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref,\
create_string_buffer, Structure, POINTER, CDLL
# Module-level logger for library-load / init failures.
logger = logging.getLogger(__name__)

# Some constants taken from cuda.h
CUDA_SUCCESS = 0
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39
CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13
CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36

# nvml constants
NVML_SUCCESS = 0
NVML_TEMPERATURE_GPU = 0
class _CUuuid_t(Structure):
    """ctypes mirror of CUuuid: a 16-byte device UUID."""
    _fields_ = [
        ('bytes', c_char * 16)
    ]
class _nvmlUtilization_t(Structure):
    """ctypes mirror of nvmlUtilization_t: GPU and memory utilization rates."""
    _fields_ = [
        ('gpu', c_uint),
        ('memory', c_uint),
    ]
class _struct_nvmlDevice_t(Structure):
    pass  # opaque handle


# NVML device handles are passed around as opaque pointers.
_nvmlDevice_t = POINTER(_struct_nvmlDevice_t)
class _nvmlBAR1Memory_t(Structure):
    """ctypes mirror of nvmlBAR1Memory_t: BAR1 memory totals in bytes."""
    _fields_ = [
        ('total', c_ulonglong),
        ('free', c_ulonglong),
        ('used', c_ulonglong),
    ]
def _load_nv_library(*libnames):
for lib in libnames:
try:
return CDLL(lib)
except OSError:
continue
# Library handles; populated by _init_cp() / _init_nvml().
_cuda_lib = _nvml_lib = None

# Result records returned by the public query functions below.
_cu_device_info = namedtuple('_cu_device_info', 'uuid name multiprocessors cuda_cores threads')
_nvml_driver_info = namedtuple('_nvml_driver_info', 'driver_version cuda_version')
_nvml_device_status = namedtuple(
    '_nvml_device_status', 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')

# Lazy-initialization state and per-process caches.
_initialized = False
_gpu_count = None
_driver_info = None
_device_infos = dict()
class NVError(Exception):
    """Raised when a CUDA driver or NVML API call returns a failure code."""
    pass
def _cu_check_error(result):
    """Raise NVError when a CUDA driver API status is not CUDA_SUCCESS."""
    if result == CUDA_SUCCESS:
        return
    err_msg = c_char_p()
    _cuda_lib.cuGetErrorString(result, byref(err_msg))
    raise NVError('Device API Error %d: %s' % (result, err_msg.value.decode()))
# BUG FIX: the original guarded these definitions with
# `if _nvml_lib is not None:` at module-import time, but `_nvml_lib` is
# always None until _init_nvml() runs, so _nvml_check_error was never
# defined and every NVML call site raised NameError.  Define the checker
# unconditionally and resolve nvmlErrorString lazily instead.
def _nvml_check_error(result):
    """Raise NVError when an NVML API status is not NVML_SUCCESS."""
    if result != NVML_SUCCESS:
        _error_str = None
        if _nvml_lib is not None:
            _nvmlErrorString = _nvml_lib.nvmlErrorString
            _nvmlErrorString.restype = c_char_p
            _error_str = _nvmlErrorString(result)
        if _error_str:
            raise NVError('NVML API Error %d: %s' % (result, _error_str.decode()))
        else:
            raise NVError('Unknown NVML API Error %d' % result)
_cu_process_var_to_cores = {
(1, 0): 8,
(1, 1): 8,
(1, 2): 8,
(1, 3): 8,
(2, 0): 32,
(2, 1): 48,
}
def _cu_get_processor_cores(major, minor):
return _cu_process_var_to_cores.get((major, minor), 192)
def _init_cp():
    """Load libcuda and call cuInit; no-op once the module is initialized."""
    global _cuda_lib
    if _initialized:
        return
    _cuda_lib = _load_nv_library('libcuda.so', 'libcuda.dylib', 'cuda.dll')
    if _cuda_lib is not None:
        try:
            _cu_check_error(_cuda_lib.cuInit(0))
        except NVError:
            logger.exception('Failed to initialize libcuda.')
def _init_nvml():
    """Load libnvidia-ml and call nvmlInit; no-op once initialized."""
    global _nvml_lib
    if _initialized:
        return
    _nvml_lib = _load_nv_library('libnvidia-ml.so', 'libnvidia-ml.dylib', 'nvml.dll')
    if _nvml_lib is not None:
        try:
            _nvml_check_error(_nvml_lib.nvmlInit_v2())
        except NVError:
            logger.exception('Failed to initialize libnvidia-ml.')
def _init():
    """Initialize both libraries and record whether both are available."""
    global _initialized
    _init_cp()
    _init_nvml()
    _initialized = (_cuda_lib is not None) and (_nvml_lib is not None)
def get_device_count():
    """Return the number of visible GPUs, or None when NVML is unavailable.

    Honors CUDA_VISIBLE_DEVICES when set (counts its comma-separated
    entries); otherwise asks NVML.  The result is cached per process.
    """
    global _gpu_count
    if _gpu_count is not None:
        return _gpu_count
    _init_nvml()
    if _nvml_lib is None:
        return None
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        _gpu_count = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
    else:
        n_gpus = c_uint()
        # BUG FIX: nvmlDeviceGetCount is an NVML call, so its status must be
        # decoded by _nvml_check_error, not the CUDA checker _cu_check_error.
        _nvml_check_error(_nvml_lib.nvmlDeviceGetCount(byref(n_gpus)))
        _gpu_count = n_gpus.value
    return _gpu_count
def get_driver_info():
    """Return a (driver_version, cuda_version) namedtuple, cached per
    process, or None when NVML is unavailable."""
    global _driver_info
    _init_nvml()
    if _nvml_lib is None:
        return None
    if _driver_info is not None:
        return _driver_info
    drv_buf = create_string_buffer(100)
    cuda_ver = c_uint()
    _nvml_check_error(_nvml_lib.nvmlSystemGetDriverVersion(drv_buf, len(drv_buf)))
    _nvml_check_error(_nvml_lib.nvmlSystemGetCudaDriverVersion(byref(cuda_ver)))
    # CUDA encodes the version as major * 1000 + minor.
    major, minor = cuda_ver.value // 1000, cuda_ver.value % 1000
    _driver_info = _nvml_driver_info(
        driver_version=drv_buf.value.decode(),
        cuda_version='%d.%d' % (major, minor),
    )
    return _driver_info
def get_device_info(dev_index):
    """Return cached static device info (uuid, name, SM / core / thread
    counts) for GPU *dev_index*, or None if initialization failed."""
    try:
        return _device_infos[dev_index]
    except KeyError:
        pass

    _init()
    if not _initialized:
        return None

    # Output parameters for the CUDA driver API calls below.
    device = c_int()
    name_buf = create_string_buffer(100)
    uuid_t = _CUuuid_t()
    cc_major = c_int()
    cc_minor = c_int()
    cores = c_int()
    threads_per_core = c_int()

    _cu_check_error(_cuda_lib.cuDeviceGet(byref(device), c_int(dev_index)))
    _cu_check_error(_cuda_lib.cuDeviceGetName(name_buf, len(name_buf), device))
    _cu_check_error(_cuda_lib.cuDeviceGetUuid(byref(uuid_t), device))
    _cu_check_error(_cuda_lib.cuDeviceComputeCapability(
        byref(cc_major), byref(cc_minor), device))
    _cu_check_error(_cuda_lib.cuDeviceGetAttribute(
        byref(cores), CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device))
    _cu_check_error(_cuda_lib.cuDeviceGetAttribute(
        byref(threads_per_core), CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, device))

    # Derive per-device totals and cache the record for later lookups.
    info = _device_infos[dev_index] = _cu_device_info(
        uuid=uuid.UUID(bytes=uuid_t.bytes),
        name=name_buf.value.decode(),
        multiprocessors=cores.value,
        cuda_cores=cores.value * _cu_get_processor_cores(cc_major.value, cc_minor.value),
        threads=cores.value * threads_per_core.value,
    )
    return info
def get_device_status(dev_index):
    """Return live status (utilization, temperature, BAR1 memory) for GPU
    *dev_index*, or None if initialization failed."""
    _init()
    if not _initialized:
        return None

    # Output parameters for the NVML calls below.
    device = _nvmlDevice_t()
    utils = _nvmlUtilization_t()
    temperature = c_uint()
    memory_info = _nvmlBAR1Memory_t()

    # NVML looks devices up by the "GPU-<uuid>" string form.
    dev_uuid = get_device_info(dev_index).uuid
    uuid_str = 'GPU-' + str(dev_uuid)
    if sys.version_info[0] >= 3:
        # NVML expects a bytes object under Python 3.
        uuid_str = uuid_str.encode()
    _nvml_check_error(_nvml_lib.nvmlDeviceGetHandleByUUID(uuid_str, byref(device)))
    _nvml_check_error(_nvml_lib.nvmlDeviceGetUtilizationRates(device, byref(utils)))
    _nvml_check_error(_nvml_lib.nvmlDeviceGetTemperature(
        device, NVML_TEMPERATURE_GPU, byref(temperature)))
    _nvml_check_error(_nvml_lib.nvmlDeviceGetBAR1MemoryInfo(device, byref(memory_info)))
    return _nvml_device_status(
        gpu_util=utils.gpu,
        mem_util=utils.memory,
        temperature=temperature.value,
        fb_total_mem=memory_info.total,
        fb_free_mem=memory_info.free,
        fb_used_mem=memory_info.used,
    )
| 1.859375 | 2 |
modred/tests/testltigalerkinproj.py | pythonpadawanEXE/modred | 55 | 12772189 | #!/usr/bin/env python
"""Test ltigalerkinproj module"""
import unittest
import os
from os.path import join
from shutil import rmtree
import numpy as np
from modred import ltigalerkinproj as lgp, parallel, util
from modred import util
from modred.py2to3 import range
from modred.vectors import VecHandlePickle
# BUG FIX: an anonymization artifact replaced "@unittest.skip" with
# "<EMAIL>" (a syntax error); the decorator is restored here.
@unittest.skip('Testing something else.')
class TestLTIGalerkinProjectionBase(unittest.TestCase):
    """Tests persistence of reduced arrays in LTIGalerkinProjectionBase."""
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_LTIGalerkinProj_DELETE_ME'
        if parallel.is_rank_zero() and not os.path.exists(self.test_dir):
            os.mkdir(self.test_dir)
        parallel.barrier()

    def tearDown(self):
        parallel.barrier()
        parallel.call_from_rank_zero(rmtree, self.test_dir, ignore_errors=True)
        parallel.barrier()

    def test_put_reduced_arrays(self):
        """Test putting reduced mats"""
        A_reduced_path = join(self.test_dir, 'A.txt')
        B_reduced_path = join(self.test_dir, 'B.txt')
        C_reduced_path = join(self.test_dir, 'C.txt')
        A = parallel.call_and_bcast(np.random.random, ((10, 10)))
        B = parallel.call_and_bcast(np.random.random, ((1, 10)))
        C = parallel.call_and_bcast(np.random.random, ((10, 2)))
        LTI_proj = lgp.LTIGalerkinProjectionBase()
        LTI_proj.A_reduced = A.copy()
        LTI_proj.B_reduced = B.copy()
        LTI_proj.C_reduced = C.copy()
        LTI_proj.put_model(A_reduced_path, B_reduced_path, C_reduced_path)
        np.testing.assert_equal(util.load_array_text(A_reduced_path), A)
        np.testing.assert_equal(util.load_array_text(B_reduced_path), B)
        np.testing.assert_equal(util.load_array_text(C_reduced_path), C)
# BUG FIX: anonymization artifacts replaced "@unittest.skip" with "<EMAIL>"
# throughout this class (syntax errors); the decorators are restored here.
@unittest.skip('Testing something else.')
@unittest.skipIf(parallel.is_distributed(), 'Serial only')
class TestLTIGalerkinProjectionArrays(unittest.TestCase):
    """Tests that can find the correct A, B, and C arrays."""
    def setUp(self):
        self.num_basis_vecs = 10
        self.num_adjoint_basis_vecs = 10
        self.num_states = 11
        self.num_inputs = 3
        self.num_outputs = 2
        self.generate_data_set(
            self.num_basis_vecs, self.num_adjoint_basis_vecs,
            self.num_states, self.num_inputs, self.num_outputs)
        self.LTI_proj = lgp.LTIGalerkinProjectionArrays(
            self.basis_vecs, adjoint_basis_vecs=self.adjoint_basis_vecs,
            is_basis_orthonormal=True)

    def tearDown(self):
        pass

    def test_init(self):
        """ """
        pass

    def generate_data_set(
        self, num_basis_vecs, num_adjoint_basis_vecs,
        num_states, num_inputs, num_outputs):
        """Generates random data, saves, and computes true reduced A, B, C."""
        self.basis_vecs = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_basis_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_basis_vecs)))
        # CONSISTENCY FIX: size the adjoint basis by num_adjoint_basis_vecs
        # (the original used num_basis_vecs here), matching the Handles class.
        self.adjoint_basis_vecs = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_adjoint_basis_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_adjoint_basis_vecs)))
        self.A_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_states)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_states)))
        self.B_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_inputs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_inputs)))
        self.C_array = (
            parallel.call_and_bcast(
                np.random.random, (num_outputs, num_states)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_outputs, num_states)))
        self.A_on_basis_vecs = self.A_array.dot(self.basis_vecs)
        self.B_on_standard_basis_array = self.B_array
        self.C_on_basis_vecs = self.C_array.dot(self.basis_vecs).squeeze()
        parallel.barrier()

        # Reference (true) reduced operators for the orthonormal case ...
        self.A_true = self.adjoint_basis_vecs.conj().T.dot(
            self.A_array.dot(
                self.basis_vecs))
        self.B_true = self.adjoint_basis_vecs.conj().T.dot(self.B_array)
        self.C_true = self.C_array.dot(self.basis_vecs)
        # ... and for the non-orthonormal case (oblique projection).
        self.proj_array = np.linalg.inv(
            self.adjoint_basis_vecs.conj().T.dot(self.basis_vecs))
        self.A_true_non_orth = self.proj_array.dot(self.A_true)
        self.B_true_non_orth = self.proj_array.dot(self.B_true)

    @unittest.skip('Testing something else')
    def test_reduce_A(self):
        """Reduction of A array for Array, LookUp operators and in_memory."""
        A_returned = self.LTI_proj.reduce_A(self.A_on_basis_vecs)
        np.testing.assert_equal(A_returned, self.A_true)
        LTI_proj = lgp.LTIGalerkinProjectionArrays(
            self.basis_vecs, adjoint_basis_vecs=self.adjoint_basis_vecs,
            is_basis_orthonormal=False)
        A_returned = LTI_proj.reduce_A(self.A_on_basis_vecs)
        np.testing.assert_equal(LTI_proj._proj_array, self.proj_array)
        np.testing.assert_equal(A_returned, self.A_true_non_orth)

    @unittest.skip('Testing something else')
    def test_reduce_B(self):
        """Given modes, test reduced B array"""
        B_returned = self.LTI_proj.reduce_B(self.B_on_standard_basis_array)
        np.testing.assert_equal(B_returned, self.B_true)
        LTI_proj = lgp.LTIGalerkinProjectionArrays(
            self.basis_vecs, adjoint_basis_vecs=self.adjoint_basis_vecs,
            is_basis_orthonormal=False)
        B_returned = LTI_proj.reduce_B(self.B_on_standard_basis_array)
        np.testing.assert_allclose(B_returned, self.B_true_non_orth)

    @unittest.skip('Testing something else')
    def test_reduce_C(self):
        """Test that, given modes, can find correct C array"""
        C_returned = self.LTI_proj.reduce_C(self.C_on_basis_vecs)
        np.testing.assert_equal(C_returned, self.C_true)

    @unittest.skip('Testing something else')
    def test_compute_model(self):
        # No test; just check it runs. Results are checked in other tests.
        A, B, C = self.LTI_proj.compute_model(
            self.A_on_basis_vecs, self.B_on_standard_basis_array,
            self.C_on_basis_vecs)

    @unittest.skip('Testing something else')
    def test_adjoint_basis_vec_optional(self):
        """Test that adjoint modes default to direct modes"""
        no_adjoints_LTI_proj = lgp.LTIGalerkinProjectionArrays(
            self.basis_vecs, is_basis_orthonormal=True)
        np.testing.assert_equal(
            no_adjoints_LTI_proj.adjoint_basis_vecs,
            self.basis_vecs)
# BUG FIX: anonymization artifacts replaced "@unittest.skip"/"@unittest.skipIf"
# with "<EMAIL>"/"<EMAIL>If" throughout this class (syntax errors); the
# decorators are restored here.
@unittest.skip('Testing something else.')
@unittest.skipIf(parallel.is_distributed(), 'Only test in serial')
class TestLTIGalerkinProjectionHandles(unittest.TestCase):
    """Tests that can find the correct A, B, and C arrays from modes."""
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'file_LTIGalerkinProj_DELETE_ME'
        if parallel.is_rank_zero() and not os.path.exists(self.test_dir):
            os.mkdir(self.test_dir)
        parallel.barrier()
        # Filename templates for the vector handles written below.
        self.basis_vec_path = join(self.test_dir, 'basis_vec_%02d.txt')
        self.adjoint_basis_vec_path = join(
            self.test_dir, 'adjoint_basis_vec_%02d.txt')
        self.A_on_basis_vec_path = join(self.test_dir, 'A_on_mode_%02d.txt')
        self.B_on_basis_path = join(self.test_dir, 'B_on_basis_%02d.txt')
        self.C_on_basis_vec_path = join(self.test_dir, 'C_on_mode_%02d.txt')
        self.num_basis_vecs = 10
        self.num_adjoint_basis_vecs = 10
        self.num_states = 11
        self.num_inputs = 3
        self.num_outputs = 2
        self.generate_data_set(
            self.num_basis_vecs, self.num_adjoint_basis_vecs,
            self.num_states, self.num_inputs, self.num_outputs)
        self.LTI_proj = lgp.LTIGalerkinProjectionHandles(
            np.vdot, self.basis_vec_handles,
            adjoint_basis_vec_handles=self.adjoint_basis_vec_handles,
            is_basis_orthonormal=True, verbosity=0)

    def tearDown(self):
        parallel.barrier()
        if parallel.is_rank_zero():
            rmtree(self.test_dir, ignore_errors=True)
        parallel.barrier()

    def test_init(self):
        """ """
        pass

    def generate_data_set(self, num_basis_vecs, num_adjoint_basis_vecs,
        num_states, num_inputs, num_outputs):
        """Generates random data, saves, and computes true reduced A,B,C."""
        self.basis_vec_handles = [
            VecHandlePickle(self.basis_vec_path % i)
            for i in range(self.num_basis_vecs)]
        self.adjoint_basis_vec_handles = [
            VecHandlePickle(self.adjoint_basis_vec_path % i)
            for i in range(self.num_adjoint_basis_vecs)]
        self.A_on_basis_vec_handles = [
            VecHandlePickle(self.A_on_basis_vec_path % i)
            for i in range(self.num_basis_vecs)]
        self.B_on_standard_basis_handles = [
            VecHandlePickle(self.B_on_basis_path % i)
            for i in range(self.num_inputs)]
        self.C_on_basis_vec_handles = [
            VecHandlePickle(self.C_on_basis_vec_path % i)
            for i in range(self.num_basis_vecs)]

        # Random complex bases and system matrices (broadcast across ranks).
        self.basis_vec_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_basis_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_basis_vecs)))
        self.adjoint_basis_vec_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_adjoint_basis_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_adjoint_basis_vecs)))
        self.A_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_states)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_states)))
        self.B_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_inputs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_inputs)))
        self.C_array = (
            parallel.call_and_bcast(
                np.random.random, (num_outputs, num_states)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_outputs, num_states)))

        # Per-vector views and operator actions, written out via handles.
        self.basis_vecs = [
            self.basis_vec_array[:, i].squeeze() for i in range(num_basis_vecs)]
        self.adjoint_basis_vecs = [
            self.adjoint_basis_vec_array[:, i].squeeze()
            for i in range(num_adjoint_basis_vecs)]
        self.A_on_basis_vecs = [
            self.A_array.dot(basis_vec).squeeze()
            for basis_vec in self.basis_vecs]
        self.B_on_basis = [
            self.B_array[:, i].squeeze() for i in range(self.num_inputs)]
        self.C_on_basis_vecs = [
            np.array(self.C_array.dot(basis_vec).squeeze(), ndmin=1)
            for basis_vec in self.basis_vecs]
        if parallel.is_rank_zero():
            for handle, vec in zip(self.basis_vec_handles, self.basis_vecs):
                handle.put(vec)
            for handle, vec in zip(
                self.adjoint_basis_vec_handles, self.adjoint_basis_vecs):
                handle.put(vec)
            for handle, vec in zip(
                self.A_on_basis_vec_handles, self.A_on_basis_vecs):
                handle.put(vec)
            for handle, vec in zip(
                self.B_on_standard_basis_handles, self.B_on_basis):
                handle.put(vec)
            for handle, vec in zip(
                self.C_on_basis_vec_handles, self.C_on_basis_vecs):
                handle.put(vec)
        parallel.barrier()

        # Reference (true) reduced operators, orthonormal and oblique cases.
        self.A_true = self.adjoint_basis_vec_array.conj().T.dot(
            self.A_array.dot(self.basis_vec_array))
        self.B_true = self.adjoint_basis_vec_array.conj().T.dot(self.B_array)
        self.C_true = self.C_array.dot(self.basis_vec_array)
        self.proj_array = np.linalg.inv(
            self.adjoint_basis_vec_array.conj().T.dot(self.basis_vec_array))
        self.A_true_non_orth = self.proj_array.dot(self.A_true)
        self.B_true_non_orth = self.proj_array.dot(self.B_true)

    @unittest.skip('Testing something else')
    def test_derivs(self):
        """Test can take derivs"""
        dt = 0.1
        true_derivs = []
        num_vecs = len(self.basis_vec_handles)
        for i in range(num_vecs):
            true_derivs.append((
                self.A_on_basis_vec_handles[i].get() -
                self.basis_vec_handles[i].get()).squeeze() / dt)
        deriv_handles = [
            VecHandlePickle(join(self.test_dir, 'deriv_test%d' % i))
            for i in range(num_vecs)]
        lgp.compute_derivs_handles(
            self.basis_vec_handles, self.A_on_basis_vec_handles,
            deriv_handles, dt)
        derivs_loaded = [v.get() for v in deriv_handles]
        derivs_loaded = list(map(np.squeeze, derivs_loaded))
        list(map(np.testing.assert_allclose, derivs_loaded, true_derivs))

    @unittest.skip('Testing something else')
    def test_reduce_A(self):
        """Reduction of A array for Array, LookUp operators and in_memory."""
        A_returned = self.LTI_proj.reduce_A(self.A_on_basis_vec_handles)
        np.testing.assert_allclose(A_returned, self.A_true)
        LTI_proj = lgp.LTIGalerkinProjectionHandles(
            np.vdot, self.basis_vec_handles,
            adjoint_basis_vec_handles=self.adjoint_basis_vec_handles,
            is_basis_orthonormal=False, verbosity=0)
        A_returned = LTI_proj.reduce_A(self.A_on_basis_vec_handles)
        np.testing.assert_allclose(LTI_proj._proj_array, self.proj_array)
        np.testing.assert_allclose(A_returned, self.A_true_non_orth)

    @unittest.skip('Testing something else')
    def test_reduce_B(self):
        """Given modes, test reduced B array, orthogonal and non-orthogonal."""
        B_returned = self.LTI_proj.reduce_B(self.B_on_standard_basis_handles)
        np.testing.assert_allclose(B_returned, self.B_true)
        LTI_proj = lgp.LTIGalerkinProjectionHandles(
            np.vdot, self.basis_vec_handles,
            adjoint_basis_vec_handles=self.adjoint_basis_vec_handles,
            is_basis_orthonormal=False, verbosity=0)
        B_returned = LTI_proj.reduce_B(self.B_on_standard_basis_handles)
        np.testing.assert_allclose(B_returned, self.B_true_non_orth)

    @unittest.skip('Testing something else')
    def test_reduce_C(self):
        """Test that, given modes, can find correct C array"""
        C_returned = self.LTI_proj.reduce_C(self.C_on_basis_vecs)
        np.testing.assert_allclose(C_returned, self.C_true)

    @unittest.skip('Testing something else')
    def test_adjoint_basis_vec_optional(self):
        """Test that adjoint modes default to direct modes"""
        no_adjoints_LTI_proj = lgp.LTIGalerkinProjectionHandles(
            np.vdot, self.basis_vec_handles, is_basis_orthonormal=True,
            verbosity=0)
        np.testing.assert_equal(
            no_adjoints_LTI_proj.adjoint_basis_vec_handles,
            self.basis_vec_handles)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 2.390625 | 2 |
bauxite/http/client.py | vcokltfre/bauxite | 1 | 12772190 | from __future__ import annotations
from asyncio import create_task, sleep
from collections import defaultdict
from dataclasses import dataclass
from json import dumps
from typing import Any, Awaitable, Callable, Mapping, Optional, Sequence, Type, Union
from aiohttp import BasicAuth, ClientResponse, ClientSession, FormData
from bauxite.constants import API_URL, VERSION
from .errors import (
BadGateway,
BadRequest,
Forbidden,
GatewayTimeout,
HTTPError,
MethodNotAllowed,
NotFound,
ServerError,
ServiceUnavailable,
TooManyRequests,
Unauthorized,
UnprocessableEntity,
)
from .file import File
from .ratelimiting import LocalRateLimiter, RateLimiter
from .route import Route
# Signature of the hooks fired on success / error / ratelimit responses.
Callback = Callable[[ClientResponse, Route], Awaitable[None]]
# Sentinel distinguishing "no JSON body supplied" from an explicit None payload.
Unset = object()
@dataclass
class _RequestContext:
    """Everything needed to (re-)issue one HTTP request attempt."""
    route: Route
    headers: dict[str, str]
    params: dict[str, Any]
    files: Sequence[File]
    json: Any  # may be the Unset sentinel when no JSON body was supplied


@dataclass
class _ResponseContext:
    """One attempt's outcome: the response plus a 2xx success flag."""
    route: Route
    response: ClientResponse
    successful: bool


class HTTPClient:
    """Rate-limited Discord REST client with success/error/ratelimit hooks."""

    # Maps HTTP status codes to the exception raised for that status;
    # unknown codes fall back to the generic HTTPError.
    _status_codes: Mapping[int, Type[HTTPError]] = defaultdict(
        lambda: HTTPError,
        {
            400: BadRequest,
            401: Unauthorized,
            403: Forbidden,
            404: NotFound,
            405: MethodNotAllowed,
            422: UnprocessableEntity,
            429: TooManyRequests,
            500: ServerError,
            502: BadGateway,
            503: ServiceUnavailable,
            504: GatewayTimeout,
        },
    )

    def __init__(
        self,
        token: str,
        api_url: Optional[str] = None,
        user_agent: Optional[str] = None,
        proxy_url: Optional[str] = None,
        proxy_auth: Optional[BasicAuth] = None,
        ratelimiter: Optional[RateLimiter] = None,
        on_success: Optional[set[Callback]] = None,
        on_error: Optional[set[Callback]] = None,
        on_ratelimit: Optional[set[Callback]] = None,
    ) -> None:
        """Store configuration; the aiohttp session is created lazily.

        NOTE(review): _proxy_url / _proxy_auth are stored but not passed to
        the request call in _request — confirm whether that is intended.
        """
        self._token = token.strip()
        self._api_url = api_url or API_URL
        self._user_agent = (
            user_agent
            or f"DiscordBot (https://github.com/vcokltfre/bauxite, {VERSION})"
        )
        self._proxy_url = proxy_url
        self._proxy_auth = proxy_auth
        self._ratelimiter = ratelimiter or LocalRateLimiter()
        self.__session: Optional[ClientSession] = None
        self._on_success = on_success or set()
        self._on_error = on_error or set()
        self._on_ratelimit = on_ratelimit or set()

    @property
    def _session(self) -> ClientSession:
        """Return the live session, recreating it if closed or never opened."""
        if self.__session and not self.__session.closed:
            return self.__session
        self.__session = ClientSession(
            headers={
                "Authorization": f"Bot {self._token}",
                "User-Agent": self._user_agent,
            }
        )
        return self.__session

    def _dispatch(self, listeners: set[Callback], ctx: _ResponseContext) -> None:
        # Fire-and-forget: callbacks run as tasks and are never awaited here.
        for listener in listeners:
            create_task(listener(ctx.response, ctx.route))

    async def _request(
        self, ctx: _RequestContext, reset_files: int
    ) -> _ResponseContext:
        """Perform one attempt under the route's ratelimit lock.

        Raises immediately for non-429 error statuses; a 429 response is
        returned (unsuccessful) so the caller can retry.
        """
        if ctx.files:
            # Multipart upload: files plus an optional payload_json part.
            data = FormData()
            for i, file in enumerate(ctx.files):
                file.reset(reset_files)
                data.add_field(f"file_{i}", file.fp, filename=file.filename)
            if ctx.json is not Unset:
                data.add_field(
                    "payload_json", dumps(ctx.json), content_type="application/json"
                )
            ctx.params["data"] = data
        elif ctx.json is not Unset:
            ctx.params["json"] = ctx.json

        lock = await self._ratelimiter.acquire(ctx.route.bucket)
        async with lock:
            response = await self._session.request(
                ctx.route.method,
                self._api_url + ctx.route.path,
                headers=ctx.headers,
                **ctx.params,
            )

            status = response.status
            headers = response.headers

            response_ctx = _ResponseContext(ctx.route, response, 200 <= status < 300)

            # Discord's per-bucket ratelimit headers for this response.
            rl_reset_after = float(headers.get("X-RateLimit-Reset-After", 0))
            rl_bucket_remaining = int(headers.get("X-RateLimit-Remaining", 1))

            if response_ctx.successful:
                self._dispatch(self._on_success, response_ctx)
                if rl_bucket_remaining == 0:
                    # Bucket exhausted: hold the lock until it resets.
                    self._dispatch(self._on_ratelimit, response_ctx)
                    await lock.release(rl_reset_after)
                else:
                    await lock.release(0)
                return response_ctx
            elif status == 429:
                self._dispatch(self._on_error, response_ctx)
                self._dispatch(self._on_ratelimit, response_ctx)
                # A 429 without a Via header did not come through Discord's
                # proxy layer — treat it as fatal instead of retryable.
                if not headers.get("Via"):
                    raise TooManyRequests(response)
                json = await response.json()
                is_global = json.get("global", False)
                retry_after = json["retry_after"]
                if is_global:
                    await self._ratelimiter.lock_globally(retry_after)
                else:
                    await lock.release(retry_after)
            else:
                self._dispatch(self._on_error, response_ctx)
                raise self._status_codes[status](response)

        return response_ctx

    async def request(
        self,
        route: Route,
        qparams: Optional[dict[str, Union[str, int]]] = None,
        reason: Optional[str] = None,
        files: Optional[Sequence[File]] = None,
        json: Optional[Any] = Unset,
        max_attempts: int = 3,
    ) -> ClientResponse:
        """Issue a request, retrying (with backoff) up to max_attempts times.

        Only 429 responses reach the retry loop; other error statuses raise
        from _request on the first attempt.
        """
        headers = {}
        params = {}

        if qparams:
            params["params"] = qparams
        if reason:
            headers["X-Audit-Log-Reason"] = reason

        for attempt in range(max_attempts):
            ctx = _RequestContext(route, headers, params, files or (), json)
            resp = await self._request(ctx, attempt)

            if resp.successful:
                return resp.response

            if attempt == max_attempts - 1:
                raise self._status_codes[resp.response.status](resp.response)

            # Linear backoff between attempts: 1s, 3s, 5s, ...
            await sleep(1 + attempt * 2)

        raise Exception("Unreachable")

    async def close(self) -> None:
        """Close the underlying aiohttp session, if one was ever opened."""
        if self.__session:
            await self.__session.close()
| 1.921875 | 2 |
tests/_apis/test_BaseApi.py | physicsninja/Riot-Watcher | 0 | 12772191 | <filename>tests/_apis/test_BaseApi.py
import unittest
import sys
if sys.version_info > (3, 0):
from unittest.mock import MagicMock
from unittest import mock
else:
from mock import MagicMock
import mock
from riotwatcher._apis import BaseApi
class BaseApiTestCase(unittest.TestCase):
    def setUp(self):
        """Build mocked request handlers and common fixture values."""
        # Sentinel objects returned by the "with returns" handler's hooks.
        self._expected_preview_return = object()
        self._expected_after_return = object()
        # Mock with returns in the handler
        self._request_handler_mock = MagicMock(name='request_handler')
        self._request_handler_mock.preview_request = MagicMock(name='preview_request')
        self._request_handler_mock.preview_request.return_value = self._expected_preview_return
        self._request_handler_mock.after_request = MagicMock(name='after_request')
        self._request_handler_mock.after_request.return_value = self._expected_after_return
        self._request_handler_mock.preview_static_request = MagicMock(name='preview_static_request')
        self._request_handler_mock.preview_static_request.return_value = self._expected_preview_return
        self._request_handler_mock.after_static_request = MagicMock(name='after_static_request')
        self._request_handler_mock.after_static_request.return_value = self._expected_after_return
        # Mock without returns in the handler
        self._request_handler_mock_no_return = MagicMock(name='request_handler')
        self._request_handler_mock_no_return.preview_request = MagicMock(name='preview_request')
        self._request_handler_mock_no_return.after_request = MagicMock(name='after_request')
        self._request_handler_mock_no_return.preview_static_request = MagicMock(name='preview_static_request')
        self._request_handler_mock_no_return.after_static_request = MagicMock(name='after_static_request')
        self._request_handler_mock_no_return.preview_request.return_value = None
        self._request_handler_mock_no_return.after_request.return_value = None
        self._request_handler_mock_no_return.preview_static_request.return_value = None
        self._request_handler_mock_no_return.after_static_request.return_value = None
        # Dummy API key and canned HTTP response used across tests.
        self._api_key = 'sadf'
        self._mock_api_response = 'api_response_xx'
# BASE TESTS
def test_base_api_request_preview_handler(self):
base_api = BaseApi(self._api_key, [self._request_handler_mock])
endpoint_name = 'endpoint_xx'
method_name = 'method_xx'
region = 'region_xx'
url_ext = 'url_xx'
kwarg = 'extra_xx'
base_api.request(endpoint_name, method_name, region, url_ext, extra=kwarg)
self._request_handler_mock.preview_request.assert_called_once_with(
region,
endpoint_name,
method_name,
'https://region_xx.api.riotgames.com%s' % url_ext,
{'extra': kwarg}
)
def test_base_api_request_after_handler_with_preview_return(self):
base_api = BaseApi(self._api_key, [self._request_handler_mock])
endpoint_name = 'endpoint_xx'
method_name = 'method_xx'
region = 'region_xx'
url_ext = 'url_xx'
kwarg = 'extra_xx'
ret = base_api.request(endpoint_name, method_name, region, url_ext, extra=kwarg)
self._request_handler_mock.after_request.assert_called_once_with(
region,
endpoint_name,
method_name,
'https://region_xx.api.riotgames.com%s' % url_ext,
self._expected_preview_return
)
self.assertEqual(ret, self._expected_after_return)
@mock.patch('requests.get')
def test_base_api_request_after_handler_with_preview_no_return(self, mock_get):
mock_get.return_value = self._mock_api_response
base_api = BaseApi(self._api_key, [self._request_handler_mock_no_return])
endpoint_name = 'endpoint_xx'
method_name = 'method_xx'
region = 'region_xx'
url_ext = 'url_xx'
kwarg = 'extra_xx'
ret = base_api.request(endpoint_name, method_name, region, url_ext, extra=kwarg)
self._request_handler_mock_no_return.after_request.assert_called_once_with(
region,
endpoint_name,
method_name,
'https://region_xx.api.riotgames.com%s' % url_ext,
self._mock_api_response
)
mock_get.assert_called_once_with(
'https://region_xx.api.riotgames.com%s' % url_ext,
headers={'X-Riot-Token': self._api_key},
params={'extra': kwarg}
)
self.assertEqual(ret, self._mock_api_response)
# STATIC TESTS
def test_base_api_request_static_preview_handler(self):
base_api = BaseApi(self._api_key, [self._request_handler_mock])
version = 'version_xx'
locale = 'locale_xx'
url_ext = 'url_xx'
ret = base_api.request_static(version, locale, url_ext)
self._request_handler_mock.preview_static_request.assert_called_once_with(
version,
locale,
'https://ddragon.leagueoflegends.com/cdn/%s/data/%s/%s.json' % (version, locale, url_ext)
)
def test_base_api_request_static_after_handler_with_preview_return(self):
base_api = BaseApi(self._api_key, [self._request_handler_mock])
version = 'version_xx'
locale = 'locale_xx'
url_ext = 'url_xx'
ret = base_api.request_static(version, locale, url_ext)
self._request_handler_mock.after_static_request.assert_called_once_with(
version,
locale,
'https://ddragon.leagueoflegends.com/cdn/%s/data/%s/%s.json' % (version, locale, url_ext),
self._expected_preview_return
)
self.assertEqual(ret, self._expected_after_return)
@mock.patch('requests.get')
def test_base_api_request_static_after_handler_with_preview_no_return(self, mock_get):
mock_get.return_value = self._mock_api_response
base_api = BaseApi(self._api_key, [self._request_handler_mock_no_return])
version = 'version_xx'
locale = 'locale_xx'
url_ext = 'url_xx'
ret = base_api.request_static(version, locale, url_ext)
self._request_handler_mock_no_return.after_static_request.assert_called_once_with(
version,
locale,
'https://ddragon.leagueoflegends.com/cdn/%s/data/%s/%s.json' % (version, locale, url_ext),
self._mock_api_response
)
mock_get.assert_called_once_with(
'https://ddragon.leagueoflegends.com/cdn/%s/data/%s/%s.json' % (version, locale, url_ext),
)
self.assertEqual(ret, self._mock_api_response)
# VERSION TESTS
def test_base_api_request_version_preview_handler(self):
base_api = BaseApi(self._api_key, [self._request_handler_mock])
region = 'region_xx'
ret = base_api.request_version(region)
self._request_handler_mock.preview_static_request.assert_called_once_with(
'',
'',
'https://ddragon.leagueoflegends.com/realms/%s.json' % region
)
def test_base_api_request_version_after_handler_with_preview_return(self):
base_api = BaseApi(self._api_key, [self._request_handler_mock])
region = 'region_xx'
ret = base_api.request_version(region)
self._request_handler_mock.after_static_request.assert_called_once_with(
'',
'',
'https://ddragon.leagueoflegends.com/realms/%s.json' % region,
self._expected_preview_return
)
self.assertEqual(ret, self._expected_after_return)
@mock.patch('requests.get')
def test_base_api_request_version_after_handler_with_preview_no_return(self, mock_get):
mock_get.return_value = self._mock_api_response
base_api = BaseApi(self._api_key, [self._request_handler_mock_no_return])
region = 'region_xx'
ret = base_api.request_version(region)
self._request_handler_mock_no_return.after_static_request.assert_called_once_with(
'',
'',
'https://ddragon.leagueoflegends.com/realms/%s.json' % region,
self._mock_api_response
)
mock_get.assert_called_once_with(
'https://ddragon.leagueoflegends.com/realms/%s.json' % region,
)
self.assertEqual(ret, self._mock_api_response)
| 2.3125 | 2 |
examples/threads.py | TankerHQ/python-cli-ui | 30 | 12772192 | import threading
import time
from threading import Thread
import cli_ui
def long_computation():
    """Stand-in for an expensive operation: just block for 0.6 seconds."""
    delay_seconds = 0.6
    time.sleep(delay_seconds)
def count_down(lock, start):
    """Count from *start* down to 0 (inclusive), printing each step under *lock*."""
    for x in range(start, -1, -1):
        with lock:
            # Note: the sleeps are here so that we are more likely to
            # see mangled output
            #
            # In reality, if you only call `ui.info()` once you don't
            # need locks at all thanks to the GIL
            cli_ui.info("down", end=" ")
            time.sleep(0.2)
            cli_ui.info(x)
            time.sleep(0.2)
        long_computation()
def count_up(lock, stop):
    """Count from 0 up to *stop* (inclusive), printing each step under *lock*."""
    for x in range(stop + 1):
        with lock:
            # Sleeping between the two writes widens the window in which
            # unlocked output would interleave.
            cli_ui.info("up", end=" ")
            time.sleep(0.2)
            cli_ui.info(x)
            time.sleep(0.2)
        long_computation()
def main():
    """Run the up- and down-counters concurrently, sharing one lock."""
    lock = threading.Lock()
    workers = [
        Thread(target=count_down, args=(lock, 4)),
        Thread(target=count_up, args=(lock, 4)),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
# Run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 3.421875 | 3 |
end_to_end/Generate_Chains_old.py | Bharathgc/Coreference-resolution | 1 | 12772193 |
# Input: coreferent mention pairs produced by the pipeline.
# Output: coreference chains, one per line, pipe-separated.
folder_nm='end_to_end'
# NOTE(review): absolute, user-specific paths — consider making these configurable.
coref_path="/home/raj/"+folder_nm+"/output/coreferent_pairs/output2.txt"
chains_path="/home/raj/"+folder_nm+"/output/chains/chains.txt"
# Output handle stays open for the whole script; closed at the bottom of the file.
f1=open(chains_path,"w+")
def linear_search(obj, item, start=0):
    """Return the first index >= start at which obj[index] == item, else -1."""
    index = start
    limit = len(obj)
    while index < limit:
        if obj[index] == item:
            return index
        index += 1
    return -1
# Parse the coreferent-pair file. Each line looks like:
#   word1|start1|end1|word2|start2|end2|concept
# Mentions are deduplicated by start offset into the parallel lists
# words / start / start_end / concept; key/value record the pair index
# for each mention so formchain() can link pairs into chains.
with open(coref_path, 'r') as f6:
    # NOTE(review): `i` is never used inside this block.
    i=0
    key=[]
    value=[]
    words,start,start_end,concept=[],[],[],[]
    for num,line in enumerate(f6,1):
        #from the coreferent pairs file separate the words and their positions in file
        items_nnp = line.rstrip("\n\r").split("|")
        word_1,start_1,end_1,word_2,start_2,end_2,concept_type = items_nnp[0], items_nnp[1],items_nnp[2],items_nnp[3],items_nnp[4],items_nnp[5],items_nnp[6]
        #get all words in a list and also their positions in another list at corresponding positions
        if linear_search(start,start_1)==-1:
            words.append(word_1)
            start.append(start_1)
            start_end.append(start_1+" "+end_1)
            concept.append(concept_type)
        if linear_search(start,start_2)==-1:
            words.append(word_2)
            start.append(start_2)
            start_end.append(start_2+" "+end_2)
            concept.append(concept_type)
        #1st row will be marked as 1st pair and so on
        key.append(num)
        value.append(start_1)
        key.append(num)
        value.append(start_2)
def formchain(i,Chains):
    """Recursively collect all mentions transitively linked to pair entry i.

    Mutates the module-level key/value lists: consumed entries are removed by
    shifting left and blanking the tail with "". Visited mention offsets are
    appended to Chains (in place).
    """
    #if the element is not present in the chain then add it
    if linear_search(Chains,value[i])==-1:
        Chains.append(value[i])
    #store the key and value temporarily
    temp_k=key[i]
    temp_v=value[i]
    #if there is only one element in the list delete it
    if i==(len(key)-1):
        key[len(key)-1]=""
        value[len(value)-1]=""
    else:
        #delete the element by shifting the following elements by 1 position to left
        for j in range (i,len(key)-1):
            key[j]=key[j+1]
            value[j]=value[j+1]
        #mark the last position as ""
        key[len(key)-1]=""
        value[len(value)-1]=""
    # call the method again for the another mention of the pair which shares same key
    if linear_search(key,temp_k)!=-1:
        formchain(linear_search(key,temp_k),Chains)
    # call the method for another pair which has same mention which has already been included
    if linear_search(value,temp_v)!=-1:
        formchain(linear_search(value,temp_v),Chains)
# Drain the pair list: repeatedly grow a chain from the first remaining pair
# and write it out as word|start end|...|concept.
# NOTE(review): if the input file is empty, key is [] and key[0] raises
# IndexError before the sentinel check can run — confirm inputs are non-empty.
#As positions are being shifted left, 0th element will never be zero unless the entire array is empty
while(key[0]!=""):
    Chains=[]
    #start with first element of the list
    formchain(0,Chains)
    for i in range(len(Chains)-1):
        j=linear_search(start,Chains[i])
        f1.write(words[j]+"|"+start_end[j]+"|")
    j=linear_search(start,Chains[len(Chains)-1])
    f1.write(words[j]+"|"+start_end[j]+"|"+concept[j]+"\n")
f1.close()
| 2.765625 | 3 |
commandHandler.py | NightKev/Renol-IRC | 2 | 12772194 | <filename>commandHandler.py
import imp, os
import Queue
import logging
from time import strftime
from timeit import default_timer
from datetime import datetime
import centralizedThreading
from BotEvents import TimerEvent, MsgEvent, StandardEvent
from IRC_registration import trackVerification
from CommandHelp import HelpModule
from IRCLogging import LoggingModule
from BanList import BanList
class commandHandling():
    """Central dispatcher of the IRC bot (Python 2 codebase).

    Routes parsed server packets to plugin modules, tracks per-channel state
    (user lists, topics, modes), offers message/notice sending with IRC
    512-byte line splitting, and loads packet/command modules from disk.
    """
    def __init__(self, channels, cmdprefix, name, ident, adminlist, loglevel):
        """Set up logging, load packet/command modules and initialise bot state."""
        self.LoggingModule = LoggingModule(loglevel)
        self.__CMDHandler_log__ = logging.getLogger("CMDHandler")
        self.name = name
        self.ident = ident
        self.Plugin = self.__LoadModules__("IRCpackets")
        self.commands = self.__LoadModules__("commands")
        self.bot_userlist = adminlist
        self.Bot_Auth = trackVerification(adminlist)
        self.channels = channels
        # channelData maps channel name -> {"Userlist": [...], "Topic": "", "Mode": ""}
        self.channelData = {}
        self.topic = {}
        self.cmdprefix = cmdprefix
        # Named event buses; "time" is timer-driven, the rest fire on IRC events.
        self.events = {"time" : TimerEvent(), "chat" : MsgEvent(),
                       "channeljoin" : StandardEvent(),
                       "channelpart" : StandardEvent(),
                       "channelkick" : StandardEvent(),
                       "userquit" : StandardEvent(),
                       "nickchange" : StandardEvent()}
        # Rotate the log file once per minute check interval (daily switch).
        self.events["time"].addEvent("LogfileSwitch", 60, self.LoggingModule.__switch_filehandle_daily__)
        self.server = None
        self.latency = None
        # Rank prefix -> numeric rank used by userGetRankNum().
        self.rankconvert = {"@@" : 3, "@" : 2, "+" : 1, "" : 0}
        self.startupTime = datetime.now()
        # Ring-buffer-like queue of the last packets, dumped on crash.
        self.PacketsReceivedBeforeDeath = Queue.Queue(maxsize = 50)
        self.threading = centralizedThreading.ThreadPool()
        self.Banlist = BanList("BannedUsers.db")
        self.helper = HelpModule()
        self.auth = None

    def handle(self, send, prefix, command, params, auth):
        """Record the raw packet for post-mortem debugging, then dispatch it
        to the plugin registered for this IRC command (if any)."""
        self.send = send
        ## In the next few lines I implement a basic logger so the logs can be put out when the bot dies.
        ## Should come in handy when looking at what or who caused trouble
        ## There is room for 50 entries, number can be increased or lowered at a later point
        try:
            self.PacketsReceivedBeforeDeath.put(u"{0} {1} {2}".format(prefix, command, params), False)
        except Queue.Full:
            # Queue full: drop the oldest entry to make room for the newest.
            self.PacketsReceivedBeforeDeath.get(block = False)
            self.PacketsReceivedBeforeDeath.put(u"{0} {1} {2}".format(prefix, command, params), False)
        try:
            if command in self.Plugin:
                self.Plugin[command][0].execute(self, send, prefix, command, params)
            else:
                # 0 is the lowest possible log level. Messages about unimplemented packets are
                # very common, so they will clutter up the file even if logging is set to DEBUG
                self.__CMDHandler_log__.log(0, "Unimplemented Packet: %s", command)
        except KeyError as error:
            #print "Unknown command '"+command+"'"
            self.__CMDHandler_log__.exception("Missing channel or other KeyError caught")
            print "Missing channel or other KeyError caught: "+str(error)

    def timeEventChecker(self):
        """Run every timer event that is due."""
        self.events["time"].tryAllEvents(self)

    def userGetRank(self, channel, username):
        """Return the rank prefix ('@@', '@', '+' or '') of username in channel,
        or None (implicitly) when the user is not in the channel's user list."""
        #print self.channelData[channel]["Userlist"]
        for user in self.channelData[channel]["Userlist"]:
            if user[0].lower() == username.lower():
                return user[1]

    def userGetRankNum(self, channel, username):
        """Return a numeric rank: 3 for registered bot admins, otherwise the
        rankconvert value for the user's channel prefix; -1 if not found."""
        if username in self.bot_userlist and self.Bot_Auth.isRegistered(username):
            return 3
        else:
            for user in self.channelData[channel]["Userlist"]:
                if user[0].lower() == username.lower():
                    if user[1] == "@@":
                        return 2
                    else:
                        return self.rankconvert[user[1]]
            return -1 # No user found

    def retrieveTrueCase(self, channel):
        """Return the stored (true-case) spelling of channel, or False."""
        for chan in self.channelData:
            if chan.lower() == channel.lower():
                return chan
        return False

    # A wrapper for sendChatMessage that does not require a send argument.
    def sendMessage(self, channel, msg, msgsplitter = None, splitAt = " "):
        self.sendChatMessage(self.send, channel, msg, msgsplitter, splitAt)

    def sendChatMessage(self, send, channel, msg, msgsplitter = None, splitAt = " "):
        """Send a PRIVMSG, splitting msg into parts when the full server-side
        line would exceed the 512-byte IRC limit."""
        # we calculate a max length value based on what the server would send to other users
        # if this bot sent a message.
        # Private messages from the server look like this:
        # nick!user@hostname PRIVMSG target :Hello World!
        # Nick is the username of the bot, user is the identification name of the bot and can be
        # different from the nick, it will prefix the hostname. target is the channel
        # to which we send the message. At the end, we add a constant (25) to the length to account
        # for whitespaces and other characters and eventual oddities.
        # The Hostname will be limited to 63, regardless of the actual length.
        # 7 characters for the PRIVSM string
        # if you want to create your own tweaked message splitter,
        # provide it as the fourth argument to self.sendChatMessage
        # otherwise, the default one, i.e. self.defaultsplitter, is used
        if msgsplitter == None:
            msgsplitter = self.defaultsplitter
        prefixLen = len(self.name) + len(self.ident) + 63 + 7 + len(channel) + 25
        remaining = 512-prefixLen
        #print remaining
        if len(msg)+prefixLen > 512:
            msgpart = msgsplitter(msg, remaining, splitAt)
            self.__CMDHandler_log__.debug("Breaking message %s into parts %s", msg, msgpart)
            for part in msgpart:
                #send("PRIVMSG {0} :{1}".format(channel, part))
                #send("PRIVMSG "+str(channel)+" :"+str(part))
                send(u"PRIVMSG {0} :{1}".format(channel, part))
                self.__CMDHandler_log__.debug("Sending parted message to channel/user %s: '%s'", channel, msg)
        else:
            #send("PRIVMSG {0} :{1}".format(channel, msg))
            #send("PRIVMSG "+channel+" :"+msg)
            send(u"PRIVMSG {0} :{1}".format(channel, msg))
            self.__CMDHandler_log__.debug("Sending to channel/user %s: '%s'", channel, msg)

    def sendNotice(self, destination, msg, msgsplitter = None, splitAt = " "):
        """Send a NOTICE with the same length-splitting logic as sendChatMessage."""
        # Works the same as sendChatMessage
        # Only difference is that this message is sent as a NOTICE,
        # and it does not require a send parameter.
        if msgsplitter == None:
            msgsplitter = self.defaultsplitter
        #NOTICE
        prefixLen = len(self.name) + len(self.ident) + 63 + 6 + len(destination) + 25
        remaining = 512-prefixLen
        #print remaining
        if len(msg)+prefixLen > 512:
            msgpart = msgsplitter(msg, remaining, splitAt)
            self.__CMDHandler_log__.debug("Breaking message %s into parts %s", msg, msgpart)
            for part in msgpart:
                #self.send("NOTICE "+str(destination)+" :"+str(part))
                self.send(u"NOTICE {0} :{1}".format(destination, part))
                self.__CMDHandler_log__.debug("Sending parted notice to channel/user %s: '%s'", destination, msg)
        else:
            #self.send("NOTICE "+str(destination)+" :"+str(msg))
            self.send(u"NOTICE {0} :{1}".format(destination, msg))
            self.__CMDHandler_log__.debug("Sending notice to channel/user %s: '%s'", destination, msg)

    def defaultsplitter(self, msg, length, splitAt):
        """Split msg into chunks of at most `length` characters, preferring to
        break at the last occurrence of splitAt inside each window."""
        start = 0
        end = length
        items = []
        while end <= len(msg):
            splitpos = msg[start:end].rfind(splitAt)
            # case 1: whitespace has not been found, ergo:
            # message is too long, so we split it at the position specified by 'end'
            if splitpos < 0:
                items.append(msg[start:end])
                start = end
            # case 2: whitespace has been found, ergo:
            # we split it at the whitespace
            # splitpos is a value local to msg[start:end], so we need to add start to it to get a global value
            else:
                items.append(msg[start:start+splitpos])
                start = start+splitpos+len(splitAt)
            end = start + length
        # Check if there is any remaining data
        # If so, append the remaining data to the list
        if start < len(msg):
            items.append(msg[start:])
        # remove all empty strings in the list because they are not needed nor desired
        for i in range(items.count("")):
            items.remove("")
        return items

    ## writeQueue adds a specified string to the internal queue of the bot.
    ## This functions handles marking the string with a DebugEntry prefix and the time
    ## at which the entry was added. You can also specify a name that will be added to
    ## the entry so that you can identify which module or command has created the entry.
    ##
    ##
    ## Please note that at the time of this writing the queue can hold a maximum of 50 entries.
    ## Adding new entries will kick the oldest entries out of the queue, so you should be
    ## conservative with the usage of writeQueue.
    # UPDATE: writeQueue is now deprecated, please use Python's logging module.
    # The logging module allows you to have seperate info and debug messages which will
    # be written automatically into log files. These are not limited to an arbitrary
    # number and will (should) not disappear on repeated crashes. Read up on how to use the logging module.
    # writeQueue messages will be written to the log files for the sake of improved compatibility
    def writeQueue(self, string, modulename = "no_name_given"):
        """Deprecated: append a timestamped debug entry to the crash queue."""
        entryString = "DebugEntry at {0} [{1!r}]: {2!r}".format(strftime("%H:%M:%S (%z)"), modulename, string)
        self.__CMDHandler_log__.debug("Added DebugEntry: '%s'", entryString)
        try:
            self.PacketsReceivedBeforeDeath.put(entryString, False)
        except Queue.Full:
            self.PacketsReceivedBeforeDeath.get(block = False)
            self.PacketsReceivedBeforeDeath.put(entryString, False)

    def joinChannel(self, send, channel):
        """Join one channel (str) or several (list of str); registers fresh
        per-channel state first. Raises TypeError for other argument types."""
        if isinstance(channel, str):
            if channel not in self.channelData:
                #self.channels.append(channel)
                self.channelData[channel] = {"Userlist" : [], "Topic" : "", "Mode" : ""}
                send("JOIN "+channel, 5)
                self.__CMDHandler_log__.info("Joining channel: '%s'", channel)
        elif isinstance(channel, list):
            for chan in channel:
                if chan not in self.channelData:
                    #self.channels.append(channel)
                    self.channelData[chan] = {"Userlist" : [], "Topic" : "", "Mode" : ""}
            send("JOIN "+",".join(channel), 3)
            self.__CMDHandler_log__.info("Joining several channels: '%s'", channel)
        else:
            self.__CMDHandler_log__.error("Trying to join a channel, but channel is not list or string: %s [%s]", channel, type(channel))
            raise TypeError
        print self.channelData

    def whoisUser(self, user):
        """Send WHOIS for user and queue them for registration verification."""
        self.send("WHOIS {0}".format(user))
        self.Bot_Auth.queueUser(user)
        self.__CMDHandler_log__.debug("Sending WHOIS for user '%s'", user)

    def userInSight(self, user):
        """Return True if user appears in any tracked channel's user list."""
        print self.channelData
        self.__CMDHandler_log__.debug("Checking if user '%s' is in the following channels: %s", user, self.channelData.keys())
        for channel in self.channelData:
            for userD in self.channelData[channel]["Userlist"]:
                if user == userD[0]:
                    return True
                    # NOTE(review): unreachable — placed after the return;
                    # likely intended to run before it.
                    self.__CMDHandler_log__.debug("Yes, he is (at least) in channel '%s'", channel)
        return False
        # NOTE(review): unreachable for the same reason.
        self.__CMDHandler_log__.debug("No, user is out of sight.")

    def __ListDir__(self, dir):
        """Return the .py file names in dir, skipping __init__* files."""
        files = os.listdir(dir)
        newlist = []
        self.__CMDHandler_log__.debug("Listing files in directory '%s'", dir)
        for i in files:
            if not i.startswith("__init__") and i.endswith(".py"):
                newlist.append(i)
        return newlist

    def __LoadModules__(self,path):
        """Import every module in path and return {module.ID: (module, filepath)}.

        Modules without a callable __initialize__ get it forced to False.
        """
        ModuleList = self.__ListDir__(path)
        self.__CMDHandler_log__.info("Loading modules in path '%s'...", path)
        Packet = {}
        for i in ModuleList:
            self.__CMDHandler_log__.debug("Loading file %s in path '%s'", i, path)
            module = imp.load_source("RenolIRC_"+i[0:-3], path+"/"+i)
            #print i
            Packet[module.ID] = (module, path+"/"+i)
            try:
                if not callable(module.__initialize__):
                    module.__initialize__ = False
                    self.__CMDHandler_log__.log(0, "File %s does not use an initialize function", i)
            except AttributeError:
                module.__initialize__ = False
                self.__CMDHandler_log__.log(0, "File %s does not use an initialize function", i)
            Packet[module.ID] = (module, path+"/"+i)
            #Packet[i[1].lower()].PATH = path + "/"+i[2]
            #self.Packet[i[1]] = self.Packet[i[1]].EXEC()
        print "ALL MODULES LOADED"
        self.__CMDHandler_log__.info("Modules in path '%s' loaded.", path)
        return Packet
| 2.265625 | 2 |
api/urls.py | gheyderov/E-commerce-website | 0 | 12772195 | from os import name
from django.db import router
from django.urls import path
from django.urls.conf import include
from api.views.core_views import SubscribeAPIView
from api.views.order_views import BasketItemDeleteAPIView, BasketView, WishlistAPIView, WishlistDeleteAPIView
from api.views.product_views import ProductAPIView, ProductCategoryAPIView
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
# Each entry maps a URL path to the corresponding API view class.
urlpatterns = [
    path ('basket/', BasketView.as_view(), name='basket'),
    path ('wishlist/', WishlistAPIView.as_view(), name='wishlist'),
    # NOTE(review): no trailing slash here, unlike the sibling routes —
    # confirm this is intentional (APPEND_SLASH redirects can drop POST bodies).
    path ('wishlist/delete', WishlistDeleteAPIView.as_view(), name='wishlist_delete'),
    path ('subscribe/', SubscribeAPIView.as_view(), name='subscribe'),
    path ('productcategory/', ProductCategoryAPIView.as_view(), name='product-category'),
    path ('product/' , ProductAPIView.as_view(), name='product'),
    path ('basket-item/delete/', BasketItemDeleteAPIView.as_view(), name='basket-item-delete'),
]
tests/testxy.py | parsotat/swiftbat_python | 5 | 12772196 | <reponame>parsotat/swiftbat_python
import unittest
import swiftbat
import numpy as np
from pathlib import Path
class TestXY(unittest.TestCase):
    """Round-trip checks of swiftbat's DET_ID <-> (DETX, DETY) conversions
    against a saved slice of real BAT event data."""

    def getsample(self):
        """
        Test data from BAT data
        from astropy.io import fits
        d = fits.getdata('/tmp/sw01090661000bevshto_uf.evt.gz')
        sample = np.array(d[10:1000])
        np.save(open(file, "wb"), sample)
        """
        file = Path(__file__).parent.joinpath("sampleevents.np")
        # Use a context manager so the handle is closed deterministically;
        # the previous np.load(open(file, "rb")) never closed the file.
        with open(file, "rb") as fh:
            sample = np.load(fh)
        return sample

    def test_detid2xy_scalar(self):
        """detid2xy matches the recorded DETX/DETY for scalar detector ids."""
        sample = self.getsample()
        for samplerow in sample[0:100]:
            x, y = swiftbat.detid2xy(samplerow['DET_ID'])
            assert (x, y) == (samplerow['DETX'], samplerow['DETY'])

    def test_detid2xy_array(self):
        """detid2xy accepts a whole array of detector ids at once."""
        sample = self.getsample()
        x, y = swiftbat.detid2xy(sample['DET_ID'])
        assert np.allclose(x, sample['DETX'])
        assert np.allclose(y, sample['DETY'])

    def test_xy2detid_scalar(self):
        """xy2detid matches the recorded DET_ID for scalar coordinates."""
        sample = self.getsample()
        for samplerow in sample[0:100]:
            detid = swiftbat.xy2detid(samplerow['DETX'], samplerow['DETY'])
            assert detid == samplerow['DET_ID']

    def test_xy2detid_array(self):
        """xy2detid accepts whole coordinate arrays at once."""
        sample = self.getsample()
        detid = swiftbat.xy2detid(sample['DETX'], sample['DETY'])
        assert np.allclose(detid, sample['DET_ID'])
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 2.421875 | 2 |
bsadiscord/data/sayings.py | perkinsms/bsadiscord | 0 | 12772197 | <reponame>perkinsms/bsadiscord<filename>bsadiscord/data/sayings.py<gh_stars>0
# Standard Scouting recitations kept as text constants. The leading and
# trailing newlines are part of each string and are preserved deliberately.
SCOUTOATH = '''
On my honor, I will do my best
to do my duty to God and my country
to obey the Scout Law
to help other people at all times
to keep myself physically strong, mentally awake and morally straight.
'''

# The twelve points of the Scout Law.
SCOUTLAW = '''
A scout is:
Trustworthy
Loyal
Helpful
Friendly
Courteous
Kind
Obedient
Cheerful
Thrifty
Brave
Clean
and Reverent
'''

# The Outdoor Code.
OUTDOORCODE = '''
As an American, I will do my best:
To be clean in my outdoor manner
To be careful with fire
To be considerate in the outdoors
And to be conservation-minded
'''

# The Pledge of Allegiance.
PLEDGE = '''
I pledge allegiance
to the flag
of the United States of America
and to the republic
for which it stands:
one nation under God
with liberty
and justice for all
'''
| 1.898438 | 2 |
src/solidityobf/transformations/modify_data_flow_1.py | ZhouBoXiao/SolidityObfuscator | 2 | 12772198 | from solidityobf.common.classes import String,PropertyName,Ident,Expr,VarDeclaration,Assignment,Statements
from solidityobf.common.tree_walker import walker
from solidityobf.common.common import deepcopy,is_functioncall_inside
from solidityobf.common.finder import get_all_ident,get_all_upper_statements_and_pos,get_upper_statement_and_pos,get_upper_expression_statement_and_pos
from solidityobf.transformations.common import NUMBERMAX, count_object, shouldi, genVarNotIn
def modify_data_flow_1(program_tree, verbose=0, numbermax=NUMBERMAX):
if verbose > 1:
print "apply modify_data_flow_1 transformation"
p = deepcopy(program_tree)
ret = get_all_ident(program_tree)
arg = {"ident_present":ret, "numbermax":numbermax}
arg["size"] = count_object(p, [Assignment])
walker(p, postfunction=modify_data_flow_1_post_func, arg=arg)
return p
def modify_data_flow_1_post_func(program, arg):
    """Walker post-hook: for selected Assignment nodes, hoist the assigned
    expression into a fresh temporary variable and assign via that variable.

    Mutates the surrounding statement list in place: inserts the temporary's
    declaration at position 0 and the intermediate assignment just before the
    original statement, then rewires program.expr to the new identifier.
    """
    if isinstance(program, Assignment):
        # shouldi() rate-limits how many candidates actually get transformed.
        if shouldi(arg=arg):
            s, pos = get_upper_statement_and_pos(program)
            s2, pos2 = get_upper_expression_statement_and_pos(program)
            # Only transform plain statements (not expression statements) and
            # skip assignments containing function calls, whose evaluation
            # order/side effects must not be disturbed.
            if pos2 is None and s is not None:
                if not is_functioncall_inside(program):# and position_in_exprstatement(program) == 0: # TODO check position_in_exprstatement is working
                    vardeclist, name = genVarNotIn(arg["ident_present"])
                    s.insert(pos, Assignment(Expr([Ident(name)]), "=", program.expr))
                    s.insert(0, vardeclist)
                    program.expr = Ident(name)
                    program.expr.parent = program
    # Walkers expect (new_nodes, arg); no nodes are added at this level.
    return [], arg
| 2.265625 | 2 |
robber/matchers/called.py | pylipp/robber.py | 23 | 12772199 | from robber import expect
from robber.explanation import Explanation
from robber.matchers.base import Base
class Called(Base):
    """
    expect(mock).to.be.called()
    """

    def matches(self):
        # Anything lacking a `called` attribute is not a mock object.
        missing = object()
        called_flag = getattr(self.actual, 'called', missing)
        if called_flag is missing:
            raise TypeError('{actual} is not a mock'.format(actual=self.actual))
        return called_flag

    @property
    def explanation(self):
        return Explanation(self.actual, self.is_negative, 'be called')


expect.register('called', Called)
| 2.640625 | 3 |
tools/ffmpeg/playmany.py | AnantTiwari-Naman/pyglet | 1,160 | 12772200 | <filename>tools/ffmpeg/playmany.py<gh_stars>1000+
"""
Usage
playmany.py
Uses media_player to play a sequence of samples and record debug info
A configuration must be active, see command configure.py
If the active configuration has disallowed dbg overwrites it will do nothing.
If a playlist was provided at session creation, then only the samples in the
playlist will be played, otherwise all files in samples_dir.
"""
import os
import subprocess
import sys
import fs
import mpexceptions
def main():
    """Resolve the active session and play its samples.

    Returns 0 on success, 1 on any known error condition.
    """
    def _fail(message):
        print(message)
        return 1

    try:
        pathserv = fs.get_path_info_for_active_session()
    except mpexceptions.ExceptionUndefinedSamplesDir:
        return _fail("The env var 'pyglet_mp_samples_dir' is not defined.")
    except mpexceptions.ExceptionNoSessionIsActive:
        return _fail("*** Error, no session active.")
    try:
        play_many(pathserv, timeout=120)
    except mpexceptions.ExceptionAttemptToBreakRawDataProtection:
        return _fail("*** Error, attempt to overwrite raw data when protect_raw_data is True.")
    return 0
def play_many(pathserv, timeout=120):
    """plays the samples in the session playlist for the current active session

    timeout: max time allowed to play a sample, default is 120 seconds
    """
    conf = fs.get_session_configuration(pathserv)
    # Raw-data protection only applies outside dev-debug mode.
    if not conf["dev_debug"] and conf["protect_raw_data"]:
        raise mpexceptions.ExceptionAttemptToBreakRawDataProtection()
    core_play_many(pathserv, pathserv.session_playlist_generator(), timeout=timeout)
def core_play_many(pathserv, playlist_gen, timeout=120):
    """Play every (sample, filename) pair, writing one debug file per sample."""
    for sample, filename in playlist_gen:
        debug_out = pathserv.dbg_filename(sample)
        print("playmany playing:", filename)
        player_args = [
            os.path.join(fs.get_media_player_path(), "media_player.py"),
            "--debug",
            "--outfile=" + debug_out,
            filename,
        ]
        was_killed, _returncode = cmd__py3(player_args, timeout=timeout)
        if was_killed:
            print("WARNING: killed by timeout, file: %s" % filename)
def cmd__py3(cmdline, bufsize=-1, cwd=None, timeout=60):
    """runs a .py script as a subprocess with the same python as the caller

    cmdline: list [<scriptname>, arg1, ...]; the caller's list is NOT modified.
    bufsize: passed through to subprocess.Popen.
    cwd: working directory for the child process, or None.
    timeout: time in seconds; the subprocess will be killed if it is still
        running at that time.

    Returns (killed, returncode): killed is True if the timeout expired.
    """
    # Build a fresh argv instead of mutating the caller's list — the old
    # in-place inserts were a visible side effect on the argument.
    # "-u" runs the child unbuffered; use the same python as the caller.
    full_cmdline = [sys.executable, "-u"] + list(cmdline)
    p = subprocess.Popen(
        full_cmdline,
        bufsize=bufsize,
        shell=False,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=cwd,
    )
    killed = False
    try:
        out, err = p.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        killed = True
        # Kill the child, then drain the pipes so it is fully reaped.
        p.kill()
        out, err = p.communicate()
    ## print("out:", out)
    ## print("err:", err)
    return killed, p.returncode
def sysargs_to_mainargs():
    """builds main args from sys.argv"""
    cli_args = sys.argv[1:]
    # Print the module docstring and bail out when --help is requested.
    if cli_args and cli_args[0].startswith("--help"):
        print(__doc__)
        sys.exit(1)
# Handle --help before doing any session work, then run the player loop.
if __name__ == "__main__":
    sysargs_to_mainargs()
    main()
| 2.046875 | 2 |
chapt05/shadow.py | ohlogic/PythonOpenGLSuperBible4Glut | 0 | 12772201 | <reponame>ohlogic/PythonOpenGLSuperBible4Glut
#!/usr/bin/python3
# Demonstrates simple planar shadows
# <NAME>
# <EMAIL>
#
# based heavily on shadow.cpp
# OpenGL SuperBible
# Program by <NAME>.
import math
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
# ASCII escape key code, as bytes for GLUT keyboard callbacks.
ESCAPE = b'\033'
import sys
sys.path.append("../shared")
from math3d import M3DVector3f, m3dFindNormal, M3DMatrix44f, m3dGetPlaneEquation, m3dMakePlanarShadowMatrix
from fakeglut import glutSolidSphere
# Current scene rotation about the x and y axes, in degrees.
xRot = 0.0
yRot = 0.0
# Array-of-4-GLfloat type for light/material parameter vectors.
lightArrayType = GLfloat * 4
ambientLight = lightArrayType(0.3, 0.3, 0.3, 1.0)
diffuseLight = lightArrayType(0.7, 0.7, 0.7, 1.0)
specular = lightArrayType(1.0, 1.0, 1.0, 1.0)
# Light position; the fourth (w) component is 0.0 — in OpenGL that makes it
# a directional light.
lightPos = (GLfloat * 4)(-75.0, 150.0, -50.0, 0.0)
specref = (GLfloat * 4)(1.0, 1.0, 1.0, 1.0)
# Planar-projection shadow matrix; computed in InitGL().
shadowMat = M3DMatrix44f()
def DrawJet(nShadow):
    """Draw the jet as a triangle mesh.

    nShadow: 0 draws the jet in gray; any other value draws it solid black
    (used for the flattened shadow pass).
    """
    # Set material color, note we only have to set to black
    # for the shadow once
    if nShadow == 0:
        glColor3ub(128, 128, 128)
    else:
        glColor3ub(0,0,0)
    # Nose Cone - Points straight down
    # Set material color
    glBegin(GL_TRIANGLES)
    # NOTE(review): this normal is set twice in a row; the duplicate call is
    # harmless but looks like a copy/paste leftover.
    glNormal3f(0.0, -1.0, 0.0)
    glNormal3f(0.0, -1.0, 0.0)
    glVertex3f(0.0, 0.0, 60.0)
    glVertex3f(-15.0, 0.0, 30.0)
    glVertex3f(15.0,0.0,30.0)
    # Verticies for this panel
    vPoints = [ M3DVector3f(15.0, 0.0, 30.0),
                M3DVector3f(0.0, 15.0, 30.0),
                M3DVector3f(0.0, 0.0, 60.0)]
    # Calculate the normal for the plane
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    vPoints = [ M3DVector3f(0.0, 0.0, 60.0),
                M3DVector3f(0.0, 15.0, 30.0),
                M3DVector3f(-15.0, 0.0, 30.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    # Body of the Plane ############
    vPoints = [ M3DVector3f(-15.0, 0.0, 30.0),
                M3DVector3f(0.0, 15.0, 30.0),
                M3DVector3f(0.0, 0.0, -56.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    vPoints = [ M3DVector3f(0.0, 0.0, -56.0),
                M3DVector3f(0.0, 15.0, 30.0),
                M3DVector3f(15.0, 0.0, 30.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    # Flat underside of the fuselage, facing straight down.
    glNormal3f(0.0, -1.0, 0.0)
    glVertex3f(15.0,0.0,30.0)
    glVertex3f(-15.0, 0.0, 30.0)
    glVertex3f(0.0, 0.0, -56.0)

    #######################
    # Left wing
    # Large triangle for bottom of wing
    vPoints = [ M3DVector3f(0.0, 2.0, 27.0),
                M3DVector3f(-60.0, 2.0, -8.0),
                M3DVector3f(60, 2.0, -8.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    vPoints = [ M3DVector3f(60.0, 2.0, -8.0),
                M3DVector3f(0.0, 7.0, -8.0),
                M3DVector3f(0.0, 2.0, 27.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    vPoints = [ M3DVector3f(60.0, 2.0, -8.0),
                M3DVector3f(-60.0, 2.0, -8.0),
                M3DVector3f(0.0, 7.0, -8.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    vPoints = [ M3DVector3f(0.0, 2.0, 27.0),
                M3DVector3f(0.0, 7.0, -8.0),
                M3DVector3f(-60.0, 2.0, -8.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    # Tail section###############
    # Bottom of back fin
    glNormal3f(0.0, -1.0, 0.0)
    glVertex3f(-30.0, -0.50, -57.0)
    glVertex3f(30.0, -0.50, -57.0)
    glVertex3f(0.0,-0.50,-40.0)

    vPoints = [ M3DVector3f(0.0, -0.5, -40.0),
                M3DVector3f(30.0, -0.5, -57.0),
                M3DVector3f(0.0, 4.0, -57.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    vPoints = [ M3DVector3f(0.0, 4.0, -57.0),
                M3DVector3f(-30.0, -0.5, -57.0),
                M3DVector3f(0.0, -0.5, -40.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    vPoints = [ M3DVector3f(30.0, -0.5, -57.0),
                M3DVector3f(-30.0, -0.5, -57.0),
                M3DVector3f(0.0, 4.0, -57.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    # Vertical tail fin.
    vPoints = [ M3DVector3f(0.0, 0.5, -40.0),
                M3DVector3f(3.0, 0.5, -57.0),
                M3DVector3f(0.0, 25.0, -65.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    vPoints = [ M3DVector3f(0.0, 25.0, -65.0),
                M3DVector3f(-3.0, 0.5, -57.0),
                M3DVector3f(0.0, 0.5, -40.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])

    vPoints = [ M3DVector3f(3.0, 0.5, -57.0),
                M3DVector3f(-3.0, 0.5, -57.0),
                M3DVector3f(0.0, 25.0, -65.0)]
    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])
    glNormal3fv(vNormal)
    glVertex3fv(vPoints[0])
    glVertex3fv(vPoints[1])
    glVertex3fv(vPoints[2])
    glEnd()
def InitGL(Width, Height):
    """One-time OpenGL state setup for the shadow demo.

    Enables depth testing, back-face culling and a single positional
    light, configures color-material tracking with shiny specular
    highlights, and precomputes the planar projection matrix
    (``shadowMat``) used to squash the jet onto the ground plane.
    Width/Height are accepted for the conventional init signature but
    are unused here; the projection is built in ReSizeGLScene.
    """
    global shadowMat
    # Any three points on the ground (counter clockwise order)
    points = [ M3DVector3f(-30.0, -149.0, -20.0),
                M3DVector3f(-30.0, -149.0, 20.0),
                M3DVector3f(40.0, -149.0, 20.0) ]
    glEnable(GL_DEPTH_TEST)
    glEnable(GL_CULL_FACE)  # Do not calculate inside of jet
    glFrontFace(GL_CCW)  # Counter clock-wise polygons face out
    # Enable Lighting
    glEnable(GL_LIGHTING)
    # Setup and enable light 0
    glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight)
    glLightfv(GL_LIGHT0, GL_SPECULAR, specular)
    glLightfv(GL_LIGHT0,GL_POSITION,lightPos)
    glEnable(GL_LIGHT0)
    # Enable color tracking
    glEnable(GL_COLOR_MATERIAL)
    # Set Material properties to follow glColor values
    glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE)
    # All materials hereafter have full specular reflectivity
    # with a high shine
    glMaterialfv(GL_FRONT, GL_SPECULAR, specref)
    glMateriali(GL_FRONT, GL_SHININESS, 128)
    # light blue background
    glClearColor(0.0, 0.0, 1.0, 1.0)
    # Get the plane equation from three points on the ground
    vPlaneEquation = m3dGetPlaneEquation(points[0], points[1], points[2])
    # Calculate projection matrix to draw shadow on the ground
    shadowMat = m3dMakePlanarShadowMatrix(vPlaneEquation, lightPos)
    # Renormalize normals after rotation so lighting stays correct.
    glEnable(GL_NORMALIZE)
# Called to draw scene
def DrawGLScene():
    """Render one frame: shaded ground quad, lit jet, its projected
    shadow, and a yellow sphere marking the light, then swap buffers.

    The shadow is the jet re-drawn unlit with the depth test off after
    multiplying in ``shadowMat``, which flattens geometry onto the
    ground plane.
    """
    # Clear the window with current clearing color
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # Draw the ground, we do manual shading to a darker green
    # in the background to give the illusion of depth
    glBegin(GL_QUADS)
    glColor3ub(0,32,0)
    glVertex3f(400.0, -150.0, -200.0)
    glVertex3f(-400.0, -150.0, -200.0)
    glColor3ub(0,255,0)
    glVertex3f(-400.0, -150.0, 200.0)
    glVertex3f(400.0, -150.0, 200.0)
    glEnd()
    # Save the matrix state and do the rotations
    glPushMatrix()
    # Draw jet at new orientation, put light in correct position
    # before rotating the jet
    glEnable(GL_LIGHTING)
    glLightfv(GL_LIGHT0,GL_POSITION,lightPos)
    glRotatef(xRot, 1.0, 0.0, 0.0)
    glRotatef(yRot, 0.0, 1.0, 0.0)
    DrawJet(0)
    # Restore original matrix state
    glPopMatrix()
    # Get ready to draw the shadow and the ground
    # First disable lighting and save the projection state
    glDisable(GL_DEPTH_TEST)
    glDisable(GL_LIGHTING)
    glPushMatrix()
    # Multiply by shadow projection matrix
    glMultMatrixf(shadowMat)
    # Now rotate the jet around in the new flattend space
    glRotatef(xRot, 1.0, 0.0, 0.0)
    glRotatef(yRot, 0.0, 1.0, 0.0)
    # Pass true to indicate drawing shadow
    DrawJet(1)
    # Restore the projection to normal
    glPopMatrix()
    # Draw the light source
    glPushMatrix()
    glTranslatef(lightPos[0],lightPos[1], lightPos[2])
    glColor3ub(255,255,0)
    glutSolidSphere(5.0,10,10)
    glPopMatrix()
    # Restore lighting state variables
    glEnable(GL_DEPTH_TEST)
    glutSwapBuffers()
def ReSizeGLScene(w, h):
    """GLUT reshape handler: rebuild the perspective projection for the
    new window size, back the camera off along -Z, and re-apply the
    light position (it is transformed by the current modelview)."""
    # Prevent a divide by zero
    if(h == 0):
        h = 1
    # Set Viewport to window dimensions
    glViewport(0, 0, w, h)
    # Reset coordinate system
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # Produce the perspective projection
    fAspect = float(w)/float(h)
    gluPerspective(60.0, fAspect, 200.0, 500.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    # Move out Z axis so we can see everything
    glTranslatef(0.0, 0.0, -400.0)
    glLightfv(GL_LIGHT0, GL_POSITION, lightPos)
def specialkeyPressed(key, x, y):
    """GLUT special-key handler: arrow keys tumble the jet in 5-degree
    steps (up/down about x, left/right about y), wrapping both angles
    into [0, 360) before requesting a redraw.

    x and y (mouse coordinates) are part of the GLUT callback signature
    and are ignored.
    """
    global xRot, yRot
    # Per-key (delta_x, delta_y) rotation table; unknown keys map to no
    # change, but the wrap below is still applied, as before.
    deltas = {
        GLUT_KEY_UP: (-5.0, 0.0),
        GLUT_KEY_DOWN: (5.0, 0.0),
        GLUT_KEY_LEFT: (0.0, -5.0),
        GLUT_KEY_RIGHT: (0.0, 5.0),
    }
    dx, dy = deltas.get(key, (0.0, 0.0))
    # Wrap exactly as the original: truncate to int, modulo 360, then
    # promote back to float.
    xRot = float(int(xRot + dx) % 360)
    yRot = float(int(yRot + dy) % 360)
    glutPostRedisplay()
def keyPressed(*args):
    """GLUT keyboard handler: destroy the window and exit on ESC.

    args[0] is the pressed key; ESCAPE and window are module-level
    names defined elsewhere in this file.
    """
    if args[0] == ESCAPE:
        glutDestroyWindow(window)
        sys.exit()
# Main program entry point: create a double-buffered RGBA window with a
# depth buffer, register the callbacks, initialize GL state and hand
# control to the GLUT event loop (which never returns).
if __name__ == '__main__':
    glutInit()
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
    glutInitWindowSize(640, 480)
    glutInitWindowPosition(0, 0)
    window = glutCreateWindow("Shadow")
    glutDisplayFunc(DrawGLScene)
    # Uncomment this line to get full screen.
    #glutFullScreen()
    #glutIdleFunc(DrawGLScene)
    #glutTimerFunc( int(1.0/10.0), update, 0)
    glutReshapeFunc(ReSizeGLScene)
    glutKeyboardFunc(keyPressed)
    glutSpecialFunc (specialkeyPressed);
    # Initialize our window.
    InitGL(640, 480)
    # Start Event Processing Engine
    glutMainLoop()
| 2.59375 | 3 |
code/__init__.py | deekay2310/SE21_HW2B_Group6 | 16 | 12772202 | from math import *
| 1.179688 | 1 |
dataset.py | yokinglou/CorresPondenceNet | 1 | 12772203 | import os
import h5py
import torch
import numpy as np
import scipy
import json
class CorresPondenceNet(torch.utils.data.Dataset):
    """Point-cloud keypoint-correspondence dataset for one category.

    Loads point clouds, keypoint indices, mesh names and pre-computed
    geodesic mean distances for the category named by
    ``cfg['class_name']`` from HDF5 files under ``cfg['data_path']``.
    Samples are split 70/15/15 into train/val/test by *flag*.

    Only ``cfg['task_type'] == 'embedding'`` is supported.

    Fixes vs. previous version: the bare ``except:`` around the class
    lookup now preserves the original cause, the final line no longer
    carries trailing corruption, and the three split branches share one
    slice.
    """

    def __init__(self, cfg, flag='train'):
        super().__init__()
        with open(os.path.join(cfg['data_path'], 'name2id.json'), 'r') as f:
            self.name2id = json.load(f)
        try:
            self.catg = self.name2id[cfg['class_name'].capitalize()]
        except KeyError as err:
            # Keep the ValueError contract but chain the cause.
            raise ValueError(
                'Unknown class name: {}'.format(cfg['class_name'])) from err
        self.task = cfg['task_type']
        if self.task != 'embedding':
            raise ValueError('Unsupported task_type: {}'.format(self.task))
        # Pre-computed geodesic mean distances for this category.
        with h5py.File(os.path.join(cfg['data_path'], 'corr_mean_dist_geo', '{}_mean_distance.h5'.format(self.catg)), 'r') as f:
            self.mean_distance = f['mean_distance'][:]
        self.users = {}
        self.pcds = []
        self.keypoints = []
        self.num_annos = 0
        filename = os.path.join(
            cfg['data_path'], '{}.h5'.format(self.catg))
        with h5py.File(filename, 'r') as f:
            self.pcds = f['point_clouds'][:]
            self.keypoints = f['keypoints'][:]
            self.mesh_names = f['mesh_names'][:]
        # 70% train / 15% val / 15% test split boundaries.
        num_train = int(self.pcds.shape[0] * 0.7)
        num_divide = int(self.pcds.shape[0] * 0.85)
        if flag == 'train':
            split = slice(None, num_train)
        elif flag == 'val':
            split = slice(num_train, num_divide)
        elif flag == 'test':
            split = slice(num_divide, None)
        else:
            raise ValueError(
                "flag must be 'train', 'val' or 'test', got {!r}".format(flag))
        self.pcds = self.pcds[split]
        self.keypoints = self.keypoints[split]
        self.mesh_names = self.mesh_names[split]
        self.num_annos = self.pcds.shape[0]

    def __getitem__(self, item):
        """Return (point cloud float tensor, keypoint-index int tensor,
        mean-distance float tensor, 0) for sample *item*."""
        if self.task != 'embedding':
            raise ValueError('Unsupported task_type: {}'.format(self.task))
        pcd = self.pcds[item]
        keypoint_index = np.array(self.keypoints[item], dtype=np.int32)
        return (torch.tensor(pcd).float(),
                torch.tensor(keypoint_index).int(),
                torch.tensor(self.mean_distance).float(),
                0)

    def __len__(self):
        # Number of annotated samples in the selected split.
        return self.num_annos
libs/sol-REL-1.7.5.0/tests/test_file.py | realms-team/solmanager | 0 | 12772204 | <filename>libs/sol-REL-1.7.5.0/tests/test_file.py
from .context import sol
import os
import random
import pytest
# ============================ defines ===============================
FILENAME = 'temp_test_file.sol'  # scratch file shared by every test below
EXAMPLE_MAC = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]  # sample 8-byte MAC
# ============================ fixtures ==============================
@pytest.fixture
def removeFile():
    """Teardown-only fixture: after the test body runs, delete the
    temporary .sol file if it was created."""
    yield
    try:
        os.remove(FILENAME)
    except OSError:
        # if file does not exist. NOT an error.
        pass
# Parametrization table for range queries against a file whose objects
# carry timestamps 0..999.  Each tuple is
# (start_timestamp, end_timestamp, idxMin, idxMax): the query window and
# the expected slice bounds of the returned list.
EXPECTEDRANGE = [
    (
        100,  # start_timestamp
        300,  # end_timestamp
        100,  # idxMin
        301,  # idxMax
    ),
    (
        -5,  # start_timestamp
        300,  # end_timestamp
        0,  # idxMin
        301,  # idxMax
    ),
    (
        100,  # start_timestamp
        1100,  # end_timestamp
        100,  # idxMin
        1000,  # idxMax
    ),
    (
        -5,  # start_timestamp
        1100,  # end_timestamp
        0,  # idxMin
        1000,  # idxMax
    ),
    (
        -500,  # start_timestamp
        -100,  # end_timestamp
        0,  # idxMin
        0,  # idxMax
    ),
    (
        1100,  # start_timestamp
        1500,  # end_timestamp
        0,  # idxMin
        0,  # idxMax
    ),
]
@pytest.fixture(params=EXPECTEDRANGE)
def expectedRange(request):
    """Yield each (start, end, idxMin, idxMax) expectation in turn."""
    return request.param
# ============================ helpers ===============================
def random_sol_json(timestamp=0):
    """Return one random SOL-style object (dict) stamped with *timestamp*.

    The MAC is a single random byte repeated eight times; the UDP-like
    value carries random ports plus 10-30 copies of one random byte.
    """
    # Draw the random values in the same order the original dict
    # literal evaluated them, so RNG consumption is unchanged.
    mac_byte = random.randint(0x00, 0xff)
    src_port = random.randint(0x0000, 0xffff)
    dst_port = random.randint(0x0000, 0xffff)
    data_byte = random.randint(0x00, 0xff)
    data_len = random.randint(10, 30)
    return {
        "timestamp": timestamp,
        "mac": sol._format_buffer([mac_byte] * 8),
        "type": 0x0e,
        "value": {
            'srcPort': src_port,
            'dstPort': dst_port,
            'data': [data_byte] * data_len,
        },
    }
# ============================ tests =================================
def test_dump_load(removeFile):
    """Round-trip: objects dumped to a file must load back unchanged.

    The two debug prints were Python-2 print *statements*; with a single
    argument, parenthesized print() behaves identically on Python 2 and
    keeps this module importable on Python 3.
    """
    # prepare dicts to dump
    sol_jsonl_toDump = [random_sol_json() for _ in range(1000)]
    # dump
    sol.dumpToFile(sol_jsonl_toDump, FILENAME)
    # load
    sol_jsonl_loaded = sol.loadFromFile(FILENAME)
    # compare
    print(sol_jsonl_loaded)
    print(sol_jsonl_toDump)
    assert sol_jsonl_loaded == sol_jsonl_toDump
def test_dump_corrupt_load(removeFile):
    """Loading must skip a corrupt-CRC HDLC frame and trailing garbage,
    returning only the valid objects dumped before and after them."""
    # prepare dicts to dump
    sol_jsonl_toDump1 = [random_sol_json() for _ in range(500)]
    sol_jsonl_toDump2 = [random_sol_json() for _ in range(500)]
    # write first set of valid data
    sol.dumpToFile(sol_jsonl_toDump1, FILENAME)
    # write HDLC frame with corrupt CRC
    with open(FILENAME, 'ab') as f:
        # NOTE(review): chr()-joined str written to a binary file is
        # Python-2 specific; Python 3 would require bytes here.
        bin_data = ''.join([chr(b) for b in
                            [0x7E, 0x10, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x75,
                             0x94, 0xE8, 0x0B, 0x6B, 0xAE, 0xE1, 0x19, 0x54, 0x74, 0xF3, 0x00, 0x00, 0x7E]])
        f.write(bin_data)
    # write some garbage
    with open(FILENAME, 'ab') as f:
        f.write("############################## garbage ##############################")
    # write second set of valid data
    sol.dumpToFile(sol_jsonl_toDump2, FILENAME)
    # load
    sol_jsonl_loaded = sol.loadFromFile(FILENAME)
    # compare
    assert sol_jsonl_loaded == sol_jsonl_toDump1 + sol_jsonl_toDump2
def test_retrieve_range(removeFile):
    """A window extending past the last timestamp returns everything
    from start_timestamp onwards."""
    # prepare dicts to dump (timestamps 0..999)
    sol_jsonl_toDump = [random_sol_json(timestamp=ts) for ts in range(1000)]
    # dump
    sol.dumpToFile(sol_jsonl_toDump, FILENAME)
    # load
    sol_jsonl_loaded = sol.loadFromFile(
        FILENAME,
        start_timestamp=100,
        end_timestamp=1900,
    )
    # compare
    assert sol_jsonl_loaded == sol_jsonl_toDump[100:]
def test_retrieve_range_corrupt_beginning(removeFile):
    """Garbage at the start of the file must not break range loading."""
    # prepare dicts to dump (timestamps 0..999)
    sol_jsonl_toDump = [random_sol_json(timestamp=ts) for ts in range(1000)]
    # dump: garbage first, then the valid data
    with open(FILENAME, 'ab') as f:
        f.write("garbage")
    sol.dumpToFile(sol_jsonl_toDump, FILENAME)
    # load
    sol_jsonl_loaded = sol.loadFromFile(
        FILENAME,
        start_timestamp=100,
        end_timestamp=800
    )
    # compare (end_timestamp is inclusive: 100..800 -> slice 100:801)
    assert sol_jsonl_loaded == sol_jsonl_toDump[100:801]
def test_retrieve_range_corrupt_middle(removeFile):
    """Garbage between two valid dumps must not break range loading."""
    # prepare dicts to dump (timestamps 0..499 and 500..999)
    sol_jsonl_toDump1 = [random_sol_json(timestamp=ts) for ts in range(500)]
    sol_jsonl_toDump2 = [random_sol_json(timestamp=500 + ts) for ts in range(500)]
    # dump: valid data, garbage, more valid data
    sol.dumpToFile(sol_jsonl_toDump1, FILENAME)
    with open(FILENAME, 'ab') as f:
        f.write("garbage")
    sol.dumpToFile(sol_jsonl_toDump2, FILENAME)
    # load
    sol_jsonl_loaded = sol.loadFromFile(
        FILENAME,
        start_timestamp=100,
        end_timestamp=800,
    )
    # compare (end_timestamp is inclusive)
    assert sol_jsonl_loaded == (sol_jsonl_toDump1 + sol_jsonl_toDump2)[100:801]
def test_retrieve_range_corrupt_end(removeFile):
    """Garbage appended after the data must not break range loading."""
    # prepare dicts to dump (timestamps 0..999)
    sol_jsonl_toDump = [random_sol_json(timestamp=ts) for ts in range(1000)]
    # dump, then append garbage
    sol.dumpToFile(sol_jsonl_toDump, FILENAME)
    with open(FILENAME, 'ab') as f:
        f.write("garbage")
    # load
    sol_jsonl_loaded = sol.loadFromFile(
        FILENAME,
        start_timestamp=100,
        end_timestamp=800
    )
    # compare (end_timestamp is inclusive)
    assert sol_jsonl_loaded == sol_jsonl_toDump[100:801]
def test_retrieve_range_corrupt_all(removeFile):
    """A file containing nothing but garbage yields an empty list."""
    # dump only garbage
    with open(FILENAME, 'ab') as f:
        for _ in range(100):
            f.write("garbage")
    # load
    sol_jsonl_loaded = sol.loadFromFile(
        FILENAME,
        start_timestamp=100,
        end_timestamp=800,
    )
    # compare
    assert sol_jsonl_loaded == []
| 2.078125 | 2 |
bind9/main.py | tuimac/bind9 | 0 | 12772205 | <filename>bind9/main.py<gh_stars>0
#!/usr/bin/env python3
# Standard library
import argparse
import logging
import os
import subprocess
import sys
import traceback

# Third-party
import path
import yaml

# Local
from .config.namedConfig import NamedConfig
# Module-level logger; not referenced by the code below — presumably
# reserved for future use.
logger = logging.getLogger('bind9')
def importConfig(path):
    """Load a YAML config file and return its parsed content.

    *path* may begin with '~'; it is expanded to the user's home
    directory.  Errors (missing file, invalid YAML) propagate to the
    caller — the previous ``except Exception as e: raise e`` wrapper
    was a no-op and has been removed.
    """
    path = os.path.expanduser(path)
    with open(path, 'r') as conf:
        # SafeLoader refuses arbitrary object construction from the file.
        return yaml.load(conf, Loader=yaml.SafeLoader)
class CustomArgparse(argparse.ArgumentParser):
    """ArgumentParser variant with a friendlier error banner.

    NOTE(review): relies on ``sys`` being imported at module level.
    """

    def error(self, message):
        """Write an [Error] banner (a generic one when *message* is
        empty) to stderr, print the full help text, then exit(2)."""
        banner = message if message else "Argument is wrong...<(^^;)"
        print("[Error] " + banner + "\n", file=sys.stderr)
        self.print_help()
        sys.exit(2)
def main():
    """CLI entry point: parse arguments, render the Bind9 configuration
    from the YAML file, reload it, and run named in the foreground.

    Fixes vs. previous version: ``name.zone()``/``name.reload()`` were
    NameErrors (the variable is ``named``), the final ``except`` block
    contained a dangling ``parser.`` expression (a syntax error) and an
    uncalled ``parser.error`` reference, and ``args.config`` (a
    one-element list, because of ``nargs=1``) was passed where a string
    path is expected.
    """
    namedPath = '/etc/bind/named.conf'
    parser = CustomArgparse(
        prog = "bind9",
        description = "Just Bind9 manager to set up easier.",
        add_help = True
    )
    try:
        parser.add_argument(
            "-c",
            "--config",
            dest = "config",
            nargs = 1,
            required = True,
            help = "[Required] Direct bind9.yaml to import config.",
        )
        args = parser.parse_args()
        if args.config:
            # nargs=1 stores a one-element list; unwrap it.
            named = NamedConfig(importConfig(args.config[0]), namedPath)
            named.acl()
            named.options()
            named.zone()
            named.reload()
            command = '/usr/sbin/named -c ' + namedPath + ' -g -u root'
            subprocess.run(command.split())
        else:
            parser.error("")
    except SystemExit:
        # argparse errors already reported and carry an exit status.
        raise
    except Exception:
        traceback.print_exc()
# Run the CLI only when executed directly (not when imported).
if __name__ == '__main__':
    main()
| 2.453125 | 2 |
game_settings.py | Mibblez/a-star-with-python | 0 | 12772206 | # Colors
# RGB color tuples (0-255 per channel) shared by the drawing code.
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREY = (140, 140, 140)
CYAN = (0, 255, 255)
DARK_CYAN = (0, 150, 150)
ORANGE = (255, 165, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
| 1.90625 | 2 |
singleton/print_version.py | Tomvictor/python-design-patterns | 0 | 12772207 | <reponame>Tomvictor/python-design-patterns
import sys

# Print the running interpreter's version string
# (dataset metadata residue fused onto this line previously broke the
# file's syntax).
print(sys.version)
openslides_backend/action/motion_category/__init__.py | reiterl/openslides-backend | 0 | 12772208 | from . import create_update_delete, sort, sort_motions_in_categories # noqa
| 0.960938 | 1 |
tap_amazon_advertising/streams/__init__.py | goes-funky/tap-amazon-advertising | 2 | 12772209 | <filename>tap_amazon_advertising/streams/__init__.py
from tap_amazon_advertising.streams.profiles import ProfilesStream
from tap_amazon_advertising.streams.portfolios import PortfoliosStream
from tap_amazon_advertising.streams.sponsored_products_campaigns import SponsoredProductsCampaignsStream
from tap_amazon_advertising.streams.sponsored_brands_campaigns import SponsoredBrandsCampaignsStream
from tap_amazon_advertising.streams.ad_groups import AdGroupsStream
from tap_amazon_advertising.streams.biddable_keywords import BiddableKeywordsStream
from tap_amazon_advertising.streams.negative_keywords import NegativeKeywordsStream
from tap_amazon_advertising.streams.campaign_negative_keywords import CampaignNegativeKeywordsStream
from tap_amazon_advertising.streams.product_ads import ProductAdsStream
from tap_amazon_advertising.streams.sponsored_products_report import SponsoredProductsReportProductAdsStream, \
SponsoredProductsReportCampaignsStream, \
SponsoredProductsReportAdGroupsStream, \
SponsoredProductsReportKeywordsStream
from tap_amazon_advertising.streams.sponsored_brands_report import SponsoredBrandsReportKeywordsStream, \
SponsoredBrandsReportCampaignsStream, \
SponsoredBrandsReportAdGroupsStream
# Stream classes the tap instantiates at discovery/sync time.
AVAILABLE_STREAMS = [
    ProfilesStream,
    PortfoliosStream,
    SponsoredProductsCampaignsStream,
    SponsoredBrandsCampaignsStream,
    AdGroupsStream,
    BiddableKeywordsStream,
    NegativeKeywordsStream,
    CampaignNegativeKeywordsStream,
    ProductAdsStream,
    # SP Reports
    SponsoredProductsReportProductAdsStream,
    SponsoredProductsReportCampaignsStream,
    SponsoredProductsReportAdGroupsStream,
    SponsoredProductsReportKeywordsStream,
    # SB Reports
    SponsoredBrandsReportKeywordsStream,
    SponsoredBrandsReportCampaignsStream,
    SponsoredBrandsReportAdGroupsStream,
]
# Explicit public API for `from ...streams import *`.
__all__ = [
    'ProfilesStream',
    'PortfoliosStream',
    'SponsoredProductsCampaignsStream',
    'SponsoredBrandsCampaignsStream',
    'AdGroupsStream',
    'BiddableKeywordsStream',
    'NegativeKeywordsStream',
    'CampaignNegativeKeywordsStream',
    'ProductAdsStream',
    'SponsoredProductsReportProductAdsStream',
    'SponsoredProductsReportCampaignsStream',
    'SponsoredProductsReportAdGroupsStream',
    'SponsoredProductsReportKeywordsStream',
    'SponsoredBrandsReportKeywordsStream',
    'SponsoredBrandsReportCampaignsStream',
    'SponsoredBrandsReportAdGroupsStream',
]
| 1.296875 | 1 |
trainer.py | wannieman98/RandWireNN | 0 | 12772210 |
import os
import time
import torch
import random
import numpy as np
from tqdm import tqdm
import torch.nn as nn
from util import epoch_time
import torch.optim as optim
from model.neural_network import RandomlyWiredNeuralNetwork
from data.data_util import fetch_dataloader, test_voc, test_imagenet
SEED = 981126
# Seed every RNG (Python, NumPy, Torch CPU and CUDA) and force
# deterministic cuDNN kernels so training runs are reproducible.
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
class Trainer:
    """Training harness for a Randomly Wired Neural Network.

    Builds the dataloaders, model, SGD optimizer and cosine-annealing
    scheduler from the constructor arguments, optionally resumes from a
    full checkpoint ('train.tar'), and drives the epoch loop in
    :meth:`train`.
    """

    def __init__(self, num_epoch, lr,
                 batch_size, num_node,
                 p, k, m, channel,
                 in_channels, path,
                 graph_mode, dataset,
                 is_small_regime,
                 checkpoint_path, load):
        """Assemble data, model and optimization state.

        When *load* is true, model/optimizer/scheduler weights and the
        bookkeeping counters are restored from
        ``checkpoint_path/train.tar``.
        """
        super(Trainer, self).__init__()
        # Bundle every hyper-parameter in one dict for access/logging.
        # 'classes': 21 for Pascal VOC, else ImageNet's 1000.
        self.params = {'num_epoch': num_epoch,
                       'batch_size': batch_size,
                       'lr': lr,
                       'node_num': num_node,
                       'p': p,
                       'k': k,
                       'm': m,
                       'in_channels': in_channels,
                       'channel': channel,
                       'classes': 21 if dataset == 'voc' else 1000,
                       'graph_mode': graph_mode,
                       'load': load,
                       'path': path,
                       'dataset': dataset,
                       'is_small_regime': is_small_regime,
                       'checkpoint_path': checkpoint_path
                       }
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.train_data, self.val_data, self.test_data = fetch_dataloader(
            self.params['dataset'],
            self.params['path'],
            self.params['batch_size'])
        self.rwnn = RandomlyWiredNeuralNetwork(
            self.params['channel'],
            self.params['in_channels'],
            self.params['p'],
            self.params['k'],
            self.params['m'],
            self.params['graph_mode'],
            self.params['classes'],
            self.params['node_num'],
            self.params['checkpoint_path'],
            self.params['load'],
            self.params['is_small_regime']
        ).to(self.device)
        # SGD with momentum 0.9 (positional) and weight decay 5e-5.
        self.optimizer = optim.SGD(
            self.rwnn.parameters(), self.params['lr'], 0.9, weight_decay=5e-5)
        self.best_loss = float('inf')
        self.step_num = 0
        if load:
            # Resume: restore weights, optimizer, scheduler and counters.
            checkpoint = torch.load(os.path.join(
                self.params['checkpoint_path'], 'train.tar'))
            self.rwnn.load_state_dict(checkpoint['model_state_dict'])
            self.optimizer.load_state_dict(
                checkpoint['optimizer_state_dict'])
            self.epoch = checkpoint['epoch']
            self.best_loss = checkpoint['best_loss']
            self.scheduler = checkpoint['scheduler']
            self.step_num = checkpoint['step_num']
        else:
            self.epoch = 0
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer, self.params['num_epoch'])
        self.criterion = nn.CrossEntropyLoss()
        pytorch_total_params = sum(p.numel() for p in self.rwnn.parameters())
        print(f"Number of parameters {pytorch_total_params}")

    def train(self):
        """Run the epoch loop: train, validate, checkpoint each epoch,
        snapshot the best-validation model, and (for VOC) run the test
        evaluation every 15 epochs."""
        print("\nbegin training...")
        for epoch in range(self.epoch, self.params['num_epoch']):
            print(
                f"\nEpoch: {epoch+1} out of {self.params['num_epoch']}, step: {self.step_num}")
            start_time = time.perf_counter()
            epoch_loss, step = train_loop(
                self.train_data, self.rwnn, self.optimizer, self.criterion, self.device)
            val_loss = val_loop(self.val_data, self.rwnn,
                                self.criterion, self.device)
            if val_loss < self.best_loss:
                # New best validation loss: record it and save the model.
                self.best_loss = val_loss
                # NOTE(review): the stray quote before 'validation' in
                # this message is preserved verbatim (runtime string).
                with open(os.path.join(self.params['checkpoint_path'], 'best_model.txt'), 'w') as f:
                    f.write(
                        f"epoch: {epoch+1}, 'validation loss: {val_loss}, step: {self.step_num}")
                torch.save(
                    self.rwnn,
                    os.path.join(self.params['checkpoint_path'], 'best.pt'))
            if (epoch + 1) % 15 == 0:
                if self.params['dataset'] == 'voc':
                    test_voc(self.test_data, self.rwnn, self.device)
            self.step_num += step
            self.scheduler.step()
            end_time = time.perf_counter()
            minutes, seconds, time_left_min, time_left_sec = epoch_time(
                end_time-start_time, epoch, self.params['num_epoch'])
            # Full resumable checkpoint written every epoch.
            torch.save({
                'epoch': epoch,
                'model_state_dict': self.rwnn.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
                'best_loss': self.best_loss,
                'scheduler': self.scheduler,
                'step_num': self.step_num
            }, os.path.join(self.params['checkpoint_path'], 'train.tar'))
            print(
                f"Train_loss: {round(epoch_loss, 3)} - Val_loss: {round(val_loss, 3)}")
            print(
                f"Epoch time: {minutes}m {seconds}s - Time left for training: {time_left_min}m {time_left_sec}s")
def train_loop(train_iter, model, optimizer, criterion, device):
    """Run one optimisation epoch over *train_iter*.

    Returns (mean batch loss, number of optimiser steps taken).
    """
    total_loss = 0.0
    steps_taken = 0
    model.train()
    print("Training...")
    for batch_src, batch_tgt in tqdm(train_iter):
        batch_src = batch_src.to(device)
        batch_tgt = batch_tgt.to(device)
        optimizer.zero_grad()
        loss = criterion(model(batch_src), batch_tgt)
        loss.backward()
        optimizer.step()
        steps_taken += 1
        total_loss += loss.item()
    return total_loss / len(train_iter), steps_taken
def val_loop(val_iter, model, criterion, device):
    """Return the mean criterion loss over *val_iter* with the model in
    eval mode and gradients disabled."""
    model.eval()
    running_loss = 0.0
    with torch.no_grad():
        print("Validating...")
        for inputs, targets in tqdm(val_iter):
            inputs = inputs.to(device)
            targets = targets.to(device)
            running_loss += criterion(model(inputs), targets).item()
    return running_loss / len(val_iter)
| 2.109375 | 2 |
src/glod/db/db_column_type_map.py | gordon-elliott/glod | 0 | 12772211 | <gh_stars>0
__copyright__ = 'Copyright(c) <NAME> 2017'
"""
"""
from sqlalchemy import (
Enum,
)
from a_tuin.db.mapper import DB_COLUMN_TYPE_MAP
from glod.model.account import AccountStatusField, AccountStatus
from glod.model.fund import FundRestrictionField, FundRestriction
from glod.model.nominal_account import (
NominalAccountSOFAHeadingField,
NominalAccountSOFAHeading,
NominalAccountCategoryField,
NominalAccountCategory,
NominalAccountSubCategoryField,
NominalAccountSubCategory,
)
from glod.model.organisation import (
OrganisationStatus,
OrganisationStatusField,
OrganisationCategory,
OrganisationCategoryField
)
from glod.model.person import PersonStatus, PersonStatusField
from glod.model.organisation_address import OrganisationAddressStatus, OrganisationAddressStatusField
from glod.model.pps import PPSStatus, PPSStatusField
from glod.model.tax_rebate_submission import SubmissionStatus, SubmissionStatusField
from glod.model.statement_item import StatementItemDesignatedBalance, StatementItemDesignatedBalanceField
from glod.model.transaction import (
PaymentMethod,
PaymentMethodField,
IncomeExpenditure,
IncomeExpenditureField,
)
# Register each domain enum field type with its SQLAlchemy Enum column
# type so the shared mapper can persist it; inherit_schema=True keeps
# the generated database ENUM types in the owning table's schema.
DB_COLUMN_TYPE_MAP[AccountStatusField] = Enum(AccountStatus, inherit_schema=True)
DB_COLUMN_TYPE_MAP[FundRestrictionField] = Enum(FundRestriction, inherit_schema=True)
DB_COLUMN_TYPE_MAP[NominalAccountSOFAHeadingField] = Enum(NominalAccountSOFAHeading, inherit_schema=True)
DB_COLUMN_TYPE_MAP[NominalAccountCategoryField] = Enum(NominalAccountCategory, inherit_schema=True)
DB_COLUMN_TYPE_MAP[NominalAccountSubCategoryField] = Enum(NominalAccountSubCategory, inherit_schema=True)
DB_COLUMN_TYPE_MAP[OrganisationStatusField] = Enum(OrganisationStatus, inherit_schema=True)
DB_COLUMN_TYPE_MAP[OrganisationCategoryField] = Enum(OrganisationCategory, inherit_schema=True)
DB_COLUMN_TYPE_MAP[OrganisationAddressStatusField] = Enum(OrganisationAddressStatus, inherit_schema=True)
DB_COLUMN_TYPE_MAP[PersonStatusField] = Enum(PersonStatus, inherit_schema=True)
DB_COLUMN_TYPE_MAP[PPSStatusField] = Enum(PPSStatus, inherit_schema=True)
DB_COLUMN_TYPE_MAP[SubmissionStatusField] = Enum(SubmissionStatus, inherit_schema=True)
DB_COLUMN_TYPE_MAP[StatementItemDesignatedBalanceField] = Enum(StatementItemDesignatedBalance, inherit_schema=True)
DB_COLUMN_TYPE_MAP[PaymentMethodField] = Enum(PaymentMethod, inherit_schema=True)
DB_COLUMN_TYPE_MAP[IncomeExpenditureField] = Enum(IncomeExpenditure, inherit_schema=True)
| 1.773438 | 2 |
lib/experiment.py | wonderit/aptos-retinopathy-detection | 0 | 12772212 | from copy import deepcopy
from datetime import datetime
from os import makedirs, remove
from os.path import join, isfile, isdir, dirname
import numpy as np
import torch
def append_to_file(file, string):
    """Append *string* plus a newline to *file*, creating any missing
    parent directories first."""
    parent = dirname(file)
    if parent and not isdir(parent):
        makedirs(parent)
    with open(file, "a") as fh:
        fh.write(string + "\n")
def train_model(model, dataloader, device, criterion, optimizer):
    """One training pass over *dataloader*, one optimiser step per batch.

    Model outputs of shape (N, 1) are squeezed to (N,) before the loss.
    """
    model.train()
    for inputs, targets in dataloader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        preds = model(inputs)
        if preds.ndim > 1 and preds.shape[1] == 1:
            preds = preds.squeeze(dim=1)
        criterion(preds, targets).backward()
        optimizer.step()
def eval_model(model, dataloader, device):
    """Collect model predictions over *dataloader* in eval mode.

    Returns (y_pred, y_true) as two concatenated CPU tensors; model
    outputs of shape (N, 1) are squeezed in place to (N,).
    """
    preds, labels = [], []
    model.eval()
    with torch.no_grad():
        for inputs, targets in dataloader:
            out = model(inputs.to(device))
            if out.ndim > 1 and out.shape[1] == 1:
                out.squeeze_(dim=1)
            preds.append(out.detach_().cpu())
            labels.append(targets)
    return torch.cat(preds, dim=0), torch.cat(labels, dim=0)
class FunctionEvaluator:
    """Callable bundle of metric functions.

    Built from a list of (name, func, kwargs) triples.  Calling the
    evaluator with (y_pred, y_true) returns a dict mapping each name to
    ``func(y_pred, y_true, **kwargs)``.
    """

    def __init__(self, func_lst):
        self.func_lst = func_lst

    def __call__(self, y_pred, y_true):
        return {
            name: fn(y_pred, y_true, **kw)
            for name, fn, kw in self.func_lst
        }
class Logger:
    """Print log messages and, when configured, mirror them to a file."""

    def __init__(self, save_path=None):
        self.save_path = save_path

    def log(self, msg):
        """Echo *msg* to stdout; append it to the log file via
        append_to_file when save_path is set."""
        print(msg)
        if self.save_path is None:
            return
        append_to_file(self.save_path, msg)
class TopNSaver:
    """Keep model checkpoints for the top *n* scores seen so far.

    ``dct`` maps score -> saved file path.  It is seeded with
    ``{0: None}`` so the first positive score always qualifies; when
    the dict is full, the lowest-scoring entry (and its file) is
    evicted before saving the new one.
    """

    def __init__(self, n):
        self.n = n
        self.dct = {0: None}

    def save(self, score, state, save_path):
        """Persist *state* to *save_path* via torch.save iff *score*
        beats at least one retained score and is not a duplicate."""
        if any(score > key for key in self.dct) and all(score != key for key in self.dct):
            if len(self.dct) >= self.n:
                # Evict the lowest score; delete its file best-effort.
                key_to_delete = sorted(list(self.dct.keys()))[0]
                if self.dct[key_to_delete] is not None:
                    try:
                        remove(self.dct[key_to_delete])
                    except OSError:
                        # File may already be gone; ignore.
                        pass
                self.dct.pop(key_to_delete)
            self.dct[score] = save_path
            torch.save(state, save_path)
class Experiment:
    """End-to-end training experiment with periodic evaluation,
    early stopping, top-N checkpointing and full-state resume.

    train() runs one epoch, evaluating ``evaluate_freq`` times per
    epoch; evaluate() scores train/validation subsets with the metric
    functions, tracks the best ``target_metric`` value, writes
    checkpoints under ``save_path`` and applies early stopping.
    """

    def __init__(self,
                 dl_train,
                 dl_train_val,
                 dl_validation,
                 model,
                 optimizer,
                 criterion,
                 device,
                 max_epoch,
                 metrics,
                 target_metric,
                 format_str,
                 init_epoch=0,
                 scheduler=None,
                 load_path=None,
                 save_path=None,
                 early_stopping=None,
                 evaluate_freq=1,
                 ):
        # Captured first so run() can dump every constructor argument.
        self._params = locals()
        self.dl_train = dl_train
        self.dl_train_val = dl_train_val
        self.dl_validation = dl_validation
        self.model = model
        self.optimizer = optimizer
        self.criterion = criterion
        self.device = device
        self.max_epoch = max_epoch
        self.metric_evaluator = FunctionEvaluator(metrics)
        self.target_metric = target_metric
        self.format_str = format_str
        self.init_epoch = init_epoch
        self.scheduler = scheduler
        self.load_path = load_path
        self.save_path = save_path
        self.logger = Logger(join(save_path, "log.txt")) if save_path is not None else Logger()
        self.early_stopping = early_stopping
        self.evaluate_freq = evaluate_freq
        self.top5saver = TopNSaver(10)
        self.reset()

    def reset(self):
        """Reinitialise per-run state (metric history, best score,
        early-stopping counter, timers)."""
        self.results = {
            "metrics_train": [],
            "metrics_valid": [],
            "state_dict": None,
        }
        self.best_validation_metric = .0
        self.model_best_state_dict = None
        self.no_score_improvement = 0
        self.experiment_start = datetime.now()
        self.now = None

    def evaluate(self, epoch, step):
        """Score train/validation subsets, checkpoint, and log one line.

        Returns True when early stopping should abort training,
        False otherwise.
        """
        # evaluate subset of train set (in eval mode)
        y_pred_train, y_true_train = eval_model(model=self.model,
                                                dataloader=self.dl_train_val,
                                                device=self.device)
        metrics_train = self.metric_evaluator(y_pred_train, y_true_train)
        self.results["metrics_train"].append(metrics_train)
        # evaluate validation subset
        y_pred_valid, y_true_valid = eval_model(model=self.model,
                                                dataloader=self.dl_validation,
                                                device=self.device)
        metrics_valid = self.metric_evaluator(y_pred_valid, y_true_valid)
        self.results["metrics_valid"].append(metrics_valid)
        val_score = metrics_valid[self.target_metric]
        # check if validation score is improved
        if val_score > self.best_validation_metric:
            self.model_best_state_dict = deepcopy(self.model.state_dict())
            self.best_validation_metric = val_score
            # reset early stopping counter
            self.no_score_improvement = 0
            # save best model weights
            if self.save_path is not None:
                torch.save(self.model_best_state_dict, join(self.save_path, "best_weights.pth"))
        else:
            self.no_score_improvement += 1
            if self.early_stopping is not None and self.no_score_improvement >= self.early_stopping:
                self.logger.log("Early stopping at epoch %d, step %d" % (epoch, step))
                return True
        # NOTE(review): scheduler.step(val_score) assumes a metric-driven
        # scheduler such as ReduceLROnPlateau.
        if self.scheduler is not None:
            self.scheduler.step(val_score)
        if self.save_path is not None:
            # (optional) save model state dict at end of each epoch
            self.top5saver.save(val_score,
                                self.model.state_dict(),
                                join(self.save_path, "model_state_{}_{}.pth".format(epoch, step)))
            # torch.save(self.model.state_dict(), join(self.save_path, "model_state_{}_{}.pth".format(epoch, step)))
            # save full experiment state at the end of each epoch
            checkpoint = {
                'epoch': epoch + 1,
                'model_curr_state_dict': self.model.state_dict(),
                'model_best_state_dict': self.model_best_state_dict,
                'optimizer_state_dict': self.optimizer.state_dict(),
                'scheduler_state_dict': None if self.scheduler is None else self.scheduler.state_dict(),
                'no_score_improvement': self.no_score_improvement,
                'best_validation_metric': self.best_validation_metric,
            }
            torch.save(checkpoint, join(self.save_path, "full_state.pth"))
        # Suffix metric names for the combined log line.
        metrics_train = dict([(key + "_train", val) for key, val in metrics_train.items()])
        metrics_valid = dict([(key + "_valid", val) for key, val in metrics_valid.items()])
        time_delta = datetime.now() - self.now
        s = self.format_str.format(time_delta=time_delta,
                                   epoch=epoch,
                                   step=step,
                                   max_epoch=self.max_epoch,
                                   **metrics_train,
                                   **metrics_valid)
        self.logger.log(s)
        self.now = datetime.now()
        return False

    def train(self, epoch):
        """Train one epoch, calling evaluate() at evenly spaced steps.

        Returns True if early stopping fired, False otherwise.
        NOTE(review): np.int was removed in NumPy 1.24 — this cast
        needs int/np.int64 on newer NumPy.
        """
        steps = np.round(np.linspace(0, len(self.dl_train), self.evaluate_freq + 1)).astype(np.int)
        steps = steps[1:-1]
        step = 1
        self.model.train()
        for i, (xb, yb) in enumerate(self.dl_train):
            xb, yb = xb.to(self.device), yb.to(self.device)
            self.optimizer.zero_grad()
            out = self.model(xb)
            if out.ndim == 2 and out.shape[1] == 1:
                out = out.squeeze(dim=1)
            loss = self.criterion(out, yb)
            loss.backward()
            self.optimizer.step()
            if i in steps:
                res = self.evaluate(epoch, step)
                if res:
                    return True
                step += 1
                # evaluate() left the model in eval mode; switch back.
                self.model.train()
        self.evaluate(epoch, step)
        return False

    def run(self):
        """Execute the full experiment and return the results dict.

        Dumps constructor arguments, optionally resumes from
        load_path/full_state.pth, then loops train() from init_epoch to
        max_epoch (or until early stopping).
        """
        self.reset()
        experiment_start = datetime.now()
        if self.save_path is not None:
            if not isdir(self.save_path):
                makedirs(self.save_path)
            # dump all args and their values
            for key, value in self._params.items():
                append_to_file(join(self.save_path, "params.txt"), "{}: {}".format(key, repr(value)))
        if self.load_path is not None:
            # load full experiment state to continue experiment
            load_path = join(self.load_path, "full_state.pth")
            if not isfile(load_path):
                raise ValueError("Checkpoint file {} does not exist".format(load_path))
            checkpoint = torch.load(load_path)
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            self.model_best_state_dict = checkpoint['model_best_state_dict']
            self.model.load_state_dict(checkpoint['model_curr_state_dict'])
            self.init_epoch = checkpoint['epoch']
            self.best_validation_metric = checkpoint['best_validation_metric']
            if self.scheduler is not None:
                self.scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
            self.logger.log("Successfully loaded checkpoint.")
        self.logger.log(self.format_str)
        self.now = datetime.now()
        for epoch in range(self.init_epoch, self.max_epoch):
            res = self.train(epoch)
            if res:
                break
        self.logger.log("Experiment time: {}".format(datetime.now() - experiment_start))
        return self.results
| 2.515625 | 3 |
src/ralph/discovery/openstack.py | quamilek/ralph | 0 | 12772213 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import urllib
import urllib2
import json
import datetime
class Error(Exception):
    """Base error raised by the OpenStack discovery helpers."""
class OpenStack(object):
    """Minimal OpenStack compute (nova) API client.

    Authenticates against Keystone v2.0 at construction time and keeps
    the nova public endpoint plus the auth token for later queries.
    Written for Python 2 (urllib/urllib2).
    """

    def __init__(self, url, user, password, region=''):
        self.auth_url = url
        self.user = user
        self.public_url, self.auth_token = self.auth(password, region)

    def auth(self, password, region):
        """POST credentials to Keystone and return (public_url, token).

        Picks the 'nova' service from the returned catalog; when
        *region* is non-empty, only an endpoint in that region is
        accepted.  Raises Error if no matching service/endpoint exists.
        """
        auth_url = '/'.join([self.auth_url, 'v2.0/tokens'])
        auth_data = json.dumps({
            'auth': {
                'tenantName': self.user,
                'passwordCredentials': {
                    'username': self.user,
                    'password': password,
                },
            },
        })
        auth_headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        request = urllib2.Request(auth_url, auth_data, auth_headers)
        auth_reply = json.loads(urllib2.urlopen(request).read())
        for service in auth_reply['access']['serviceCatalog']:
            if service['name'] == 'nova':
                for endpoint in service['endpoints']:
                    if not region or endpoint['region'] == region:
                        public_url = endpoint['publicURL']
                        break
                else:
                    raise Error('Service "nova" not available for this region')
                break
        else:
            raise Error('Service "nova" not available')
        auth_token = auth_reply['access']['token']['id']
        return public_url, auth_token

    def query(self, query, url=None, **kwargs):
        """GET <url or public_url>/<query>?<kwargs> with the auth token
        attached and return the decoded JSON response."""
        query_args = urllib.urlencode(kwargs)
        query_headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'X-Auth-Project-Id': self.user,
            'X-Auth-Token': self.auth_token,
        }
        query_url = '/'.join([
            url or self.public_url,
            query,
        ]) + '?' + query_args
        request = urllib2.Request(query_url, headers=query_headers)
        return json.loads(urllib2.urlopen(request).read())

    def simple_tenant_usage(self, start=None, end=None):
        """Return per-tenant usage records for [start, end].

        Defaults to the 24 hours ending now when the bounds are omitted.
        """
        if end is None:
            end = datetime.datetime.now()
        if start is None:
            start = end - datetime.timedelta(hours=24)
        return self.query(
            'os-simple-tenant-usage',
            start=start.strftime('%Y-%m-%dT%H:%M:%S'),
            end=end.strftime('%Y-%m-%dT%H:%M:%S'),
        )['tenant_usages']
| 2.484375 | 2 |
src/snap_to_bucket/s3_handler.py | siemens/snap-to-bucket | 5 | 12772214 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SPDX-FileCopyrightText: Siemens AG, 2020 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__author__ = 'Siemens AG'
import gc
import os
import sys
import time
import base64
import hashlib
import threading
from datetime import datetime
from subprocess import PIPE, Popen
import boto3
import psutil
from botocore.exceptions import ClientError
class ProgressPercentage(object):
    """Thread-safe callable that prints running download progress.

    Instances are handed to boto3 transfers as the ``Callback``; boto3
    invokes them (possibly from several threads) with the number of
    bytes transferred since the previous call.
    """

    def __init__(self, filename, size):
        self._filename = filename
        self._size = size
        self._seen_so_far = 0
        # boto3 may call the callback concurrently from worker threads.
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        with self._lock:
            self._seen_so_far += bytes_amount
            percentage = round((self._seen_so_far / self._size) * 100, 2)
            sys.stdout.write(
                f"Downloading {self._filename}: {percentage}% done \r")
            sys.stdout.flush()
class S3Handler:
    """
    Class to handle communications with S3 services

    :ivar s3client: S3 client from boto3
    :ivar bucket: Name of the bucket to use
    :ivar verbose: Verbosity level (0-3)
    :ivar temp_download: Path of the location where object from S3 is stored
    :ivar restore_partition_size: Size of partition being restored
    :ivar split_size: Size in bytes to split tar at
    :ivar gzip: True to compress tar with gzip
    :ivar storage_class: Storage class of S3 object
    :ivar FIVE_HUNDRED_MB: Five hundred MiB in bytes
    :ivar FIVE_GB: Five GiB in bytes
    """

    FIVE_HUNDRED_MB = 500 * (1024 ** 2)
    FIVE_GB = (5 * (1024 ** 3))

    def __init__(self, bucket, split_size=5497558138880.0, gzip=False,
                 storage_class="STANDARD", verbose=0):
        """
        Initializer for the class attributes.

        Additionally, check if the provided bucket can be accessed.

        :param bucket: Bucket to use
        :type bucket: string
        :param split_size: Split size of tar
        :type split_size: float
        :param gzip: True to compress tar with gzip
        :type gzip: boolean
        :param storage_class: Storage class of S3 object
        :type storage_class: string
        :param verbose: Verbosity level (0-3)
        :type verbose: integer
        """
        self.s3client = boto3.client('s3')
        self.bucket = bucket
        self.__check_bucket_accessibility(bucket)
        self.split_size = split_size
        self.gzip = gzip
        self.storage_class = storage_class
        self.verbose = verbose

    def __check_bucket_accessibility(self, bucket):
        """
        Check if the bucket can be accessed

        :param bucket: Bucket to check
        :type bucket: string

        :raises Exception: If the bucket can't be accessed
        """
        try:
            response = self.s3client.head_bucket(Bucket=bucket)
            if response['ResponseMetadata']['HTTPStatusCode'] != 200:
                raise Exception
        except Exception as e:
            print(f"Unable to access bucket '{bucket}'", file=sys.stderr)
            raise e

    def __get_object_count(self, key):
        """
        Get the count of objects under given key

        This function also assigns value to attribute ``restore_partition_size``.
        If the object has meta data ``x-amz-meta-disc-size`` and value if
        greater than 1, partition size is assigned to its value.
        Otherwise, partition size is assigned value of content length

        :param key: Object key to check
        :type key: string

        :return: Number of objects with ``key`` as the prefix
        :rtype: integer

        :raises Exception: If the objects can't be accessed
        """
        try:
            response = self.s3client.list_objects_v2(Bucket=self.bucket,
                                                     Prefix=key)
            if response['ResponseMetadata']['HTTPStatusCode'] != 200 or 'Contents' not in response:
                raise Exception
            objects = [o for o in response['Contents']]
            response = self.s3client.head_object(Bucket=self.bucket,
                                                 Key=objects[0]['Key'])
            partition_size = 0
            if 'x-amz-meta-disc-size' in response['Metadata']:
                partition_size = int(
                    response['Metadata']['x-amz-meta-disc-size'])
            if partition_size < 2:
                # Fall back to the summed object sizes when the metadata is
                # missing or clearly bogus.
                partition_size = sum([int(o['Size']) for o in objects])
            self.restore_partition_size = partition_size
            return len(objects)
        except Exception as e:
            print(f"Unable to access key '{key}' in bucket '{self.bucket}'",
                  file=sys.stderr)
            raise e

    def __byte_checksum(self, data):
        """
        Calculate the checksum for the given bytes

        :param data: Data to calculate checksum for
        :type data: byte

        :return: The Base64 encoded MD5 checksum
        :rtype: string
        """
        md_obj = hashlib.md5()
        md_obj.update(data)
        return base64.b64encode(md_obj.digest()).decode('UTF-8').strip()

    def __get_key_uploadid(self, snapshot, size, partno):
        """
        Generate the key and uploadid for a snapshot

        :param snapshot: Snapshot to get the key for
        :type snapshot: dict()
        :param size: Size of mounted partition
        :type size: integer
        :param partno: Part no of the upload (-1 for single part upload)
        :type partno: integer

        :return: S3 key and uploadid for the snapshot
        :rtype: list()
        """
        meta_data = dict()
        content_type = 'application/x-tar'
        timestr = datetime.now().isoformat(timespec='seconds')
        created = snapshot['created'].isoformat(timespec='seconds')
        name = snapshot['name'].replace(' ', '+').replace('/', '_')
        key = f"snap/{name}/{snapshot['id']}-{created}-{timestr}"
        meta_data["creation-time"] = snapshot['created'].isoformat()
        meta_data["snap-volume-size"] = f"{snapshot['volumesize']} GiB"
        if partno == -1:
            key = f"{key}.tar"
            if self.gzip:
                key = f"{key}.gz"
                content_type = 'application/gzip'
        else:
            key = f"{key}-part{partno}.tar"
            if self.gzip:
                key = f"{key}.gz"
                content_type = 'application/gzip'
        if size > 1:
            # NOTE(review): unlike the keys above, this one keeps the
            # 'x-amz-meta-' prefix; __get_object_count reads it back with the
            # same spelling, so it round-trips consistently through boto3.
            meta_data["x-amz-meta-disc-size"] = str(size)
        res = self.s3client.create_multipart_upload(
            Bucket=self.bucket,
            ContentType=content_type,
            Key=key,
            Metadata=meta_data,
            StorageClass=self.storage_class
        )
        return (key, res['UploadId'])

    def initiate_upload(self, snapshot, path, size=0):
        """
        Start multipart upload

        1. Initialize the variables
            1. If the upload can be done in one go, set partno as -1
        2. Get the first key and upload id
        3. Create a tar process
        4. Read a chunk (max 5 GB or available RAM size - 50 MB of overhead or
        remaining size before split occurs)
            1. Have read enough data for split
                1. Finish the upload, reset the counters
                2. If more data to read, get new key and upload id.
                3. Otherwise break.
            2. Calculate new chunk size to be read
            3. Read the chunk, update the counters and get the checksum of the
            chunk
            4. Upload part and add returned Etag to list
        4. Finish the upload

        If upload fails in between, abort the upload

        :param snapshot: Snapshot to be uploaded
        :type snapshot: dict()
        :param path: Path of the mounted directory
        :type path: string
        :param size: Size of the partition (attached as meta info)
        :type size: integer
        """
        uploaded_bytes = 0
        if self.split_size >= size:
            if self.verbose > 1:
                print("Uploading snapshot as a single file as " +
                      f"{self.split_size} >= {size}")
            partno = -1
        else:
            partno = 1
        tar_process = Popen(["tar", "--directory", path, "--create",
                             "--preserve-permissions", "."], stdout=PIPE)
        read_process = tar_process
        if self.gzip:
            gzip_process = Popen(["gzip", "--to-stdout", "-6"],
                                 stdin=tar_process.stdout, stdout=PIPE)
            read_process = gzip_process
        more_to_read = True
        try:
            while more_to_read:
                (key, uploadid) = self.__get_key_uploadid(snapshot, size,
                                                          partno)
                (uploaded_bytes, more_to_read) = self.__read_and_upload_part(
                    read_process, uploaded_bytes, key, uploadid)
                partno += 1
        finally:
            # Drop our reference and reap the child processes.
            read_process = None
            if self.gzip:
                gzip_process.wait()
            tar_process.wait()
        print()
        if self.verbose > 0:
            print("Multipart upload finished. Sending complete")

    def __read_and_upload_part(self, read_process, uploaded_bytes, key,
                               upload_id):
        """
        Prepare an upload a single part of the tar.

        1. Read the data from read_process
        2. Upload it as multipart upload
        3. Check if there is more data to be uploaded
        4. Set the flag and complete the multipart upload

        :param read_process: The process to read from
        :type read_process: subprocess.Popen
        :param uploaded_bytes: No of bytes already uploaded
        :type uploaded_bytes: integer
        :param key: S3 key
        :type key: string
        :param upload_id: S3 multipart upload id
        :type upload_id: string

        :return: No of total bytes uploaded, is there more data to process
        :rtype: dict(integer, boolean)
        """
        tar_read_bytes = 0
        upload_partid = 1
        parts_info = list()
        more_to_read = True
        print(f"Uploading {key} to {self.bucket} bucket")
        while True:
            free_mem = psutil.virtual_memory().available
            if free_mem > self.FIVE_GB:  # Maximum part size is 5 GiB
                free_mem = self.FIVE_GB
            max_chunk = free_mem - self.FIVE_HUNDRED_MB
            if tar_read_bytes + max_chunk > self.split_size:
                # split_size may be a float; read() requires an int.
                read_chunk = int(self.split_size - tar_read_bytes)
            else:
                read_chunk = max_chunk
            try:
                inline = read_process.stdout.read(read_chunk)
                if len(inline) == 0:
                    # No more data to read
                    more_to_read = False
                    break
                tar_read_bytes += len(inline)
                uploaded_bytes += len(inline)
                resp = self.__upload_s3_part(inline, key, upload_partid,
                                             upload_id)
                del inline
                parts_info.append({
                    'ETag': resp['ETag'],
                    'PartNumber': upload_partid
                })
                if self.verbose > 0:
                    print(f"Part # {upload_partid}, ", end='')
                print("Uploaded " +
                      str(round(uploaded_bytes / (1024 ** 2), 2)) +
                      " MiB (total) ", end="\r")
                upload_partid += 1
                gc.collect()
                if tar_read_bytes >= self.split_size:
                    # One split upload completed
                    break
            except Exception as e:
                print("\nMultipart upload failed. Trying to abort",
                      file=sys.stderr)
                inline = None  # Safely drop the data
                self.s3client.abort_multipart_upload(
                    Bucket=self.bucket,
                    Key=key,
                    UploadId=upload_id
                )
                raise e
        self.__complete_upload(key, upload_id, parts_info)
        return uploaded_bytes, more_to_read

    def __upload_s3_part(self, body, key, part_id, upload_id, retry_count=0):
        """
        Upload a part of S3 multipart upload.

        The function also reties failed calls. Every upload request, if failed,
        will be retried 4 times at 4 seconds of intervals.

        :param body: Body of the upload
        :param key: S3 object key
        :type key: string
        :param part_id: Upload part ID
        :type part_id: int
        :param upload_id: Multipart upload's Upload ID
        :type upload_id: string
        :param retry_count: How many retries have been done.
        :type retry_count: int

        :return: Response from S3
        :raises Exception: If all upload attempt fails
        """
        if retry_count > 3:
            raise Exception("S3 multipart part upload failed")
        try:
            return self.s3client.upload_part(
                Body=body,
                Bucket=self.bucket,
                ContentMD5=self.__byte_checksum(body),
                Key=key,
                PartNumber=part_id,
                UploadId=upload_id
            )
        except ClientError as error:
            print(f"Failed: '{error.response['Error']['Message']}'.\nRetrying.",
                  file=sys.stderr)
            time.sleep(4.0)
            return self.__upload_s3_part(body, key, part_id, upload_id,
                                         retry_count + 1)

    def __complete_upload(self, key, uploadid, partlist, retry_count=0):
        """
        Complete a multipart upload

        The function also reties failed calls. Every upload request, if failed,
        will be retried 4 times at 4 seconds of intervals.

        :param key: Key of the upload
        :type key: string
        :param uploadid: Upload id of the multipart upload
        :type uploadid: string
        :param partlist: List of uploaded parts
        :type partlist: list(dict())

        :raises Exception: If all upload attempt fails, abort uploads.
        """
        if retry_count > 3:
            print("\nMultipart upload failed. Trying to abort",
                  file=sys.stderr)
            self.s3client.abort_multipart_upload(
                Bucket=self.bucket,
                Key=key,
                UploadId=uploadid
            )
            raise Exception("S3 upload failed")
        try:
            self.s3client.complete_multipart_upload(
                Bucket=self.bucket,
                Key=key,
                MultipartUpload={
                    'Parts': partlist
                },
                UploadId=uploadid
            )
        except ClientError as error:
            print(f"Failed: '{error.response['Error']['Message']}'.\nRetrying.",
                  file=sys.stderr)
            time.sleep(4.0)
            self.__complete_upload(key, uploadid, partlist, retry_count + 1)
        if self.verbose > 0:
            print(f"\nCompleted multipart upload, key: {key}")

    def get_object_count_and_size(self, key):
        """
        Check if the given key is available and return number of objects under
        it.

        :param key: Key to check
        :type key: string

        :return: Number of objects under provided key prefix, size of unpacked
            tar
        :rtype: tuple(integer, integer)
        """
        # __get_object_count also sets self.restore_partition_size as a side
        # effect; it must run first.
        return (self.__get_object_count(key),
                self.restore_partition_size)

    def download_key(self, key, partno, restore_dir):
        """
        Download the key from S3

        Create a temporary path to download the key and start download.

        :param key: Key (prefix) to be downloaded
        :type key: string
        :param partno: Part number of the key to be downloaded (-1 if there is
            only one part)
        :type partno: integer
        :param restore_dir: Location to store S3 object for restore
        :type restore_dir: string

        :return: Location of downloaded file
        :rtype: string

        :raises Exception: If download fails, delete the temp location
        """
        response = self.s3client.list_objects_v2(Bucket=self.bucket,
                                                 Prefix=key)
        keys = [o['Key'] for o in response['Contents']]
        download_key_name = None
        if partno == -1:
            download_key_name = keys[0]
        else:
            # Use a distinct loop variable: the original code shadowed the
            # `key` parameter here, which corrupted the error message below.
            for candidate in keys:
                if f"-part{partno}.tar" in candidate:
                    download_key_name = candidate
                    break
        if download_key_name is None:
            raise Exception(f"Unable to find part '{partno}' under key {key}")
        self.temp_download = os.path.join(restore_dir, download_key_name)
        os.makedirs(os.path.dirname(self.temp_download), exist_ok=True)
        size = self.s3client.head_object(Bucket=self.bucket,
                                         Key=download_key_name)['ContentLength']
        progress = ProgressPercentage(download_key_name, size)
        try:
            self.s3client.download_file(self.bucket, download_key_name,
                                        self.temp_download, Callback=progress)
            print()
        except Exception as e:
            print(f"Failed while downloading s3://{self.bucket}/{download_key_name}",
                  file=sys.stderr)
            os.remove(self.temp_download)
            raise e
        return self.temp_download
| 2.109375 | 2 |
# unidecode/x016.py
# Unidecode ASCII transliteration table for Unicode block U+1600-U+16FF
# (tail of Unified Canadian Aboriginal Syllabics, Ogham, and Runic).
# The tuple index equals the low byte of the code point; '[?]' marks
# characters with no ASCII approximation.
data = (
'kka', # 0x00
'kk', # 0x01
'nu', # 0x02
'no', # 0x03
'ne', # 0x04
'nee', # 0x05
'ni', # 0x06
'na', # 0x07
'mu', # 0x08
'mo', # 0x09
'me', # 0x0a
'mee', # 0x0b
'mi', # 0x0c
'ma', # 0x0d
'yu', # 0x0e
'yo', # 0x0f
'ye', # 0x10
'yee', # 0x11
'yi', # 0x12
'ya', # 0x13
'ju', # 0x14
'ju', # 0x15
'jo', # 0x16
'je', # 0x17
'jee', # 0x18
'ji', # 0x19
'ji', # 0x1a
'ja', # 0x1b
'jju', # 0x1c
'jjo', # 0x1d
'jje', # 0x1e
'jjee', # 0x1f
'jji', # 0x20
'jja', # 0x21
'lu', # 0x22
'lo', # 0x23
'le', # 0x24
'lee', # 0x25
'li', # 0x26
'la', # 0x27
'dlu', # 0x28
'dlo', # 0x29
'dle', # 0x2a
'dlee', # 0x2b
'dli', # 0x2c
'dla', # 0x2d
'lhu', # 0x2e
'lho', # 0x2f
'lhe', # 0x30
'lhee', # 0x31
'lhi', # 0x32
'lha', # 0x33
'tlhu', # 0x34
'tlho', # 0x35
'tlhe', # 0x36
'tlhee', # 0x37
'tlhi', # 0x38
'tlha', # 0x39
'tlu', # 0x3a
'tlo', # 0x3b
'tle', # 0x3c
'tlee', # 0x3d
'tli', # 0x3e
'tla', # 0x3f
'zu', # 0x40
'zo', # 0x41
'ze', # 0x42
'zee', # 0x43
'zi', # 0x44
'za', # 0x45
'z', # 0x46
'z', # 0x47
'dzu', # 0x48
'dzo', # 0x49
'dze', # 0x4a
'dzee', # 0x4b
'dzi', # 0x4c
'dza', # 0x4d
'su', # 0x4e
'so', # 0x4f
'se', # 0x50
'see', # 0x51
'si', # 0x52
'sa', # 0x53
'shu', # 0x54
'sho', # 0x55
'she', # 0x56
'shee', # 0x57
'shi', # 0x58
'sha', # 0x59
'sh', # 0x5a
'tsu', # 0x5b
'tso', # 0x5c
'tse', # 0x5d
'tsee', # 0x5e
'tsi', # 0x5f
'tsa', # 0x60
'chu', # 0x61
'cho', # 0x62
'che', # 0x63
'chee', # 0x64
'chi', # 0x65
'cha', # 0x66
'ttsu', # 0x67
'ttso', # 0x68
'ttse', # 0x69
'ttsee', # 0x6a
'ttsi', # 0x6b
'ttsa', # 0x6c
'X', # 0x6d
'.', # 0x6e
'qai', # 0x6f
'ngai', # 0x70
'nngi', # 0x71
'nngii', # 0x72
'nngo', # 0x73
'nngoo', # 0x74
'nnga', # 0x75
'nngaa', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
' ', # 0x80
'b', # 0x81
'l', # 0x82
'f', # 0x83
's', # 0x84
'n', # 0x85
'h', # 0x86
'd', # 0x87
't', # 0x88
'c', # 0x89
'q', # 0x8a
'm', # 0x8b
'g', # 0x8c
'ng', # 0x8d
'z', # 0x8e
'r', # 0x8f
'a', # 0x90
'o', # 0x91
'u', # 0x92
'e', # 0x93
'i', # 0x94
'ch', # 0x95
'th', # 0x96
'ph', # 0x97
'p', # 0x98
'x', # 0x99
'p', # 0x9a
'<', # 0x9b
'>', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'f', # 0xa0
'v', # 0xa1
'u', # 0xa2
'yr', # 0xa3
'y', # 0xa4
'w', # 0xa5
'th', # 0xa6
'th', # 0xa7
'a', # 0xa8
'o', # 0xa9
'ac', # 0xaa
'ae', # 0xab
'o', # 0xac
'o', # 0xad
'o', # 0xae
'oe', # 0xaf
'on', # 0xb0
'r', # 0xb1
'k', # 0xb2
'c', # 0xb3
'k', # 0xb4
'g', # 0xb5
'ng', # 0xb6
'g', # 0xb7
'g', # 0xb8
'w', # 0xb9
'h', # 0xba
'h', # 0xbb
'h', # 0xbc
'h', # 0xbd
'n', # 0xbe
'n', # 0xbf
'n', # 0xc0
'i', # 0xc1
'e', # 0xc2
'j', # 0xc3
'g', # 0xc4
'ae', # 0xc5
'a', # 0xc6
'eo', # 0xc7
'p', # 0xc8
'z', # 0xc9
's', # 0xca
's', # 0xcb
's', # 0xcc
'c', # 0xcd
'z', # 0xce
't', # 0xcf
't', # 0xd0
'd', # 0xd1
'b', # 0xd2
'b', # 0xd3
'p', # 0xd4
'p', # 0xd5
'e', # 0xd6
'm', # 0xd7
'm', # 0xd8
'm', # 0xd9
'l', # 0xda
'l', # 0xdb
'ng', # 0xdc
'ng', # 0xdd
'd', # 0xde
'o', # 0xdf
'ear', # 0xe0
'ior', # 0xe1
'qu', # 0xe2
'qu', # 0xe3
'qu', # 0xe4
's', # 0xe5
'yr', # 0xe6
'yr', # 0xe7
'yr', # 0xe8
'q', # 0xe9
'x', # 0xea
'.', # 0xeb
':', # 0xec
'+', # 0xed
'17', # 0xee
'18', # 0xef
'19', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| 1.351563 | 1 |
# tests/integration/server/test_Server_join.py
from socket import socket
from server.Server import Server
from .utils import createClient, readLine, registerClient, readJoin
def test_Server_join(client: socket):
    """A registered client that joins a channel gets JOIN, topic, and NAMES replies."""
    registerClient(client)
    client.sendall(b"JOIN #chan\r\n")
    assert readJoin(client) == [
        ":test!test@127.0.0.1 JOIN #chan\r\n",
        ":127.0.0.1 331 test #chan :No topic is set\r\n",
        ":127.0.0.1 353 test = #chan :@test\r\n",
        ":127.0.0.1 366 test #chan :End of NAMES list\r\n",
    ]
def test_Server_join_notEnoughParameters(client: socket):
    """JOIN without a channel parameter yields ERR_NEEDMOREPARAMS (461)."""
    registerClient(client)
    client.sendall(b"JOIN\r\n")
    reply = readLine(client)
    assert reply == ":127.0.0.1 461 test JOIN :Not enough parameters\r\n"
def test_Server_join_channelKey(server: Server):
    """A channel created with a key rejects a wrong key (475) and accepts the right one."""
    first = createClient(server)
    second = createClient(server)
    registerClient(first, "test")
    registerClient(second, "test2")

    # First client creates the channel with key 'password'.
    first.sendall(b"JOIN #chan password\r\n")
    readJoin(first)

    # Wrong key -> ERR_BADCHANNELKEY.
    second.sendall(b"JOIN #chan pass\r\n")
    assert readLine(second) == ":127.0.0.1 475 test2 #chan :Cannot join channel (+k)\r\n"

    # Correct key -> normal join sequence.
    second.sendall(b"JOIN #chan password\r\n")
    assert readJoin(second) == [
        ":test2!test2@127.0.0.1 JOIN #chan\r\n",
        ":127.0.0.1 331 test2 #chan :No topic is set\r\n",
        ":127.0.0.1 353 test2 = #chan :@test test2\r\n",
        ":127.0.0.1 366 test2 #chan :End of NAMES list\r\n",
    ]
| 2.78125 | 3 |
import pygame
import math
from functools import reduce
from tower import Tower
from unit import Unit
class BoardState:
    """Feature summary of a Board at one instant: unit/tower counts and
    their rough spatial distribution (board quadrants / halves)."""

    def __init__(self, board):
        # Total units ever deployed this round (alive + destroyed + scored).
        self.unitsDeployed = len(board._units) + board._unitsDestroyed + board._unitsThatReachedGoal
        self.towersDeployed = 0
        self.towersInUpperLeft = 0
        self.towersInUpperRight = 0
        self.towersInLowerLeft = 0
        self.towersInLowerRight = 0
        self.unitsOnLeftSide = 0
        self.unitsOnRightSide = 0
        # Bucket each placed tower into a board quadrant.
        for col in range(0, board._width):
            for row in range(0, board._height):
                tower = board._towers[row][col]
                if not (tower is None):
                    self.towersDeployed += 1
                    if tower._x < board._width/2:
                        if tower._y < board._height/2:
                            self.towersInUpperLeft += 1
                        else:
                            self.towersInLowerLeft += 1
                    else:
                        if tower._y < board._height/2:
                            self.towersInUpperRight += 1
                        else:
                            self.towersInLowerRight += 1
        # Bucket each live unit into left/right board halves.
        for unit in board._units:
            if unit._x < board._width/2:
                self.unitsOnLeftSide += 1
            else:
                self.unitsOnRightSide += 1

    def normalizedDistToState(self, boardState):
        """L1 distance between this state and another, after L2-normalising
        both feature vectors.

        NOTE(review): `self` is read via `self.__dict__[key]` while the
        argument is read via `boardState[key]` — callers apparently pass a
        plain dict keyed like `__dict__`, not a BoardState; confirm.
        """
        thisVector = []
        otherVector = []

        # L2 norms of both feature vectors.
        thisTotal = 0.0
        otherTotal = 0.0
        for key in self.__dict__:
            thisTotal += pow(self.__dict__[key], 2)
            otherTotal += pow(boardState[key], 2)
        thisTotal = math.sqrt(thisTotal)
        otherTotal = math.sqrt(otherTotal)

        # Normalize each vector by its L2 norm (zero vectors stay zero).
        for key in self.__dict__:
            if thisTotal > 0:
                thisVector.append(self.__dict__[key] / thisTotal)
            else:
                thisVector.append(0)
            if otherTotal > 0:
                otherVector.append(boardState[key] / otherTotal)
            else:
                otherVector.append(0)

        # L1 distance between the two unit vectors.
        # NOTE(review): the original comment claimed a max of 1, but the L1
        # distance between two L2-unit vectors can reach 2 — confirm the
        # intended scale before relying on this bound.
        dist = 0.0
        for i in range(0, len(thisVector)):
            dist += abs(thisVector[i] - otherVector[i])
        return dist
class Board:
    """Tower-defense board: a grid of towers plus the units and bullets in
    flight. Units enter above row 0 and score by reaching row `_height`."""

    def __init__(self, offset_x, offset_y):
        # Pixel offset of the grid's top-left corner on screen.
        self._offset_x = offset_x
        self._offset_y = offset_y
        self._cell_size = 32
        self._width = 10
        self._height = 10
        # Indexed as _towers[x][y] (column-major).
        self._towers = [[None for _ in range(self._height)] for _ in range(self._width)]
        self._tower_list = []
        self._num_towers = 0
        self._num_units = 0  # total deployed this round
        self._last_tower = None
        self._last_unit = None
        self._last_unit_initial_location = None
        self._units = []
        self._bullets = []
        self._unitsThatReachedGoal = 0
        self._unitsDestroyed = 0
        self._score = 0
        self._tick = 0

    def hasUnit(self, x, y):
        """True if any unit's last or next path node occupies grid cell (x, y)."""
        return any(
            (int(u._lastNode[0]) == x and int(u._lastNode[1]) == y)
            or (u._nextNode and int(u._nextNode[0]) == x and int(u._nextNode[1]) == y)
            for u in self._units)

    def isInBounds(self, x, y):
        """True if (x, y) is a valid grid cell."""
        return 0 <= x < self._width and 0 <= y < self._height

    def hasTower(self, x, y):
        """True if a tower occupies grid cell (x, y); False when out of bounds."""
        if not self.isInBounds(x, y):
            return False
        return self._towers[x][y] is not None

    def add_tower(self, tower):
        """Place a tower if the cell is free, in bounds, not occupied by a
        unit, and placement would not block every path to the goal.
        Returns True on success."""
        if not self.isInBounds(tower._x, tower._y):
            return False
        if self.hasUnit(tower._x, tower._y):
            return False
        if self.hasTower(tower._x, tower._y):
            return False
        # Tentatively place, then verify the board is still traversable.
        self._towers[tower._x][tower._y] = tower
        if not (self.path_exists() and self.unit_path_exists()):
            self._towers[tower._x][tower._y] = None
            return False
        self._tower_list += [tower]
        self._last_tower = tower
        self._num_towers += 1
        return True

    def add_unit(self, unit):
        """Deploy a unit; it must start just above the board (y == -1)
        within the board's x range. Returns True on success."""
        if unit._x < 0 or unit._x > self._width - 1:
            return False
        if unit._y != -1:
            return False
        self._units.append(unit)
        self._last_unit = unit
        self._last_unit_initial_location = (unit._x, unit._y)
        self._num_units += 1
        return True

    def add_bullet(self, bullet):
        """Register a bullet fired by a tower."""
        self._bullets.append(bullet)

    def step(self):
        """Advance the simulation one tick: move units, resolve bullet
        collisions, and let towers act."""
        self._tick += 1

        for unit in self._units:
            unit.step(self)

        # Check for updates on all units. Iterate over a snapshot: the
        # original code removed from self._units while iterating it, which
        # silently skipped the unit following each removal.
        for unit in list(self._units):
            if unit._shouldDestroy:
                self._unitsDestroyed += 1
                self._score += int(unit._y)
                self._units.remove(unit)
            elif unit._y >= self._height:
                self._unitsThatReachedGoal += 1
                self._score += 10
                unit.setIsAtGoal()
                self._units.remove(unit)

        # Check for updates on all bullets (snapshot for the same reason).
        for bullet in list(self._bullets):
            bullet.step()

            if bullet._shouldDestroy:
                self._bullets.remove(bullet)
                continue

            # Check for out of bounds
            if bullet._x > self._width or bullet._x < 0 or bullet._y > self._height + 2 or bullet._y < -2:
                bullet.setShouldDestroy()
            else:
                # Check for collisions
                for unit in self._units:
                    if self.has_collision(bullet, unit):
                        unit.damage(50)
                        bullet._parent._body_count += 1
                        bullet.setShouldDestroy()
                        break

        # Update towers
        for i in range(self._width):
            for j in range(self._height):
                if self._towers[i][j] is None:
                    continue
                self._towers[i][j].step(self)

    def getState(self):
        """The state of the board at a given step (used by the generator)."""
        return BoardState(self)

    def getScore(self):
        """Score for the game so far: banked score plus the progress (row
        index) of every unit still alive (used by the generator)."""
        aliveUnitsTotalDistance = 0
        for unit in self._units:
            aliveUnitsTotalDistance += int(unit._y)
        return self._score + aliveUnitsTotalDistance

    def execute(self, action):
        """Apply a generator action (place a unit or a tower)."""
        if action.name == "PlaceUnitAction":
            self.add_unit(Unit(action.x, -1, 0))
        elif action.name == "PlaceTowerAction":
            self.add_tower(Tower(action.x, action.y))

    def distance(self, obj1, obj2):
        """Euclidean distance between two objects with _x/_y attributes."""
        return math.sqrt(pow(obj1._x - obj2._x, 2) + pow(obj1._y - obj2._y, 2))

    def has_collision(self, obj1, obj2):
        """True when two objects are within half a cell of each other."""
        return self.distance(obj1, obj2) < 0.5

    def draw(self, screen):
        """Render grid, towers, units, and bullets onto the pygame surface."""
        # Draws grid
        line_color = (125, 125, 125)
        for x in range(self._offset_x, self._offset_x + self._width * self._cell_size + 1, self._cell_size):
            pygame.draw.line(screen, line_color, (x, self._offset_y),
                             (x, self._offset_y + self._height * self._cell_size))
        for y in range(self._offset_y, self._offset_y + self._height * self._cell_size + 1, self._cell_size):
            pygame.draw.line(screen, line_color, (self._offset_x, y),
                             (self._offset_x + self._width * self._cell_size, y))

        # Draws towers
        for i in range(self._width):
            for j in range(self._height):
                if self._towers[i][j] is None:
                    continue
                screen.blit(self._towers[i][j]._image, self.grid_to_screen(i, j))

        # Draws enemy units
        for unit in self._units:
            screen.blit(unit._image, self.grid_to_screen(unit._x, unit._y))

        # Draws tower bullets
        for bullet in self._bullets:
            screen.blit(bullet._image, self.grid_to_screen(bullet._x, bullet._y))

    def grid_to_screen(self, x, y):
        """Grid cell -> top-left pixel coordinate."""
        return (x * self._cell_size + self._offset_x, y * self._cell_size + self._offset_y)

    def trunc_screen(self, x, y):
        """Snap a raw screen coordinate to its cell's top-left pixel."""
        return (((int) (x / self._cell_size)) * self._cell_size,
                ((int) (y / self._cell_size)) * self._cell_size)

    def screen_to_grid(self, x, y):
        """Screen pixel -> grid cell."""
        return ((int) ((x - self._offset_x) / self._cell_size), (int) ((y - self._offset_y) / self._cell_size))

    def contains_point(self, x, y):
        """True when the screen pixel (x, y) lies inside the drawn grid."""
        return (self._offset_x <= x < self._offset_x + self._width * self._cell_size
                and self._offset_y <= y < self._offset_y + self._height * self._cell_size)

    def unit_path_exists(self):
        """True when every on-board unit can still reach the bottom row."""
        return all(self.path_from(u._lastNode[0], u._lastNode[1]) is not None
                   for u in self._units if u._y >= 0)

    def path_exists(self):
        """True when at least one top-row cell can reach the bottom row."""
        return any(self.path_from(x, 0) is not None for x in range(self._width))

    def path_from(self, x, y):
        """BFS from (x, y) to the bottom row, avoiding towers.

        Returns the path as a list of (x, y) cells, or None when no path
        exists. (The original code compared small ints with `is`, which
        only works by CPython's int-caching accident, and hard-coded the
        goal row as 9.)
        """
        if not self.isInBounds(x, y):
            return None
        if self.hasTower(x, y):
            return None
        goal_row = self._height - 1
        open_nodes = []
        # nodes[i][j] = (visited, cost, parent cell)
        nodes = [[(False, -1, (-1, -1)) for _ in range(self._height)] for _ in range(self._width)]
        nodes[x][y] = (True, 0, None)
        open_nodes.append((0, (x, y)))
        while len(open_nodes) > 0:
            node = open_nodes.pop(0)
            if node[1][1] == goal_row:
                # Reconstruct the path by following parent links backwards.
                path = []
                path.append(node[1])
                temp = nodes[node[1][0]][node[1][1]]
                while temp[2] is not None:
                    path = [temp[2]] + path
                    temp = nodes[temp[2][0]][temp[2][1]]
                return path
            # Expand the 4-connected neighbourhood (diagonals are excluded
            # by the "exactly one coordinate differs" checks below).
            for i in range(max(0, node[1][0] - 1), min(self._width, node[1][0] + 2)):
                for j in range(max(0, node[1][1] - 1), min(self._height, node[1][1] + 2)):
                    if self.hasTower(i, j):
                        continue
                    if i == node[1][0] and j == node[1][1]:
                        continue
                    if i != node[1][0] and j != node[1][1]:
                        continue
                    if nodes[i][j][0] and node[0] + 1 >= nodes[i][j][1]:
                        continue
                    open_nodes.append((node[0] + 1, (i, j)))
                    nodes[i][j] = (True, node[0] + 1, node[1])
            open_nodes.sort(key=lambda n: n[0])
        return None
from dataclasses import dataclass
@dataclass
class Person:
    """Simple value object describing a person in the galactic-systems examples."""

    # Display name of the person.
    name: str
    # Height (presumably in centimetres — confirm against the data source).
    height: int
    # Mass (presumably in kilograms — confirm against the data source).
    mass: int
| 2.34375 | 2 |
# main.py — count squares, circles, and triangles in an input image
"""
Script responsible to read-in
the input image to be counted
and carry out the processing,
detection and counting logics.
"""
# Imports
import cv2
import argparse
import numpy as np
from scripts import ShapeRecognition
# Shape detector used for pre-processing and contour classification.
sr = ShapeRecognition()

# Per-shape counters.
circles = 0
squares = 0
triangles = 0

# Argument parser (CLA)
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to the input image")
args = vars(ap.parse_args())

# Load image passed from CLA; cv2.imread returns None (no exception) on a
# missing/unreadable file, so fail loudly instead of crashing later.
image = cv2.imread(args["image"])
if image is None:
    ap.error(f"could not read image: {args['image']}")

# Pre-process image (returns the binary image and the original/resized ratio).
(processed, ratio) = sr.process(image)

# Compute contours on the processed image.
# NOTE(review): 3-tuple unpacking is the OpenCV 3.x API; OpenCV 4 returns
# (contours, hierarchy) — adjust if the cv2 version changes.
(_, cnts, _) = cv2.findContours(processed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Count objects
for c in cnts:
    # Compute contour centroid; skip degenerate contours whose area (m00)
    # is zero, which would otherwise raise ZeroDivisionError.
    M = cv2.moments(c)
    if M["m00"] == 0:
        continue
    cX = int((M["m10"] / M["m00"]) * ratio)
    cY = int((M["m01"] / M["m00"]) * ratio)

    # Detect the blob shape and
    # increase relative counters
    shape = sr.detect(c)
    if shape == "circle":
        circles += 1
    elif shape == "square":
        squares += 1
    else:
        triangles += 1

    # Resize contour to original ratio
    c = c.astype("float")
    c *= ratio
    c = c.astype("int")

    # Draw contour with nearby text
    cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
    cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

print(str(squares), str(circles), str(triangles))

# # Display image
# cv2.imshow("Image", image)
# cv2.waitKey(0)
| 3.171875 | 3 |
import trimesh
import numpy as np
import cv2
import copy
import pickle
import torch
import pdb
def depth2normal(depth, f_pix_x, f_pix_y=None):
    '''
    Compute a camera-space normal map from a depth map via central differences.
    Input:
    - depth		torch.Tensor (H, W); modified in place (background pixels zeroed)
    - f_pix_x	focal length in pixels along x (K[0, 0])
    - f_pix_y	focal length in pixels along y (K[1, 1]); defaults to f_pix_x
    Return:
    - normal	torch.Tensor (H, W, 3); unit-length, zero for background pixels
    '''
    if f_pix_y is None:
        f_pix_y = f_pix_x
    h, w = depth.shape
    # Pixels with no/implausible depth (0 or beyond 1e5) count as background.
    bg_flag = (depth > 1e5) | (depth == 0)
    depth[bg_flag] = 0.0

    # Shifted copies of the depth map for central differences. Allocate
    # directly on the input's device instead of building on CPU and moving
    # (also drops an unused `eps` local from the original).
    depth_left = torch.zeros(h, w, device=depth.device)
    depth_right = torch.zeros(h, w, device=depth.device)
    depth_up = torch.zeros(h, w, device=depth.device)
    depth_down = torch.zeros(h, w, device=depth.device)
    depth_left[:, 1:w-1] = depth[:, :w-2].clone()
    depth_right[:, 1:w-1] = depth[:, 2:].clone()
    depth_up[1:h-1, :] = depth[:h-2, :].clone()
    depth_down[1:h-1, :] = depth[2:, :].clone()

    # Depth gradients scaled by focal length (central difference / 2).
    dzdx = (depth_right - depth_left) * f_pix_x / 2.0
    dzdy = (depth_down - depth_up) * f_pix_y / 2.0

    normal = torch.stack([dzdx, dzdy, -torch.ones_like(dzdx)]).permute(1, 2, 0)
    normal_length = torch.norm(normal, p=2, dim=2)
    # Tiny epsilon guards against division by zero before masking background.
    normal = normal / (normal_length + 1e-12)[:, :, None]
    normal[bg_flag] = 0.0
    return normal
def quad2rotation(quad):
    '''
    Convert unit quaternions to rotation matrices.
    input: torch.Tensor (bs, 4), ordered (qr, qi, qj, qk)
    output: torch.Tensor (bs, 3, 3)
    '''
    bs = quad.shape[0]
    qr, qi, qj, qk = quad[:,0], quad[:,1], quad[:,2], quad[:,3]
    # Allocate on the input's device. The previous `.to(quad.get_device())`
    # crashed for CPU tensors, where get_device() returns -1.
    rot_mat = torch.zeros(bs, 3, 3, device=quad.device)
    rot_mat[:,0,0] = 1 - 2 * (qj ** 2 + qk ** 2)
    rot_mat[:,0,1] = 2 * (qi * qj - qk * qr)
    rot_mat[:,0,2] = 2 * (qi * qk + qj * qr)
    rot_mat[:,1,0] = 2 * (qi * qj + qk * qr)
    rot_mat[:,1,1] = 1 - 2 * (qi ** 2 + qk ** 2)
    rot_mat[:,1,2] = 2 * (qj * qk - qi * qr)
    rot_mat[:,2,0] = 2 * (qi * qk - qj * qr)
    rot_mat[:,2,1] = 2 * (qj * qk + qi * qr)
    rot_mat[:,2,2] = 1 - 2 * (qi ** 2 + qj ** 2)
    return rot_mat
def get_camera_from_tensor(inputs):
    """Convert a (7,) or (bs, 7) tensor [quaternion | translation] into
    3x4 [R|t] camera matrices (unbatched in -> unbatched out)."""
    squeeze_output = inputs.dim() == 1
    if squeeze_output:
        inputs = inputs.unsqueeze(0)
    quad = inputs[:, :4]
    trans = inputs[:, 4:]
    pose = torch.cat([quad2rotation(quad), trans[:, :, None]], 2)
    return pose[0] if squeeze_output else pose
def get_tensor_from_camera(RT):
    """Convert a 3x4 [R|t] camera matrix into a (7,) tensor
    [quaternion | translation] — the inverse of get_camera_from_tensor.

    Accepts a torch.Tensor (CPU or GPU) or np.ndarray; a GPU input yields
    a result on the same GPU. Requires the third-party `mathutils`
    package (Blender math library) for the quaternion conversion.
    """
    gpu_id = -1
    if type(RT) == torch.Tensor:
        if RT.get_device() != -1:
            # Record the source device BEFORE moving to CPU; the original
            # read get_device() after .cpu(), so gpu_id was always -1 and
            # the result was never moved back to the GPU.
            gpu_id = RT.get_device()
            RT = RT.detach().cpu()
        RT = RT.numpy()
    from mathutils import Matrix
    R, T = RT[:,:3], RT[:,3]
    rot = Matrix(R)
    quad = rot.to_quaternion()
    tensor = np.concatenate([quad, T], 0)
    tensor = torch.from_numpy(tensor).float()
    if gpu_id != -1:
        tensor = tensor.to(gpu_id)
    return tensor
def downsize_camera_intrinsic(intrinsic, factor):
    '''
    Scale a pinhole intrinsic matrix for an image downsized by `factor`.
    Input:
    - intrinsic		type: np.array (3,3); principal point assumed at the image center
    - factor		int; must evenly divide both image dimensions
    Return:
    - np.array (3,3) with the first two rows divided by factor
    Raises:
    - ValueError when the image size is not divisible by factor
    '''
    # Recover the image size from the (assumed centered) principal point.
    img_h, img_w = int(2 * intrinsic[1,2]), int(2 * intrinsic[0,2])
    img_h_new, img_w_new = img_h / factor, img_w / factor
    # Use abs(): round() can exceed the quotient (e.g. 100/6 -> 16.67 vs 17),
    # and the original signed comparison silently accepted those sizes.
    if abs(img_h_new - round(img_h_new)) > 1e-12 or abs(img_w_new - round(img_w_new)) > 1e-12:
        raise ValueError('The image size {0} should be divisible by the factor {1}.'.format((img_h, img_w), factor))
    intrinsic_new = copy.deepcopy(intrinsic)
    intrinsic_new[0,:] = intrinsic[0,:] / factor
    intrinsic_new[1,:] = intrinsic[1,:] / factor
    return intrinsic_new
def sample_points_from_mesh(mesh, N=30000):
    '''
    Uniformly sample N points on the surface of a trimesh mesh.
    Return:
    -- points: np.array (N, 3)
    '''
    sampled, _face_indices = trimesh.sample.sample_surface(mesh, N)
    return sampled
def transform_point_cloud(points):
    '''
    Rotate a point cloud from mesh-object coordinates to point-cloud
    coordinates: (x, y, z) -> (x, -z, y). The input array is not modified.
    '''
    rotated = points.copy()
    rotated[:, 1], rotated[:, 2] = -points[:, 2], points[:, 1]
    return rotated
def read_pickle(fname):
    """Load and return the object pickled in `fname` (latin1 handles
    pickles written by Python 2)."""
    with open(fname, 'rb') as handle:
        return pickle.load(handle, encoding='latin1')
def save_render_output(render_output, fname):
    """Pickle the depth/normal/valid-mask tensors of a render result.

    `render_output` is a 4-tuple (depth, normal, valid_mask, _); each
    tensor is detached, moved to CPU, and stored as a numpy array.
    """
    depth, normal, valid_mask = render_output[0], render_output[1], render_output[2]
    payload = {
        'depth': depth.detach().cpu().numpy(),
        'normal': normal.detach().cpu().numpy(),
        'valid_mask': valid_mask.detach().cpu().numpy(),
    }
    save_pkl(payload, fname)
def save_pkl(data, fname):
    """Pickle ``data`` into the file at ``fname`` (overwriting it)."""
    with open(fname, 'wb') as handle:
        pickle.dump(data, handle)
| 2.453125 | 2 |
programming-laboratory-I/17gp/mais_elementos.py | MisaelAugusto/computer-science | 0 | 12772221 | <reponame>MisaelAugusto/computer-science<filename>programming-laboratory-I/17gp/mais_elementos.py
# coding: utf-8
# Aluno: <NAME>
# Matrícula: 117110525
# Problema: Conjunto com mais elementos
tamanho = 0
tamanhos = []
while True:
numero = raw_input()
if numero == "fim":
break
else:
if int(numero) >= 0:
tamanho += 1
else:
tamanhos.append(tamanho)
tamanho = 0
if len(tamanhos) > 0:
maior = tamanhos[0]
conjunto = 0
for i in range(1, len(tamanhos)):
if tamanhos[i] > maior:
maior = tamanhos[i]
conjunto = i
print "Conjunto %d - %d elemento(s)" % (conjunto + 1, maior)
| 3.703125 | 4 |
src/01_run-yamnet_covid19_20-03-30.py | lostanlen/sonyc-yamnet | 0 | 12772222 | <filename>src/01_run-yamnet_covid19_20-03-30.py
import sys
import datetime
import glob
import h5py
import numpy as np
import os
import tensorflow as tf
import time
# Locate the sibling "yamnet" checkout relative to this script so its modules
# (params, yamnet) can be imported without installation.
src_dir = os.path.dirname(os.path.abspath(__file__))
git_dir = os.path.split(src_dir)[0]
yamnet_dir = os.path.join(git_dir, "yamnet")
sys.path.append(yamnet_dir)
import params  # noqa: E402
import yamnet  # noqa: E402
# Parse arguments.
# The first argument is the path to a directory containing SONYC recordings
# for a given sensor.
# The second argument is the path to the output directory for YAMNet features.
args = sys.argv[1:]
sensor_dir = str(args[0])
out_dir = str(args[1])
sensor_str = os.path.split(sensor_dir)[1]
# Sensor directory names look like "<prefix>-<sonycnode_id>.<ext>" -- TODO confirm.
sonycnode_str = os.path.splitext(sensor_str)[0].split("-")[1]
# Print header.
start_time = int(time.time())
print(str(datetime.datetime.now()) + " Start.")
print("Running YAMNET on SONYC recordings for sensor " + sonycnode_str)
print("Directory: " + sensor_dir)
print("")
print("numpy version: {:s}".format(np.__version__))
print("tensorflow version: {:s}".format(tf.__version__))
print("")
# Parse parameters.
# YAMNet exposes its hyperparameters as UPPERCASE module attributes.
yamnet_params = {
    k: params.__dict__[k] for k in params.__dict__ if k == k.upper()}
for yamnet_param in yamnet_params:
    print(yamnet_param + " = " + str(yamnet_params[yamnet_param]))
print("")
# Load YAMNet.
# We turn the YAMNet model into a two-output model:
# 1. first output is the convnet embedding (task-agnostic)
# 2. second output is the audio event classification (task = AudioSet labels)
tf.get_logger().setLevel('ERROR')
graph = tf.Graph()
with graph.as_default():
    yamnet_model = yamnet.yamnet_frames_model(params)
    yamnet_model_path = os.path.join(yamnet_dir, "yamnet.h5")
    yamnet_model.load_weights(yamnet_model_path)
    yamnet_multi_model = tf.keras.Model(
        inputs=yamnet_model.inputs,
        outputs=[yamnet_model.layers[-4].output, yamnet_model.output]
    )
# Initialize HDF5 folder for prediction
data_dir = os.path.split(sensor_dir)[0]
out_pred_dir = os.path.join(data_dir, "covid_yamnet-pred")
os.makedirs(out_pred_dir, exist_ok=True)
h5_path = os.path.join(out_pred_dir, sonycnode_str + "_yamnet-pred.h5")
# Initialize NPZ folder for features
out_features_dir = os.path.join(out_dir, "covid_yamnet-features")
os.makedirs(out_features_dir, exist_ok=True)
out_sensor_dir = os.path.join(out_features_dir, sensor_str)
os.makedirs(out_sensor_dir, exist_ok=True)
# List SONYC recordings (NPZ files)
# These have been resampled to 16 kHz and converted to float32 by <NAME>
# The directory structure is <sensor_id>/<date>/<sensor_id>_<timestamp>_16k.npz
# each npz file contains the signal x and the sample rate fs
glob_regexp = os.path.join(sensor_dir, "**", "*_16k.npz")
sonyc_paths = glob.glob(glob_regexp)
# Compute features
for sonyc_path in sonyc_paths:
    date_dir, sonyc_name = os.path.split(sonyc_path)
    sensor_timestamp_str = os.path.splitext(sonyc_name)[0]
    # Drop the trailing "_16k" suffix to build the HDF5 key.
    sensor_timestamp_split = sensor_timestamp_str.split("_")[:-1]
    sonyc_key = "_".join(sensor_timestamp_split)
    sonyc_npz = np.load(sonyc_path)
    waveform = sonyc_npz["x"][np.newaxis, :]
    with graph.as_default():
        # NOTE(review): the 3-way unpack implies the model emits a third
        # output (presumably the log-mel spectrogram) -- confirm against the
        # yamnet_frames_model implementation.
        yamnet_feature, yamnet_pred, _ = yamnet_multi_model.predict(
            waveform, steps=1)
    # Save feature.
    date_str = os.path.split(date_dir)[1]
    out_date_dir = os.path.join(out_sensor_dir, date_str)
    os.makedirs(out_date_dir, exist_ok=True)
    yamnet_feature_name = sensor_timestamp_str + "_yamnet-features.npz"
    yamnet_feature_path = os.path.join(out_date_dir, yamnet_feature_name)
    np.savez(yamnet_feature_path, yamnet_feature)
    # Save prediction.
    with h5py.File(h5_path, "a") as h5_file:
        h5_file[sonyc_key] = yamnet_pred
# Print elapsed time.
print(str(datetime.datetime.now()) + " Finish.")
elapsed_time = time.time() - int(start_time)
elapsed_hours = int(elapsed_time / (60 * 60))
elapsed_minutes = int((elapsed_time % (60 * 60)) / 60)
elapsed_seconds = elapsed_time % 60.
elapsed_str = "{:>02}:{:>02}:{:>05.2f}".format(elapsed_hours,
                                               elapsed_minutes,
                                               elapsed_seconds)
print("Total elapsed time: " + elapsed_str + ".")
| 2.59375 | 3 |
regel/converter.py | heijp06/regel | 0 | 12772223 | <reponame>heijp06/regel<filename>regel/converter.py
class Converter:
    """Wraps a conversion expression that is compiled lazily with ``eval``.

    ``application`` controls whether the compiled callable is applied to a
    single value (APPLICATION_SINGLE) or mapped over every element of an
    iterable (APPLICATION_MANY).
    """

    APPLICATION_SINGLE = 0
    APPLICATION_MANY = 1

    def __init__(self, text, application):
        self.text = text
        self.application = application
        self._function = None

    def __repr__(self):
        # Single-application converters render as ":expr", many as "::expr".
        if self.application == Converter.APPLICATION_SINGLE:
            return ":" + self.text
        return "::" + self.text

    def compile(self, globals, locals):
        # NOTE: evaluates arbitrary text -- callers must trust the source.
        self._function = eval(self.text, globals, locals)

    def convert(self, value):
        # Falsy check kept deliberately: a converter compiled to a falsy
        # object is treated the same as an uncompiled one.
        if not self._function:
            raise ValueError("Converter is not compiled.")
        if self.application == Converter.APPLICATION_SINGLE:
            return self._function(value)
        return [self._function(item) for item in value]
| 2.78125 | 3 |
venv/checkers.py | JamesG3/Checkers_AI | 0 | 12772224 | import display
import board
import robot
import config as conf
import sys
import pygame
from pygame.locals import *
import time
class Checkers:
def __init__(self):
self.display = display.Display()
self.board = board.Board()
self.set_difficulty = 0
self.turn = None
self.valid_moves = []
self.curr_piece = None
self.Rbt_noMove = 0
self.Hum_noMove = 0
self._window()
def _window(self):
'''
Initialize a GUI window
'''
pygame.init()
pygame.display.set_caption("Smart Checkers Robot")
def _changeTurn(self):
'''
Change self.turn to another player
Reset variables
'''
self.turn = "black" if self.turn == "white" else "white"
self.curr_piece = None
self.valid_moves = []
def _has_move(self, player):
'''
Valid move check
Return whether the current player has move (bool)
'''
for i in xrange(6):
for j in xrange(6):
grid = self.board.checkerBoard[i][j]
if grid.color == "B"\
and grid.piece\
and grid.piece.player == player\
and self.board.valid_moves([i, j]):
return True
return False
def _check_winner(self):
'''
Check which player wins the game
Print out the message
'''
if self.board.white_piece_Num > self.board.black_piece_Num:
self.display.show_msg("HaHa You Lose! Click Right Key to Restart")
pygame.display.update()
elif self.board.white_piece_Num < self.board.black_piece_Num:
self.display.show_msg("Congratulation, You Win! Click Right Key to Restart")
pygame.display.update()
else:
self.display.show_msg("Draw! Click Right Key to Restart")
pygame.display.update()
def _restart(self):
'''
Restart game, reset all global variables
'''
self.display = display.Display()
self.board = board.Board()
self.turn = None
self.valid_moves = []
self.curr_piece = None
self.Rbt_noMove = 0
self.Hum_noMove = 0
self.set_difficulty = 0
self._window()
def _choose_AI_level(self):
'''
Keep monitoring pygame event until an AI level is choosen
'''
self.display.show_msg("Choose AI level: Easy-1, Mid-2, Hard-3")
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.unicode == '1':
self.robot = robot.Robot(conf.DEPTH_EASY)
self.set_difficulty = 1
elif event.unicode == '2':
self.robot = robot.Robot(conf.DEPTH_MID)
self.set_difficulty = 1
elif event.unicode == '3':
self.robot = robot.Robot(conf.DEPTH_HARD)
self.set_difficulty = 1
def _choose_first_move(self):
'''
Keep monitoring pygame event until first move is choosen
'''
self.display.show_msg("Who goes first? 1: You / 2: AI")
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.unicode == '1':
self.turn = "black"
self.display = display.Display()
elif event.unicode == '2':
self.turn = "white"
self.display = display.Display()
def game(self):
'''
Checker rules and actioins
Loop this function until exit
'''
def check_move():
'''
Check whether valid move exists for current player
If doesn't exist, set self.Rbt_noMove/self.Hum_noMove to 1, then change self.turn
If exist, reset self.Rbt_noMove/self.Hum_noMove to 0, and continue
If both players don't have move, game over. Call function _check_winner().
'''
if self.Hum_noMove and self.Rbt_noMove:
self._check_winner()
if pygame.mouse.get_pressed() == conf.RIGHTKEY:
self._restart()
else: # if current player has move, reset Rbt_noMove and Hum_noMove
if self._has_move(self.turn):
if self.turn == "black":
self.Rbt_noMove = 0
elif self.turn == "white":
self.Hum_noMove = 0
else: # if current player cannot move, change turn
if self.turn == "black":
self.Hum_noMove = 1
self._changeTurn()
elif self.turn == "white":
self.Rbt_noMove = 1
self._changeTurn()
# At the beginning of game, select difficulty
# and which player moves first
if self.turn is None:
if self.set_difficulty == 0: # choose AI level
self._choose_AI_level()
else: # choose who goes first
self._choose_first_move()
# Check whether current player has move
# If yes, continue. Otherwise, change turn.
check_move()
if self.turn == "white":
action = self.robot.choose_move(self.board) # choose action
if action:
time.sleep(0.5)
piece, move = action
if abs(piece[0] - move[0]) == 2: # capture move
self.board.remove([(piece[0] + move[0]) / 2, (piece[1] + move[1]) / 2])
self.board.move(piece, move)
self.curr_piece = move
time.sleep(0.5)
self._changeTurn()
# Human move
check_move()
self.mouse = self.display.mouse_to_grid(pygame.mouse.get_pos())
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == MOUSEBUTTONDOWN:
jump_step = self.board.check_jump(self.turn)
grid = self.board.checkerBoard[self.mouse[0]][self.mouse[1]]
# select piece
if grid.piece and grid.piece.player == self.turn:
if jump_step == []:
self.curr_piece = self.mouse
self.valid_moves = self.board.valid_moves(self.curr_piece)
else:
if self.mouse in jump_step:
self.curr_piece = self.mouse
self.valid_moves = self.board.valid_moves(self.curr_piece)
# choose a move
elif self.curr_piece and self.mouse in self.valid_moves:
self.board.move(self.curr_piece, self.mouse)
# if capture, then remove an adversary piece
if abs(self.curr_piece[0] - self.mouse[0]) == 2:
piece_rmv = ((self.curr_piece[0]+self.mouse[0])/2, (self.curr_piece[1]+self.mouse[1])/2)
self.board.remove(piece_rmv)
self._changeTurn()
def main(self):
while True:
self.game()
self.display. update_board(self.board, self.curr_piece, self.valid_moves)
if __name__ == "__main__":
    # Guard the entry point: importing this module previously constructed the
    # game and entered the infinite main() loop immediately, which made the
    # module impossible to import for testing or reuse.
    checkers = Checkers()
    checkers.main()
| 3.296875 | 3 |
ch11_Django/myproject/poster/models.py | Nickhool/core-python | 1 | 12772225 | <reponame>Nickhool/core-python
from django.db import models
# Create your models here.
from django.db import models
class Tweet(models.Model):
    # A short user-submitted message that goes through a moderation workflow
    # (pending -> published/rejected).
    text = models.CharField(max_length=140)
    author_email = models.CharField(max_length=200)
    created_at = models.DateTimeField(auto_now_add=True)
    # Set when the tweet is published; NULL while pending/rejected.
    published_at = models.DateTimeField(null=True)
    STATE_CHOICES = (
        ('pending', 'pending'),
        ('published', 'published'),
        ('rejected', 'rejected')
    )
    state = models.CharField(max_length=15, choices=STATE_CHOICES)
    # NOTE(review): __unicode__ is only used by Python 2 Django; on Python 3
    # a __str__ method would be needed for the same effect -- confirm the
    # target Django/Python version.
    def __unicode__(self):
        return self.text
    class Meta:
        # Custom permission used to gate the moderation views.
        permissions = (
            ("can_approve_or_reject_tweets",
             "Can approve or reject tweets"),
        )
class Comment(models.Model):
    # A comment attached to a Tweet; deleted together with its tweet.
    tweet = models.ForeignKey(Tweet, on_delete=models.CASCADE)
    text = models.CharField(max_length=300)
    created_at = models.DateTimeField(auto_now_add=True)
    # NOTE(review): Python-2-only repr hook; see note on Tweet.__unicode__.
    def __unicode__(self):
        return self.text
python/100-days-of-code/programs/day_22/score_board.py | star-junk/references | 0 | 12772226 | from turtle import Turtle
class ScoreBoard(Turtle):
    """An invisible turtle that renders a numeric score at a fixed position."""

    def __init__(self, x: int = 0, y: int = 0):
        super().__init__()
        self.score = 0
        self.hideturtle()
        self.penup()  # lift the pen before repositioning so no line is drawn
        self.color('white')
        self.setpos((x, y))
        self.write_score()

    def write_score(self):
        """Erase the previously drawn number and draw the current score."""
        self.clear()
        self.write(str(self.score), font=('Arial', 46))

    def increase_by_one(self):
        """Bump the score by one and refresh the display."""
        self.score = self.score + 1
        self.write_score()
| 3.6875 | 4 |
pytorch_lightning/strategies/bagua.py | neptune-ml/pytorch-lightning | 0 | 12772227 | <reponame>neptune-ml/pytorch-lightning<gh_stars>0
import logging
import os
from typing import Any, Dict, List, Optional, Union
import torch
from torch.nn import Module
import pytorch_lightning as pl
from pytorch_lightning.overrides.base import (
_LightningModuleWrapperBase,
_LightningPrecisionModuleWrapperBase,
unwrap_lightning_module,
)
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.precision import PrecisionPlugin
from pytorch_lightning.strategies.ddp import DDPStrategy
from pytorch_lightning.strategies.strategy import TBroadcast
from pytorch_lightning.utilities.distributed import ReduceOp
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _BAGUA_AVAILABLE
from pytorch_lightning.utilities.seed import reset_seed
if _BAGUA_AVAILABLE:
import bagua.torch_api as bagua
from bagua.torch_api.algorithms import Algorithm
from bagua.torch_api.algorithms.q_adam import QAdamOptimizer
from bagua.torch_api.communication import allreduce_inplace, barrier, broadcast_object, is_initialized
from bagua.torch_api.communication import ReduceOp as BaguaReduceOp
from bagua.torch_api.data_parallel.distributed import DistributedDataParallel_V1_9_0 as BaguaDistributedDataParallel
else:
BaguaReduceOp = None
BaguaDistributedDataParallel = None
log = logging.getLogger(__name__)
class LightningBaguaModule(_LightningModuleWrapperBase):
    """LightningModule wrapper that carries a unique Bagua module name."""

    def __init__(self, pl_module: Union["pl.LightningModule", _LightningPrecisionModuleWrapperBase]) -> None:
        super().__init__(pl_module)
        # Bagua use `bagua_module_name` to distinguish different modules;
        # combine the class name with the object id so each wrapped module
        # gets a unique identifier.
        self._bagua_module_name = "{}{}".format(type(pl_module).__name__, id(pl_module))
if _BAGUA_AVAILABLE:
    # Convert a reduce op to its equivalent `bagua.torch_api.ReduceOp`
    _bagua_reduce_ops = {
        ReduceOp.SUM: BaguaReduceOp.SUM,
        ReduceOp.PRODUCT: BaguaReduceOp.PRODUCT,
        ReduceOp.MIN: BaguaReduceOp.MIN,
        ReduceOp.MAX: BaguaReduceOp.MAX,
        ReduceOp.BAND: BaguaReduceOp.BAND,
        ReduceOp.BOR: BaguaReduceOp.BOR,
        ReduceOp.BXOR: BaguaReduceOp.BXOR,
        "avg": BaguaReduceOp.AVG,
        "mean": BaguaReduceOp.AVG,
        "sum": BaguaReduceOp.SUM,
    }
else:
    # Without bagua installed the map stays empty; BaguaStrategy.reduce will
    # then reject any named reduce op.
    _bagua_reduce_ops = {}
class BaguaStrategy(DDPStrategy):
    strategy_name = "bagua"
    def __init__(
        self,
        algorithm: str = "gradient_allreduce",
        flatten: bool = True,
        accelerator: Optional["pl.accelerators.accelerator.Accelerator"] = None,
        parallel_devices: Optional[List[torch.device]] = None,
        cluster_environment: Optional[ClusterEnvironment] = None,
        checkpoint_io: Optional[CheckpointIO] = None,
        precision_plugin: Optional[PrecisionPlugin] = None,
        **bagua_kwargs: Union[Any, Dict[str, Any]],
    ):
        """Strategy for training using the `Bagua <https://github.com/BaguaSys/bagua>`_ library, with advanced
        distributed training algorithms and system optimizations.
        This strategy requires the `bagua` package to be installed. See
        `installation guide <https://tutorials.baguasys.com/installation>`_ for more information.
        The :class:`BaguaStrategy` is only supported on GPU and on Linux systems.
        Arguments:
            algorithm: Distributed algorithm used to do the actual communication and update. Built-in algorithms
                include "gradient_allreduce", "bytegrad", "decentralized", "low_precision_decentralized", "qadam" and
                "async".
            flatten: Whether to flatten the Bagua communication buckets. The flatten operation will reset data
                pointer of bucket tensors so that they can use faster code paths.
            bagua_kwargs: Additional keyword arguments that will be passed to initialize the Bagua algorithm. More
                details on keyword arguments accepted for each algorithm can be found in the
                `documentation <https://bagua.readthedocs.io/en/latest/autoapi/bagua/torch_api/algorithms/index.html>`_.
        """
        if not _BAGUA_AVAILABLE:
            raise MisconfigurationException(
                "To use the `BaguaStrategy`, you must have `Bagua` installed. Use `pip install bagua` to install it."
            )
        super().__init__(
            accelerator=accelerator,
            parallel_devices=parallel_devices,
            cluster_environment=cluster_environment,
            checkpoint_io=checkpoint_io,
            precision_plugin=precision_plugin,
        )
        # Stored until configure_ddp(), when the algorithm is instantiated.
        self._bagua_algorithm = algorithm
        self._bagua_flatten = flatten
        self._bagua_kwargs = bagua_kwargs
    @property
    def lightning_module(self) -> "pl.LightningModule":
        # Unwrap the Bagua DDP container (if already wrapped) before handing
        # the module to the generic Lightning unwrapper.
        model = self._model
        if isinstance(model, BaguaDistributedDataParallel):
            model = model.module
        return unwrap_lightning_module(model)  # type: ignore[arg-type]
    def setup_distributed(self) -> None:
        """Initialize ranks and the Bagua process group."""
        reset_seed()
        # determine which process we are and world size
        self.set_world_ranks()
        self._init_bagua_distributed()
    def _init_bagua_distributed(self) -> None:
        self._set_node_environment_variables()
        log.info(
            "Initializing Bagua Distributed: "
            f"GLOBAL_RANK: {self.global_rank}, "
            f"MEMBER: {self.global_rank + 1}/{self.world_size}"
        )
        # need to set device first before initialize Bagua distributed environment
        # Note: setup_environment calls super().setup_distributed after calling init_distributed()
        torch.cuda.set_device(self.local_rank)
        if not is_initialized():
            bagua.init_process_group()
    def _set_node_environment_variables(self) -> None:
        """Set the environment variables as required by the :func:`bagua.init_process_group` call.
        This enables the use of other cluster environments which don't set these exact variables, e.g., Bagua can be
        launched with ``torch.distributed.run``.
        """
        os.environ["MASTER_ADDR"] = self.cluster_environment.main_address  # type: ignore[union-attr]
        os.environ["MASTER_PORT"] = str(self.cluster_environment.main_port)  # type: ignore[union-attr]
        os.environ["RANK"] = str(self.global_rank)
        os.environ["NODE_RANK"] = str(self.node_rank)
        os.environ["WORLD_SIZE"] = str(self.world_size)
        os.environ["LOCAL_RANK"] = str(self.local_rank)
    def _check_qadam_optimizer(self) -> None:
        # The "qadam" algorithm requires exactly one QAdamOptimizer and at
        # most one LR scheduler; it is passed to the algorithm via kwargs.
        has_qadam_optimizer = any([isinstance(opt, QAdamOptimizer) for opt in self.optimizers])
        if not has_qadam_optimizer or len(self.optimizers) > 1 or len(self.lr_scheduler_configs) > 1:
            raise MisconfigurationException("Bagua QAdam can only accept one QAdamOptimizer and one LR Scheduler.")
        self._bagua_kwargs["q_adam_optimizer"] = self.optimizers[0]
    def configure_ddp(self) -> None:
        """Wrap the LightningModule for Bagua data-parallel training."""
        model = LightningBaguaModule(self.model)  # type: ignore[arg-type]
        self._model = self._setup_model(model)
        # start the background communication for async algorithm
        assert self.lightning_module.trainer is not None
        if self.lightning_module.trainer.training and self._bagua_algorithm == "async":
            self.model.bagua_algorithm.resume(self.model)  # type: ignore
    def _setup_model(self, model: Module) -> BaguaDistributedDataParallel:
        """Wraps the model into a Bagua distributed module."""
        if self._bagua_algorithm == "qadam":
            self._check_qadam_optimizer()
        algorithm = Algorithm.init(self._bagua_algorithm, **self._bagua_kwargs)
        return BaguaDistributedDataParallel(
            module=model,
            optimizers=self.optimizers,
            algorithm=algorithm,
            gradient_as_bucket_view=self._bagua_flatten,
        )
    @classmethod
    def register_strategies(cls, strategy_registry: Dict) -> None:
        """Register this strategy under its ``strategy_name``."""
        strategy_registry.register(
            cls.strategy_name,
            cls,
            description=f"{cls.__class__.__name__}",
        )
    def teardown(self) -> None:
        # abort the background communication for async algorithm
        assert self.lightning_module.trainer is not None
        if self.lightning_module.trainer.training and self._bagua_algorithm == "async":
            self.model.bagua_algorithm.abort(self.model)  # type: ignore
        # Unwrap the DDP container so subsequent stages see the plain module.
        if isinstance(self.model, BaguaDistributedDataParallel):
            self.model = self.lightning_module
        if self.root_device.type == "cuda":
            # GPU teardown
            log.detail(f"{self.__class__.__name__}: moving model to CPU")
            self.lightning_module.cpu()
            # clean up memory
            torch.cuda.empty_cache()
    def barrier(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        # No-op unless the Bagua process group has been initialized.
        if is_initialized():
            barrier()
    def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
        """Broadcast ``obj`` from rank ``src`` to all other ranks."""
        return broadcast_object(obj, src)
    def reduce(
        self, tensor: torch.Tensor, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = "mean"
    ) -> torch.Tensor:
        """Reduces a tensor from several distributed processes to one aggregated tensor.
        Args:
            tensor: The tensor to sync and reduce.
            group: The process group to gather results from. Defaults to all processes (world).
            reduce_op: The reduction operation.
                Can also be a string 'sum' or ReduceOp.
        Return:
            The reduced value, except when the input was not a tensor the output remains is unchanged.
        """
        if not isinstance(tensor, torch.Tensor):
            return tensor
        if group is not None:
            raise ValueError("`Bagua` does not support allreduce using a subcommunicator at this time. Unset `group`.")
        if reduce_op is None:
            op = BaguaReduceOp.AVG
        else:
            op = _bagua_reduce_ops.get(reduce_op, None)
            if op is None:
                raise ValueError(f"Unrecognized `reduce_op` for `BaguaStrategy`: {reduce_op}")
        # Bagua reduces in place; the input tensor is mutated and returned.
        allreduce_inplace(tensor, op=op)
        return tensor
| 1.851563 | 2 |
polling_stations/apps/data_collection/management/commands/import_hyndburn.py | mtravis/UK-Polling-Stations | 0 | 12772228 | <gh_stars>0
from data_collection.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
    # Polling-station importer for Hyndburn (GSS code E07000120). Both
    # addresses and stations come from the same Halarose CSV export.
    council_id = "E07000120"
    addresses_name = "local.2018-05-03/Version 1/polling_station_export-2018-02-16.csv"
    stations_name = "local.2018-05-03/Version 1/polling_station_export-2018-02-16.csv"
    elections = ["local.2018-05-03"]
    # The export is not UTF-8; decode with the western-European codepage.
    csv_encoding = "windows-1252"
| 1.578125 | 2 |
packages/core/minos-microservice-aggregate/minos/aggregate/transactions/repositories/abc.py | minos-framework/minos-python | 247 | 12772229 | <gh_stars>100-1000
from __future__ import (
annotations,
)
from abc import (
ABC,
abstractmethod,
)
from datetime import (
datetime,
)
from typing import (
AsyncIterator,
Optional,
)
from uuid import (
UUID,
)
from minos.common import (
Inject,
Injectable,
Lock,
LockPool,
NotProvidedException,
PoolFactory,
SetupMixin,
)
from ...exceptions import (
TransactionNotFoundException,
)
from ..entries import (
TransactionEntry,
TransactionStatus,
)
@Injectable("transaction_repository")
class TransactionRepository(ABC, SetupMixin):
    """Transaction Repository base class."""
    @Inject()
    def __init__(
        self, lock_pool: Optional[LockPool] = None, pool_factory: Optional[PoolFactory] = None, *args, **kwargs
    ):
        super().__init__(*args, **kwargs)
        # Fall back to the factory's "lock" pool when no explicit pool is given.
        if lock_pool is None and pool_factory is not None:
            lock_pool = pool_factory.get_pool("lock")
        if lock_pool is None:
            raise NotProvidedException("A lock pool instance is required.")
        self._lock_pool = lock_pool
    async def submit(self, transaction: TransactionEntry) -> TransactionEntry:
        """Submit a new or updated transaction to store it on the repository.
        :param transaction: The transaction to be stored.
        :return: This method does not return anything.
        """
        return await self._submit(transaction)
    @abstractmethod
    async def _submit(self, transaction: TransactionEntry) -> TransactionEntry:
        # Concrete backends implement the actual persistence here.
        raise NotImplementedError
    # noinspection PyUnusedLocal
    async def get(self, uuid: UUID, **kwargs) -> TransactionEntry:
        """Get a ``TransactionEntry`` from its identifier.
        :param uuid: Identifier of the ``RootEntity``.
        :param kwargs: Additional named arguments.
        :return: The ``TransactionEntry`` instance.
        """
        # Take the first match from the select stream; an exhausted stream
        # means no transaction with that identifier exists.
        try:
            return await self.select(uuid=uuid).__anext__()
        except StopAsyncIteration:
            raise TransactionNotFoundException(f"Transaction identified by {uuid!r} does not exist.")
    async def select(
        self,
        uuid: Optional[UUID] = None,
        uuid_ne: Optional[UUID] = None,
        uuid_in: Optional[tuple[UUID, ...]] = None,
        destination_uuid: Optional[UUID] = None,
        status: Optional[TransactionStatus] = None,
        status_in: Optional[tuple[str, ...]] = None,
        event_offset: Optional[int] = None,
        event_offset_lt: Optional[int] = None,
        event_offset_gt: Optional[int] = None,
        event_offset_le: Optional[int] = None,
        event_offset_ge: Optional[int] = None,
        updated_at: Optional[datetime] = None,
        updated_at_lt: Optional[datetime] = None,
        updated_at_gt: Optional[datetime] = None,
        updated_at_le: Optional[datetime] = None,
        updated_at_ge: Optional[datetime] = None,
        **kwargs,
    ) -> AsyncIterator[TransactionEntry]:
        """Get a transaction from the repository.
        :param uuid: Transaction identifier equal to the given value.
        :param uuid_ne: Transaction identifier not equal to the given value
        :param uuid_in: Transaction identifier within the given values.
        :param destination_uuid: Destination Transaction identifier equal to the given value.
        :param status: Transaction status equal to the given value.
        :param status_in: Transaction status within the given values
        :param event_offset: Event offset equal to the given value.
        :param event_offset_lt: Event Offset lower than the given value
        :param event_offset_gt: Event Offset greater than the given value
        :param event_offset_le: Event Offset lower or equal to the given value
        :param event_offset_ge: Event Offset greater or equal to the given value
        :param updated_at: Updated at equal to the given value.
        :param updated_at_lt: Updated at lower than the given value.
        :param updated_at_gt: Updated at greater than the given value.
        :param updated_at_le: Updated at lower or equal to the given value.
        :param updated_at_ge: Updated at greater or equal to the given value.
        :param kwargs: Additional named arguments.
        :return: An asynchronous iterator.
        """
        # Forward every filter to the backend-specific implementation.
        generator = self._select(
            uuid=uuid,
            uuid_ne=uuid_ne,
            uuid_in=uuid_in,
            destination_uuid=destination_uuid,
            status=status,
            status_in=status_in,
            event_offset=event_offset,
            event_offset_lt=event_offset_lt,
            event_offset_gt=event_offset_gt,
            event_offset_le=event_offset_le,
            event_offset_ge=event_offset_ge,
            updated_at=updated_at,
            updated_at_lt=updated_at_lt,
            updated_at_gt=updated_at_gt,
            updated_at_le=updated_at_le,
            updated_at_ge=updated_at_ge,
            **kwargs,
        )
        # noinspection PyTypeChecker
        async for entry in generator:
            yield entry
    @abstractmethod
    async def _select(self, **kwargs) -> AsyncIterator[TransactionEntry]:
        # Concrete backends translate the keyword filters into queries here.
        raise NotImplementedError
    def write_lock(self) -> Lock:
        """Get write lock.
        :return: An asynchronous context manager.
        """
        return self._lock_pool.acquire("aggregate_transaction_write_lock")
| 2.28125 | 2 |
allianceutils/util/__init__.py | AllianceSoftware/alliance-django-utils | 2 | 12772230 | from pathlib import Path
from typing import Callable
from typing import Iterable
from typing import Tuple
from typing import Union
from django.conf import settings
from django.utils.autoreload import autoreload_started
from django.utils.autoreload import StatReloader
from .camel_case import camel_to_underscore
from .camel_case import camelize
from .camel_case import underscore_to_camel
from .camel_case import underscoreize
from .date import python_to_django_date_format
from .get_firstparty_apps import get_firstparty_apps
# Public API of this utility package, re-exported from the submodules above.
# NOTE(review): `add_autoreload_extra_files` (defined below) is not listed
# here -- confirm whether that omission is intentional.
__all__ = [
    'camel_to_underscore',
    'camelize',
    'get_firstparty_apps',
    'underscore_to_camel',
    'underscoreize',
    'python_to_django_date_format',
    'retry_fn',
]
def retry_fn(fn: Callable, allowable_exceptions: Tuple, retry_count: int = 5):
    """
    Call ``fn``, retrying if an exception in ``allowable_exceptions`` is raised.

    :param fn: zero-argument callable to invoke.
    :param allowable_exceptions: tuple of exception types that trigger a retry;
        any other exception propagates immediately.
    :param retry_count: total number of attempts (not additional retries).
    :return: the return value of ``fn`` from the first successful call.
        NOTE(review): if ``retry_count <= 0`` the function is never called and
        ``None`` is returned -- behaviour preserved from the original.
    :raises: the last allowable exception once all attempts are exhausted.
    """
    last_attempt = retry_count - 1
    for attempt in range(retry_count):
        try:
            return fn()
        except allowable_exceptions:
            # Re-raise only when this was the final attempt.
            if attempt == last_attempt:
                raise
def add_autoreload_extra_files(extra_files: Iterable[Union[str, Path]]):
    """Register extra files to be watched by the dev-server autoreloader.

    Works with both django's ``runserver`` (via the ``autoreload_started``
    signal) and django-extensions' ``runserver_plus`` (via the
    ``RUNSERVER_PLUS_EXTRA_FILES`` setting). No-op unless ``DEBUG`` is on.
    """
    if not settings.DEBUG:
        return
    # werkzeug is only present when runserver_plus is usable.
    try:
        from werkzeug.serving import is_running_from_reloader
    except ImportError:
        is_running_from_reloader = None
    if is_running_from_reloader and is_running_from_reloader():
        # we're running from the main runserver_plus process
        if not hasattr(settings, 'RUNSERVER_PLUS_EXTRA_FILES'):
            settings.RUNSERVER_PLUS_EXTRA_FILES = []
        settings.RUNSERVER_PLUS_EXTRA_FILES += extra_files
    else:
        # either:
        # - we're using the runserver (django) server
        # - we're running from a child runserver_plus thread. If this is the case
        #   then the django autoreload signal will do nothing: working as intended
        def add_watched_files(sender: StatReloader, **kwargs):
            # Normalise everything to Path objects for the StatReloader.
            sender.extra_files.update([Path(p) for p in extra_files])
        autoreload_started.connect(add_watched_files)
| 1.9375 | 2 |
ci/report/final_result.py | PradeepKiruvale/localworkflow | 6 | 12772231 | #!/bin/python3
"""Parse final xml and return an error if there are failures.
"""
import sys
from xml.dom.minidom import parse
# Parse the JUnit-style result XML given as the first CLI argument and exit
# non-zero if it records any errors or failures.
dom = parse(sys.argv[1])
errors = 0
failures = 0
# Only the top-level elements are inspected; the "failures"/"errors"
# attributes are expected on the testsuite root -- TODO confirm the schema.
for nodes in dom.childNodes:
    l = nodes.attributes.length
    for node in range(l):
        attr = nodes.attributes.item(node)
        print(f"    {attr.name} : {attr.value}")
        if attr.name == "failures":
            failures = int(attr.value)
        if attr.name == "errors":
            errors = int(attr.value)
print(f"Recorded {errors} errors and {failures} failures in {sys.argv[1]}")
# Exit code doubles as the CI pass/fail signal.
if errors == 0 and failures == 0:
    print("Passed, no errors.")
    sys.exit(0)
else:
    print("Failed, there are errors in the test run.")
    sys.exit(1)
| 3.484375 | 3 |
tests/clients/settings/test_org_settings.py | PixelogicDev/py42 | 0 | 12772232 | <reponame>PixelogicDev/py42<gh_stars>0
import json
from copy import deepcopy
import pytest
from tests.clients.conftest import param
from tests.clients.conftest import PHOTOS_REGEX
from tests.clients.conftest import PICTURES_REGEX
from tests.clients.conftest import TEST_ADDED_EXCLUDED_PATH
from tests.clients.conftest import TEST_ADDED_PATH
from tests.clients.conftest import TEST_EXTERNAL_DOCUMENTS_DIR
from tests.clients.conftest import TEST_HOME_DIR
from tests.clients.conftest import TEST_PHOTOS_DIR
from py42.clients.settings import get_val
from py42.clients.settings.org_settings import OrgSettings
from py42.exceptions import Py42Error
# 1 GB expressed in (decimal) bytes, used by quota-related tests.
ONEGB = 1000000000
# Canned "t_settings" API payload: maps setting keys to their scope, raw
# string value, lock flag and numeric id, mirroring the server response shape.
# Note that several "value" entries are themselves JSON-encoded strings.
TEST_T_SETTINGS_DICT = {
    "device_upgrade_delay": {
        "scope": "ORG",
        "value": "60",
        "locked": False,
        "id": 510808,
    },
    "org-securityTools-device-detection-enable": {
        "scope": "ORG",
        "value": "true",
        "locked": False,
        "id": 537575,
    },
    "device_engine_pause_allowedTypes": {
        "scope": "ORG",
        "value": '["legalHold","backup"]',
        "locked": False,
        "id": 537575,
    },
    "device_advancedExfiltrationDetection_enabled": {
        "scope": "ORG",
        "value": "true",
        "locked": False,
        "id": 537575,
    },
    "org_securityTools_printer_detection_enable": {
        "scope": "ORG",
        "value": "true",
        "locked": False,
        "id": 537575,
    },
    "device_network_dscp_preferIP4": {
        "scope": "ORG",
        "value": "false",
        "locked": False,
        "id": 537575,
    },
    "org-securityTools-cloud-detection-enable": {
        "scope": "ORG",
        "value": "true",
        "locked": False,
        "id": 537575,
    },
    "org-securityTools-enable": {
        "scope": "ORG",
        "value": "true",
        "locked": False,
        "id": 537575,
    },
    "c42.msa.acceptance": {
        "scope": "ORG",
        "value": "917633711460206173;<EMAIL>;2019-09-05T17:05:09:046",
        "locked": True,
        "id": 510682,
    },
    "org-securityTools-yara-scanner-enable": {
        "scope": "ORG",
        "value": "true",
        "locked": False,
        "id": 510853,
    },
    "org-securityTools-restore-detection-enable": {
        "scope": "ORG",
        "value": "true",
        "locked": False,
        "id": 510853,
    },
    "device_fileForensics_enabled": {
        "scope": "ORG",
        "value": "true",
        "locked": False,
        "id": 537575,
    },
    "device_webRestore_enabled": {
        "scope": "ORG",
        "value": "false",
        "locked": False,
        "id": 510808,
    },
    "device_network_utilization_schedule_enabled": {
        "scope": "ORG",
        "value": "true",
        "locked": True,
        "id": 537575,
    },
    "device_network_utilization_schedule_rate": {
        "scope": "ORG",
        "value": '{"peak":{"wan":{"active":"256","idle":"0"},"lan":{"active":"256","idle":"256"}},"offPeak":{"wan":{"active":0,"idle":0},"lan":{"active":0,"idle":0}}}',
        "locked": True,
        "id": 537575,
    },
    "org-securityTools-open-file-detection-enable": {
        "scope": "ORG",
        "value": "true",
        "locked": False,
        "id": 537575,
    },
    "device_network_utilization_schedule": {
        "scope": "ORG",
        "value": '{"sun":{"included":"true","startTimeOfDay":"09:00","endTimeOfDay":"17:00"},"mon":{"included":"true","startTimeOfDay":"09:00","endTimeOfDay":"17:00"},"tue":{"included":"true","startTimeOfDay":"09:00","endTimeOfDay":"17:00"},"wed":{"included":true,"startTimeOfDay":"12:00","endTimeOfDay":"19:00"},"thu":{"included":"true","startTimeOfDay":"09:00","endTimeOfDay":"17:00"},"fri":{"included":"true","startTimeOfDay":"09:00","endTimeOfDay":"17:00"},"sat":{"included":"true","startTimeOfDay":"09:00","endTimeOfDay":"17:00"}}',
        "locked": True,
        "id": 537575,
    },
}
@pytest.fixture
def org_settings_dict():
    """Org settings payload (inheritance disabled), loaded from the JSON fixture file."""
    fixture_path = "tests/clients/settings/org_settings_not_inherited.json"
    with open(fixture_path, "r") as fixture_file:
        payload = json.load(fixture_file)
    return payload["data"]
@pytest.fixture
def org_settings_inherited_dict():
    """Org settings payload (inheritance enabled), loaded from the JSON fixture file."""
    fixture_path = "tests/clients/settings/org_settings_inherited.json"
    with open(fixture_path, "r") as fixture_file:
        payload = json.load(fixture_file)
    return payload["data"]
@pytest.fixture
def org_device_defaults_with_empty_values(org_settings_dict):
    """Org settings whose default backup set has a cleared file selection and
    no filename exclusions.
    """
    settings = deepcopy(org_settings_dict)
    backup_paths = settings["deviceDefaults"]["serviceBackupConfig"]["backupConfig"][
        "backupSets"
    ]["backupSet"][0]["backupPaths"]
    # empty file selection
    backup_paths["pathset"] = [
        {"paths": {"@os": "Linux", "path": [], "@cleared": "true"}}
    ]
    # empty filename exclusions
    backup_paths["excludeUser"] = [{u"windows": [], u"linux": [], u"macintosh": []}]
    return settings
@pytest.fixture
def org_device_defaults_with_single_values(org_settings_dict):
    """Org settings whose default backup set has exactly one included path and
    one filename-exclusion pattern.
    """
    settings = deepcopy(org_settings_dict)
    backup_paths = settings["deviceDefaults"]["serviceBackupConfig"]["backupConfig"][
        "backupSets"
    ]["backupSet"][0]["backupPaths"]
    # single-path file selection
    backup_paths["pathset"] = [{"path": {"@include": TEST_HOME_DIR}, "@os": "Linux"}]
    # single filename exclusion
    backup_paths["excludeUser"] = [
        {
            "windows": [],
            "pattern": {"@regex": PHOTOS_REGEX},
            "linux": [],
            "macintosh": [],
        }
    ]
    return settings
@pytest.fixture
def org_device_defaults_with_multiple_values(org_settings_dict):
    """Org settings whose default backup set has multiple included paths, one
    excluded path, and multiple filename-exclusion patterns.
    """
    settings = deepcopy(org_settings_dict)
    backup_paths = settings["deviceDefaults"]["serviceBackupConfig"]["backupConfig"][
        "backupSets"
    ]["backupSet"][0]["backupPaths"]
    backup_paths["pathset"] = [
        {
            "path": [
                {"@include": TEST_HOME_DIR},
                {"@include": TEST_EXTERNAL_DOCUMENTS_DIR},
                {"@exclude": TEST_PHOTOS_DIR},
            ],
            "@os": "Linux",
        }
    ]
    backup_paths["excludeUser"] = [
        {
            "windows": [],
            "pattern": [{"@regex": PHOTOS_REGEX}, {"@regex": PICTURES_REGEX}],
            "linux": [],
            "macintosh": [],
        }
    ]
    return settings
class TestOrgSettings(object):
    """Tests for ``OrgSettings``: property access, org-level inheritance
    flags, and the t-setting "packets" produced when security-tool settings
    are changed.

    Fix in this revision: the two endpoint-monitoring packet tests had
    swapped names — the one named ``to_true_from_false`` actually set the
    value to False (and vice versa). Names now match behavior.
    """

    @pytest.mark.parametrize(
        "param",
        [
            ("org_name", "TEST_ORG"),
            ("external_reference", "test_ref"),
            ("notes", "test_note"),
            ("archive_hold_days", 365),
            ("maximum_user_subscriptions", 99),
            ("org_backup_quota", -1),
            ("user_backup_quota", -1),
            ("web_restore_admin_limit", 500),
            ("web_restore_user_limit", 250),
            ("backup_warning_email_days", 3),
            ("backup_critical_email_days", 14),
            ("backup_alert_recipient_emails", ["<EMAIL>"]),
        ],
    )
    def test_org_settings_properties_retrieve_expected_results(
        self, param, org_settings_dict
    ):
        org_settings = OrgSettings(org_settings_dict, TEST_T_SETTINGS_DICT)
        attr, expected = param
        assert getattr(org_settings, attr) == expected

    def test_inherited_org_settings_inheritance_flags_return_true(
        self, org_settings_inherited_dict
    ):
        org_settings = OrgSettings(org_settings_inherited_dict, TEST_T_SETTINGS_DICT)
        assert org_settings.quota_settings_inherited
        assert org_settings.reporting_settings_inherited

    @pytest.mark.parametrize(
        "param",
        [
            ("archive_hold_days", 14),
            ("maximum_user_subscriptions", -1),
            ("org_backup_quota", -1),
            ("user_backup_quota", -1),
            ("web_restore_admin_limit", 250),
            ("web_restore_user_limit", 250),
            ("backup_warning_email_days", 7),
            ("backup_critical_email_days", 14),
            ("backup_alert_recipient_emails", []),
        ],
    )
    def test_inherited_org_settings_properties_retrieve_expected_results(
        self, param, org_settings_inherited_dict
    ):
        # Inherited orgs fall back to the parent's defaults for these values.
        org_settings = OrgSettings(org_settings_inherited_dict, TEST_T_SETTINGS_DICT)
        attr, expected = param
        assert getattr(org_settings, attr) == expected

    @pytest.mark.parametrize(
        "param",
        [
            ("archive_hold_days", 15),
            ("maximum_user_subscriptions", 100),
            ("org_backup_quota", 10000),
            ("user_backup_quota", 10000),
        ],
    )
    def test_inherited_org_quota_settings_setattr_removes_inheritance(
        self, param, org_settings_inherited_dict
    ):
        # Writing any quota setting must break quota inheritance.
        org_settings = OrgSettings(org_settings_inherited_dict, TEST_T_SETTINGS_DICT)
        attr, val = param
        setattr(org_settings, attr, val)
        assert not org_settings.quota_settings_inherited

    @pytest.mark.parametrize(
        "param",
        [
            (
                "available_destinations",
                {
                    "632540230984925185": "PROe Cloud, US - West",
                    "43": "PROe Cloud, US",
                    "673679195225718785": "PROe Cloud, AMS",
                    "587738803578339329": "PROe Cloud, SIN",
                },
            ),
            ("warning_email_enabled", False),
            ("critical_email_enabled", False),
            ("warning_alert_days", 3),
            ("critical_alert_days", 5),
            ("backup_status_email_enabled", False),
            ("backup_status_email_frequency_days", 7),
        ],
    )
    def test_org_settings_device_defaults_retrieve_expected_results(
        self, param, org_settings_dict
    ):
        org_settings = OrgSettings(org_settings_dict, TEST_T_SETTINGS_DICT)
        attr, expected = param
        assert getattr(org_settings.device_defaults, attr) == expected

    def test_org_settings_endpoint_monitoring_enabled_returns_expected_results(
        self, org_settings_dict
    ):
        # The property mirrors the "org-securityTools-enable" t-setting value.
        t_setting = deepcopy(TEST_T_SETTINGS_DICT)
        t_setting["org-securityTools-enable"]["value"] = "true"
        org_settings = OrgSettings(org_settings_dict, t_setting)
        assert org_settings.endpoint_monitoring_enabled is True
        t_setting["org-securityTools-enable"]["value"] = "false"
        org_settings = OrgSettings(org_settings_dict, t_setting)
        assert org_settings.endpoint_monitoring_enabled is False

    def test_org_settings_set_endpoint_monitoring_enabled_to_false_from_true_creates_expected_packets(
        self, org_settings_dict
    ):
        # Disabling endpoint monitoring must cascade "false" packets to every
        # sub-category setting as well as the master switch.
        t_setting = deepcopy(TEST_T_SETTINGS_DICT)
        t_setting["org-securityTools-enable"]["value"] = "true"
        org_settings = OrgSettings(org_settings_dict, t_setting)
        org_settings.endpoint_monitoring_enabled = False
        assert {
            "key": "org-securityTools-enable",
            "value": "false",
            "locked": False,
        } in org_settings.packets
        assert {
            "key": "device_advancedExfiltrationDetection_enabled",
            "value": "false",
            "locked": False,
        } in org_settings.packets
        assert {
            "key": "org-securityTools-cloud-detection-enable",
            "value": "false",
            "locked": False,
        } in org_settings.packets
        assert {
            "key": "org-securityTools-open-file-detection-enable",
            "value": "false",
            "locked": False,
        } in org_settings.packets
        assert {
            "key": "org-securityTools-device-detection-enable",
            "value": "false",
            "locked": False,
        } in org_settings.packets
        assert {
            "key": "org_securityTools_printer_detection_enable",
            "value": "false",
            "locked": False,
        } in org_settings.packets
        assert len(org_settings.packets) == 6

    def test_org_settings_set_endpoint_monitoring_enabled_to_true_from_false_creates_expected_packets(
        self, org_settings_dict
    ):
        # Enabling endpoint monitoring only flips the master switch and the
        # exfiltration-detection setting; sub-categories stay untouched.
        t_setting = deepcopy(TEST_T_SETTINGS_DICT)
        t_setting["org-securityTools-enable"]["value"] = "false"
        org_settings = OrgSettings(org_settings_dict, t_setting)
        org_settings.endpoint_monitoring_enabled = True
        assert {
            "key": "org-securityTools-enable",
            "value": "true",
            "locked": False,
        } in org_settings.packets
        assert {
            "key": "device_advancedExfiltrationDetection_enabled",
            "value": "true",
            "locked": False,
        } in org_settings.packets
        assert len(org_settings.packets) == 2

    @pytest.mark.parametrize(
        "param",
        [
            (
                "endpoint_monitoring_removable_media_enabled",
                "org-securityTools-device-detection-enable",
            ),
            (
                "endpoint_monitoring_cloud_sync_enabled",
                "org-securityTools-cloud-detection-enable",
            ),
            (
                "endpoint_monitoring_browser_and_applications_enabled",
                "org-securityTools-open-file-detection-enable",
            ),
            (
                "endpoint_monitoring_file_metadata_collection_enabled",
                "device_fileForensics_enabled",
            ),
        ],
    )
    def test_org_settings_set_endpoint_monitoring_sub_categories_when_endpoint_monitoring_disabled_sets_endpoint_monitoring_enabled(
        self, param, org_settings_dict
    ):
        # Enabling any sub-category while the master switch is off must also
        # turn the master switch on.
        attr, key = param
        t_setting = deepcopy(TEST_T_SETTINGS_DICT)
        settings = deepcopy(org_settings_dict)
        t_setting["org-securityTools-enable"]["value"] = "false"
        org_settings = OrgSettings(settings, t_setting)
        setattr(org_settings, attr, True)
        packet_keys = [packet["key"] for packet in org_settings.packets]
        assert key in packet_keys
        assert "org-securityTools-enable" in packet_keys
        for packet in org_settings.packets:
            if packet["key"] == "org-securityTools-enable":
                assert packet["value"] == "true"
            if packet["key"] == key:
                assert packet["value"] == "true"

    @pytest.mark.parametrize(
        "param",
        [
            param(
                name="endpoint_monitoring_file_metadata_scan_enabled",
                new_val=True,
                expected_stored_val="true",
                dict_location="device_fileForensics_scan_enabled",
            ),
            param(
                name="endpoint_monitoring_file_metadata_ingest_scan_enabled",
                new_val=True,
                expected_stored_val="true",
                dict_location="device_fileForensics_enqueue_scan_events_during_ingest",
            ),
            param(
                name="endpoint_monitoring_background_priority_enabled",
                new_val=True,
                expected_stored_val="true",
                dict_location="device_background_priority_enabled",
            ),
            param(
                name="web_restore_enabled",
                new_val=True,
                expected_stored_val="true",
                dict_location="device_webRestore_enabled",
            ),
        ],
    )
    def test_org_settings_set_independent_t_setting_properties(
        self, param, org_settings_dict
    ):
        # These t-settings toggle independently of the master monitoring switch:
        # setting True then False must produce matching "true"/"false" packets.
        t_setting = deepcopy(TEST_T_SETTINGS_DICT)
        settings = deepcopy(org_settings_dict)
        org_settings = OrgSettings(settings, t_setting)
        setattr(org_settings, param.name, param.new_val)
        packet_keys = [packet["key"] for packet in org_settings.packets]
        assert param.dict_location in packet_keys
        for packet in org_settings.packets:
            if packet["key"] == param.dict_location:
                assert packet["value"] == "true"
        setattr(org_settings, param.name, False)
        packet_keys = [packet["key"] for packet in org_settings.packets]
        assert param.dict_location in packet_keys
        for packet in org_settings.packets:
            if packet["key"] == param.dict_location:
                assert packet["value"] == "false"

    def test_missing_t_settings_return_none_when_accessed_by_property(
        self, org_settings_dict
    ):
        # TEST_T_SETTINGS_DICT does not define these keys, so the properties
        # must degrade to None rather than raise.
        org_settings = OrgSettings(org_settings_dict, TEST_T_SETTINGS_DICT)
        assert org_settings.endpoint_monitoring_file_metadata_scan_enabled is None
        assert (
            org_settings.endpoint_monitoring_file_metadata_ingest_scan_enabled is None
        )
        assert org_settings.endpoint_monitoring_background_priority_enabled is None
        assert org_settings.endpoint_monitoring_custom_applications_win is None
        assert org_settings.endpoint_monitoring_custom_applications_mac is None
        assert (
            org_settings.endpoint_monitoring_file_metadata_collection_exclusions is None
        )

    @pytest.mark.parametrize(
        "param",
        [
            param(
                name="org_name",
                new_val="Org Name Updated",
                expected_stored_val="Org Name Updated",
                dict_location=["orgName"],
            ),
            param(
                name="external_reference",
                new_val="Updated Reference",
                expected_stored_val="Updated Reference",
                dict_location=["orgExtRef"],
            ),
            param(
                name="notes",
                new_val="Updated Note",
                expected_stored_val="Updated Note",
                dict_location=["notes"],
            ),
            param(
                name="maximum_user_subscriptions",
                new_val=99,
                expected_stored_val=99,
                dict_location=["settings", "maxSeats"],
            ),
            param(
                name="org_backup_quota",
                new_val=42,
                expected_stored_val=ONEGB * 42,
                dict_location=["settings", "maxBytes"],
            ),
            param(
                name="user_backup_quota",
                new_val=42,
                expected_stored_val=ONEGB * 42,
                dict_location=["settings", "defaultUserMaxBytes"],
            ),
            param(
                name="web_restore_admin_limit",
                new_val=42,
                expected_stored_val=42,
                dict_location=["settings", "webRestoreAdminLimitMb"],
            ),
            param(
                name="web_restore_user_limit",
                new_val=42,
                expected_stored_val=42,
                dict_location=["settings", "webRestoreUserLimitMb"],
            ),
            param(
                name="backup_warning_email_days",
                new_val=14,
                expected_stored_val=14,
                dict_location=["settings", "warnInDays"],
            ),
            param(
                name="backup_critical_email_days",
                new_val=25,
                expected_stored_val=25,
                dict_location=["settings", "alertInDays"],
            ),
            param(
                name="backup_alert_recipient_emails",
                new_val="<EMAIL>",  # test string input
                expected_stored_val=["<EMAIL>"],
                dict_location=["settings", "recipients"],
            ),
            param(
                name="backup_alert_recipient_emails",
                new_val=["<EMAIL>", "<EMAIL>"],  # test list input
                expected_stored_val=["<EMAIL>", "<EMAIL>"],
                dict_location=["settings", "recipients"],
            ),
        ],
    )
    def test_org_settings_setting_mutable_property_updates_dict_correctly_and_registers_changes(
        self, param, org_settings_dict
    ):
        org_settings = OrgSettings(org_settings_dict, TEST_T_SETTINGS_DICT)
        setattr(org_settings, param.name, param.new_val)
        assert (
            get_val(org_settings.data, param.dict_location) == param.expected_stored_val
        )
        assert param.name in org_settings.changes
class TestOrgDeviceSettingsDefaultsBackupSets(object):
    """Tests for backup-set destination and file-selection handling on the
    org device-defaults object.

    Fix in this revision: ``test_backup_set_add_destination_when_destination_available``
    had an assertion-less ``for`` loop (the bare ``destination in ...``
    expression was evaluated and discarded, so the membership check never
    ran) and added the destination to backup set 0 while asserting on set 1.
    The test now adds to set 1 and actually asserts the expected state.
    """

    def test_backup_set_destinations_property_returns_expected_value(
        self, org_settings_dict
    ):
        backup_set_0_expected_destinations = {}
        backup_set_1_expected_destinations = {
            "43": "PROe Cloud, US <LOCKED>",
            "673679195225718785": "PROe Cloud, AMS",
        }
        org_settings = OrgSettings(org_settings_dict, TEST_T_SETTINGS_DICT)
        assert (
            org_settings.device_defaults.backup_sets[0].destinations
            == backup_set_0_expected_destinations
        )
        assert (
            org_settings.device_defaults.backup_sets[1].destinations
            == backup_set_1_expected_destinations
        )

    def test_backup_set_add_destination_when_destination_available(
        self, org_settings_dict
    ):
        org_settings = OrgSettings(org_settings_dict, TEST_T_SETTINGS_DICT)
        org_settings.device_defaults.backup_sets[1].add_destination(632540230984925185)
        # Name for the new id comes from available_destinations; it is not
        # locked, so no " <LOCKED>" suffix.
        expected_destinations_property = {
            "43": "PROe Cloud, US <LOCKED>",
            "632540230984925185": "PROe Cloud, US - West",
            "673679195225718785": "PROe Cloud, AMS",
        }
        expected_destinations_list = [
            {"@id": "43", "@locked": "true"},
            {"@id": "632540230984925185"},
            {"@id": "673679195225718785"},
        ]
        assert (
            org_settings.device_defaults.backup_sets[1].destinations
            == expected_destinations_property
        )
        for destination in expected_destinations_list:
            assert destination in org_settings.device_defaults["settings"][
                "serviceBackupConfig"
            ]["backupConfig"]["backupSets"]["backupSet"][1]["destinations"]

    def test_backup_set_add_destination_when_destination_not_available_raises(
        self, org_settings_dict
    ):
        org_settings = OrgSettings(org_settings_dict, TEST_T_SETTINGS_DICT)
        expected_destinations_property = {
            "43": "PROe Cloud, US <LOCKED>",
            "673679195225718785": "PROe Cloud, AMS",
        }
        with pytest.raises(Py42Error):
            org_settings.device_defaults.backup_sets[1].add_destination(404)
        # Destinations must be unchanged after the failed add.
        assert (
            org_settings.device_defaults.backup_sets[1].destinations
            == expected_destinations_property
        )

    def test_backup_set_remove_destination_when_destination_available(
        self, org_settings_dict
    ):
        org_settings = OrgSettings(org_settings_dict, TEST_T_SETTINGS_DICT)
        expected_destinations_property = {"673679195225718785": "PROe Cloud, AMS"}
        expected_destinations_dict = [{"@id": "673679195225718785"}]
        org_settings.device_defaults.backup_sets[1].remove_destination(43)
        assert (
            org_settings.device_defaults.backup_sets[1].destinations
            == expected_destinations_property
        )
        assert (
            org_settings.device_defaults["settings"]["serviceBackupConfig"][
                "backupConfig"
            ]["backupSets"]["backupSet"][1]["destinations"]
            == expected_destinations_dict
        )

    def test_backup_set_remove_destination_when_destination_not_available_raises(
        self, org_settings_dict
    ):
        org_settings = OrgSettings(org_settings_dict, TEST_T_SETTINGS_DICT)
        expected_destinations_property = {
            "43": "PROe Cloud, US <LOCKED>",
            "673679195225718785": "PROe Cloud, AMS",
        }
        with pytest.raises(Py42Error):
            org_settings.device_defaults.backup_sets[1].remove_destination(404)
        assert (
            org_settings.device_defaults.backup_sets[1].destinations
            == expected_destinations_property
        )

    def test_backup_set_lock_destination(self, org_settings_dict):
        org_settings = OrgSettings(org_settings_dict, TEST_T_SETTINGS_DICT)
        expected_destinations_property = {
            "43": "PROe Cloud, US <LOCKED>",
            "673679195225718785": "PROe Cloud, AMS <LOCKED>",
        }
        expected_destinations_dict = [
            {"@id": "43", "@locked": "true"},
            {"@id": "673679195225718785", "@locked": "true"},
        ]
        org_settings.device_defaults.backup_sets[1].lock_destination(673679195225718785)
        assert (
            org_settings.device_defaults.backup_sets[1].destinations
            == expected_destinations_property
        )
        assert (
            org_settings.device_defaults["settings"]["serviceBackupConfig"][
                "backupConfig"
            ]["backupSets"]["backupSet"][1]["destinations"]
            == expected_destinations_dict
        )

    def test_backup_set_unlock_destination(self, org_settings_dict):
        org_settings = OrgSettings(org_settings_dict, TEST_T_SETTINGS_DICT)
        expected_destinations_property = {
            "43": "PROe Cloud, US",
            "673679195225718785": "PROe Cloud, AMS",
        }
        expected_destinations_dict = [
            {"@id": "43"},
            {"@id": "673679195225718785"},
        ]
        org_settings.device_defaults.backup_sets[1].unlock_destination(43)
        assert (
            org_settings.device_defaults.backup_sets[1].destinations
            == expected_destinations_property
        )
        assert (
            org_settings.device_defaults["settings"]["serviceBackupConfig"][
                "backupConfig"
            ]["backupSets"]["backupSet"][1]["destinations"]
            == expected_destinations_dict
        )

    def test_backup_set_included_files_returns_expected_values(
        self,
        org_device_defaults_with_empty_values,
        org_device_defaults_with_single_values,
        org_device_defaults_with_multiple_values,
    ):
        # empty pathset
        org_settings = OrgSettings(
            org_device_defaults_with_empty_values, TEST_T_SETTINGS_DICT
        )
        assert org_settings.device_defaults.backup_sets[0].included_files == []
        # single path pathset
        org_settings = OrgSettings(
            org_device_defaults_with_single_values, TEST_T_SETTINGS_DICT
        )
        assert org_settings.device_defaults.backup_sets[0].included_files == [
            TEST_HOME_DIR
        ]
        # multiple path pathset
        org_settings = OrgSettings(
            org_device_defaults_with_multiple_values, TEST_T_SETTINGS_DICT
        )
        assert org_settings.device_defaults.backup_sets[0].included_files == [
            TEST_HOME_DIR,
            TEST_EXTERNAL_DOCUMENTS_DIR,
        ]

    def test_backup_set_included_files_append_produces_expected_pathset_value_and_registers_change(
        self, org_device_defaults_with_multiple_values
    ):
        org_settings = OrgSettings(
            org_device_defaults_with_multiple_values, TEST_T_SETTINGS_DICT
        )
        # Unlock the backup set so the file selection is mutable.
        org_settings.device_defaults.backup_sets[0].pop("@locked")
        expected_path_list = [
            {"@include": TEST_HOME_DIR, "@und": "false"},
            {"@include": TEST_EXTERNAL_DOCUMENTS_DIR, "@und": "false"},
            {"@include": TEST_ADDED_PATH, "@und": "false"},
            {"@exclude": TEST_PHOTOS_DIR, "@und": "false"},
        ]
        org_settings.device_defaults.backup_sets[0].included_files.append(
            TEST_ADDED_PATH
        )
        actual_path_list = org_settings.device_defaults["settings"][
            "serviceBackupConfig"
        ]["backupConfig"]["backupSets"]["backupSet"][0]["backupPaths"]["pathset"][
            "paths"
        ][
            "path"
        ]
        assert actual_path_list == expected_path_list
        assert "included_files" in org_settings.device_defaults.changes
        assert (
            "-> {}".format(
                [TEST_HOME_DIR, TEST_EXTERNAL_DOCUMENTS_DIR, TEST_ADDED_PATH]
            )
            in org_settings.device_defaults.changes["included_files"]
        )

    def test_backup_set_included_files_remove_produces_expected_pathset_value(
        self, org_device_defaults_with_multiple_values
    ):
        org_settings = OrgSettings(
            org_device_defaults_with_multiple_values, TEST_T_SETTINGS_DICT
        )
        org_settings.device_defaults.backup_sets[0].pop("@locked")
        expected_path_list = [
            {"@include": TEST_HOME_DIR, "@und": "false"},
            {"@exclude": TEST_PHOTOS_DIR, "@und": "false"},
        ]
        org_settings.device_defaults.backup_sets[0].included_files.remove(
            TEST_EXTERNAL_DOCUMENTS_DIR
        )
        actual_path_list = org_settings.device_defaults["settings"][
            "serviceBackupConfig"
        ]["backupConfig"]["backupSets"]["backupSet"][0]["backupPaths"]["pathset"][
            "paths"
        ][
            "path"
        ]
        assert actual_path_list == expected_path_list
        assert "included_files" in org_settings.device_defaults.changes
        assert (
            "-> {}".format([TEST_HOME_DIR])
            in org_settings.device_defaults.changes["included_files"]
        )

    def test_backup_set_excluded_files_returns_expected_values(
        self,
        org_device_defaults_with_empty_values,
        org_device_defaults_with_multiple_values,
    ):
        # empty file selection
        org_settings = OrgSettings(
            org_device_defaults_with_empty_values, TEST_T_SETTINGS_DICT
        )
        assert org_settings.device_defaults.backup_sets[0].excluded_files == []
        # multiple path pathset
        org_settings = OrgSettings(
            org_device_defaults_with_multiple_values, TEST_T_SETTINGS_DICT
        )
        assert org_settings.device_defaults.backup_sets[0].excluded_files == [
            TEST_PHOTOS_DIR
        ]

    def test_backup_set_excluded_files_append_produces_expected_pathset_value_and_registers_change(
        self, org_device_defaults_with_multiple_values
    ):
        org_settings = OrgSettings(
            org_device_defaults_with_multiple_values, TEST_T_SETTINGS_DICT
        )
        org_settings.device_defaults.backup_sets[0].pop("@locked")
        org_settings.device_defaults.backup_sets[0].excluded_files.append(
            TEST_ADDED_EXCLUDED_PATH
        )
        expected_path_list = [
            {"@include": TEST_HOME_DIR, "@und": "false"},
            {"@exclude": TEST_PHOTOS_DIR, "@und": "false"},
            {"@exclude": TEST_ADDED_EXCLUDED_PATH, "@und": "false"},
        ]
        actual_path_list = org_settings.device_defaults["settings"][
            "serviceBackupConfig"
        ]["backupConfig"]["backupSets"]["backupSet"][0]["backupPaths"]["pathset"][
            "paths"
        ][
            "path"
        ]
        for path in expected_path_list:
            assert path in actual_path_list
        assert "excluded_files" in org_settings.device_defaults.changes
        assert (
            "-> {}".format([TEST_PHOTOS_DIR, TEST_ADDED_EXCLUDED_PATH])
            in org_settings.device_defaults.changes["excluded_files"]
        )

    def test_backup_set_excluded_files_remove_produces_expected_pathset_value(
        self, org_device_defaults_with_multiple_values
    ):
        org_settings = OrgSettings(
            org_device_defaults_with_multiple_values, TEST_T_SETTINGS_DICT
        )
        org_settings.device_defaults.backup_sets[0].pop("@locked")
        expected_path_list = [
            {"@include": TEST_HOME_DIR, "@und": "false"},
        ]
        org_settings.device_defaults.backup_sets[0].excluded_files.remove(
            TEST_PHOTOS_DIR
        )
        actual_path_list = org_settings.device_defaults["settings"][
            "serviceBackupConfig"
        ]["backupConfig"]["backupSets"]["backupSet"][0]["backupPaths"]["pathset"][
            "paths"
        ][
            "path"
        ]
        for path in expected_path_list:
            assert path in actual_path_list
        assert "excluded_files" in org_settings.device_defaults.changes
        assert (
            "-> {}".format([]) in org_settings.device_defaults.changes["excluded_files"]
        )

    def test_backup_set_filename_exclusions_returns_expected_list_results(
        self,
        org_device_defaults_with_empty_values,
        org_device_defaults_with_single_values,
        org_device_defaults_with_multiple_values,
    ):
        # empty exclude list
        org_settings = OrgSettings(
            org_device_defaults_with_empty_values, TEST_T_SETTINGS_DICT
        )
        assert org_settings.device_defaults.backup_sets[0].filename_exclusions == []
        # single exclude
        org_settings = OrgSettings(
            org_device_defaults_with_single_values, TEST_T_SETTINGS_DICT
        )
        assert org_settings.device_defaults.backup_sets[0].filename_exclusions == [
            PHOTOS_REGEX
        ]
        # multiple excludes
        org_settings = OrgSettings(
            org_device_defaults_with_multiple_values, TEST_T_SETTINGS_DICT
        )
        assert org_settings.device_defaults.backup_sets[0].filename_exclusions == [
            PHOTOS_REGEX,
            PICTURES_REGEX,
        ]

    def test_backup_set_filename_exclusions_append_produces_expected_values(
        self,
        org_device_defaults_with_empty_values,
        org_device_defaults_with_single_values,
        org_device_defaults_with_multiple_values,
    ):
        # NOTE(review): this method indexes org_settings["deviceDefaults"] and
        # reads org_settings.changes directly, unlike the other tests which go
        # through org_settings.device_defaults — presumably OrgSettings proxies
        # both; confirm against the OrgSettings implementation.
        # empty starting filename exclusions
        org_settings = OrgSettings(
            org_device_defaults_with_empty_values, TEST_T_SETTINGS_DICT
        )
        org_settings.device_defaults.backup_sets[0].pop("@locked")
        org_settings.device_defaults.backup_sets[0].filename_exclusions.append(
            PHOTOS_REGEX
        )
        assert org_settings["deviceDefaults"]["serviceBackupConfig"]["backupConfig"][
            "backupSets"
        ]["backupSet"][0]["backupPaths"]["excludeUser"]["patternList"]["pattern"] == [
            {"@regex": PHOTOS_REGEX}
        ]
        assert "filename_exclusions" in org_settings.changes
        assert PHOTOS_REGEX in org_settings.changes["filename_exclusions"]
        # single starting filename exclusion
        org_settings = OrgSettings(
            org_device_defaults_with_single_values, TEST_T_SETTINGS_DICT
        )
        org_settings.device_defaults.backup_sets[0].pop("@locked")
        org_settings.device_defaults.backup_sets[0].filename_exclusions.append(
            PICTURES_REGEX
        )
        assert org_settings["deviceDefaults"]["serviceBackupConfig"]["backupConfig"][
            "backupSets"
        ]["backupSet"][0]["backupPaths"]["excludeUser"]["patternList"]["pattern"] == [
            {"@regex": PHOTOS_REGEX},
            {"@regex": PICTURES_REGEX},
        ]
        assert "filename_exclusions" in org_settings.changes
        assert PHOTOS_REGEX in org_settings.changes["filename_exclusions"]
        assert PICTURES_REGEX in org_settings.changes["filename_exclusions"]
        # multiple starting filename exclusions
        NEW_REGEX = ".*/Logs/"
        org_settings = OrgSettings(
            org_device_defaults_with_multiple_values, TEST_T_SETTINGS_DICT
        )
        org_settings.device_defaults.backup_sets[0].pop("@locked")
        org_settings.device_defaults.backup_sets[0].filename_exclusions.append(
            NEW_REGEX
        )
        assert org_settings["deviceDefaults"]["serviceBackupConfig"]["backupConfig"][
            "backupSets"
        ]["backupSet"][0]["backupPaths"]["excludeUser"]["patternList"]["pattern"] == [
            {"@regex": PHOTOS_REGEX},
            {"@regex": PICTURES_REGEX},
            {"@regex": NEW_REGEX},
        ]
        assert "filename_exclusions" in org_settings.changes
        assert PHOTOS_REGEX in org_settings.changes["filename_exclusions"]
        assert PICTURES_REGEX in org_settings.changes["filename_exclusions"]
        assert NEW_REGEX in org_settings.changes["filename_exclusions"]
| 1.820313 | 2 |
Qianhui_Liang_1.001_final/qh_yelp/naive_bayes.py | lqh-0514/1.01_final_foodie_mate | 0 | 12772233 | <filename>Qianhui_Liang_1.001_final/qh_yelp/naive_bayes.py<gh_stars>0
# coding: utf-8
# In[1]:
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 20:13:10 2018
@author: qianhuil
"""
import pymongo
from pymongo import MongoClient
import json
import nltk
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
from nltk.collocations import *
import string
import pickle
# In[2]:
def read_in_fromdb():
    """Fetch every review document from the local MongoDB collection
    ``olive.yelp_reviews`` and return them as a list of dicts.
    """
    # Default MongoClient() connects to localhost:27017.
    client = MongoClient()
    db = client.olive
    collection = db.yelp_reviews
    # find() with no filter returns a cursor over the whole collection;
    # materialize it so callers can iterate more than once.
    documents = [doc for doc in collection.find()]
    return documents
def tokenize(set_data):
    """Flatten review texts into a list of cleaned, lowercase word tokens.

    Groups reviews by ``review_rating`` first (preserving first-seen rating
    order), then emits tokens rating by rating.

    Bug fix: the original checked ``isalpha()`` *before* stripping
    punctuation, so the ``replace`` calls were dead code and tokens such as
    "good!" were dropped entirely. Punctuation is now stripped first, then
    purely-alphabetic tokens are kept.

    :param set_data: iterable of dicts with "review_rating" and
        "review_detail" keys (as stored in the yelp_reviews collection)
    :return: list of lowercase alphabetic tokens
    """
    rate_dict = {}
    for entry in set_data:
        rate_dict.setdefault(entry["review_rating"], []).append(
            entry["review_detail"]
        )
    review = []  # accumulated tokens across all ratings
    for rating in rate_dict:
        for text in rate_dict[rating]:
            for raw in text.split():
                token = raw.lower().strip()
                # Strip the punctuation the original intended to remove.
                for ch in ('!', '.', '?', '"'):
                    token = token.replace(ch, "")
                if token.isalpha():
                    review.append(token)
        print("appending tokens for rating = {} successful".format(rating))
    return review
def freq(set_data):
    """Build a bag-of-words frequency distribution over all review tokens,
    with English stopwords removed.

    :param set_data: review documents, as accepted by :func:`tokenize`
    :return: an ``nltk.FreqDist`` without stopword entries
    """
    freq_dist = nltk.FreqDist(tokenize(set_data))
    cleaned = freq_dist.copy()
    stop_set = set(stopwords.words("english"))
    # Delete only the tokens that actually appear in both the corpus and
    # the stopword list.
    for token in set(freq_dist.keys()) & stop_set:
        del cleaned[token]
    print("stopword cleaning success")
    return cleaned
def clean_token(set_data):
    """Tokenize the reviews and drop English stopwords, preserving order.

    Unlike :func:`freq`, this keeps token positions, so the result is
    suitable for N-gram extraction.
    """
    stop_words = set(stopwords.words("english"))
    return [token for token in tokenize(set_data) if token not in stop_words]
def feature(set_data, feature_size=2000):
    """Return the ``feature_size`` most frequent stopword-cleaned tokens.

    Bug fix: the original used ``list(freq(...))[:feature_size]``, which
    slices in the FreqDist's iteration order rather than by frequency.
    ``most_common`` makes the documented "top N frequent words" behavior
    explicit.
    """
    return [word for word, _count in freq(set_data).most_common(feature_size)]
def feature_2(set_data, feature_size=100):
    """Extract the top bigram collocations as space-joined strings.

    Stopword-containing bigrams and bigrams occurring fewer than twice are
    filtered out; ranking is by raw frequency.
    """
    stop_words = set(stopwords.words("english"))
    tokens = tokenize(set_data)
    bigram_measures = nltk.collocations.BigramAssocMeasures()
    finder = BigramCollocationFinder.from_words(tokens)
    finder.apply_word_filter(lambda w: w in stop_words)
    finder.apply_freq_filter(2)
    # Scored frequencies were computed (but unused) in the original; kept
    # for behavioral parity.
    scored = finder.score_ngrams(bigram_measures.raw_freq)
    best_bigrams = finder.nbest(bigram_measures.raw_freq, feature_size)
    joined = [" ".join(pair) for pair in best_bigrams]
    print(joined[:100])
    return joined
def feature_3(set_data, feature_size=100):
    """Extract the top trigram collocations as space-joined strings.

    Filters stopword members, trigrams with 'and' at either end, and
    trigrams occurring fewer than twice; ranking is by raw frequency.
    """
    stop_words = set(stopwords.words("english"))
    tokens = tokenize(set_data)
    trigram_measures = nltk.collocations.TrigramAssocMeasures()
    finder = TrigramCollocationFinder.from_words(tokens)
    finder.apply_word_filter(lambda w: w in stop_words)
    finder.apply_ngram_filter(lambda w1, w2, w3: 'and' in (w1, w3))
    finder.apply_freq_filter(2)
    # Scored frequencies were computed (but unused) in the original; kept
    # for behavioral parity.
    scored = finder.score_ngrams(trigram_measures.raw_freq)
    best_trigrams = finder.nbest(trigram_measures.raw_freq, feature_size)
    joined = [' '.join(tri) for tri in best_trigrams]
    print(joined[:100])
    return joined
def document_features(word_features, review):
    """Count-valued feature vector: occurrences of each feature string in
    ``review`` (a string or a token list — anything with ``.count``).
    """
    return {
        "contains({})".format(word): review.count(word) for word in word_features
    }
def document_features2(word_features, review):
    """Presence-valued feature vector: the string "True"/"False" per feature,
    based on membership in ``review`` (substring test when ``review`` is a
    string).
    """
    return {
        "contains({})".format(word): "True" if word in review else "False"
        for word in word_features
    }
def train_model_count(feature_function, feature_size):
    """Train a Naive Bayes rating classifier with count-valued features over
    the raw review text (no word tokenization).

    :param feature_function: callable(set_data, feature_size) -> feature list
        (e.g. :func:`feature`, :func:`feature_2`, :func:`feature_3`)
    :param feature_size: number of features to extract
    :return: the trained ``nltk.NaiveBayesClassifier`` (added for
        consistency with :func:`train_model_yesno`, so callers can pickle it;
        previous callers that ignored the return value are unaffected)
    """
    set_data = read_in_fromdb()
    word_features = feature_function(set_data, feature_size)
    featuresets = []
    for entry in set_data:
        featuresets.append(
            (
                document_features(word_features, entry["review_detail"]),
                entry["review_rating"],
            )
        )
    # First 100 documents are held out for evaluation.
    train_set, test_set = featuresets[100:], featuresets[:100]
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    print(nltk.classify.accuracy(classifier, test_set))
    classifier.show_most_informative_features(30)
    return classifier
def train_model_count_words(feature_function, feature_size):
    """Train a Naive Bayes rating classifier with count-valued features over
    word-tokenized (lowercased) review text.

    :param feature_function: callable(set_data, feature_size) -> feature list
    :param feature_size: number of features to extract
    :return: the trained ``nltk.NaiveBayesClassifier`` (added for
        consistency with :func:`train_model_yesno`; backward compatible)
    """
    set_data = read_in_fromdb()
    word_features = feature_function(set_data, feature_size)
    featuresets = []
    for entry in set_data:
        tokens = [word.lower() for word in word_tokenize(entry["review_detail"])]
        featuresets.append(
            (document_features(word_features, tokens), entry["review_rating"])
        )
    # First 100 documents are held out for evaluation.
    train_set, test_set = featuresets[100:], featuresets[:100]
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    print(nltk.classify.accuracy(classifier, test_set))
    classifier.show_most_informative_features(30)
    return classifier
def train_model_yesno(feature_function, feature_size):
    """Train a Naive Bayes rating classifier with binary ("True"/"False")
    presence features over the raw review text.

    :param feature_function: callable(set_data, feature_size) -> feature list
    :param feature_size: number of features to extract
    :return: the trained ``nltk.NaiveBayesClassifier``
    """
    set_data = read_in_fromdb()
    word_features = feature_function(set_data, feature_size)
    featuresets = [
        (
            document_features2(word_features, entry["review_detail"]),
            entry["review_rating"],
        )
        for entry in set_data
    ]
    # First 100 documents are held out for evaluation.
    test_set, train_set = featuresets[:100], featuresets[100:]
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    print(nltk.classify.accuracy(classifier, test_set))
    classifier.show_most_informative_features(30)
    return classifier
def train_classifier(feature_size1, feature_size2=100):
    """Train one Naive Bayes classifier over three feature extractors.

    For every review, one feature set is produced by each of ``feature``,
    ``feature_2`` and ``feature_3``; all three are pooled into a single
    corpus. The first 100 pooled feature sets are held out as the test
    set; accuracy is printed, the 30 most informative features shown, and
    the classifier returned.

    :param feature_size1: feature count for the primary extractor.
    :param feature_size2: feature count for the secondary extractors.
    :return: the trained ``nltk.NaiveBayesClassifier``.
    """
    set_data = read_in_fromdb()
    print("extracting the features for corpus")
    word_features = feature(set_data, feature_size1)
    word_features2 = feature_2(set_data, feature_size2)
    word_features3 = feature_3(set_data, feature_size2)
    featuresets = []
    for review in set_data:
        text = review["review_detail"]
        rating = review["review_rating"]
        # Three feature views of the same review, appended in order.
        featuresets.append((document_features(word_features, text), rating))
        featuresets.append((document_features2(word_features2, text), rating))
        featuresets.append((document_features3(word_features3, text), rating))
    train_set, test_set = featuresets[100:], featuresets[:100]
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    print(nltk.classify.accuracy(classifier, test_set))
    classifier.show_most_informative_features(30)
    return classifier
# In[11]:
# In[ ]:
if __name__ == "__main__":
    # Train one boolean-feature classifier per feature size and pickle it.
    feature_sizes = [2000, 3000, 4000, 5000]
    for feature_size in feature_sizes:
        classifier = train_model_yesno(feature, feature_size)
        # Context manager guarantees the pickle file is closed even if
        # dumping raises (the previous open/close pair leaked on error).
        with open('classifier_np_ft{}_bin.pickle'.format(feature_size), "wb") as outfile:
            pickle.dump(classifier, outfile)
        print("feature_size", feature_size)
# In[10]:
#classifier.most_informative_features(n=10)
# In[52]:
#classifier.show_most_informative_features(n=10)
# In[46]:
#train_model_count_words(feature, 3000, "neptune_oyster_test_data.json")
# In[33]:
#train_model_count(feature_2, 500, "neptune_oyster_test_data.json")
# In[26]:
#train_model_count(feature, 10, "full_reviews.json")
| 2.546875 | 3 |
tests/data/pattern_matching_extras.py | timsampsoncujo/black | 2 | 12772234 | import match
# NOTE(review): this looks like formatter test data exercising PEP 634
# ``match`` statements and the soft-keyword status of ``match``/``case``
# (both are also used as ordinary identifiers below). The unusual code is
# intentional fixture input -- do not "fix" it.
match something:
    case [a as b]:
        print(b)
    case [a as b, c, d, e as f]:
        print(f)
    case Point(a as b):
        print(b)
    case Point(int() as x, int() as y):
        print(x, y)
match = 1
case: int = re.match(something)
match re.match(case):
    case type("match", match):
        pass
    case match:
        pass
def func(match: case, case: match) -> case:
    match Something():
        case another:
            ...
        case func(match, case):
            ...
Python3-functions/write_into_local_file.py | ipetel/code-snippets | 1 | 12772235 | <reponame>ipetel/code-snippets<filename>Python3-functions/write_into_local_file.py<gh_stars>1-10
"""
# .___ .___ __________ __ .__
# | | __| _/____ ____ \______ \ _____/ |_ ____ | |
# | |/ __ |\__ \ / \ | ___// __ \ __\/ __ \| |
# | / /_/ | / __ \| | \ | | \ ___/| | \ ___/| |__
# |___\____ |(____ /___| / |____| \___ >__| \___ >____/
# \/ \/ \/ \/ \/
#
Code Description: Write Data into File
Author: <NAME>
Date: Dec 2020
Version: 1.0
Script Logic: None
Environment Variables: None
"""
import logging
from pprint import pprint
from functools import wraps
# ___ logging config
LOG_LEVEL = logging.INFO
logging.basicConfig(level=LOG_LEVEL, format='### %(levelname)s ### - line: %(lineno)s, msg: %(message)s')
logger = logging.getLogger()
# ___ function_decorator
def try_except_decorator(function):
    """Wrap *function* so failures are logged instead of propagating.

    On success the wrapped call's result is returned and an info line is
    logged; on any exception the error is logged and ``None`` is returned.
    """
    @wraps(function)
    def wrapper(*args, **kwargs):
        try:
            result = function(*args, **kwargs)
            logger.info(f'{function.__name__}() function operation was completed successfully')
        except Exception as err:
            logger.error(f'{err}')
        else:
            return result
    return wrapper
# ___ Write List of strings into CSV file
# assuming list of strings that each of them ends with '\n'
@try_except_decorator
def write_object_into_csv(file_path, data):
    """Write an iterable of newline-terminated strings to *file_path*."""
    with open(file_path, 'w') as out:
        out.writelines(data)
# Sample data; the invocation below is intentionally disabled.
file_path = 'file_1.csv'
data = ['a1\n', 'b2\n', 'c3\n']
# write_object_into_csv(file_path, data)
# ___ Write List/Dict into JSON file
import json
@try_except_decorator
def write_object_into_json(file_path, data):
    """Serialize *data* (a list or dict) as JSON into *file_path*."""
    with open(file_path, 'w') as out:
        json.dump(data, out)
# Sample data; the call runs on import, matching the original script.
file_path = 'file_1.json'
data = [{'name': 'a1\n'}, {'name': 'b2\n'}, {'name': 'c3\n'}]
write_object_into_json(file_path, data)
providers/twitter.py | goru/oauth2-proxy.py | 0 | 12772236 | import logging
import requests
logger = logging.getLogger(__name__)
# https://developer.twitter.com/en/docs/authentication/oauth-2-0/user-access-token
class TwitterProvider:
    """Twitter OAuth 2.0 provider implementing the PKCE user-token flow.

    See
    https://developer.twitter.com/en/docs/authentication/oauth-2-0/user-access-token

    The ``session`` objects passed in are expected to carry the PKCE state
    (``state``, ``code_challenge``, ``code_challenge_method``,
    ``code_verifier``) and to receive the issued tokens.
    """

    def __init__(self, configs):
        """Pull the client settings out of *configs* (a mapping)."""
        self.client_id = configs['client_id']
        self.redirect_uri = configs['redirect_uri']
        self.scope = configs['scope']
        # Twitter user ids permitted through check_user().
        self.accept_users = configs['accept_users']

    def get_authorize_uri(self, session):
        """Return the authorization URL to redirect the end user to."""
        uri = (f'https://twitter.com/i/oauth2/authorize?response_type=code&'
               + f'client_id={self.client_id}&redirect_uri={self.redirect_uri}&'
               + f'scope={self.scope}&state={session.state}&'
               + f'code_challenge={session.code_challenge}&'
               + f'code_challenge_method={session.code_challenge_method}')
        logger.info(uri)
        return uri

    def _store_token_response(self, resp, session):
        # Common tail of the initial token exchange and the refresh:
        # log the payload, then persist expiry and both tokens.
        # (The original parsed resp.json() four times per call.)
        payload = resp.json()
        logger.info(payload)
        session.set_expires_in(payload['expires_in'])
        session.access_token = payload['access_token']
        session.refresh_token = payload['refresh_token']

    def get_token(self, args, session):
        """Exchange the authorization code in *args* for a token pair.

        *args* is the callback query-string mapping; it must contain a
        ``code`` and a ``state`` matching the session's. Returns True on
        success, False when the callback arguments are missing or the
        state does not match.
        """
        if not {'state', 'code'}.issubset(args.keys()):
            return False
        if args['state'] != session.state:
            return False
        resp = requests.post('https://api.twitter.com/2/oauth2/token',
            data={
                'code': args['code'],
                'grant_type': 'authorization_code',
                'client_id': self.client_id,
                'redirect_uri': self.redirect_uri,
                'code_verifier': session.code_verifier
            }
        )
        self._store_token_response(resp, session)
        return True

    def check_user(self, session):
        """Return True iff the authenticated user id is in accept_users."""
        resp = requests.get('https://api.twitter.com/2/users/me',
            headers={
                'Authorization': f'Bearer {session.access_token}'
            }
        )
        logger.info(resp.json())
        # NOTE(review): assumes a success payload shaped
        # {'data': {'id': ...}}; an API error body would raise KeyError.
        if resp.json()['data']['id'] not in self.accept_users:
            return False
        return True

    def refresh_token(self, session):
        """Use the stored refresh token to obtain a fresh token pair."""
        resp = requests.post('https://api.twitter.com/2/oauth2/token',
            data={
                'refresh_token': session.refresh_token,
                'grant_type': 'refresh_token',
                'client_id': self.client_id
            }
        )
        self._store_token_response(resp, session)
        return True
| 2.765625 | 3 |
SmartObjectFramework/ObjectService/rdflib/term.py | OSIOT/OBSOLETE-IoT-Toolkit | 1 | 12772237 | """
This module defines the different types of terms. Terms are the kinds of
objects that can appear in a quoted/asserted triple. This includes those
that are core to RDF:
* Blank Nodes
* URI References
  * Literals (which consist of a literal value, datatype and language tag)
Those that extend the RDF model into N3:
* Formulae
* Universal Quantifications (Variables)
And those that are primarily for matching against 'Nodes' in the underlying Graph:
* REGEX Expressions
* Date Ranges
* Numerical Ranges
"""
__all__ = [
'bind',
'Node',
'Identifier',
'URIRef',
'BNode',
'Literal',
'Variable',
'Statement',
]
import logging
_LOGGER = logging.getLogger(__name__)
import base64
import threading
from urlparse import urlparse, urljoin, urldefrag
from string import ascii_letters
from random import choice
from itertools import islice
from datetime import date, time, datetime
from isodate import parse_time, parse_date, parse_datetime
from re import sub
try:
from hashlib import md5
except ImportError:
from md5 import md5
import py3compat
b = py3compat.b
class Node(object):
    """
    A Node in the Graph.

    Abstract base for every kind of term that may appear in a triple.
    """
    __slots__ = ()  # value-like terms carry no per-instance __dict__
class Identifier(Node, unicode): # we allow Identifiers to be Nodes in our Graph
    """
    See http://www.w3.org/2002/07/rdf-identifer-terminology/
    regarding choice of terminology.

    Subclasses (URIRef, BNode, Literal, Variable) are immutable unicode
    subtypes, so instances behave as strings unless methods are overridden.
    """
    __slots__ = ()
    def __new__(cls, value):
        # Plain unicode construction; subclasses add their own handling.
        return unicode.__new__(cls, value)
class URIRef(Identifier):
    """
    RDF URI Reference: http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref
    """
    __slots__ = ()
    def __new__(cls, value, base=None):
        # Resolve ``value`` against ``base`` if one is given; urljoin drops
        # a trailing empty fragment, so restore the "#" when the input had
        # one (it is significant for abstract/concrete URI handling below).
        if base is not None:
            ends_in_hash = value.endswith("#")
            value = urljoin(base, value, allow_fragments=1)
            if ends_in_hash:
                if not value.endswith("#"):
                    value += "#"
        #if normalize and value and value != normalize("NFC", value):
        #    raise Error("value must be in NFC normalized form.")
        # Fall back to decoding byte strings as UTF-8.
        try:
            rt = unicode.__new__(cls, value)
        except UnicodeDecodeError:
            rt = unicode.__new__(cls, value, 'utf-8')
        return rt
    def toPython(self):
        # A URIRef's "Python value" is simply its string form.
        return unicode(self)
    def n3(self):
        # N3/Turtle serialization: the URI wrapped in angle brackets.
        return "<%s>" % self
    def concrete(self):
        # Turn the last "#" into "/" (fragment URI -> slash URI).
        if "#" in self:
            return URIRef("/".join(self.rsplit("#", 1)))
        else:
            return self
    def abstract(self):
        # Inverse of concrete(): turn the last "/" into "#", or append a
        # "#" when there is no path component to split.
        if "#" not in self:
            scheme, netloc, path, params, query, fragment = urlparse(self)
            if path:
                return URIRef("#".join(self.rsplit("/", 1)))
            else:
                if not self.endswith("#"):
                    return URIRef("%s#" % self)
                else:
                    return self
        else:
            return self
    def defrag(self):
        # Strip any fragment, returning the bare document URI.
        if "#" in self:
            url, frag = urldefrag(self)
            return URIRef(url)
        else:
            return self
    # Pickle support: rebuild from the plain unicode value.
    def __reduce__(self):
        return (URIRef, (unicode(self),))
    def __getnewargs__(self):
        return (unicode(self), )
    def __ne__(self, other):
        return not self.__eq__(other)
    def __eq__(self, other):
        # Equal only to other URIRefs with the same string value; never
        # equal to plain strings, BNodes or Literals with the same text.
        if isinstance(other, URIRef):
            return unicode(self)==unicode(other)
        else:
            return False
    def __hash__(self):
        # Mix in the class so URIRef("x"), BNode("x") and u"x" hash apart.
        return hash(URIRef) ^ hash(unicode(self))
    if not py3compat.PY3:
        def __str__(self):
            return self.encode()
    def __repr__(self):
        if self.__class__ is URIRef:
            clsName = "rdflib.term.URIRef"
        else:
            clsName = self.__class__.__name__
        return """%s(%s)""" % (clsName, super(URIRef,self).__repr__())
    def md5_term_hash(self):
        """a string of hex that will be the same for two URIRefs that
        are the same. It is not a suitable unique id.
        Supported for backwards compatibility; new code should
        probably just use __hash__
        """
        d = md5(self.encode())
        d.update(b("U"))  # "U" tags the digest as coming from a URIRef
        return d.hexdigest()
def _unique_id():
# Used to read: """Create a (hopefully) unique prefix"""
# now retained merely to leave interal API unchanged.
# From BNode.__new__() below ...
#
# acceptable bnode value range for RDF/XML needs to be
# something that can be serialzed as a nodeID for N3
#
# BNode identifiers must be valid NCNames" _:[A-Za-z][A-Za-z0-9]*
# http://www.w3.org/TR/2004/REC-rdf-testcases-20040210/#nodeID
return "N" # ensure that id starts with a letter
# Adapted from http://icodesnip.com/snippet/python/simple-universally-unique-id-uuid-or-guid
def bnode_uuid():
    """
    Generates a uuid on behalf of Python 2.4

    Python 2.4 has no ``uuid`` module; the returned generator produces an
    md5 hex digest over wall-clock time, a random number and a numeric
    encoding of the host address.
    """
    import os
    import random
    import socket
    from time import time
    from binascii import hexlify
    # Mutable cell so the closure can detect a fork (pid change) later.
    pid = [None]
    try:
        ip = socket.gethostbyname(socket.gethostname())
        ip = long(ip.replace('.', '999').replace(':', '999'))
    except:
        # if we can't get a network address, just imagine one
        ip = long(random.random() * 100000000000000000L)
    def _generator():
        if os.getpid() != pid[0]:
            # Process might have been forked (issue 200), must reseed random:
            try:
                preseed = long(hexlify(os.urandom(16)), 16)
            except NotImplementedError:
                preseed = 0
            seed = long(str(preseed) + str(os.getpid())
                        + str(long(time() * 1000000)) + str(ip))
            random.seed(seed)
            pid[0] = os.getpid()
        t = long(time() * 1000.0)
        r = long(random.random() * 100000000000000000L)
        data = str(t) + ' ' + str(r) + ' ' + str(ip)
        return md5(data).hexdigest()
    return _generator
def uuid4_ncname():
    """Return a callable producing UUID4-based, NCName-safe identifiers.

    ``uuid4().hex`` is 32 hexadecimal characters with no dashes, which is
    a valid NCName tail.
    """
    from uuid import uuid4
    return lambda: uuid4().hex
def _serial_number_generator():
    """Pick the BNode id generator appropriate for this interpreter."""
    import sys
    # Python < 2.5 has no uuid module, so fall back to the homegrown
    # md5-based generator there.
    if sys.version_info[:2] >= (2, 5):
        return uuid4_ncname()
    return bnode_uuid()
class BNode(Identifier):
    """
    Blank Node: http://www.w3.org/TR/rdf-concepts/#section-blank-nodes
    """
    __slots__ = ()
    # NOTE: _sn_gen and _prefix defaults are evaluated once at class
    # definition time, so all default-constructed BNodes in a process
    # share one generator and prefix.
    def __new__(cls, value=None,
                _sn_gen=_serial_number_generator(), _prefix=_unique_id()):
        """
        # only store implementations should pass in a value
        """
        if value==None:
            # so that BNode values do not
            # collide with ones created with a different instance of this module
            # at some other time.
            node_id = _sn_gen()
            value = "%s%s" % (_prefix, node_id)
        else:
            # TODO: check that value falls within acceptable bnode value range
            # for RDF/XML needs to be something that can be serialzed
            # as a nodeID for N3 ?? Unless we require these
            # constraints be enforced elsewhere?
            pass # assert is_ncname(unicode(value)), "BNode identifiers
                 # must be valid NCNames" _:[A-Za-z][A-Za-z0-9]*
                 # http://www.w3.org/TR/2004/REC-rdf-testcases-20040210/#nodeID
        return Identifier.__new__(cls, value)
    def toPython(self):
        # A BNode's "Python value" is simply its identifier string.
        return unicode(self)
    def n3(self):
        # N3/Turtle serialization for a blank node: _:<id>
        return "_:%s" % self
    # Pickle support: rebuild from the plain unicode identifier.
    def __getnewargs__(self):
        return (unicode(self), )
    def __reduce__(self):
        return (BNode, (unicode(self),))
    def __ne__(self, other):
        return not self.__eq__(other)
    def __eq__(self, other):
        """
        >>> BNode("foo")==None
        False
        >>> BNode("foo")==URIRef("foo")
        False
        >>> URIRef("foo")==BNode("foo")
        False
        >>> BNode("foo")!=URIRef("foo")
        True
        >>> URIRef("foo")!=BNode("foo")
        True
        """
        # Equal only to other BNodes with the same identifier string.
        if isinstance(other, BNode):
            return unicode(self)==unicode(other)
        else:
            return False
    def __hash__(self):
        # Mix in the class so BNode("x") and URIRef("x") hash apart.
        return hash(BNode) ^ hash(unicode(self))
    if not py3compat.PY3:
        def __str__(self):
            return self.encode()
    def __repr__(self):
        if self.__class__ is BNode:
            clsName = "rdflib.term.BNode"
        else:
            clsName = self.__class__.__name__
        return """%s('%s')""" % (clsName, str(self))
    def md5_term_hash(self):
        """a string of hex that will be the same for two BNodes that
        are the same. It is not a suitable unique id.
        Supported for backwards compatibility; new code should
        probably just use __hash__
        """
        d = md5(self.encode())
        d.update(b("B"))  # "B" tags the digest as coming from a BNode
        return d.hexdigest()
class Literal(Identifier):
    # The docstring is kept in ``doc`` and run through
    # py3compat.format_doctest_out below so that the embedded doctest
    # output matches the running interpreter (u'' prefixes, long "L"
    # suffixes are substituted via the %(u)s / %(L)s placeholders).
    doc = """
    RDF Literal: http://www.w3.org/TR/rdf-concepts/#section-Graph-Literal

    >>> from rdflib import Literal, XSD
    >>> Literal(1).toPython()
    1%(L)s
    >>> Literal("adsf") > 1
    True
    >>> from rdflib.namespace import XSD
    >>> lit2006 = Literal('2006-01-01',datatype=XSD.date)
    >>> lit2006.toPython()
    datetime.date(2006, 1, 1)
    >>> lit2006 < Literal('2007-01-01',datatype=XSD.date)
    True
    >>> Literal(datetime.utcnow()).datatype
    rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#dateTime')
    >>> oneInt = Literal(1)
    >>> twoInt = Literal(2)
    >>> twoInt < oneInt
    False
    >>> Literal('1') < Literal(1)
    False
    >>> Literal('1') < Literal('1')
    False
    >>> Literal(1) < Literal('1')
    True
    >>> Literal(1) < Literal(2.0)
    True
    >>> Literal(1) < URIRef('foo')
    True
    >>> Literal(1) < 2.0
    True
    >>> Literal(1) < object
    True
    >>> lit2006 < "2007"
    True
    >>> "2005" < lit2006
    True
    >>> x = Literal("2", datatype=XSD.integer)
    >>> x
    rdflib.term.Literal(%(u)s'2', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> Literal(x) == x
    True
    >>> x = Literal("cake", lang="en")
    >>> x
    rdflib.term.Literal(%(u)s'cake', lang='en')
    >>> Literal(x) == x
    True
    """
    __doc__ = py3compat.format_doctest_out(doc)
    # _cmp_value caches the Python-converted value used by the rich
    # comparison methods; it is computed once in __new__.
    __slots__ = ("language", "datatype", "_cmp_value")
    def __new__(cls, value, lang=None, datatype=None):
        # A literal carries either a language tag or a datatype, never both.
        if lang is not None and datatype is not None:
            raise TypeError("A Literal can only have one of lang or datatype, "
               "per http://www.w3.org/TR/rdf-concepts/#section-Graph-Literal")
        if isinstance(value, Literal): # create from another Literal instance
            datatype=datatype or value.datatype
            lang=lang or value.language
            if datatype:
                lang = None
        else:
            # Infer the datatype (and lexical form) from the Python type.
            value, datatype = _castPythonToLiteral(value)
            if datatype:
                lang = None
        if datatype:
            datatype = URIRef(datatype)
        if py3compat.PY3 and isinstance(value, bytes):
            value = value.decode('utf-8')
        try:
            inst = unicode.__new__(cls, value)
        except UnicodeDecodeError:
            inst = unicode.__new__(cls, value, 'utf-8')
        inst.language = lang
        inst.datatype = datatype
        inst._cmp_value = inst._toCompareValue()
        return inst
    # Pickle support: __reduce__ rebuilds via the constructor, which
    # recomputes _cmp_value.
    def __reduce__(self):
        return (Literal, (unicode(self), self.language, self.datatype),)
    def __getstate__(self):
        return (None, dict(language=self.language, datatype=self.datatype))
    def __setstate__(self, arg):
        # NOTE(review): _cmp_value is not restored here; this is only safe
        # because __reduce__ routes unpickling through the constructor.
        _, d = arg
        self.language = d["language"]
        self.datatype = d["datatype"]
    @py3compat.format_doctest_out
    def __add__(self, val):
        """
        >>> Literal(1) + 1
        2%(L)s
        >>> Literal("1") + "1"
        rdflib.term.Literal(%(u)s'11')
        """
        # If the value converts to a plain Python type, add natively;
        # otherwise concatenate as strings, keeping lang/datatype.
        py = self.toPython()
        if isinstance(py, Literal):
            s = super(Literal, self).__add__(val)
            return Literal(s, self.language, self.datatype)
        else:
            return py + val
    @py3compat.format_doctest_out
    def __neg__(self):
        """
        >>> (- Literal(1))
        -1%(L)s
        >>> (- Literal(10.5))
        -10.5
        >>> from rdflib.namespace import XSD
        >>> (- Literal("1", datatype=XSD['integer']))
        -1%(L)s

        Not working:
        #>>> (- Literal("1"))
        #Traceback (most recent call last):
        #  File "<stdin>", line 1, in <module>
        #TypeError: Not a number; rdflib.term.Literal(u'1')
        >>>
        """
        py = self.toPython()
        try:
            return py.__neg__()
        except Exception, e:
            raise TypeError("Not a number; %s" % repr(self))
    @py3compat.format_doctest_out
    def __pos__(self):
        """
        >>> (+ Literal(1))
        1%(L)s
        >>> (+ Literal(-1))
        -1%(L)s
        >>> from rdflib.namespace import XSD
        >>> (+ Literal("-1", datatype=XSD['integer']))
        -1%(L)s

        Not working in Python 3:
        #>>> (+ Literal("1"))
        #Traceback (most recent call last):
        #  File "<stdin>", line 1, in <module>
        #TypeError: Not a number; rdflib.term.Literal(u'1')
        """
        py = self.toPython()
        try:
            return py.__pos__()
        except Exception, e:
            raise TypeError("Not a number; %s" % repr(self))
    @py3compat.format_doctest_out
    def __abs__(self):
        """
        >>> abs(Literal(-1))
        1%(L)s
        >>> from rdflib.namespace import XSD
        >>> abs( Literal("-1", datatype=XSD['integer']))
        1%(L)s

        Not working in Python 3:
        #>>> abs(Literal("1"))
        #Traceback (most recent call last):
        #  File "<stdin>", line 1, in <module>
        #TypeError: Not a number; rdflib.term.Literal(u'1')
        """
        py = self.toPython()
        try:
            return py.__abs__()
        except Exception, e:
            raise TypeError("Not a number; %s" % repr(self))
    @py3compat.format_doctest_out
    def __invert__(self):
        """
        >>> ~(Literal(-1))
        0%(L)s
        >>> from rdflib.namespace import XSD
        >>> ~( Literal("-1", datatype=XSD['integer']))
        0%(L)s

        Not working:
        #>>> ~(Literal("1"))
        #Traceback (most recent call last):
        #  File "<stdin>", line 1, in <module>
        #TypeError: Not a number; rdflib.term.Literal(u'1')
        >>>
        """
        py = self.toPython()
        try:
            return py.__invert__()
        except Exception:
            raise TypeError("Not a number; %s" % repr(self))
    @py3compat.format_doctest_out
    def __lt__(self, other):
        """
        >>> from rdflib.namespace import XSD
        >>> Literal("YXNkZg==", datatype=XSD['base64Binary']) < "foo"
        True
        >>> %(u)s"\xfe" < Literal(%(u)s"foo")
        False
        >>> Literal(base64.encodestring(%(u)s"\xfe".encode("utf-8")), datatype=URIRef("http://www.w3.org/2001/XMLSchema#base64Binary")) < %(u)s"foo"
        False
        """
        if other is None:
            return False # Nothing is less than None
        # Compare on the cached Python value; the fallbacks below paper
        # over bytes/str mismatches and cross-type comparisons so that
        # Python 2 and 3 behave alike.
        try:
            return self._cmp_value < other
        except UnicodeDecodeError, ue:
            if isinstance(self._cmp_value, py3compat.bytestype):
                return self._cmp_value < other.encode("utf-8")
            else:
                raise ue
        except TypeError:
            try:
                # On Python 3, comparing bytes/str is a TypeError, not a UnicodeError
                if isinstance(self._cmp_value, py3compat.bytestype):
                    return self._cmp_value < other.encode("utf-8")
                return unicode(self._cmp_value) < other
            except (TypeError, AttributeError):
                # Treat different types like Python 2 for now.
                return py3compat.type_cmp(self._cmp_value, other) == -1
    def __le__(self, other):
        """
        >>> from rdflib.namespace import XSD
        >>> Literal('2007-01-01T10:00:00', datatype=XSD.dateTime) <= Literal('2007-01-01T10:00:00', datatype=XSD.dateTime)
        True
        """
        if other is None:
            return False
        if self==other:
            return True
        else:
            return self < other
    def __gt__(self, other):
        # Mirror image of __lt__; see the comments there.
        if other is None:
            return True # Everything is greater than None
        try:
            return self._cmp_value > other
        except UnicodeDecodeError, ue:
            if isinstance(self._cmp_value, py3compat.bytestype):
                return self._cmp_value > other.encode("utf-8")
            else:
                raise ue
        except TypeError:
            try:
                # On Python 3, comparing bytes/str is a TypeError, not a UnicodeError
                if isinstance(self._cmp_value, py3compat.bytestype):
                    return self._cmp_value > other.encode("utf-8")
                return unicode(self._cmp_value) > other
            except (TypeError, AttributeError):
                # Treat different types like Python 2 for now.
                return py3compat.type_cmp(self._cmp_value, other) == 1
    def __ge__(self, other):
        if other is None:
            return False
        if self==other:
            return True
        else:
            return self > other
    def __ne__(self, other):
        """
        Overriden to ensure property result for comparisons with None via !=.
        Routes all other such != and <> comparisons to __eq__

        >>> Literal('') != None
        True
        >>> Literal('2') != Literal('2')
        False
        """
        return not self.__eq__(other)
    def __hash__(self):
        """
        >>> from rdflib.namespace import XSD
        >>> a = {Literal('1', datatype=XSD.integer):'one'}
        >>> Literal('1', datatype=XSD.double) in a
        False

        "Called for the key object for dictionary operations,
        and by the built-in function hash(). Should return
        a 32-bit integer usable as a hash value for
        dictionary operations. The only required property
        is that objects which compare equal have the same
        hash value; it is advised to somehow mix together
        (e.g., using exclusive or) the hash values for the
        components of the object that also play a part in
        comparison of objects." -- 3.4.1 Basic customization (Python)

        "Two literals are equal if and only if all of the following hold:
        * The strings of the two lexical forms compare equal, character by character.
        * Either both or neither have language tags.
        * The language tags, if any, compare equal.
        * Either both or neither have datatype URIs.
        * The two datatype URIs, if any, compare equal, character by character."
        -- 6.5.1 Literal Equality (RDF: Concepts and Abstract Syntax)
        """
        return Identifier.__hash__(self) ^ hash(self.language) ^ hash(self.datatype)
    @py3compat.format_doctest_out
    def __eq__(self, other):
        """
        >>> f = URIRef("foo")
        >>> f is None or f == ''
        False
        >>> Literal("1", datatype=URIRef("foo")) == Literal("1", datatype=URIRef("foo"))
        True
        >>> Literal("1", datatype=URIRef("foo")) == Literal("2", datatype=URIRef("foo"))
        False
        >>> Literal("1", datatype=URIRef("foo")) == "asdf"
        False
        >>> from rdflib.namespace import XSD
        >>> Literal('2007-01-01', datatype=XSD.date) == Literal('2007-01-01', datatype=XSD.date)
        True
        >>> Literal('2007-01-01', datatype=XSD.date) == date(2007, 1, 1)
        True
        >>> oneInt = Literal(1)
        >>> oneNoDtype = Literal('1')
        >>> oneInt == oneNoDtype
        False
        >>> Literal("1", XSD['string']) == Literal("1", XSD['string'])
        True
        >>> Literal("one", lang="en") == Literal("one", lang="en")
        True
        >>> Literal("hast", lang='en') == Literal("hast", lang='de')
        False
        >>> oneInt == Literal(1)
        True
        >>> oneFloat = Literal(1.0)
        >>> oneInt == oneFloat
        True
        >>> oneInt == 1
        True
        """
        if other is None:
            return False
        if isinstance(other, Literal):
            return self._cmp_value == other._cmp_value
        elif isinstance(other, basestring):
            return unicode(self) == other
        else:
            return self._cmp_value == other
    @py3compat.format_doctest_out
    def n3(self):
        r'''
        Returns a representation in the N3 format.

        Examples::

            >>> Literal("foo").n3()
            %(u)s'"foo"'

        Strings with newlines or triple-quotes::

            >>> Literal("foo\nbar").n3()
            %(u)s'"""foo\nbar"""'

            >>> Literal("''\'").n3()
            %(u)s'"\'\'\'"'

            >>> Literal('"""').n3()
            %(u)s'"\\"\\"\\""'

        Language::

            >>> Literal("hello", lang="en").n3()
            %(u)s'"hello"@en'

        Datatypes::

            >>> Literal(1).n3()
            %(u)s'"1"^^<http://www.w3.org/2001/XMLSchema#integer>'

            >>> Literal(1, lang="en").n3()
            %(u)s'"1"^^<http://www.w3.org/2001/XMLSchema#integer>'

            >>> Literal(1.0).n3()
            %(u)s'"1.0"^^<http://www.w3.org/2001/XMLSchema#double>'

        Datatype and language isn't allowed (datatype takes precedence)::

            >>> Literal(True).n3()
            %(u)s'"true"^^<http://www.w3.org/2001/XMLSchema#boolean>'

        Custom datatype::

            >>> footype = URIRef("http://example.org/ns#foo")
            >>> Literal("1", datatype=footype).n3()
            %(u)s'"1"^^<http://example.org/ns#foo>'
        '''
        return self._literal_n3()
    @py3compat.format_doctest_out
    def _literal_n3(self, use_plain=False, qname_callback=None):
        '''
        Using plain literal (shorthand) output::
            >>> from rdflib.namespace import XSD
            >>> Literal(1)._literal_n3(use_plain=True)
            %(u)s'1'

            >>> Literal(1.0)._literal_n3(use_plain=True)
            %(u)s'1e+00'

            >>> Literal(1.0, datatype=XSD.decimal)._literal_n3(use_plain=True)
            %(u)s'1.0'

            >>> Literal(1.0, datatype=XSD.float)._literal_n3(use_plain=True)
            %(u)s'"1.0"^^<http://www.w3.org/2001/XMLSchema#float>'

            >>> Literal("foo", datatype=XSD.string)._literal_n3(
            ...         use_plain=True)
            %(u)s'"foo"^^<http://www.w3.org/2001/XMLSchema#string>'

            >>> Literal(True)._literal_n3(use_plain=True)
            %(u)s'true'

            >>> Literal(False)._literal_n3(use_plain=True)
            %(u)s'false'

        Using callback for datatype QNames::
            >>> Literal(1)._literal_n3(
            ...         qname_callback=lambda uri: "xsd:integer")
            %(u)s'"1"^^xsd:integer'
        '''
        if use_plain and self.datatype in _PLAIN_LITERAL_TYPES:
            try:
                self.toPython() # check validity
                # this is a bit of a mess -
                # in py >=2.6 the string.format function makes this easier
                # we try to produce "pretty" output
                if self.datatype == _XSD_DOUBLE:
                    return sub(".?0*e","e", u'%e' % float(self))
                elif self.datatype == _XSD_DECIMAL:
                    return sub("0*$","0",u'%f' % float(self))
                else:
                    return u'%s' % self
            except ValueError:
                pass # if it's in, we let it out?
        encoded = self._quote_encode()
        datatype = self.datatype
        quoted_dt = None
        if datatype:
            if qname_callback:
                quoted_dt = qname_callback(datatype)
            if not quoted_dt:
                quoted_dt = "<%s>" % datatype
        language = self.language
        if language:
            if datatype:
                # TODO: this isn't valid RDF (it's datatype XOR language)
                return '%s@%s^^%s' % (encoded, language, quoted_dt)
            return '%s@%s' % (encoded, language)
        elif datatype:
            return '%s^^%s' % (encoded, quoted_dt)
        else:
            return '%s' % encoded
    def _quote_encode(self):
        # This simpler encoding doesn't work; a newline gets encoded as "\\n",
        # which is ok in sourcecode, but we want "\n".
        #encoded = self.encode('unicode-escape').replace(
        #        '\\', '\\\\').replace('"','\\"')
        #encoded = self.replace.replace('\\', '\\\\').replace('"','\\"')
        # NOTE: Could in theory chose quotes based on quotes appearing in the
        # string, i.e. '"' and "'", but N3/turtle doesn't allow "'"(?).
        if "\n" in self:
            # Triple quote this string.
            encoded = self.replace('\\', '\\\\')
            if '"""' in self:
                # is this ok?
                encoded = encoded.replace('"""','\\"\\"\\"')
            return '"""%s"""' % encoded.replace('\r','\\r')
        else:
            # NOTE(review): the '\n' replace below is dead (this branch only
            # runs when self has no newline); its position before the
            # backslash-doubling would otherwise double-escape the inserted
            # backslash. Harmless as written, but fragile to reorder.
            return '"%s"' % self.replace('\n','\\n').replace('\\', '\\\\'
                ).replace('"', '\\"').replace('\r','\\r')
    if not py3compat.PY3:
        def __str__(self):
            return self.encode()
    def __repr__(self):
        args = [super(Literal, self).__repr__()]
        if self.language is not None:
            args.append("lang=%s" % repr(self.language))
        if self.datatype is not None:
            args.append("datatype=%s" % repr(self.datatype))
        if self.__class__ == Literal:
            clsName = "rdflib.term.Literal"
        else:
            clsName = self.__class__.__name__
        return """%s(%s)""" % (clsName, ", ".join(args))
    def toPython(self):
        """
        Returns an appropriate python datatype derived from this RDF Literal
        """
        # Look up the converter registered for this datatype (see bind());
        # fall back to returning the Literal itself.
        convFunc = _toPythonMapping.get(self.datatype, None)
        if convFunc:
            rt = convFunc(self)
        else:
            rt = self
        return rt
    def _toCompareValue(self):
        # Compute the value cached in _cmp_value and used by the rich
        # comparison methods: the converted Python value when possible,
        # otherwise the string (plus datatype/language when present).
        try:
            rt = self.toPython()
        except Exception, e:
            _LOGGER.warning("could not convert %s to a Python datatype" %
                            repr(self))
            rt = self
        if rt is self:
            if self.language is None and self.datatype is None:
                return unicode(rt)
            else:
                return (unicode(rt), rt.datatype, rt.language)
        return rt
    def md5_term_hash(self):
        """a string of hex that will be the same for two Literals that
        are the same. It is not a suitable unique id.
        Supported for backwards compatibility; new code should
        probably just use __hash__
        """
        d = md5(self.encode())
        d.update(b("L"))  # "L" tags the digest as coming from a Literal
        return d.hexdigest()
# XML Schema namespace prefix and a few frequently used datatype URIs.
_XSD_PFX = 'http://www.w3.org/2001/XMLSchema#'
_XSD_FLOAT = URIRef(_XSD_PFX+'float')
_XSD_DOUBLE = URIRef(_XSD_PFX+'double')
_XSD_DECIMAL = URIRef(_XSD_PFX+'decimal')
# Datatypes whose literals may be serialized in N3/Turtle "plain"
# (unquoted) shorthand form; see Literal._literal_n3.
_PLAIN_LITERAL_TYPES = (
    URIRef(_XSD_PFX+'integer'),
    URIRef(_XSD_PFX+'boolean'),
    _XSD_DOUBLE,
    _XSD_DECIMAL,
)
def _castPythonToLiteral(obj):
    """
    Cast a Python value to a ``(lexical value, datatype URI or None)``
    pair by scanning the ordered _PythonToXSD table.
    """
    for pyType, (castFunc, dType) in _PythonToXSD:
        if not isinstance(obj, pyType):
            continue
        if castFunc:
            return castFunc(obj), dType
        # No cast function: pass the object through, with the datatype
        # when one is registered.
        return obj, (dType if dType else None)
    # Unknown Python type: fall through with no datatype.
    return obj, None
from decimal import Decimal
# Mappings from Python types to XSD datatypes and back (borrowed from sparta)
# datetime instances are also instances of date... so we need to order these.
# SPARQL/Turtle/N3 has shortcuts for int, double, decimal
# python has only float - to be in tune with sparql/n3/turtle
# we default to XSD.double for float literals
# Each entry maps a Python type to (cast function or None, datatype URI or
# None); scanned in order by _castPythonToLiteral, so order matters.
_PythonToXSD = [
    (basestring, (None, None)),
    (float     , (None, URIRef(_XSD_PFX+'double'))),
    (bool      , (lambda i:str(i).lower(), URIRef(_XSD_PFX+'boolean'))),
    (int       , (None, URIRef(_XSD_PFX+'integer'))),
    (long      , (None, URIRef(_XSD_PFX+'integer'))),
    (Decimal   , (None, URIRef(_XSD_PFX+'decimal'))),
    (datetime  , (lambda i:i.isoformat(), URIRef(_XSD_PFX+'dateTime'))),
    (date      , (lambda i:i.isoformat(), URIRef(_XSD_PFX+'date'))),
    (time      , (lambda i:i.isoformat(), URIRef(_XSD_PFX+'time'))),
]
# Converters from XSD datatype URIs back to Python values; None means the
# lexical string is kept as-is. This seeds _toPythonMapping, which bind()
# may extend or override at runtime.
XSDToPython = {
    URIRef(_XSD_PFX+'time')               : parse_time,
    URIRef(_XSD_PFX+'date')               : parse_date,
    URIRef(_XSD_PFX+'dateTime')           : parse_datetime,
    URIRef(_XSD_PFX+'string')             : None,
    URIRef(_XSD_PFX+'normalizedString')   : None,
    URIRef(_XSD_PFX+'token')              : None,
    URIRef(_XSD_PFX+'language')           : None,
    URIRef(_XSD_PFX+'boolean')            : lambda i:i.lower() in ['1','true'],
    URIRef(_XSD_PFX+'decimal')            : Decimal,
    URIRef(_XSD_PFX+'integer')            : long,
    URIRef(_XSD_PFX+'nonPositiveInteger') : int,
    URIRef(_XSD_PFX+'long')               : long,
    URIRef(_XSD_PFX+'nonNegativeInteger') : int,
    URIRef(_XSD_PFX+'negativeInteger')    : int,
    URIRef(_XSD_PFX+'int')                : long,
    URIRef(_XSD_PFX+'unsignedLong')       : long,
    URIRef(_XSD_PFX+'positiveInteger')    : int,
    URIRef(_XSD_PFX+'short')              : int,
    URIRef(_XSD_PFX+'unsignedInt')        : long,
    URIRef(_XSD_PFX+'byte')               : int,
    URIRef(_XSD_PFX+'unsignedShort')      : int,
    URIRef(_XSD_PFX+'unsignedByte')       : int,
    URIRef(_XSD_PFX+'float')              : float,
    URIRef(_XSD_PFX+'double')             : float,
    URIRef(_XSD_PFX+'base64Binary')       : lambda s: base64.b64decode(py3compat.b(s)),
    URIRef(_XSD_PFX+'anyURI')             : None,
}
_toPythonMapping = {}
_toPythonMapping.update(XSDToPython)
def bind(datatype, conversion_function):
    """Register *conversion_function* as the Python converter for *datatype*.

    Rebinding an already-registered datatype is allowed, but logged as a
    warning.
    """
    already_bound = datatype in _toPythonMapping
    if already_bound:
        _LOGGER.warning("datatype '%s' was already bound. Rebinding." %
                        datatype)
    _toPythonMapping[datatype] = conversion_function
class Variable(Identifier):
    """
    A universally quantified (query) variable, e.g. SPARQL/N3 ``?x``.
    Stored without the leading '?'.
    """
    __slots__ = ()
    def __new__(cls, value):
        if len(value)==0: raise Exception("Attempted to create variable with empty string as name!")
        # Accept names given with the leading '?' and strip it.
        if value[0]=='?':
            value=value[1:]
        return unicode.__new__(cls, value)
    def __repr__(self):
        # repr intentionally matches the N3 form, e.g. ?x
        return self.n3()
    def toPython(self):
        return "?%s" % self
    def n3(self):
        return "?%s" % self
    # Pickle support: rebuild from the plain unicode name.
    def __reduce__(self):
        return (Variable, (unicode(self),))
    def md5_term_hash(self):
        """a string of hex that will be the same for two Variables that
        are the same. It is not a suitable unique id.
        Supported for backwards compatibility; new code should
        probably just use __hash__
        """
        d = md5(self.encode())
        d.update(b("V"))  # "V" tags the digest as coming from a Variable
        return d.hexdigest()
class Statement(Node, tuple):
    """
    A quoted (contextualized) statement: a ((s, p, o), context) pair
    stored as a 2-tuple.
    """
    # NOTE: tuple-unpacking parameters in the signature below are
    # Python-2-only syntax.
    def __new__(cls, (subject, predicate, object), context):
        return tuple.__new__(cls, ((subject, predicate, object), context))
    # Pickle support: rebuild from the triple and context.
    def __reduce__(self):
        return (Statement, (self[0], self[1]))
    def toPython(self):
        return (self[0], self[1])
if __name__ == '__main__':
    # Run the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod()
| 2.546875 | 3 |
pydax/_schema.py | SSaishruthi/pydax | 0 | 12772238 | <filename>pydax/_schema.py
#
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"Schema parsing and loading functionality."
from abc import ABC
from copy import deepcopy
from typing import Any, Dict, Union
import yaml
from . import typing as typing_
from ._schema_retrieval import retrieve_schema_file
SchemaDict = Dict[str, Any]
class Schema(ABC):
    """Abstract class that provides functionality to load and export schemata.

    :param url_or_path: URL or path to a schema file.
    :param tls_verification: When set to ``True``, verify the remote link is https and whether the TLS certificate is
        valid. When set to a path to a file, use this file as a CA bundle file. When set to ``False``, allow http links
        and do not verify any TLS certificates. Ignored if ``url_or_path`` is a local path.
    :raises ValueError: An error occurred when parsing ``url_or_path`` as either a URL or path.
    :raises InsecureConnectionError: The connection is insecure. See ``tls_verification`` for more details.
    """

    def __init__(self, url_or_path: Union[typing_.PathLike, str], *,
                 tls_verification: Union[bool, typing_.PathLike] = True) -> None:
        """Constructor method.
        """
        # Remember where the schema came from so it can be reported later.
        self._retrieved_url_or_path: Union[typing_.PathLike, str] = url_or_path
        raw_schema = retrieve_schema_file(url_or_path, tls_verification=tls_verification)
        self._schema: SchemaDict = self._load_retrieved_schema(raw_schema)

    def _load_retrieved_schema(self, schema: str) -> SchemaDict:
        """Safely loads retrieved schema file.

        :param schema: Retrieved schema object.
        :return: Nested dictionary representation of a schema.
        """
        # safe_load refuses to construct arbitrary Python objects.
        return yaml.safe_load(schema)

    def export_schema(self, *keys: str) -> SchemaDict:
        """Returns a copy of a loaded schema. Should be used for debug purposes only.

        :param keys: The sequence of keys that leads to the portion of the schema to be exported.
        :return: Copy of the schema dictionary.

        Example:

        >>> schema = DatasetSchema('./tests/schemata/datasets.yaml')
        >>> schema.export_schema('datasets', 'noaa_jfk', '1.1.4')
        {'name': 'NOAA Weather Data – JFK Airport'...}
        """
        # Walk down the nested dicts one key at a time, then copy the result
        # so callers cannot mutate the internally held schema.
        node: SchemaDict = self._schema
        for key in keys:
            node = node[key]
        return deepcopy(node)

    @property
    def retrieved_url_or_path(self) -> Union[typing_.PathLike, str]:
        """The URL or path from which the schema was retrieved.

        Example:

        >>> schema = DatasetSchema('./tests/schemata/datasets.yaml')
        >>> schema.retrieved_url_or_path
        './tests/schemata/datasets.yaml'
        """
        return self._retrieved_url_or_path
class DatasetSchema(Schema):
    """Dataset schema class that inherits functionality from :class:`Schema`.

    Currently identical to :class:`Schema`; kept as an extension point.
    """
    # We have this class here because we reserve the potential to put specific dataset schema code here
    pass
class FormatSchema(Schema):
    """Format schema class that inherits functionality from :class:`Schema`.

    Currently identical to :class:`Schema`; kept as an extension point.
    """
    # We have this class here because we reserve the potential to put specific format schema code here
    pass
class LicenseSchema(Schema):
    """License schema class that inherits functionality from :class:`Schema`.

    Currently identical to :class:`Schema`; kept as an extension point.
    """
    # We have this class here because we reserve the potential to put specific license schema code here
    pass
class SchemaManager():
    """Stores all loaded schemata in :attr:`schemata`.

    :param kwargs: Schema name and schema instance key-value pairs.

    Example:

    >>> dataset_schema = DatasetSchema('./tests/schemata/datasets.yaml')
    >>> schema_manager = SchemaManager(datasets=dataset_schema)
    >>> licenses_schema = LicenseSchema('./tests/schemata/licenses.yaml')
    >>> schema_manager.add_schema('licenses', licenses_schema)
    >>> schema_manager.schemata
    {'datasets':..., 'licenses':...}
    """

    def __init__(self, **kwargs: Schema) -> None:
        """Constructor method
        """
        # Registered schemata, keyed by name. Populated through add_schema so
        # every entry passes the same type check.
        self.schemata: Dict[str, Schema] = {}
        for schema_name, schema in kwargs.items():
            self.add_schema(schema_name, schema)

    def add_schema(self, name: str, val: Schema) -> None:
        """Store schema instance in a dictionary. If a schema with the same name as ``name`` is already stored, it is
        overridden.

        :param name: Schema name.
        :param val: Schema instance.
        """
        # Reject anything that is not a Schema instance; a valid entry with a
        # duplicate name silently replaces the previous one.
        if not isinstance(val, Schema):
            raise TypeError('val must be a Schema instance.')
        self.schemata[name] = val
| 2.0625 | 2 |
examples/01_mask_xt.py | chris-carbonell/safecracker | 0 | 12772239 | # safecracker
from safecracker import safecracker as sc
from safecracker.safe import zip as safe
from safecracker.tools import mask as sct
from examples import testing
# get safe
# NOTE(review): this rebinds the name `safe` (imported above via
# `from safecracker.safe import zip as safe`) from the module to a Safe
# instance — it works once, but shadows the module for the rest of the script.
safe = safe.Safe("examples/safe/easy.zip")
# get safecracker
pwg = sct.PasswordGenerator("-d", max_len=3) # just digits, up to 3 characters
safecracker = sc.Safecracker(PasswordGenerator=pwg)
# crack
# Brute-force the archive password, checking against the named member file.
testing.test(safe, safecracker, ["hello world.txt"])
tests/jj/fraunhofer/test_estimation.py | ShabaniLab/shabanipy | 6 | 12772240 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2020 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Test Fraunhofer estimation.
"""
import numpy as np
from shabanipy.jj.fraunhofer.estimation import guess_current_distribution
def create_fraunhofer_like():
    """Build a synthetic Fraunhofer-like critical current trace.

    Returns ``(fields, ics)``: 1001 field points on [-1, 1] and the
    corresponding |sinc| envelope centered at 0.1.
    """
    field_axis = np.linspace(-1, 1, 1001)
    critical_currents = np.abs(np.sinc(8 * (field_axis - 0.1)))
    return field_axis, critical_currents
def create_squid_like():
    """Build a synthetic SQUID-like oscillating critical current trace.

    Returns ``(fields, ics)``: 1001 field points on [-1, 1] and a fast
    cosine oscillation (modulated by a slow sinc) centered at -0.1, offset
    by 2 so the trace stays positive.
    """
    field_axis = np.linspace(-1, 1, 1001)
    shifted = field_axis + 0.1
    critical_currents = 2 + np.cos(8 * np.pi * shifted) * np.sinc(0.1 * shifted)
    return field_axis, critical_currents
def validate_fraunhofer(offset, first_node, amplitude, c_dis):
    """Assert that guessed parameters match the synthetic fraunhofer trace.

    Raises (via numpy.testing / assert) if any value is off.
    """
    np.testing.assert_almost_equal(offset, 0.1)
    # The first node only needs to be roughly located.
    assert abs(first_node + 0.025) < 0.05
    np.testing.assert_almost_equal(amplitude, 1.0)
    # Expect a uniform 5-segment distribution.
    np.testing.assert_array_equal(c_dis, np.ones(5) / 20)
def validate_squid(offset, first_node, amplitude, c_dis):
    """Assert that guessed parameters match the synthetic SQUID trace.

    Raises (via numpy.testing / assert) if any value is off.
    """
    np.testing.assert_almost_equal(offset, -0.1)
    # The first node only needs to be roughly located.
    assert abs(first_node - 0.025) < 0.05
    np.testing.assert_almost_equal(amplitude, 3.0)
    # Expect weight concentrated on the two outer segments.
    np.testing.assert_array_equal(c_dis, np.array([0.625, 0, 0, 0, 0.625]))
def test_guess_current_distribution_fraunhofer():
    """Test identifying a fraunhofer like pattern.
    """
    fields, ics = create_fraunhofer_like()
    guessed = guess_current_distribution(fields, ics, 5, 4)
    validate_fraunhofer(*guessed)
def test_guess_current_distribution_squid():
    """Test identifying a SQUID like pattern.
    """
    fields, ics = create_squid_like()
    guessed = guess_current_distribution(fields, ics, 5, 4)
    validate_squid(*guessed)
def test_guess_current_distribution_too_small_data():
    """Test handling data which do not comport enough points.
    """
    # Only 201 points: too few for the full estimation path.
    fields = np.linspace(-1, 1, 201)
    ics = np.abs(np.sinc(2 * (fields - 0.1)))
    offset, _first_node, amplitude, _c_dis = guess_current_distribution(
        fields, ics, 5, 4
    )
    np.testing.assert_almost_equal(offset, 0.1)
    assert amplitude == 1.0
def test_2D_inputs():
    """Test that we can handle properly 2D inputs."""
    fields_f, ics_f = create_fraunhofer_like()
    fields_s, ics_s = create_squid_like()
    # Stack the two traces row-wise into (2, n) arrays.
    fields = np.stack([fields_f, fields_s])
    ics = np.stack([ics_f, ics_s])
    offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
        fields, ics, 5, 4
    )
    validators = (validate_fraunhofer, validate_squid)
    for o, f, a, cd, validator in zip(offsets, first_nodes, amplitudes, c_dis, validators):
        validator(o, f, a, cd)
def test_3D_inputs():
    """Test that we can handle properly 3D inputs."""
    fields_f, ics_f = create_fraunhofer_like()
    fields_s, ics_s = create_squid_like()
    # Duplicate each trace along a middle axis to build (2, 2, n) arrays:
    # index 0 holds two fraunhofer rows, index 1 two SQUID rows.
    fields = np.stack([[fields_f, fields_f], [fields_s, fields_s]])
    ics = np.stack([[ics_f, ics_f], [ics_s, ics_s]])
    offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
        fields, ics, 5, 4
    )
    validators = (validate_fraunhofer, validate_squid)
    for o, f, a, cd, validator in zip(offsets, first_nodes, amplitudes, c_dis, validators):
        validator(o[0], f[0], a[0], cd[0])
        validator(o[1], f[1], a[1], cd[1])
| 2.140625 | 2 |
setup.py | felix-last/kmeans_smote | 73 | 12772241 | <reponame>felix-last/kmeans_smote<filename>setup.py
import os
from setuptools import setup
def read(fname):
    """Return the UTF-8 text of *fname*, resolved relative to this setup.py.

    Uses a context manager so the file handle is closed promptly; the
    original left the handle open until garbage collection.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding='utf-8') as f:
        return f.read()
# Test-only dependencies, shared between `tests_require` and the `test` extra.
# BUGFIX: the original had a trailing comma after the closing bracket, which
# made `tests_require` a one-element *tuple containing* the list instead of
# the list setuptools expects.
tests_require = [
    'pytest>=4.2.1,<4.3',
    'pytest-cov>=2.6.1,<2.7',
    'codecov>=2.0.15,<2.1'
]

setup(
    name='kmeans_smote',
    version="0.1.2",
    py_modules=['kmeans_smote'],
    install_requires=[
        'imbalanced-learn>=0.4.0,<0.5',
        'scikit-learn>=0.19.0,<0.21',
        'numpy>=1.13,<1.16'
    ],
    tests_require=tests_require,
    extras_require={
        'test': tests_require
    },
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/felix-last/kmeans_smote",
    description=("Oversampling for imbalanced learning based on k-means and SMOTE"),
    long_description=read('README.rst'),
    license="MIT",
    keywords=[
        'Class-imbalanced Learning',
        'Oversampling',
        'Classification',
        'Clustering',
        'Supervised Learning'
    ],
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Operating System :: MacOS'
    ]
)
| 1.414063 | 1 |
2.4 2-Sum.py | INOS-soft/MOmmentum-SECList | 0 | 12772242 | <reponame>INOS-soft/MOmmentum-SECList<gh_stars>0
"""
1.Question 1
The goal of this problem is to implement a variant of the 2-SUM algorithm covered in this week's lectures.
The file (2sum.txt) contains 1 million integers, both positive and negative (there might be some repetitions!).This is your array of integers, with the ith row of the file specifying the ith entry of the array.
Your task is to compute the number of target values tt in the interval [-10000,10000] (inclusive) such that there are distinct numbers x,y in the input file that satisfy x+y=t. (NOTE: ensuring distinctness requires a one-line addition to the algorithm from lecture.)
Write your numeric answer (an integer between 0 and 20001) in the space provided.
OPTIONAL CHALLENGE: If this problem is too easy for you, try implementing your own hash table for it. For example, you could compare performance under the chaining and open addressing approaches to resolving collisions.
"""
import time
##################################################
# Reading file
file = open("data/2sum.txt", "r")
data = file.readlines()
for i in range(len(data)):
data[i] = int(data[i])
##################################################
# Your task is to compute the number of target values t in the interval [-10000,10000] (inclusive)
# such that there are distinct numbers x, y in the input file that satisfy x + y = t.
tic = time.time()
Dict2Sum = {}
List2Sum = []
ans_list = []
num_ans = 0
for i in data:
Dict2Sum[i] = i
List2Sum.append(i)
for t in range(-10000, 10001):
for i in List2Sum:
if t - i in Dict2Sum.keys():
ans_list.append(t)
num_ans += 1
print("find x={}, y={}, t={}".format(i, t-i, t))
break
toc = time.time()
duration = toc - tic # 6892.771441221237
print(num_ans) # 427
print(duration) # 6892.771441221237
| 3.421875 | 3 |
config.py | Japjappedulap/InfoarenaToGithub | 3 | 12772243 | import os
# Required configuration, read from the environment.
# Raises KeyError at import time if any variable is missing.
username = os.environ['USER_NAME']
sources_directory = os.environ['SOURCE_DIRECTORY']
# Only the exact lowercase string 'true' enables overwriting.
# (Simplified from `True if ... == 'true' else False`.)
overwrite_flag = os.environ['OVERWRITE_FLAG'] == 'true'

# You shouldn't edit the following
username_url = '/' + username
infoarena_base_url = 'https://infoarena.ro'
infoarena_user_url = 'https://infoarena.ro/utilizator'
infoarena_monitor_url = 'https://infoarena.ro/monitor'
# Form payload used to request a source view without logging in.
no_login_data = {
    'force_view_source': 'Vezi+sursa'
}
class Colors:
    """ANSI escape sequences for coloring terminal output.

    Each helper converts ``message`` to ``str``, wraps it in the matching
    color/style code and appends the reset code.
    """

    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

    @staticmethod
    def ok_green(message):
        """Wrap *message* in green."""
        return f"{Colors.OKGREEN}{message}{Colors.ENDC}"

    @staticmethod
    def ok_blue(message):
        """Wrap *message* in blue."""
        return f"{Colors.OKBLUE}{message}{Colors.ENDC}"

    @staticmethod
    def bold(message):
        """Wrap *message* in bold."""
        return f"{Colors.BOLD}{message}{Colors.ENDC}"

    @staticmethod
    def warning(message):
        """Wrap *message* in the warning (yellow) color."""
        return f"{Colors.WARNING}{message}{Colors.ENDC}"
archive/nexus-api-v2/API/Development/HTML/Interface.py | cloud-hybrid/delta | 0 | 12772244 | <gh_stars>0
# import asyncio
# import dataclasses
# import os
# import ssl
#
# __module__ = __name__
#
# import botocore.exceptions
#
# from fastapi import HTTPException
#
# from . import *
#
# import API.ASGI.Authentication.Token as Token
#
# from starlette.responses import HTMLResponse
#
# # =============================================================================
# # Standard Library
# # =============================================================================
#
# import time
# import asyncio
# import datetime
# import tempfile
#
# # =============================================================================
# # API - HTTP Application Programming Interface
# # =============================================================================
#
# import boto3.s3.transfer
#
# import Database.User.Interfaces.User.API
# import Database.User.Schemas.Nexus
#
# from devtools import debug
#
# import jinja2
#
# Template = jinja2.environment.Template
#
# Settings = {
# "block_start_string": "{%",
# "block_end_string": "%}",
# "variable_start_string": "{{",
# "variable_end_string": "}}",
# "comment_start_string": "((",
# "comment_end_string": "))",
# "enable_async": True,
# "optimized": True
# }
#
# Loader = lambda source: Template(source, **Settings)
#
# Head = """
# <!DOCTYPE html>
# <html lang="en">
# <head>
# <meta charset="UTF-8">
# <title>Title</title>
# <link rel="stylesheet" href="devices.css" type="text/css">
#
# <link rel="preconnect" href="https://fonts.googleapis.com">
# <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
# <link href="https://fonts.googleapis.com/css2?family=IBM+Plex+Sans:ital,wght@0,100;0,200;0,300;0,400;0,500;0,600;0,700;1,100;1,200;1,300;1,400;1,500;1,600;1,700&display=swap" rel="stylesheet">
#
# <script type="module">
# import "https://jspm.dev/carbon-web-components/es/components/button/button.js";
# </script>
#
# <style>
# html {
# box-sizing: border-box;
# }
#
# *,
# *::before,
# *::after {
# box-sizing: inherit;
# transition: 0.185s;
# transition-timing-function: cubic-bezier(0.3, 0.1, 0.125, 0.25);
# }
#
# body {
# -webkit-font-smoothing: subpixel-antialiased !important;
# font-family: "IBM Plex Sans", ui-sans-serif, sans-serif, sys;
# color: #888;
# margin: 0;
# }
#
# main {
# background: #242627;
# padding: 5.0rem;
# width: 100%;
# height: 100vh;
# text-align: center;
# }
#
# div.nexus-device.iphone-x {
# display: flex;
# margin: auto;
# }
#
# /* === Mobile Inner Contents === */
#
# .container {
# height: 100vh;
# margin: auto;
# }
#
# .container {
# vertical-align: middle;
# background: radial-gradient(ellipse at bottom,
# rgba(100, 100, 100, 1) 0%,
# rgba(125, 125, 125, 1) 0%,
# rgba(0, 0, 0, 1) 105%
# );
# }
#
# .container > h1 {
# font-size: 50px;
# display: inline-block;
# padding-right: 12px;
# animation: type .5s alternate infinite;
# margin-bottom: 0.5rem;
# }
#
# .container > h2 {
# margin-top: 0.0rem;
# }
#
# @keyframes type {
# from {
# box-shadow: inset -3px 0px 0px #888;
# }
# to {
# box-shadow: inset -3px 0px 0px transparent;
# }
# }
#
# .io-inline-button-group {
# display: grid;
# justify-items: auto;
# grid-template-columns: 1fr 1fr 1fr;
# column-count: 4;
# grid-gap: 1.0rem;
# margin: 1.0rem;
# }
#
# .io-vertical-stack-group {
# display: grid;
# justify-items: auto;
# grid-template-rows: 1fr 1fr 1fr;
#
# grid-gap: 1.0rem;
# margin: 1.0rem;
# }
#
# .io-vertical-stack-group > .io-inline-button-group {
# margin: 0.0rem;
# }
#
# .io-ghost {
# color: rgb(145, 145, 145);
# background: black;
# border-style: solid;
# border-width: 0.1rem;
# border-radius: 0.25rem;
# border-color: rgb(145, 145, 145);
# }
#
# .io-ghost:focus { /* Post-Button-Down */
# color: rgb(180, 180, 180);
# border-color: rgb(175, 175, 175);
# outline: none;
# }
#
# .io-ghost:active { /* Button-Down */
# color: rgb(150, 150, 150);
# border-color: rgb(150, 150, 150);
# }
#
# .io-ghost:active:hover { /* Button-Down + Hover */
# color: rgb(115, 115, 115);
# border-color: rgb(115, 115, 115);
# }
#
# .io-ghost:hover {
# color: rgb(205, 205, 205);
# border-color: rgb(205, 205, 205);
# }
# </style>
# </head>
# """
#
# string = """
# <!DOCTYPE html>
# <html class="no-js" lang="EN">
# {{ Header }}
# <body>
# <main>
# <div class="index-page">
# <h1>URL List</h1>
# <h2>Nexus.IO</h2>
# {{ Buttons }}
# </div>
# </main>
# <div id="Application"></div>
# <script>
#
# </script>
# </body>
# </html>
# """
#
# Button = """
# <button>{{ Content }}</button>
# """
#
# class HTTP(Request):
# """
# Application Programming Interface via HTTP(s)
#
# Generator is a Wrapper around FastAPI's Router
# """
#
# Endpoint = Request.Prefix + __module__.split(".")[-2]
#
# Generator = Request.Generator()
#
# Generator.prefix = Endpoint
# Generator.tags = Request.Tags + __module__.split(".")[1:-1]
# # Generator.dependencies = { Secure }
# Generator.include_in_schema = True
#
# @staticmethod
# @Generator.get("/Get-Object-Content",
# name="AWS S3 Page Index Development",
# responses=Request.Responses,
# response_class=HTMLResponse
# )
# async def getPageContent(Bucket: String, Object: String):
# return await Loader(string).render_async(
# Header = await Loader(Head).render_async(
# Title = "Ankawi"
# ),
# Buttons = "\n".join(
# [
# await Loader(Button).render_async(
# Content = "Ankawi-Button"
# ),
# await Loader(Button).render_async(
# Content = "Ankawi-Button"
# ),
# await Loader(Button).render_async(
# Content = "Ankawi-Button"
# ),
# await Loader(Button).render_async(
# Content = "Ankawi-Button"
# ),
# await Loader(Button).render_async(
# Content = "Ankawi-Button"
# )
# ]
# )
# )
#
# API.ASGI.Application.API.include_router(HTTP.Generator)
#
# __all__ = ["HTTP"]
| 1.757813 | 2 |
tests/test_vcs_get_set_cmap_colorcell.py | scottwittenburg/vcs | 11 | 12772245 | <filename>tests/test_vcs_get_set_cmap_colorcell.py
import vcs
import unittest
class TestVCSCMAP(unittest.TestCase):
    """Exercise colormap cell getters/setters via colormap, canvas and
    graphics-method access paths."""

    def testGetSetCmapColorcell(self):
        canvas = vcs.init()
        boxfill = canvas.createboxfill()
        colormap = canvas.createcolormap()
        expected = [28., 14., 45., 100.]
        # The same cell must be readable through the colormap itself, the
        # canvas, and the canvas with an explicit source object.
        self.assertEqual(colormap.getcolorcell(25), expected)
        self.assertEqual(canvas.getcolorcell(25), expected)
        self.assertEqual(canvas.getcolorcell(25, canvas), expected)
        self.assertEqual(canvas.getcolorcell(25, boxfill), expected)
        # Setting without alpha: the stored cell carries alpha 100.
        colormap.setcolorcell(25, 56, 23, 29)
        self.assertEqual(colormap.index[25], [56., 23., 29., 100.])
        # Setting with an explicit alpha stores it as given.
        colormap.setcolorcell(25, 56, 23, 29, 55.7)
        self.assertEqual(colormap.index[25], [56., 23., 29., 55.7])
| 2.46875 | 2 |
peeringdb_server/client_adaptor/backend.py | gordon-shumway-net/peeringdb | 0 | 12772246 | import re
from collections import defaultdict
from django.db.models import OneToOneRel
from django.core.exceptions import ValidationError
from django.conf import settings
from django.db import IntegrityError
from peeringdb import resource
import peeringdb_server.models as models
from django_peeringdb.client_adaptor.backend import (
Backend as BaseBackend,
reftag_to_cls,
)
__version__ = "1.0"
class Backend(BaseBackend):
    """
    Custom tailored peeringdb_server backend for the
    peeringdb client we can use to sync data from
    another peeringdb server instance.
    We can inherit most of the official django-peeringdb
    Backend, however we need bind resources to the peeringdb
    models and fix some issues with validation and relationships.
    """
    # map peeringdb_server models to peeringdb client resources
    RESOURCE_MAP = {
        resource.Facility: models.Facility,
        resource.InternetExchange: models.InternetExchange,
        resource.InternetExchangeFacility: models.InternetExchangeFacility,
        resource.InternetExchangeLan: models.IXLan,
        resource.InternetExchangeLanPrefix: models.IXLanPrefix,
        resource.Network: models.Network,
        resource.NetworkContact: models.NetworkContact,
        resource.NetworkFacility: models.NetworkFacility,
        resource.NetworkIXLan: models.NetworkIXLan,
        resource.Organization: models.Organization,
    }
    def get_resource(self, cls):
        """
        Override this so it doesn't hard fail on a non
        existing resource. As sync will try to obtain resources
        for relationships in peeringdb_server that aren't
        really resources (sponsorships, partnerships etc.)

        Returns None for unmapped model classes instead of raising.
        """
        return self.CONCRETE_MAP.get(cls)
    @reftag_to_cls
    def get_fields(self, concrete):
        """
        Sync currently doesnt support OneToOne relationships
        and none of the ones that exist in peeringdb_server
        are relevant to the data we want to sync.
        However they still get processed, causing errors.
        Here we make sure to not process OneToOneRel relationships
        """
        _fields = super(Backend, self).get_fields(concrete)
        fields = []
        for field in _fields:
            # Drop reverse one-to-one descriptors; keep everything else.
            if isinstance(field, OneToOneRel):
                continue
            fields.append(field)
        return fields
    def set_relation_many_to_many(self, obj, field_name, objs):
        """
        Sync will try to process sponsorship_set off of `org`
        and run into an error, so we make sure to ignore it
        when handling many 2 many relationships during sync
        """
        if field_name in ["sponsorship_set"]:
            return
        return super(Backend, self).set_relation_many_to_many(obj, field_name, objs)
    def clean(self, obj):
        """
        We override the object validation here to handle
        common validation issues that exist in the official production
        db, where validators are set, but data has not yet been
        fixed retroactively.
        These instances are:
        - info_prefixes4 on networks (adjust data)
        - info_prefixes6 on networks (adjust data)
        - overlapping prefixes on ixlan prefixes (skip validation)
        - invalid prefix length on ixlan prefixes (skip validation)
        - ipaddr4 out of prefix address space on netixlans (skip validation)
        - ipaddr6 out of prefix address space on netixlans (skip validation)
        """
        if isinstance(obj, models.Network):
            # Clamp prefix counts to the configured data-quality maximums
            # rather than rejecting the whole record.
            obj.info_prefixes4 = min(
                obj.info_prefixes4, settings.DATA_QUALITY_MAX_PREFIX_V4_LIMIT
            )
            obj.info_prefixes6 = min(
                obj.info_prefixes6, settings.DATA_QUALITY_MAX_PREFIX_V6_LIMIT
            )
        obj.clean_fields()
        obj.validate_unique()
        # Skip full clean() for the model types whose validators reject
        # known-bad production data (see docstring above).
        if not isinstance(
            obj, (models.IXLanPrefix, models.NetworkIXLan, models.NetworkFacility)
        ):
            obj.clean()
    def save(self, obj):
        # Exchanges are saved without auto-creating an IXLan; the synced
        # data supplies the IXLan objects itself.
        if obj.HandleRef.tag == "ix":
            obj.save(create_ixlan=False)
        else:
            obj.save()
    def detect_uniqueness_error(self, exc):
        """
        Parse error, and if it describes any violations of a uniqueness constraint,
        return the corresponding fields, else None
        """
        pattern = r"(\w+) with this (\w+) already exists"
        fields = []
        if isinstance(exc, IntegrityError):
            return self._detect_integrity_error(exc)
        # NOTE(review): the assert message here is the TypeError *class*
        # itself — possibly `raise TypeError` was intended; left as-is.
        assert isinstance(exc, ValidationError), TypeError
        error_dict = getattr(exc, "error_dict", getattr(exc, "message_dict", {}))
        for name, err in error_dict.items():
            if re.search(pattern, str(err)):
                fields.append(name)
        return fields or None
    def detect_missing_relations(self, obj, exc):
        """
        Parse error messages and collect the missing-relationship errors
        as a dict of Resource -> {id set}
        """
        missing = defaultdict(set)
        error_dict = getattr(exc, "error_dict", getattr(exc, "message_dict", {}))
        for name, err in error_dict.items():
            # check if it was a relationship that doesnt exist locally
            pattern = r".+ with id (\d+) does not exist.+"
            m = re.match(pattern, str(err))
            if m:
                # Map the model field back to its client resource and record
                # the remote id that still needs to be fetched.
                field = obj._meta.get_field(name)
                res = self.get_resource(field.related_model)
                missing[res].add(int(m.group(1)))
        return missing
| 2.078125 | 2 |
src/python/dxpy/utils/thread_pool.py | psung/dx-toolkit | 0 | 12772247 | # Copyright (C) 2013-2014 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module contains PrioritizingThreadPool, an implementation of an
interface similar to that of concurrent.futures.Executor. See:
https://docs.python.org/dev/library/concurrent.futures.html
"""
import collections
import concurrent.futures
import threading
# Monkeypatch ThreadPoolExecutor with relevant logic from the patch for
# Python issue 16284. See:
#
# <http://bugs.python.org/issue16284>
# <http://hg.python.org/cpython/rev/70cef0a160cf/>
#
# We may need to apply the relevant parts of the patches to
# ProcessPoolExecutor and multiprocessing.Queue if we ever start using
# those, too.
def _non_leaky_worker(executor_reference, work_queue):
    """Replacement worker loop for ThreadPoolExecutor (see the issue 16284
    note above).

    Pulls work items from ``work_queue`` forever; ``None`` is the shutdown
    sentinel. ``executor_reference`` is a callable (weak reference) that
    returns the owning executor, or None once it has been collected.
    """
    try:
        while True:
            work_item = work_queue.get(block=True)
            if work_item is not None:
                work_item.run()
                # Drop the reference promptly so the item can be collected.
                del work_item
                continue
            executor = executor_reference()
            # Exit if:
            # - The interpreter is shutting down OR
            # - The executor that owns the worker has been collected OR
            # - The executor that owns the worker has been shutdown.
            if concurrent.futures.thread._shutdown or executor is None or executor._shutdown:
                # Notice other workers
                work_queue.put(None)
                return
            del executor
    except BaseException:
        concurrent.futures.thread._base.LOGGER.critical('Exception in worker', exc_info=True)
def _chain_result(outer_future):
"""Returns a callable that can be supplied to Future.add_done_callback
to propagate a future's result to outer_future.
"""
def f(inner_future):
try:
result = inner_future.result()
except BaseException as e:
outer_future.set_exception(e)
else:
outer_future.set_result(result)
return f
# Install the replacement worker loop so ThreadPoolExecutor threads use
# _non_leaky_worker (the issue 16284 backport described above).
concurrent.futures.thread._worker = _non_leaky_worker
def _run_callable_with_postamble(postamble, callable_, *args, **kwargs):
"""Returns a callable of no args that invokes callable_ (with the
specified args and kwargs) and then invokes postamble (with no
args).
The callable returns the result of (or exception thrown by)
callable_.
"""
def fn():
try:
return callable_(*args, **kwargs)
finally:
postamble()
return fn
class PrioritizingThreadPool(object):
    """Presents an abstraction similar to that of
    concurrent.futures.Executor except that multiple clients may write
    their tasks to separate queues (which may be distinguished by any
    hashable object). Tasks are handled by different threads (in the
    same process) simultaneously. The tasks in each queue are processed
    in order; tasks written to different queues are processed as
    follows:
    When a task is submitted using submit_to_queue the client may
    specify a priority_fn to go along with that task. Each time a worker
    thread is ready to start a task, the priority_fn of each candidate
    task (the head of each queue) is called, and the task that returns
    the lowest value is chosen. (This is more generic than a priority
    queue in that the priority value of each task is not a static value
    that must be submitted at the time that the task is enqueued.)
    When a task is enqueued, we return a Future for the result of that
    task.
    """
    def __init__(self, max_workers):
        # Underlying executor that actually runs tasks; the semaphore
        # tracks how many of its workers are free.
        self._pool = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
        self._tasks = threading.Semaphore(max_workers)
        self._queue_lock = threading.Lock()
        # Invariant: self._queues is a mapping of queue_id to a NONEMPTY
        # list of Futures representing yet-unscheduled items in that
        # queue. (This invariant may only be violated by threads that
        # are holding _queue_lock.)
        #
        # Each Future is the future we gave to the client, augmented
        # with:
        # (1) a field "args" containing a tuple
        #     (callable, args, kwargs), and
        # (2) a field "priority_fn" with the priority function for that
        #     task.
        self._queues = {}
    def _submit_one(self, callable_, *args, **kwargs):
        """Starts the next task (which, when complete, will, in turn, start one
        more task when finished, which will, in turn, etc.). Returns a
        future object corresponding to the newly started task.
        Thread safety note: assumes that the caller has already reserved
        a worker using self._tasks.
        """
        # After the task finishes (success or failure), release the worker
        # slot and try to schedule whatever is queued next.
        def postamble():
            self._tasks.release()
            self._maybe_schedule_task()
        return self._pool.submit(_run_callable_with_postamble(postamble, callable_, *args, **kwargs))
    def _maybe_schedule_task(self):
        """Starts a task if there is an available worker to serve it.
        Thread safe.
        """
        # Non-blocking reserve of a worker slot; bail out if all are busy.
        if self._tasks.acquire(blocking=False):
            # Atomically remove the item from the queue and feed it to
            # the ThreadPoolExecutor.
            self._queue_lock.acquire()
            try:
                outer_future = self._next()
            except StopIteration:
                # Oops, there is in fact no task to be served, so we
                # won't be tying up a worker after all.
                self._tasks.release()
            else:
                callable_, args, kwargs = outer_future.args
                inner_future = self._submit_one(callable_, *args, **kwargs)
                # Now that we have the real future (inner_future), chain
                # its result to what we provided to our client
                inner_future.add_done_callback(_chain_result(outer_future))
            finally:
                self._queue_lock.release()
    def _next(self):
        """Pop the highest priority task.
        Returns the Future corresponding to that task (and removes it
        from the queue of items to be scheduled), or raises
        StopIteration if no tasks are available.
        Thread safety note: assumes the caller is holding
        self._queue_lock (the caller will probably also want to hold the
        same lock while scheduling the result of this method, so as to
        make the pop+schedule operation atomic).
        """
        # NOTE(review): if the lock was NOT already held, acquire(False)
        # succeeds here and the AssertionError is raised while the lock is
        # left held — callers must hold _queue_lock as documented.
        if self._queue_lock.acquire(False):
            raise AssertionError('Expected _queue_lock to be held here')
        queue_ids = list(self._queues.keys())
        if not queue_ids:
            raise StopIteration()
        # Find the queue whose head item has the lowest priority value
        best_queue_id = None
        best_priority_value = None
        for candidate_queue_id in queue_ids:
            selected_queue = self._queues[candidate_queue_id]
            if not len(selected_queue):
                raise AssertionError('Invariant violation: queue %r is empty' % (candidate_queue_id,))
            head_of_queue = selected_queue[0]
            # A missing priority_fn counts as priority 0.
            priority_value = head_of_queue.priority_fn() if head_of_queue.priority_fn else 0
            if best_queue_id is None or priority_value < best_priority_value:
                best_queue_id = candidate_queue_id
                best_priority_value = priority_value
        queue_id = best_queue_id
        assert queue_id is not None
        next_task = self._queues[queue_id].popleft()
        # Preserve the nonempty-queues invariant: drop queues that drained.
        if len(self._queues[queue_id]) == 0:
            del self._queues[queue_id]
        return next_task
    def submit(self, callable_, *args, **kwargs):
        """For compatibility with code that was previously using
        ThreadPoolExecutor directly, provides a similar interface to the
        submit method of that class.
        Requests submitted in this way have a priority of 0 and go into
        a single default queue.
        Returns a Future corresponding to the specified task.
        """
        return self.submit_to_queue('', None, callable_, *args, **kwargs)
    def submit_to_queue(self, queue_id, priority_fn, callable_, *args, **kwargs):
        """Adds a new task to the end of the specified queue.
        Returns a Future corresponding to the specified task.
        :param queue_id: indicates which queue this request should go at
        the end of
        :param priority_fn: a function of no args. Whenever a worker is
        available, the task whose priority_fn returns the lowest value
        is selected. None may also be provided in which case the
        priority_fn is considered to return 0.
        """
        if queue_id is None:
            # In _next, None is used as a sentinel value
            raise AssertionError('queue_id may not be None')
        # Augment the client-facing future with the scheduling metadata
        # described in the __init__ invariant comment.
        outer_future = concurrent.futures._base.Future()
        outer_future.priority_fn = priority_fn
        outer_future.args = (callable_, args, kwargs)
        with self._queue_lock:
            if queue_id not in self._queues:
                self._queues[queue_id] = collections.deque()
            self._queues[queue_id].append(outer_future)
        # Start the task now if there is a worker that can serve it.
        self._maybe_schedule_task()
        return outer_future
| 1.757813 | 2 |
src/exec/__init__.py | feimaomiao/PWDS | 2 | 12772248 | <gh_stars>1-10
from .alterPrefs import *
from .enc import *
from .funcs import *
from .pwd import *
from .userClass import * | 1.21875 | 1 |
src/junk/1DCNN_BDLSTM.py | spil3141/Reseach-2-Malware-Detection-using-1D-CNN-and-RNN | 1 | 12772249 | <reponame>spil3141/Reseach-2-Malware-Detection-using-1D-CNN-and-RNN<gh_stars>1-10
"""#### Importting library ###"""
import numpy
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Reshape, LSTM, Bidirectional
from tensorflow.keras.layers import Embedding, Reshape, TimeDistributed
from tensorflow.keras.layers import Flatten,Conv1D, GlobalMaxPooling1D,MaxPooling1D,GlobalAveragePooling1D
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.metrics import confusion_matrix
import tensorflow
import pandas as pd
from datetime import datetime
from sklearn.preprocessing import StandardScaler
import h5py
import numpy
import os
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_fscore_support
import time
from sklearn.metrics import accuracy_score
"""################ loading from file #################"""
# Loading Dataset from Hdf5 file format
# Path to the HDF5 file holding the pre-built train/valid splits.
filename = "/mnt/d/Dataset 2.0 (completed)/(Dataset 3.1)(8000 samples).hdf5"
def train_batch_generator():
    """Yield (feature, one-hot label) pairs from the training split."""
    with h5py.File(filename, "r") as f:
        features = f["/Dataset 3.1/Train/Feature"]
        labels = f["/Dataset 3.1/Train/Label"]
        for sample, target in zip(features, labels):
            # Labels are scalar class ids; expand to a 2-class one-hot vector.
            yield (sample, tf.one_hot(target, depth=2))
def valid_batch_generator():
    """Yield (feature, one-hot label) pairs from the validation split."""
    with h5py.File(filename, "r") as f:
        features = f["/Dataset 3.1/Valid/Feature"]
        labels = f["/Dataset 3.1/Valid/Label"]
        for sample, target in zip(features, labels):
            # Labels are scalar class ids; expand to a 2-class one-hot vector.
            yield (sample, tf.one_hot(target, depth=2))
# Wrap the HDF5-backed generators as tf.data pipelines. Features are
# variable-length float32 vectors; labels are one-hot int32 vectors.
X_Train_data = tf.data.Dataset.from_generator(train_batch_generator,
                         output_types=('float32', 'int32'),
                         output_shapes=(tf.TensorShape((None,)), tf.TensorShape((None,)))
                         )
X_Valid_data = tf.data.Dataset.from_generator(valid_batch_generator,
                         output_types=('float32', 'int32'),
                         output_shapes=(tf.TensorShape((None,)), tf.TensorShape((None,)))
                         )
"""################ Data Preprocessing ###"""
def benchmark(dataset, num_epochs=2):
    """Time how long iterating *dataset* for num_epochs epochs takes.

    Each element incurs a simulated 10 ms "training step"; the total wall
    time is printed when iteration finishes.
    """
    began = time.perf_counter()
    for _ in range(num_epochs):
        for _sample in dataset:
            time.sleep(0.01)  # stand-in for one real training step
    print("Execution time:", time.perf_counter() - began)
## Visualizing dataset batches
def plot_batch_sizes(ds, name):
    """Bar-chart the size (first dimension) of every batch in *ds*."""
    sizes = []
    for batch in ds:
        sizes.append(numpy.asarray(batch).shape[0])
    plt.bar(range(len(sizes)), sizes)
    plt.xlabel('Batch number')
    plt.ylabel('Batch size')
    plt.title(name)
#plot_batch_sizes(X_Test_data,"Test Dataset")
# Pre-processing
def norm(x, _min, _max):
    """Min-max scale *x* into [0, 1] relative to the range [_min, _max]."""
    span = _max - _min
    return (x - _min) / span


def normalize_samples(feature, label):
    """Rescale a byte-valued feature to [0, 1]; the label passes through."""
    return norm(feature, 0, 255), label
##Using the dataset batch function, shuffle and divide the dataset into batches
# Rescale every sample to [0, 1] before batching.
X_Train_data_norm = X_Train_data.map(normalize_samples)
X_Valid_data_norm = X_Valid_data.map(normalize_samples)
# Batch split
X_Train_data_norm = X_Train_data_norm.batch(125)
X_Valid_data_norm = X_Valid_data_norm.batch(125)
#benchmark(X_Test_data_norm)
#benchmark((X_Test_data_norm).prefetch(tf.data.experimental.AUTOTUNE))
#benchmark(tf.data.Dataset.range(2).interleave(X_Test_data,num_parallel_calls=tf.data.experimental.AUTOTUNE))
print("Done")
"""############################ model #####################################"""
EPOCHS = 10
Batch_size = None  # NOTE(review): never used; batching is done via .batch(125) above
#size = next(valid_batch_generator())
#maxlen_pad = size[0].shape[0]
# All samples share one padded length; read it off the first validation row.
with h5py.File(filename, "r") as f:
    maxlen_pad = f["Dataset 3.1/Valid/Feature"][0].shape[0]
"""#### Distributed Learning ########"""
# Mirror variables across all local GPUs/CPUs for synchronous training.
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
# Build the model inside the distribution scope so its variables are
# created as mirrored variables.
with strategy.scope():
    # Interpret the flat byte vector as (timesteps, 4) channels.
    n_timesteps, n_features= int(maxlen_pad / 4), 4
    #Input
    inputs = tf.keras.layers.Input(shape=(maxlen_pad,))
    #Reshape the input dataset to fits the 1D CNN input requirements.
    x = tf.keras.layers.Reshape((n_timesteps, n_features))(inputs)
    #1D CNN for 1D feature extractions
    x = tf.keras.layers.Conv1D(64,64,strides=16, activation='relu')(x)
    x = tf.keras.layers.MaxPooling1D(pool_size = 2)(x)
    # training=True keeps dropout active even at inference time
    # (Monte-Carlo-dropout style) -- presumably intentional; confirm.
    x = tf.keras.layers.Dropout(0.3)(x,training=True)
    #Bi-D LSTM layer for sequential data learning
    x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(16))(x)
    #Last Dense layer for Reducing the dimensional for getting the needed classes.
    x = tf.keras.layers.Dense(100, activation='relu')(x)
    x = tf.keras.layers.Dropout(rate=0.5)(x, training=True)
    outputs = tf.keras.layers.Dense(2, activation='softmax')(x)
    model = tf.keras.Model(inputs=inputs,outputs=outputs)
"""
#Reshape the input dataset to fits the 1D CNN input requirements.
x = tf.keras.layers.Reshape((n_timesteps, n_features))(inputs)
#x = tf.keras.layers.BatchNormalization()(x)
#1D CNN for 1D feature extractions
x = tf.keras.layers.Conv1D(64,64,strides=16, activation='relu')(x)
x = tf.keras.layers.MaxPooling1D(pool_size = 2)(x)
x = tf.keras.layers.Dropout(0.3)(x,training=True)
x = tf.keras.layers.Conv1D(128,5,strides=1, activation='relu')(x)
x = tf.keras.layers.MaxPooling1D(pool_size = 2)(x)
x = tf.keras.layers.Dropout(0.3)(x,training=True)
x = tf.keras.layers.Conv1D(256,5,strides=1, activation='relu')(x)
x = tf.keras.layers.MaxPooling1D(pool_size = 2)(x)
x = tf.keras.layers.Dropout(0.25)(x,training=True)
#Bi-D LSTM layer for sequential data learning
x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(256,return_sequences=True))(x)
x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(128, return_sequences=True))(x)
x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64))(x)
#Last Dense layer for Reducing the dimensional for getting the needed classes.
x = tf.keras.layers.Dense(64, activation='relu')(x)
x = tf.keras.layers.Dropout(rate=0.5)(x, training=True)
"""
model.summary()
print("Input Shapes: ")
# Log each layer's expected input shape for quick debugging.
for i in model.layers:
    print(i.name,"\t" ,i.input_shape)
# Define the checkpoint directory to store the checkpoints
checkpoint_dir = 'training_checkpoints'
# Name of the checkpoint files
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
# Function for decaying the learning rate.
# You can define any decay function you need.
def decay(epoch):
    """Step learning-rate schedule: 1e-3 (epochs 0-2), 1e-4 (3-6), 1e-5 after."""
    if epoch >= 7:
        return 1e-5
    if epoch >= 3:
        return 1e-4
    return 1e-3
# Callback for printing the LR at the end of each epoch.
class PrintLR(tf.keras.callbacks.Callback):
    # NOTE(review): reads the module-level `model`, not self.model -- works
    # in this script but couples the callback to the global.
    def on_epoch_end(self, epoch, logs=None):
        print('\nLearning rate for epoch {} is {}'.format(epoch + 1,
                                                          model.optimizer.lr.numpy()))
# Callbacks: TensorBoard logging, per-epoch weight checkpoints, the step
# LR schedule defined above, and a per-epoch LR printout.
callbacks = [
    tf.keras.callbacks.TensorBoard(log_dir='logs'),
    tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix,
                                       save_weights_only=True),
    tf.keras.callbacks.LearningRateScheduler(decay),
    PrintLR()
]
#model = test()
# model = create_model3()
""" Compiling mode"""
#opt = tensorflow.keras.optimizers.Adam(learning_rate=0.001)
#model.compile(loss='binary_crossentropy',
# optimizer=opt, metrics=['accuracy'])
#model.compile(optimizer='adam', loss='mae')
#model.compile(optimizer='adam',
# loss='categorical_crossentropy',
# metrics=['accuracy'])
# model.compile(optimizer= tf.keras.optimizers.SGD(learning_rate=0.01, #default = 0.01
# momentum=0.9,
# decay=1e-2,
# nesterov=False),
# loss="categorical_crossentropy",
# metrics = ["accuracy"])
#model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
# optimizer=tf.keras.optimizers.Adam(),
# metrics=['accuracy'])
#model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
# Adam with the base LR; LearningRateScheduler overrides it each epoch.
opt = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(loss='categorical_crossentropy',
              optimizer=opt, metrics=['accuracy'])
## Fitting Function
# Fit with tf.data """
history = model.fit(X_Train_data_norm,
                    epochs=EPOCHS,
                    callbacks= callbacks,
                    verbose=1,
                    validation_data = X_Valid_data_norm
                    )
"""### Saving history ###"""
# Persist the per-epoch metrics for later analysis.
hist_df = pd.DataFrame(history.history)
# or save to csv:
hist_csv_file = 'history.csv'
with open(hist_csv_file, mode='w') as f:
    hist_df.to_csv(f)
# Fit with generator """
#history = model.fit(train_batch_generator,
# epochs=EPOCHS,
# verbose=1,
# validation_data = valid_batch_generator
# )
# Fit Normal
#callback = tf.keras.callbacks.EarlyStopping(monitor="accuracy",patience=3)
#history = model.fit(X_train, y_train_onehot,
# batch_size= Batch_size, epochs=EPOCHS,
# validation_data = (X_valid,y_valid_onehot),
# callbacks=[callback],
# )
# load history
# history = pd.read_csv("history.csv")
#load model weights
# model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
#$ tensorboard --logdir=path/to/log-directory
| 2.109375 | 2 |
tests/googl/test_googl_link.py | goldsborough/lnk | 3 | 12772250 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ecstasy
import oauth2client.file
import pyperclip
import pytest
import requests
import threading
try:
from Queue import Queue
except ImportError:
from queue import Queue
from collections import namedtuple
import tests.paths
import lnk.googl.link
# Google URL-shortener REST API version and key used by the raw helpers.
VERSION = 1
KEY = '<KEY>'
API = 'https://www.googleapis.com/urlshortener'
# Serializes appends to the shared `destination` lists in worker threads.
LOCK = threading.Lock()
# URLs waiting to be processed by the *_fmt worker threads.
QUEUE = Queue()
def shorten(url='http://python.org'):
    """Shorten *url* through the goo.gl REST API and return the short link."""
    endpoint = '{0}/v{1}/url'.format(API, VERSION)
    payload = '{{"longUrl": "{0}"}}'.format(url)
    response = requests.post(endpoint,
                             headers={'content-type': 'application/json'},
                             data=payload,
                             params=dict(key=KEY))
    return response.json()['id']
def shorten_fmt(destination):
    """Worker: take a URL off QUEUE, shorten it, append 'long => short'."""
    original = QUEUE.get()
    line = '{0} => {1}'.format(original, shorten(original))
    with LOCK:
        destination.append(line)
def expand(url='http://goo.gl/Euc5', version=VERSION):
    """Resolve a goo.gl short link back to its long URL via the API."""
    endpoint = '{0}/v{1}/url'.format(API, version)
    response = requests.get(endpoint, params=dict(shortUrl=url, key=KEY))
    return response.json()['longUrl']
def expand_fmt(destination):
    """Worker: take a URL off QUEUE, expand it, append 'short => long'."""
    short = QUEUE.get()
    line = '{0} => {1}'.format(short, expand(short))
    with LOCK:
        destination.append(line)
@pytest.fixture(scope='module')
def fixture():
    """Build the Link under test plus the pre-computed expected strings."""
    Fixture = namedtuple('Fixture', [
        'link',
        'long',
        'short',
        'bold',
        'long_to_short',
        'short_to_long'
    ])
    link = lnk.googl.link.Link(raw=True)
    link.credentials = oauth2client.file.Storage(tests.paths.CREDENTIALS_PATH)
    url = 'https://www.github.com/goldsborough/lnk'
    short = shorten(url)
    bold = ecstasy.beautify('<{0}>'.format(short), ecstasy.Style.Bold)
    return Fixture(link=link,
                   long=url,
                   short=short,
                   bold=bold,
                   long_to_short='{0} => {1}'.format(url, short),
                   short_to_long='{0} => {1}'.format(short, url))
def test_copy_copies_to_clipboard_if_copy_true(fixture):
    fixture.link.copy(True, fixture.short)
    assert pyperclip.paste() == fixture.short

def test_copy_copies_only_first_url(fixture):
    # Once something was copied, later copy() calls must not overwrite it.
    assert fixture.link.already_copied
    fixture.link.copy(True, 'a')
    fixture.link.copy(True, 'b')
    fixture.link.copy(True, 'c')
    assert pyperclip.paste() == fixture.short

def test_copy_copies_to_clipboard_if_copy_false(fixture):
    # NOTE(review): despite the name, this asserts copy=False does NOT
    # touch the clipboard.
    pyperclip.copy('original')
    fixture.link.copy(False, fixture.short)
    assert pyperclip.paste() == 'original'

def test_copy_makes_copied_url_bold(fixture):
    fixture.link.already_copied = False
    returned_url = fixture.link.copy(True, fixture.short)
    assert returned_url == fixture.bold
def test_get_long_expands_well(fixture):
    result = fixture.link.get_long(fixture.short)
    assert result == fixture.long

def test_get_short_shortens_well(fixture):
    # Round-trip: shorten, expand, and compare with the original URL.
    short = fixture.link.get_short(fixture.long)
    result = fixture.link.get_long(short)
    assert result == fixture.long

def test_shorten_formats_well(fixture):
    # shorten() reads from the link's queue and appends 'long => short'.
    result = []
    fixture.link.queue.put(fixture.long)
    fixture.link.shorten(result, False)
    result = result[0].split()
    assert result[0] == fixture.long
    assert result[1] == '=>'
    assert result[2].startswith('https://goo.gl/')

def test_expand_formats_well(fixture):
    result = []
    fixture.link.queue.put(fixture.short)
    fixture.link.expand(result, False)
    assert result[0] == fixture.short_to_long
def test_shorten_urls_works_for_single_url(fixture):
    result = fixture.link.shorten_urls(False, True, [fixture.long])
    result = result[0].split()
    assert result[0] == fixture.long
    assert result[1] == '=>'
    # Expanding the freshly shortened URL must give back the input.
    expanded = fixture.link.get_long(result[2])
    assert expanded == result[0]

def test_shorten_urls_works_for_many_urls(fixture):
    urls = [
        'http://facebook.com/',
        'http://google.com/',
        'http://python.org/'
    ]
    result = fixture.link.shorten_urls(False, True, urls)
    # Build the expected lines with raw API calls running in parallel.
    expected = []
    threads = []
    for url in urls:
        QUEUE.put(url)
        thread = threading.Thread(target=shorten_fmt, args=(expected,))
        thread.daemon = True
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join(timeout=10)
    print(sorted(result), sorted(expected))
    for got, wanted in zip(sorted(result), sorted(expected)):
        got = got.split()
        wanted = wanted.split()
        assert got[0] == wanted[0]
        assert got[1] == wanted[1] == '=>'
        # Short URLs can differ between calls, so compare their expansions.
        expanded = fixture.link.get_long(got[2])
        assert expanded == got[0] == wanted[0]
def test_expand_urls_works_for_single_url(fixture):
    result = fixture.link.expand_urls(False, [fixture.short])
    assert result[0] == fixture.short_to_long

def test_expand_urls_works_for_many_urls(fixture):
    urls = [
        'http://goo.gl/Up0wrT',
        'http://goo.gl/4Nuepy',
        'http://goo.gl/bYm2EP'
    ]
    result = fixture.link.expand_urls(False, urls)
    # Build the expected lines with raw API calls running in parallel.
    expected = []
    threads = []
    for url in urls:
        QUEUE.put(url)
        thread = threading.Thread(target=expand_fmt, args=(expected,))
        thread.daemon = True
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join(timeout=10)
    print(result, expected)
    assert sorted(result) == sorted(expected)

def test_shorten_urls_warns_about_url_without_protocol(fixture, capsys):
    fixture.link.shorten_urls(False, False, ['google.com'])
    out = capsys.readouterr()
    assert out
    assert out[0].startswith("\aWarning: Prepending 'http://' to")
def test_fetch_works(fixture):
    # fetch() handles expanding and shortening in one call.
    result = fixture.link.fetch(False,
                                True,
                                [fixture.short],
                                [fixture.long],
                                False)
    expected = [fixture.long_to_short, fixture.short_to_long]
    for got, wanted in zip(sorted(result), sorted(expected)):
        got = got.split()
        wanted = wanted.split()
        assert got[0] == wanted[0]
        assert got[1] == wanted[1] == '=>'
        if got[0] == fixture.long:
            # Short URLs can differ between calls; compare expansions.
            expanded = fixture.link.get_long(got[2])
            assert expanded == got[0] == wanted[0]
def test_fetch_correct_output_if_raw_false_pretty_false(fixture):
    """fetch() should join all result lines into one newline-separated
    string when raw output is disabled."""
    fixture.link.raw = False
    result = fixture.link.fetch(False,
                                True,
                                [fixture.short],
                                [fixture.long],
                                False)
    expected = '\n'.join([fixture.short_to_long, fixture.long_to_short])
    # Bug fix: the original `return result == expected` made this test
    # always pass -- pytest ignores return values. Assert instead.
    assert result == expected
| 2.203125 | 2 |