content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
"""
Defines a function to randomly generate particle positions according to
the desired surface density profile (sigma vs r) and the vertical profile
(rho vs r,z).
Created on Mon Jan 27 18:48:04 2014
@author: ibackus
"""
# External packages
import pynbody
SimArray = pynbody.array.SimArray
import numpy as np
# ICgen packages
import isaac
class pos:
    """
    Particle-position generator.

    Draws particle positions from the surface density profile (sigma vs r)
    and the vertical density profile (rho vs r, z) stored on an initial
    conditions object.

    USAGE:

        # method = 'grid' or 'random'
        pos = pos_class.pos(ICobj, method)

    ICobj should be an initial conditions object (ICgen.IC) with rho already
    calculated.
    """

    def __init__(self, ICobj, method=None):
        # Keep a reference to the parent IC object; dropped when pickling
        # (see __getstate__)
        self._parent = ICobj
        # Check that sigma and rho have been generated.
        # (was: `raise NameError, '...'` -- Python-2-only syntax)
        if not hasattr(ICobj, 'rho'):
            raise NameError('rho could not be found in the IC object')
        if not hasattr(ICobj, 'sigma'):
            raise NameError('sigma could not be found in the IC object')
        if method is None:
            # No override given: use the method stored in the IC settings
            self.method = ICobj.settings.pos_gen.method
        else:
            self.method = method
            # Record the explicit override on the IC object's settings
            ICobj.settings.pos_gen.method = method
        self.nParticles = ICobj.settings.pos_gen.nParticles
        print('Generating {} particle positions using method: {}'.format(
            self.nParticles, self.method))
        # Generate positions: radial, vertical, angular, then cartesian
        self._generate_r()
        self._generate_z()
        self._generate_theta()
        self._cartesian_pos()

    def __getstate__(self):
        """
        This is required to make the object pickle-able
        """
        # Copy everything except the parent IC object, which is not
        # picklable (and is re-attached externally on load)
        state = self.__dict__.copy()
        state.pop('_parent', None)
        return state

    def _generate_r(self):
        """
        Generate radial positions by inverse-transform sampling of the
        surface-density CDF.
        """
        print('Generating r positions')
        cdf_inv_r = self._parent.sigma.cdf_inv
        if self.method == 'grid':
            # Generate linearly increasing values of m, using 2 more than
            # necessary to avoid boundary issues
            m = np.linspace(0, 1, self.nParticles + 2)
            # Calculate r from inverse CDF
            r = cdf_inv_r(m[1:-1])
            # Assign output
            self.r = r
        if self.method == 'random':
            m = np.random.rand(self.nParticles)
            r = cdf_inv_r(m)
            self.r = r
        # NOTE: any method other than 'grid'/'random' leaves self.r unset

    def _generate_z(self):
        """
        Generate z positions by inverse-transform sampling of the vertical
        density CDF at each particle's radius, with a random sign.
        """
        print('Generating z positions')
        # The inverse CDF over z as a function of r
        cdf_inv_z = self._parent.rho.cdf_inv
        # Random numbers between 0 and 1
        m = np.random.rand(self.nParticles)
        # Calculate z
        z = cdf_inv_z(m, self.r)
        # Randomly select sign of z (disc assumed symmetric about midplane)
        z = z * np.random.choice(np.array([-1, 1]), self.nParticles)
        # Assign output
        self.z = z

    def _generate_theta(self):
        """
        Generate angular positions.

        'grid' lays particles along an outward spiral; 'random' draws theta
        uniformly in [0, 2*pi).
        """
        nParticles = self.nParticles
        if self.method == 'grid':
            r = self.r
            dtheta = np.sqrt(2 * np.pi * (1 - r[0:-1] / r[1:]))
            dtheta = isaac.strip_units(dtheta)
            theta = np.zeros(nParticles)
            for n in range(nParticles - 1):
                # NOTE: it's important to subtract (not add) dtheta. The
                # particles will be moving counter-clockwise. To prevent the
                # particle spirals from kinking, the particle spirals must
                # go out clockwise
                theta[n + 1] = theta[n] - dtheta[n]
            self.theta = theta
        if self.method == 'random':
            theta = 2 * np.pi * np.random.rand(nParticles)
            self.theta = theta

    def _cartesian_pos(self):
        """
        Generate x, y and assemble the (nParticles, 3) position array.
        """
        r = self.r
        z = self.z
        theta = self.theta
        x = r * np.cos(theta)
        y = r * np.sin(theta)
        xyz = np.zeros([self.nParticles, 3])
        # Give the position array the same units as r
        xyz = isaac.match_units(xyz, r)[0]
        xyz[:, 0] = x
        xyz[:, 1] = y
        xyz[:, 2] = isaac.match_units(z, r)[0]
        self.x = x
        self.y = y
        self.xyz = xyz
import itertools
import os
import sqlite3
from pprint import pformat
from ..log import log
from ..utils import get_env_override, get_resource_path, get_storage_path
from ..version import __version__
from .db_base import YtdlDatabase, YtdlDatabaseError
class YtdlSqliteDatabase(YtdlDatabase):

    def __init__(self):
        '''
        Open or create the SQLite database.
        '''
        super().__init__()

        # Decide where to store the database file
        # Priority (high to low): environment, config file, default
        db_path = self.db_config.get('YDL_DB_PATH', get_storage_path('data.db'))
        db_path = get_env_override('YDL_DB_PATH', default=db_path)
        log.info(f'Using SQLite database at: {db_path}')

        self.db_path = db_path
        self.is_new_db = not os.path.exists(self.db_path)
        self.db = sqlite3.connect(db_path)
        # Rows become sqlite3.Row objects so columns are addressable by name
        self.db.row_factory = sqlite3.Row

        # Mirror every executed statement into the SQL log when available
        if (hasattr(log, 'sql')):
            self.db.set_trace_callback(log.sql)

        # Make sure the settings and any overrides get logged initially
        if (not self.is_new_db):
            log.debug(pformat(self.get_settings(quiet=False)))

    def do_migrations(self):
        '''
        Initialize a brand-new database, recover downloads interrupted by a
        crash, and keep the stored schema version current.
        '''
        if (self.is_new_db):
            self.init_new_database()
            self.is_new_db = False
            log.debug(pformat(self.get_settings(quiet=False)))

        # Anything still marked in-progress at startup was killed
        # mid-download; move it to the failed table so it can be retried.
        self._begin()
        qstring = '''
            INSERT OR REPLACE INTO download_failed (
                video_id,
                last_fail_datetime,
                error_text
            ) SELECT
                video_id,
                start_datetime,
                'Server crash or unexpected termination'
            FROM download_in_progress
        '''
        self._execute(qstring)

        qstring = '''DELETE FROM download_in_progress'''
        self._execute(qstring)
        self._commit()

        # Make sure version stays up to date
        # TODO: Migrations first
        self._begin()
        qstring = '''UPDATE setting SET version = ?'''
        self._execute(qstring, [__version__])
        self._commit()

    def _begin(self):
        # Do nothing since transactions are automatic on write with sqlite3
        pass

    def _execute(self, qstring, parameters=()):
        '''
        Run one parameterized statement and return all resulting rows.

        The default was changed from a shared mutable list ([]) to an
        immutable tuple to avoid the mutable-default-argument pitfall.
        '''
        cursor = self.db.execute(qstring, parameters)
        return cursor.fetchall()

    def _commit(self):
        self.db.commit()

    def result_to_simple_type(self, result):
        '''
        Convert a query result into plain dict / list-of-dict structures.
        '''
        # Detect single-row results: a lone sqlite3.Row's elements are
        # column values, not Rows, so it maps straight to one dict
        if (len(result) > 0 and type(result[0]) != sqlite3.Row):
            return dict(result)
        # Multi-row results
        else:
            return [dict(row) for row in result]

    def init_new_database(self):
        '''
        Setup all database tables and default values using the initialization script.
        '''
        with open(get_resource_path('db/sqlite-init.sql'), mode='r') as f:
            qstring = f.read()

        self._begin()
        self.db.executescript(qstring)
        self._commit()

        self._begin()
        qstring = '''
            INSERT INTO setting (
                version,
                YDL_SERVER_PROFILE,
                YDL_SERVER_HOST,
                YDL_SERVER_PORT
            ) VALUES (?, ?, ?, ?);
        '''
        profile = get_env_override('YDL_SERVER_PROFILE', default='1')
        address = get_env_override('YDL_SERVER_HOST', default='0.0.0.0')
        port = get_env_override('YDL_SERVER_PORT', default=8080)

        # Set the default settings for a new database
        self._execute(qstring, [
            __version__,
            profile,
            address,
            port,
        ])
        self._commit()
        log.info('Initialized SQLite database')

    def insert_extractor(self, ytdl_info):
        '''
        Insert the extractor from ytdl_info if it does not exist already.

        :return extractor_db_id for the inserted or existing extractor
        '''
        self._begin()
        qstring = '''
            INSERT OR IGNORE INTO extractor (
                name,
                alt_name
            ) VALUES (?, ?);
        '''
        self._execute(qstring, [
            ytdl_info['extractor_key'],
            ytdl_info['extractor']
        ])
        self._commit()

        extractor = self.get_extractor_by_name(ytdl_info['extractor_key'])
        if (extractor is None):
            log.error('Could not retrieve extractor after insertion!')
            raise YtdlDatabaseError('Extractor insertion not found')

        return extractor['id']

    def insert_collection(self, ytdl_info, collection_type):
        '''
        Insert a new collection for the given id/extractor/type if it does not exist already.

        :return collection_db_id for the inserted or already existing collection
        '''
        self._begin()
        qstring = '''
            INSERT OR IGNORE INTO collection (
                online_id,
                online_title,
                custom_title,
                url,
                type_id,
                extractor_id
            ) VALUES (?, ?, ?, ?, ?,
                (SELECT id FROM extractor WHERE name = ?)
            );
        '''
        # Channels and playlists expose their id/title/url under
        # different ytdl_info keys
        if (collection_type == YtdlDatabase.collection.CHANNEL):
            online_id = ytdl_info['uploader_id']
            self._execute(qstring, [
                online_id,
                ytdl_info['uploader'],
                ytdl_info['uploader'],
                ytdl_info['uploader_url'],
                collection_type,
                ytdl_info['extractor_key']
            ])
        elif (collection_type == YtdlDatabase.collection.PLAYLIST):
            online_id = ytdl_info['id']
            self._execute(qstring, [
                online_id,
                ytdl_info['title'],
                ytdl_info['title'],
                ytdl_info['webpage_url'],
                collection_type,
                ytdl_info['extractor_key']
            ])
        else:
            raise YtdlDatabaseError(f'Invalid collection type: {collection_type}')
        self._commit()

        collection = self.get_collection_by_extractor_id(ytdl_info['extractor_key'], online_id)
        if (collection is None):
            log.error('Could not retrieve collection after insertion!')
            raise YtdlDatabaseError('Collection insertion not found')

        return collection['id']

    def insert_video(self, ytdl_info, format_db_id=YtdlDatabase.formats.DEFAULT):
        '''
        Insert a new video. The video should not exist already.

        :return video_db_id for the inserted video
        '''
        self._begin()
        qstring = '''
            INSERT INTO video (
                online_id,
                extractor_id,
                url,
                title,
                format_id,
                duration_s,
                upload_date,
                filepath
            )
            VALUES (?,
                (SELECT id FROM extractor WHERE name = ?),
                ?, ?, ?, ?, ?, ?)
        '''
        # Convert date to match SQLite format
        # From YYYYMMDD to YYYY-MM-DD
        upload_date = ytdl_info['upload_date']
        if (upload_date and len(upload_date) == len('YYYYMMDD')):
            upload_date = f'{upload_date[0:4]}-{upload_date[4:6]}-{upload_date[6:8]}'

        # Grab the filepath that we snuck in
        filepath = ytdl_info['___filepath']
        log.debug(f'Populated output template: {filepath}')

        self._execute(qstring, [
            ytdl_info['id'],
            ytdl_info['extractor_key'],
            ytdl_info['webpage_url'],
            ytdl_info['title'],
            format_db_id,
            ytdl_info['duration'],
            upload_date,
            filepath
        ])
        self._commit()

        self._research_insert_uploader(ytdl_info)

        video = self.get_video_by_extractor_id(ytdl_info['extractor_key'], ytdl_info['id'])
        if (video is None):
            log.error('Could not retrieve video after insertion!')
            raise YtdlDatabaseError('Video insertion not found')

        return video['id']

    def insert_video_collection_xref(self, video_id, collection_id, ordered_index=-1):
        '''
        Link one video (or a list of videos) to a collection.

        video_id may be a scalar or a list; when it is a list,
        ordered_index may be a parallel list of per-video positions or a
        scalar applied to all of them.
        '''
        self._begin()
        qstring = '''
            INSERT OR REPLACE INTO video_collection_xref (
                video_id,
                collection_id,
                ordering_index
            ) VALUES (?, ?, ?)
        '''
        if (type(video_id) == list and type(ordered_index) == list):
            if (len(video_id) != len(ordered_index)):
                raise YtdlDatabaseError('Video ID list and video index list lengths were not equal')

            self.db.executemany(qstring, zip(
                video_id,
                itertools.repeat(collection_id),
                ordered_index
            ))
        elif (type(video_id) == list):
            self.db.executemany(qstring, zip(
                video_id,
                itertools.repeat(collection_id),
                itertools.repeat(ordered_index),
            ))
        elif (type(ordered_index) == list):
            raise YtdlDatabaseError('Indices cannot be a list when video ID is scalar')
        else:
            self._execute(qstring, [
                video_id,
                collection_id,
                ordered_index
            ])
        self._commit()

    def mark_file_status(self, video_db_id, is_present):
        '''
        Record whether the video's file currently exists on disk and stamp
        the check time.
        '''
        self._begin()
        qstring = '''
            UPDATE video SET
                filepath_exists = ?,
                filepath_last_checked = datetime('now', 'localtime')
            WHERE
                id = ?
        '''
        self._execute(qstring, [
            is_present,
            video_db_id
        ])
        self._commit()

    def __del__(self):
        # getattr guard: if __init__ raised before self.db was assigned,
        # __del__ still runs and must not raise AttributeError
        db = getattr(self, 'db', None)
        if (db):
            db.close()
|
""" run direct from cli interactive w/o ee
"""
import os
import pytest
from typing import List
from .base import BaseClass
from .base import inventory_path
from .base import playbook_path
# Full CLI invocation under test
CLI_RUN = f"ansible-navigator run {playbook_path} -i {inventory_path}"

# Each tuple: (step index, keys/command to send to the TUI, human-readable
# description of the expected screen, substring(s) that must appear in the
# captured output). Steps walk into each play/task and back out again.
testdata_run: List = [
    (0, CLI_RUN, "ansible-navigator run playbook", ["100%", "SUCCESSFUL"]),
    (1, ":0", "play-1 details", ":help help"),
    (2, ":0", "task-1 details", ":help help"),
    (3, ":back", "play-1 details", ":help help"),
    (4, ":1", "play-1 task-2 details", ":help help"),
    (5, ":back", "play-1 details", ":help help"),
    (6, ":back", "all play details", ":help help"),
    (7, ":1", "play-2 details", ":help help"),
    (8, ":0", "play-2 task-1 details", ":help help"),
    (9, ":back", "play-2 details", ":help help"),
    (10, ":1", "play-2 task-2 details", ":help help"),
    (11, ":back", "play-2 details", ":help help"),
    (12, ":back", "all play details", ":help help"),
    (13, ":st", "display stream", ":help help"),
]
@pytest.mark.parametrize("index, user_input, comment, search_within_response", testdata_run)
class TestDirectRunNoee(BaseClass):
    """Run ansible-navigator interactively without an execution environment."""

    # Drive the shared BaseClass machinery in interactive (TUI) mode
    TEST_FOR_MODE = "interactive"
    # Set to True to regenerate the recorded test fixtures
    UPDATE_FIXTURES = False
|
# 8
# Draw the digit "8" on an 11x6 grid: horizontal bars at rows 0, 5 and 10
# (columns 1-4) joined by vertical bars in columns 0 and 5.
for row in range(11):
    for col in range(6):
        on_horizontal_bar = row in (0, 5, 10) and 1 <= col <= 4
        on_vertical_bar = col in (0, 5) and row not in (0, 5, 10)
        print("*" if on_horizontal_bar or on_vertical_bar else " ", end=" ")
    print()
|
"""Work-A-Holic (WAH)
for that feeling of "WAH, how did I spend 200 hours on this?"
Adapted from https://github.com/acoomans/gittime
Work in progress
"""
import argparse
import yaml
from colorama import init, Fore, Style, Back
from git import Repo
from git.objects.commit import Commit # typing
from pathlib import Path
from typing import List, Dict
import datetime
DATE_STRING = '%a %b%d %H:%M'
init()
def pretty_print_commit(
    commit : Commit,
    time_color : str,
    time_symbol : str,
    time_delta : datetime.timedelta
):
    """Print a one-line colorized summary of *commit*: short sha, timestamp,
    colored time delta, +/- line churn, and a truncated commit message."""
    num_insertions = commit.stats.total['insertions']
    num_deletions = commit.stats.total['deletions']
    # Very long gaps get no +/- marker in front of the delta
    if time_delta > datetime.timedelta(hours=10):
        time_symbol = ''
    sha_field = Fore.YELLOW + commit.hexsha[0:6]
    when_field = Style.RESET_ALL + commit.committed_datetime.strftime(DATE_STRING)
    delta_field = time_color + time_symbol + str(time_delta) + Style.RESET_ALL
    churn_field = Style.RESET_ALL + f'+{num_insertions:4d} -{num_deletions:4d}'
    summary_field = Fore.CYAN + str(commit.summary or 'no commit message')[0:60] + Style.RESET_ALL
    print(sha_field, when_field, delta_field, churn_field, summary_field)
def save(path, data):
    """Serialize *data* as YAML and write it to *path* (a pathlib.Path)."""
    serialized = yaml.dump(data)
    path.write_text(serialized)
if __name__ == '__main__':
    parser = argparse.ArgumentParser('wah',
        description = 'Gives a crappy estimation of ' \
            'how long you wasted on this crappy code ;)',
        epilog = 'Brought to you by Evan Chen'
    )
    parser.add_argument('repo', nargs='?', default = '.',
        help = 'Path of the git repository you want to run on '
            '(defaults to current directory).')
    parser.add_argument('-e', '--emails', nargs = '+', default = [],
        help = "The list of emails to use (filter by a committer).")
    # type=float added: these values feed timedelta / lines-per-minute
    # arithmetic below, where untyped (string) values would raise.
    parser.add_argument('-t', '--min-time', type = float,
        help = "Assume any interval less than this counts.")
    parser.add_argument('-T', '--max-time', type = float,
        help = "Assume any interval more than this doesn't count.")
    parser.add_argument('-l', '--min-lpm', type = float,
        help = "The minimum lines/minute expected when working over min_time.")
    parser.add_argument('-L', '--max-lpm', type = float,
        help = "Assume that any lines/minutes exceeding this counts.")
    # NOTE(review): -w/--write is accepted but never read below; the
    # threshold flags are in fact always persisted when they change.
    parser.add_argument('-w', '--write',
        help = "Store the values of the [lLtT] flags into the WAH file " \
            "so you don't have to put them again later.")
    input_group = parser.add_mutually_exclusive_group()
    input_group.add_argument('-i', '--interactive', action='store_true',
        help = "In cases not covered by heuristics, ask the user.")
    input_group.add_argument('-n', '--no', action='store_true',
        help = "In cases not covered by heuristics, assume no.")
    input_group.add_argument('-y', '--yes', action='store_true',
        help = "In cases not covered by heuristics, assume yes.")
    verbose_group = parser.add_mutually_exclusive_group()
    verbose_group.add_argument('-v', '--verbose', action = 'store_true',
        help = "Verbose mode: print commit messages and notes.")
    verbose_group.add_argument('-q', '--quiet', action = 'store_true',
        help = "Quiet mode: don't print stuff if not necessary.")
    args = parser.parse_args()

    committers : List[str] = args.emails
    repo = Repo(args.repo, search_parent_directories=True)
    assert isinstance(repo.git_dir, str)
    git_dir_path = Path(repo.git_dir)
    save_path = git_dir_path / 'wah'

    # NOTE(review): branch name 'main' is hard-coded; repos whose default
    # branch is 'master' will fail here.
    commits = [commit for commit in repo.iter_commits('main') \
        if (not committers) or (commit.committer.email in committers)]
    commits.sort(key = lambda commit : commit.committed_datetime)

    # read wah data (thresholds + per-commit decisions from earlier runs)
    needs_save = False
    if save_path.exists():
        data = yaml.load(save_path.read_text(), Loader=yaml.SafeLoader)
    else:
        data = {
            'min_hours' : None,
            'max_hours' : None,
            'min_lpm' : None,
            'max_lpm' : None,
            'decision' : {}
        }
        needs_save = True

    # BUG FIX: each flag now updates its own entry. Previously --max-time
    # read the nonexistent args.max_hours attribute (AttributeError), and
    # --min-lpm / --max-lpm were only stored when --min-time was passed.
    if args.min_time is not None:
        data['min_hours'] = args.min_time
        needs_save = True
    if args.max_time is not None:
        data['max_hours'] = args.max_time
        needs_save = True
    if args.min_lpm is not None:
        data['min_lpm'] = args.min_lpm
        needs_save = True
    if args.max_lpm is not None:
        data['max_lpm'] = args.max_lpm
        needs_save = True
    if needs_save is True:
        save(save_path, data)

    # Fall back to defaults for any threshold still unset
    min_hours : float = _ if (_ := data['min_hours']) is not None else 0.75
    max_hours : float = _ if (_ := data['max_hours']) is not None else 4.0
    min_lpm : float = _ if (_ := data['min_lpm']) is not None else 0.1
    max_lpm : float = _ if (_ := data['max_lpm']) is not None else 1
    # BUG FIX: setdefault instead of get, so decisions recorded below are
    # always part of `data` and survive the save() calls.
    decision : Dict[str, bool] = data.setdefault('decision', {})  # hexsha -> true or false

    time = datetime.timedelta(hours = 0)
    if args.verbose:
        print('Root commit was at ' \
            + commits[0].committed_datetime.strftime(DATE_STRING))

    # Walk consecutive commit pairs and decide, per gap, whether it counts
    # as work time: remembered decision > short gap > long gap > lpm
    # heuristics > (interactive / --yes / --no) fallback.
    for i in range(len(commits)-1):
        a = commits[i]
        b = commits[i+1]
        delta : datetime.timedelta = b.committed_datetime - a.committed_datetime
        hours = max(delta.total_seconds() / 3600, 1.0/3600)
        minutes = hours * 60
        lines_per_minute = b.stats.total['lines'] / minutes
        if b.hexsha in decision:
            # A previous run already decided this gap
            if decision[b.hexsha] is True:
                time += delta
                time_color = Back.GREEN + Fore.WHITE
                time_symbol = '+'
            else:
                time_color = Back.RED + Fore.WHITE
                time_symbol = ' '
        elif delta < datetime.timedelta(hours=min_hours):
            # Short gap: always counts
            time += delta
            time_color = Fore.GREEN
            time_symbol = '+'
        elif delta > datetime.timedelta(hours=max_hours):
            # Long gap: never counts
            time_color = Fore.RED
            time_symbol = ' '
        elif lines_per_minute > max_lpm:
            # High churn rate suggests continuous work
            time += delta
            time_color = Style.BRIGHT + Fore.GREEN
            time_symbol = '+'
        elif lines_per_minute < min_lpm:
            time_color = Style.BRIGHT + Fore.RED
            time_symbol = ' '
        else:
            # Ambiguous gap: defer to flags or ask the user
            if args.yes:
                this_decision = True
            elif args.no:
                this_decision = False
            else:
                print("Decision needed...\n")
                if not args.verbose:
                    # need to print some context
                    print("Before, we had:")
                    pretty_print_commit(a,
                        '',
                        ' ',
                        a.committed_datetime - commits[i-1].committed_datetime
                    )
                    print("")
                print(f"The next few commits are:")
                for j in range(i+1, min(i+5, len(commits))):
                    c = commits[j]
                    pretty_print_commit(c,
                        Fore.LIGHTYELLOW_EX,
                        ' ',
                        c.committed_datetime - commits[j-1].committed_datetime
                    )
                print(f'...... starting {delta} later.\n')
                print(f"This commit had {lines_per_minute:.5f} lines per minute.\n")
                while (response := input('Is this part of the previous session?' \
                        ' [y/n] ').lower().strip()[0:1]) not in ('y', 'n'):
                    pass
                print('-'*60)
                this_decision = (response == 'y')
            decision[b.hexsha] = this_decision
            if this_decision is True:
                time += delta
                time_color = Fore.LIGHTGREEN_EX
                time_symbol = '+'
            else:
                time_color = Fore.LIGHTMAGENTA_EX
                time_symbol = ' '
            save(save_path, data)
            # commit the new change immediately
        if args.verbose:
            pretty_print_commit(b, time_color, time_symbol, delta)

    # Total counted time, in hours
    print(f"{time.total_seconds() / 3600:.2f}")
|
from importlib import import_module
from typing import Optional
from fabric import Task
from fabric.connection import Connection
from patchwork.files import directory, exists
import plush.fabric_commands
from plush.fabric_commands.git import clone
from plush.fabric_commands.permissions import set_permissions_file
from plush.fabric_commands.ssh_key import create_key
from plush.oauth_flow import verify_access_token
from plush.repo_keys import add_repo_key
from .deploy import checkout_branch, deploy, get_secret_repo_branch, get_secret_repo_dir
from .deploy import get_secret_repo_name, get_repo_dir, WEBADMIN_GROUP
REPO_FULL_NAME = 'kbarnes3/BaseDjangoAngular'
@Task
def setup_user(conn, user, disable_sudo_passwd=False, set_public_key_file=None):
    """Create/update a webadmin user with sudo rights, install their SSH
    public key, and ensure a matching PostgreSQL superuser exists."""
    messages = plush.fabric_commands.prepare_user(
        conn,
        user,
        WEBADMIN_GROUP,
        add_sudo=True,
        no_sudo_passwd=disable_sudo_passwd)
    add_authorized_key(conn, user, set_public_key_file)
    # Install PostgreSQL first if createuser is not available yet
    if not exists(conn, '/usr/bin/createuser'):
        plush.fabric_commands.install_packages(conn, ['postgresql'])
    # Query as the postgres superuser; a '1' in stdout means the role exists
    matching_user_count = conn.sudo(
        "psql postgres -tAc \"SELECT 1 FROM pg_roles WHERE rolname='{0}'\"".format(user),
        user='postgres').stdout
    if '1' not in matching_user_count:
        conn.sudo('createuser -s {0}'.format(user), user='postgres')
    # Surface any notices produced while preparing the user
    if messages:
        print("========================================")
        print(messages)
        print("========================================")
@Task
def add_authorized_key(conn, user, set_public_key_file):
    """Install the SSH public key from *set_public_key_file* (if given)
    into *user*'s authorized keys on the remote host."""
    if not set_public_key_file:
        return
    with open(set_public_key_file, 'r') as public_key:
        key_contents = public_key.read()
    plush.fabric_commands.add_authorized_key(conn, user, key_contents)
@Task
def disable_ssh_passwords(conn):
    """Turn off SSH password authentication, leaving key-based auth only."""
    sshd_config = '/etc/ssh/sshd_config'
    # Drop any existing PasswordAuthentication directives, then append ours
    conn.sudo("sed -i '/^ *PasswordAuthentication/d' {0}".format(sshd_config))
    conn.sudo('echo "PasswordAuthentication no" | sudo tee -a {0}'.format(sshd_config), pty=True)
    banner = "========================================"
    notices = (
        "Password authentication disabled for SSH.",
        "Restart the SSH daemon by logging into the console and running:",
        "sudo service ssh restart",
        "Alternatively, reboot the server if console access isn't readily available.",
    )
    print(banner)
    for notice in notices:
        print(notice)
    print(banner)
@Task
def setup_server(conn, setup_wins=False):
    """Provision the base server: apt repos, Node.js, the Python/PostgreSQL/
    nginx/uwsgi stack, web directories, and nginx defaults."""
    conn.sudo('add-apt-repository universe')
    conn.sudo('apt-get update')
    _setup_node(conn)
    base_packages = [
        'git',
        'python3-venv',
        'postgresql',
        'python3-psycopg2',
        'nginx',
        'uwsgi',
        'uwsgi-plugin-python3',
    ]
    plush.fabric_commands.install_packages(conn, base_packages)
    if setup_wins:
        _setup_wins(conn)
    conn.sudo('mkdir -p /etc/nginx/ssl')
    # Web roots owned by the webadmin group so deploys don't need root
    directory(conn, '/var/www', group=WEBADMIN_GROUP, sudo=True)
    directory(conn, '/var/www/python', group=WEBADMIN_GROUP, sudo=True)
    # Ensure a 'root' PostgreSQL superuser exists ('1' in stdout = present)
    matching_user_count = conn.run(
        "psql postgres -tAc \"SELECT 1 FROM pg_roles WHERE rolname='root'\"").stdout
    if '1' not in matching_user_count:
        conn.run('createuser -s root')
    # Shared uwsgi socket dir; mode 777 so app users can create sockets
    directory(conn, '/var/uwsgi', user='root', group='root', mode='777', sudo=True)
    # Remove the distro default site so our vhosts take over
    default_site = '/etc/nginx/sites-enabled/default'
    if exists(conn, default_site):
        conn.sudo('rm {0}'.format(default_site))
    conn.sudo('/etc/init.d/nginx start')
def _setup_node(conn: Connection):
    """Install Node.js 12.x from the NodeSource apt repository."""
    # pty=True: the NodeSource bootstrap script expects a terminal
    conn.sudo('curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -', pty=True)
    conn.sudo('apt-get update')
    plush.fabric_commands.install_packages(conn, ['nodejs'])
def _setup_wins(conn: Connection):
    """Enable WINS/NetBIOS name resolution via Samba's winbind."""
    wins_packages = [
        'samba',
        'smbclient',
        'winbind',
    ]
    plush.fabric_commands.install_packages(conn, wins_packages)
    # Add 'wins' to the NSS hosts lookup order
    conn.sudo('sed -i s/\'hosts:.*/hosts: files dns wins/\' /etc/nsswitch.conf')
    resolved_config = '/etc/systemd/resolved.conf'
    # Replace any existing Domains entry with our search domain
    conn.sudo("sed -i '/^ *Domains/d' {0}".format(resolved_config))
    conn.sudo('echo "Domains=localdomain" | sudo tee -a {0}'.format(resolved_config), pty=True)
    conn.sudo('service systemd-resolved restart')
@Task
def setup_deployment(conn, config, branch=None, secret_branch=None):
    """Provision one named deployment: create its PostgreSQL database and
    role, clone the main and secret repos, build the venv, install the
    uwsgi systemd units, and run a first deploy."""
    django_settings = import_module('back.newdjangosite.settings_{0}'.format(config))
    db_settings = django_settings.DATABASES['default']
    db_name = db_settings['NAME']
    db_user = db_settings['USER']
    db_password = db_settings['PASSWORD']
    repo_dir = get_repo_dir(config)
    # database_created = False
    # '1' in stdout means the database already exists
    database_exists_count = conn.run(
        "psql postgres -tAc \"SELECT 1 FROM pg_catalog.pg_database WHERE datname='{0}'\"".format(db_name)).stdout
    if '1' not in database_exists_count:
        conn.run(
            ('createdb '
             '--encoding=UTF8 '
             '--locale=en_US.UTF-8 '
             '--owner=postgres '
             '--template=template0 {0}').format(db_name))
        # database_created = True
    matching_user_count = conn.run(
        "psql postgres -tAc \"SELECT 1 FROM pg_roles WHERE rolname='{0}'\"".format(db_user)).stdout
    if '1' not in matching_user_count:
        conn.run('createuser -s {0}'.format(db_user))
    # (Re)set the role password so it always matches the Django settings
    conn.run('psql -d postgres -c \"ALTER ROLE {0} WITH ENCRYPTED PASSWORD \'{1}\';\"'.format(
        db_user,
        db_password))
    _setup_main_repo(conn, repo_dir, config, branch)
    _setup_secret_repo(conn, config, secret_branch)
    with conn.cd(repo_dir):
        # BUG FIX: patchwork's exists() takes the connection as its first
        # argument; the original exists('venv') would raise at runtime.
        if not exists(conn, 'venv'):
            conn.run('python3 -m venv --system-site-packages venv')
    global_dir = '{0}/config/ubuntu-18.04/global'.format(repo_dir)
    with conn.cd(global_dir):
        uwsgi_socket = '/etc/systemd/system/uwsgi-app@.socket'
        uwsgi_service = '/etc/systemd/system/uwsgi-app@.service'
        if not exists(conn, uwsgi_socket):
            conn.sudo('cp uwsgi-app@.socket {0}'.format(uwsgi_socket))
            set_permissions_file(conn, uwsgi_socket, 'root', 'root', '644')
        # BUG FIX: was exists(uwsgi_service), missing the connection
        if not exists(conn, uwsgi_service):
            conn.sudo('cp uwsgi-app@.service {0}'.format(uwsgi_service))
            set_permissions_file(conn, uwsgi_service, 'root', 'root', '644')
    deploy(conn, config, branch, secret_branch)
    # if database_created:
    #     with cd(repo_dir):
    #         run('venv/bin/python web/manage_{0}.py createsuperuser'.format(config))
def _setup_main_repo(conn: Connection, repo_dir: str, config: str, branch: Optional[str] = None):
    """Clone the main application repo (if needed) and check out the
    branch appropriate for *config* (or the explicit *branch* override)."""
    _setup_repo(conn, repo_dir, REPO_FULL_NAME)
    checkout_branch(conn, repo_dir, config, branch)
def _setup_secret_repo(conn: Connection, config: str, secret_branch_override: Optional[str] = None):
    """Clone the per-config secrets repo (if needed) and check out its
    configured branch, honoring an explicit override."""
    secret_repo_dir = get_secret_repo_dir(config)
    secret_repo_name = get_secret_repo_name(config)
    secret_branch = get_secret_repo_branch(config, secret_branch_override)
    _setup_repo(conn, secret_repo_dir, secret_repo_name)
    checkout_branch(conn, secret_repo_dir, config, secret_branch)
def _setup_repo(conn: Connection, repo_dir: str, repo_name: str):
    """Ensure *repo_dir* holds a clone of *repo_name*, creating and
    registering a GitHub deploy key on the first clone."""
    directory(conn, repo_dir, group=WEBADMIN_GROUP, sudo=True)
    # Only clone when no .git directory exists yet
    if not exists(conn, '{0}/.git'.format(repo_dir)):
        if not verify_access_token():
            raise Exception("Unable to access GitHub account. Run 'auth' to fix this")
        create_key(conn, repo_name, WEBADMIN_GROUP)
        add_repo_key(conn, repo_name)
        clone(conn, repo_name, repo_dir, skip_strict_key_checking=True)
|
from PyEasyQiwi.qiwi_service.recourse import * |
from main.utils.celery_tasks import celery_analyze_shots
from vision_video_analyzer.settings import MEDIA_ROOT
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.utils.safestring import mark_safe
from django.contrib import messages
from celery import chain
from main.models import Video
import os
import shutil
import json
from main.utils.analyzer import analyze_video, get_thumbnail, thumbnail_checker
from main.utils.shots_analyzer import analyze_shots, get_background, get_contrast, get_shot_screenshot, get_shots, get_shots_length
# Home view
def home(response):
    """Render the home page and handle video uploads.

    On POST, validates the uploaded file (present, not WMV, a video MIME
    type), stores it as a Video model instance, runs the analysis pipeline,
    and reports success/failure via Django messages.
    """
    # Make sure the media directory tree exists (makedirs also creates
    # the parent 'media' directory; exist_ok makes this idempotent)
    for subdir in ('thumbnails', 'videos', 'shots', 'visualizations'):
        os.makedirs(os.path.join('media', subdir), exist_ok=True)
    # Checks the validity of video upload
    if response.method == "POST":
        if response.FILES.get('document') is None:
            print("No video uploaded")
            messages.error(response, "Please choose a video to upload!")
            return render(response, "main/home.html", {})
        uploaded_file = response.FILES['document']
        if uploaded_file.name.endswith(".wmv"):
            print("Upload unsuccessful")
            messages.error(
                response,
                "WMV is currently not supported. Please upload your video in another format."
            )
            return render(response, "main/home.html", {})
        if uploaded_file.content_type[:5] == "video":
            print("upload successful")
            # Spaces in filenames break the generated media URLs
            uploaded_file.name = uploaded_file.name.replace(" ", "_")
            video = Video(name=uploaded_file.name,
                          video=uploaded_file,
                          uploader=response.user,
                          thumbnail=get_thumbnail(uploaded_file))
            video.save()
            analyze_video(video, uploaded_file)
            thumbnail_checker(video)
            video.save()
            messages.success(
                response,
                mark_safe(
                    "The video was uploaded successfully. Check out the video <a href='/videos/"
                    + str(video.id) + "'>here</a>"))
        else:
            print("Upload unsuccessful")
            messages.error(response, "The uploaded file is not a video!")
    return render(response, "main/home.html", {})
# Videos view
def videos(response):
    """List all videos uploaded by the current user."""
    # Materialize the queryset directly instead of the append loop;
    # the unused MEDIA_ROOT path variable was removed.
    video_list = list(Video.objects.filter(uploader=response.user))
    context = {"videos": video_list}
    return render(response, "main/videos.html", context)
# Video view
def video(response, id):
    """Show one video; `shots` flags whether shot analysis output exists."""
    vid = Video.objects.filter(id=id).first()
    # isdir() already returns a bool -- no `True if ... else False` needed
    shots = os.path.isdir('./media/shots/' + str(vid))
    context = {"video": vid, "shots": shots}
    return render(response, "main/video.html", context)
def delete(request, id):
    """Confirm (GET) and perform (POST) deletion of a video and all of its
    derived artifacts: thumbnail, shot images, and visualizations."""
    vid = get_object_or_404(Video, id=id)
    context = {"video": vid}
    if request.method == "POST":
        vid.video.delete()
        # Only clean up the derived files once the source file is gone
        if not os.path.isfile('./media/videos/' + str(vid)):
            thumbnail_path = './media/thumbnails/' + str(vid) + ".png"
            if os.path.isfile(thumbnail_path):
                os.remove(thumbnail_path)
            if os.path.isdir('./media/shots/' + str(vid)):
                shutil.rmtree('./media/shots/' + str(vid))
            if os.path.isdir('./media/visualizations/' + str(vid)):
                shutil.rmtree('./media/visualizations/' + str(vid))
            vid.delete()
            return HttpResponseRedirect("/videos")
    return render(request, "main/delete.html", context)
def shots(response, id):
    """Show per-shot analysis for a video.

    On the first POST the (expensive) shot analysis runs and its six result
    lists are cached as JSON; subsequent requests read from that cache.
    """
    vid = Video.objects.filter(id=id).first()
    video = str(vid.video)
    shots_output_path = f'{MEDIA_ROOT}/shots/{vid.name}/'
    json_path = os.path.join(shots_output_path, vid.name + ".json")
    if response.method == "POST" and not os.path.isdir('./media/shots/' + str(vid)):
        print("Analyzing shots")
        result = celery_analyze_shots(video)
        shot_exposures = result[0]
        shot_lengths = result[1]
        shot_contrasts = result[2]
        shot_background_colors = result[3]
        shot_screenshots = result[4]
        shot_recommendations = result[5]
        data_set = {
            "exposures": shot_exposures,
            "lengths": shot_lengths,
            "contrasts": shot_contrasts,
            "backgrounds": shot_background_colors,
            "screenshots": shot_screenshots,
            "recommendations": shot_recommendations
        }
        # Cache the analysis so later requests can skip the expensive step
        with open(json_path, 'w') as json_file:
            json.dump(data_set, json_file)
    else:
        # NOTE(review): a GET before any analysis has run will fail here
        # because the JSON cache does not exist yet -- confirm intended flow.
        with open(json_path, 'r') as json_file:
            data = json.load(json_file)
        # Direct list copies replace the original per-item append loops
        shot_exposures = list(data['exposures'])
        shot_lengths = list(data['lengths'])
        shot_contrasts = list(data['contrasts'])
        shot_background_colors = list(data['backgrounds'])
        shot_screenshots = list(data['screenshots'])
        shot_recommendations = list(data['recommendations'])
    context = {
        "video":
        vid,
        "data":
        zip(shot_exposures, shot_lengths, shot_contrasts,
            shot_background_colors, shot_screenshots, shot_recommendations)
    }
    return render(response, "main/shots.html", context)
|
def keep_odds(iterable):
    """Return a list of the odd elements of *iterable*, preserving order."""
    odds = []
    for value in iterable:
        if value % 2 == 1:
            odds.append(value)
    return odds
|
# to be executed in the fleur/ directory
from glob import glob
# Collect the Fortran sources (both .f90 and .F90 extensions).
# NOTE(review): glob("**/*.f90") without recursive=True matches only one
# directory level deep -- confirm that is the intended search depth.
files = []
files.extend(glob("**/*.f90"))
files.extend(glob("**/*.F90"))
# Every CMakeLists.txt that might reference those sources
cmake_files = glob("**/CMakeLists.txt")
def rm_comments(line):
    """Return *line* truncated at the first '#' (CMake comment marker).

    Lines without a '#' are returned unchanged.
    """
    code_part, _, _ = line.partition("#")
    return code_part
def in_cmake(cmake_files, file):
    """Return True if *file* is mentioned, outside comments, in any of
    the given CMakeLists.txt files."""
    for cmake_file in cmake_files:
        with open(cmake_file) as handle:
            if any(file in rm_comments(text) for text in handle):
                return True
    return False
# Report every Fortran source file that no CMakeLists.txt references.
for source_file in files:
    if not in_cmake(cmake_files, source_file):
        print(source_file)
import time
from dagster import In, Out, Output, graph, op
def nonce_op(name, n_inputs, n_outputs):
    """Creates an op with the given number of (meaningless) inputs and outputs.

    Config controls the behavior of the nonce op."""

    @op(
        name=name,
        ins={"input_{}".format(i): In() for i in range(n_inputs)},
        out={"output_{}".format(i): Out() for i in range(n_outputs)},
    )
    def op_fn(context, **_kwargs):
        # Spew log messages at assorted levels while burning ~4s of wall time
        for i in range(200):
            time.sleep(0.02)
            if i % 1000 == 420:
                # NOTE(review): unreachable for range(200) (i % 1000 == i,
                # and i never reaches 420); presumably a leftover from a
                # larger loop bound -- confirm.
                context.log.error("Error message seq={i} from op {name}".format(i=i, name=name))
            elif i % 100 == 0:
                context.log.warning("Warning message seq={i} from op {name}".format(i=i, name=name))
            elif i % 10 == 0:
                context.log.info("Info message seq={i} from op {name}".format(i=i, name=name))
            else:
                context.log.debug("Debug message seq={i} from op {name}".format(i=i, name=name))
        # Emit a dummy payload on every declared output
        for i in range(n_outputs):
            yield Output(value="foo", output_name="output_{}".format(i))

    return op_fn
@graph
def log_spew():
    """Wire aliased nonce ops into a small diamond-shaped DAG.

    op_a fans out to op_b and op_c; op_d joins op_b and op_c's first
    output; op_e also consumes op_c's first output; op_f joins op_d and
    op_e; op_g consumes op_f and produces nothing.
    """
    # Reusable op factories (aliased below to get distinct node names)
    one_in_one_out = nonce_op("one_in_one_out", 1, 1)
    two_in_one_out = nonce_op("two_in_one_out", 2, 1)

    op_a = nonce_op("no_in_two_out", 0, 2).alias("op_a")
    op_b = one_in_one_out.alias("op_b")
    op_c = nonce_op("one_in_two_out", 1, 2).alias("op_c")
    op_d = two_in_one_out.alias("op_d")
    op_e = one_in_one_out.alias("op_e")
    op_f = two_in_one_out.alias("op_f")
    op_g = nonce_op("one_in_none_out", 1, 0).alias("op_g")

    # Build the dependency graph by threading outputs into inputs
    a_0, a_1 = op_a()
    b = op_b(input_0=a_0)
    c_0, _c_1 = op_c(input_0=a_1)
    d = op_d(input_0=b, input_1=c_0)
    e = op_e(input_0=c_0)
    f = op_f(input_0=d, input_1=e)
    op_g(input_0=f)
# Executable job built from the graph, used to demo log rendering in the UI
log_spew_job = log_spew.to_job(
    description="Demo job that spits out different types of log messages to the event log."
)
|
import random
from app.genetic.genes.fundamental.yy_change.gene_yy_change import GeneYYChange
class PSyy(GeneYYChange):
    """Year-over-year change gene for the price-to-sales (P/S) indicator.

    On construction, the comparison threshold is drawn uniformly at random
    from the interval [0.8, 1.8].
    """

    def __init__(self):
        super().__init__()
        self.indicator = "P/S"  # indicator key used by the base class
        self.compared_value = random.uniform(0.8, 1.8)
|
# -*- coding: utf-8 -*-
# Spider Lv4
# Author: Yue H.W. Luo
# Mail: yue.rimoe@gmail.com
# License : http://www.apache.org/licenses/LICENSE-2.0
# More detail: https://blog.rimoe.xyz/2019/03/14/post01/
"""
## NOTE
Created on Mon Mar 11 19:07:03 2019
This programme is used to get data from Tencent street view.
Get the svid and location of a street view point.
"""
import os
import time
import json
import urllib2
# Work inside the local data directory (must already exist).
os.chdir('data')
# key of Tencent map api
# Five keys are rotated (keys[i % 5]) to spread the request quota.
keys = ['K24BZ-SSSSS-YYYYY-SSSSS-UUUUU-23333',
        'DK6BZ-SSSSS-YYYYY-SSSSS-UUUUU-23333',
        'ZDSBZ-SSSSS-YYYYY-SSSSS-UUUUU-23333',
        'PVPBZ-SSSSS-YYYYY-SSSSS-UUUUU-23333',
        'E35BZ-SSSSS-YYYYY-SSSSS-UUUUU-23333']
# getpano endpoint: finds the panorama within 100 m of a location.
url = 'https://apis.map.qq.com/ws/streetview/v1/getpano?location' +\
    '={},{}&radius=100&key={}'
# Resume marker: number of input lines processed by a previous run.
done_record = 0
i = 0
done = []    # raw API responses that contained a valid panorama
not_ok = []  # [lng, lat, status] rows for failed requests
# X/Y:GCJ02, X_/Y_:WGS84
# Walk the input point list; columns 2/3 hold the lat/lng (GCJ-02) values.
with open('listnew.csv', 'r') as rf:
    for line in rf:
        if i < done_record:
            # BUG FIX: advance the counter while skipping already-processed
            # lines. Previously `continue` left i unchanged, so whenever
            # done_record > 0 the condition stayed true and EVERY line in the
            # file was skipped.
            i += 1
            continue
        row = line.strip().split(',')
        # Query the pano API, rotating through the five keys.
        web = urllib2.urlopen(url.format(row[3], row[2], keys[i % 5]))
        data = web.read()
        data = json.loads(data)
        if data['status'] == 0:
            # save valid data
            done.append(data)
            print("done %s" % len(done))
        elif data['status'] == 346:
            # pass if no street view data
            print("data pass%s" % i)
        else:
            # error log
            not_ok.append([row[3], row[2], data['status']])
            print("error %s" % data['status'])
        i += 1
        time.sleep(0.2)  # throttle to stay under the API rate limit
with open('meta_raw_done.json', 'w') as wf:
    json.dump(done, wf)
with open('meta_raw_not_ok.json', 'w') as wf:
    json.dump(not_ok, wf)
# %%
# Data clean-up: drop duplicate svids and emit a flat csv list.
# (Alternative: reload the raw dump from disk -- kept for reference.)
# import os
# import json
# with open('meta_raw_done.json', 'r') as rf:
#     raw = json.load(rf)
clean = {}
raw = done
for record in raw:
    detail = record['detail']
    location = detail['location']
    clean[detail['id']] = [location['lng'], location['lat']]
with open('all_img_list_gcj.csv', 'w') as wf:
    for svid in clean:
        lng, lat = clean[svid]
        wf.write(','.join([svid, str(lng), str(lat)]) + '\n')
|
from worker import Worker
import json
import pyspark as sp
import logging
logger = logging.getLogger()
class SparkWorker(Worker):
    """Worker that executes submitted exercises on a local Spark context."""

    def __init__(self):
        self.context = sp.SparkContext(appName="SparkWorker")
        # Keep Spark's own logging quiet; we only surface our results.
        self.context.setLogLevel("ERROR")

    def preparar_codigo(self, codigo, num_archivos):
        """Wrap user code into a `def programa(context, datos...)` function.

        The user's code is indented one tab and the function takes one
        `datosN` parameter per input file (a single `datos` parameter when
        there is only one file). The wrapped function returns `datos`.
        """
        if num_archivos > 1:
            args_archivos = ["datos" + str(i + 1)
                             for i in range(num_archivos)]
        else:
            args_archivos = ["datos"]
        args_archivos = ",".join(args_archivos)
        codigo = ["\t" + linea for linea in codigo.split("\n")]
        codigo = (["def programa(context, {}):".format(args_archivos)]
                  + codigo
                  + ["\treturn datos"])
        return "\n".join(codigo)

    def agregar_resultado(self, output="", error=""):
        """Append one {output, error} result record."""
        self.resultados.append({"output": output, "error": error})

    def cargar_archivo(self, archivo):
        """Load a JSON file and parallelize its contents as an RDD."""
        with open(archivo, "r") as f:
            datos = json.loads(f.read())
        return self.context.parallelize(datos)

    def correr_trabajo(self, ejercicios):
        """Run each exercise, collecting its output or error message."""
        self.resultados = []
        modulo = None
        for ejercicio in ejercicios:
            codigo = ejercicio["codigo"]
            if not Worker.codigo_es_seguro(codigo):
                self.agregar_resultado(
                    error = "Código no seguro o con errores de sintaxis."
                )
                continue
            # Prepare the data and the code.
            archivos = ejercicio["archivos_entrada"].split(",")
            datos = [self.cargar_archivo("datos/"+ar) for ar in archivos]
            codigo = self.preparar_codigo(codigo, len(archivos))
            # Load the code as a module and run it.
            modulo = Worker.cargar_como_modulo(codigo)
            try:
                # Trace the call about to be made (kept for diagnostics).
                logger.error("modulo.programa(self.context, {})".format(
                    ",".join(["datos[{}]".format(i) for i in range(len(datos))])
                ))
                # SECURITY/IDIOM FIX: call the generated function directly
                # with argument unpacking instead of building a source string
                # and eval()-ing it.
                output = modulo.programa(self.context, *datos)
                self.agregar_resultado(
                    output = json.dumps(output.collect())
                )
            except Exception as e:
                self.agregar_resultado(
                    error = "Error en el código: " + str(e)
                )
        return self.resultados
|
# See http://maggotroot.blogspot.ch/2013/11/constrained-linear-least-squares-in.html for more info
"""
A simple library to solve constrained linear least squares problems
with sparse and dense matrices. Uses cvxopt library for
optimization
"""
__author__ = "Valeriy Vishnevskiy"
__email__ = "valera.vishnevskiy@yandex.ru"
__version__ = "1.0"
__date__ = "22.11.2013"
__license__ = "WTFPL"
import itertools
import numbers
from scipy import sparse
import numpy as np
from cvxopt import solvers, matrix, spmatrix, mul
def scipy_sparse_to_spmatrix(A):
    """Convert a scipy sparse matrix into a CVXOPT spmatrix."""
    as_coo = A.tocoo()
    return spmatrix(as_coo.data, as_coo.row.tolist(), as_coo.col.tolist())
def spmatrix_sparse_to_scipy(A):
    """Convert a CVXOPT spmatrix into a scipy COO matrix."""
    values = np.array(A.V).squeeze()
    row_idx = np.array(A.I).squeeze()
    col_idx = np.array(A.J).squeeze()
    return sparse.coo_matrix((values, (row_idx, col_idx)))
def sparse_None_vstack(A1, A2):
    """Vertically stack sparse blocks, treating a None top block as empty."""
    return A2 if A1 is None else sparse.vstack([A1, A2])
def numpy_None_vstack(A1, A2):
    """Vertically stack dense blocks, treating a None top block as empty."""
    return A2 if A1 is None else np.vstack([A1, A2])
def numpy_None_concatenate(A1, A2):
    """Concatenate arrays, treating a None first operand as empty."""
    return A2 if A1 is None else np.concatenate([A1, A2])
def get_shape(A):
    """Return (rows, cols); CVXOPT exposes it as .size, numpy/scipy as .shape."""
    return A.size if isinstance(A, spmatrix) else A.shape
def numpy_to_cvxopt_matrix(A):
    """Coerce numpy / scipy inputs into CVXOPT (sp)matrix form.

    None and objects already in CVXOPT form pass through unchanged.
    """
    if A is None:
        return A
    if is_sparse(A):
        # CVXOPT spmatrix inputs are already in the right form.
        return scipy_sparse_to_spmatrix(A) if isinstance(A, sparse.spmatrix) else A
    if not isinstance(A, np.ndarray):
        return A
    # 1-D vectors become n x 1 columns; everything is double typed.
    target_shape = (A.shape[0], 1) if A.ndim == 1 else A.shape
    return matrix(A, target_shape, "d")
def cvxopt_to_numpy_matrix(A):
    """Convert CVXOPT objects back to scipy/numpy; None passes through."""
    if A is None:
        return A
    if isinstance(A, spmatrix):
        return spmatrix_sparse_to_scipy(A)
    # Dense CVXOPT matrices and plain sequences both go through np.array.
    return np.array(A).squeeze()
def as_double(A):
    """Coerce numpy arrays and scalars to double precision; pass others through."""
    if isinstance(A, np.ndarray):
        return A.astype(np.double)
    if isinstance(A, numbers.Number):
        return np.array([A], dtype=np.double)
    return A
def is_sparse(A):
    """Return True if A is a scipy sparse matrix or a CVXOPT spmatrix."""
    return sparse.issparse(A) or isinstance(A, spmatrix)
def lsqlin(
    C,
    d,
    reg=0,
    A=None,
    b=None,
    Aeq=None,
    beq=None,
    lb=None,
    ub=None,
    x0=None,
    opts=None,
):
    """
    Solve linear constrained l2-regularized least squares. Can
    handle both dense and sparse matrices. Matlab's lsqlin
    equivalent. It is actually wrapper around CVXOPT QP solver.
        min_x ||C*x - d||^2_2 + reg * ||x||^2_2
        s.t.  A * x <= b
              Aeq * x = beq
              lb <= x <= ub
    Input arguments:
        C is n x m dense or sparse matrix
        d is n x 1 dense matrix
        reg is regularization parameter
        A is p x n dense or sparse matrix
        b is p x 1 dense matrix
        Aeq is q x n dense or sparse matrix
        beq is q x 1 dense matrix
        lb is n x 1 matrix or scalar
        ub is n x 1 matrix or scalar
        x0 is an optional initial guess handed to the solver
        opts is an optional dict merged into cvxopt.solvers.options
    Output arguments:
        Return dictionary, the output of CVXOPT QP.
    """
    # Normalize every input to double precision first.
    C = as_double(C)
    d = as_double(d)
    A = as_double(A)
    b = as_double(b)
    Aeq = as_double(Aeq)
    beq = as_double(beq)
    lb = as_double(lb)
    ub = as_double(ub)
    x0 = as_double(x0)
    sparse_case = False
    if is_sparse(A):
        sparse_case = True
        # We need A to be scipy sparse, as I couldn't find how
        # CVXOPT spmatrix can be vstacked
        if isinstance(A, spmatrix):
            A = spmatrix_sparse_to_scipy(A)
    C = numpy_to_cvxopt_matrix(C)
    d = numpy_to_cvxopt_matrix(d)
    # QP form of the least-squares objective: Q = C'C, q = -C'd.
    Q = C.T * C
    q = -d.T * C
    nvars = C.size[1]
    if reg > 0:
        # Tikhonov regularization adds reg * I to the Hessian.
        if sparse_case:
            I = scipy_sparse_to_spmatrix(sparse.eye(nvars, nvars, format="coo"))
        else:
            I = matrix(np.eye(nvars), (nvars, nvars), "d")
        Q = Q + reg * I
    lb = cvxopt_to_numpy_matrix(lb)
    ub = cvxopt_to_numpy_matrix(ub)
    b = cvxopt_to_numpy_matrix(b)
    if lb is not None:  # Modify 'A' and 'b' to add lb inequalities
        if lb.size == 1:
            lb = np.repeat(lb, nvars)
        if sparse_case:
            lb_A = -sparse.eye(nvars, nvars, format="coo")
            A = sparse_None_vstack(A, lb_A)
        else:
            lb_A = -np.eye(nvars)
            A = numpy_None_vstack(A, lb_A)
        b = numpy_None_concatenate(b, -lb)
    if ub is not None:  # Modify 'A' and 'b' to add ub inequalities
        if ub.size == 1:
            ub = np.repeat(ub, nvars)
        if sparse_case:
            ub_A = sparse.eye(nvars, nvars, format="coo")
            A = sparse_None_vstack(A, ub_A)
        else:
            ub_A = np.eye(nvars)
            A = numpy_None_vstack(A, ub_A)
        b = numpy_None_concatenate(b, ub)
    # Convert data to CVXOPT format
    A = numpy_to_cvxopt_matrix(A)
    Aeq = numpy_to_cvxopt_matrix(Aeq)
    b = numpy_to_cvxopt_matrix(b)
    beq = numpy_to_cvxopt_matrix(beq)
    # Set up options
    if opts is not None:
        for k, v in opts.items():
            solvers.options[k] = v
    # Run CVXOPT's QP solver.
    # BUG FIX: CVXOPT's keyword is `initvals`, not `init_vals`; the old
    # spelling raised "unexpected keyword argument" on every call.
    sol = solvers.qp(Q, q.T, G=A, h=b, A=Aeq, b=beq, initvals=x0)
    return sol
def lsqnonneg(C, d):
    """
    Solves nonnegative linear least-squares problem:
    min_x ||C*x - d||_2^2, where x >= 0
    """
    # Plain lsqlin with a zero lower bound; solver progress output suppressed.
    return lsqlin(C, d, lb=0, opts={"show_progress": False})
def lsqnorm(C, d):
    """
    Solves normalized linear least-squares problem:
    min_x ||C*x - d||_2^2, where 0 <= x <= 1 and sum(x) == 1
    """
    # One equality row of ones enforces sum(x) == 1; bounds keep x in [0, 1].
    n_cols = get_shape(C)[1]
    ones_row = np.ones((1, n_cols))
    return lsqlin(C, d, Aeq=ones_row, beq=1, lb=0, ub=1, opts={"show_progress": False})
|
#Radhika PC
#5/25/2016
#Homework 2- dictionaries
# Per the TA feedback below, entries could also be added after creation, e.g.:
#   movie['budget'] = 65000000
movie = { 'title': 'spy', 'release': '2015', 'director': 'Paul Feig', 'budget': '65000000' , 'revenue': '233125712' }
# TA-COMMENT: (-0.5) We can add entries to a dictionary AFTER making it. We wanted to see:
# movie['budget'] = 65000000
# rather than "hard coding" budget and revenue into the initial dictionary.
print("My favorite movie is", movie['title'], "which was released in", movie['release'], "and was directed by", movie['director'])
#flop or success ?
# FIX (per the assignment directions quoted in the TA comment): a flop is
# when the movie cost more than it made; "good investment" only when revenue
# exceeded FIVE times the budget. Neither may apply.
budget = int(movie['budget'])
revenue = int(movie['revenue'])
if budget > revenue:
    print("It was a flop!")
elif revenue > 5 * budget:
    print("Wow,that was a good investment!")
# population of NYC
#Bronx has 1.4m, Queens has 2.3m and Staten Island has 470,000
# Populations are kept as strings (in millions); convert with float() at use.
NYC = { 'Manhattan': '1.6', 'Brooklyn': '2.6', 'Bronx': '1.4', 'Queens': '2.3' , 'Staten Island': '0.47' }
#Display the population of Brooklyn.
print("The population of Brooklyn is", float(NYC['Brooklyn']))
#Display the combined population of all five boroughs.
# FIX (per TA comment): sum programmatically over the dict instead of
# listing every borough key by hand.
total_population = sum(float(pop) for pop in NYC.values())
print("The total NYC population is", total_population, "million")
#Display what percent of NYC's population lives in Manhattan.
Manhattan_percent = float(NYC['Manhattan']) / total_population * 100
print(Manhattan_percent, "% of total population lives in Manhattan")  # typo fixed: polution -> population
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 14 12:07:00 2020
@author: acrog
"""
'''
Notes: try joining box top and bottom edges together
first box top edge should be at y =0
last box bottom edge should be at y = height of image
**determine the letter each box is closest to then use max height
and min height of all boxes to solve it
'''
import cv2
import math
import requests
import os
from pdf2image import convert_from_path
import numpy as np
def dist(p1, p2):
    """Euclidean distance between 2-D points p1 and p2."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return math.sqrt(dx * dx + dy * dy)
def vconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):
    """Resize every image to the narrowest width (keeping aspect ratio),
    then stack them vertically into a single image."""
    target_w = min(im.shape[1] for im in im_list)
    resized = []
    for im in im_list:
        new_h = int(im.shape[0] * target_w / im.shape[1])
        resized.append(cv2.resize(im, (target_w, new_h), interpolation=interpolation))
    return cv2.vconcat(resized)
'''
FIND PDF
'''
# Exam selection parameters (Purdue old-exam archive).
whichclass = 26600
whichexam = 'FE'
whichsemester = 'F'
whichyear = 2018
whichmath = 'MA266'
directory = whichmath+'pdfs'
# Pixel offsets used later when cropping question regions.
offset = 40
offsetBetween = 65
url = f'https://www.math.purdue.edu/php-scripts/courses/oldexams/serve_file.php?file={whichclass}{whichexam}-{whichsemester}{whichyear}.pdf'
# NOTE(review): verify=False disables TLS certificate verification -- confirm
# this is intentional for this one-off scrape.
response = requests.get(url, verify = False)
if not os.path.exists(directory):
    os.makedirs(directory)
testtitle = f'{whichmath[-3:]}-{whichexam}-{whichsemester}-{whichyear}'
pdfname = f'{testtitle}.pdf'
if not os.path.exists(directory+'/'+testtitle):
    os.makedirs(directory+'/'+testtitle)
if not os.path.exists(directory+'/'+testtitle+'/pages'):
    os.makedirs(directory+'/'+testtitle+'/pages')
# Save the downloaded PDF, then rasterize each page to a PNG.
with open(directory+'/'+testtitle+'/'+pdfname, "wb") as file:
    file.write(response.content)
pdf = convert_from_path(directory+'/'+testtitle+'/'+pdfname, poppler_path=r'C:\Program Files\poppler-0.68.0_x86\poppler-0.68.0\bin')
numPages = 1
for page in pdf:
    page.save(f'{directory}/{testtitle}/pages/{testtitle}-P{numPages}.png', 'PNG')
    img = cv2.imread(f'{directory}/{testtitle}/pages/{testtitle}-P{numPages}.png')
    #img[0:190, 0:img.shape[1]] = np.full((190, img.shape[1], 3),255)
    #image = img[170:img.shape[0], 0:img.shape[1]]
    cv2.imwrite(f'{directory}/{testtitle}/pages/{testtitle}-P{numPages}.png',img)
    numPages += 1
'''
scrape pdf
'''
# Show the first two pages side by side so the user can identify where the
# actual questions begin.
page1 = cv2.imread(f'{whichmath}pdfs/{testtitle}/pages/{testtitle}-P1.png')
page2 = cv2.imread(f'{whichmath}pdfs/{testtitle}/pages/{testtitle}-P2.png')
pg1 = page1#[170:page1.shape[0], 0:page1.shape[1]]
pg2 = page2#[170:page2.shape[0], 0:page2.shape[1]]
cv2.namedWindow('stack',cv2.WINDOW_NORMAL)
cv2.resizeWindow('stack', 1000, 1000)
vertical = np.hstack((pg1,pg2))
cv2.imshow('stack',vertical)
cv2.waitKey(0)
cv2.destroyAllWindows()
# The user enters the page number where title pages end / questions start.
inputValue = int(input("Title page number: "))
questionNumber = 1
# Load image, grayscale, Gaussian blur, Otsu's threshold
# Outer loop: one iteration per exam page (after the title pages).
for imgNum in range(inputValue,numPages):
    imgStr = f'{whichmath}pdfs/{testtitle}/pages/{testtitle}-P'+str(imgNum+1)+'.png'
    #imgStr = 'Outlier images/Q'+str(imgNum+1)+'.png'
    #imgStr = 'FA2019MA266/FA2019MA266-'+str(imgNum+1).rjust(2,'0')+'.png'
    #imgStr = f'{whichmath}pdfs/{testtitle}/pages/{testtitle}-P6.png'
    image = cv2.imread(imgStr)
    #image[0:180, 0:image.shape[1]] = np.full((180, image.shape[1], 3),255)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (7,7), 0)
    thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    # Create rectangular structuring element and dilate
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
    dialateValue = 3
    dilate = cv2.dilate(thresh, kernel, iterations=dialateValue)
    # Find contours and draw rectangle
    cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns 2 or 3 values depending on the OpenCV version;
    # pick the contour list either way.
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    coordinates = []
    allBoxes = []
    yMax = 0
    for c in cnts:
        x,y,w,h = cv2.boundingRect(c)
        #cv2.rectangle(image, (x, y), (x+w, y+h),(255,255,12), 2)
        allBoxes.append([x,y,w,h])
        if y+h > yMax:
            yMax = y+h
        # Small boxes (< 65x65 px) are assumed to be single characters
        # (question numbers / answer letters).
        if w < 65 and h < 65:
            coordinates.append([x,y,w,h])
    coordinates = sorted(coordinates, key=lambda x: x[0], reverse=True)
    print(coordinates)
    allBoxes = sorted(allBoxes, key=lambda x: x[1], reverse=True)
    #cv2.rectangle(image, (coordinates[len(coordinates)-1][0], coordinates[len(coordinates)-1][1]), (coordinates[len(coordinates)-1][0]+coordinates[len(coordinates)-1][2], coordinates[len(coordinates)-1][1]+coordinates[len(coordinates)-1][3]),(255,255,12), 2)
    # Question-number boxes share (roughly) the same x column: collect their
    # y coordinates walking from the leftmost box until the column changes.
    questionYCoord = [coordinates[len(coordinates)-1][1]]
    for i in range(len(coordinates)-2,0,-1):
        if abs(coordinates[i][0] - coordinates[len(coordinates)-1][0]) <= 20:
            #cv2.rectangle(image, (coordinates[i][0], coordinates[i][1]), (coordinates[i][0]+coordinates[i][2], coordinates[i][1]+coordinates[i][3]),(255,255,12), 2)
            questionYCoord.append(coordinates[i][1])
        else:
            break
    questionYCoord.sort()
    newImage = image.copy()
    questions = []
    # Slice the page into one sub-image per question.
    for i in range(len(questionYCoord)):
        if i != len(questionYCoord)-1:
            questions.append(newImage[(questionYCoord[i]-offsetBetween):(questionYCoord[i+1]-offsetBetween), 0:(newImage.shape[1])])
        else:
            # page numbers or not
            questions.append(newImage[(questionYCoord[i]-offsetBetween):(allBoxes[0][1]-offsetBetween), 0:(newImage.shape[1])])
            #questions.append(newImage[(questionYCoord[i]-offsetBetween):(newImage.shape[0]), 0:(newImage.shape[1])])
    # Disabled debug preview of every question slice (string used as comment).
    ''' i in range(len(questions)):
    cv2.namedWindow('question '+ str(i+1),cv2.WINDOW_NORMAL)
    cv2.resizeWindow('question '+ str(i+1), 1000, 1000)
    cv2.imshow("question " + str(i+1), questions[i])
    cv2.waitKey(0)
    cv2.destroyAllWindows()'''
    '''
    FInd indivual questions and answers
    '''
    for numQuestions in range(len(questions)):
        if not os.path.exists(directory+'/'+testtitle+'/questions'):
            os.makedirs(directory+'/'+testtitle+'/questions')
        if not os.path.exists(directory+'/'+testtitle+'/answers'):
            os.makedirs(directory+'/'+testtitle+'/answers')
        image = questions[numQuestions]
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (7,7), 0)
        thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
        # Create rectangular structuring element and dilate
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
        dialateValue = 2
        dilate = cv2.dilate(thresh, kernel, iterations=dialateValue)
        # Find contours and draw rectangle
        cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if len(cnts) == 2 else cnts[1]
        coordinates = []
        allBoxes = []
        yMax = 0
        for c in cnts:
            x,y,w,h = cv2.boundingRect(c)
            #cv2.rectangle(image, (x, y), (x+w, y+h),(255,255,12), 2)
            allBoxes.append([x,y,w,h])
            if y+h > yMax:
                yMax = y+h
            if w < 65 and h < 65:
                coordinates.append([x,y,w,h])
        # Answer letters (A-E) form a vertical column: keep boxes whose x
        # coordinate is shared by at least four other boxes.
        closeValues = []
        closePoints = []
        for i in range(len(coordinates)):
            numValues = 0
            for j in range(len(coordinates)):
                if abs(coordinates[i][0]-coordinates[j][0]) <= 15:
                    numValues += 1
            if numValues >= 4:
                closeValues.append(coordinates[i][0])
                closePoints.append(coordinates[i][0:2])
        xValue = min(closeValues)
        numPrint = 0
        letterLocations = []
        letterCenters = []
        yValues = []
        # Take (up to) the five boxes in that column as the option letters.
        for i in range(len(coordinates)):
            if abs(coordinates[i][0]-xValue) <= 20 and numPrint < 5:
                numPrint += 1
                letterLocations.append(coordinates[i])
                letterCenters.append([coordinates[i][0]+coordinates[i][2]/2, coordinates[i][1]+coordinates[i][3]/2])
                yValues.append(coordinates[i][1])
                #cv2.rectangle(image, (coordinates[i][0], coordinates[i][1]), (coordinates[i][0] + coordinates[i][2], coordinates[i][1] + coordinates[i][3]), (36,255,12), 2)
        # Re-detect contours with a heavier dilation for region grouping.
        dialateValue = 6
        dilate = cv2.dilate(thresh, kernel, iterations=dialateValue)
        # Find contours and draw rectangle
        cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if len(cnts) == 2 else cnts[1]
        coordinates = []
        allBoxes = []
        yMax = 0
        for c in cnts:
            x,y,w,h = cv2.boundingRect(c)
            #cv2.rectangle(image, (x, y), (x+w, y+h),(255,255,12), 2)
            allBoxes.append([x,y,w,h])
            if y+h > yMax:
                yMax = y+h
            if w < 65 and h < 65:
                coordinates.append([x,y,w,h])
        # Split point between question text and answers: the bottom edge of
        # the nearest box that sits fully above the topmost option letter.
        yValue = min(yValues)
        closestDist = 1000
        closestYValue = yValue
        for i in range(len(allBoxes)):
            if allBoxes[i][1] + allBoxes[i][3] - yValue < 0 and abs(allBoxes[i][1] + allBoxes[i][3] - yValue) < closestDist:
                closestDist = abs(allBoxes[i][1] + allBoxes[i][3] - yValue)
                closestYValue = allBoxes[i][1] + allBoxes[i][3]
        offset = yValue-closestYValue
        question = image[0:(yValue-offset), 0:(image.shape[1])]
        cv2.imwrite(directory+'/'+testtitle+'/questions/Q'+str(questionNumber)+'.png',question)
        question = image[0:(yValue-offset), 0:(image.shape[1])]
        answers = image[(yValue-offset):(yMax), 0:(image.shape[1])]
        '''
        ALTERING ANSWER IMAGE
        '''
        gray = cv2.cvtColor(answers, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (7,7), 0)
        thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
        dialateValue = 2
        count = 0
        # Grow the dilation until the answer region yields a plausible number
        # of contours (8-12), bailing out at 250 iterations.
        while True:
            dilate = cv2.dilate(thresh, kernel, iterations=dialateValue)
            # Find contours and draw rectangle
            cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[0] if len(cnts) == 2 else cnts[1]
            count = 0
            for c in cnts:
                count += 1
                break
            # NOTE(review): the `break` above stops counting after the first
            # contour, so count is at most 1 and the 8-12 window can never be
            # reached -- presumably it should be removed (or replaced with
            # count = len(cnts)); confirm against the original indentation.
            if count < 8 or count > 12:
                dialateValue += 1
                if dialateValue >= 250:
                    print("ERROR")
                    break
            else:
                break
        coordAns = []
        centers = []
        xMax = 0
        for c in cnts:
            x,y,w,h = cv2.boundingRect(c)
            if x+w > xMax:
                xMax = x+w
            coordAns.append([x,y,w,h])
            centers.append([((x+w)+x)/2,((y+h)+y)/2, x, y, w, h])
            #cv2.rectangle(answers, (x, y), (x+w, y+h),(255,255,12), 2)
        # Shift letter coordinates into the answers image's frame.
        for i in range(len(letterCenters)):
            letterCenters[i][1] -= yValue-offset
            letterLocations[i][1] -= yValue-offset
        print(centers)
        # Assign each answer contour to its nearest option letter.
        pointsCloseToLetters = [[],[],[],[],[]]
        for i in range(len(centers)):
            minDist = 100000
            boxValue = 0
            for j in range(len(letterCenters)):
                if dist(centers[i][0:2],letterCenters[j]) < minDist:
                    boxValue = j
                    minDist = dist(centers[i][0:2],letterCenters[j])
            pointsCloseToLetters[boxValue].append(centers[i])
        # Letters were collected bottom-up, hence the reversed option order.
        options = ['E','D','C','B','A']
        for i in range(len(pointsCloseToLetters)):
            maxHeight = 0
            minHeight = 10000
            maxWidth = 0
            minWidth = 10000
            # Bounding box of all contours assigned to this letter.
            for j in range(len(pointsCloseToLetters[i])):
                #max height
                if pointsCloseToLetters[i][j][3] + pointsCloseToLetters[i][j][5] > maxHeight:
                    maxHeight = pointsCloseToLetters[i][j][3] + pointsCloseToLetters[i][j][5]
                if pointsCloseToLetters[i][j][3] < minHeight:
                    minHeight = pointsCloseToLetters[i][j][3]
                if pointsCloseToLetters[i][j][2] + pointsCloseToLetters[i][j][4] > maxWidth:
                    maxWidth = pointsCloseToLetters[i][j][2] + pointsCloseToLetters[i][j][4]
                if pointsCloseToLetters[i][j][2] < minWidth:
                    minWidth = pointsCloseToLetters[i][j][2]
            print(f'({minWidth, minHeight}) ({maxWidth, maxHeight})')
            # The crop starts just right of the letter box itself.
            minWidth = letterLocations[i][0] + letterLocations[i][2]
            specificAnswer = answers[minHeight:maxHeight, minWidth:maxWidth]
            cv2.imwrite(directory+'/'+testtitle+'/answers/Q'+str(questionNumber)+options[i]+'.png',specificAnswer)
            cv2.rectangle(answers, (minWidth, minHeight), (maxWidth, maxHeight),(0,255,0), 2)
        cv2.rectangle(image, (0, 0), (image.shape[1], yValue-offset),(36,255,12), 2)
        #cv2.namedWindow('img',cv2.WINDOW_NORMAL)
        #cv2.resizeWindow('img', 1000, 1000)
        #cv2.imshow('img', image)
        #cv2.namedWindow('dilate',cv2.WINDOW_NORMAL)
        #cv2.resizeWindow('dilate', 1000, 1000)
        #cv2.namedWindow('thresh',cv2.WINDOW_NORMAL)
        #cv2.resizeWindow('thresh', 1000, 1000)
        #cv2.imshow('thresh', thresh)
        #cv2.imshow('dilate', dilate)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        #cv2.namedWindow('image',cv2.WINDOW_NORMAL)
        #cv2.resizeWindow('image', 1000, 1000)
        # Preview the question with its detected answers stacked below it.
        im_v_resize = vconcat_resize_min([question, answers])
        cv2.imshow("question "+str(questionNumber), im_v_resize)
        #cv2.imshow("answers "+str(questionNumber), answers)
        #cv2.imshow('image', image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        questionNumber += 1
|
"""
Module: 'microWebTemplate' on esp32_LoBo
MCU: (sysname='esp32_LoBo', nodename='esp32_LoBo', release='3.2.24', version='ESP32_LoBo_v3.2.24 on 2018-09-06', machine='ESP32 board with ESP32')
Stubber: 1.0.0 - updated
"""
from typing import Any
class MicroWebTemplate:
    """Firmware stub of MicroWebTemplate: constants are real, methods are
    placeholders that all return None."""

    # Template token syntax.
    TOKEN_OPEN = "{{"
    TOKEN_OPEN_LEN = 2
    TOKEN_CLOSE = "}}"
    TOKEN_CLOSE_LEN = 2

    # Recognized template instructions.
    INSTRUCTION_ELIF = "elif"
    INSTRUCTION_ELSE = "else"
    INSTRUCTION_END = "end"
    INSTRUCTION_FOR = "for"
    INSTRUCTION_IF = "if"
    INSTRUCTION_INCLUDE = "include"
    INSTRUCTION_PYTHON = "py"

    # Unresolved module reference in the stubbed firmware.
    re = None

    def Execute(self, *args) -> Any:
        pass

    def Validate(self, *args) -> Any:
        pass

    def _parseBloc(self, *args) -> Any:
        pass

    def _parseCode(self, *args) -> Any:
        pass

    def _processInstructionELIF(self, *args) -> Any:
        pass

    def _processInstructionELSE(self, *args) -> Any:
        pass

    def _processInstructionEND(self, *args) -> Any:
        pass

    def _processInstructionFOR(self, *args) -> Any:
        pass

    def _processInstructionIF(self, *args) -> Any:
        pass

    def _processInstructionINCLUDE(self, *args) -> Any:
        pass

    def _processInstructionPYTHON(self, *args) -> Any:
        pass

    def _processToken(self, *args) -> Any:
        pass
|
import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
class Csv:
    """Shared test-data holder; both frames are loaded once in setUpClass."""
    pdf_csv = None  # pandas DataFrame
    odf_csv = None  # orca DataFrame
class SeriesTakeTest(unittest.TestCase):
    """Regression tests comparing orca Series behavior against pandas.

    Commented-out assertions mark known orca divergences; see the inline
    TODO notes for each.
    """

    def setUp(self):
        # Decimal places used when comparing floating-point results.
        self.PRECISION = 5

    @classmethod
    def setUpClass(cls):
        # configure data directory
        DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
        fileName = 'USPricesSample.csv'
        data = os.path.join(DATA_DIR, fileName)
        data = data.replace('\\', '/')
        # connect to a DolphinDB server
        orca.connect(HOST, PORT, "admin", "123456")
        Csv.pdf_csv = pd.read_csv(data)
        Csv.odf_csv = orca.read_csv(data)

    @property
    def ps(self):
        # Reference pandas Series used by several tests.
        return pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')

    @property
    def os(self):
        # orca counterpart of `ps`. NOTE: shadows the `os` module inside tests.
        return orca.Series([1, 2, 3, 4, 5, 6, 7], name='x')

    def test_series_reindexing_selection_label_mainpulation_first(self):
        ps = pd.Series([1, 2, 3, 4], index=pd.date_range('2018-04-09', periods=4, freq='2D'))
        os = orca.Series(ps)
        # TODO: orca error
        # assert_series_equal(os.first('3D').to_pandas(), ps.first('3D'))

    def test_series_reindexing_selection_label_mainpulation_last(self):
        ps = pd.Series([1, 2, 3, 4], index=pd.date_range('2018-04-09', periods=4, freq='2D'))
        os = orca.Series(ps)
        # TODO: orca error
        # assert_series_equal(os.last('3D').to_pandas(), ps.last('3D'))

    def test_series_reindexing_selection_label_mainpulation_reset_index(self):
        ps = pd.Series([1, 2, 3, 4], name='foo', index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
        os = orca.Series(ps)
        assert_frame_equal(os.reset_index().to_pandas(), ps.reset_index())
        # TODO: orca error got an unexpected keyword argument 'name'
        # assert_frame_equal(os.reset_index(name='values').to_pandas(), ps.reset_index(name='values'))
        # TODO: orca output dataframe, pandas output series
        # assert_series_equal(os.reset_index(drop=True).to_pandas(), ps.reset_index(drop=True))
        # TODO: orca Cannot reset_index inplace on a Series to create a DataFrame
        # assert_series_equal(os.reset_index(inplace=True, drop=True).to_pandas(), ps.reset_index(inplace=True, drop=True))
        # Repeat with a MultiIndex, resetting a single level.
        arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
                  np.array(['one', 'two', 'one', 'two'])]
        ps = pd.Series(range(4), name='foo', index=pd.MultiIndex.from_arrays(arrays, names=['a', 'b']))
        os = orca.Series(ps)
        assert_frame_equal(os.reset_index(level='a').to_pandas(), ps.reset_index(level='a'))

    def test_series_reindexing_selection_label_mainpulation_reset_mask(self):
        pass

    def test_series_reindexing_selection_label_mainpulation_take(self):
        n = np.array([0, 1, 4])
        assert_series_equal(self.os.take(n).to_pandas(), self.ps.take(n))
        # TODO: iloc bug
        # assert_series_equal(self.os.take([]).to_pandas(), self.ps.take([]))
        assert_series_equal(self.os.take([-1, -2], axis=0).to_pandas(), self.ps.take([-1, -2], axis=0))
        # take() on a series containing NaN and a string index.
        osa = orca.Series([10, 1, 19, np.nan], index=['a', 'b', 'c', 'd'])
        psa = pd.Series([10, 1, 19, np.nan], index=['a', 'b', 'c', 'd'])
        assert_series_equal(osa.take([3]).to_pandas(), psa.take([3]))

    def test_series_reindexing_selection_label_manipulation_idxmax(self):
        pser = pd.Series(data=[1, 4, 5], index=['A', 'B', 'C'])
        oser = orca.Series(pser)
        self.assertEqual(oser.idxmax(), pser.idxmax())
        self.assertEqual(oser.idxmax(skipna=False), pser.idxmax(skipna=False))
        index = pd.MultiIndex.from_arrays([
            ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))
        pser = pd.Series(data=[1, 2, 4, 5], index=index)
        oser = orca.Series(pser)
        # TODO: multiIndex.idxmax()
        # self.assertEqual(oser.idxmax(), pser.idxmax())
        # self.assertEqual(oser.idxmax(skipna=False), pser.idxmax(skipna=False))
        #
        # oser = orca.Series([])
        # with self.assertRaisesRegex(ValueError, "an empty sequence"):
        #     oser.idxmax()

    def test_series_reindexing_selection_label_manipulation_idxmin(self):
        pser = pd.Series(data=[1, 4, 5], index=['A', 'B', 'C'])
        oser = orca.Series(pser)
        self.assertEqual(oser.idxmin(), pser.idxmin())
        # self.assertEqual(oser.idxmin(skipna=False), pser.idxmin(skipna=False))
        #
        # index = pd.MultiIndex.from_arrays([
        #     ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))
        # pser = pd.Series(data=[1, 2, 4, 5], index=index)
        # oser = orca.Series(pser)
        # TODO: multiIndex.idxmin()
        # self.assertEqual(oser.idxmin(), pser.idxmin())
        # self.assertEqual(oser.idxmin(skipna=False), pser.idxmin(skipna=False))
        #
        # oser = orca.Series([])
        # with self.assertRaisesRegex(ValueError, "an empty sequence"):
        #     oser.idxmin()

    def test_series_reindexing_selection_label_manipulation_duplicated(self):
        s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], name='animal')
        ds = orca.Series(s)
        self.assertEqual(repr(ds.duplicated().to_pandas()), repr(s.duplicated()))
        self.assertEqual(repr(ds.duplicated(keep='first').to_pandas()), repr(s.duplicated(keep='first')))
        self.assertEqual(repr(ds.duplicated(keep='last').to_pandas()), repr(s.duplicated(keep='last')))
        self.assertEqual(repr(ds.duplicated(keep=False).to_pandas()), repr(s.duplicated(keep=False)))

    def test_series_reindexing_selection_label_manipulation_drop_duplicates(self):
        s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], name='animal')
        ds = orca.Series(s)
        assert_series_equal(ds.drop_duplicates().to_pandas(), s.drop_duplicates())
        assert_series_equal(ds.drop_duplicates(keep='first').to_pandas(), s.drop_duplicates(keep='first'))
        assert_series_equal(ds.drop_duplicates(keep='last').to_pandas(), s.drop_duplicates(keep='last'))
        assert_series_equal(ds.drop_duplicates(keep=False).to_pandas(), s.drop_duplicates(keep=False))
        # TODO: series.drop_duplicates 不支持inplace参数
        # (series.drop_duplicates does not support the inplace parameter)
        # assert_series_equal(ds.drop_duplicates(inplace=True).to_pandas(), s.drop_duplicates(inplace=True), check_names=False)
        # s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], name='animal')
        # ds = orca.Series(s)
        # assert_series_equal(ds.drop_duplicates(keep='last', inplace=True).to_pandas(), s.drop_duplicates(keep='last', inplace=True))
        # s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], name='animal')
        # ds = orca.Series(s)
        # assert_series_equal(ds.drop_duplicates(keep=False, inplace=True).to_pandas(), s.drop_duplicates(keep=False, inplace=True))

    def test_series_reindexing_selection_label_manipulation_head_tail(self):
        ps = self.ps
        os = self.os
        assert_series_equal(ps, os.to_pandas())
        # head
        assert_series_equal(ps.head(), os.head().to_pandas())
        assert_series_equal(ps.head(10), os.head(10).to_pandas())
        assert_series_equal(ps.head(7), os.head(7).to_pandas())
        assert_series_equal(ps.head(5), os.head(5).to_pandas())
        assert_series_equal(ps.head(3), os.head(3).to_pandas())
        # TODO: orca.Series.head(0)
        # assert_series_equal(ps.head(0), os.head(0).to_pandas())
        assert_series_equal(ps.head(-3), os.head(-3).to_pandas())
        assert_series_equal(ps[ps > 3].head(3), os[os > 3].head(3).to_pandas())
        # TODO: orca.ArithExpression.tail(-3)
        # assert_series_equal((ps+1).head(-3), (os+1).head(-3).to_pandas())
        # tail
        assert_series_equal(ps.tail(), os.tail().to_pandas())
        assert_series_equal(ps.tail(10), os.tail(10).to_pandas())
        assert_series_equal(ps.tail(7), os.tail(7).to_pandas())
        assert_series_equal(ps.tail(5), os.tail(5).to_pandas())
        assert_series_equal(ps.tail(3), os.tail(3).to_pandas())
        # TODO: orca.Series.tail(0)
        # assert_series_equal(ps.tail(0), os.tail(0).to_pandas())
        assert_series_equal(ps.tail(-3), os.tail(-3).to_pandas())
        assert_series_equal(ps[ps > 3].tail(3), os[os > 3].tail(3).to_pandas())
        # TODO: orca.ArithExpression.tail(-3)
        # assert_series_equal((ps+1).tail(-3), (os+1).tail(-3).to_pandas())

    def test_series_reindexing_selection_label_manipulation_rename(self):
        ps = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
        os = orca.Series(ps)
        # Rename by assigning the .name attribute directly.
        ps.name = 'renamed'
        os.name = 'renamed'
        self.assertEqual(os.name, 'renamed')
        assert_series_equal(ps, os.to_pandas())
        # Index names can be renamed the same way.
        pidx = ps.index
        oidx = os.index
        pidx.name = 'renamed'
        oidx.name = 'renamed'
        self.assertEqual(oidx.name, 'renamed')
        assert_index_equal(pidx, oidx.to_pandas())
        # TODO: orca.Series.rename('rename')
        # assert_series_equal(ps.rename('y'), os.rename('y').to_pandas())
        # self.assertEqual(os.name, 'renamed')  # no mutation
        # assert_series_equal(ps.rename(), os.rename().to_pandas())
        # os.rename('z', inplace=True)
        # ps.rename('z', inplace=True)
        # self.assertEqual(os.name, 'z')
        # assert_series_equal(ps, os.to_pandas())

    def test_series_reindexing_selection_label_manipulation_rename_method(self):
        # Series name
        ps = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
        os = orca.Series(ps)
        assert_series_equal(ps.rename("a"), os.rename("a").to_pandas())

    def test_series_reindexing_selection_label_manipulation_isin(self):
        ps = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], name='animal')
        os = orca.Series(ps)
        assert_series_equal(os.isin(['cow', 'lama']).to_pandas(), ps.isin(['cow', 'lama']))
        assert_series_equal(os.isin({'cow'}).to_pandas(), ps.isin({'cow'}))
        # series name
        # assert_series_equal(os.isin(orca.Series(['cow', 'lama'])).to_pandas(), ps.isin(pd.Series(['cow', 'lama'])))
# Standard library
import unittest.mock
# Installed
import click.testing
import pytest
# Own modules
from dds_cli.user import User
from dds_cli.__main__ import dds_main
@pytest.fixture
def retrieve_token():
    """Fixture to mock authentication by having a None token for every user."""
    # Patch the name-mangled private method User.__retrieve_token so that no
    # real authentication round-trip happens; every test sees a None token.
    with unittest.mock.patch.object(User, "_User__retrieve_token") as mock_A:
        mock_A.return_value = None
        # Yield (not return) so the patch stays active for the whole test.
        yield mock_A
@pytest.fixture
def runner(retrieve_token):
    """
    Fixture that returns the click cli runner.

    The runner is invoked when the function returned by this fixture is called.
    Depends on the retrieve_token fixture so authentication is always mocked.
    """
    # mix_stderr=False keeps stdout and stderr separate, so tests can assert
    # on each stream independently.
    runner_ = click.testing.CliRunner(mix_stderr=False)
    def _run(cmd, input=None):
        # cmd: argv-style arguments for the dds_main entry point;
        # input: optional text piped to the command's stdin.
        return runner_.invoke(
            dds_main,
            cmd,
            catch_exceptions=True,
            input=input,
        )
    yield _run
|
import numpy as np, pandas as pd, torch
def save_emb(d, experiment, data_dir):
    """Dump the entity and relation embeddings of a trained model to a CSV file.

    Parameters
    ----------
    d : dataset object exposing ``entities`` and ``relations`` iterables.
    experiment : object exposing ``Model`` (a torch module with a ``name``
        attribute) plus ``entity_idxs`` / ``relation_idxs`` lookup dicts.
    data_dir : str
        Slash-separated path; the CSV is written under
        ``./<path components [1:-2]>/<model name>_emb.csv`` and the
        third-from-last component is used in the progress message.

    Side effects: writes the CSV file and prints a completion message.
    """
    # Move the model to CPU once; the original re-invoked .cpu() for every
    # single entity/relation lookup.
    model = experiment.Model.cpu()

    def _embed(layers, idx):
        # Concatenate the flattened outputs of each embedding layer at `idx`.
        values = []
        for layer in layers:
            values += layer(torch.tensor(idx)).detach().tolist()
        return np.array(values)

    # Pick the embedding layers once per model type instead of duplicating
    # the extraction loops three times.
    if model.name == "ConEx":
        ent_layers = (model.emb_e_real, model.emb_e_img)   # real ++ imaginary parts
        rel_layers = (model.emb_rel_real, model.emb_rel_img)
    elif model.name == "Distmult":
        ent_layers = (model.emb_e,)
        rel_layers = (model.emb_rel,)
    elif model.name == "Tucker":
        ent_layers = (model.E,)
        rel_layers = (model.R,)
    else:
        # Unknown model: preserve original behavior (write an empty CSV).
        ent_layers = rel_layers = ()

    emb = dict()
    if ent_layers:
        for e in d.entities:
            emb[e] = _embed(ent_layers, experiment.entity_idxs[e])
        for r in d.relations:
            emb[r] = _embed(rel_layers, experiment.relation_idxs[r])

    out_csv = "./" + "/".join(data_dir.split("/")[1:-2]) + "/" + model.name + "_emb.csv"
    pd.DataFrame(emb.values(), index=emb.keys()).to_csv(out_csv)
    print("Finished saving {} embeddings for {}".format(model.name, data_dir.split("/")[-3]))
|
from rest_framework import serializers
from mneia_admin_backend.models import AreaURL
class AreaURLSerializer(serializers.ModelSerializer):
    """DRF model serializer exposing the id, area, url and type of an AreaURL."""
    class Meta:
        model = AreaURL
        fields = ("id", "area", "url", "type")
|
from .web3_top_class import Web_Class_IPC
import redis
import datetime
class Save_Block_Chain_Data(object):
    """Thin wrapper around Web_Class_IPC for submitting contract transactions.

    On construction it connects to a local geth node over IPC, using redis
    db=1 as a side store and a hard-coded signing keyfile.
    """
    def __init__(self):
        # Default localhost redis, database 1.
        redis_handle = redis.StrictRedis( db=1 )
        # NOTE(review): hard-coded keyfile and IPC socket paths are
        # deployment-specific — presumably a dev/test box; confirm before reuse.
        signing_key = '/mnt/ssd/ethereum/dev_data/keystore/UTC--2019-12-08T20-29-05.205871190Z--75dca28623f88b105b8d0c718b4bfde0f1568688'
        ipc_socket = "/home/pi/geth.ipc"
        self.w3 = Web_Class_IPC(ipc_socket,redis_handle,signing_key)
        # Print the current block number as a quick connectivity check.
        print(self.w3.get_block_number())
    def append_data(self,contract_name,method,*data) :
        """Call `method` on the named contract with *data; return the tx receipt."""
        contract_object = self.w3.get_contract(contract_name)
        receipt = self.w3.transact_contract_data(contract_object,method, *data)
        return receipt
if __name__ == "__main__":
save_block_chain_data = Save_Block_Chain_Data()
receipt = save_block_chain_data.append_data("EventHandler","transmit_event",["event_name","event_sub_id","data"])
print(receipt) |
#this script does the following:
#for each library
#get oligos
#add typeIIs cutters (BtsI)
#check that no new restriction sites added
#pad the oligos to same length
#add barcode with nicking sites (Nt.BspQI)
#check that no new restriction sites added
#add amplification primers
#check that no new restriction sites added
#output
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio.Alphabet import generic_dna
from Bio.Restriction import *
import pylab
##################################
#FUNCTIONS:
def getBuildOligos(filename,num_oligos):
    """Parse a .oligos file into a list of constructs.

    The file alternates '>' header lines and raw oligo sequence lines.
    Returns a list of lists: [header_string, Seq, Seq, ...] per construct.
    A warning is printed for any construct whose oligo count differs from
    `num_oligos`.
    """
    buildoligos = []
    countOligos = 0
    # Primed to num_oligos so the very first header does not trigger a warning.
    localCount = num_oligos
    # `with` guarantees the handle is closed even if parsing raises.
    with open(filename,'r') as handle:
        for line in handle:
            if line.startswith('>'):
                # Validate the *previous* construct before starting a new one.
                if localCount != num_oligos:
                    print("Wrong num oligos. Before: "+str(buildoligos[-1]))
                    print("Total oligos: " +str(localCount)+". Expected: "+str(num_oligos))
                buildoligos.append([line.strip()])
                localCount = 0
            elif line != "":
                # NOTE: lines from file iteration keep their '\n', so this
                # branch also turns blank lines into empty Seq objects —
                # presumably inputs have no blank lines; confirm.
                buildoligos[-1].append(Seq(line.strip(),generic_dna))
                countOligos += 1
                localCount += 1
    # Bug fix: the loop only validated a construct when the NEXT header was
    # seen, so the final construct was never checked. Check it here.
    if buildoligos and localCount != num_oligos:
        print("Wrong num oligos. Before: "+str(buildoligos[-1]))
        print("Total oligos: " +str(localCount)+". Expected: "+str(num_oligos))
    return buildoligos
def obtainFastaSequences(filename):
    """Read every record from a FASTA file.

    Returns a list of Bio.SeqRecord.SeqRecord objects in file order.
    """
    # Bug fix: the original opened the handle and never closed it (leak);
    # `with` closes it deterministically.
    with open(filename) as handle:
        return list(SeqIO.parse(handle, "fasta"))
def addBtsI(constructs):
    """Flank every oligo with BtsI sites and sanity-check restriction digestion.

    Each oligo is wrapped as GCAGTG + oligo + CACTGC (BtsI recognition sites
    in opposing orientations), then searched with a RestrictionBatch to verify:
      - every oligo: exactly 2 BtsI sites and 0 BspQI sites
      - first oligo of a construct: exactly 1 NdeI, 0 KpnI
      - last oligo: 0 NdeI, exactly 1 KpnI
      - middle oligos: 0 NdeI, 0 KpnI
    Violations are only reported via print; the oligo is kept regardless.

    constructs: list of [header, Seq, ...]; returns a new list, same shape.
    """
    #add BtsI sites and check that only two exist
    newconstructs = []
    for construct in constructs:
        newconstruct = []
        newconstruct.append(construct[0])
        # construct[0] is the header; oligos start at index 1.
        for i in range(1,len(construct)):
            newoligo = Seq("GCAGTG",generic_dna) + construct[i] + Seq("CACTGC",generic_dna)
            rb = RestrictionBatch([BtsI, BspQI, NdeI, KpnI,EcoRI])
            seqsearch = rb.search(newoligo)
            if i==1:
                #first oligo
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=0 or len(seqsearch[NdeI])!= 1 or len(seqsearch[KpnI])!=0:# or len(seqsearch[EcoRI])!=0:
                    print("\naddBtsI: Bad number of restriction sites in first oligo")
                    print(construct[0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            elif i==(len(construct)-1):
                #last oligo
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=0 or len(seqsearch[NdeI])!= 0 or len(seqsearch[KpnI])!=1:# or len(seqsearch[EcoRI])!=0:
                    print("\naddBtsI: Bad number of restriction sites in last oligo")
                    print(construct[0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            else:
                #middle oligos
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=0 or len(seqsearch[NdeI])!= 0 or len(seqsearch[KpnI])!=0:# or len(seqsearch[EcoRI])!=0:
                    print("\naddBtsI: Bad number of restriction sites in middle oligo")
                    print(construct[0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            newconstruct.append(newoligo)
        newconstructs.append(newconstruct)
    return newconstructs
def plotOligo_length(constructs,filename):
    """Plot and save a histogram of oligo lengths across all constructs.

    Saves <filename>.png and <filename>.pdf and shows the plot interactively.
    """
    #plot histogram of lengths
    construct_lengths = []
    for construct in constructs:
        # construct[0] is the header; oligos start at index 1.
        for i in range(1,len(construct)):
            construct_lengths.append(len(construct[i]))
    generate_histogram = True
    if generate_histogram:
        data = construct_lengths
        # NOTE(review): message says "Lev dist" but the data are oligo
        # lengths — looks like a copy-paste slip in the label.
        print("min Lev dist: " +str(min(data)) + " max Lev dist: " +str(max(data)))
        # One bin per integer length value.
        pylab.hist(data, bins=(max(data)-min(data)))
        pylab.title("%i oligos length distribution\nfrom %i to %i" \
                    % (len(data),min(data),max(data)))
        pylab.xlabel("Oligo length (nt)")
        pylab.ylabel("Count")
        pylab.savefig(filename+'.png', bbox_inches='tight')
        pylab.savefig(filename+'.pdf', bbox_inches='tight')
        pylab.show()
def padOligoCP(constructs,totallength):
    """Left-pad every oligo with an 'ATGC' repeat up to `totallength` bases.

    The padding goes on the 5' end (between the later BspQI barcode and the
    BtsI site). After padding, each oligo is re-checked with the same
    restriction-site expectations as addBtsI (2 BtsI, 0 BspQI; NdeI only on
    the first oligo, KpnI only on the last). Violations are printed but the
    oligo is kept.

    constructs: list of [header, Seq, ...]; returns a new list, same shape.
    """
    newconstructs = []
    for construct in constructs:
        newconstruct = []
        newconstruct.append(construct[0])
        for i in range(1,len(construct)):
            #pad oligo ATGC
            length_to_add = totallength - len(construct[i])
            # Whole "ATGC" repeats plus a truncated remainder, e.g. 6 -> "ATGCAT".
            # (Replaces the original if/elif chain over length_to_add % 4.)
            full_padding_seq = "ATGC" * (length_to_add // 4) + "ATGC"[:length_to_add % 4]
            newoligo = Seq(full_padding_seq,generic_dna) + construct[i] # add padding between BspQI and BtsI
            rb = RestrictionBatch([BtsI, BspQI, NdeI, KpnI, EcoRI])
            seqsearch = rb.search(newoligo)
            if i==1:
                #first oligo
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=0 or len(seqsearch[NdeI])!= 1 or len(seqsearch[KpnI])!=0:# or len(seqsearch[EcoRI])!=0:
                    print("\npadOligo: Bad number of restriction sites in first oligo")
                    print(construct[0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            elif i==(len(construct)-1):
                #last oligo
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=0 or len(seqsearch[NdeI])!= 0 or len(seqsearch[KpnI])!=1:# or len(seqsearch[EcoRI])!=0:
                    print("\npadOligo: Bad number of restriction sites in last oligo")
                    print(construct[0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            else:
                #middle oligos
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=0 or len(seqsearch[NdeI])!= 0 or len(seqsearch[KpnI])!=0:# or len(seqsearch[EcoRI])!=0:
                    print("\npadOligo: Bad number of restriction sites in middle oligo")
                    print(construct[0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            newconstruct.append(newoligo)
        newconstructs.append(newconstruct)
    return newconstructs
def addBarcodes(constructs, barcodes):
    """Prepend a Nt.BspQI-flanked barcode to every oligo of each construct.

    The i-th construct receives barcodes[i]; the barcode id is appended to
    the construct header (separated by ';'). Each oligo becomes
    GCTCTTCG + barcode + CGAAGAGC + oligo, and the restriction check now
    expects exactly 2 BspQI sites (the flanks) in addition to the 2 BtsI
    sites and the first/last NdeI/KpnI expectations. Violations are printed
    but the oligo is kept.
    """
    newconstructs = []
    for index in range(len(constructs)):
        newconstruct = []
        # Record which barcode was assigned in the header.
        newconstruct.append(constructs[index][0]+';'+barcodes[index].id)
        for i in range(1,len(constructs[index])):
            newoligo = Seq("GCTCTTCG",generic_dna) + barcodes[index].seq + Seq("CGAAGAGC",generic_dna) + constructs[index][i]
            rb = RestrictionBatch([BtsI, BspQI, NdeI, KpnI,EcoRI])
            seqsearch = rb.search(newoligo)
            if i==1:
                #first oligo
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=2 or len(seqsearch[NdeI])!= 1 or len(seqsearch[KpnI])!=0:# or len(seqsearch[EcoRI])!=0:
                    print("\naddBarcodes: Bad number of restriction sites in first oligo")
                    print(constructs[index][0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            elif i==(len(constructs[index])-1):
                #last oligo
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=2 or len(seqsearch[NdeI])!= 0 or len(seqsearch[KpnI])!=1:# or len(seqsearch[EcoRI])!=0:
                    print("\naddBarcodes: Bad number of restriction sites in last oligo")
                    print(constructs[index][0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            else:
                #middle oligos
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=2 or len(seqsearch[NdeI])!= 0 or len(seqsearch[KpnI])!=0:# or len(seqsearch[EcoRI])!=0:
                    print("\naddBarcodes: Bad number of restriction sites in middle oligo")
                    print(constructs[index][0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            newconstruct.append(newoligo)
        newconstructs.append(newconstruct)
    return newconstructs
def addAmpPrimers(constructs,fwdprim,revprim):
    """Attach amplification primers to both ends of every oligo.

    Each oligo becomes fwdprim + oligo + reverse_complement(revprim) so a
    single primer pair amplifies the whole library. The restriction-site
    expectations are the same as after barcoding (2 BtsI, 2 BspQI; NdeI only
    on the first oligo, KpnI only on the last). Violations are printed but
    the oligo is kept.
    """
    newconstructs = []
    for index in range(len(constructs)):
        newconstruct = []
        newconstruct.append(constructs[index][0])
        for i in range(1,len(constructs[index])):
            newoligo = fwdprim + constructs[index][i] + revprim.reverse_complement()
            rb = RestrictionBatch([BtsI, BspQI, NdeI, KpnI, EcoRI])
            seqsearch = rb.search(newoligo)
            if i==1:
                #first oligo
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=2 or len(seqsearch[NdeI])!= 1 or len(seqsearch[KpnI])!=0:# or len(seqsearch[EcoRI])!=0:
                    print("\naddAmpPrimers: Bad number of restriction sites in first oligo")
                    print(constructs[index][0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            elif i==(len(constructs[index])-1):
                #last oligo
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=2 or len(seqsearch[NdeI])!= 0 or len(seqsearch[KpnI])!=1:# or len(seqsearch[EcoRI])!=0:
                    print("\naddAmpPrimers: Bad number of restriction sites in last oligo")
                    print(constructs[index][0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            else:
                #middle oligos
                if len(seqsearch[BtsI])!=2 or len(seqsearch[BspQI])!=2 or len(seqsearch[NdeI])!= 0 or len(seqsearch[KpnI])!=0:# or len(seqsearch[EcoRI])!=0:
                    print("\naddAmpPrimers: Bad number of restriction sites in middle oligo")
                    print(constructs[index][0] + '\t' + str(i))
                    print(newoligo)
                    print(seqsearch)
            newconstruct.append(newoligo)
        newconstructs.append(newconstruct)
    return newconstructs
def printOligoLibrary(constructs):
    """Pretty-print each construct: header line, then one tab-indented
    oligo per line followed by its length."""
    for entry in constructs:
        header, oligos = entry[0], entry[1:]
        print(header)
        for seq in oligos:
            print('\t' + seq + '\t' + str(len(seq)))
def outputOligoLibrary(constructs,filename,append_or_write,index):
    """Write constructs to `filename` in a FASTA-like two-line-per-oligo format.

    Parameters:
        constructs: list of [header, oligo1, oligo2, ...]
        filename: output path
        append_or_write: file mode, 'w' or 'a', passed straight to open()
        index: zero-based library index; libraries 0-14 are codon set 1 and
               15+ are codon set 2 (encoded into each record name).
    """
    codon_str = "Codon1" if index < 15 else "Codon2"
    # Bug fix: `with` closes the handle even if a write raises (the original
    # leaked the handle on error).
    with open(filename, append_or_write) as handle:
        for construct in constructs:
            basename = construct[0] + ';' + codon_str + ';'
            # Loop variable renamed: the original reused `index`, shadowing
            # the parameter of the same name.
            for oligo_num in range(1, len(construct)):
                handle.write(basename + str(oligo_num) + '\n')
                handle.write(str(construct[oligo_num]) + '\n')
#####################################
########## OPTIONS ##################
#####################################
# Input .oligos files: 15 libraries in codon set 1 followed by the same 15
# libraries re-encoded with an alternate codon usage (codon set 2).
filenames = ['db_oligo/DHFR_Lib01_4oligo.oligos','db_oligo/DHFR_Lib02_4oligo.oligos','db_oligo/DHFR_Lib03_4oligo.oligos',
             'db_oligo/DHFR_Lib04_4oligo.oligos','db_oligo/DHFR_Lib05_4oligo.oligos','db_oligo/DHFR_Lib06_4oligo.oligos',
             'db_oligo/DHFR_Lib07_4oligo.oligos','db_oligo/DHFR_Lib08_4oligo.oligos','db_oligo/DHFR_Lib09_4oligo.oligos',
             'db_oligo/DHFR_Lib10_4oligo.oligos','db_oligo/DHFR_Lib11_4oligo.oligos','db_oligo/DHFR_Lib12_4oligo.oligos',
             'db_oligo/DHFR_Lib13_4oligo.oligos','db_oligo/DHFR_Lib14_5oligo.oligos','db_oligo/DHFR_Lib15_5oligo.oligos',
             'db_oligo/DHFR_Lib01_4oligo.codon2.oligos','db_oligo/DHFR_Lib02_4oligo.codon2.oligos','db_oligo/DHFR_Lib03_4oligo.codon2.oligos',
             'db_oligo/DHFR_Lib04_4oligo.codon2.oligos','db_oligo/DHFR_Lib05_4oligo.codon2.oligos','db_oligo/DHFR_Lib06_4oligo.codon2.oligos',
             'db_oligo/DHFR_Lib07_4oligo.codon2.oligos','db_oligo/DHFR_Lib08_4oligo.codon2.oligos','db_oligo/DHFR_Lib09_4oligo.codon2.oligos',
             'db_oligo/DHFR_Lib10_4oligo.codon2.oligos','db_oligo/DHFR_Lib11_4oligo.codon2.oligos','db_oligo/DHFR_Lib12_4oligo.codon2.oligos',
             'db_oligo/DHFR_Lib13_4oligo.codon2.oligos','db_oligo/DHFR_Lib14_5oligo.codon2.oligos','db_oligo/DHFR_Lib15_5oligo.codon2.oligos']
# Output names: the codon-2 inputs are renumbered Lib16-Lib30.
filenames_out = ['db_oligo/DHFR_Lib01_4oligo.oligos','db_oligo/DHFR_Lib02_4oligo.oligos','db_oligo/DHFR_Lib03_4oligo.oligos',
                 'db_oligo/DHFR_Lib04_4oligo.oligos','db_oligo/DHFR_Lib05_4oligo.oligos','db_oligo/DHFR_Lib06_4oligo.oligos',
                 'db_oligo/DHFR_Lib07_4oligo.oligos','db_oligo/DHFR_Lib08_4oligo.oligos','db_oligo/DHFR_Lib09_4oligo.oligos',
                 'db_oligo/DHFR_Lib10_4oligo.oligos','db_oligo/DHFR_Lib11_4oligo.oligos','db_oligo/DHFR_Lib12_4oligo.oligos',
                 'db_oligo/DHFR_Lib13_4oligo.oligos','db_oligo/DHFR_Lib14_5oligo.oligos','db_oligo/DHFR_Lib15_5oligo.oligos',
                 'db_oligo/DHFR_Lib16_4oligo.oligos','db_oligo/DHFR_Lib17_4oligo.oligos','db_oligo/DHFR_Lib18_4oligo.oligos',
                 'db_oligo/DHFR_Lib19_4oligo.oligos','db_oligo/DHFR_Lib20_4oligo.oligos','db_oligo/DHFR_Lib21_4oligo.oligos',
                 'db_oligo/DHFR_Lib22_4oligo.oligos','db_oligo/DHFR_Lib23_4oligo.oligos','db_oligo/DHFR_Lib24_4oligo.oligos',
                 'db_oligo/DHFR_Lib25_4oligo.oligos','db_oligo/DHFR_Lib26_4oligo.oligos','db_oligo/DHFR_Lib27_4oligo.oligos',
                 'db_oligo/DHFR_Lib28_4oligo.oligos','db_oligo/DHFR_Lib29_5oligo.oligos','db_oligo/DHFR_Lib30_5oligo.oligos']
#ampF from 00_primer_screen.py output: skpp15 F&R for amplification primers
#these have been further edited manually
ampprimf_file = 'ampprimers-skpp15/skpp15-forward_select_mod.faa'
ampprimersf = obtainFastaSequences(ampprimf_file)
# Debug: echo the forward primer sequences.
ampylist = []
for ampy in ampprimersf:
    ampylist.append(str(ampy.seq))
print(ampylist)
#ampR (not RC) from 00_primer_screen.py output:
ampprimr_file = 'ampprimers-skpp15/skpp15-reverse_select_mod.faa'
ampprimersr = obtainFastaSequences(ampprimr_file)
# Debug: echo the reverse primer sequences (reuses/overwrites ampylist).
ampylist = []
for ampy in ampprimersr:
    ampylist.append(str(ampy.seq))
print(ampylist)
# One barcode record per library, pre-filtered for Tm/GC/Levenshtein distance.
barcodes = obtainFastaSequences('barcodes/filt_prim_12nt_Lev_3_Tm_40_42_GC_45_55_SD_2_mod_restriction.fasta')
# number of oligos to use to split genes in each lib:
num_oligos = [4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5]
# NOTE(review): constructs_per_lib is defined but not used below — presumably
# informational; confirm before removing.
constructs_per_lib = [384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384]
#length of payload + BtsaI sites + buffer such that all oligos are full length
length_padded_payload = 172 # 142nt for 200mer with 12nt BC, 172nt for 230mers
file_for_BLAT = "db_oligo/LibAll_finaloligos_noAmp.fasta"
#####################################
######### / OPTIONS #################
#####################################
# Main pipeline: parse -> add BtsI flanks -> pad -> barcode -> (dump pre-amp
# oligos for BLAT) -> add amplification primers -> write final library.
for index in range(len(filenames)):
    print("Processing Lib"+str(index+1))
    buildoligos = getBuildOligos(filenames[index],num_oligos[index])
    #plotOligo_length(buildoligos,"plots/Oligo_Lib"+str(index)+"_length_in_hist")
    btsIoligos = addBtsI(buildoligos)
    #plotOligo_length(btsIoligos,"plots/Oligo_Lib"+str(index)+"_length_after_btsI_hist")
    paddedOligos = padOligoCP(btsIoligos,length_padded_payload)
    #plotOligo_length(paddedOligos,"plots/Oligo_"+str(index)+"_length_after_padding_hist")
    barcodedoligos = addBarcodes(paddedOligos,barcodes)
    # First library truncates the combined BLAT file; the rest append.
    if index == 0:
        outputOligoLibrary(barcodedoligos,file_for_BLAT,'w',index)
    else:
        outputOligoLibrary(barcodedoligos,file_for_BLAT,'a',index)
    finaloligos = addAmpPrimers(barcodedoligos,ampprimersf[index].seq,ampprimersr[index].seq)
    #printOligoLibrary(finaloligos)
    print(filenames_out[index].replace('.oligos','-finaloligos.fasta'))
    outputOligoLibrary(finaloligos,filenames_out[index].replace('.oligos','-finaloligos.fasta'),'w',index)
|
"""Unit tests for day 4 submission"""
from aoc2021 import day4
from aoc2021.day4 import BingoBoard
# Path of the sample puzzle input exercised by these tests.
INPUT_FILE_PATH = 'tests/inputs/day4_tst.txt'
# Draw order expected after parsing the first line of the test file.
TEST_FILE_EXPECTED_NUMBERS = [7, 4, 9, 5, 11, 17, 23, 2, 0, 14,
                              21, 24, 10, 16, 13, 6, 15, 25, 12, 22, 18, 20, 8, 19, 3, 26, 1]
# The three 5x5 bingo boards expected from the remainder of the test file.
TEST_FILE_EXPECTED_BOARDS = [
    BingoBoard(day4.BINGO_BOARD_DIM, [[22, 13, 17, 11, 0],
                                      [8, 2, 23, 4, 24],
                                      [21, 9, 14, 16, 7],
                                      [6, 10, 3, 18, 5],
                                      [1, 12, 20, 15, 19]]),
    BingoBoard(day4.BINGO_BOARD_DIM, [[3, 15, 0, 2, 22],
                                      [9, 18, 13, 17, 5],
                                      [19, 8, 7, 25, 23],
                                      [20, 11, 10, 24, 4],
                                      [14, 21, 16, 12, 6]]),
    BingoBoard(day4.BINGO_BOARD_DIM, [[14, 21, 17, 24, 4],
                                      [10, 16, 15, 9, 19],
                                      [18, 8, 23, 26, 20],
                                      [22, 11, 13, 6, 5],
                                      [2, 0, 12, 3, 7]])]
# pylint: disable=protected-access
def test_process_inputs():
    """Tests the input processing function to make sure each line is converted
    into the proper list of tuples
    """
    numbers, boards = day4._process_inputs(INPUT_FILE_PATH)
    assert numbers == TEST_FILE_EXPECTED_NUMBERS
    assert boards == TEST_FILE_EXPECTED_BOARDS
def test_solution():
    """Tests the whole integrated solution with a test input file
    """
    # Part 1 and part 2 answers for the sample input.
    assert day4.solution(INPUT_FILE_PATH) == [4512, 1924]
|
"""
Tests common to list and UserList.UserList
"""
import sys
import os
from test import test_support, seq_tests
class CommonTest(seq_tests.CommonTest):
    """Mutable-sequence tests shared by list and UserList (Python 2 / Jython).

    Subclasses set self.type2test to the sequence type under test. Several
    CPython-only behaviors are guarded with test_support.is_jython.
    """

    def test_init(self):
        # Iterable arg is optional
        self.assertEqual(self.type2test([]), self.type2test())

        # Init clears previous values
        a = self.type2test([1, 2, 3])
        a.__init__()
        self.assertEqual(a, self.type2test([]))

        # Init overwrites previous values
        a = self.type2test([1, 2, 3])
        a.__init__([4, 5, 6])
        self.assertEqual(a, self.type2test([4, 5, 6]))

        # Mutables always return a new object
        b = self.type2test(a)
        self.assertNotEqual(id(a), id(b))
        self.assertEqual(a, b)

    def test_repr(self):
        """repr/str round-tripping, including self-referential lists."""
        l0 = []
        l2 = [0, 1, 2]
        a0 = self.type2test(l0)
        a2 = self.type2test(l2)

        self.assertEqual(str(a0), str(l0))
        self.assertEqual(repr(a0), repr(l0))
        self.assertEqual(repr(a2), repr(l2))
        self.assertEqual(str(a2), "[0, 1, 2]")
        self.assertEqual(repr(a2), "[0, 1, 2]")

        # A list containing itself must render as [...] instead of recursing.
        a2.append(a2)
        a2.append(3)
        self.assertEqual(str(a2), "[0, 1, 2, [...], 3]")
        self.assertEqual(repr(a2), "[0, 1, 2, [...], 3]")

        #FIXME: not working on Jython
        if not test_support.is_jython:
            # Deeply nested (non-cyclic) lists exhaust the recursion limit.
            l0 = []
            for i in xrange(sys.getrecursionlimit() + 100):
                l0 = [l0]
            self.assertRaises(RuntimeError, repr, l0)

    def test_print(self):
        """print >>file of a self-referential list matches its repr."""
        d = self.type2test(xrange(200))
        d.append(d)
        d.extend(xrange(200,400))
        d.append(d)
        d.append(400)
        try:
            with open(test_support.TESTFN, "wb") as fo:
                print >> fo, d,
            with open(test_support.TESTFN, "rb") as fo:
                self.assertEqual(fo.read(), repr(d))
        finally:
            os.remove(test_support.TESTFN)

    def test_set_subscript(self):
        """Extended-slice assignment argument validation and basic use."""
        a = self.type2test(range(20))
        self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 0), [1,2,3])
        self.assertRaises(TypeError, a.__setitem__, slice(0, 10), 1)
        self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 2), [1,2])
        self.assertRaises(TypeError, a.__getitem__, 'x', 1)
        a[slice(2,10,3)] = [1,2,3]
        self.assertEqual(a, self.type2test([0, 1, 1, 3, 4, 2, 6, 7, 3,
                                            9, 10, 11, 12, 13, 14, 15,
                                            16, 17, 18, 19]))

    def test_reversed(self):
        a = self.type2test(range(20))
        r = reversed(a)
        self.assertEqual(list(r), self.type2test(range(19, -1, -1)))
        # Exhausted iterator keeps raising StopIteration.
        self.assertRaises(StopIteration, r.next)
        self.assertEqual(list(reversed(self.type2test())),
                         self.type2test())
        # Bug 3689: make sure list-reversed-iterator doesn't have __len__
        self.assertRaises(TypeError, len, reversed([1,2,3]))

    def test_setitem(self):
        """Item assignment with positive, negative, and long (0L) indices."""
        a = self.type2test([0, 1])
        a[0] = 0
        a[1] = 100
        self.assertEqual(a, self.type2test([0, 100]))
        a[-1] = 200
        self.assertEqual(a, self.type2test([0, 200]))
        a[-2] = 100
        self.assertEqual(a, self.type2test([100, 200]))
        self.assertRaises(IndexError, a.__setitem__, -3, 200)
        self.assertRaises(IndexError, a.__setitem__, 2, 200)

        a = self.type2test([])
        self.assertRaises(IndexError, a.__setitem__, 0, 200)
        self.assertRaises(IndexError, a.__setitem__, -1, 200)
        self.assertRaises(TypeError, a.__setitem__)

        a = self.type2test([0,1,2,3,4])
        # Python 2 long integers must work as indices too.
        a[0L] = 1
        a[1L] = 2
        a[2L] = 3
        self.assertEqual(a, self.type2test([1,2,3,3,4]))
        a[0] = 5
        a[1] = 6
        a[2] = 7
        self.assertEqual(a, self.type2test([5,6,7,3,4]))
        a[-2L] = 88
        a[-1L] = 99
        self.assertEqual(a, self.type2test([5,6,7,88,99]))
        a[-2] = 8
        a[-1] = 9
        self.assertEqual(a, self.type2test([5,6,7,8,9]))

    def test_delitem(self):
        a = self.type2test([0, 1])
        del a[1]
        self.assertEqual(a, [0])
        del a[0]
        self.assertEqual(a, [])

        a = self.type2test([0, 1])
        del a[-2]
        self.assertEqual(a, [1])
        del a[-1]
        self.assertEqual(a, [])

        a = self.type2test([0, 1])
        self.assertRaises(IndexError, a.__delitem__, -3)
        self.assertRaises(IndexError, a.__delitem__, 2)

        a = self.type2test([])
        self.assertRaises(IndexError, a.__delitem__, 0)
        self.assertRaises(TypeError, a.__delitem__)

    def test_setslice(self):
        """Simple-slice assignment, including self-assignment overlap cases."""
        l = [0, 1]
        a = self.type2test(l)

        # Exhaustively compare against plain-list behavior for small bounds.
        for i in range(-3, 4):
            a[:i] = l[:i]
            self.assertEqual(a, l)
            a2 = a[:]
            a2[:i] = a[:i]
            self.assertEqual(a2, a)
            a[i:] = l[i:]
            self.assertEqual(a, l)
            a2 = a[:]
            a2[i:] = a[i:]
            self.assertEqual(a2, a)
            for j in range(-3, 4):
                a[i:j] = l[i:j]
                self.assertEqual(a, l)
                a2 = a[:]
                a2[i:j] = a[i:j]
                self.assertEqual(a2, a)

        aa2 = a2[:]
        aa2[:0] = [-2, -1]
        self.assertEqual(aa2, [-2, -1, 0, 1])
        aa2[0:] = []
        self.assertEqual(aa2, [])

        # Assigning a list into a slice of itself must not loop or corrupt.
        a = self.type2test([1, 2, 3, 4, 5])
        a[:-1] = a
        self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 5]))
        a = self.type2test([1, 2, 3, 4, 5])
        a[1:] = a
        self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5]))
        a = self.type2test([1, 2, 3, 4, 5])
        a[1:-1] = a
        self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5, 5]))

        a = self.type2test([])
        a[:] = tuple(range(10))
        self.assertEqual(a, self.type2test(range(10)))

        self.assertRaises(TypeError, a.__setslice__, 0, 1, 5)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 1, 5))

        self.assertRaises(TypeError, a.__setslice__)
        self.assertRaises(TypeError, a.__setitem__)

    def test_delslice(self):
        """Slice deletion with int and long bounds, negative and open-ended."""
        a = self.type2test([0, 1])
        del a[1:2]
        del a[0:1]
        self.assertEqual(a, self.type2test([]))

        a = self.type2test([0, 1])
        del a[1L:2L]
        del a[0L:1L]
        self.assertEqual(a, self.type2test([]))

        a = self.type2test([0, 1])
        del a[-2:-1]
        self.assertEqual(a, self.type2test([1]))

        a = self.type2test([0, 1])
        del a[-2L:-1L]
        self.assertEqual(a, self.type2test([1]))

        a = self.type2test([0, 1])
        del a[1:]
        del a[:1]
        self.assertEqual(a, self.type2test([]))

        a = self.type2test([0, 1])
        del a[1L:]
        del a[:1L]
        self.assertEqual(a, self.type2test([]))

        a = self.type2test([0, 1])
        del a[-1:]
        self.assertEqual(a, self.type2test([0]))

        a = self.type2test([0, 1])
        del a[-1L:]
        self.assertEqual(a, self.type2test([0]))

        a = self.type2test([0, 1])
        del a[:]
        self.assertEqual(a, self.type2test([]))

    def test_append(self):
        a = self.type2test([])
        a.append(0)
        a.append(1)
        a.append(2)
        self.assertEqual(a, self.type2test([0, 1, 2]))

        self.assertRaises(TypeError, a.append)

    def test_extend(self):
        a1 = self.type2test([0])
        a2 = self.type2test((0, 1))
        a = a1[:]
        a.extend(a2)
        self.assertEqual(a, a1 + a2)

        a.extend(self.type2test([]))
        self.assertEqual(a, a1 + a2)

        # Extending with itself must not loop forever.
        a.extend(a)
        self.assertEqual(a, self.type2test([0, 0, 1, 0, 0, 1]))

        a = self.type2test("spam")
        a.extend("eggs")
        self.assertEqual(a, list("spameggs"))

        self.assertRaises(TypeError, a.extend, None)

        self.assertRaises(TypeError, a.extend)

    def test_insert(self):
        a = self.type2test([0, 1, 2])
        a.insert(0, -2)
        a.insert(1, -1)
        a.insert(2, 0)
        self.assertEqual(a, [-2, -1, 0, 0, 1, 2])

        b = a[:]
        b.insert(-2, "foo")
        # Out-of-range insert positions clamp to the ends.
        b.insert(-200, "left")
        b.insert(200, "right")
        self.assertEqual(b, self.type2test(["left",-2,-1,0,0,"foo",1,2,"right"]))

        self.assertRaises(TypeError, a.insert)

    def test_pop(self):
        a = self.type2test([-1, 0, 1])
        a.pop()
        self.assertEqual(a, [-1, 0])
        a.pop(0)
        self.assertEqual(a, [0])
        self.assertRaises(IndexError, a.pop, 5)
        a.pop(0)
        self.assertEqual(a, [])
        self.assertRaises(IndexError, a.pop)
        self.assertRaises(TypeError, a.pop, 42, 42)
        # NOTE(review): this final assignment is never asserted against —
        # looks like leftover from a trimmed test.
        a = self.type2test([0, 10, 20, 30, 40])

    def test_remove(self):
        a = self.type2test([0, 0, 1])
        a.remove(1)
        self.assertEqual(a, [0, 0])
        a.remove(0)
        self.assertEqual(a, [0])
        a.remove(0)
        self.assertEqual(a, [])

        self.assertRaises(ValueError, a.remove, 0)

        self.assertRaises(TypeError, a.remove)

        # An exception raised from __eq__ during the scan must propagate.
        class BadExc(Exception):
            pass

        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False

        a = self.type2test([0, 1, 2, 3])
        self.assertRaises(BadExc, a.remove, BadCmp())

        class BadCmp2:
            def __eq__(self, other):
                raise BadExc()

        d = self.type2test('abcdefghcij')
        d.remove('c')
        self.assertEqual(d, self.type2test('abdefghcij'))
        d.remove('c')
        self.assertEqual(d, self.type2test('abdefghij'))
        self.assertRaises(ValueError, d.remove, 'c')
        self.assertEqual(d, self.type2test('abdefghij'))

        # Handle comparison errors
        d = self.type2test(['a', 'b', BadCmp2(), 'c'])
        e = self.type2test(d)
        self.assertRaises(BadExc, d.remove, 'c')
        for x, y in zip(d, e):
            # verify that original order and values are retained.
            self.assertIs(x, y)

    def test_count(self):
        a = self.type2test([0, 1, 2])*3
        self.assertEqual(a.count(0), 3)
        self.assertEqual(a.count(1), 3)
        self.assertEqual(a.count(3), 0)

        self.assertRaises(TypeError, a.count)

        # An exception raised from __eq__ while counting must propagate.
        class BadExc(Exception):
            pass

        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False

        self.assertRaises(BadExc, a.count, BadCmp())

    def test_index(self):
        """index() with start/stop bounds, huge bounds, and evil __eq__."""
        u = self.type2test([0, 1])
        self.assertEqual(u.index(0), 0)
        self.assertEqual(u.index(1), 1)
        self.assertRaises(ValueError, u.index, 2)

        u = self.type2test([-2, -1, 0, 0, 1, 2])
        self.assertEqual(u.count(0), 2)
        self.assertEqual(u.index(0), 2)
        self.assertEqual(u.index(0, 2), 2)
        self.assertEqual(u.index(-2, -10), 0)
        self.assertEqual(u.index(0, 3), 3)
        self.assertEqual(u.index(0, 3, 4), 3)
        self.assertRaises(ValueError, u.index, 2, 0, -10)

        self.assertRaises(TypeError, u.index)

        class BadExc(Exception):
            pass

        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False

        a = self.type2test([0, 1, 2, 3])
        self.assertRaises(BadExc, a.index, BadCmp())

        a = self.type2test([-2, -1, 0, 0, 1, 2])
        self.assertEqual(a.index(0), 2)
        self.assertEqual(a.index(0, 2), 2)
        self.assertEqual(a.index(0, -4), 2)
        self.assertEqual(a.index(-2, -10), 0)
        self.assertEqual(a.index(0, 3), 3)
        self.assertEqual(a.index(0, -3), 3)
        self.assertEqual(a.index(0, 3, 4), 3)
        self.assertEqual(a.index(0, -3, -2), 3)
        # Bounds beyond sys.maxint must clamp, not overflow.
        self.assertEqual(a.index(0, -4*sys.maxint, 4*sys.maxint), 2)
        self.assertRaises(ValueError, a.index, 0, 4*sys.maxint,-4*sys.maxint)
        self.assertRaises(ValueError, a.index, 2, 0, -10)
        a.remove(0)
        self.assertRaises(ValueError, a.index, 2, 0, 4)
        self.assertEqual(a, self.type2test([-2, -1, 0, 1, 2]))

        # Test modifying the list during index's iteration
        class EvilCmp:
            def __init__(self, victim):
                self.victim = victim
            def __eq__(self, other):
                del self.victim[:]
                return False
        a = self.type2test()
        a[:] = [EvilCmp(a) for _ in xrange(100)]
        # This used to seg fault before patch #1005778
        self.assertRaises(ValueError, a.index, None)

    def test_reverse(self):
        u = self.type2test([-2, -1, 0, 1, 2])
        u2 = u[:]
        u.reverse()
        self.assertEqual(u, [2, 1, 0, -1, -2])
        u.reverse()
        self.assertEqual(u, u2)

        self.assertRaises(TypeError, u.reverse, 42)

    def test_sort(self):
        # The cmp= argument is deprecated under -3; silence the warning.
        with test_support.check_py3k_warnings(
                ("the cmp argument is not supported", DeprecationWarning)):
            self._test_sort()

    def _test_sort(self):
        """sort() body, including cmp= functions and mid-sort mutation."""
        u = self.type2test([1, 0])
        u.sort()
        self.assertEqual(u, [0, 1])

        u = self.type2test([2,1,0,-1,-2])
        u.sort()
        self.assertEqual(u, self.type2test([-2,-1,0,1,2]))

        self.assertRaises(TypeError, u.sort, 42, 42)

        def revcmp(a, b):
            return cmp(b, a)
        u.sort(revcmp)
        self.assertEqual(u, self.type2test([2,1,0,-1,-2]))

        # The following dumps core in unpatched Python 1.5:
        def myComparison(x,y):
            return cmp(x%3, y%7)
        z = self.type2test(range(12))
        z.sort(myComparison)

        self.assertRaises(TypeError, z.sort, 2)

        # Mutating the list from inside the comparison must raise ValueError.
        def selfmodifyingComparison(x,y):
            z.append(1)
            return cmp(x, y)
        self.assertRaises(ValueError, z.sort, selfmodifyingComparison)

        self.assertRaises(TypeError, z.sort, lambda x, y: 's')

        self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)

    def test_slice(self):
        u = self.type2test("spam")
        u[:2] = "h"
        self.assertEqual(u, list("ham"))

    def test_iadd(self):
        super(CommonTest, self).test_iadd()
        # += must mutate in place, not rebind to a new object.
        u = self.type2test([0, 1])
        u2 = u
        u += [2, 3]
        self.assertIs(u, u2)

        u = self.type2test("spam")
        u += "eggs"
        self.assertEqual(u, self.type2test("spameggs"))

        if not test_support.is_jython:
            self.assertRaises(TypeError, u.__iadd__, None)
        else:
            # Jython raises from the operator module path instead.
            import operator
            self.assertRaises(TypeError, operator.__iadd__, u, None)

    def test_imul(self):
        u = self.type2test([0, 1])
        u *= 3
        self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
        u *= 0
        self.assertEqual(u, self.type2test([]))
        # In-place multiply must keep the same object identity.
        s = self.type2test([])
        oldid = id(s)
        s *= 10
        self.assertEqual(id(s), oldid)

    def test_extendedslicing(self):
        """Stepped (extended) slice deletion and assignment."""
        #  subscript
        a = self.type2test([0,1,2,3,4])

        #  deletion
        del a[::2]
        self.assertEqual(a, self.type2test([1,3]))
        a = self.type2test(range(5))
        del a[1::2]
        self.assertEqual(a, self.type2test([0,2,4]))
        a = self.type2test(range(5))
        del a[1::-2]
        self.assertEqual(a, self.type2test([0,2,3,4]))
        a = self.type2test(range(10))
        del a[::1000]
        self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 6, 7, 8, 9]))
        #  assignment
        a = self.type2test(range(10))
        a[::2] = [-1]*5
        self.assertEqual(a, self.type2test([-1, 1, -1, 3, -1, 5, -1, 7, -1, 9]))
        a = self.type2test(range(10))
        a[::-4] = [10]*3
        self.assertEqual(a, self.type2test([0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
        a = self.type2test(range(4))
        a[::-1] = a
        self.assertEqual(a, self.type2test([3, 2, 1, 0]))
        a = self.type2test(range(10))
        b = a[:]
        c = a[:]
        # a[2:3], a[slice(2,3)] and a[2:3:] must all behave identically.
        a[2:3] = self.type2test(["two", "elements"])
        b[slice(2,3)] = self.type2test(["two", "elements"])
        c[2:3:] = self.type2test(["two", "elements"])
        self.assertEqual(a, b)
        self.assertEqual(a, c)
        a = self.type2test(range(10))
        a[::2] = tuple(range(5))
        self.assertEqual(a, self.type2test([0, 1, 1, 3, 2, 5, 3, 7, 4, 9]))
        #FIXME: not working on Jython
        if not test_support.is_jython:
            # test issue7788
            a = self.type2test(range(10))
            del a[9::1<<333]

    # XXX: CPython specific, PyList doesn't len() during init
    def _test_constructor_exception_handling(self):
        # Bug #1242657
        class F(object):
            def __iter__(self):
                raise KeyboardInterrupt
        self.assertRaises(KeyboardInterrupt, list, F())
|
class Puck(object):
    """Pong puck: position, radius, velocity, scoring, and paddle collisions.

    Relies on Processing (Python mode) sketch globals and builtins:
    width, height, PI, radians, cos, sin, fill, ellipse, plus Processing's
    5-argument map() (value re-mapping — NOT Python's builtin map) and its
    random(lo, hi)/random(hi) overloads.
    """
    def __init__(self):
        self.x = width / 2
        self.y = height / 2
        self.r = 12              # puck radius in pixels
        self.reset()             # sets angle/xspeed/yspeed and recenters
        self.rightscore = 0
        self.leftscore = 0

    def checkPaddleLeft(self, p):
        """Bounce off the left paddle p (expects p.x, p.y, p.w, p.h)."""
        # Circle-vs-rectangle overlap test on the paddle's inner edge.
        if self.y - self.r < p.y + p.h / 2 and self.y + self.r > p.y - p.h / 2 and self.x - self.r < p.x + p.w / 2:
            if self.x > p.x:
                self.diff = self.y - (p.y - p.h / 2)
                self.rad = radians(45)
                # Re-map hit position along the paddle to an exit angle
                # in [-45, +45] degrees.
                self.angle = map(self.diff, 0, p.h, -self.rad, self.rad)
                self.xspeed = 5 * cos(self.angle)
                self.yspeed = 5 * sin(self.angle)
                # Push the puck clear of the paddle to avoid re-collision.
                self.x = p.x + p.w / 2 + self.r

    def checkPaddleRight(self, p):
        """Bounce off the right paddle p (mirror of checkPaddleLeft)."""
        if self.y - self.r < p.y + p.h / 2 and self.y + self.r > p.y - p.h / 2 and self.x + self.r > p.x - p.w / 2:
            if self.x < p.x:
                self.diff = self.y - (p.y - p.h / 2)
                # Angles from 225 to 135 degrees send the puck back leftward.
                self.angle = map(self.diff, 0, p.h, radians(225), radians(135))
                self.xspeed = 5 * cos(self.angle)
                self.yspeed = 5 * sin(self.angle)
                self.x = p.x - p.w / 2 - self.r

    def update(self):
        """Advance the puck one frame along its velocity."""
        self.x += self.xspeed
        self.y += self.yspeed

    def edges(self):
        """Bounce off top/bottom walls; score and reset when off left/right."""
        if self.y < 0 or self.y > height:
            self.yspeed *= -1
        if self.x - self.r > width:      # fully past the right edge
            self.leftscore += 1
            self.reset()
        if self.x + self.r < 0:          # fully past the left edge
            self.rightscore += 1
            self.reset()

    def reset(self):
        """Recenter and relaunch at a random angle in [-45, 45] degrees,
        with a coin-flip horizontal direction."""
        self.x = width / 2
        self.y = height / 2
        self.angle = random(-PI / 4, PI / 4)
        self.xspeed = 5 * cos(self.angle)
        self.yspeed = 5 * sin(self.angle)
        if random(1) < 0.5:
            self.xspeed *= -1

    def show(self):
        """Draw the puck as a white circle."""
        fill(255)
        ellipse(self.x, self.y, self.r * 2, self.r * 2)
# -*- coding: utf-8 -*-
import logging
import simplejson
import os
import openerp
from openerp.addons.web.controllers.main import manifest_list, module_boot, html_template
class PointOfSaleController(openerp.addons.web.http.Controller):
    """Web controller for the Point of Sale app.

    Serves the POS web client page, its HTML5 offline cache manifest, and a
    set of JSON endpoints for POS hardware/proxy events (barcode scanner,
    scale, payment terminal, cashbox). NOTE(review): the hardware endpoints
    below are debug stubs — they only print and return canned values.
    """
    _cp_path = '/pos'

    @openerp.addons.web.http.httprequest
    def app(self, req, s_action=None, **kw):
        """Render the POS single-page app with an offline cache manifest."""
        # Build <script>/<link> tags for every JS/CSS asset of the web client.
        js = "\n ".join('<script type="text/javascript" src="%s"></script>' % i for i in manifest_list(req, None, 'js'))
        css = "\n ".join('<link rel="stylesheet" href="%s">' % i for i in manifest_list(req, None, 'css'))
        # NOTE(review): cookies.get() returns None when the cookie is absent,
        # and None.replace(...) raises AttributeError — confirm the session
        # cookie is always present here, or guard against None.
        cookie = req.httprequest.cookies.get("instance0|session_id")
        session_id = cookie.replace("%22","")
        # Point the <html> tag at /pos/manifest so browsers can work offline.
        template = html_template.replace('<html','<html manifest="/pos/manifest?session_id=%s"'%session_id)
        r = template % {
            'js': js,
            'css': css,
            'modules': simplejson.dumps(module_boot(req)),
            'init': 'var wc = new s.web.WebClient();wc.appendTo($(document.body));'
        }
        return r

    @openerp.addons.web.http.httprequest
    def manifest(self, req, **kwargs):
        """Generate an HTML5 cache manifest that preloads category and product
        thumbnails and the other resources the POS needs to work offline."""
        ml = ["CACHE MANIFEST"]
        # loading all the images in the static/src/img/* directories
        def load_css_img(srcdir,dstdir):
            # Recursively list image files, mapping fs paths to URL paths.
            for f in os.listdir(srcdir):
                path = os.path.join(srcdir,f)
                dstpath = os.path.join(dstdir,f)
                if os.path.isdir(path) :
                    load_css_img(path,dstpath)
                elif f.endswith(('.png','.PNG','.jpg','.JPG','.jpeg','.JPEG','.gif','.GIF')):
                    ml.append(dstpath)
        imgdir = openerp.modules.get_module_resource('point_of_sale','static/src/img');
        load_css_img(imgdir,'/point_of_sale/static/src/img')
        # Thumbnails of every product that appears in the POS.
        products = req.session.model('product.product')
        for p in products.search_read([('pos_categ_id','!=',False)], ['name']):
            product_id = p['id']
            url = "/web/binary/image?session_id=%s&model=product.product&field=image&id=%s" % (req.session_id, product_id)
            ml.append(url)
        # Category images.
        categories = req.session.model('pos.category')
        for c in categories.search_read([],['name']):
            category_id = c['id']
            url = "/web/binary/image?session_id=%s&model=pos.category&field=image&id=%s" % (req.session_id, category_id)
            ml.append(url)
        # Everything not listed above must still be fetched from the network.
        ml += ["NETWORK:","*"]
        m = "\n".join(ml)
        return m

    @openerp.addons.web.http.jsonrequest
    def dispatch(self, request, iface, **kwargs):
        """Route a hardware-interface call to the matching iface_* method.

        NOTE(review): no iface_* methods are defined in this class — the
        dispatch targets are presumably provided elsewhere; confirm.
        """
        method = 'iface_%s' % iface
        return getattr(self, method)(request, **kwargs)

    # --- Barcode scanner stubs ---
    @openerp.addons.web.http.jsonrequest
    def scan_item_success(self, request, ean):
        """
        A product has been scanned with success
        """
        print 'scan_item_success: ' + str(ean)
        return

    @openerp.addons.web.http.jsonrequest
    def scan_item_error_unrecognized(self, request, ean):
        """
        A product has been scanned without success
        """
        print 'scan_item_error_unrecognized: ' + str(ean)
        return

    # --- Assistance-light stubs ---
    @openerp.addons.web.http.jsonrequest
    def help_needed(self, request):
        """
        The user wants an help (ex: light is on)
        """
        print "help_needed"
        return

    @openerp.addons.web.http.jsonrequest
    def help_canceled(self, request):
        """
        The user stops the help request
        """
        print "help_canceled"
        return

    # --- Scale (weighting) stubs ---
    @openerp.addons.web.http.jsonrequest
    def weighting_start(self, request):
        print "weighting_start"
        return

    @openerp.addons.web.http.jsonrequest
    def weighting_read_kg(self, request):
        # Stub scale readout: always zero kilograms.
        print "weighting_read_kg"
        return 0.0

    @openerp.addons.web.http.jsonrequest
    def weighting_end(self, request):
        print "weighting_end"
        return

    # --- Payment terminal stubs ---
    @openerp.addons.web.http.jsonrequest
    def payment_request(self, request, price):
        """
        The PoS will activate the method payment
        """
        print "payment_request: price:"+str(price)
        return 'ok'

    @openerp.addons.web.http.jsonrequest
    def payment_status(self, request):
        print "payment_status"
        return { 'status':'waiting' }

    @openerp.addons.web.http.jsonrequest
    def payment_cancel(self, request):
        print "payment_cancel"
        return

    # --- Transaction / cashier-session stubs ---
    @openerp.addons.web.http.jsonrequest
    def transaction_start(self, request):
        print 'transaction_start'
        return

    @openerp.addons.web.http.jsonrequest
    def transaction_end(self, request):
        print 'transaction_end'
        return

    @openerp.addons.web.http.jsonrequest
    def cashier_mode_activated(self, request):
        print 'cashier_mode_activated'
        return

    @openerp.addons.web.http.jsonrequest
    def cashier_mode_deactivated(self, request):
        print 'cashier_mode_deactivated'
        return

    # --- Peripheral output stubs ---
    @openerp.addons.web.http.jsonrequest
    def open_cashbox(self, request):
        print 'open_cashbox'
        return

    @openerp.addons.web.http.jsonrequest
    def print_receipt(self, request, receipt):
        print 'print_receipt' + str(receipt)
        return

    @openerp.addons.web.http.jsonrequest
    def print_pdf_invoice(self, request, pdfinvoice):
        print 'print_pdf_invoice' + str(pdfinvoice)
        return
|
from flask import render_template,request,redirect,url_for,abort
from flask_login import login_required,current_user

from . import main
# NOTE(review): SubscribeForm is used by the subscribe() view below — it is
# assumed to be declared in .forms alongside the other forms; confirm.
from .forms import BlogForm,UpdateProfile,CommentForm,SubscribeForm
from ..models import User,Blog,Comment,Subscribe
from .. import db
from ..request import get_quotes
# from ..email import mail_message
# Pitch = pitch.Pitch
@main.route('/')
def index():
    """Render the landing page with every blog post and a fetched quote."""
    posts = Blog.query.all()
    daily_quote = get_quotes()
    return render_template('index.html', title='Home- Welcome',
                           all_blogs=posts, quote=daily_quote)
# NOTE(review): dead code — this helper has no @main.route decorator and the
# name `profile` is re-bound by the routed definition further down in this
# module, so this version is never reachable. Safe to delete once confirmed
# nothing imports it directly.
def profile(uname):
    """Render the profile page for `uname`, aborting with 404 if unknown."""
    user = User.query.filter_by(username = uname).first()
    if user is None:
        abort(404)
    return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
    """Edit the bio on `uname`'s profile; 404 when the user does not exist."""
    account = User.query.filter_by(username = uname).first()
    if account is None:
        abort(404)
    bio_form = UpdateProfile()
    if not bio_form.validate_on_submit():
        return render_template('profile/update.html',form =bio_form)
    account.bio = bio_form.bio.data
    db.session.add(account)
    db.session.commit()
    return redirect(url_for('.profile',uname=account.username))
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
    """Upload a new profile picture for `uname` and redirect to the profile.

    Bug fix: the stored path was the literal string 'photos/(unknown)' — the
    f-string never interpolated the filename returned by photos.save(), so
    every user's picture path pointed at a non-existent file.
    """
    user = User.query.filter_by(username = uname).first()
    if 'photo' in request.files:
        # NOTE(review): `photos` (a flask_uploads UploadSet) is not imported
        # in this module — confirm it is available, otherwise this raises
        # NameError on upload.
        filename = photos.save(request.files['photo'])
        user.profile_pic_path = f'photos/{filename}'
        db.session.commit()
    return redirect(url_for('main.profile',uname=uname))
@main.route('/user/<uname>')
def profile(uname):
    """Show the public profile page for `uname`; 404 when unknown."""
    account = User.query.filter_by(username=uname).first()
    if account is None:
        abort(404)
    return render_template("profile/profile.html", user=account)
@main.route('/new_blog', methods=['GET', 'POST'])
@login_required
def new_blog():
    """Create a blog post from the submitted form and notify subscribers."""
    blog_form = BlogForm()
    if blog_form.validate_on_submit():
        blog = blog_form.blog.data
        # user_id = blog_form.user_id.data
        new_blog = Blog(blog=blog,user_id=current_user.id)
        new_blog.save_blogs()
        # NOTE(review): the `mail_message` import is commented out at the top
        # of this module, so this loop raises NameError as soon as there is at
        # least one subscriber — restore `from ..email import mail_message`
        # or guard/remove the call.
        subscriber=Subscribe.query.all()
        for subscribe in subscriber:
            mail_message("New Blog Post","email/welcome_user",subscribe.email, new_blog = new_blog )
        return redirect(url_for('main.index'))
    return render_template('new_blog.html', blog_form=blog_form)
@main.route('/comment/new/<int:id>', methods=['GET', 'POST'])
@login_required
def comment(id):
    """Attach a new comment to blog `id`, then return to the index.

    Bug fix: the original called new_comment.delete_comments() immediately
    after save_comments(), so every comment was deleted right after being
    created and nothing ever persisted.
    """
    comment_form = CommentForm()
    blog = Blog.query.filter_by(id=id).first()
    if comment_form.validate_on_submit():
        description = comment_form.description.data
        new_comment = Comment(description=description, blogs_id=id,
                              user_id=current_user.id)
        new_comment.save_comments()
        return redirect(url_for('main.index'))
    return render_template('comment.html', comment_form=comment_form, blog=blog)
@main.route('/subscribe',methods=["GET","POST"])
def subscribe():
    """Subscribe an email address to new-blog notifications.

    Fixes: SubscribeForm was referenced but never imported (NameError on
    every request) — now imported at module top; removed the unused `email`
    and `title` locals.
    """
    form = SubscribeForm()
    if form.validate_on_submit():
        new_subscriber = Subscribe(email=form.email.data)
        db.session.add(new_subscriber)
        db.session.commit()
        # NOTE(review): mail_message's import is commented out at the top of
        # this module, so this call raises NameError at runtime — restore
        # `from ..email import mail_message` or drop the notification.
        mail_message("New Blog Post","email/welcome_user",new_subscriber.email)
        return redirect(url_for('main.index'))
    return render_template('subscribe.html',form=form)
@main.route('/delete/comment/<int:id>', methods = ['GET', 'POST'])
@login_required
def delete_comment(id):
    """Delete the comment with the given id, then go back to the index."""
    fallback_form = CommentForm()
    target = Comment.query.filter_by(id=id).first()
    if target is None:
        # Nothing to delete: just show the comment page again.
        return render_template('comment.html', form=fallback_form)
    target.delete_comments()
    return redirect(url_for('main.index'))
@main.route('/delete/post/<int:id>', methods = ['GET', 'POST'])
@login_required
def delete_blogs(id):
    """Delete the blog post with the given id (if any) and go back home."""
    post = Blog.query.filter_by(id=id).first()
    if post is not None:
        post.delete_blogs()
    return redirect(url_for('main.index'))
from app.http.api.middlewares import login_required, admin_or_owner
from app.servers.models import db
from app.plugins.pinger import Pinger
from app.servers.models import Server, ServerActivity
from app.servers.tasks import add_activity_log
def _update_status_callback(future):
    """Completion callback for Pinger.ping: record the ping result on the
    server record and append an activity-log entry.

    NOTE(review): `future.serverinfo.result()` reads oddly — on a standard
    Future the result comes from `future.result()`. This code assumes the
    Pinger attaches a `serverinfo` object that itself exposes `result()`;
    confirm against app.plugins.pinger.Pinger before changing.
    """
    status = future.serverinfo.result()
    future.serverinfo.status = status
    add_activity_log(future.serverinfo)
class PingHandler:
    """HTTP-facing handler that triggers asynchronous server pings."""

    def __init__(self):
        # One Pinger instance shared by all requests handled by this object.
        self.pinger = Pinger()

    @admin_or_owner
    def ping_async(self, id):
        """Kick off an async ping of server `id`; the module-level callback
        records the resulting status and activity log entry."""
        target = Server.query.get(id)
        self.pinger.ping(serverinfo=target,
                         async_callback=_update_status_callback)
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run masked LM/next sentence masked_lm pre-training for BERT in TF 2.x."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import optimization
import bert_models
import common_flags
import configs
import input_pipeline
from tf2_common.modeling import model_training_utils
from tf2_common.modeling import performance
from tf2_common.utils.misc import distribution_utils
from tf2_common.utils.mlp_log import mlp_log
import npu_device
from hccl.split.api import set_split_strategy_by_idx
# --- Ascend NPU runtime / debugging flags ---
flags.DEFINE_string(name='precision_mode', default= 'allow_fp32_to_fp16',
                    help='allow_fp32_to_fp16/force_fp16/ '
                    'must_keep_origin_dtype/allow_mix_precision.')
flags.DEFINE_boolean(name='over_dump', default=False,
                     help='if or not over detection, default is False')
flags.DEFINE_boolean(name='data_dump_flag', default=False,
                     help='data dump flag, default is False')
flags.DEFINE_string(name='data_dump_step', default="10",
                    help='data dump step, default is 10')
flags.DEFINE_boolean(name='profiling', default=False,
                     help='if or not profiling for performance debug, default is False')
flags.DEFINE_string(name='profiling_dump_path', default="/home/data",
                    help='the path to save profiling data')
flags.DEFINE_string(name='over_dump_path', default="/home/data",
                    help='the path to save over dump data')
flags.DEFINE_string(name='data_dump_path', default="/home/data",
                    help='the path to save dump data')
flags.DEFINE_boolean(name='autotune', default=False,
                     help='whether to enable autotune, default is False')
flags.DEFINE_boolean(name='use_npu_lamb', default=True,
                     help='whether to enable npu lamb optimizer, default is True')
flags.DEFINE_boolean(name='use_fastgelu', default=True,
                     help='whether to enable fastgelu, default is True')
flags.DEFINE_boolean(name='use_mixlist', default=True,
                     help='whether to enable mixlist, default is True')
flags.DEFINE_string(name='mixlist_file', default='ops_info.json',
                    help='mixlist file name, default is ops_info.json')
flags.DEFINE_boolean(name='use_npu_dropout', default=True,
                     help='whether to enable npu_dropout, default is True')
# --- Input data flags ---
flags.DEFINE_string('train_files', None,
                    'File path to retrieve training data for pre-training.')
flags.DEFINE_string('eval_files', None,
                    'File path to retrieve eval data for pre-training.')
# Model training specific flags.
flags.DEFINE_integer(
    'max_seq_length', 128,
    'The maximum total input sequence length after WordPiece tokenization. '
    'Sequences longer than this will be truncated, and sequences shorter '
    'than this will be padded.')
flags.DEFINE_integer('max_predictions_per_seq', 20,
                     'Maximum predictions per sequence_output.')
flags.DEFINE_integer('train_batch_size', 32, 'Total batch size for training.')
flags.DEFINE_integer('num_steps_per_epoch', 1000,
                     'Total number of training steps to run per epoch.')
# NOTE(review): warmup_steps is declared as a float flag even though it
# represents a step count — downstream code appears to tolerate this.
flags.DEFINE_float('warmup_steps', 10000,
                   'Warmup steps for optimizer.')
flags.DEFINE_integer('start_warmup_step', 0,
                     'The starting step of warmup.')
flags.DEFINE_integer('stop_steps', None,
                     'The number of steps to stop training.')
flags.DEFINE_bool('do_eval', False, 'Whether to run eval.')
flags.DEFINE_bool('device_warmup', False,
                  'Whether or not to enable device warmup.')
flags.DEFINE_integer('steps_between_eval', 10000,
                     'Steps between an eval. Is multiple of steps per loop.')
flags.DEFINE_integer('steps_before_eval_start', 0,
                     'Steps before starting eval.')
flags.DEFINE_integer('num_eval_samples', 10000, 'Number of eval samples.')
# NOTE(review): help text says "training" but this flag is the eval batch size.
flags.DEFINE_integer('eval_batch_size', 32, 'Total batch size for training.')
# --- Optimizer (LAMB) hyperparameters ---
flags.DEFINE_float('weight_decay_rate', 0.01,
                   'The weight_decay_rate value for the optimizer.')
flags.DEFINE_float('beta_1', 0.9, 'The beta_1 value for the optimizer.')
flags.DEFINE_float('beta_2', 0.999, 'The beta_2 value for the optimizer.')
flags.DEFINE_float('epsilon', 1e-6, 'The epsilon value for the optimizer.')
flags.DEFINE_integer('num_accumulation_steps', 1,
                     'number of steps to accumulate with large batch size.')
flags.DEFINE_float('stop_threshold', 0.912, 'Stop threshold for MLPerf.')
flags.DEFINE_float('poly_power', 1.0, 'The power of poly decay.')
# Register the flags shared across BERT binaries and snapshot the registry.
common_flags.define_common_bert_flags()
FLAGS = flags.FLAGS
def npu_config():
  """Apply Ascend NPU runtime options derived from command-line FLAGS.

  Configures data/overflow dumping, profiling, precision mode, memory limits
  and the mixed-precision op blacklist, then activates the NPU device. Must
  run before any TF work.

  Cleanup: removed a local `FLAGS = flags.FLAGS` that pointlessly shadowed
  the module-level FLAGS, and an unused local dict that shadowed this
  function's own name.
  """
  if FLAGS.data_dump_flag:
    npu_device.global_options().dump_config.enable_dump = True
    npu_device.global_options().dump_config.dump_path = FLAGS.data_dump_path
    npu_device.global_options().dump_config.dump_step = FLAGS.data_dump_step
    npu_device.global_options().dump_config.dump_mode = "all"
  if FLAGS.over_dump:
    npu_device.global_options().dump_config.enable_dump_debug = True
    npu_device.global_options().dump_config.dump_path = FLAGS.over_dump_path
    npu_device.global_options().dump_config.dump_debug_mode = "all"
  if FLAGS.profiling:
    npu_device.global_options().profiling_config.enable_profiling = True
    # JSON is whitespace-insensitive, so the continuation indentation that
    # ends up inside this string is harmless.
    profiling_options = '{"output":"' + FLAGS.profiling_dump_path + '", \
        "training_trace":"on", \
        "task_trace":"on", \
        "aicpu":"on", \
        "aic_metrics":"PipeUtilization",\
        "fp_point":"While_body_while_body_44418_1223/while/model/bert_pretrainer/transformer_encoder/self_attention_mask/mul", \
        "bp_point":"While_body_while_body_44418_1223/gradient_tape/while/model/bert_pretrainer/transformer_encoder/position_embedding/Pad"}'
    npu_device.global_options().profiling_config.profiling_options = profiling_options
  npu_device.global_options().precision_mode=FLAGS.precision_mode
  # 4 GiB for variables; graph memory capped at ~27.2 GiB (value in bytes).
  npu_device.global_options().variable_memory_max_size=4*1024*1024*1024
  #npu_device.global_options().graph_memory_max_size=str("27*1024*1024*1024")
  npu_device.global_options().graph_memory_max_size=29205777612
  if FLAGS.use_mixlist and FLAGS.precision_mode=='allow_mix_precision':
    logging.info('start to set op blacklist according to %s',FLAGS.mixlist_file)
    npu_device.global_options().modify_mixlist="../configs/"+FLAGS.mixlist_file
  npu_device.open().as_default()
def get_pretrain_dataset_fn(input_file_pattern,
                            seq_length,
                            max_predictions_per_seq,
                            global_batch_size,
                            is_training,
                            use_synthetic,
                            num_eval_samples):
  """Build a per-replica dataset factory for BERT pretraining."""

  def _dataset_fn(ctx=None):
    """Create the tf.data.Dataset for one input-pipeline context."""
    # Synthetic data needs no input files; otherwise split the CSV pattern.
    patterns = '' if use_synthetic else input_file_pattern.split(',')
    per_replica_batch = ctx.get_per_replica_batch_size(global_batch_size)
    return input_pipeline.create_pretrain_dataset(
        input_patterns=patterns,
        seq_length=seq_length,
        max_predictions_per_seq=max_predictions_per_seq,
        batch_size=per_replica_batch,
        is_training=is_training,
        use_synthetic=use_synthetic,
        input_pipeline_context=ctx,
        num_eval_samples=num_eval_samples)

  return _dataset_fn
def get_loss_fn(loss_factor=1.0):
  """Return a loss function that averages per-example losses and scales them."""

  def _bert_pretrain_loss_fn(unused_labels, losses, **unused_args):
    # Mean over the batch, scaled (e.g. by 1/num_replicas for distribution).
    return loss_factor * tf.reduce_mean(losses)

  return _bert_pretrain_loss_fn
def clip_by_global_norm(grads_and_vars):
  """Jointly clip all gradients to global norm 1.0, preserving the pairing."""
  gradients, variables = zip(*grads_and_vars)
  clipped, _ = tf.clip_by_global_norm(list(gradients), clip_norm=1.0)
  return zip(clipped, variables)
def run_customized_training(strategy,
                            optimizer_type,
                            weight_decay_rate,
                            beta_1,
                            beta_2,
                            epsilon,
                            bert_config,
                            max_seq_length,
                            max_predictions_per_seq,
                            model_dir,
                            steps_per_epoch,
                            steps_per_loop,
                            epochs,
                            initial_lr,
                            warmup_steps,
                            train_files,
                            train_batch_size,
                            do_eval,
                            eval_files,
                            eval_batch_size,
                            num_eval_samples,
                            custom_callbacks,
                            init_checkpoint,
                            steps_between_eval,
                            steps_before_eval_start,
                            stop_threshold,
                            explicit_allreduce,
                            allreduce_bytes_per_pack,
                            enable_checkpoint_and_summary,
                            device_warmup):
  """Run BERT pretrain model training using low-level API.

  Emits the MLPerf log entries for every hyperparameter, builds the
  train/eval/warmup dataset factories and the model+optimizer factory, and
  hands everything to model_training_utils.run_customized_training_loop.

  Returns:
    (trained_model, masked_lm_accuracy, run_steps) from the training loop.
  """
  # --- MLPerf bookkeeping: record hyperparameters before any work starts. ---
  mlp_log.mlperf_print('cache_clear', True)
  mlp_log.mlperf_print('init_start', None)
  mlp_log.mlperf_print('global_batch_size', train_batch_size)
  mlp_log.mlperf_print('max_sequence_length', max_seq_length)
  mlp_log.mlperf_print('max_predictions_per_seq', max_predictions_per_seq)
  mlp_log.mlperf_print('opt_base_learning_rate', initial_lr)
  mlp_log.mlperf_print('opt_lamb_weight_decay_rate', weight_decay_rate)
  mlp_log.mlperf_print('opt_lamb_beta_1', beta_1)
  mlp_log.mlperf_print('opt_lamb_beta_2', beta_2)
  mlp_log.mlperf_print('opt_gradient_accumulation_steps',
                       FLAGS.num_accumulation_steps)
  # NOTE(review): logged value is samples (batch_size * steps), not epochs —
  # confirm this is what the key expects.
  mlp_log.mlperf_print('opt_learning_rate_warmup_epochs',
                       train_batch_size * warmup_steps)
  mlp_log.mlperf_print('opt_learning_rate_warmup_steps', warmup_steps)
  mlp_log.mlperf_print('num_warmup_steps', warmup_steps)
  mlp_log.mlperf_print('start_warmup_step', FLAGS.start_warmup_step)
  mlp_log.mlperf_print('opt_epsilon', epsilon)
  mlp_log.mlperf_print('eval_samples', num_eval_samples)
  mlp_log.mlperf_print('opt_lamb_learning_rate_decay_poly_power',
                       FLAGS.poly_power)
  mlp_log.mlperf_print('opt_learning_rate_training_steps',
                       steps_per_epoch * epochs)
  mlp_log.mlperf_print('train_samples',
                       train_batch_size * steps_per_epoch * epochs)
  # --- Dataset factories: real training data, optional eval data, and
  # optional synthetic data used only for device warmup. ---
  train_input_fn = get_pretrain_dataset_fn(
      input_file_pattern=train_files,
      seq_length=max_seq_length,
      max_predictions_per_seq=max_predictions_per_seq,
      global_batch_size=train_batch_size,
      is_training=True,
      num_eval_samples=num_eval_samples,
      use_synthetic=False)
  eval_input_fn = None
  if do_eval:
    eval_input_fn = get_pretrain_dataset_fn(
        input_file_pattern=eval_files,
        seq_length=max_seq_length,
        max_predictions_per_seq=max_predictions_per_seq,
        global_batch_size=eval_batch_size,
        is_training=False,
        num_eval_samples=num_eval_samples,
        use_synthetic=False)
  synthetic_train_input_fn = None
  if device_warmup:
    synthetic_train_input_fn = get_pretrain_dataset_fn(
        input_file_pattern=None,
        seq_length=max_seq_length,
        max_predictions_per_seq=max_predictions_per_seq,
        global_batch_size=train_batch_size,
        is_training=True,
        num_eval_samples=1,
        use_synthetic=True)

  def _get_pretrain_model():
    """Gets a pretraining model."""
    # Built lazily so model and optimizer are created inside strategy scope.
    pretrain_model, core_model, core_pretrain_model = bert_models.pretrain_model(
        bert_config, max_seq_length, max_predictions_per_seq)
    optimizer = optimization.create_optimizer(
        initial_lr, steps_per_epoch * epochs, warmup_steps,
        optimizer_type=optimizer_type, poly_power=FLAGS.poly_power,
        start_warmup_step=FLAGS.start_warmup_step,
        weight_decay_rate=weight_decay_rate,
        beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
    # Wrap for float16 loss scaling / graph rewrite when those flags are set.
    pretrain_model.optimizer = performance.configure_optimizer(
        optimizer,
        use_float16=common_flags.use_float16(),
        use_graph_rewrite=common_flags.use_graph_rewrite())
    return pretrain_model, core_model, core_pretrain_model

  # Hand everything to the shared low-level training loop.
  trained_model, masked_lm_accuracy, run_steps = model_training_utils.run_customized_training_loop(
      strategy=strategy,
      model_fn=_get_pretrain_model,
      loss_fn=get_loss_fn(
          loss_factor=1.0 /
          strategy.num_replicas_in_sync if FLAGS.scale_loss else 1.0),
      model_dir=model_dir,
      train_input_fn=train_input_fn,
      steps_per_epoch=steps_per_epoch,
      steps_per_loop=steps_per_loop,
      epochs=epochs,
      eval_input_fn=eval_input_fn,
      # floor drops the final partial eval batch (ceil would include it).
      eval_steps=math.floor(num_eval_samples / eval_batch_size),
      #eval_steps=math.ceil(num_eval_samples / eval_batch_size),
      steps_between_eval=steps_between_eval,
      steps_before_eval_start=steps_before_eval_start,
      sub_model_export_name='pretrained/bert_model',
      init_checkpoint=init_checkpoint,
      custom_callbacks=custom_callbacks,
      device_warmup=device_warmup,
      synthetic_train_input_fn=synthetic_train_input_fn,
      explicit_allreduce=explicit_allreduce,
      post_allreduce_callbacks=[clip_by_global_norm],
      allreduce_bytes_per_pack=allreduce_bytes_per_pack,
      enable_checkpoint_and_summary=enable_checkpoint_and_summary,
      num_accumulation_steps=FLAGS.num_accumulation_steps,
      stop_steps=FLAGS.stop_steps,
      stop_threshold=stop_threshold)
  return trained_model, masked_lm_accuracy, run_steps
def run_bert_pretrain(strategy, custom_callbacks=None):
  """Runs BERT pre-training.

  Loads the BERT config, sets the mixed-precision policy and the HCCL
  gradient split strategy, then delegates to run_customized_training with
  all hyperparameters taken from FLAGS.

  Returns:
    (masked_lm_accuracy, run_steps) from the training run.
  """
  bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
  if not strategy:
    raise ValueError('Distribution strategy is not specified.')
  # Runs customized training loop.
  logging.info('Training using customized training loop TF 2.0 with distrubuted'
               'strategy.')
  performance.set_mixed_precision_policy(common_flags.dtype())
  # NOTE(review): gradient split indices for HCCL allreduce, keyed off layer
  # count (24 = BERT Large, 12 = BERT Base) — values appear hand-tuned;
  # confirm before changing model size.
  if(bert_config.num_hidden_layers==24):
    logging.info('Bert Large split strategy')
    set_split_strategy_by_idx([49,113,177,241,305,353,385,397])
  elif(bert_config.num_hidden_layers==12):
    logging.info('Bert Base split strategy')
    set_split_strategy_by_idx([8,56,104,152,200,205])
  else:
    logging.info("There is not split strategy")
  _, masked_lm_accuracy, run_steps = run_customized_training(
      strategy=strategy,
      optimizer_type=FLAGS.optimizer_type,
      weight_decay_rate=FLAGS.weight_decay_rate,
      beta_1=FLAGS.beta_1,
      beta_2=FLAGS.beta_2,
      epsilon=FLAGS.epsilon,
      bert_config=bert_config,
      max_seq_length=FLAGS.max_seq_length,
      max_predictions_per_seq=FLAGS.max_predictions_per_seq,
      model_dir=FLAGS.model_dir,
      steps_per_epoch=FLAGS.num_steps_per_epoch,
      steps_per_loop=FLAGS.steps_per_loop,
      epochs=FLAGS.num_train_epochs,
      initial_lr=FLAGS.learning_rate,
      warmup_steps=FLAGS.warmup_steps,
      train_files=FLAGS.train_files,
      train_batch_size=FLAGS.train_batch_size,
      eval_files=FLAGS.eval_files,
      eval_batch_size=FLAGS.eval_batch_size,
      do_eval=FLAGS.do_eval,
      num_eval_samples=FLAGS.num_eval_samples,
      steps_between_eval=FLAGS.steps_between_eval,
      steps_before_eval_start=FLAGS.steps_before_eval_start,
      stop_threshold=FLAGS.stop_threshold,
      explicit_allreduce=FLAGS.explicit_allreduce,
      allreduce_bytes_per_pack=FLAGS.allreduce_bytes_per_pack,
      enable_checkpoint_and_summary=FLAGS.enable_checkpoint_and_summary,
      custom_callbacks=custom_callbacks,
      init_checkpoint=FLAGS.init_checkpoint,
      device_warmup=FLAGS.device_warmup)
  return masked_lm_accuracy, run_steps
def main(_):
  """Binary entry point: set up the NPU, pick a strategy, run pretraining."""
  # Users should always run this script under TF 2.x
  tf.compat.v2.enable_v2_behavior()
  # Apply NPU runtime options (dump/profiling/precision) before any TF work.
  npu_config()
  if not FLAGS.model_dir:
    FLAGS.model_dir = '/tmp/bert20/'
  strategy = distribution_utils.get_distribution_strategy(
      distribution_strategy=FLAGS.distribution_strategy,
      num_gpus=FLAGS.num_gpus,
      all_reduce_alg=FLAGS.all_reduce_alg,
      tpu_address=FLAGS.tpu)
  if strategy:
    print('***** Number of cores used : ', strategy.num_replicas_in_sync)
  run_bert_pretrain(strategy)


if __name__ == '__main__':
  app.run(main)
|
#!C:\Users\jjwri\PycharmProjects\mapping\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'ogrtools==0.7.3','console_scripts','ogr'
# NOTE: auto-generated setuptools console-script wrapper for the `ogr`
# entry point of ogrtools 0.7.3 — regenerate via pip/setuptools rather
# than editing by hand.
__requires__ = 'ogrtools==0.7.3'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix the Windows launcher appends to
    # argv[0] so the program sees its plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('ogrtools==0.7.3', 'console_scripts', 'ogr')()
    )
|
"""
Functions for managing configuration and system setup.
Do not import any django code in this file, if you must, inline the imports
in the function, NEVER at the top!!!
"""
import os
import re
import sys
from datetime import datetime
from typing import Optional, Dict, Union, Set, IO, List, Any
import shutil
import subprocess
from dotenv import dotenv_values
# Number of hex characters kept when reporting a git commit id.
COMMIT_ID_LENGTH = 9

# Value used in checked-in env files for settings whose real (secret) value
# must be supplied via secrets.env.
PLACEHOLDER_FOR_SECRET = 'set-this-value-in-secrets.env'

# Setting names matching this pattern are auto-classified as secrets...
AUTOFIND_SECRET_SETTINGS = re.compile('^.*(PASSWORD|SECRET|API_KEY).*$')
# ...except these, which merely contain the keywords but are not sensitive.
AUTOFIND_SECRET_SETTINGS_EXCLUDED = {
    'ENV_SECRETS',
    'ENV_SECRETS_FILE',
    'AUTH_PASSWORD_VALIDATORS',
    'PASSWORD_RESET_TIMEOUT_DAYS',
    'ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE',
    'PLACEHOLDER_FOR_SECRET',
}
class AttributeDict(dict):
    """Dict whose keys are also readable/writable as attributes:
    d.key == d['key']. Missing keys raise KeyError, mirroring dict access."""

    def __getattr__(self, attr: str) -> Any:
        # Only called when normal attribute lookup fails, so real attributes
        # and methods are unaffected.
        return self[attr]

    def __setattr__(self, attr: str, value: Any) -> None:
        self[attr] = value
class PrettyTerm:
"""Helper for printing and formatting pretty terminal output"""
ANSI_COLOR = '\033[{fg};{bg}m'
COLORS = {
'RESET': ANSI_COLOR.format(fg='00', bg='00'),
'RED': ANSI_COLOR.format(fg='01', bg='31'),
'GREEN': ANSI_COLOR.format(fg='01', bg='32'),
'YELLOW': ANSI_COLOR.format(fg='01', bg='33'),
'BLUE': ANSI_COLOR.format(fg='01', bg='34'),
'PURPLE': ANSI_COLOR.format(fg='01', bg='35'),
'CYAN': ANSI_COLOR.format(fg='01', bg='36'),
'WHITE': ANSI_COLOR.format(fg='01', bg='37'),
}
color: bool
truncate: bool
fd: IO
def __init__(self, color: bool=False, truncate: bool=False, fd: IO=sys.stdout):
self.color = color
self.truncate = truncate
self.fd = fd
@classmethod
def num_control_chars(cls, string: str) -> int:
control_chars = 0
for color in cls.COLORS.values():
control_chars += (string.count(color) * len(color))
return control_chars
@classmethod
def truncated(cls, string: str) -> str:
invisible_chars = cls.num_control_chars(string)
max_columns = shutil.get_terminal_size((160, 10)).columns
return string[:max_columns + invisible_chars]
@classmethod
def colored(cls, string: str, color: Optional[str]) -> str:
if color is None:
return string
return f'{cls.COLORS[color.upper()]}{string}{cls.COLORS["RESET"]}'
def format(self, *string, color: Optional[str]=None) -> str:
out_str = ' '.join(str(s) for s in string)
if self.color:
if color:
out_str = self.colored(out_str, color)
else:
out_str = out_str.format(**self.COLORS)
else:
out_str = out_str.format(**{key: '' for key in self.COLORS.keys()})
if self.truncate:
out_str = self.truncated(out_str)
return out_str
def write(self, string: str) -> None:
self.fd.write(self.format(string))
# System Environment Getters
def get_current_django_command() -> Optional[str]:
    """Currently running manage.py subcommand (runserver, test, migrate, ...),
    lowercased, or None when no subcommand was given."""
    argv = sys.argv
    if len(argv) <= 1:
        return None
    return argv[1].lower()
def get_current_hostname(fqdn=False) -> str:
    """Short system hostname by default (e.g. squash, jose-laptop); pass
    fqdn=True for the full name, warning when only a short name is known."""
    hostname = os.uname()[1]
    if not fqdn:
        return hostname.split('.', 1)[0]
    if '.' not in hostname:
        print('[!] Warning, tried to get host FQDN, but got short hostname')
    return hostname
def get_current_pid() -> int:
    """Return the OS process id of the current process."""
    return os.getpid()
def get_current_system_time() -> datetime:
    """Return the current local system time (naive datetime)."""
    return datetime.now()
def get_python_implementation() -> str:
    """Return the interpreter implementation name, e.g. 'cpython' or 'pypy'."""
    return sys.implementation.name
def get_active_git_branch(repo_dir: str) -> str:
    """Active branch name (e.g. master) read from .git/HEAD; 'unknown' on
    any failure (missing repo, detached head file, permissions, ...)."""
    head_path = os.path.join(repo_dir, '.git', 'HEAD')
    try:
        with open(head_path, 'r') as head_file:
            contents = head_file.read()
    except Exception:
        return 'unknown'
    return contents.strip().replace('ref: refs/heads/', '')
def get_active_git_commit(repo_dir: str, head: str) -> str:
    """Short commit id (e.g. 47df4ed31) at the tip of branch `head`;
    'unknown' on any failure."""
    ref_path = os.path.join(repo_dir, '.git', 'refs', 'heads', head)
    try:
        with open(ref_path, 'r') as ref_file:
            full_commit = ref_file.read()
    except Exception:
        return 'unknown'
    return full_commit.strip()[:COMMIT_ID_LENGTH]
# Environment Variable and Config Management
# The types a setting's default (and therefore its cast) may have.
EnvSettingTypes = (str, bool, int, float, list)
EnvSetting = Union[str, bool, int, float, list]

def get_env_value(env: dict, key: str, default: EnvSetting=None):
    """Get `env[key]` cast to the type of `default`, or `default` if absent.

    Supported default types: str, bool, int, float, list (EnvSettingTypes).
    Booleans accept only 'true'/'false' (any case); lists are comma-split.
    Raises TypeError when the raw string cannot be cast to the default's
    type.

    Bug fix: integers were previously validated with value.isdigit(), which
    rejected valid negative numbers like '-1'; int() is now used directly.
    (Also fixed the 'environemnt' typo in the assertion message.)
    """
    if key not in env:
        return default
    value = env[key]
    ExpectedType = type(default)
    assert ExpectedType in EnvSettingTypes, (
        f'Tried to set unsupported environment variable {key} to {ExpectedType}')

    def raise_typerror():
        raise TypeError(f'Got bad environment variable {key}={value}'
                        f' (expected {ExpectedType})')

    if ExpectedType is str:
        return value
    elif ExpectedType is bool:
        if value.lower() == 'true':
            return True
        elif value.lower() == 'false':
            return False
        else:
            raise_typerror()
    elif ExpectedType is int:
        # int() handles signs and surrounding whitespace, unlike isdigit().
        try:
            return int(value)
        except ValueError:
            raise_typerror()
    elif ExpectedType is float:
        try:
            return float(value)
        except ValueError:
            raise_typerror()
    elif ExpectedType is list:
        # Lists are encoded as comma-separated strings.
        return value.split(',')
def unique_env_settings(env: dict, defaults: dict) -> dict:
    """Return the settings in `env` that differ from `defaults`.

    With empty/None `defaults`, simply returns all setting-shaped names
    (UPPERCASE, not underscore-prefixed) found in `env`.
    """
    base = defaults or env
    existing_settings = {
        name: value
        for name, value in base.items()
        if name.isupper() and not name.startswith('_')
    }
    if not defaults:
        return existing_settings
    changed = {}
    for name, default_value in existing_settings.items():
        loaded = get_env_value(env, name, default_value)
        if loaded != default_value:
            changed[name] = loaded
    return changed
def load_env_settings(dotenv_path: str=None, env: dict=None, defaults: dict=None) -> dict:
    """Load settings from a dotenv file, or from the given env mapping,
    keeping only values that differ from `defaults` (see unique_env_settings)."""
    assert not (dotenv_path and env), 'Only pass env or dotenv_path, not both'
    if dotenv_path:
        values = dotenv_values(dotenv_path=dotenv_path)
    else:
        values = (env or {}).copy()
    return unique_env_settings(values, (defaults or {}).copy())
def get_setting_source(SETTINGS_SOURCES: Dict[str, Dict[str, Any]], key: str) -> str:
    """Return the name of the last (highest-priority) source defining `key`.

    Sources are scanned in reverse insertion order so later files override
    earlier ones. Raises ValueError when no source defines the setting.
    (Fixed: the error message previously ended with an unbalanced ')'.)
    """
    for source_name, settings in reversed(list(SETTINGS_SOURCES.items())):
        if key in settings:
            return source_name
    source_names = ', '.join(SETTINGS_SOURCES.keys())
    raise ValueError(f'Setting {key} is not in any of {source_names}')
def get_secret_setting_names(settings: dict) -> Set[str]:
    """Guess which setting names likely hold sensitive values.

    A name is a secret when it matches AUTOFIND_SECRET_SETTINGS or its
    default is the secrets placeholder — unless it is explicitly excluded.
    """
    looks_secret = {
        key for key in settings
        if AUTOFIND_SECRET_SETTINGS.match(key)
    }
    placeholder_defaults = {
        key for key, value in settings['SETTINGS_DEFAULTS'].items()
        if value == PLACEHOLDER_FOR_SECRET
    }
    # Excluding after the union is equivalent to excluding in each set.
    return (looks_secret | placeholder_defaults) - AUTOFIND_SECRET_SETTINGS_EXCLUDED
# Invariant and Assertion Checkers
def check_system_invariants(settings: dict):
    """Check basic system setup and throw if there is any misconfiguration"""
    s = AttributeDict(settings)
    assert s.SERVER_ENV in s.ALLOWED_ENVS, (
        f'SERVER_ENV={s.SERVER_ENV} is not one of the allowed values: '
        f'{",".join(s.ALLOWED_ENVS)}')
    # BUGFIX: MIN_PYTHON_VERSION is a version tuple which has no .join()
    # method, so the original failure message raised AttributeError
    # instead of printing the required version
    assert sys.version_info >= s.MIN_PYTHON_VERSION, (
        f'Installed Python version {sys.version_info} is not high enough '
        f' (you must install Python >={".".join(str(v) for v in s.MIN_PYTHON_VERSION)})'
    )
    assert sys.implementation.name in s.ALLOWED_PYTHON_IMPLEMENTATIONS, (
        f'The active Python implementation type "{sys.implementation.name}" '
        f'is not supported (must be one of {s.ALLOWED_PYTHON_IMPLEMENTATIONS})'
    )
    assert os.path.realpath(s.REPO_DIR) == os.path.realpath(s.ALLOWED_REPO_DIR), (
        'Project directory was not found in the expected location. '
        f'(you must move or symlink {s.REPO_DIR} to {s.ALLOWED_REPO_DIR})'
    )
    # NOTE(review): /etc/passwd is world-readable on most systems, and the
    # PermissionError raised inside this `try` is swallowed by its own
    # `except PermissionError: pass` clause, so this block is effectively a
    # no-op.  Left unchanged to avoid a behavior change; the explicit
    # DJANGO_USER == 'root' check below is what actually guards root usage.
    try:
        with open('/etc/passwd', 'r') as f:
            f.read()
        if not s.ALLOW_ROOT:
            raise PermissionError(f'Django should never be run as root ({s.DJANGO_USER} can read /etc/passwd)!')
    except PermissionError:
        pass
    # running as root even once will corrupt the permissions on all the DATA_DIRS
    if s.DJANGO_USER == 'root':
        if not s.ALLOW_ROOT:
            raise PermissionError('Django should never be run as root!')
        else:
            print(
                '[!] Warning: Running Django as root because ALLOW_ROOT=True. '
                '(You must manually fix the data folder permissions after '
                'quitting in order to be able to run it as your normal user).'
            )
    # python -O strips asserts from our code, but we use them for critical logic:
    # normally the assert raises and the error is swallowed; under -O the assert
    # is stripped, so the print runs and we exit immediately
    try:
        assert not __debug__
        print('Never run grater with python -O, asserts are needed in production.')
        raise SystemExit(1)
    except AssertionError:
        pass
    if hasattr(sys.stderr, 'encoding'):
        # Set these in ~/.bashrc or ~/.config/fish/config.fish to fix encoding:
        # LANG='en_US.UTF-8',
        # LC_ALL='en_US.UTF-8',
        # PYTHONIOENCODING='UTF-8'
        assert sys.stderr.encoding == sys.stdout.encoding == 'UTF-8', (
            f'Bad shell encoding setting "{sys.stdout.encoding}". '
            'System, Shell, and Python system locales must be set to '
            '(uppercase) "UTF-8" to run properly.'
        )
def check_prod_safety(settings: dict):
    """Forbid DEBUG features and permissions mistakes on production boxes."""
    conf = AttributeDict(settings)
    if conf.PROD_SAFETY_CHECK:
        assert not conf.DEBUG, 'DEBUG=True is never allowed on prod and beta!'
        assert conf.SERVER_ENV == 'PROD', 'Prod must always be run with SERVER_ENV=PROD'
        assert conf.DJANGO_USER == 'www-data', 'Django can only be run as user www-data'
        assert conf.DEFAULT_HTTP_PROTOCOL == 'https', 'https is required on prod servers'
        assert conf.TIME_ZONE == 'UTC', 'Prod servers must always be set to UTC timezone'
        # tests can pollute the data dir and use lots of CPU / Memory
        # only disable this check if you're 100% confident it's safe and have a
        # very good reason to run tests on production. remember to try beta first
        assert not conf.IS_TESTING, 'Tests should not be run on prod machines'
        return
    if conf.SERVER_ENV == 'PROD':
        # can be safely ignored when testing PROD mode on dev machines
        print(
            f'[!] Warning: Running with SERVER_ENV=PROD but '
            f'PROD_SAFETY_CHECK is set to False! '
            '(dangerous if server is publicly accessible)'
        )
def check_http_settings(settings: dict):
    """check the server url scheme, host, and port config

    Validates that BASE_URL, DEFAULT_HTTP_PROTOCOL, DEFAULT_HOST, and
    DEFAULT_HTTP_PORT all agree with each other and with the cookie flags.
    """
    s = AttributeDict(settings)
    # BASE_URL looks like 'scheme://host[:port]'; the `+ ['']` pads the split
    # result so the starred unpacking never fails when a piece is missing
    baseurl_scheme, baseurl_host, *_ = s.BASE_URL.split('://', 1) + ['']
    baseurl_domain, baseurl_port, *_ = baseurl_host.split(':', 1) + ['']
    # check scheme
    assert baseurl_scheme in ('http', 'https'), (
        'BASE_URL scheme must be http or https')
    assert baseurl_scheme == s.DEFAULT_HTTP_PROTOCOL, (
        'BASE_URL scheme must match DEFAULT_HTTP_PROTOCOL')
    if baseurl_scheme == 'http':
        assert not (s.SESSION_COOKIE_SECURE or s.CSRF_COOKIE_SECURE), (
            'SESSION_COOKIE_SECURE and CSRF_COOKIE_SECURE must be False '
            'when using http (to fix, set them to False in env/secrets.env)'
        )
    elif baseurl_scheme == 'https':
        # BUGFIX: the remediation hint used to say "set them to False",
        # contradicting the requirement stated one line above
        assert s.SESSION_COOKIE_SECURE and s.CSRF_COOKIE_SECURE, (
            'SESSION_COOKIE_SECURE and CSRF_COOKIE_SECURE must be True '
            'when using https (to fix, set them to True in env/secrets.env)'
        )
    # check host
    assert baseurl_domain == s.DEFAULT_HOST, (
        'BASE_URL must use the DEFAULT_HOST for links on the site to work')
    assert baseurl_domain.replace('.', '').replace('-', '').isalnum(), (
        'DEFAULT_HOST must be a valid hostname or IP address e.g. '
        '127.0.0.1 or example.zalad.io (without scheme, port, or path)'
    )
    assert baseurl_domain in s.ALLOWED_HOSTS, (
        'DEFAULT_HOST must be in ALLOWED_HOSTS for the site to be accessible')
    # check port
    assert isinstance(s.DEFAULT_HTTP_PORT, int)
    if s.DEFAULT_HTTP_PORT == 80:
        assert s.DEFAULT_HTTP_PROTOCOL == 'http', (
            'DEFAULT_HTTP_PROTOCOL must be http when DEFAULT_HTTP_PORT=80')
    elif s.DEFAULT_HTTP_PORT == 443:
        assert s.DEFAULT_HTTP_PROTOCOL == 'https', (
            'DEFAULT_HTTP_PROTOCOL must be https when DEFAULT_HTTP_PORT=443')
    else:
        assert s.DEFAULT_HTTP_PROTOCOL in ('http', 'https')
        assert 65535 > s.DEFAULT_HTTP_PORT > 1024
    if baseurl_port:
        assert baseurl_port.isdigit()
        # BUGFIX: this assertion's message was truncated ('port in ')
        assert int(baseurl_port) == s.DEFAULT_HTTP_PORT, (
            'port in BASE_URL must match DEFAULT_HTTP_PORT')
    else:
        assert s.DEFAULT_HTTP_PORT in (80, 443)
    # check BASE_URL
    assert not s.BASE_URL.endswith('/'), (
        'BASE_URL should not have a trailing slash')
    if s.DEFAULT_HTTP_PORT in (80, 443):
        assert ':' not in s.BASE_URL.split('://')[1], (
            'Port should not be included in BASE_URL when using '
            'https on 443 or http on 80')
    else:
        assert s.BASE_URL.endswith(f':{s.DEFAULT_HTTP_PORT}'), (
            ':port must be included in BASE_URL when using a non-standard port')
def check_secure_settings(settings: dict):
    """Check that all secure settings are defined safely in secrets.env"""
    conf = AttributeDict(settings)
    # Some config should not be in git and can only be passed via secrets or os.env
    allowed_sources = (conf.ENV_SECRETS_FILE, 'os.environ')
    # make sure all security-sensitive settings are coming from safe sources
    for name in get_secret_setting_names(settings=settings):
        value = settings.get(name, '')
        defined_in = get_setting_source(conf.SETTINGS_SOURCES, name)
        # Do not comment this out, instead move the secret into secrets.env
        # and PM the secret to other devs to update their secrets.env
        try:
            assert defined_in in allowed_sources, (
                'Security-sensitive settings must only be defined in secrets.env!\n'
                f'    Got: {name}={value} in {defined_in}\n'
                f'    Expected: {name}={value} in secrets.env'
            )
            # make sure settings are not defaults on prod
            assert value and value != PLACEHOLDER_FOR_SECRET, (
                'Required API key or secret was not defined in secrets.env or os.environ\n'
                f'    Got: {name}={value} from {defined_in}'
                f'    Expected: {name}=somesecretvalue or {name}=UNUSED in secrets.env'
            )
        except AssertionError as e:
            # hard failure on prod (or with the prod safety check enabled),
            # a printed warning on non-DEBUG dev machines, silent otherwise
            if conf.SERVER_ENV == 'PROD' or conf.PROD_SAFETY_CHECK:
                raise e
            elif not conf.DEBUG:
                print(f'[!] Warning: {e}')
    # if s.IS_TESTING:
    #     assert s.REDIS_DB != s.SETTINGS_DEFAULTS['REDIS_DB'], (
    #         'Tests must be run with a different redis db than the main redis')
def get_django_status_line(settings: dict, pretty: bool=False) -> str:
    """the status line with process info printed every time django starts"""
    # > ./manage.py check; ⚙️ DEV 👾 True 📂 ../data 🗄 127.0.0.1/project ...
    conf = AttributeDict(settings)
    term = PrettyTerm(color=pretty, truncate=pretty)
    icons = {
        'env': '⚙️ ',
        'debug': '👾 ',
        'data': '📂 ',
        'db': '🗄 ',
        'git': '#️⃣ ',
        'usr': '👤 ',
        'pid': '🆔 ',
        'ts': '🕐 ',
    }

    def render(label, value):
        """format one section: an icon in pretty mode, label=value otherwise"""
        if pretty:
            return f'{icons[label]}{value} '
        return f'{label}={value} '

    debug_part = term.format(conf.DEBUG, color=('green', 'red')[conf.DEBUG])
    pypy_part = " PYPY" if conf.PY_TYPE == "pypy" else ""
    pieces = [
        term.format('> ./manage.py ', color='yellow'),
        term.format(" ".join(sys.argv[1:]), color='blue'),
        '; ',
        render("env", term.format(conf.SERVER_ENV, color='green')),
        render("debug", f'{debug_part}{pypy_part}'),
        render("data", conf.DATA_DIR.replace(conf.REPO_DIR + "/", "../")),
        render("db", f'{conf.POSTGRES_HOST}/{conf.POSTGRES_DB}'),
        render("git", f'{conf.GIT_SHA} ({conf.GIT_HEAD})'),
        render("usr", conf.DJANGO_USER),
        render("pid", conf.PID),
        render("ts", int(conf.START_TIME.timestamp())),
    ]
    return term.format(''.join(pieces))
def log_django_startup(settings: dict) -> None:
    """print and log the django status line to stdout and the reloads log"""
    conf = AttributeDict(settings)
    # append the plain status line to the reloads log
    with open(conf.RELOADS_LOGS, 'a+') as logfile:
        logfile.write(f'{conf.STATUS_LINE}\n')
    # echo the (optionally colorized) status line to stdout
    stdout_line = conf.PRETTY_STATUS_LINE if conf.CLI_COLOR else conf.STATUS_LINE
    print(stdout_line)
# File and folder management
def mkchown(path: str, mode: str, user: str, group: str):
    """create and chown a directory to make sure a user can write to it"""
    abilities = {'r': 'read', 'w': 'write to'}
    assert mode in abilities
    try:
        # a plain file sitting where the directory should be is a hard error
        if os.path.exists(path) and not os.path.isdir(path):
            raise FileExistsError
        if not os.path.exists(path):
            os.makedirs(path, exist_ok=True)
        if sys.platform == 'darwin':
            # on mac, just chown as the user
            shutil.chown(path, user=user)
        else:
            # on linux, chown as user:group
            shutil.chown(path, user=user, group=group)
        # confirm readability; for 'w' mode also round-trip a probe file
        os.listdir(path)
        if mode == 'w':
            probe = os.path.join(path, '.django_write_test')
            with open(probe, 'w') as handle:
                handle.write('test')
            os.remove(probe)
    except FileExistsError:
        # sshfs folders can trigger a FileExistsError if permissions
        # are not set up to allow user to access fuse filesystems
        # make sure allow_other is passed to sshfs and www-data is in "fuse" group
        print(
            f'[!] Existing file conflicts with data dir path: {path}\n'
            '(check for incorrect permissions, or remove it and try again)'
        )
        raise
    except PermissionError:
        print(
            f'[!] Django user "{user}" is not able to {abilities[mode]} '
            f'the data dir: {path}'
        )
        raise
def check_data_folders(settings: dict):
    """set the proper permissions on all the data folders used by django"""
    conf = AttributeDict(settings)
    # Required permissions for each folder, r=read only, w=read and write
    folder_modes = {
        conf.REPO_DIR: 'r',
        conf.DATA_DIR: 'r',
        conf.LOGS_DIR: 'w',
        conf.STATIC_ROOT: 'r',
        conf.MEDIA_ROOT: 'w',
    }
    for folder, required_mode in folder_modes.items():
        mkchown(
            folder,
            mode=required_mode,
            user=conf.DJANGO_USER,
            group=conf.DJANGO_USER,
        )
    return folder_modes
# Process Management
def ps_aux(pattern: Optional[str]=None) -> List[bytes]:
    """find all processes matching a given str pattern

    Returns the matching raw `ps axw` output lines (as bytes, since the
    pipe is not decoded — see kill(), which decodes them).
    """
    # BUGFIX: Popen's stdout yields *bytes* lines, so testing a str pattern
    # with `pattern in line` raised TypeError on every call with a pattern.
    # Encode the pattern once so the membership test is bytes-vs-bytes.
    needle = pattern.encode() if pattern else None
    proc = subprocess.Popen(['ps', 'axw'], stdout=subprocess.PIPE)
    return [
        line for line in proc.stdout
        if needle is None or needle in line
    ]
def kill(pid_lines: list) -> None:
    """for each process line produced by ps_aux, kill the process"""
    for raw_line in pid_lines:
        # first whitespace-separated field of a `ps axw` line is the PID
        fields = raw_line.decode().strip().split()
        pid = fields[0]
        assert pid.isdigit(), 'Found invalid text when expecting PID'
        subprocess.Popen(['kill', pid])
|
# ----------------------------------------------------------------------
# ReportObjectAttrubuteResolver datasource
# ----------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import connection
# NOC modules
from .base import BaseReportColumn
from noc.sa.models.profile import Profile
from noc.inv.models.vendor import Vendor
from noc.inv.models.platform import Platform
from noc.inv.models.firmware import Firmware
class ReportAttrResolver(BaseReportColumn):
    name = "reportattrresolver"
    unknown_value = ["", "", "", ""]
    ATTRS = ["profile", "vendor", "version", "platform"]

    def extract(self):
        """
        Yield one tuple per ManagedObject:
        (mo_id, profile name, vendor name, platform name, firmware version),
        resolving unknown references to "".
        :rtype: generator of tuples
        """
        # Preload ObjectId -> display-name maps from mongo once, so the
        # SQL rows below are resolved without per-row lookups
        platform_names = {
            str(doc["_id"]): doc["name"]
            for doc in Platform.objects.all().as_pymongo().scalar("id", "name")
        }
        vendor_names = {
            str(doc["_id"]): doc["name"]
            for doc in Vendor.objects.all().as_pymongo().scalar("id", "name")
        }
        firmware_versions = {
            str(doc["_id"]): doc["version"]
            for doc in Firmware.objects.all().as_pymongo().scalar("id", "version")
        }
        profile_names = {
            str(doc["_id"]): doc["name"]
            for doc in Profile.objects.all().as_pymongo().scalar("id", "name")
        }
        cursor = connection.cursor()
        cursor.execute("select id, profile, vendor, platform, version from sa_managedobject")
        for row in cursor:
            yield (
                row[0],
                profile_names.get(row[1], ""),
                vendor_names.get(row[2], ""),
                platform_names.get(row[3], ""),
                firmware_versions.get(row[4], ""),
            )
|
#!/usr/bin/env python3
"""
Generates Netscape bookmark dumps
See:
- https://github.com/shaarli/netscape-bookmark-parser
"""
from argparse import ArgumentParser
from random import randint
from faker import Faker
HEADER = '''<!DOCTYPE NETSCAPE-Bookmark-file-1>
<!-- This is an automatically generated file.
It will be read and overwritten.
Do Not Edit! -->
<TITLE>Bookmarks</TITLE>
<H1>Bookmarks</H1>'''
START_TAG = '<DL><p>'
END_TAG = '</DL><p>'
T_BOOKMARK_DT = (
'<DT><A HREF="{url}" ADD_DATE="{date}" PRIVATE="{private}"'
' TAGS="{tags}">{title}</A>'
)
T_BOOKMARK_DD = '<DD>{description}'
class FakeBookmark():
    """Bookmark entry generated by Faker"""
    # pylint: disable=too-few-public-methods

    def __init__(self, fake):
        # pylint: disable=no-member
        # randomize every field of the bookmark from the Faker instance
        self.fake = fake
        self.url = fake.uri()
        self.date = fake.unix_time()
        self.private = randint(0, 1)
        self.tags = fake.words(nb=randint(0, 5))
        self.title = fake.sentence(nb_words=randint(1, 10))
        self.description = fake.paragraphs(nb=randint(0, 2))

    def netscape_str(self):
        """Netscape entry representation"""
        entry = T_BOOKMARK_DT.format(
            url=self.url,
            date=self.date,
            private=self.private,
            tags=' '.join(self.tags),
            title=self.title,
        )
        if not self.description:
            return entry
        # append the optional <DD> description on its own line
        dd_part = T_BOOKMARK_DD.format(description='\n'.join(self.description))
        return '{}\n{}'.format(entry, dd_part)
def generate_bookmarks(locale, number):
    """Generate a fake Netscape bookmark list"""
    faker = Faker(locale)
    # one <DT> (and optional <DD>) entry per bookmark, newline-separated
    entries = '\n'.join(
        FakeBookmark(faker).netscape_str() for _ in range(number)
    )
    return '\n'.join([HEADER, START_TAG, entries, END_TAG])
def main():
    """Main entrypoint"""
    parser = ArgumentParser()
    parser.add_argument(
        '-l', '--locale',
        default=None,
        help="Locale for the generated content",
    )
    parser.add_argument(
        '-n', '--number',
        type=int,
        default=1000,
        help="Number of bookmarks to generate",
    )
    parser.add_argument(
        '-o', '--output',
        default='bookmarks.htm',
        help="Output file",
    )
    opts = parser.parse_args()
    # write the whole generated dump in one shot
    with open(opts.output, 'w') as outfile:
        outfile.write(generate_bookmarks(opts.locale, opts.number))
if __name__ == '__main__':
main()
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import RealEstate, Property
from property.serializers import RealEstateSerializer
REAL_ESTATE_URL = reverse('property:realestate-list')
class PublicRealEstatesApiTest(TestCase):
    """Test public available real estates API"""

    def setUp(self):
        """Create an unauthenticated API client before each test"""
        self.client = APIClient()

    def test_login_required(self):
        """Test that login is required to access the endpoint"""
        response = self.client.get(REAL_ESTATE_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRealEstateApiTest(TestCase):
    """Test Real estate can be retrieved by authorized user"""
    def setUp(self):
        """Helper function that runs before the tests"""
        # authenticated API client shared by every test in this class
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'itachidev@company.com',
            'testpass'
        )
        self.client.force_authenticate(self.user)
    def test_retrieve_real_estates_list(self):
        """Test retrieve the real-estates list for the authenticated user"""
        RealEstate.objects.create(
            user=self.user,
            name='Imobiliaria SP',
            address='Rua Carlos dias n150'
        )
        RealEstate.objects.create(
            user=self.user,
            name='Imobiliaria SBC',
            address='Vila Duzzi'
        )
        res = self.client.get(REAL_ESTATE_URL)
        # expected payload: all real estates ordered by name, descending
        real_estates = RealEstate.objects.all().order_by('-name')
        serializer = RealEstateSerializer(real_estates, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_real_estates_limited_for_user(self):
        """Test that 'real estate' can be only return for authenticated user"""
        # a second user's record must not leak into the response
        user2 = get_user_model().objects.create_user(
            'devtwo@company.com',
            'passwordtwo'
        )
        RealEstate.objects.create(
            user=user2,
            name='Imobiliaria Calazans',
            address='Avenida Borges')
        real_estate = RealEstate.objects.create(
            user=self.user,
            name='Imobiliaria Macedo',
            address='Avenida Torres')
        res = self.client.get(REAL_ESTATE_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        # only the authenticated user's single record should come back
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], real_estate.name)
    def test_create_real_estate_successfully(self):
        """Test that the authenticated user create a realestate successfully"""
        payload = {
            'name': 'Imobiliaria São Caetano',
            'address': 'Jundiai n2344'
        }
        self.client.post(REAL_ESTATE_URL, payload)
        # verify the record actually landed in the database for this user
        exists = RealEstate.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)
    def test_create_real_estate_invalid(self):
        """Test create a invalid real estate"""
        # blank name/address should be rejected by serializer validation
        payload = {
            'name': '',
            'address': ''
        }
        res = self.client.post(REAL_ESTATE_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    def test_retrieve_real_estate_unique_item(self):
        """Test that retrieve a unique item for the authenticated user"""
        realestate = RealEstate.objects.create(
            user=self.user,
            name='Imobiliaria Zeus',
            address='Avenida R'
        )
        RealEstate.objects.create(
            user=self.user,
            name='Imobiliaria Carlos',
            address='Avenida GT'
        )
        properties1 = Property.objects.create(
            user=self.user,
            name='Imovel 1',
            address='Endereço 1',
            description='etc',
            features='tex',
            status=False,
            type='Home',
            finality='residential'
        )
        properties2 = Property.objects.create(
            user=self.user,
            name='Imovel 1',
            address='Endereço 1',
            description='etc',
            features='tex',
            status=False,
            type='Home',
            finality='residential'
        )
        # both properties point at the same real estate; with assigned_only
        # the API must deduplicate and return that real estate only once
        properties1.real_estates.add(realestate)
        properties2.real_estates.add(realestate)
        res = self.client.get(REAL_ESTATE_URL, {'assigned_only': 1})
        self.assertEqual(len(res.data), 1)
    def test_retrieve_real_estates_assigned_to_properties(self):
        """Test filtering real estates by those assigned to properties"""
        realestate1 = RealEstate.objects.create(
            user=self.user,
            name='Imobiliaria Zeus',
            address='Avenida R'
        )
        realestate2 = RealEstate.objects.create(
            user=self.user,
            name='Imobiliaria Thor',
            address='Avenida X'
        )
        properties = Property.objects.create(
            user=self.user,
            name='Imovel 1',
            address='Endereço 1',
            description='etc',
            features='tex',
            status=False,
            type='Home',
            finality='residential'
        )
        properties.real_estates.add(realestate1)
        # assigned_only=1 should include only real estates linked to a property
        res = self.client.get(REAL_ESTATE_URL, {'assigned_only': 1})
        serializer1 = RealEstateSerializer(realestate1)
        serializer2 = RealEstateSerializer(realestate2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)
|
#!/usr/bin/env python3.8
#
########################################
#
# Python Tips, by Wolfgang Azevedo
# https://github.com/wolfgang-azevedo/python-tips
#
# Loop For
# 2020-03-07
#
########################################
#
#
import time
vendors = ["CISCO", "HUAWEI", "JUNIPER", "ERICSSON", "MICROTIK"]
for i in vendors:
if i == 'CISCO':
print("Connecting to CISCO device....")
time.sleep(5)
print('.' * 1000)
time.sleep(5)
print("Wait 5 sec...")
time.sleep(5)
print("Connected!")
print('-+' * 50)
print('\n\n')
elif i == 'HUAWEI':
print('Connecting to HUAWEI device...')
time.sleep(5)
print('.' * 1000)
time.sleep(5)
print("Wait 5 sec...")
time.sleep(5)
print("Connected!") |
import tensorflow as tf
import numpy as np
import os, sys, inspect
# Make the parent directory importable so the sibling packages used below
# (pgn/, utils/) resolve when this script is run directly from its own folder.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from pgn.model import PGN
from pgn.train_helper import train_model, get_train_msg
from utils.config_gpu import config_gpu
from utils.params import get_params
from utils.saveLoader import Vocab
from utils.config import PGN_CKPT
def train(params):
    """Train the PGN (pointer-generator network) described by `params`,
    resuming from the latest checkpoint in PGN_CKPT when one exists."""
    # configure GPU resources
    config_gpu()
    # load the vocabulary used for training
    vocab = Vocab(params["vocab_path"], params["vocab_size"])
    params['vocab_size'] = vocab.count
    params["trained_epoch"] = get_train_msg(PGN_CKPT)
    # learning-rate decay: shrink by 5% per previously-trained epoch
    params["learning_rate"] *= np.power(0.95, params["trained_epoch"])
    # build the model
    print("Building the model ...")
    model = PGN(params)
    # set up the checkpoint manager (keep at most 5 checkpoints)
    checkpoint = tf.train.Checkpoint(PGN=model)
    checkpoint_manager = tf.train.CheckpointManager(checkpoint, PGN_CKPT, max_to_keep=5)
    checkpoint.restore(checkpoint_manager.latest_checkpoint)
    if checkpoint_manager.latest_checkpoint:
        print("Restored from {}".format(checkpoint_manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")
    # train the model (status prints below are user-facing, left unchanged)
    print("开始训练模型..")
    print("trained_epoch:", params["trained_epoch"])
    print("mode:", params["mode"])
    print("epochs:", params["epochs"])
    print("batch_size:", params["batch_size"])
    print("max_enc_len:", params["max_enc_len"])
    print("max_dec_len:", params["max_dec_len"])
    print("learning_rate:", params["learning_rate"])
    train_model(model, vocab, params, checkpoint_manager)
if __name__ == '__main__':
    # load hyperparameters from the command line / defaults
    params = get_params()
    # params["debug_mode"] = True
    # print(params["debug_mode"])
    # train the model
    train(params)
|
"""Graph a histogram of a remotely sensed image"""
# http://git.io/vqs41
# uses output from swap-bands.py script
from gdal import gdal_array
import turtle as t
def histogram(a, bins=None):
    """
    Histogram function for multi-dimensional array.
    a = array
    bins = range of numbers to match (defaults to 0..255 for 8-bit imagery)
    """
    # IMPROVED: the default used to be a mutable list built at definition
    # time (`bins=list(range(0, 256))`); build it per-call instead
    if bins is None:
        bins = list(range(0, 256))
    fa = a.flat
    # count, for each bin edge, how many sorted pixel values fall below it;
    # adjacent differences then give the per-bin counts
    n = gdal_array.numpy.searchsorted(gdal_array.numpy.sort(fa), bins)
    n = gdal_array.numpy.concatenate([n, [len(fa)]])
    hist = n[1:]-n[:-1]
    return hist
def draw_histogram(hist, scale=True):
    """Draw one or more band histograms as colored lines with turtle graphics.

    hist  = list of per-band histogram arrays (as produced by histogram())
    scale = if True, scale the y axis to the tallest bin across all bands
            instead of the first band's total pixel count
    """
    t.color("black")
    # Draw the axes
    axes = ((-355, -200), (355, -200), (-355, -200), (-355, 250))
    t.up()
    for p in axes:
        t.goto(p)
        t.down()
    # Labels
    # NOTE(review): the font family "Arial, " (with trailing comma+space)
    # looks like a typo for "Arial" — kept as-is to preserve rendering
    t.up()
    t.goto(0, -250)
    t.write("VALUE", font=("Arial, ", 12, "bold"))
    t.up()
    t.goto(-400, 280)
    t.write("FREQUENCY", font=("Arial, ", 12, "bold"))
    # Tick marks
    # x axis
    x = -355
    y = -200
    t.up()
    for i in range(1, 11):
        x = x+65
        t.goto(x, y)
        t.down()
        t.goto(x, y-10)
        t.up()
        t.goto(x, y-25)
        t.write("{}".format((i*25)), align="center")
    # y axis
    x = -355
    y = -200
    t.up()
    pixels = sum(hist[0])
    if scale:
        # IMPROVED: the old loop accumulated the tallest bin into a local
        # named `max`, shadowing the builtin; use the builtin directly
        pixels = max(h.max() for h in hist)
    label = int(pixels/10)
    for i in range(1, 11):
        y = y+45
        t.goto(x, y)
        t.down()
        t.goto(x-10, y)
        t.up()
        t.goto(x-15, y-6)
        t.write("{}" .format((i*label)), align="right")
    # Plot each histogram as a colored line
    x_ratio = 709.0 / 256
    y_ratio = 450.0 / pixels
    # Add more colors to this list if comparing
    # more than 3 bands or 1 image
    colors = ["red", "green", "blue"]
    for j in range(len(hist)):
        h = hist[j]
        x = -354
        y = -199
        t.up()
        t.goto(x, y)
        t.down()
        t.color(colors[j])
        for i in range(256):
            x = i * x_ratio
            y = h[i] * y_ratio
            x = x - (709/2)
            y = y + -199
            t.goto((x, y))
# Load the swapped-band image (output of swap-bands.py), build one
# histogram per band, and plot them all in one turtle window.
im = "swap.tif"
histograms = []
arr = gdal_array.LoadFile(im)
for b in arr:
    histograms.append(histogram(b))
draw_histogram(histograms)
# Hide our pen
t.pen(shown=False)
t.done()
|
def _tmpfile():
"""Implementation of File::Temp tmpfile()"""
return tempfile.TemporaryFile()
|
# from dynaconf import settings
# from dynaconf.loaders.redis_loader import load as dc_redis_load
# from util.config.base_dynaconf_config import init_all_dynaconf_redis_config
# Initialize all of the dynaconf configs stored in redis, as needed
# init_all_dynaconf_redis_config()
# Re-read the dynaconf configuration stored in redis
# dc_redis_load(settings, key="DYNACONF_DEVELOPMENT")
|
# Generated by Django 2.1.9 on 2019-08-27 08:24
import datetime
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
import resources.models.utils
class Migration(migrations.Migration):
    """Initial payments migration: creates Order, OrderLine, OrderLogEntry,
    and Product, and links products to resources."""
    initial = True
    dependencies = [
        ('resources', '0080_payments_related_changes'),
    ]
    operations = [
        # Order: one per reservation (PROTECT), tracks the payment state
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('state', models.CharField(choices=[('waiting', 'waiting'), ('confirmed', 'confirmed'), ('rejected', 'rejected'), ('expired', 'expired'), ('cancelled', 'cancelled')], default='waiting', max_length=32, verbose_name='state')),
                ('order_number', models.CharField(default=resources.models.utils.generate_id, max_length=64, unique=True, verbose_name='order number')),
                ('reservation', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, related_name='order', to='resources.Reservation', verbose_name='reservation')),
            ],
            options={
                'verbose_name': 'order',
                'verbose_name_plural': 'orders',
                'ordering': ('id',),
            },
        ),
        # OrderLine: a quantity of one product within an order
        migrations.CreateModel(
            name='OrderLine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField(default=1, verbose_name='quantity')),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_lines', to='payments.Order', verbose_name='order')),
            ],
            options={
                'verbose_name': 'order line',
                'verbose_name_plural': 'order lines',
                'ordering': ('id',),
            },
        ),
        # OrderLogEntry: audit trail of state changes per order
        migrations.CreateModel(
            name='OrderLogEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('state_change', models.CharField(blank=True, choices=[('waiting', 'waiting'), ('confirmed', 'confirmed'), ('rejected', 'rejected'), ('expired', 'expired'), ('cancelled', 'cancelled')], max_length=32, verbose_name='state change')),
                ('message', models.TextField(blank=True)),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='log_entries', to='payments.Order', verbose_name='order log entry')),
            ],
            options={
                'verbose_name': 'order log entry',
                'verbose_name_plural': 'order log entries',
                'ordering': ('id',),
            },
        ),
        # Product: purchasable item; "archived_at" far-future default marks
        # the currently-active revision of each product_id
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
                ('product_id', models.CharField(db_index=True, editable=False, max_length=100, verbose_name='internal product ID')),
                ('archived_at', models.DateTimeField(db_index=True, default=datetime.datetime(9999, 12, 31, 0, 0, tzinfo=utc), editable=False, verbose_name='archived_at')),
                ('type', models.CharField(choices=[('rent', 'rent'), ('extra', 'extra')], default='rent', max_length=32, verbose_name='type')),
                ('sku', models.CharField(max_length=255, verbose_name='SKU')),
                ('name', models.CharField(blank=True, max_length=100, verbose_name='name')),
                ('name_fi', models.CharField(blank=True, max_length=100, null=True, verbose_name='name')),
                ('name_en', models.CharField(blank=True, max_length=100, null=True, verbose_name='name')),
                ('name_sv', models.CharField(blank=True, max_length=100, null=True, verbose_name='name')),
                ('description', models.TextField(blank=True, verbose_name='description')),
                ('description_fi', models.TextField(blank=True, null=True, verbose_name='description')),
                ('description_en', models.TextField(blank=True, null=True, verbose_name='description')),
                ('description_sv', models.TextField(blank=True, null=True, verbose_name='description')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10, validators=[django.core.validators.MinValueValidator(Decimal('0.01'))], verbose_name='price including VAT')),
                ('tax_percentage', models.DecimalField(choices=[(Decimal('0.00'), '0.00'), (Decimal('10.00'), '10.00'), (Decimal('14.00'), '14.00'), (Decimal('24.00'), '24.00')], decimal_places=2, default=Decimal('24.00'), max_digits=5, verbose_name='tax percentage')),
                ('price_type', models.CharField(choices=[('per_period', 'per period'), ('fixed', 'fixed')], default='per_period', max_length=32, verbose_name='price type')),
                ('price_period', models.DurationField(blank=True, default=datetime.timedelta(0, 3600), null=True, verbose_name='price period')),
                ('max_quantity', models.PositiveSmallIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)], verbose_name='max quantity')),
                ('resources', models.ManyToManyField(blank=True, related_name='products', to='resources.Resource', verbose_name='resources')),
            ],
            options={
                'verbose_name': 'product',
                'verbose_name_plural': 'products',
                'ordering': ('product_id',),
            },
        ),
        migrations.AddField(
            model_name='orderline',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='order_lines', to='payments.Product', verbose_name='product'),
        ),
        # each (archived_at, product_id) pair is unique, i.e. at most one
        # active revision per product_id at a time
        migrations.AlterUniqueTogether(
            name='product',
            unique_together={('archived_at', 'product_id')},
        ),
    ]
|
from uuid import uuid4
from django.db import models
class Shifts(models.Model):
    """A named work shift with fixed start and end times."""
    # random UUID primary key instead of an auto-increment integer
    id = models.UUIDField(
        primary_key=True,
        default=uuid4, editable=False)
    name = models.CharField(
        max_length=200,
        blank=False, null=False)
    start = models.TimeField(
        blank=False, null=False
    )
    end = models.TimeField(
        blank=False, null=False
    )
    class Meta:
        # list shifts alphabetically; explicit table name for the config schema
        ordering = ('name',)
        verbose_name = 'Shifts'
        verbose_name_plural = 'Shifts'
        db_table = 'config_shifts'
    def __str__(self):
        return self.name
|
import csv
import urbackup_api
import time
import datetime
import logging
excel_output = True
def count_cbt_clients_server(serverurl, username, password):
    """Count CBT-enabled clients seen within the last month on one UrBackup server.

    Returns the count, or None when the server status could not be retrieved.
    """
    server = urbackup_api.urbackup_server(serverurl, username, password)
    clients = server.get_status()
    # IMPROVED: compare against None with `is`, not `==`
    if clients is None:
        print("Getting clients from server "+serverurl+" failed")
        return None
    diff_time = 30*24*60*60 # 1 month
    ret = 0
    for client in clients:
        # Client was seen in the last month ("-" means never seen)
        if client["lastseen"]!="-" and client["lastseen"] > time.time() - diff_time:
            # CBT builds carry a "-cbt" suffix in their version string
            if "-cbt" in client["client_version_string"]:
                lastseen_str = datetime.datetime.fromtimestamp(client["lastseen"]).strftime("%x %X")
                print("CBT client "+client["name"]+" last seen at "+lastseen_str+" version "+client["client_version_string"])
                ret+=1
    return ret
def count_cbt_clients():
    """Read servers.csv (url, username, password per row) and write each
    server's CBT client count to server_cbt_counts.csv."""
    #logging.basicConfig(level=logging.DEBUG)
    with open('servers.csv', newline='') as csvfile_in:
        with open('server_cbt_counts.csv', 'w', newline='') as csvfile_out:
            serverreader = csv.reader(csvfile_in, dialect='excel')
            serverwriter = csv.writer(csvfile_out, dialect='excel')
            if excel_output:
                # hint row so Excel picks the right separator
                serverwriter.writerow(["sep=,"])
            serverwriter.writerow(["Server URL", "CBT client count"])
            # BUGFIX: the manual `idx+=1` at the bottom of the loop was
            # skipped by every `continue`, so after any skipped row the
            # header detection and the reported row numbers were wrong;
            # enumerate() advances the index unconditionally
            for idx, row in enumerate(serverreader):
                # skip an optional "sep=," hint row and the header row
                if idx == 0 and "sep=" in row[0]:
                    continue
                if idx in (0, 1) and row[0] == "Server URL":
                    continue
                if len(row) < 3:
                    print("Row "+str(idx+1)+" doesn't have enough columns. ("+", ".join(row)+")")
                    continue
                serverurl = row[0]
                username = row[1]
                password = row[2]
                num = count_cbt_clients_server(serverurl, username, password)
                if num is None:
                    serverwriter.writerow([serverurl, "Getting CBT count failed"])
                else:
                    serverwriter.writerow([serverurl, num])
if __name__=="__main__":
count_cbt_clients() |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Manuel Widmer <mawidmer@cisco.com>
# Copyright: (c) 2021, Anvitha Jain (@anvitha-jain) <anvjain@cisco.com>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_vmm_vswitch_policy
short_description: Manage vSwitch policy for VMware virtual domains profiles (vmm:DomP)
description:
- Manage vSwitch policy for VMware VMM domains on Cisco ACI fabrics.
options:
port_channel_policy:
description:
- Name of the fabric access port-channel policy.
type: str
lldp_policy:
description:
- Name of the fabric access LLDP policy.
type: str
cdp_policy:
description:
- Name of the fabric access CDP policy.
type: str
mtu_policy:
description:
- VMWare only.
- Name of the fabric access MTU policy.
type: str
domain:
description:
- Name of the virtual domain profile.
type: str
aliases: [ domain_name, domain_profile ]
enhanced_lag:
description:
- List of enhanced LAG policies if vSwitch needs to be connected via VPC.
type: list
elements: dict
suboptions:
name:
description:
- Name of the enhanced Lag policy.
type: str
required: true
lacp_mode:
description:
- LACP port channel mode.
type: str
choices: [ active, passive ]
load_balancing_mode:
description:
- Load balancing mode of the port channel.
- See also https://pubhub.devnetcloud.com/media/apic-mim-ref-421/docs/MO-lacpEnhancedLagPol.html.
type: str
choices:
- dst-ip
- dst-ip-l4port
- dst-ip-vlan
- dst-ip-l4port-vlan
- dst-mac
- dst-l4port
- src-ip
- src-ip-l4port
- src-ip-vlan
- src-ip-l4port-vlan
- src-mac
- src-l4port
- src-dst-ip
- src-dst-ip-l4port
- src-dst-ip-vlan
- src-dst-ip-l4port-vlan
- src-dst-mac
- src-dst-l4port
- src-port-id
- vlan
number_uplinks:
description:
- Number of uplinks, must be between 2 and 8.
type: int
stp_policy:
description:
- SCVMM only.
- Name of the STP policy.
type: str
netflow_exporter:
description:
    - Parameters for the netflow exporter policy.
type: dict
suboptions:
name:
description:
- Name of the netflow exporter policy
type: str
required: true
active_flow_timeout:
description:
- Specifies the delay in seconds that NetFlow waits after the active flow is initiated, after which NetFlow sends the collected data.
- The range is from 60 to 3600. The default value is 60
type: int
idle_flow_timeout:
description:
- Specifies the delay in seconds that NetFlow waits after the idle flow is initiated, after which NetFlow sends the collected data.
- The range is from 10 to 600. The default value is 15.
type: int
sampling_rate:
description:
- (VDS only) Specifies how many packets that NetFlow will drop after every collected packet.
If you specify a value of 0, then NetFlow does not drop any packets.
- The range is from 0 to 1000. The default value is 0.
type: int
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
vm_provider:
description:
- The VM platform for VMM Domains.
- Support for Kubernetes was added in ACI v3.0.
- Support for CloudFoundry, OpenShift and Red Hat was added in ACI v3.1.
type: str
choices: [ cloudfoundry, kubernetes, microsoft, openshift, openstack, redhat, vmware ]
extends_documentation_fragment:
- cisco.aci.aci
seealso:
- module: cisco.aci.aci_domain
- name: APIC Management Information Model reference
description: More information about the internal APIC classes B(vmm:DomP)
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Manuel Widmer (@lumean)
- Anvitha Jain (@anvitha-jain)
'''
EXAMPLES = r'''
- name: Add a vSwitch policy with LLDP
cisco.aci.aci_vmm_vswitch_policy:
host: apic
username: admin
password: SomeSecretPassword
lldp_policy: LLDP_ENABLED
domain: vmware_dom
vm_provider: vmware
state: present
- name: Add a vSwitch policy with link aggregation
cisco.aci.aci_vmm_vswitch_policy:
host: apic
username: admin
password: SomeSecretPassword
port_channel_policy: LACP_ACTIVE
lldp_policy: LLDP_ENABLED
domain: vmware_dom
vm_provider: vmware
enhanced_lag:
- name: my_lacp_uplink
lacp_mode: active
load_balancing_mode: src-dst-ip
number_uplinks: 2
state: present
- name: Remove vSwitch Policy from VMware VMM domain
cisco.aci.aci_vmm_vswitch_policy:
host: apic
username: admin
password: SomeSecretPassword
domain: vmware_dom
vm_provider: vmware
state: absent
- name: Query the vSwitch policy of the VMWare domain
cisco.aci.aci_vmm_vswitch_policy:
host: apic
username: admin
password: SomeSecretPassword
domain: vmware_dom
vm_provider: vmware
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.aci.plugins.module_utils.aci import ACIModule, aci_argument_spec, enhanced_lag_spec, netflow_spec
# via UI vSwitch Policy can only be added for VMware and Microsoft vmm domains
# behavior for other domains is currently untested.
# Maps the module's vm_provider choice to the capitalisation the APIC uses
# in the vmmp-{provider} relative name.
VM_PROVIDER_MAPPING = dict(
    cloudfoundry='CloudFoundry',
    kubernetes='Kubernetes',
    microsoft='Microsoft',
    openshift='OpenShift',
    openstack='OpenStack',
    redhat='Redhat',
    vmware='VMware',
)
# NOTE: the enhanced_lag and netflow option specs previously duplicated here
# are imported from module_utils.aci (enhanced_lag_spec, netflow_spec).
def main():
    """Ansible module entry point.

    Creates, updates, removes or queries the vSwitch policy container
    (vmmVSwitchPolicyCont) under a VMM domain on the APIC.
    """
    argument_spec = aci_argument_spec()
    argument_spec.update(
        port_channel_policy=dict(type='str'),
        lldp_policy=dict(type='str'),
        cdp_policy=dict(type='str'),
        mtu_policy=dict(type='str'),
        stp_policy=dict(type='str'),
        enhanced_lag=dict(type='list', elements='dict', options=enhanced_lag_spec()),
        netflow_exporter=dict(type='dict', options=netflow_spec()),
        domain=dict(type='str', aliases=['domain_name', 'domain_profile']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        vm_provider=dict(type='str', choices=list(VM_PROVIDER_MAPPING.keys())),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # domain and vm_provider are only mandatory when changing config.
        required_if=[
            ['state', 'absent', ['domain', 'vm_provider']],
            ['state', 'present', ['domain', 'vm_provider']],
        ],
    )
    port_channel_policy = module.params.get('port_channel_policy')
    lldp_policy = module.params.get('lldp_policy')
    cdp_policy = module.params.get('cdp_policy')
    mtu_policy = module.params.get('mtu_policy')
    stp_policy = module.params.get('stp_policy')
    netflow_exporter = module.params.get('netflow_exporter')
    enhanced_lag = module.params.get('enhanced_lag')
    domain = module.params.get('domain')
    state = module.params.get('state')
    vm_provider = module.params.get('vm_provider')
    aci = ACIModule(module)
    vswitch_class = 'vmmVSwitchPolicyCont'
    # Child classes to read back along with the container object.
    child_classes = [
        'vmmRsVswitchOverrideLldpIfPol',
        'vmmRsVswitchOverrideLacpPol',
        'vmmRsVswitchOverrideCdpIfPol',
        'lacpEnhancedLagPol'
    ]
    # Optional children are only requested when the matching option was given.
    if mtu_policy is not None:
        child_classes.append('vmmRsVswitchOverrideMtuPol')
    if stp_policy is not None:
        child_classes.append('vmmRsVswitchOverrideStpPol')
    if isinstance(netflow_exporter, dict):
        child_classes.append('vmmRsVswitchExporterPol')
    # Target DN: uni/vmmp-{provider}/dom-{domain}/vswitchpolcont
    aci.construct_url(
        root_class=dict(
            aci_class='vmmProvP',
            aci_rn='vmmp-{0}'.format(VM_PROVIDER_MAPPING.get(vm_provider)),
            module_object=vm_provider,
            target_filter={'name': vm_provider},
        ),
        subclass_1=dict(
            aci_class='vmmDomP',
            aci_rn='dom-{0}'.format(domain),
            module_object=domain,
            target_filter={'name': domain},
        ),
        subclass_2=dict(
            aci_class='vmmVSwitchPolicyCont',
            aci_rn='vswitchpolcont',
            module_object='vswitchpolcont',
            target_filter={'name': 'vswitchpolcont'},
        ),
        child_classes=child_classes,
    )
    aci.get_existing()
    if state == 'present':
        # Build one child MO per configured policy override.
        children = list()
        if port_channel_policy is not None:
            children.append(dict(vmmRsVswitchOverrideLacpPol=dict(attributes=dict(
                tDn='uni/infra/lacplagp-{0}'.format(port_channel_policy)
            ))))
        if lldp_policy is not None:
            children.append(dict(vmmRsVswitchOverrideLldpIfPol=dict(attributes=dict(
                tDn='uni/infra/lldpIfP-{0}'.format(lldp_policy)
            ))))
        if cdp_policy is not None:
            children.append(dict(vmmRsVswitchOverrideCdpIfPol=dict(attributes=dict(
                tDn='uni/infra/cdpIfP-{0}'.format(cdp_policy)
            ))))
        if mtu_policy is not None:
            children.append(dict(vmmRsVswitchOverrideMtuPol=dict(attributes=dict(
                tDn='uni/fabric/l2pol-{0}'.format(mtu_policy)
            ))))
        if stp_policy is not None:
            children.append(dict(vmmRsVswitchOverrideStpPol=dict(attributes=dict(
                tDn='uni/infra/ifPol-{0}'.format(stp_policy)
            ))))
        if isinstance(netflow_exporter, dict):
            # NOTE(review): the timeout/sampling suboptions are passed through
            # unconditionally; if omitted they will be None -- confirm the
            # APIC accepts that or that the spec supplies defaults.
            children.append(dict(vmmRsVswitchExporterPol=dict(attributes=dict(
                tDn='uni/infra/vmmexporterpol-{0}'.format(netflow_exporter['name']),
                activeFlowTimeOut=netflow_exporter['active_flow_timeout'],
                idleFlowTimeOut=netflow_exporter['idle_flow_timeout'],
                samplingRate=netflow_exporter['sampling_rate'],
            ))))
        if isinstance(enhanced_lag, list):
            # One enhanced LAG policy child per requested entry.
            for lag_dict in enhanced_lag:
                children.append(dict(lacpEnhancedLagPol=dict(attributes=dict(
                    name=lag_dict['name'],
                    mode=lag_dict['lacp_mode'],
                    lbmode=lag_dict['load_balancing_mode'],
                    numLinks=lag_dict['number_uplinks'],
                ))))
        aci.payload(
            aci_class=vswitch_class,
            class_config=dict(rn='vswitchpolcont'),
            child_configs=children
        )
        aci.get_diff(aci_class=vswitch_class)
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()
    aci.exit_json()

if __name__ == "__main__":
    main()
|
def readLines(filename):
    """Read *filename* (UTF-8) and return its lines, newlines included.

    Matches the original readline() loop behaviour: the empty string
    returned at EOF is also appended, so the result always ends with ''.
    """
    lines = []
    # `with` closes the file automatically; no explicit close() needed.
    with open(filename, encoding="utf8") as f:
        while True:
            line = f.readline()
            lines.append(line)
            # readline() returns '' only at end of file.
            if not line:
                break
    return lines
if __name__ == '__main__':
    # Find vocabulary entries present in the original vocab but missing
    # from BERT's vocab, and write them to listvocab_qald.txt.
    bert_vocab = readLines("vocab-bert.txt")
    original_vocab = readLines("learning/treelstm/data/qald/vocab-cased-qald.txt")
    # Set gives O(1) membership tests; the list comprehension preserves
    # the original vocab's order.
    s = set(bert_vocab)
    temp3 = [x for x in original_vocab if x not in s]
    print(len(temp3))
    print("")
    # Entries still carry their trailing newlines, so write them verbatim.
    with open("listvocab_qald.txt", "w") as output:
        for i in temp3:
            output.write(i)
|
import torch
from models.common import AutoShape
class Yolo:
    """Singleton wrapper around a YOLO detection model loaded from disk."""

    _instance = None
    path_weights = "../data/weights/best.pt"  # path to model weights 'path/to/best.pt'

    def __init__(self) -> None:
        # NOTE: __new__ always returns the shared instance, but Python still
        # calls __init__ on every Yolo() call, so the model is (re)loaded
        # each time -- this preserves the original behaviour.
        self.model = None
        self.__model_load()

    def __new__(cls, *args, **kwargs) -> object:
        if not Yolo._instance:
            # Do not forward *args/**kwargs: object.__new__() rejects extra
            # arguments in Python 3.
            Yolo._instance = super(Yolo, cls).__new__(cls)
        return Yolo._instance

    def __model_load(self, autoshape=True) -> None:
        """Load the checkpoint at path_weights and store it on self.model.

        Raises Exception (chained from the original error) if the file is
        missing or has no 'model' entry.
        """
        try:
            model = torch.load(self.path_weights)['model']  # load model
            model = model.float()  # weight to float
            if autoshape:
                model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
        except Exception as e:
            s = f'{e}. Check if the file exists and if the "model" is present in the file'
            raise Exception(s) from e
        self.model = model

    def get_boxes(self, image):
        """Run inference on *image* and return the predicted boxes (xyxy).

        Image inputs are:
          file:    imgs = 'data/images/zidane.jpg'  # str or PosixPath
          URI:          = 'https://ultralytics.com/images/zidane.jpg'
          OpenCV:       = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
          PIL:          = Image.open('image.jpg') or ImageGrab.grab()  # HWC x(640,1280,3)
          numpy:        = np.zeros((640,1280,3))  # HWC
          torch:        = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)
          multiple:     = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
        """
        predict = self.model(image)
        boxes = predict.xyxy
        return boxes
|
# Import pandas as pd
import pandas as pd
# Read cars.csv without an index column: the first CSV column stays a
# regular, unnamed data column.
cars=pd.read_csv("cars.csv")
# Print out cars
print(cars)
# Re-import pandas (redundant; kept for the exercise's step structure)
import pandas as pd
# Re-read the CSV, this time promoting the first column to the row index.
cars = pd.read_csv('cars.csv',index_col=0)
# Print out cars
print(cars)
# Print out country column as Pandas Series (single brackets)
print(cars["country"])
# Print out country column as Pandas DataFrame (double brackets)
print(cars[["country"]])
# Print out DataFrame with country and drives_right columns
print(cars[["country","drives_right"]])
from queue import Queue
from glouton.shared.logger import logger
from glouton.workers.pageScanWorker import PageScanWorker
from glouton.shared import threadHelper
from glouton.infrastructure.satnogNetworkClient import SatnogNetworkClient
from threading import Event
class ObservationRepo:
    """Drives an observation download: scans SatNOGS observation pages in
    parallel batches, then starts the repos' download workers."""

    def __init__(self, cmd, repos):
        self.OBSERVATION_URL = 'observations/'
        self.__repos = repos
        self.__cmd = cmd
        self.__threads = []

    def extract(self):
        """Scan pages four at a time until a scanner raises the end signal,
        then register end commands and wait for the download workers."""
        client = SatnogNetworkClient()
        page_counter = 0
        end_signal = Event()
        while True:
            # One scanner thread per page in this batch; use a fresh list per
            # batch instead of accumulating (and re-waiting) finished threads.
            batch_threads = []
            for p in range(1, 5):
                page = page_counter + p
                url_params = self.__url_param_builder(
                    self.__cmd.start_date, self.__cmd.end_date, page)
                pageScanner = PageScanWorker(
                    client, self.__cmd, self.__repos, self.OBSERVATION_URL, url_params, p, end_signal)
                batch_threads.append(threadHelper.create_thread(pageScanner.scan))
            threadHelper.wait(batch_threads)
            # is_set() replaces the deprecated camelCase isSet() alias.
            if end_signal.is_set():
                break
            page_counter += 4
        print("\ndownloading started (Ctrl + C to stop)...\t~( ^o^)~")
        self.__register_end_command()
        self.__create_workers_and_wait()

    def __register_end_command(self):
        # Tell each repo the scan range is complete so its queue can drain.
        for repo in self.__repos:
            repo.register_end_command(
                self.__cmd.start_date, self.__cmd.end_date)

    def __create_workers_and_wait(self):
        # Collect every repo's worker threads and block until all finish.
        for repo in self.__repos:
            self.__threads.extend(repo.create_worker())
        threadHelper.wait(self.__threads)

    def __url_param_builder(self, start_date, end_date, page):
        """Build the query parameters for one observations API page."""
        return {'satellite__norad_cat_id': self.__cmd.norad_id,
                'ground_station': self.__cmd.ground_station_id,
                'start': start_date.isoformat(),
                'end': end_date.isoformat(),
                'status': self.__cmd.observation_status,
                'vetted_user': self.__cmd.user,
                'transmitter_uuid': self.__cmd.transmitter_uuid,
                'transmitter_mode': self.__cmd.transmitter_mode,
                'transmitter_type': self.__cmd.transmitter_type,
                'page': str(page),
                'format': 'json'}
|
import os
import sys
import json
import torch
import logging
import traceback
from tqdm import tqdm
from . import loader_utils
from .loader_utils import load_dataset, flat_rank_pos, limit_scope_length, stemming
from ..constant import BOS_WORD, EOS_WORD
from torch.utils.data import Dataset
logger = logging.getLogger()
# -------------------------------------------------------------------------------------------
# preprocess label for bert2gram training
# -------------------------------------------------------------------------------------------
def limit_phrase_length(positions, max_phrase_words):
    """Drop (start, end) pairs whose span exceeds max_phrase_words tokens."""
    kept = []
    for pos in positions:
        span = pos[1] - pos[0] + 1
        if span <= max_phrase_words:
            kept.append(pos)
    return kept
def get_ngram_label(valid_length, start_end_pos, max_phrase_words):
    """Build start/end label lists for keyphrase spans in the valid region.

    Positions are flattened and ranked, overlong phrases are removed, and
    collection stops at the first span ending beyond valid_length (positions
    are sorted, so later spans are out of range too). Returns a dict with
    's_label', 'e_label' and 'overlen_flag' (True when any phrase exceeded
    max_phrase_words).
    """
    sorted_positions = flat_rank_pos(start_end_pos)
    filter_positions = limit_phrase_length(sorted_positions, max_phrase_words)
    overlen_flag = len(filter_positions) != len(sorted_positions)

    s_label, e_label = [], []
    for start, end in filter_positions:
        if end >= valid_length:
            break
        s_label.append(start)
        e_label.append(end)

    assert len(s_label) == len(e_label)
    return {'s_label': s_label, 'e_label': e_label, 'overlen_flag': overlen_flag}
def prepare_data_for_bert2gram(examples, max_token, max_phrase_words, mode, pretrain_model):
    """Truncate each example to max_token subword tokens (and the matching
    number of whole words) and, in train mode, attach n-gram span labels.

    Train-mode examples whose labels all fall outside the truncated region
    are dropped; examples with over-long phrases are counted and reported.
    """
    logger.info('start preparing data for %s2Gram ...'%pretrain_model)
    overlen_num = 0
    new_examples = []
    for idx, ex in enumerate(tqdm(examples)):
        # Map the subword cut-off to a whole-word cut-off.
        # NOTE(review): comparing len(ex['tokens']) against max_token but
        # assigning max_word = max_token in that branch looks inconsistent
        # (word count vs token count) -- confirm against the preprocessing.
        if len(ex['tokens']) < max_token:
            max_word = max_token
        else:
            max_word = ex['tok_to_orig_index'][max_token-1] + 1
        new_ex = {}
        new_ex['url'] = ex['url']
        new_ex['tokens'] = ex['tokens'][:max_token]
        new_ex['valid_mask'] = ex['valid_mask'][:max_token]
        new_ex['doc_words'] = ex['doc_words'][:max_word]
        # Each valid_mask 1 marks the first subword of a word, so the sum
        # must equal the retained word count.
        assert len(new_ex['tokens']) == len(new_ex['valid_mask'])
        assert sum(new_ex['valid_mask']) == len(new_ex['doc_words'])
        if mode == 'train':
            parameter = {'valid_length': len(new_ex['doc_words']),
                         'start_end_pos': ex['start_end_pos'],
                         'max_phrase_words':max_phrase_words}
            # ------------------------------------------------
            label_dict = get_ngram_label(**parameter)
            if label_dict['overlen_flag']:
                overlen_num += 1
            # Skip examples left with no usable labels after truncation.
            if not label_dict['s_label']:
                continue
            new_ex['s_label'] = label_dict['s_label']
            new_ex['e_label'] = label_dict['e_label']
        new_examples.append(new_ex)
    logger.info('Delete Overlen Keyphrase (length > 5): %d (overlap / total = %.2f'
                %(overlen_num, float(overlen_num / len(examples) * 100)) + '%)')
    return new_examples
def reload_cached_dataset(cached_dataset_dir, dataset_class, name, pretrain_model):
    """Reload a previously cached dataset from its JSON-lines file.

    The file is expected at
    {cached_dataset_dir}/{pretrain_model}2gram.cached.{dataset_class}.{name}.json
    with one JSON object per line. Returns the list of examples.
    """
    logger.info("start Reloading %s2gram %s %s cached dataset ..." %(pretrain_model, dataset_class, name))
    filename = os.path.join(cached_dataset_dir, "%s2gram.cached.%s.%s.json" % (pretrain_model, dataset_class, name))
    examples = []
    # `with` closes the file; the original's extra f.close() was redundant.
    with open(filename, "r", encoding="utf-8") as f:
        for l in tqdm(f):
            examples.append(json.loads(l))
    logger.info("success loaded %s %s data : %d " %(dataset_class, name, len(examples)))
    return examples
def save_cached_dataset(cached_examples, dataset_name, mode, pretrain_model):
    """Write preprocessed examples to ./Cached_Datasets as JSON lines.

    One JSON object per line; the directory is created if missing. The file
    name mirrors the one read back by reload_cached_dataset().
    """
    logger.info("start saving %s2gram %s %s cached dataset ..." %(pretrain_model, dataset_name, mode))
    cached_dataset_dir = "./Cached_Datasets"
    if not os.path.exists(cached_dataset_dir):
        os.mkdir(cached_dataset_dir)
    filename = os.path.join(cached_dataset_dir, "%s2gram.cached.%s.%s.json" % (pretrain_model, dataset_name, mode))
    # `with` closes the file; the original's extra f_pred.close() was redundant.
    with open(filename, 'w', encoding='utf-8') as f_pred:
        for idx, ex in enumerate(tqdm(cached_examples)):
            f_pred.write("{}\n".format(json.dumps(ex)))
    logger.info("successfully saved %s2gram %s %s cached dataset to %s" %(pretrain_model, dataset_name, mode, filename))
# -------------------------------------------------------------------------------------------
# build dataset for bert2gram
# -------------------------------------------------------------------------------------------
class build_bert2gram_dataset(Dataset):
    ''' build datasets for train & eval

    Preprocesses the raw examples for the chosen pretrained encoder
    (BERT or RoBERTa) at construction time and serves feature tuples
    via __getitem__.
    '''
    def __init__(self, args, examples, dataset_name, tokenizer, max_token, max_phrase_words,
                 mode, preprocess_folder, cached_dataset_dir=None, local_rank=-1):
        # --------------------------------------------------------------------------------------------
        # RoBERTa-style models need an extra [SEP] token between segments.
        self.sep_token_extra = False
        self.pretrain_model = "bert"
        if "roberta" in args.model_class:
            self.sep_token_extra = True
            self.pretrain_model = "roberta"
        params = {'examples': examples, 'max_token': max_token, 'max_phrase_words':max_phrase_words,
                  'mode':mode, "pretrain_model":self.pretrain_model}
        cached_examples = prepare_data_for_bert2gram(**params)
        # del cache: free the raw examples, only the processed ones are kept
        examples.clear()
        # Rank cost too much time to preprocess `train_dataset`, so we want to save it
        # (only the main process, rank -1 or 0, writes the cache file).
        if local_rank in [-1, 0] and mode == "train":
            save_cached_dataset(cached_examples, dataset_name, mode, self.pretrain_model)
        self.examples = cached_examples
        self.mode = mode
        self.tokenizer = tokenizer
        self.max_phrase_words = max_phrase_words

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, index):
        return convert_examples_to_features(index, self.examples[index], self.tokenizer, self.max_phrase_words, self.mode, self.sep_token_extra)
def convert_examples_to_features(index, ex, tokenizer, max_phrase_words, mode, sep_token_extra=False):
    """Convert one example into tensors, wrapping the tokens with [CLS]/[SEP]
    (plus a second trailing [SEP] for RoBERTa-style models)."""
    tokens = [BOS_WORD] + ex['tokens'] + [EOS_WORD]
    valid = [0] + ex['valid_mask'] + [0]
    if sep_token_extra:
        tokens.append(EOS_WORD)
        valid.append(0)

    src_tensor = torch.LongTensor(tokenizer.convert_tokens_to_ids(tokens))
    valid_mask = torch.LongTensor(valid)
    # Each 1 in the mask marks the first subword of one original word.
    orig_doc_len = sum(valid)

    if mode != 'train':
        return index, src_tensor, valid_mask, orig_doc_len, max_phrase_words
    return (index, src_tensor, valid_mask,
            ex['s_label'], ex['e_label'], orig_doc_len, max_phrase_words)
def batchify_Bert2Gram_features_for_train(batch):
    ''' train dataloader & eval dataloader .

    Collates per-example tuples
    (index, src_tensor, valid_mask, s_label, e_label, orig_doc_len, max_phrase_words)
    into padded batch tensors plus an n-gram classification label tensor.
    '''
    ids = [ex[0] for ex in batch]
    docs = [ex[1] for ex in batch]
    valid_mask = [ex[2] for ex in batch]
    s_label_list = [ex[3] for ex in batch]
    e_label_list = [ex[4] for ex in batch]
    doc_word_lens = [ex[5] for ex in batch]
    # max_phrase_words is identical for every example; take the first.
    max_phrase_words = [ex[6] for ex in batch][0]
    # Hidden size of the encoder; used to pre-allocate the valid-output buffer.
    bert_output_dim = 768
    max_word_len = max([word_len for word_len in doc_word_lens]) # word-level
    # ---------------------------------------------------------------
    # [1] [2] src tokens tensor: zero-pad token ids, 1-mask real positions.
    doc_max_length = max([d.size(0) for d in docs])
    input_ids = torch.LongTensor(len(docs), doc_max_length).zero_()
    input_mask = torch.LongTensor(len(docs), doc_max_length).zero_()
    for i, d in enumerate(docs):
        input_ids[i, :d.size(0)].copy_(d)
        input_mask[i, :d.size(0)].fill_(1)
    # ---------------------------------------------------------------
    # [3] valid mask tensor: marks the first subword of each word.
    valid_max_length = max([v.size(0) for v in valid_mask])
    valid_ids = torch.LongTensor(len(valid_mask), valid_max_length).zero_()
    for i, v in enumerate(valid_mask):
        valid_ids[i, :v.size(0)].copy_(v)
    # ---------------------------------------------------------------
    # [4] active mask : for n-gram
    # For each n (1..max_phrase_words) there are max_word_len-n slots; mark
    # the n-grams that actually fit inside each document's word length.
    max_ngram_length = sum([max_word_len-n for n in range(max_phrase_words)])
    active_mask = torch.LongTensor(len(docs), max_ngram_length).zero_()
    for batch_i, word_len in enumerate(doc_word_lens):
        pad_len = max_word_len - word_len
        batch_mask = []
        for n in range(max_phrase_words):
            ngram_len = word_len - n
            if ngram_len > 0:
                gram_list = [1 for _ in range(ngram_len)] + [0 for _ in range(pad_len)]
            else:
                gram_list = [0 for _ in range(max_word_len-n)]
            batch_mask.extend(gram_list)
        active_mask[batch_i].copy_(torch.LongTensor(batch_mask))
    # -------------------------------------------------------------------
    # [5] label : for n-gram
    # 1. empty label list: one zero vector per n-gram size per example.
    label_list = []
    for _ in range(len(docs)):
        batch_label = []
        for n in range(max_phrase_words):
            batch_label.append(torch.LongTensor([0 for _ in range(max_word_len-n)]))
        label_list.append(batch_label)
    # 2. valid label list: a span (s, e) marks position s of the (e-s)-gram row.
    for batch_i in range(len(docs)):
        for s, e in zip(s_label_list[batch_i], e_label_list[batch_i]):
            gram = e-s
            label_list[batch_i][gram][s] = 1
    # 3. label tensor: flatten the per-size rows into one vector per example.
    ngram_label = torch.LongTensor(len(docs), max_ngram_length).zero_()
    for batch_i, label in enumerate(label_list):
        ngram_label[batch_i].copy_(torch.cat(label))
    # 4. valid output: zero buffer for word-level encoder states.
    valid_output = torch.zeros(len(docs), max_word_len, bert_output_dim)
    return input_ids, input_mask, valid_ids, active_mask, valid_output, ngram_label, ids
def batchify_Bert2Gram_features_for_test(batch):
    ''' test dataloader for Dev & Public_Valid.

    Same collation as the train batchifier, but the example tuples are
    (index, src_tensor, valid_mask, orig_doc_len, max_phrase_words) and no
    label tensor is produced; word lengths are returned instead.
    '''
    ids = [ex[0] for ex in batch]
    docs = [ex[1] for ex in batch]
    valid_mask = [ex[2] for ex in batch]
    doc_word_lens = [ex[3] for ex in batch]
    # max_phrase_words is identical for every example; take the first.
    max_phrase_words = [ex[4] for ex in batch][0]
    # Hidden size of the encoder; used to pre-allocate the valid-output buffer.
    bert_output_dim = 768
    max_word_len = max([word_len for word_len in doc_word_lens]) # word-level
    # ---------------------------------------------------------------
    # [1] [2] src tokens tensor: zero-pad token ids, 1-mask real positions.
    doc_max_length = max([d.size(0) for d in docs])
    input_ids = torch.LongTensor(len(docs), doc_max_length).zero_()
    input_mask = torch.LongTensor(len(docs), doc_max_length).zero_()
    for i, d in enumerate(docs):
        input_ids[i, :d.size(0)].copy_(d)
        input_mask[i, :d.size(0)].fill_(1)
    # ---------------------------------------------------------------
    # [3] valid mask tensor: marks the first subword of each word.
    valid_max_length = max([v.size(0) for v in valid_mask])
    valid_ids = torch.LongTensor(len(valid_mask), valid_max_length).zero_()
    for i, v in enumerate(valid_mask):
        valid_ids[i, :v.size(0)].copy_(v)
    # ---------------------------------------------------------------
    # [4] active mask : for n-gram
    # For each n-gram size mark the start positions that fit in the document.
    max_ngram_length = sum([max_word_len-n for n in range(max_phrase_words)])
    active_mask = torch.LongTensor(len(docs), max_ngram_length).zero_()
    for batch_i, word_len in enumerate(doc_word_lens):
        pad_len = max_word_len - word_len
        batch_mask = []
        for n in range(max_phrase_words):
            ngram_len = word_len - n
            if ngram_len > 0:
                gram_list = [1 for _ in range(ngram_len)] + [0 for _ in range(pad_len)]
            else:
                gram_list = [0 for _ in range(max_word_len-n)]
            batch_mask.extend(gram_list)
        active_mask[batch_i].copy_(torch.LongTensor(batch_mask))
    # ---------------------------------------------------------------
    # [5] valid output: zero buffer for word-level encoder states.
    valid_output = torch.zeros(len(docs), max_word_len, bert_output_dim)
    return input_ids, input_mask, valid_ids, active_mask, valid_output, doc_word_lens, ids
|
#!/usr/bin/env python3
"""Detects time conflicts in Zoom meetings and posts a summary to a webhook.
A meeting conflicts with another meeting if the (start, end) ranges overlap
and if the host is the same user.
"""
import collections
import datetime
import itertools
import logging
import json
import requests
import os
from zoomus import ZoomClient
from dateutil import tz
# A scheduled meeting with precomputed start/end datetimes.
Meeting = collections.namedtuple('Meeting', ['id', 'title', 'start_time', 'end_time'])
def determine_conflicts(meetings):
    """Return pairs of Meeting tuples whose (start, end) ranges overlap.

    Meetings without a start_time are ignored; duration defaults to 90
    minutes. Each conflicting pair is reported once, oriented by meeting id.
    """
    ranges = []
    for meeting in meetings:
        if 'start_time' not in meeting:
            continue
        start = datetime.datetime.strptime(meeting['start_time'], '%Y-%m-%dT%H:%M:%S%z')
        end = start + datetime.timedelta(minutes=meeting.get('duration', 90))
        ranges.append(Meeting(meeting['id'], meeting['topic'], start, end))

    conflicts = []
    # This is inefficient but the list comes sorted from Zoom and n < 30 in practice.
    for first, second in itertools.product(ranges, repeat=2):
        if first.id >= second.id:
            continue  # overlap is symmetric; keep one orientation per pair
        latest_start = max(first.start_time, second.start_time)
        earliest_end = min(first.end_time, second.end_time)
        if latest_start < earliest_end:
            conflicts.append((first, second))
    return conflicts
def report_conflict(webhook_url, report_string):
    """Log a conflict report and, when a webhook URL is configured, POST it
    as a JSON payload. Raises ValueError on a non-200 webhook response."""
    logging.warning(report_string)
    if not webhook_url:
        return
    payload = json.dumps({'text': report_string})
    response = requests.post(
        webhook_url,
        data=payload,
        headers={'Content-Type': 'application/json'},
    )
    if response.status_code != 200:
        message = (
            'Request to webhook returned an error %s, the response is:\n%s'
            % (response.status_code, response.text)
        )
        raise ValueError(message)
def run(zoom_api_key, zoom_api_secret, webhook_url, timezone):
    """Check every Zoom user's upcoming meetings and report time conflicts."""
    tzinfo = tz.gettz(timezone)
    client = ZoomClient(zoom_api_key, zoom_api_secret)
    users = json.loads(client.user.list().content)
    time_format = "%m/%d/%Y, %I:%M:%S %p"
    for user in users['users']:
        meetings_response = client.meeting.list(
            user_id=user['id'], type='upcoming', page_size=300)
        meetings = json.loads(meetings_response.content)['meetings']
        conflicts = determine_conflicts(meetings)
        if not conflicts:
            logging.info('No upcoming conflicts detected')
            continue
        for meeting_1, meeting_2 in conflicts:
            lines = [f'Conflict detected on account {user["email"]}']
            # Render both meetings' ranges in the configured local timezone.
            for meeting in (meeting_1, meeting_2):
                start = meeting.start_time.astimezone(tzinfo).strftime(time_format)
                end = meeting.end_time.astimezone(tzinfo).strftime(time_format)
                lines.append(f'{meeting.title}: {start} - {end}')
            report_conflict(webhook_url, '\n'.join(lines) + '\n')
def main():
    """Entry point: load configuration from .env / the environment and run."""
    from dotenv import load_dotenv
    load_dotenv()
    env = os.getenv
    run(
        zoom_api_key=env('ZOOM_API_KEY'),
        zoom_api_secret=env('ZOOM_API_SECRET'),
        webhook_url=env('WEBHOOK_URL'),
        timezone=env('TIMEZONE'),
    )


if __name__ == '__main__':
    main()
|
import os
# Site identity.
SITE_SLUG = "bluetail"
CORE_APP_NAME = "bluetail"
SITE_NAME = 'bluetail'
# Deployment / sharing metadata; left empty until configured per environment.
LIVE_ROOT = ''
SHARE_IMAGE = ''
TWITTER_SHARE_IMAGE = ''
SITE_DESCRIPTION = ''
SITE_TWITTER = ''
GOOGLE_ANALYTICS_ACCOUNT = ''
# Command used to invoke the sassc stylesheet compiler.
SASSC_LOCATION = 'sassc'
# Preferred company identifier scheme
COMPANY_ID_SCHEME = os.getenv("COMPANY_ID_SCHEME",'GB-COH')
# URL format provider://username:password@host:port/databasename
DATABASE_URL = os.getenv("DATABASE_URL", 'postgres://bluetail:bluetail@localhost:5432/bluetail')
# This must be set in `config.py` or the environment variable.
SECRET_KEY = os.getenv('SECRET_KEY')
|
#
# This is Seisflows
#
# See LICENCE file
#
###############################################################################
# Import system modules
import sys
# Import Numpy
import numpy as np
# Local imports
from seisflows.tools import unix
from seisflows.tools.tools import exists
from seisflows.config import custom_import, ParameterError
from seisflows.workflow.base import base
# Bind the seisflows singleton modules registered by the framework.
try:
    PAR = sys.modules['seisflows_parameters']
    PATH = sys.modules['seisflows_paths']
    solver = sys.modules['seisflows_solver']
    postprocess = sys.modules['seisflows_postprocess']
except KeyError:
    # sys.modules lookups only raise KeyError; a bare except here would also
    # swallow unrelated errors (KeyboardInterrupt, SystemExit).
    print("Check parameters and paths.")

migration = custom_import('workflow', 'migration')()
class test_postprocess(base):
    """ Postprocessing class

    Thin workflow that delegates checks to the migration workflow and then
    turns kernels into a gradient via the postprocess module.
    """
    def check(self):
        """ Checks parameters and paths
        """
        migration.check()
        # Default the optional INPUT path when it was not configured.
        if 'INPUT' not in PATH:
            setattr(PATH, 'INPUT', None)

    def main(self):
        """ Writes gradient of objective function
        """
        # Without a precomputed input, run the full migration first so the
        # kernels that postprocessing consumes exist.
        if not PATH.INPUT:
            migration.main()
        postprocess.process_kernels()
|
##from pathlib import Path
import requests
startI = int(input("Enter starting Index Number: "))
endI = int(input("Enter ending Index Number: "))

# The URL, headers and not-found payload are identical for every index, so
# build them once outside the loop.
url = "https://admission.doenets.lk/api/admission"
header = {
    "authority": "admission.doenets.lk",
    "method": "POST",
    "path": "/api/admission",
    "scheme": "https",
    "accept": "application/json, text/plain, */*",
    "accept-encoding": "gzip, deflate, br",
    "accept-language": "en-US,en;q=0.9",
    "content-length": "54",
    "content-type": "application/json",
    "origin": "https://admission.doenets.lk",
    "referer": "https://admission.doenets.lk/",
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36",
}
not_found = {
    "name": "Not Found",
    "message": "No candidate record found",
    "code": 0,
    "status": 404,
    "type": "yii\\web\\NotFoundHttpException",
}

for i in range(startI, endI):
    data = {"admission": 75, "type": "index", "identifier": i}
    response = requests.post(url, headers=header, json=data)
    res_json = response.json()
    ##print(res_json)
    # Checking for the "reference" key (in addition to the not-found payload)
    # avoids a KeyError when the API returns an unexpected error body.
    if res_json != not_found and "reference" in res_json:
        print("Index no:", i, "refference:", res_json["reference"])
        ##IF WANT TO DOWNLOAD THE PDF FILES UNCOMMENT BELOW LINES[NOT RECOMENNED THOUGH]
        ##pdf_url = "https://admission.doenets.lk/api/admission/" + res_json["reference"]
        ##filename = Path(str(i) + ".pdf")
        ##response = requests.get(pdf_url)
        ##filename.write_bytes(response.content)
|
import os
from pathlib import Path

# Directory of this script and the relative download target.
cwd = Path(__file__).parent
download_path = Path('inputs')

# Fail fast with an explicit message when the SAS URL is missing; the
# original died later with an opaque AttributeError on None.split('?').
sas_url = os.environ.get('SAS_SIDS_CONTAINER')
if not sas_url:
    raise RuntimeError("SAS_SIDS_CONTAINER environment variable must be set")

# partition() tolerates a URL without a query string (empty SAS token),
# where split('?')[1] would raise IndexError.
azure_container, _, azure_sas = sas_url.partition('?')

# Spatial defaults: EPSG:4326 with a +/-35 degree latitude clip window
# (default range — confirm intended bounds for other deployments).
epsg = os.environ.get('EPSG', '4326')
xmin = os.environ.get('CLIP_XMIN', '-180')
xmax = os.environ.get('CLIP_XMAX', '180')
ymin = os.environ.get('CLIP_YMIN', '-35')
ymax = os.environ.get('CLIP_YMAX', '35')
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import os
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import json
try:
from flexget.plugins.api_tvdb import lookup_series
except ImportError:
raise plugin.DependencyError(issued_by='uoccin', missing='api_tvdb',
message='uoccin requires the `api_tvdb` plugin')
def load_uoccin_data(path):
    """Load <path>/uoccin.json and return its contents as a dict.

    A missing file yields an empty dataset; in every case both top-level
    sections ('movies' and 'series') are guaranteed to exist in the result.
    Raises plugin.PluginError when the file exists but cannot be parsed.
    """
    ufile = os.path.join(path, 'uoccin.json')
    if not os.path.exists(ufile):
        udata = {}
    else:
        try:
            with open(ufile, 'r') as f:
                udata = json.load(f)
        except Exception as err:
            raise plugin.PluginError('error reading %s: %s' % (ufile, err))
    for section in ('movies', 'series'):
        udata.setdefault(section, {})
    return udata
class UoccinEmit(object):
    """FlexGet input plugin: emit entries for items on a uoccin watchlist."""

    schema = {
        'type': 'object',
        'properties': {
            'path': {'type': 'string', 'format': 'path'},
            'type': {'type': 'string', 'enum': ['movies', 'series', 'episodes']},
            'tags': {'type': 'array', 'items': {'type': 'string'}, 'minItems': 1},
            'check_tags': {'type': 'string', 'enum': ['any', 'all', 'none'], 'default': 'any'},
            'ep_flags': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'},
        },
        'required': ['path', 'type'],
        'additionalProperties': False
    }

    def on_task_input(self, task, config):
        """Creates an entry for each item in your uoccin watchlist.
        Example::
          uoccin_emit:
            path: /path/to/gdrive/uoccin
            type: series
            tags: [ 'favorite', 'hires' ]
            check_tags: all
        Options path and type are required while the others are for filtering:
        - 'any' will include all the items marked with one or more tags in the list
        - 'all' will only include the items marked with all the listed tags
        - 'none' will only include the items not marked with any of the listed tags.
        The entries created will have a valid imdb/tvdb url and id.
        """
        imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance
        udata = load_uoccin_data(config['path'])
        section = udata['movies'] if config['type'] == 'movies' else udata['series']
        entries = []
        for eid, itm in list(section.items()):
            if not itm['watchlist']:
                continue
            # Tag filtering: n = number of configured tags the item carries.
            if 'tags' in config:
                n = len(set(config['tags']) & set(itm.get('tags', [])))
                if config['check_tags'] == 'any' and n <= 0:
                    continue
                if config['check_tags'] == 'all' and n != len(config['tags']):
                    continue
                if config['check_tags'] == 'none' and n > 0:
                    continue
            if config['type'] == 'movies':
                # Movie entries carry the imdb id; resolve a missing title
                # via the imdb_lookup plugin.
                entry = Entry()
                entry['url'] = 'http://www.imdb.com/title/' + eid
                entry['imdb_id'] = eid
                if itm['name'] != 'N/A':
                    entry['title'] = itm['name']
                else:
                    try:
                        imdb_lookup.lookup(entry)
                    except plugin.PluginError as e:
                        self.log.trace('entry %s imdb failed (%s)' % (entry['imdb_id'], e.value))
                        continue
                    entry['title'] = entry.get('imdb_name')
                if 'tags' in itm:
                    entry['uoccin_tags'] = itm['tags']
                if entry.isvalid():
                    entries.append(entry)
                else:
                    self.log.debug('Invalid entry created? %s' % entry)
            else:
                # Series/episodes: prefer the tvdb series name, falling back
                # to the raw stored name on lookup failure.
                sname = itm['name']
                try:
                    sname = lookup_series(tvdb_id=eid).seriesname
                except LookupError:
                    self.log.warning('Unable to lookup series %s from tvdb, using raw name.' % eid)
                surl = 'http://thetvdb.com/?tab=series&id=' + eid
                if config['type'] == 'series':
                    entry = Entry()
                    entry['url'] = surl
                    entry['title'] = sname
                    entry['tvdb_id'] = eid
                    if 'tags' in itm:
                        entry['uoccin_tags'] = itm['tags']
                    if entry.isvalid():
                        entries.append(entry)
                    else:
                        self.log.debug('Invalid entry created? %s' % entry)
                elif config['ep_flags'] == 'collected':
                    # One entry per collected episode.
                    slist = itm.get('collected', {})
                    for sno in list(slist.keys()):
                        for eno in slist[sno]:
                            entry = Entry()
                            entry['url'] = surl
                            entry['title'] = '%s S%02dE%02d' % (sname, int(sno), int(eno))
                            entry['tvdb_id'] = eid
                            if entry.isvalid():
                                entries.append(entry)
                            else:
                                self.log.debug('Invalid entry created? %s' % entry)
                else:
                    # Default: one entry per watched episode.
                    slist = itm.get('watched', {})
                    for sno in list(slist.keys()):
                        for eno in slist[sno]:
                            entry = Entry()
                            entry['url'] = surl
                            # NOTE(review): eno is passed unconverted to %02d here
                            # (the 'collected' branch uses int(eno)) — assumes the
                            # stored values are already ints; confirm.
                            entry['title'] = '%s S%02dE%02d' % (sname, int(sno), eno)
                            entry['tvdb_id'] = eid
                            if entry.isvalid():
                                entries.append(entry)
                            else:
                                self.log.debug('Invalid entry created? %s' % entry)
        entries.sort(key=lambda x: x['title'])
        return entries
# Register the input plugin with FlexGet under the name 'uoccin_emit'.
@event('plugin.register')
def register_plugin():
    plugin.register(UoccinEmit, 'uoccin_emit', api_ver=2)
|
import decimal
from django.db import models
from django.db.models import Sum
class Marathon(models.Model):
    """A fundraising marathon event that can accept PayPal donations."""

    DONATION_MINIMUM_DEFAULT = decimal.Decimal(1.0)
    EURO = "EUR"
    USD = "USD"
    SEK = "SEK"
    CURRENCY_CHOICES = [(EURO, "Euro"), (USD, "U.S. Dollar"), (SEK, "Swedish Krona")]

    start_time = models.DateTimeField()
    name = models.CharField(max_length=100, help_text="Name of the Marathon")
    slug = models.SlugField(max_length=20, unique=True, allow_unicode=False)
    recipient_name = models.CharField(
        max_length=100, blank=True, verbose_name="Donation Recipient"
    )
    recipient_paypal = models.EmailField()
    donation_minimum = models.DecimalField(
        max_digits=10, decimal_places=2, default=DONATION_MINIMUM_DEFAULT
    )
    currency = models.CharField(max_length=3, choices=CURRENCY_CHOICES, default=EURO)
    accept_donations = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True)

    @staticmethod
    def get_latest_marathon():
        """Return the marathon with the most recent start_time, or None.

        Uses QuerySet.first() so only one row is fetched; the original
        len(queryset) check evaluated the whole table just to test emptiness.
        """
        return Marathon.objects.order_by("-start_time").first()

    # returns the donation total as a simple decimal number
    def donations_get_total(self):
        """Sum of all COMPLETED donations; Decimal(0) when there are none."""
        from .donation import Donation  # local import avoids a circular import

        donation_sum = self.donation_set.filter(
            transaction_state=Donation.COMPLETED
        ).aggregate(Sum("amount"))["amount__sum"]
        return donation_sum if donation_sum else decimal.Decimal(0)

    def __str__(self):
        return self.name
|
import shelve
from contextlib import closing

# closing() guarantees the shelf is flushed and closed even if one of the
# updates raises part-way through (the original leaked the handle on error).
with closing(shelve.open('class-shelve')) as db:
    # Fetch, mutate, and write back each record: shelve does not observe
    # in-place mutation of stored objects unless writeback=True is used.
    sue = db['sue']
    sue.giveRaise(.25)
    db['sue'] = sue

    tom = db['tom']
    tom.giveRaise(.20)
    db['tom'] = tom
|
from copy import deepcopy
from cpl import INF
from cpl.graph import AdjMatrix
def floyd_warshall(graph: AdjMatrix):
    """All-pairs shortest paths via Floyd-Warshall.

    The input matrix is left untouched; a deep copy is relaxed in place and
    returned. Cells equal to INF mean "no edge" and are never used as
    intermediate hops.
    """
    n = len(graph)
    dist: AdjMatrix = deepcopy(graph)
    for via in range(n):
        for src in range(n):
            if dist[src][via] == INF:
                continue  # no path src -> via; nothing can be relaxed through it
            for dst in range(n):
                if dist[via][dst] == INF:
                    continue
                candidate = dist[src][via] + dist[via][dst]
                if candidate < dist[src][dst]:
                    dist[src][dst] = candidate
    return dist
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def make_mlp(in_channels, mlp_channels, act_builder=nn.ReLU, last_act=True):
    """Build an MLP as ``nn.Sequential`` of Linear layers with activations.

    Args:
        in_channels: size of the input features.
        mlp_channels: output width of each successive Linear layer.
        act_builder: zero-arg callable producing the activation module.
        last_act: when False, the final Linear gets no trailing activation.
    """
    layers = []
    prev = in_channels
    final = len(mlp_channels) - 1
    for i, width in enumerate(mlp_channels):
        layers.append(nn.Linear(prev, width))
        if i < final or last_act:
            layers.append(act_builder())
        prev = width
    return nn.Sequential(*layers)
def make_convs1d(in_channels, conv_channels, act_builder=nn.ReLU, last_act=True):
    """Stack 1x1 ``nn.Conv1d`` layers (pointwise MLP over sequences).

    Mirrors ``make_mlp``: an activation follows every conv except, when
    ``last_act`` is False, the final one.
    """
    layers = []
    prev = in_channels
    final = len(conv_channels) - 1
    for i, width in enumerate(conv_channels):
        layers.append(nn.Conv1d(prev, width, kernel_size=1))
        if i < final or last_act:
            layers.append(act_builder())
        prev = width
    return nn.Sequential(*layers)
def make_convs2d(in_channels, conv_channels, act_builder=nn.ReLU, last_act=True):
    """Stack 1x1 ``nn.Conv2d`` layers (pointwise MLP over feature maps).

    Mirrors ``make_mlp``: an activation follows every conv except, when
    ``last_act`` is False, the final one.
    """
    layers = []
    prev = in_channels
    final = len(conv_channels) - 1
    for i, width in enumerate(conv_channels):
        layers.append(nn.Conv2d(prev, width, kernel_size=1))
        if i < final or last_act:
            layers.append(act_builder())
        prev = width
    return nn.Sequential(*layers)
def make_mlp_bn(in_channels, mlp_channels, act_builder=nn.ReLU, last_act=True):
    """MLP with batch normalisation: Linear(bias=False) -> BatchNorm1d -> act.

    When ``last_act`` is False, the final layer is a lone biased Linear with
    neither norm nor activation (the bias replaces the norm's shift).
    """
    layers = []
    prev = in_channels
    final = len(mlp_channels) - 1
    for i, width in enumerate(mlp_channels):
        if i < final or last_act:
            layers += [
                nn.Linear(prev, width, bias=False),
                nn.BatchNorm1d(width),
                act_builder(),
            ]
        else:
            layers.append(nn.Linear(prev, width, bias=True))
        prev = width
    return nn.Sequential(*layers)
def make_mlp_gn(in_channels, mlp_channels, num_groups, act_builder=nn.ReLU, last_act=True):
    """MLP with group normalisation: Linear(bias=False) -> GroupNorm -> act.

    When ``last_act`` is False, the final layer is a lone biased Linear.

    Bug fix: the ``last_act`` branch previously built
    ``nn.BatchNorm1d(num_groups, c_out)`` — BatchNorm with ``num_groups``
    features and ``c_out`` as eps — instead of ``nn.GroupNorm(num_groups,
    c_out)`` like every other layer in this builder.
    """
    c_in = in_channels
    module_list = []
    for idx, c_out in enumerate(mlp_channels):
        if idx < len(mlp_channels) - 1 or last_act:
            module_list.append(nn.Linear(c_in, c_out, bias=False))
            module_list.append(nn.GroupNorm(num_groups, c_out))
            module_list.append(act_builder())
        else:
            module_list.append(nn.Linear(c_in, c_out, bias=True))
        c_in = c_out
    return nn.Sequential(*module_list)
def make_convs2d_bn(in_channels, conv_channels, act_builder=nn.ReLU, bn=False, last_act=True):
    """1x1 Conv2d stack with batch norm: Conv2d(bias=False) -> BatchNorm2d -> act.

    When ``last_act`` is False, the final layer is a lone biased Conv2d.
    Note: the ``bn`` parameter is accepted but unused; it is kept only for
    signature compatibility with existing callers.
    """
    layers = []
    prev = in_channels
    final = len(conv_channels) - 1
    for i, width in enumerate(conv_channels):
        if i < final or last_act:
            layers += [
                nn.Conv2d(prev, width, 1, bias=False),
                nn.BatchNorm2d(width),
                act_builder(),
            ]
        else:
            layers.append(nn.Conv2d(prev, width, 1, bias=True))
        prev = width
    return nn.Sequential(*layers)
class SpatialBroadcast(nn.Module):
    """Tile a feature vector over a spatial grid and append a coordinate map.

    Input of shape (B, C) becomes (B, C + 2, H, W), where (H, W) is
    ``image_shape`` divided by ``stride`` and the extra two channels are an
    affine coordinate grid (registered as a non-trainable buffer).
    """

    def __init__(self, image_shape, stride=1):
        super(SpatialBroadcast, self).__init__()
        self.image_shape = image_shape
        self.stride = stride
        h = int(image_shape[0] / stride)
        w = int(image_shape[1] / stride)
        self.grid_shape = (h, w)
        # Same affine matrix as before: unit scale, translation of 1 per axis.
        theta = torch.tensor([[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]]], dtype=torch.float32)
        # affine_grid yields (1, h, w, 2); store channels-first as (1, 2, h, w).
        coord_grid = F.affine_grid(theta, [1, 1, h, w])
        self.register_buffer('grid', coord_grid.permute(0, 3, 1, 2).contiguous())

    def forward(self, x):
        h, w = self.grid_shape
        batch = x.size(0)
        tiled = x.reshape(batch, x.size(1), 1, 1).expand(-1, -1, h, w)
        coords = self.grid.expand(batch, -1, -1, -1)
        return torch.cat([tiled, coords], dim=1)
class Flatten(nn.Module):
    r"""Flatten a contiguous range of dims into one; ``nn.Sequential``-friendly.

    Args:
        start_dim: first dim to flatten (default = 1).
        end_dim: last dim to flatten (default = -1).

    Shape:
        - Input: :math:`(N, *dims)`
        - Output: :math:`(N, \prod *dims)` (for the default case).

    Examples::

        >>> m = nn.Sequential(
        >>>     nn.Conv2d(1, 32, 5, 1, 1),
        >>>     Flatten()
        >>> )
    """
    __constants__ = ['start_dim', 'end_dim']

    def __init__(self, start_dim=1, end_dim=-1):
        super(Flatten, self).__init__()
        self.start_dim = start_dim
        self.end_dim = end_dim

    def forward(self, input):
        # Delegate to Tensor.flatten with the configured dimension span.
        return input.flatten(self.start_dim, self.end_dim)
|
from flopz.core.function import Function
from flopz.core.module import Module, SequentialModule
from flopz.core.shellcode import Shellcode
from flopz.core.assembler import Assembler
from flopz.arch.ppc.vle.e200z0 import E200Z0
from flopz.arch.ppc.vle.instructions import *
from flopz.core.label import Label, LabelRef
from pytest import raises
def test_base_function():
    """Function should assemble save/logic/restore modules in order and
    honour an instructions_func-driven register-cleanup prologue."""
    arch = E200Z0()
    # it should assemble in order: pre, logic, post
    pre_m = Module(address=0, instructions=[
        SeAdd(arch.r0, arch.r1),
    ], registers_written=[], registers_read=[])
    post_m = Module(address=0, instructions=[
        SeSubi(arch.r0, 1),
    ], registers_written=[], registers_read=[])
    logic = Module(address=0, instructions=[
        SeAdd(arch.r7, arch.r6),
    ], registers_written=[], registers_read=[])
    f = Function(address=0, save_register_module=pre_m, restore_register_module=post_m, logic=logic)
    assert(f.bytes() == (pre_m.bytes() + logic.bytes() + post_m.bytes()))
    # it should clean registers automatically, using the provided modules
    logic = Module(address=0, instructions=[
        SeAdd(arch.r7, arch.r6),
    ], registers_written=[arch.r0, arch.r1], registers_read=[])
    f = Function(address=0, save_register_module=Module(address=0, instructions_func=lambda conf: [
        *[SeSubi(reg, 1) for reg in conf['registers']]
    ]), restore_register_module=post_m, logic=logic)
    # the generated prologue must cover exactly the registers the logic writes
    pre_bytes = SeSubi(arch.r0, 1).bytes() + SeSubi(arch.r1, 1).bytes()
    assert(f.bytes()[:len(pre_bytes)] == pre_bytes)
def test_extra_arguments():
    """Extra arguments supplied via bytes(instruction_args=...) must reach the
    instructions_func; an empty register list yields an empty prologue."""
    arch = E200Z0()
    # it should allow passing extra arguments via the bytes(..) call
    post_m = Module(address=0, instructions=[
        SeSubi(arch.r0, 1),
    ], registers_written=[], registers_read=[])
    logic = Module(address=0, instructions=[
        SeAdd(arch.r7, arch.r6),
    ], registers_written=[], registers_read=[])
    f = Function(address=0, save_register_module=Module(address=0, instructions_func=lambda conf: [
        *[SeSubi(reg, 1) for reg in conf['registers']]
    ]), restore_register_module=post_m, logic=logic)
    # with no registers requested, only logic + restore bytes remain
    assert(f.bytes(instruction_args={'registers': []}) == (logic.bytes() + post_m.bytes()))
# Copyright 2020 Neoinvest.ai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from fml.testing.fixtures import BaseTestCase, TickFactory
from fml.data import bars
class TickBarTestCase(BaseTestCase):
    """TickBar(threshold=10) should emit every 10th tick of the stream."""

    @classmethod
    def setUpClass(cls) -> None:
        cls.ticks_factory = TickFactory(tick_number=100, store_ticks=True)

    def test_tick_bar_data(self):
        # Every 10th tick (1-based position) closes a bar.
        expected_ticks = [
            tick
            for position, tick in enumerate(self.ticks_factory.ticks, start=1)
            if position % 10 == 0
        ]
        with TestPipeline() as p:  # Use TestPipeline for testing.
            result = (
                p | beam.Create(self.ticks_factory.ticks)
                | bars.TickBar(threshold=10)
            )
            assert_that(result, equal_to(expected_ticks))
class VolumeBarTestCase(BaseTestCase):
    """VolumeBar should close a bar whenever accumulated price*quantity
    reaches the threshold, then reset the running volume."""

    @classmethod
    def setUpClass(cls) -> None:
        cls.ticks_factory = TickFactory(tick_number=2, store_ticks=True)

    def test_volume_bar_data(self):
        volume_threshold = 100000
        expected_ticks = []
        running_volume = 0
        for tick in self.ticks_factory.ticks:
            running_volume += tick.price * tick.quantity
            if running_volume >= volume_threshold:
                expected_ticks.append(tick)
                running_volume = 0
        logging.info(f"Volume threshold = {volume_threshold}")
        logging.info(expected_ticks)
        with TestPipeline() as p:  # Use TestPipeline for testing.
            result = (
                p | beam.Create(self.ticks_factory.ticks)
                | bars.VolumeBar(threshold=volume_threshold)
            )
            assert_that(result, equal_to(expected_ticks))

    def test_sample_volume_bar_data(self):
        # Fixed fixture data with known bar boundaries at indices 1, 3, 4, 6.
        with TestPipeline() as p:  # Use TestPipeline for testing.
            result = (
                p | beam.Create(self.TICK_DATA_PARSED)
                | bars.VolumeBar(threshold=100000)
            )
            assert_that(result, equal_to(
                [self.TICK_DATA_PARSED[1], self.TICK_DATA_PARSED[3],
                 self.TICK_DATA_PARSED[4], self.TICK_DATA_PARSED[6]]
            ))
if __name__ == '__main__':
    # Raise log level so the tests' logging.info diagnostics are visible.
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
from spack import *
class DocbookXml(Package):
    """Docbook DTD XML files."""

    homepage = "http://www.oasis-open.org/docbook"
    url = "http://www.oasis-open.org/docbook/xml/4.5/docbook-xml-4.5.zip"

    version('4.5', '03083e288e87a7e829e437358da7ef9e')

    def install(self, spec, prefix):
        # Copy everything from the extracted archive into the prefix:
        # directories as trees (keeping symlinks), files individually.
        for entry in os.listdir('.'):
            source = os.path.abspath(entry)
            destination = os.path.join(prefix, entry)
            if os.path.isdir(entry):
                install_tree(source, destination, symlinks=True)
            else:
                install(source, destination)

    def setup_environment(self, spack_env, run_env):
        # Make the installed XML catalog discoverable by libxml2 tooling.
        catalog_path = os.path.join(self.spec.prefix, 'catalog.xml')
        run_env.set('XML_CATALOG_FILES', catalog_path, separator=' ')
|
class Sequence(object):
    """A named nucleotide sequence with an identifier and free-text comment."""

    def __init__(self, identifier, comment, seq):
        self.id = identifier
        self.comment = comment
        self.seq = self._clean(seq)

    def _clean(self, seq):
        r"""
        remove newline from the string representing the sequence
        :param seq: the string to clean
        :return: the string without '\n'
        :rtype: string
        """
        # Bug fix: str.replace requires both the old and the new substring;
        # the original seq.replace('\n') raised TypeError on every call.
        return seq.replace('\n', '')

    def gc_percent(self):
        """
        :return: the gc ratio
        :rtype: float
        """
        seq = self.seq.upper()
        return float(seq.count('G') + seq.count('C')) / len(seq)
# Example sequences (note: Sequence._clean runs at import time here).
dna1 = Sequence('gi214', 'the first sequence', 'tcgcgcaacgtcgcctacatctcaagattca')
dna2 = Sequence('gi3421', 'the second sequence', 'gagcatgagcggaattctgcatagcgcaagaatgcggc')
from .server import Batch, EvaluatorServer

# Bug fix: __all__ must contain *names* (strings); listing the objects
# themselves makes `from package import *` raise TypeError in Python 3.
__all__ = ["Batch", "EvaluatorServer"]
|
from tornado.web import Application, RequestHandler
import json
import asyncio
import pandas as pd
import datetime
from ..db import db_conn, dumper_job, job_interface
from ..appconfig import AppConfig
from dateutil import parser
from pathlib import Path
# Module-level application configuration singleton used by the handlers.
config = AppConfig()
from collections import OrderedDict
import logging
import pytz
import os
class JsonHandler(RequestHandler):
    """This handler accepts a json string with the following keywords:
    ds_names: list of table names in measurements database to be queried
    delta_time: start query with timestamp > (now - delta_time)
    nsamples: the number of samples you want returned over the delta_time.
    if we are upsampling we will back fill the missing points otherwise we
    will take the mean.
    """
    conn = db_conn()
    jiface = job_interface()

    async def post(self):
        resp = {"errors": [], "info": [], "success": False}
        try:
            params = json.loads(self.request.body.decode())
        except Exception as error:
            # Bug fix: the original wrote the return value of list.append()
            # (always None) to the client, so the error report was lost.
            resp["errors"].append(str(error))
            self.set_header("Content-Type", "application/json")
            self.write(json.dumps(resp))
            return
        # Keep only the requested datasets that actually exist.
        info = await self.conn.info()
        good = []
        for name in params["ds_names"]:
            if any(info.ds_name == name):
                good.append(name)
            else:
                resp["errors"].append(f"Bad ds_name {name}")
        try:
            start = pd.to_datetime(params["startdate"])
        except Exception as err:
            resp["errors"].append(str(err))
            start = None
        try:
            stop = pd.to_datetime(params["enddate"])
        except Exception as err:
            resp["errors"].append(str(err))
            stop = None
        if len(good) == 0:
            # Nothing valid to query; report the accumulated errors.
            self.write(json.dumps(resp))
            return
        try:
            nsamples = int(params["nsamples"])
        except Exception as err:
            resp["errors"].append(str(err))
            nsamples = 100000  # generous default sample cap
        try:
            fit_order = int(params["fit_order"])
        except Exception as err:
            resp["errors"].append(str(err))
            fit_order = 5  # default polynomial fit order
        job = dumper_job(good, nsamples, fit_order, start, stop)
        self.jiface.submit_job(job)
        resp["info"] = job.state
        resp["success"] = True
        self.write(json.dumps(resp, default=str))
        self.set_header("Content-Type", "application/json")
class TestHandler(RequestHandler):
    """Debug endpoint: echoes the two path arguments plus the process env."""

    def get(self, *args):
        first, second = args[0], args[1]
        self.write(f"This is a test {first} {second}")
        self.write(str(os.environ))
class JobHandler(RequestHandler):
    """Return the JSON-serialised state of a single job (by ?jobid=...)."""
    jiface = job_interface()

    def get(self):
        job_id = self.get_argument('jobid')
        state = self.jiface[job_id].state
        self.write(json.dumps(state, default=str))
        self.set_header("Content-Type", "application/json")
class JobInfoHandler(RequestHandler):
    """Render the job-detail HTML page for one job id."""
    jiface = job_interface()

    def get(self):
        jobid = self.get_argument("jobid")
        state = self.jiface[jobid].state
        self.render('jobs.html', jobid=jobid, job=state)
class JobListHandler(RequestHandler):
    """List jobs as JSON (".json" suffix) or as a rendered HTML page."""
    jiface = job_interface()

    def get(self, type):  # parameter name kept for route compatibility; shadows builtin
        if type != ".json":
            self.render('joblist.html', jobs=self.jiface)
        else:
            self.write({'ids': list(self.jiface.iterids())})
class GetHandler(RequestHandler):
    """Debug endpoint echoing the raw request body."""
    conn = db_conn()

    def get(self):
        body = self.request.body
        self.write({'message': str(body)})
class InfoHandler(RequestHandler):
    """Dump the measurement tables' metadata as HTML (default) or JSON."""
    conn = db_conn()

    async def get(self, otype='.html'):
        info_df = await self.conn.info()
        render = info_df.to_html if otype == '.html' else info_df.to_json
        self.write(render())
class System2LogHandler(RequestHandler):
    """Map each subsystem name to the sorted list of its data-source names."""
    conn = db_conn()

    async def get(self):
        try:
            info_df = await self.conn.info()
        except Exception:
            self.write({"error": "Could not connect to database"})
            return
        output = OrderedDict()
        for subsystem in sorted(set(info_df['subsystem'])):
            output[subsystem] = list(info_df[info_df['subsystem'] == subsystem]['ds_name'])
        self.write(output)
class TaskHandler(RequestHandler):
    """Render the list of currently active background tasks."""
    jiface = job_interface()

    def get(self):
        active = self.jiface.active_tasks()
        self.render("tasklist.html", tasks=active)
class NotebookHandler(RequestHandler):
    """Create (once) and redirect to a Jupyter notebook for a given job."""
    jiface = job_interface()

    def get(self):
        uid = int(os.environ['USER_ID'])
        gid = int(os.environ['GROUP_ID'])
        jobid = self.get_argument("jobid")
        job = self.jiface[jobid]
        notebook_name = f"{job.state['description']}_{jobid[:4]}"
        # If the notebook already exists, just send the user there.
        for possible in Path("/notebooks/").iterdir():
            if possible.stem == notebook_name:
                self.redirect(f"/jupyter/notebooks/{notebook_name}.ipynb")
                return
        outfile = Path(f"/notebooks/{notebook_name}.ipynb")
        # render_string does the actual templating; the original also read
        # "jupyter-template" into a variable it immediately overwrote — that
        # dead file read has been removed.
        template = self.render_string("/db_dumper/jupyter-template", jobid=jobid)
        try:
            logging.debug(f"Writing file {outfile}")
            with outfile.open('w') as fd:
                fd.write(template.decode())
            logging.debug(f"Setting notbook onwnership: gid is {gid} uid is {uid}")
            os.chown(str(outfile), uid, gid )
        except Exception as error:
            logging.warning(f"Could not write jupyter template: {error}")
            self.write(f"{error}")
            self.write(str(outfile))
            self.finish()
            # Bug fix: the original fell through to redirect() after finish(),
            # which raises because the response is already completed.
            return
        self.redirect(f"/jupyter/notebooks/{notebook_name}.ipynb")
def make_app():
    """Build the Tornado application exposing the public routes."""
    routes = [
        ("/recent/json", JsonHandler),
        ("/recent/get", JsonHandler),
        (r"/info(.*)", InfoHandler),
    ]
    return Application(routes, debug=True)
|
# -*- coding: UTF-8 -*-
"""
Tesselect module, based on Delaunay tessellation, providing
some grain analysis techniques for microscopy images.
Dependencies:
-------------
Python 3.x
NumPy
SciPy
Matplotlib
Shapely (optional - only for one function)
Available functions:
--------------------
- MinusND - subtract an N-degree curve from the image line-by-line.
...
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2014-2015, Anton Sergeev.
#
# Distributed under the terms of the MIT License.
#
# The full license is in the file LICENSE, distributed with this software.
#-----------------------------------------------------------------------------
# Release data
__version__ = '0.1'
__author__ = "Anton Sergeev <antonsergeevphd@gmail.com>"
__license__ = "MIT"
import copy
import numpy as np
import scipy as sp
from scipy import ndimage, spatial
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
from matplotlib.widgets import Cursor, MultiCursor, RectangleSelector, Slider
from matplotlib.patches import Rectangle
from shapely.geometry import MultiPoint
########################################################################
def MinusND(input_data, degree=2, direction='h'):
    """
    Subtract a fitted polynomial background from an image line-by-line.

    Parameters
    ----------
    input_data : ndarray
        2-D array to subtract from
    degree : int, optional
        Degree of the polynomial fitted to each line
        Default: 2
    direction : str, optional
        Direction of the subtraction lines
        Legal values: 'h' (horizontal, default),
                      'v' (vertical),
                      'hv' (horizontal, then vertical),
                      'vh' (vertical, then horizontal)

    Returns
    -------
    data : ndarray
        Background-subtracted 2-D array, shifted so its minimum is 0
    """
    if type(direction) != str:
        raise ValueError("Type of parameter 'direction' must be string ('h', 'v', 'hv' or 'vh')")
    direction = direction.lower()
    if direction not in ['h', 'v', 'hv', 'vh']:
        raise ValueError("Parameter 'direction' must be equal to 'h', 'v', 'hv' or 'vh'")

    rows, cols = np.shape(input_data)
    line_data = np.zeros((rows, cols))

    def _fit_rows(source):
        # Fit each row with a degree-`degree` polynomial, store the fit.
        for r in np.arange(rows):
            poly = np.poly1d(np.polyfit(np.arange(cols), source[r, :], degree))
            line_data[r, :] = poly(np.arange(cols))

    def _fit_cols(source):
        # Same, column-wise.
        for c in np.arange(cols):
            poly = np.poly1d(np.polyfit(np.arange(rows), source[:, c], degree))
            line_data[:, c] = poly(np.arange(rows))

    if direction[0] == 'h':
        _fit_rows(input_data)
        if len(direction) == 2:  # 'hv': remove horizontal fit, then fit vertically
            input_data = input_data - line_data
            _fit_cols(input_data)
    elif direction[0] == 'v':
        _fit_cols(input_data)
        if len(direction) == 2:  # 'vh': remove vertical fit, then fit horizontally
            input_data = input_data - line_data
            _fit_rows(input_data)

    data = input_data - line_data
    data -= np.amin(data)
    return data
########################################################################
def FindTriangulation(x, y, ratio=0.4):
    """
    Delaunay-triangulate the points (x, y) and drop flat border triangles.

    Triangles whose circle ratio falls below `ratio` are masked out by
    matplotlib's TriAnalyzer, and a fresh Triangulation built from the
    surviving triangles is returned.
    """
    raw = mtri.Triangulation(x, y)
    flat = mtri.TriAnalyzer(raw).get_flat_tri_mask(min_circle_ratio=ratio)
    raw.set_mask(flat)
    return mtri.Triangulation(x, y, raw.get_masked_triangles())
########################################################################
def TriEdges(tri):
    """
    Edge geometry of a triangulation.

    Returns
    -------
    edges : ndarray of vertex-index pairs, each sorted ascending, rows in
        lexsort order of the original edge array
    edge_lengths : ndarray of Euclidean lengths per edge
    coordination_numbers : ndarray, number of neighbours per vertex
    vertex_neighbours : list of neighbour-index lists per vertex
    """
    x = tri.x
    y = tri.y
    # canonical edge list: rows lexsorted, then each pair put in ascending order
    edges = np.sort(tri.edges[np.lexsort(tri.edges.T)])
    edge_lengths = np.hypot(x[edges[:, 0]] - x[edges[:, 1]],
                            y[edges[:, 0]] - y[edges[:, 1]])
    vertex_neighbours = [[] for _ in range(len(x))]
    for edge in edges:
        vertex_neighbours[edge[0]].append(edge[1])
        vertex_neighbours[edge[1]].append(edge[0])
    coordination_numbers = np.asarray([len(nbrs) for nbrs in vertex_neighbours])
    return edges, edge_lengths, coordination_numbers, vertex_neighbours
########################################################################
def BoundaryVertices(coordination_numbers, vertex_neighbours):
    """
    Split vertices into interior and boundary sets.

    A vertex is boundary if its coordination number is <= 4 or if it
    neighbours such a low-coordination vertex. Returns boolean masks
    (mask_center, mask_boundary), each the complement of the other.
    """
    low_coord = (coordination_numbers <= 4)
    # Neighbours of low-coordination vertices also count as boundary.
    near = {n
            for i in range(len(coordination_numbers)) if low_coord[i]
            for n in vertex_neighbours[i]}
    near_mask = np.zeros_like(low_coord)
    for idx in near:
        near_mask[idx] = True
    mask_boundary = (low_coord + near_mask)
    mask_center = ~mask_boundary
    return mask_center, mask_boundary
########################################################################
def FindAngles(x, y, edges, vertex_neighbours):
    """
    Per-edge orientation angles (degrees, from arctan2) and, per vertex, the
    mean of its edge angles after folding each into (-30, 30] by multiples
    of 60 degrees (presumably exploiting six-fold lattice symmetry — confirm).
    """
    edge_angles = np.zeros(len(edges))
    for i, edge in enumerate(edges):
        edge_angles[i] = 180 / np.pi * np.arctan2(y[edge[1]] - y[edge[0]], x[edge[1]] - x[edge[0]])
    grain_angles_all = copy.deepcopy(vertex_neighbours) # ---!!--- not returned ---!!---
    grain_angles_all_norm = copy.deepcopy(vertex_neighbours) # ---!!--- not returned ---!!---
    grain_angles_mean = np.zeros(len(x))
    for i, neigs in enumerate(vertex_neighbours):
        for k, neig in enumerate(neigs):
            # first if: angle of edge (i, neig); when the canonical edge is
            # stored as (neig, i), flip the direction by +/-180 degrees
            if i < neig:
                grain_angles_all[i][k] = edge_angles[ np.all(edges==[i, neig], axis=1) ]
            elif edge_angles[ np.all(edges==[neig, i], axis=1) ] > 0:
                grain_angles_all[i][k] = edge_angles[ np.all(edges==[neig, i], axis=1) ] - 180
            elif edge_angles[ np.all(edges==[neig, i], axis=1) ] < 0:
                grain_angles_all[i][k] = edge_angles[ np.all(edges==[neig, i], axis=1) ] + 180
            else:
                grain_angles_all[i][k] = edge_angles[ np.all(edges==[neig, i], axis=1) ]
            # second if: fold the angle into (-30, 30] by 60-degree steps
            if grain_angles_all[i][k] > 150:
                grain_angles_all_norm[i][k] = grain_angles_all[i][k] - 180
            elif grain_angles_all[i][k] > 90:
                grain_angles_all_norm[i][k] = grain_angles_all[i][k] - 120
            elif grain_angles_all[i][k] > 30:
                grain_angles_all_norm[i][k] = grain_angles_all[i][k] - 60
            elif grain_angles_all[i][k] < -150:
                grain_angles_all_norm[i][k] = grain_angles_all[i][k] + 180
            elif grain_angles_all[i][k] < -90:
                grain_angles_all_norm[i][k] = grain_angles_all[i][k] + 120
            elif grain_angles_all[i][k] < -30:
                grain_angles_all_norm[i][k] = grain_angles_all[i][k] + 60
            else:
                grain_angles_all_norm[i][k] = grain_angles_all[i][k]
        grain_angles_mean[i] = np.mean(grain_angles_all_norm[i])
    return edge_angles, grain_angles_mean
########################################################################
def FindMinsTri(data, label_data, x, y, edges):
    """
    Find minima of the 2-D array `data` along the straight lines between
    points (x, y) that are connected by `edges`.

    Returns the local minimum for every edge in `edges`: per-vertex lists of
    minimum values, the argmin index along each sampled cross-section, and
    the (x, y) position of each minimum. (Docstring translated from Russian.)
    """
    label_data = label_data.copy() - 1 # shift labels to match point numbering
    min_list = [[] for k in range(len(x))]
    argmin_array = np.zeros(len(edges))
    argmin_xy = np.zeros((2, len(edges)))
    for k, edge in enumerate(edges):
        x0, y0 = x[edge[0]], y[edge[0]]
        x1, y1 = x[edge[1]], y[edge[1]]
        num = max(abs(x1-x0), abs(y1-y0)) + 1 # max pixel count (x vs y choice) between the two points
        X, Y = np.linspace(x0, x1, num), np.linspace(y0, y1, num)
        X, Y = np.rint(X).astype(int), np.rint(Y).astype(int)
        data_cs = data[Y, X] # note: X and Y deliberately swapped (rows indexed by Y)
        label_data_cs = label_data[Y, X]
        # clip the cross-section so it starts where the first grain's label
        # region ends and stops where the second grain's region begins
        if np.any(label_data_cs == edge[0]):
            cs_start = np.where(label_data_cs == edge[0])[0][-1]
        else:
            cs_start = 0
        if np.any(label_data_cs == edge[1]):
            cs_end = np.where(label_data_cs == edge[1])[0][0]
        else:
            cs_end = len(label_data_cs) - 1
        local_min = np.amin(data_cs[cs_start:cs_end+1])
        min_list[edge[0]].append(local_min)
        min_list[edge[1]].append(local_min)
        local_argmin = np.argmin(data_cs[cs_start:cs_end+1]) + cs_start
        argmin_array[k] = local_argmin
        argmin_xy[0, k] = X[local_argmin]
        argmin_xy[1, k] = Y[local_argmin]
    return min_list, argmin_array, argmin_xy
########################################################################
def FindOutEdges(edges, num_of_points=-1):
    """
    For each point, list the indices of the edges incident to it.

    With the default num_of_points=-1 the point count is inferred from the
    largest vertex index present in `edges`. Raises ValueError for a
    non-integer or too-small explicit count.
    """
    if type(num_of_points) != int:
        raise ValueError("Type of parameter 'num_of_points' must be integer")
    if num_of_points == -1:
        num_of_points = max(max(pair) for pair in edges) + 1
    elif num_of_points < 2:
        raise ValueError("Value of parameter 'num_of_points' must be greater than 1")
    incident = [[] for _ in range(num_of_points)]
    for idx, edge in enumerate(edges):
        incident[edge[0]].append(idx)
        incident[edge[1]].append(idx)
    return incident
########################################################################
def PreciseLabels(data_shape, argmin_xy, out_edges, mask_center):
    """
    Build a pixel-accurate label image from per-edge minima positions.

    For every interior vertex (mask_center True) the convex hull of the
    minima positions on its outgoing edges is filled with label i + 1.
    Progress is printed in 5% steps. Requires shapely (MultiPoint).
    """
    mesh_x, mesh_y = np.meshgrid(np.arange(data_shape[1]), np.arange(data_shape[0]))
    coords = np.vstack((mesh_x.ravel(), mesh_y.ravel())).T
    coords = MultiPoint(coords)
    label_data_prec = np.zeros(data_shape, dtype=int)
    num = np.sum(mask_center) # number of precise labels
    percentage = np.rint(np.linspace(0,num,21)).astype(int)
    count = 0 # number of calculated labels
    print('Calculated: ', end='')
    for i, outs in enumerate(out_edges):
        if mask_center[i] == True:
            poly = MultiPoint(argmin_xy.T[outs]).convex_hull
            # grid points strictly inside the hull get this vertex's label
            inpoints = [point for point in coords if poly.contains(point)]
            for point in inpoints:
                label_data_prec[point.y, point.x] = i + 1
            if count in percentage:
                print('{}%... '.format(np.argwhere(percentage==count)[0,0]*5), end='')
            elif count == num - 1:
                print('100%')
            count += 1
    return label_data_prec
########################################################################
def FindDiameters(input_data, label_data, half_height, data_step):
"""
Fuction
"""
labels = np.unique(label_data)[1:]
grain_area = np.zeros(len(half_height))
grain_area_half = np.zeros(len(half_height))
for k in labels:
grain = input_data * (label_data == k)
grain_area[k-1] = np.sum(grain > 0) * data_step**2
grain_area_half[k-1] = np.sum(grain > half_height[k]) * data_step**2
return grain_area, grain_area_half
########################################################################
def RDF(x, y, data_step, area_total):
    """Compute a radial distribution function of point centres.

    Parameters
    ----------
    x, y : array_like
        Point coordinates, in pixel units.
    data_step : float
        Physical size of one pixel.
    area_total : float
        Total physical area, used to normalize the annulus areas.

    Returns
    -------
    (rdf_x, rdf_y) : physical distance of each unit-width bin and the
    normalized pair density in that bin.
    """
    grain_pdist = spatial.distance.pdist(np.vstack((x, y)).T)
    # Histogram the pair distances into unit-width bins.
    # BUG FIX: indices must be ints (numpy rejects float indices), and the
    # histogram is sized floor(max)+1 so an exactly-integer maximum
    # distance still has a bin (ceil(max) == floor(max) overflowed before).
    bins = np.floor(grain_pdist).astype(int)
    rdf = np.bincount(bins, minlength=int(np.max(grain_pdist)) + 1)
    # Physical area of the annulus [k, k+1) for each bin.
    k = np.arange(len(rdf))
    area_r = np.pi * ((k + 1)**2 - k**2) * data_step**2
    normarea = area_r / area_total
    normrdf = rdf / np.sum(rdf)
    rdf_y = normrdf / normarea
    rdf_x = np.arange(len(rdf)) * data_step
    return rdf_x, rdf_y
########################################################################
|
# Prompt for a student's details and echo them back in one summary line.
student_name = input('Enter the name of the student: ')
marks = int(input('Enter the marks: '))
phoneNumber = int(input("Enter student's phone number: "))
print(f'The name of the student is {student_name},his marks are {marks} and phone number is {phoneNumber}.')
|
# Package-level re-exports: expose the template entry point and the
# package version read from the version module.
from .version import VERSION
from .template_code import template_main
__all__ = ['template_main']
__version__ = VERSION
|
#!/anaconda2/bin/python
from units import Units
class Commodity(object):
    """A priced quantity expressed in some units."""

    def __init__(self, price, units):
        # Units wraps/validates the raw unit specification.
        self.price = price
        self.units = Units(units)

    def inverse(self):
        """Return the reciprocal quote: (1/price, inverted units)."""
        inverted_units = self.units.inverse()
        return 1.0 / self.price, inverted_units
class Triarb():
    """Triangular-arbitrage helper holding three trade legs.

    NOTE(review): this module is Python 2 (print statement below);
    the semantics of Units.compute are defined elsewhere — confirm.
    """
    def __init__( self, sideOne, sideTwo, sideThree):
        # The three legs of the triangle.
        self.sideOne = sideOne
        self.sideTwo = sideTwo
        self.sideThree = sideThree
    @staticmethod
    def compute( sideOne, sideTwo, sideThree):
        # Combine the unit dimensions of the first two legs, then fold in
        # the third.  Only prints when the final combination is falsy;
        # returns None either way.
        t1 = Units.compute([sideOne.units,sideTwo.units])
        f = Units.compute([Units(t1),sideThree])
        if not f:
            print t1, f
if __name__=='__main__':
    # NOTE(review): main() is not defined anywhere in this module —
    # running the file directly will raise NameError; confirm where the
    # entry point is supposed to come from.
    main()
|
import tensorflow as tf
import numpy as np
import skimage.io
import itertools
import os
import bz2
import argparse
import scipy
import skimage.transform
# VGG-19 layer suffixes used for the content loss and for the local and
# global style losses respectively.
CONTENT_LAYERS = ['4_1']
LOCAL_STYLE_LAYERS = ['1_1','2_1','3_1','4_1']
GLOBAL_STYLE_LAYERS=['1_1','2_1','3_1','4_1']
def conv2d(input_tensor, kernel, bias):
    """3x3 'same' convolution with fixed pre-trained weights, plus ReLU.

    *kernel* arrives in (out, in, h, w) order and is transposed to the
    TF layout (h, w, in, out); the explicit one-pixel pad followed by a
    'VALID' convolution reproduces 'SAME' behaviour for a 3x3 kernel.
    """
    weights = np.transpose(kernel, [2, 3, 1, 0])
    padded = tf.pad(input_tensor, [[0, 0], [1, 1], [1, 1], [0, 0]])
    convolved = tf.nn.conv2d(padded, tf.constant(weights), (1, 1, 1, 1), 'VALID')
    biased = tf.nn.bias_add(convolved, tf.constant(bias))
    return tf.nn.relu(biased)
def avg_pooling(input_tensor, size=2):
    """Non-overlapping average pooling with a size x size window."""
    window = [size, size]
    return tf.nn.pool(input_tensor, window, 'AVG', 'VALID', strides=window)
def norm(arr):
    """L2-normalize every leading-axis slice of *arr*.

    Each slice arr[i] is flattened, scaled to unit Euclidean length, and
    reshaped back to the slice's original shape.  Returns the slices as
    a list.
    """
    count, *slice_shape = arr.shape

    def unit(vec):
        # In-place scale of the flattened copy, then restore the shape.
        vec /= np.sqrt(sum(vec**2))
        return np.reshape(vec, slice_shape)

    return [unit(arr[idx, :].flatten()) for idx in range(count)]
def build_base_net(input_tensor,input_map=None):
    """Construct the VGG-19 convolutional feature extractor.

    Loads pre-trained conv weights from vgg19.pkl.bz2 — a flat sequence
    of alternating (kernel, bias) entries consumed via the running index
    ``k`` — and wires up conv1_1 ... conv5_4 with average pooling between
    groups.  ``input_map`` is a semantic map that is average-pooled to
    each scale and concatenated onto the matching conv activations as
    'sem<group>_<layer>' entries.  Returns a dict of named tensors.
    """
    vgg19_file = os.path.join(os.path.dirname(__file__), 'vgg19.pkl.bz2')
    assert os.path.exists(vgg19_file), ("Model file with pre-trained convolution layers not found. Download here: "
                                        +"https://github.com/alexjc/neural-doodle/releases/download/v0.0/vgg19_conv.pkl.bz2")
    # NOTE(review): the archive holds pickled data, so newer numpy may
    # require np.load(..., allow_pickle=True) here — confirm.
    data = np.load(bz2.open(vgg19_file, 'rb'))
    k = 0
    net = {}
    # network divided into two parts,main and map,main downsamples the image,map dowsamples the semantic map
    net['img'] = input_tensor
    net['conv1_1'] = conv2d(net['img'], data[k], data[k+1])
    k += 2
    net['conv1_2'] = conv2d(net['conv1_1'], data[k], data[k+1])
    k += 2
    # average pooling without padding
    net['pool1'] = avg_pooling(net['conv1_2'])
    net['conv2_1'] = conv2d(net['pool1'], data[k], data[k+1])
    k += 2
    net['conv2_2'] = conv2d(net['conv2_1'], data[k], data[k+1])
    k += 2
    net['pool2'] = avg_pooling(net['conv2_2'])
    net['conv3_1'] = conv2d(net['pool2'], data[k], data[k+1])
    k += 2
    net['conv3_2'] = conv2d(net['conv3_1'], data[k], data[k+1])
    k += 2
    net['conv3_3'] = conv2d(net['conv3_2'], data[k], data[k+1])
    k += 2
    net['conv3_4'] = conv2d(net['conv3_3'], data[k], data[k+1])
    k += 2
    net['pool3'] = avg_pooling(net['conv3_4'])
    net['conv4_1'] = conv2d(net['pool3'], data[k], data[k+1])
    k += 2
    net['conv4_2'] = conv2d(net['conv4_1'], data[k], data[k+1])
    k += 2
    net['conv4_3'] = conv2d(net['conv4_2'], data[k], data[k+1])
    k += 2
    net['conv4_4'] = conv2d(net['conv4_3'], data[k], data[k+1])
    k += 2
    net['pool4'] = avg_pooling(net['conv4_4'])
    net['conv5_1'] = conv2d(net['pool4'], data[k], data[k+1])
    k += 2
    net['conv5_2'] = conv2d(net['conv5_1'], data[k], data[k+1])
    k += 2
    net['conv5_3'] = conv2d(net['conv5_2'], data[k], data[k+1])
    k += 2
    net['conv5_4'] = conv2d(net['conv5_3'], data[k], data[k+1])
    k += 2
    net['main'] = net['conv5_4']
    net['map'] = input_map
    # Pool the semantic map once per scale (i == 0) and concatenate it
    # onto each conv activation; groups 1-2 only have two conv layers.
    for j, i in itertools.product(range(5), range(4)):
        if j < 2 and i > 1: continue
        suffix = '%i_%i' % (j+1, i+1)
        if i == 0:
            net['map%i'%(j+1)] = avg_pooling(net['map'], 2**j)
        net['sem'+suffix] = tf.concat([net['conv'+suffix], net['map%i'%(j+1)]], -1)
    return net
def extract_target_data(content, content_mask, style, style_mask):
    """Precompute the optimization targets as plain numpy arrays.

    Returns (content_data, patches, global_data):
    content features of the content image, all 3x3 local style patches
    of the style image (with semantic channels), and the Gram matrix of
    the fused global style features.
    """
    # BGR channel means used by VGG preprocessing.
    pixel_mean = np.array([103.939, 116.779, 123.680], dtype=np.float32).reshape((1,1,1,3))
    # local style patches extracting
    input_tensor = style-pixel_mean
    input_map= style_mask
    net = build_base_net(input_tensor, input_map)
    local_features = [net['sem'+layer] for layer in LOCAL_STYLE_LAYERS]
    # layer aggregation for local style
    LF=local_features[0]
    for i in range(1,len(LOCAL_STYLE_LAYERS)):
        # Resize the running aggregate to the next layer's resolution and
        # stack the channels.
        lf=local_features[i]
        LF=tf.image.resize_images(LF,[lf.shape[1],lf.shape[2]],method=tf.image.ResizeMethod.BILINEAR)
        LF=tf.concat([LF,lf],3)
    dim = LF.shape[-1].value
    # All 3x3 patches of the aggregated local features.
    x = tf.extract_image_patches(LF, (1,3,3,1), (1,1,1,1), (1,1,1,1), 'VALID')
    patches=tf.reshape(x, (-1, 3, 3, dim))
    # content features
    input_tensor = content-pixel_mean
    input_map= content_mask
    net = build_base_net(input_tensor, input_map)
    content_features = [net['conv'+layer] for layer in CONTENT_LAYERS]
    content_data=[]
    # global feature correlations based on fused features
    input_tensor = style-pixel_mean
    input_map= style_mask
    net = build_base_net(input_tensor, input_map)
    global_features = [net['conv'+layer] for layer in GLOBAL_STYLE_LAYERS]
    GF=global_features[0]
    for i in range(1,len(GLOBAL_STYLE_LAYERS)):
        gf=global_features[i]
        GF=tf.image.resize_images(GF,[gf.shape[1],gf.shape[2]],method=tf.image.ResizeMethod.BILINEAR)
        GF=tf.concat([GF,gf],3)
    N=int(GF.shape[3])
    M=int(GF.shape[1]*GF.shape[2])
    GF=tf.reshape(GF,(M,N))
    # Gram matrix (channel correlations) of the fused global features.
    GF_corr=tf.matmul(tf.transpose(GF),GF)
    # Evaluate everything once and hand back plain numpy arrays.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        patches=patches.eval()
        for c in content_features:
            content_data.append(c.eval())
        global_data=GF_corr.eval()
    return content_data,patches,global_data
def format_and_norm(arr, depth, sem_weight):
    """Move the leading patch axis of *arr* to the back and build a
    per-patch normalized copy.

    For each patch, the first *depth* channels are scaled by
    1/sqrt(3*sum(x^2)+1e-6) and the remaining (semantic) channels by
    1/sqrt(sem_weight*sum(x^2)+1e-6).

    Returns (normalized, unnormalized), both shaped arr.shape[1:] + (n,).
    """
    count, *patch_shape = arr.shape
    scaled = np.zeros(patch_shape + [count], dtype=arr.dtype)
    raw = np.zeros(patch_shape + [count], dtype=arr.dtype)
    for idx in range(count):
        patch = arr[idx, ...]
        raw[..., idx] = patch
        # Feature channels and semantic channels are normalized separately.
        feat = patch[..., :depth]
        feat = feat / np.sqrt(3 * np.sum(feat**2) + 1e-6)
        sem = patch[..., depth:]
        sem = sem / np.sqrt(sem_weight * np.sum(sem**2) + 1e-6)
        scaled[..., idx] = np.concatenate([feat, sem], -1)
    return scaled, raw
"""GLStyleNet"""
class Model(object):
def __init__(self, args, content, style, style2, content_mask=None, style_mask=None):
self.args = args
if len(args.device)>3 and args.device[:3]=='gpu':
os.environ["CUDA_VISIBLE_DEVICES"] = args.device[3:]
elif args.device=='cpu':
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
self.pixel_mean = np.array([103.939, 116.779, 123.680], dtype=np.float32).reshape((1,1,1,3))
self.content = np.expand_dims(content, 0).astype(np.float32)
self.style = np.expand_dims(style, 0).astype(np.float32)
self.style2= np.expand_dims(style2, 0).astype(np.float32)
if content_mask is not None:
self.content_mask = np.expand_dims(content_mask, 0).astype(np.float32)
else:
self.content_mask = np.ones(self.content.shape[:-1]+(1,), np.float32)
self.args.semantic_weight= 0.0
if style_mask is not None:
self.style_mask = np.expand_dims(style_mask, 0).astype(np.float32)
else:
self.style_mask = np.ones(self.style.shape[:-1]+(1,), np.float32)
self.args.semantic_weight = 0.0
assert self.content_mask.shape[-1] == self.style_mask.shape[-1]
self.args.semantic_weight=100/self.args.semantic_weight if self.args.semantic_weight else 1E+8
self.mask_depth = self.content_mask.shape[-1]
# get target content features, local patches, global feature correlations
self.content_data, self.local_data, self.global_data= extract_target_data(self.content, self.content_mask, self.style, self.style_mask)
tf.reset_default_graph()
if args.init=='style':
input_tensor = tf.Variable(self.style2)
elif args.init=='content':
input_tensor = tf.Variable(self.content)
else:
input_tensor = tf.Variable(np.random.uniform(16, 240, self.content.shape).astype(np.float32))
input_map=tf.Variable(self.content_mask)
self.net = build_base_net(input_tensor, input_map)
self.content_features = [self.net['conv'+layer] for layer in CONTENT_LAYERS]
self.local_features = [self.net['sem'+layer] for layer in LOCAL_STYLE_LAYERS]
self.global_features = [self.net['conv'+layer] for layer in GLOBAL_STYLE_LAYERS]
# local style layer aggregation
LF=self.local_features[0]
for i in range(1,len(LOCAL_STYLE_LAYERS)):
lf=self.local_features[i]
LF=tf.image.resize_images(LF,[lf.shape[1],lf.shape[2]],method=tf.image.ResizeMethod.BILINEAR)
LF=tf.concat([LF,lf],3)
# patch-matching,concatenate semantic maps
self.local_loss = 0
sem = LF
patches = tf.extract_image_patches(sem, (1,3,3,1), (1,1,1,1), (1,1,1,1), 'VALID')
patches = tf.reshape(patches, (-1, 3, 3, sem.shape[-1].value))
pow2 = patches**2
p1 = tf.reduce_sum(pow2[..., :-self.mask_depth], [1,2,3])
p1 = tf.reshape(p1, [-1,1,1,1])
p1 = pow2[..., :-self.mask_depth]/(3*p1+1e-6)
p2 = tf.reduce_sum(pow2[..., -self.mask_depth:], [1,2,3])
p2 = tf.reshape(p2, [-1,1,1,1])
p2 = pow2[..., -self.mask_depth:]/(self.args.semantic_weight*p2+1e-6)
norm_patch = tf.concat([p1, p2], -1)
norm_patch = tf.reshape(norm_patch, [-1, 9*sem.shape[-1].value])
norm, un_norm = format_and_norm(self.local_data, -self.mask_depth, self.args.semantic_weight)
norm = np.reshape(norm, [9*sem.shape[-1].value, -1])
sim = tf.matmul(norm_patch, norm)
max_ind = tf.argmax(sim, axis=-1)
target_patches = tf.gather(self.local_data, tf.reshape(max_ind, [-1]))
# local style loss
self.local_loss += tf.reduce_mean((patches[...,:-self.mask_depth]-target_patches[...,:-self.mask_depth])**2)
self.local_loss *= args.local_weight
# content loss
self.content_loss = 0
for c, t in zip(self.content_features, self.content_data) :
self.content_loss += tf.reduce_mean((c-t)**2)
self.content_loss *= args.content_weight
# total variation regularization loss
self.tv_loss = args.smoothness*(tf.reduce_mean(tf.abs(input_tensor[..., :-1,:]-input_tensor[..., 1:,:]))
+tf.reduce_mean(tf.abs(input_tensor[..., :, :-1]-input_tensor[..., :,1:])))
# global style loss
GF=self.global_features[0]
for i in range(1,len(GLOBAL_STYLE_LAYERS)):
gf=self.global_features[i]
GF=tf.image.resize_images(GF,[gf.shape[1],gf.shape[2]],method=tf.image.ResizeMethod.BILINEAR)
GF=tf.concat([GF,gf],3)
N=int(GF.shape[3])
M=int(GF.shape[1]*GF.shape[2])
GF=tf.reshape(GF,(M,N))
GF_corr=tf.matmul(tf.transpose(GF),GF)
self.global_loss = tf.reduce_sum(((GF_corr-self.global_data)**2)/((2*M*N)**2))
self.global_loss *= args.global_weight
# total loss
self.loss = self.local_loss + self.content_loss + self.tv_loss + self.global_loss
self.grad = tf.gradients(self.loss, self.net['img'])
tf.summary.scalar('loss', self.loss)
self.merged = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter('./summary', tf.get_default_graph())
def evaluate(self):
sess = tf.Session()
def func(img):
self.iter += 1
current_img = img.reshape(self.content.shape).astype(np.float32) - self.pixel_mean
feed_dict = {self.net['img']:current_img, self.net['map']:self.content_mask}
loss = 0
grads = 0
local_loss = 0
content_loss = 0
tv_loss=0
global_loss=0
sess.run(tf.global_variables_initializer())
loss, grads, local_loss, content_loss, tv_loss, global_loss, summ= sess.run(
[self.loss, self.grad, self.local_loss, self.content_loss, self.tv_loss, self.global_loss, self.merged],
feed_dict=feed_dict)
self.summary_writer.add_summary(summ, self.iter)
if self.iter % 10 == 0:
out = current_img + self.pixel_mean
out = np.squeeze(out)
out = np.clip(out, 0, 255).astype('uint8')
skimage.io.imsave('outputs/%s-%d.jpg'%(self.args.output, self.iter), out)
print('Epoch:%d,loss:%f,local loss:%f,global loss:%f,content loss:%f,tv loss: %f.'%
(self.iter, loss, local_loss, global_loss, content_loss, tv_loss))
if np.isnan(grads).any():
raise OverflowError("Optimization diverged; try using a different device or parameters.")
# Return the data in the right format for L-BFGS.
return loss, np.array(grads).flatten().astype(np.float64)
return func
def run(self):
args = self.args
if args.init=='style':
Xn = self.style2
elif args.init=='content':
Xn = self.content
else:
Xn = np.random.uniform(16, 240, self.content.shape).astype(np.float32)
self.iter = 0
# Optimization algorithm needs min and max bounds to prevent divergence.
data_bounds = np.zeros((np.product(Xn.shape), 2), dtype=np.float64)
data_bounds[:] = (0.0, 255.0)
print ("GLStyleNet: Start")
try:
Xn, *_ = scipy.optimize.fmin_l_bfgs_b(
self.evaluate(),
Xn.flatten(),
bounds=data_bounds,
factr=0.0, pgtol=0.0, # Disable automatic termination, set low threshold.
m=5, # Maximum correlations kept in memory by algorithm.
maxfun=args.iterations, # Limit number of calls to evaluate().
iprint=-1) # Handle our own logging of information.
except OverflowError:
print("The optimization diverged and NaNs were encountered.",
" - Try using a different `--device` or change the parameters.",
" - Make sure libraries are updated to work around platform bugs.")
except KeyboardInterrupt:
print("User canceled.")
except Exception as e:
print(e)
print ("GLStyleNet: Completed!")
self.summary_writer.close()
def prepare_mask(content_mask, style_mask, n):
    """Quantize two colour masks into *n* consistent one-hot class maps.

    K-means is fitted on the content-mask pixels and reused to classify
    the style-mask pixels, so matching colours receive the same class in
    both masks.  Returns two float32 one-hot arrays shaped like the
    inputs but with n channels.
    """
    from sklearn.cluster import KMeans
    content_pixels = content_mask.reshape((-1, content_mask.shape[-1]))
    style_pixels = style_mask.reshape((-1, style_mask.shape[-1]))
    clusterer = KMeans(n_clusters=n, random_state=0).fit(content_pixels)
    content_labels = clusterer.labels_.reshape(content_mask.shape[:-1])
    style_labels = clusterer.predict(style_pixels).reshape(style_mask.shape[:-1])
    # Row k of the identity matrix is the one-hot vector for class k.
    identity = np.diag([1 for _ in range(n)])
    return identity[content_labels].astype(np.float32), identity[style_labels].astype(np.float32)
def main():
    """Command-line entry point: parse arguments, load the images and
    run the GLStyleNet style transfer."""
    parser = argparse.ArgumentParser(description='GLStyleNet: transfer style of an image onto a content image.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add_arg = parser.add_argument
    add_arg('--content', default=None, type=str, help='Content image path.')
    add_arg('--content-mask', default=None, type=str, help='Content image semantic mask.')
    add_arg('--content-weight', default=10, type=float, help='Weight of content.')
    add_arg('--style', default=None, type=str, help='Style image path.')
    add_arg('--style-mask', default=None, type=str, help='Style image semantic map.')
    add_arg('--local-weight', default=100, type=float, help='Weight of local style loss.')
    add_arg('--semantic-weight', default=10, type=float, help='Weight of semantic map channel.')
    add_arg('--global-weight', default=0.1, type=float, help='Weight of global style loss.')
    add_arg('--output', default='output', type=str, help='Output image path.')
    add_arg('--smoothness', default=1E+0, type=float, help='Weight of image smoothing scheme.')
    add_arg('--init', default='content', type=str, help='Image path to initialize, "noise" or "content" or "style".')
    add_arg('--iterations', default=500, type=int, help='Number of iterations.')
    add_arg('--device', default='gpu', type=str, help='devices: "gpu"(default: all gpu) or "gpui"(e.g. gpu0) or "cpu" ')
    add_arg('--class-num', default=5, type=int, help='Count of semantic mask classes.')
    args = parser.parse_args()
    style = skimage.io.imread(args.style)
    if args.style_mask:
        style_mask = skimage.io.imread(args.style_mask)
    content = skimage.io.imread(args.content)
    if args.content_mask:
        content_mask = skimage.io.imread(args.content_mask)
    # Resize the style image to the content size when the shapes differ;
    # style2 is used when --init=style.
    if style.shape[0]==content.shape[0] and style.shape[1]==content.shape[1]:
        style2=style
    else:
        style2=skimage.transform.resize(style,(content.shape[0],content.shape[1]))
    # Semantic masks are only used when BOTH are supplied.
    if args.content_mask and args.style_mask:
        content_mask, style_mask = prepare_mask(content_mask, style_mask, args.class_num)
        model = Model(args, content, style, style2, content_mask, style_mask)
    else:
        model = Model(args, content, style, style2)
    model.run()
if __name__ == '__main__':
    # Script entry point.
    main()
|
# -*- coding: utf-8 -*-
# @Time : 2019/1/2 0002 12:47
# @Author : __Yanfeng
# @Site :
# @File : views.py
# @Software: PyCharm
from flask import redirect, render_template, url_for, request
from flask_login import login_required,current_user
from . import post
from .forms import PostForm
from apps.models import Post, db, User
@post.route('/posts')
@login_required
def posts():
    """Back-office listing of posts, newest first, 20 per page."""
    page_index = request.args.get('page', 1, type=int)
    ordered = Post.query.order_by(Post.add_time.desc())
    pagination = ordered.paginate(page_index, per_page=20, error_out=False)
    items = pagination.items
    return render_template('back/posts.html', title='博文列表', page_title='欢迎来到博文列表', post=items, paginations=pagination)
@post.route('/post', methods=['POST', 'GET'])
@login_required
def post_():
    """Create a new blog post from the submitted PostForm; GET (or an
    invalid submission) renders the empty form."""
    form = PostForm()
    if form.validate_on_submit():
        my_post = Post()
        my_post.title = form.title.data
        my_post.body = form.body.data
        my_post.desc = form.desc.data
        # NOTE(review): author_id is hard-coded to 2; current_user is
        # imported above, so presumably this should be current_user.id —
        # confirm before changing.
        my_post.author_id = 2
        db.session.add(my_post)
        db.session.commit()
        return redirect(url_for('post.posts'))
    return render_template('back/post.html', title='添加博文', page_title='添加博文', form=form)
@post.route('/detail/<int:post_id>', methods=['POST', 'GET'])
def details(post_id):
    """Front-end detail page for one post; 404 when the id is unknown."""
    found = Post.query.get_or_404(int(post_id))
    return render_template('front/detaile.html', title=found.title, post=found)
|
# -----------------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 8/22/2018 5:27 PM
# @Author : sunyonghai
# @Software: ZJ_AI
# -----------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import cv2
import numpy as np
import PIL.Image
import os
import random
from lxml.etree import Element, SubElement, tostring
from xml.dom.minidom import parseString
# read image from disk
def read_image(path, mode=None):
    """Read an image from disk with OpenCV, preserving any alpha channel.

    Parameters
    ----------
    path : str
        Image file path.
    mode : unused
        Kept for backward compatibility with older callers.

    Returns a copy of the decoded array.

    Raises IOError when the file cannot be read or decoded.  (cv2.imread
    signals failure by returning None rather than raising; the original
    then crashed with AttributeError on None.copy(), or UnboundLocalError
    if imread itself raised.)
    """
    image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if image is None:
        raise IOError('cannot read image: {}'.format(path))
    return image.copy()
# laod image
def load_image(path):
    """Load an image from *path*; thin alias for read_image."""
    return read_image(path)
def load_background(path):
    """Endlessly yield background image paths from directory *path*.

    The directory listing is shuffled once, then cycled forever.
    """
    names = os.listdir(path)
    full_paths = [os.path.join(path, name) for name in names]
    random.shuffle(full_paths)
    return itertools.cycle(full_paths)
def load_segmentation_object(path):
    """Endlessly yield segmented-object image paths from directory *path*.

    Unlike load_background, the listing order is kept as-is (the shuffle
    was deliberately disabled in the original).
    """
    names = os.listdir(path)
    object_paths = [os.path.join(path, name) for name in names]
    # random.shuffle(object_paths)
    return itertools.cycle(object_paths)
def resize_image(image, size):
    """Resize *image* to *size* (cv2 convention: (width, height))."""
    return cv2.resize(image, size)
def save_image(out_path, image):
    """Write *image* to *out_path*, creating parent directories as needed.

    Best-effort: any failure is printed and swallowed, matching the
    original behaviour.  Does nothing when out_path is None.
    """
    # try:
    #     image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
    # except Exception as ex:
    #     print(out_path)
    #
    try:
        if out_path is not None:
            out_dir = os.path.dirname(out_path)
            # BUG FIX: a bare filename has dirname '' and makedirs('')
            # raises, which previously aborted the write; only create a
            # directory when there is one.
            if out_dir and not os.path.exists(out_dir):
                os.makedirs(out_dir)
            cv2.imwrite(out_path, image)
    except Exception as ex:
        print(ex)
def create_png(mask_img, origin_img):
    """Compose an RGBA image from *origin_img* using *mask_img* as alpha.

    RGB is copied from origin_img wherever any mask channel is non-zero;
    alpha is 255 there and 0 elsewhere.  Alpha is additionally forced to
    255 where the summed mask equals 640 — a magic value kept from the
    original (presumably a saturated-mask special case; confirm).
    Returns the composed uint8 array, does not write it to disk.
    """
    height, width = origin_img.shape[0], origin_img.shape[1]
    result = np.zeros((height, width, 4), np.uint8)
    # Collapse the mask channels; non-zero means "keep this pixel".
    mask = mask_img.sum(axis=-1)
    keep = mask != 0
    result[keep, 0:3] = origin_img[keep]
    result[:, :, 3] = np.where(keep, 255, 0)
    result[:, :, 3] = np.where(mask == 640, 255, result[:, :, 3])
    return result
def find_bbox(mask):
    """Bounding box of the non-zero region of a 2-D mask.

    Returns [(minx, miny), (maxx, maxy)] with exclusive max bounds, or
    [(), ()] when the mask has no set pixels.

    BUG FIX: the original cast the per-row/column sums to uint8, which
    overflows for large regions — e.g. a column with exactly 256 set
    pixels wrapped to 0 and was treated as empty.  The cast is dropped.
    """
    col_nonzero_index = np.nonzero(mask.astype(np.double).sum(0))[0].tolist()
    row_nonzero_index = np.nonzero(mask.astype(np.double).sum(1))[0].tolist()
    if(col_nonzero_index!=[] and row_nonzero_index!=[]):
        minx = col_nonzero_index[0]
        maxx = col_nonzero_index[-1]+1  # minx + width
        miny = row_nonzero_index[0]
        maxy = row_nonzero_index[-1]+1  # miny + height
        return [(minx,miny),(maxx,maxy)]
    else:
        return [(),()]
def composite_bg(bg, obj_png):
    """Paste the opaque pixels of RGBA *obj_png* onto a copy of *bg*.

    Wherever the alpha channel of obj_png is non-zero, the RGB values
    replace the background; the background is untouched elsewhere.
    The two images must have the same height and width.  *bg* itself
    is not modified.
    """
    composed = bg.copy()
    opaque = obj_png[:, :, 3] != 0
    composed[opaque] = obj_png[opaque, :3]
    return composed
def paste_obj(bg_buffer, obj_png, box):
    """Write the opaque pixels of *obj_png* into *bg_buffer* in place.

    *box* is (row0, col0, row1, col1); the region it selects must match
    obj_png's height and width.  All channels (alpha included) are copied
    wherever the object's alpha is non-zero.  Returns the mutated buffer.
    """
    opaque = obj_png[:, :, 3] != 0
    region = bg_buffer[box[0]:box[2], box[1]:box[3], :]
    region[opaque] = obj_png[opaque]
    return bg_buffer
#
# def paste_obj(obj_png, bg_size):
# obj_height, obj_width = obj_png.shape[0:2]
# bg_buffer = np.zeros((bg_size+ (4,)), np.uint8)
#
# obj_alpha = obj_png[:,:,3]
# bg_buffer[0:obj_height,0:obj_width ,:][obj_alpha!=0] = obj_png[:,:,:][obj_alpha!=0]
# return bg_buffer
# def pasteObj(bg, obj_png):
# obj_height, obj_width = obj_png.shape[0:2]
# result = resize_image(bg, obj_height, obj_width)
#
# obj_alpha = obj_png[:,:,3]
# # for c in range(3):
# result[0:obj_height,0:obj_width ,:][obj_alpha!=0] = obj_png[:,:,:][obj_alpha!=0]
# return result
#
# root_path = '/home/syh/tf-faster-rcnn/data/fusion/'
#
# if __name__ == '__main__':
# path = os.path.join(root_path, "mask/aebs-aebsntbbt-dz-hhkw-120g/0.png")
# bg_path = os.path.join(root_path,'bg/bg_2018-05-30_10466.jpg')
#
# obj_mask_path = os.path.join(root_path,'obj_165_mask/aebs-aebsntbbt-dz-hhkw-120g/0.png')
#
# obj_im = read_image_rgb(path, 'RGBA')
# bg_im = read_image_rgb(bg_path, 'RGB')
# obj_mask_im = read_image_rgb(obj_mask_path, 'RGBA')
# img = cv2.imread(obj_mask_path, cv2.IMREAD_UNCHANGED)
# bg_im = resize_image(bg_im, obj_im.shape[:2][::-1])
#
# output_path = os.path.join(root_path,'output/JPEGImages/bg_2018-05-30_10466.jpg')
# bg_obj = pasteObj(bg_im, obj_im)
# cv2.imwrite(output_path, bg_obj)
# img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
# print(im.shape)
def make_xml(im_info, boxes,labels):
    """Build a Pascal-VOC style annotation document for one image.

    Parameters
    ----------
    im_info : object with name, image_extension, path, width, height and
        channel attributes describing the image.
    boxes : iterable of (xmin, ymin, xmax, ymax) coordinates.
    labels : iterable of class names, parallel to boxes.

    Returns an xml.dom.minidom Document (the lxml tree is serialized and
    re-parsed so callers can use writexml()).
    """
    node_root = Element('annotation')
    node_folder = SubElement(node_root, 'folder')
    node_folder.text = 'JPEGImages'
    node_filename = SubElement(node_root, 'filename')
    node_filename.text = im_info.name + '.' +im_info.image_extension
    node_path = SubElement(node_root, 'path')
    node_path.text = im_info.path
    node_size = SubElement(node_root, 'size')
    node_width = SubElement(node_size, 'width')
    node_width.text =str(im_info.width)
    node_height = SubElement(node_size, 'height')
    node_height.text =str(im_info.height)
    node_depth = SubElement(node_size, 'depth')
    node_depth.text = str(im_info.channel)
    node_segmented = SubElement(node_root, 'segmented')
    node_segmented.text = '0'
    # One <object> entry per (box, label) pair.
    for b,label in zip(boxes, labels):
        node_object = SubElement(node_root, 'object')
        node_name = SubElement(node_object, 'name')
        caption = "{}".format(label)
        node_name.text = caption
        node_pose = SubElement(node_object, 'pose')
        node_pose.text = 'Unspecified'
        node_truncated = SubElement(node_object, 'truncated')
        node_truncated.text = '0'
        node_difficult = SubElement(node_object, 'difficult')
        node_difficult.text = '0'
        node_bndbox = SubElement(node_object, 'bndbox')
        node_xmin = SubElement(node_bndbox, 'xmin')
        node_xmin.text = str(int(b[0]))
        node_ymin = SubElement(node_bndbox, 'ymin')
        node_ymin.text = str(int(b[1]))
        node_xmax = SubElement(node_bndbox, 'xmax')
        node_xmax.text = str(int(b[2]))
        node_ymax = SubElement(node_bndbox, 'ymax')
        node_ymax.text = str(int(b[3]))
    # Round-trip through a string to obtain a minidom Document.
    xml = tostring(node_root, pretty_print=True)
    dom = parseString(xml)
    return dom
def save_annotations(save_dir, im_info, boxes, labels):
    """Write a Pascal-VOC XML annotation for one image into *save_dir*.

    The file is named '<im_info.name>.xml'; the directory is created on
    demand.
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    document = make_xml(im_info, boxes, labels)
    xml_path = os.path.join(save_dir, im_info.name + '.xml')
    with open(xml_path, 'w+') as handle:
        document.writexml(handle, addindent='', newl='', encoding='utf-8')
# -*- coding: utf-8 -*-
# Copyright (c) 2010 Correl J. Roush, Gerónimo Oñativia
import base64
import os
import sys
import threading
import common
import search
import json
import xbmc
import xbmcgui
from basictypes.bytes import Bytes
_ = sys.modules["__main__"].__language__
__settings__ = sys.modules["__main__"].__settings__
BASE_ADDON_PATH = sys.modules["__main__"].BASE_ADDON_PATH
KEY_BUTTON_BACK = 275
KEY_KEYBOARD_ESC = 61467
KEY_MENU_ID = 92
EXIT_SCRIPT = (6, 10, 247, 275, 61467, 216, 257, 61448,)
CANCEL_DIALOG = EXIT_SCRIPT + (216, 257, 61448,)
UPDATE_INTERVAL = float(__settings__.getSetting('gui_refresh_interval'))
STATUS_ICONS = {'seed_pending': 'status_paused.png',
'seeding': 'status_done.png',
'fake_done': 'status_done.png',
'downloading': 'status_downloading.png'}
PLAY_REQUESTED = False
class TransmissionGUI(xbmcgui.WindowXMLDialog):
    """Main dialog: lists torrents from a Deluge backend and offers
    add / remove / start / stop actions plus a per-torrent details view."""

    def __init__(self, strXMLname, strFallbackPath, strDefaultName, bforeFallback=0):
        # Window construction is handled by WindowXMLDialog; only set up
        # our own bookkeeping here.
        self.listItems = {}
        self.torrents = {}
        self.timer = None

    @staticmethod
    def set_settings(params):
        """Persist connection parameters back into the addon settings."""
        __settings__.setSetting('torrent_deluge_host', params['host'])
        __settings__.setSetting('torrent_deluge_port', params['port'])
        __settings__.setSetting('torrent_deluge_password', params['password'])
        __settings__.setSetting('torrent_deluge_path', params['path'])

    def onInit(self):
        """Connect to Deluge when the window opens.

        Bails out (offering the settings dialog) when no download path is
        configured or the connection fails; otherwise starts the periodic
        torrent-list refresh.
        """
        if not len(__settings__.getSetting('download_path')):
            self.close()
            if xbmcgui.Dialog().yesno(_(32002), _(32903), _(32003)):
                __settings__.openSettings()
            return False
        p = xbmcgui.DialogProgress()
        p.create(_(32000), _(32001))  # 'Deluge', 'Connecting to Deluge'
        self.deluge = common.get_rpc_client()
        info = self.deluge.get_info()
        if (not info or info['result']['connected'] is not True):
            xbmc.log(json.dumps(info), xbmc.LOGDEBUG)
            p.close()
            self.close()
            message = _(32901)  # Unable to connect
            if xbmcgui.Dialog().yesno(_(32002), message, _(32003)):
                __settings__.openSettings()
            del p
            return False
        self.updateTorrents()
        p.close()
        del p
        # Kick off the periodic refresh.
        self.timer = threading.Timer(UPDATE_INTERVAL, self.updateTorrents)
        self.timer.start()

    def updateTorrents(self):
        """Refresh the torrent list control and re-arm the refresh timer."""
        torrents = self.deluge.list()
        if torrents:
            self.torrents = torrents
        list_control = self.getControl(120)  # renamed: 'list' shadowed the builtin
        keys = []
        for torrent in self.torrents:
            keys.append(torrent['id'])
            statusline = "Down: %(down)s %(pct).2f%% %(dspeed)s/s | Up: %(up)s %(uspeed)s/s | Ratio: %(ratio).2f"% \
                {'down': Bytes.format(torrent['download']), 'pct': torrent['progress'], \
                 'dspeed': Bytes.format(torrent['downspeed']), 'up': Bytes.format(torrent['upload']),
                 'uspeed': Bytes.format(torrent['upspeed']), 'ratio': torrent['ratio']}
            if torrent['progress'] == 100: torrent['status'] = 'fake_done'
            if torrent['id'] not in self.listItems:
                # Create a new list item
                l = xbmcgui.ListItem(label=torrent['name'], label2=statusline)
                self.listItems[torrent['id']] = l
                list_control.addItem(l)
            else:
                # Update existing list item
                l = self.listItems[torrent['id']]
                l.setLabel(torrent['name'])
                l.setLabel2(statusline)
            l.setProperty('TorrentID', str(torrent['id']))
            l.setProperty('TorrentStatusIcon', STATUS_ICONS.get(torrent['status'], 'status_paused.png'))
            l.setProperty('TorrentProgress', "%3d%%"%torrent['progress'])
        removed = [id for id in self.listItems.keys() if id not in keys]
        if len(removed) > 0:
            # Clear torrents from the list that have been removed
            for id in removed:
                del self.listItems[id]
            list_control.reset()
            for id in self.listItems:
                list_control.addItem(self.listItems[id])
        list_control.setEnabled(bool(self.torrents))
        # Update again, after an interval, but only if the timer has not been cancelled
        if self.timer:
            self.timer = threading.Timer(UPDATE_INTERVAL, self.updateTorrents)
            self.timer.start()

    def onClick(self, controlID):
        """Dispatch button clicks: add/remove/start/stop torrents, open
        settings, show details, or exit."""
        list_control = self.getControl(120)
        if (controlID == 111):
            # Add torrent
            engines = [
                (_(32200), None),
                (_(32204), search.Kickass),
                (_(32208), search.EZTV),
                (_(32202), search.TPB),
                (_(32205), search.L337x),
                (_(32206), search.YTS),
                (_(32207), search.Lime),
            ]
            selected = xbmcgui.Dialog().select(_(32000), [i[0] for i in engines])
            if selected < 0:
                return
            engine = engines[selected][1]
            if not engine:
                # Direct URL / magnet entry.
                filename = xbmcgui.Dialog().input(_(32000), '', xbmcgui.INPUT_ALPHANUM)
                if (len(filename)):
                    self.deluge.add_url(filename, __settings__.getSetting('download_path'))
            else:
                # Search with the chosen engine.
                kb = xbmc.Keyboard(__settings__.getSetting('last_search'), engines[selected][0])
                kb.doModal()
                if not kb.isConfirmed():
                    return
                terms = kb.getText()
                __settings__.setSetting('last_search', terms)
                p = xbmcgui.DialogProgress()
                p.create(_(32000), _(32290))
                try:
                    results = engine().search(terms)
                except:
                    p.close()
                    xbmcgui.Dialog().ok(_(32000), _(32292))
                    return
                p.close()
                del p
                if not results:
                    xbmcgui.Dialog().ok(_(32000), _(32291))
                    return
                selected = xbmcgui.Dialog().select(_(32000),
                                                   ['[S:%d L:%d] %s'%(t['seeds'], t['leechers'], t['name']) for t in
                                                    results])
                if selected < 0:
                    return
                try:
                    self.deluge.add_url(results[selected]['url'], __settings__.getSetting('download_path'))
                except:
                    xbmcgui.Dialog().ok(_(32000), _(32293))
                    return
        if (controlID == 112):
            # Remove selected torrent
            item = list_control.getSelectedItem()
            if item and xbmcgui.Dialog().yesno(_(32000), 'Remove \'%s\'?'%item.getLabel()):
                remove_data = xbmcgui.Dialog().yesno(_(32000), 'Remove data as well?')
                if remove_data:
                    self.deluge.action_simple('removedata', item.getProperty('TorrentID'))
                else:
                    self.deluge.action_simple('remove', item.getProperty('TorrentID'))
        if (controlID == 113):
            # Stop selected torrent
            item = list_control.getSelectedItem()
            if item:
                self.deluge.action_simple('stop', item.getProperty('TorrentID'))
        if (controlID == 114):
            # Start selected torrent
            item = list_control.getSelectedItem()
            if item:
                self.deluge.action_simple('start', item.getProperty('TorrentID'))
        if (controlID == 115):
            # Stop all torrents
            for torrent in self.torrents:
                self.deluge.action_simple('stop', torrent['id'])
        if (controlID == 116):
            # Start all torrents
            for torrent in self.torrents:
                self.deluge.action_simple('start', torrent['id'])
        if (controlID == 118):
            # Settings button
            prev_settings = common.get_settings()
            __settings__.openSettings()
            p = xbmcgui.DialogProgress()
            p.create(_(32000), _(32001))  # 'Transmission', 'Connecting to Transmission'
            try:
                self.deluge = common.get_rpc_client()
                self.updateTorrents()
                p.close()
            except:
                p.close()
                xbmcgui.Dialog().ok(_(32002), _(32901))
                # restore settings
                self.set_settings(prev_settings)
                try:
                    self.deluge = common.get_rpc_client()
                # BUG FIX: the original used 'except err:', which raises
                # NameError (err is undefined) the moment an exception
                # actually occurs here.
                except Exception:
                    xbmcgui.Dialog().ok(_(32002), _(32901))
                    self.close()
            del p
        if (controlID == 120):
            global PLAY_REQUESTED
            # A torrent was chosen, show details
            item = list_control.getSelectedItem()
            w = TorrentInfoGUI("script-Deluge-details.xml", __settings__.getAddonInfo('path'), "Default")
            w.setTorrent(self.deluge, item.getProperty('TorrentID'))
            w.doModal()
            del w
            if PLAY_REQUESTED:
                PLAY_REQUESTED = False
                self.close()
        if (controlID == 117):
            # Exit button
            self.close()

    def onFocus(self, controlID):
        # if controlID == 111 and xbmc.Player().isPlaying():
        #     xbmc.executebuiltin(
        #         "Notification(%s,%s,1500,%s)"%(_(32000), _(32005), BASE_ADDON_PATH + "/notification.png"))
        #     self.close()
        pass

    def onAction(self, action):
        """Close on any cancel/back key or the context-menu key."""
        if (action.getButtonCode() in CANCEL_DIALOG) or (action.getId() == KEY_MENU_ID):
            self.close()

    def close(self):
        """Stop the refresh timer and tear down the list before closing."""
        if self.timer:
            self.timer.cancel()
            self.timer.join()
        self.listItems.clear()
        self.getControl(120).reset()
        super(TransmissionGUI, self).close()
class TorrentInfoGUI(xbmcgui.WindowXMLDialog):
def __init__(self, strXMLname, strFallbackPath, strDefaultName, bforeFallback=0):
global PLAY_REQUESTED
PLAY_REQUESTED = False
self.deluge = None
self.torrent_id = None
self.torrent = None
self.list = {}
self.timer = None
def setTorrent(self, deluge, t_id):
self.listItems = {}
self.deluge = deluge
self.torrent_id = t_id
self.timer = threading.Timer(UPDATE_INTERVAL, self.updateTorrent)
self.timer.start()
def updateTorrent(self):
pbar = self.getControl(219)
list = self.getControl(220)
labelName = self.getControl(1)
labelDownload = self.getControl(2)
labelUpload = self.getControl(5)
labelETA = self.getControl(4)
labelProgress = self.getControl(11)
torrents = self.deluge.list()
if torrents is not False:
for torrent in torrents:
if torrent['id'] == self.torrent_id: break
self.torrent = torrent
labelName.setLabel(torrent['name'])
download_line = "Done %(down)s (%(pct).2f%%) | Speed %(dspeed)s/s"% \
{'down': Bytes.format(torrent['download']), 'pct': torrent['progress'],
'dspeed': Bytes.format(torrent['downspeed'])}
labelDownload.setLabel(download_line)
upload_line = "Sent %(up)s | Speed %(uspeed)s/s | Ratio: %(ratio).2f"% \
{'up': Bytes.format(torrent['upload']), 'uspeed': Bytes.format(torrent['upspeed']),
'ratio': torrent['ratio']}
labelUpload.setLabel(upload_line)
if torrent['status'] is 'downloading':
eta_line = "%(eta)s"%{'eta': self.formatEta(int(torrent['eta']))}
else:
eta_line = "inf."
labelETA.setLabel(eta_line)
labelProgress.setLabel('%3d%%'%(torrent['progress']))
pbar.setPercent(torrent['progress'])
files = self.deluge.listfiles(self.torrent_id) # [[path, percent, x['index'], size],...]
if files is not None:
for file in files:
if file[2] not in self.listItems.keys():
# Create a new list item
l = xbmcgui.ListItem(label=file[0])
list.addItem(l)
self.listItems[file[2]] = l
else:
# Update existing list item
l = self.listItems[file[2]]
l.setProperty('Progress', '[%3d%% of %s]'%(file[1], file[3]))
# Update again, after an interval
self.timer = threading.Timer(UPDATE_INTERVAL, self.updateTorrent)
self.timer.start()
def onInit(self):
    """Called by XBMC once the window is loaded; do an immediate first refresh."""
    self.updateTorrent()
def close(self):
    """Stop the refresh timer, clear the file list and close the window."""
    if self.timer:
        # Cancel any pending refresh and wait for a running one to finish,
        # so no callback touches controls after the window is gone.
        self.timer.cancel()
        self.timer.join()
    self.listItems.clear()
    self.getControl(220).reset()
    super(TorrentInfoGUI, self).close()
def onAction(self, action):
    """Close the window on cancel/back buttons or the menu key."""
    # CANCEL_DIALOG holds the button codes that dismiss this dialog.
    if (action.getButtonCode() in CANCEL_DIALOG) or (action.getId() == KEY_MENU_ID):
        self.close()
    pass
def onClick(self, controlID):
    """Handle clicks: control 111 closes; control 220 plays the selected file."""
    global PLAY_REQUESTED
    if controlID == 111:
        self.close()
    if controlID == 220:
        item = self.getControl(220).getSelectedItem()
        # Toast which file is about to be played.
        xbmc.executebuiltin("Notification(%s,%s,1500,%s)"%(
            _(32000), _(32004) + item.getLabel(), BASE_ADDON_PATH + "/notification.png"))
        # Build a fresh single-item video playlist from the torrent's
        # download directory and start playback.
        playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
        playlist.clear()
        playlist.add(os.path.join(self.torrent['dir'], item.getLabel()))
        xbmc.Player().play(playlist)
        # Signal the caller that playback was started from this dialog.
        PLAY_REQUESTED = True
        self.close()
def onFocus(self, controlID):
    """No focus handling needed; required by the window callback interface."""
    pass
def formatEta(self, eta, granularity=2):
    """Render a number of seconds as a human-readable duration.

    Breaks `eta` down into weeks/days/hours/minutes/seconds, skips
    zero-valued units, singularizes a unit name when its amount is 1,
    and keeps only the `granularity` largest non-zero units.
    """
    units = (
        ('weeks', 604800),   # 60 * 60 * 24 * 7
        ('days', 86400),     # 60 * 60 * 24
        ('hours', 3600),     # 60 * 60
        ('minutes', 60),
        ('seconds', 1),
    )
    parts = []
    remaining = eta
    for unit_name, unit_seconds in units:
        amount, remaining = divmod(remaining, unit_seconds)
        if amount:
            label = unit_name.rstrip('s') if amount == 1 else unit_name
            parts.append("{} {}".format(amount, label))
    return ', '.join(parts[:granularity])
|
# Demonstrates Python's chained comparisons:
# `i < j + 5 > j ** 5` evaluates as `(i < j + 5) and (j + 5 > j ** 5)`.
i=3
j=2
print(i < j + 5 > j ** 5)
#False
#3<2+5 is True and 7>2**5 is False
"""
Module used to test the base configuration class.
"""
import os
import pytest
from clickandobey.dockerized.webservice.configuration.webservice_configuration import WebserviceConfiguration
@pytest.mark.unit
@pytest.mark.WebserviceConfiguration
class TestWebserviceConfiguration:
    """
    Class used to test the WebserviceConfiguration class.
    """

    @staticmethod
    def __clean_env_vars() -> None:
        """
        Clean out environment variables between test runs.
        """
        for variable in (
            "ENVIRONMENT",
            "USER",
            "VERSION",
        ):
            # pop() is a no-op when the variable is absent — avoids the
            # check-then-delete race of the old getenv()/del pair.
            os.environ.pop(variable, None)

    def test_configuration(self):
        """
        Test to ensure the configuration works as expected.
        """
        try:
            # Scenario 1: fully explicit configuration.
            configuration = WebserviceConfiguration("1.0.0", "env", {"foo": "bar"})
            assert configuration.version == "1.0.0", "Failed to get the right configuration version."
            assert configuration.environment == "env", "Failed to get the right configuration environment."
            assert configuration.config == {"foo": "bar"}, "Failed to get the right configuration config."
            expected_dict = {
                "Version": "1.0.0",
                "Environment": "env",
                "Configuration": {
                    "foo": "bar"
                }
            }
            assert configuration.to_dict() == expected_dict, "Failed to match the expected dictionary output for the config"
            # Scenario 2: defaults derived from the USER environment variable.
            self.__clean_env_vars()
            os.environ["USER"] = "test"
            configuration = WebserviceConfiguration()
            assert configuration.version == "1.0.test", "Failed to get the right configuration version."
            assert configuration.environment == "localhost", "Failed to get the right configuration environment."
            assert configuration.config == {}, "Failed to get the right configuration config."
            expected_dict = {
                "Version": "1.0.test",
                "Environment": "localhost",
                "Configuration": {}
            }
            assert configuration.to_dict() == expected_dict, "Failed to match the expected dictionary output for the config"
            # Scenario 3: defaults derived from ENVIRONMENT/VERSION variables.
            self.__clean_env_vars()
            os.environ["ENVIRONMENT"] = "configTest"
            os.environ["VERSION"] = "1.2.3"
            configuration = WebserviceConfiguration()
            assert configuration.version == "1.2.3", "Failed to get the right configuration version."
            assert configuration.environment == "configTest", "Failed to get the right configuration environment."
            assert configuration.config == {}, "Failed to get the right configuration config."
            expected_dict = {
                "Version": "1.2.3",
                "Environment": "configTest",
                "Configuration": {}
            }
            assert configuration.to_dict() == expected_dict, "Failed to match the expected dictionary output for the config"
        finally:
            # BUG fix: ENVIRONMENT/VERSION set by the last scenario previously
            # leaked into any test run afterwards; always clean up.
            self.__clean_env_vars()
|
from blueprints import create_app
from config import config

# Build the application from the blueprint factory with the project-wide
# configuration. `api` is the module-level app instance a WSGI server imports.
api = create_app(config)
# -*- coding: utf-8 -*-
from fabric.api import env, run, local, settings
from datetime import datetime
def github(m='Commit something to master...'):
    '''github:m = "COMMIT LOGGING"\t(default as 'Commit something to master...')
    '''
    # BUG fix: the `{msg}` placeholder was never substituted, so every commit
    # message was the literal string "{msg}". Interpolate the argument.
    local(('pwd'
           '&& git add -A'
           '&& git commit -am "{msg}"'
           '&& git push').format(msg=m))
# -*- coding: utf-8 -*-
import abc
class BaseLearningRate(metaclass=abc.ABCMeta):
    """Interface for learning-rate schedules."""

    @abc.abstractmethod
    def get(self, t):
        """ Get the learning rate for time t """


class ConstantLearningRate(BaseLearningRate):
    """Schedule that always yields the same rate."""

    def __init__(self, lr):
        self.lr = lr

    def get(self, t):
        """Return the fixed rate; `t` is ignored."""
        return self.lr


class ExponentialLearningRate(BaseLearningRate):
    """Geometric interpolation from lr_start at t=0 to lr_end at t=steps."""

    def __init__(self, lr_start, lr_end, steps):
        self.lr_start = lr_start
        self.lr_end = lr_end
        self.steps = steps

    def get(self, t):
        """Return lr_start * (lr_end / lr_start) ** (t / steps)."""
        decay_ratio = self.lr_end / self.lr_start
        progress = t / self.steps
        return self.lr_start * decay_ratio ** progress
|
from dotenv import load_dotenv
import requests
import json
from dict2xml import dict2xml
import os
# Resolve the project-level .env file (one directory above this file) and
# load it into the process environment.
file_path = os.path.dirname(os.path.abspath(__file__))
env_path = os.path.join(file_path, "..", ".env")
load_dotenv(env_path)
# Jamf Cloud connection settings pulled from the environment.
# NOTE(review): "SEVER_NAME" looks like a typo for "SERVER_NAME" — confirm it
# matches the key actually used in the .env file before renaming either side.
server_name = os.getenv("SEVER_NAME")
user_name = os.getenv("USER_NAME")
password = os.getenv("PASSWORD")
device_serial_number = os.getenv("DEVICE_SERIALNUMBER")
def test_erase_device():
    """Queue an erase-device MDM command on Jamf Pro for the configured device.

    Authenticates with basic auth to obtain a bearer token, loads the command
    body from a JSON fixture, retargets it at `device_serial_number`, converts
    it to XML and POSTs it to the classic JSSResource endpoint. Asserts the
    API answered 201 Created (command queued).
    """
    # Obtain a bearer token using basic auth.
    url = "https://" + server_name + ".jamfcloud.com/api/v1/auth/token"
    headers = {"Accept": "application/json"}
    response = requests.request(
        "POST", url, headers=headers, auth=(user_name, password)
    ).json()
    token = response.get("token")
    # BUG fix: the fixture file handle was never closed; use a context manager.
    with open(
        os.path.join(file_path, "json_data", "test_erase_device.json"), encoding="utf-8"
    ) as json_file:
        data = json.load(json_file)
    # Retarget the command at the configured device serial number.
    data["mobile_device_command"]["mobile_devices"]["mobile_device"][
        "name"
    ] = device_serial_number
    data = dict2xml(data)
    url = (
        "https://"
        + server_name
        + ".jamfcloud.com/JSSResource/mobiledevicecommands/command"
    )
    headers = {"Accept": "application/json", "Authorization": "Bearer " + token}
    result = requests.request("POST", url, headers=headers, data=data.encode())
    # 201 Created indicates the command was accepted and queued.
    assert result.status_code == 201


if __name__ == "__main__":
    test_erase_device()
|
from re import findall
from typing import Dict, List, Optional, Tuple, Union
from PIL import Image, ImageFont, ImageColor
from .errors import (
FontNotFound,
InvalidColorFormat,
InvalidFieldLength,
InvalidFormatOption,
InvalidProfilePicturePath,
InvalidProfilePictureDimensions,
InvalidTweetName,
InvalidTweetText,
InvalidUsername,
InvalidUserTag,
MissingGraphicField,
MissingGraphicSettings,
MissingDictKeys
)
from .type_interfaces import DefaultFormats, GraphicSettings, TweetInfo
def __validate_dict_keys(
    dict_data: Union[TweetInfo, GraphicSettings],
    typed_dict: Union[TweetInfo, GraphicSettings],
    dict_name: str,
) -> None:
    """Validate that a `graphic_settings` or `tweet_info` dictionary has all required fields.

    Parameters
    ----------
    dict_data : Union[TweetInfo, GraphicSettings]
        Dictionary to be validated.
    typed_dict : Union[TweetInfo, GraphicSettings]
        TypedDict that defines the required fields.
    dict_name : str
        Name of the dictionary under validation (tweet_info or graphic_settings).

    Raises
    ------
    MissingDictKeys
        Raised when the passed dictionary is missing one or more required fields.
    """
    # BUG fix: the old if/elif had two identical branches and left `keys`
    # unbound (NameError) for any other dict_name. The required keys always
    # come from the TypedDict's annotations.
    keys = typed_dict.__annotations__.keys()
    # Keys actually supplied by the user.
    provided_keys = dict_data.keys()
    # Required keys the user did not provide.
    missing_keys = [key for key in keys if key not in provided_keys]
    if missing_keys:
        error_msg = f"The `{dict_name}` dictionary must include the keys:\n\t{keys}.\n\tYou are missing {missing_keys}"
        raise MissingDictKeys(error_msg)
def __validate_font_family(value: str, error_msg: str) -> str:
    """Validate the font family chosen.

    Parameters
    ----------
    value : str
        Font family name to be validated (with or without the .ttf extension).
    error_msg : str
        Error message displayed for invalid font families.

    Returns
    -------
    str
        Font family name validated (including the .ttf file extension).

    Raises
    ------
    FontNotFound
        Raised when the font is not found on the user's machine.
    """
    # If the user has only passed the font name, add the file extension
    if len(value.split(".")) == 1:
        value += ".ttf"
    # Fix: loading the font is only a validity probe — the old code kept the
    # result in an unused `dummy_font` variable.
    try:
        ImageFont.truetype(value, 1, encoding="utf-8")
    except OSError:
        raise FontNotFound(error_msg)
    return value
def __validate_integer_fields(value: int, error_msg: str) -> int:
    """Coerce a dictionary value to int.

    Parameters
    ----------
    value : int
        Value to be validated (anything accepted by `int()`).
    error_msg : str
        Error message used when the value cannot be converted.

    Returns
    -------
    int
        The converted value.

    Raises
    ------
    TypeError
        Raised when the value cannot be cast to int.
    """
    try:
        return int(value)
    except ValueError:
        raise TypeError(error_msg)
def __validate_size(
    dimensions: List[int],
    error_msg_length: str,
    error_msg_type: str
) -> List[int]:
    """Validate the [width, height] list describing the graphic size.

    Parameters
    ----------
    dimensions : List[int]
        List of two integers (width and height).
    error_msg_length : str
        Error message when the list does not hold exactly two values.
    error_msg_type : str
        Error message when a value is not a valid integer.

    Returns
    -------
    List[int]
        The validated [width, height] pair.

    Raises
    ------
    InvalidFieldLength
        Raised when the list has more or less than two values.
    """
    # Length first, then element-wise integer validation.
    if len(dimensions) != 2:
        raise InvalidFieldLength(error_msg_length)
    return [
        __validate_integer_fields(dimension, error_msg_type)
        for dimension in dimensions
    ]
def __validate_profile_pic_size(
    dimensions: List[int],
    user_pic: str,
    error_msg_length: str,
    error_msg_type: str
) -> List[int]:
    """Validate the [width, height] list for the final profile-picture size.

    Parameters
    ----------
    dimensions : List[int]
        List of two integers (width and height); either may be None.
    user_pic : str
        Path to the user profile picture ("" means no picture).
    error_msg_length : str
        Error message when the list does not hold exactly two values.
    error_msg_type : str
        Error message when a value is not a valid integer or the sides differ.

    Returns
    -------
    List[int]
        The validated size, or [0, 0] when there is no picture / no size.

    Raises
    ------
    InvalidFieldLength
        Raised when the list has more or less than two values.
    InvalidProfilePictureDimensions
        Raised when width differs from height (the picture must be square).
    """
    if len(dimensions) != 2:
        raise InvalidFieldLength(error_msg_length)
    # No profile picture, or no usable dimensions: size collapses to zero so
    # it is ignored by later layout calculations.
    if user_pic == "" or None in dimensions:
        return [0, 0]
    width = __validate_integer_fields(dimensions[0], error_msg_type)
    height = __validate_integer_fields(dimensions[1], error_msg_type)
    if width != height:
        raise InvalidProfilePictureDimensions(error_msg_type)
    return [width, height]
def __validate_rgba(
    rgba_color: str,
    error_msg: str
) -> str:
    """Convert an RGBA string with a 0-1 alpha into one with a 0-255 alpha.

    Parameters
    ----------
    rgba_color : str
        The RGBA color, e.g. "rgba(10, 20, 30, 0.5)".
    error_msg : str
        Error message used for any malformed component.

    Returns
    -------
    str
        The same color with its transparency rescaled to the 0-255 range.

    Raises
    ------
    InvalidColorFormat
        Raised when the string does not parse or a component is out of range.
    """
    rgba_pattern = r'^rgba?\((\d+),\s*(\d+),\s*(\d+)(?:,\s*(\d+(?:\.\d+)?))?\)$'
    matches = findall(rgba_pattern, rgba_color)
    if not matches:
        raise InvalidColorFormat(error_msg)
    channels = matches[0]
    if len(channels) != 4:
        raise InvalidColorFormat(error_msg)
    red, green, blue, alpha = channels
    # Each color channel must fit in a byte.
    for channel in (red, green, blue):
        if not 0 <= int(channel) <= 255:
            raise InvalidColorFormat(error_msg)
    # The alpha component is given in the 0-1 range.
    alpha_value = float(alpha)
    if not 0 <= alpha_value <= 1:
        raise InvalidColorFormat(error_msg)
    # Rescale the alpha to 0-255 so PIL accepts the color.
    alpha_255 = int(alpha_value * 255)
    return f"rgba({red}, {green}, {blue}, {alpha_255})"
def __validate_color(
    color: str,
    error_msg: str
) -> str:
    """Validate a single color string.

    Parameters
    ----------
    color : str
        Color to be validated (RGBA string or anything PIL can parse).
    error_msg : str
        Error message for invalid colors.

    Returns
    -------
    str
        The validated color (RGBA string, or the RGB tuple PIL derived).

    Raises
    ------
    InvalidColorFormat
        Raised for invalid color strings.
    """
    # RGBA strings get the dedicated range/format validation.
    if color.startswith("rgba"):
        return __validate_rgba(color, error_msg)
    # Anything else is delegated to PIL by converting it to RGB.
    # Fix: the exception was bound to an unused `e`; drop the binding.
    try:
        return ImageColor.getrgb(color)
    except ValueError:
        raise InvalidColorFormat(error_msg)
def __validate_color_scheme(
    value: List[str],
    error_msg_size: str,
    error_msg_color_format: str
) -> List[str]:
    """Validate the [background, text] color scheme of the graphic.

    Parameters
    ----------
    value : List[str]
        Two colors: background (may be None for transparency) and text.
    error_msg_size : str
        Error message when the list does not hold exactly two values.
    error_msg_color_format : str
        Error message when either color is invalid.

    Returns
    -------
    List[str]
        The validated color scheme.

    Raises
    ------
    InvalidFieldLength
        Raised if the list does not have exactly two entries.
    InvalidColorFormat
        Raised if either color is not a valid Hexadecimal or RGBA value.
    """
    if len(value) != 2:
        raise InvalidFieldLength(error_msg_size)
    bg_color = value[0]
    text_color = value[1]
    # Idiom fixes: `is not None` instead of `!= None`, isinstance() instead
    # of `type(...) != str`. The background may be None (transparent).
    if not isinstance(bg_color, str) and bg_color is not None:
        raise InvalidColorFormat(error_msg_color_format)
    if not isinstance(text_color, str):
        raise InvalidColorFormat(error_msg_color_format)
    if bg_color is None:
        bg_color_validated = None
    else:
        bg_color_validated = __validate_color(bg_color, error_msg_color_format)
    text_color_validated = __validate_color(text_color, error_msg_color_format)
    # If no exception was raised, the colors are valid.
    return [bg_color_validated, text_color_validated]
def __validate_float_fields(value: float, error_msg: str) -> float:
    """Coerce a dictionary value to float.

    Parameters
    ----------
    value : float
        Value to be validated (anything accepted by `float()`).
    error_msg : str
        Error message used when the value cannot be converted.

    Returns
    -------
    float
        The converted value.

    Raises
    ------
    TypeError
        Raised when the value cannot be cast to float.
    """
    try:
        return float(value)
    except ValueError:
        raise TypeError(error_msg)
def validate_settings_existence(g_settings: GraphicSettings, def_settings: str) -> None:
    """Validate that there are either custom or default graphic settings to use.

    The user must either pass a dictionary of custom settings, or an empty
    dictionary together with the name of a default settings format.

    Parameters
    ----------
    g_settings : GraphicSettings
        Custom graphic settings (possibly empty).
    def_settings : str
        Default settings option chosen.

    Raises
    ------
    MissingGraphicSettings
        Raised when the dictionary is empty and no default format was chosen.
    """
    # Check if an empty dictionary was passed as the custom graphic settings
    custom_settings_empty = g_settings == dict()
    # Check if no default settings format was chosen (i.e., the user wants custom settings)
    custom_format_chosen = def_settings == DefaultFormats.CUSTOM.value
    # Idiom fix: branch on the boolean directly instead of `== True`.
    # Both conditions true means there are no settings at all to work with.
    if custom_settings_empty and custom_format_chosen:
        raise MissingGraphicSettings(
            'You did not pass custom settings (`graphic_settings`) nor a default settings format (`default_settings_format`).\n\tYou can either specify your own settings in a dictionary or, if you don\'t want that, pass an empty dictionary and specify a default format: "lyrics" or "quote".\n\tYou can call the `settings_help` method for indications on the fields needed for custom settings.'
        )
def validate_format_option(format_option: str) -> str:
    """Validate that the user chose an existing default settings option.

    Parameters
    ----------
    format_option : str
        Default settings format chosen (case-insensitive).

    Returns
    -------
    str
        The validated, lower-cased settings format name.

    Raises
    ------
    InvalidFormatOption
        Raised when the default settings format name does not exist.
    """
    valid_options = [option.value for option in DefaultFormats]
    normalized = format_option.lower()
    if normalized in valid_options:
        return normalized
    # Build the error with only the user-selectable (non-empty) options.
    avail_options = [option for option in valid_options if option != ""]
    error_msg = f"You chose an invalid default graphic settings format.\n\tPlease choose one of this: {avail_options}"
    raise InvalidFormatOption(error_msg)
def validate_g_settings(
    tweet_info: TweetInfo,
    g_settings: GraphicSettings
) -> GraphicSettings:
    """Validate a complete `graphic_settings` dictionary.

    Each field is checked by its dedicated helper; the helpers raise on
    invalid input and return a normalized value otherwise.

    Parameters
    ----------
    tweet_info : TweetInfo
        Dictionary with the necessary information about the tweet.
    g_settings : GraphicSettings
        Dictionary of graphic settings.

    Returns
    -------
    GraphicSettings
        Validated dictionary, same shape as the input.
    """
    # Validate if the dictionary has all the required fields
    __validate_dict_keys(g_settings, GraphicSettings, "graphic_settings")
    # The profile-picture path decides how the picture size is validated.
    user_pic = tweet_info["user_pic"]
    font_family_error_msg = f"The font {g_settings['font_family']} was not in found in your machine.\n\tPlease note you can provide an absolute path to your font if needed."
    font_family_validated = __validate_font_family(
        g_settings["font_family"], font_family_error_msg
    )
    font_size_error_msg = (
        "Please provide a number for the font size (preferably an integer)."
    )
    font_size_header_validated = __validate_integer_fields(
        g_settings["font_size_header"], font_size_error_msg
    )
    font_size_text_validated = __validate_integer_fields(
        g_settings["font_size_text"], font_size_error_msg
    )
    size_error_msg_type = "Please provide a list of two integers for the width and height of the graphic."
    size_error_msg_length = "Please provide two measures for the graphic size: a one for the width and a second for the height."
    size_validated = __validate_size(
        g_settings["size"], size_error_msg_length, size_error_msg_type
    )
    prof_pic_error_msg_type = "Please provide a list of two integers for the final width and height of the profile picture, taking into account the width and the height must be the same."
    prof_pic_error_msg_length = "Please provide two measures for the profile picture size: one for the width and a second for the height."
    profile_pic_size_validated = __validate_profile_pic_size(
        g_settings["profile_pic_size"], user_pic, prof_pic_error_msg_length, prof_pic_error_msg_type
    )
    color_scheme_error_msg_format = (
        "Please provide either Hexadecimal or RGBA values for the background and text colors, as strings. The transparency value for RGBA colors must be a number between 0 and 1, where 0 is completely transparent and 1 is completely opaque. However, the background color can also be `None` for transparent backgrounds."
    )
    color_scheme_error_msg_length = "Please provide two colors for the color scheme: a first one for the background and a second for the text."
    color_scheme_validated = __validate_color_scheme(
        g_settings["color_scheme"],
        color_scheme_error_msg_length,
        color_scheme_error_msg_format,
    )
    wrap_limit_error_msg = "Please provide a number for the maximum number of characters to include in each line of the graphic text (preferably an integer)."
    wrap_limit_validated = __validate_integer_fields(
        g_settings["wrap_limit"], wrap_limit_error_msg
    )
    margin_bottom_error_msg = (
        "Please provide a number (float or int) for the margin bottom."
    )
    margin_bottom_validated = __validate_float_fields(
        g_settings["margin_bottom"], margin_bottom_error_msg
    )
    # Reassemble the validated values in the same shape as the input dict.
    validated_settings = {
        "font_family": font_family_validated,
        "font_size_header": font_size_header_validated,
        "font_size_text": font_size_text_validated,
        "size": size_validated,
        "profile_pic_size": profile_pic_size_validated,
        "color_scheme": color_scheme_validated,
        "wrap_limit": wrap_limit_validated,
        "margin_bottom": margin_bottom_validated
    }
    return validated_settings
def __validate_tweet_name(tweet_name: str, error_msg: str) -> str:
    """Ensure the tweet name (used to name the output graphic) is non-empty.

    Parameters
    ----------
    tweet_name : str
        Tweet's name.
    error_msg : str
        Error message to display for an invalid name.

    Returns
    -------
    str
        The validated tweet name.

    Raises
    ------
    InvalidTweetName
        Raised when the name is an empty string.
    """
    if tweet_name == "":
        raise InvalidTweetName(error_msg)
    return tweet_name
def __validate_username(username: str, error_msg: str) -> str:
    """Ensure the tweet's username is non-empty and at most 50 characters.

    Parameters
    ----------
    username : str
        Tweet's username.
    error_msg : str
        Error message to display for an invalid username.

    Returns
    -------
    str
        The validated username.

    Raises
    ------
    InvalidUsername
        Raised when the username is empty or longer than 50 characters.
    """
    blank = username == ""
    too_long = len(username) > 50
    if blank or too_long:
        raise InvalidUsername(error_msg)
    return username
def __validate_user_tag(user_tag: str, error_msg: str) -> str:
    """Validate the tweet's user tag/handle and normalize it to start with "@".

    Parameters
    ----------
    user_tag : str
        Tweet's user tag/handle, with or without the leading "@".
    error_msg : str
        Error message to display for an invalid user tag.

    Returns
    -------
    str
        Validated user tag, always prefixed with "@".

    Raises
    ------
    InvalidUserTag
        Raised for an invalid user tag.
    """
    # BUG fix: the old pattern (r"[\w]{1,15}") was unanchored, so any string
    # containing a single word character passed — including over-long or
    # punctuation-riddled tags. Anchor it and allow one optional leading "@";
    # Twitter handles are 1-15 word characters.
    tag_pattern = r"^@?\w{1,15}$"
    if findall(tag_pattern, user_tag) == list():
        raise InvalidUserTag(error_msg)
    if user_tag[0] == "@":
        return user_tag
    return "@" + user_tag
def __validate_user_pic(user_pic_path: str, error_msg: str) -> str:
    """Validate the path to the user's profile picture.

    Parameters
    ----------
    user_pic_path : str
        Path to the profile picture ("" means no picture).
    error_msg : str
        Error message to display for an invalid picture path.

    Returns
    -------
    str
        The validated profile picture path (possibly "").

    Raises
    ------
    InvalidProfilePicturePath
        Raised when the path cannot be opened as an image.
    """
    # An empty path means "no profile picture" — nothing to validate.
    if user_pic_path == "":
        return user_pic_path
    try:
        # Opening the image is only a validity probe.
        # Fixes: the bare `except:` is narrowed to Exception (so e.g.
        # KeyboardInterrupt is not swallowed) and the opened image is now
        # closed instead of leaking the file handle.
        with Image.open(user_pic_path, "r"):
            pass
        return user_pic_path
    except Exception:
        raise InvalidProfilePicturePath(error_msg)
def __validate_tweet_text(tweet_text: str, error_msg: str) -> str:
    """Validate the tweet's text (at most 280 characters).

    Parameters
    ----------
    tweet_text : str
        Tweet text to be validated.
    error_msg : str
        Error message to display for invalid tweet text.

    Returns
    -------
    str
        The validated tweet text.

    Raises
    ------
    InvalidTweetText
        Raised when the text exceeds 280 characters.
    """
    if len(tweet_text) > 280:
        # BUG fix: the exception was constructed with the tweet text itself
        # instead of the supplied error message.
        raise InvalidTweetText(error_msg)
    # BUG fix: the original ended with a bare `tweet_text` expression and
    # implicitly returned None; actually return the validated text.
    return tweet_text
def validate_tweet_info(t_info: TweetInfo) -> TweetInfo:
    """Validate the given tweet's information (`tweet_info`).

    Parameters
    ----------
    t_info : TweetInfo
        Dictionary with the tweet's information.

    Returns
    -------
    TweetInfo
        Validated tweet information, same shape as the input.
    """
    # The input dictionary must have every required field before any
    # per-field validation runs.
    __validate_dict_keys(t_info, TweetInfo, "tweet_info")
    name_msg = "Please provide a valid name for your tweet. This will be used to name your graphic."
    username_msg = "Please provide a valid Twitter username."
    tag_msg = "Please provide a valid Twitter user tag/handle."
    pic_msg = "Please provide a valid path for the profile picture location."
    text_msg = "The tweet text must complies with the same rules as a normal tweet (namely the maximum of 280 characters)."
    # Dict literals evaluate in order, so fields are validated in the same
    # sequence as before: name, username, tag, picture, text.
    return {
        "tweet_name": __validate_tweet_name(t_info["tweet_name"], name_msg),
        "user_name": __validate_username(t_info["user_name"], username_msg),
        "user_tag": __validate_user_tag(t_info["user_tag"], tag_msg),
        "user_pic": __validate_user_pic(t_info["user_pic"], pic_msg),
        "tweet_text": __validate_tweet_text(t_info["tweet_text"], text_msg),
    }
|
import taos
from datetime import datetime, timezone, timedelta
def bind_params(num_columns: int, tz_offset, fields: list, **kwargs):
    """Build a taos parameter-bind block for one row.

    Parameters
    ----------
    num_columns : int
        Number of slots in the bind block.
    tz_offset
        Hours subtracted from TIMESTAMP values before binding — presumably a
        local-time-to-server-time shift; confirm against the schema.
    fields : list
        Column descriptions (dicts with 'Field', 'Type', 'Note' keys).
    kwargs
        Row values keyed by column name; missing or None values bind as NULL.
    """
    params = taos.new_bind_params(num_columns)
    i = 0
    for f in fields:
        # Only bind columns with an empty Note — presumably this skips tag
        # columns, which carry a non-empty Note; confirm against taos output.
        if f['Note'] == "":
            param = kwargs.get(f['Field'])
            if param is None:
                params[i].null()
            elif f['Type'] == 'TIMESTAMP':
                # Timestamps may arrive as strings; parse, then shift by the
                # configured timezone offset.
                if isinstance(param, str):
                    param = str2time(param)
                param -= timedelta(hours=tz_offset)
                params[i].timestamp(param)
            elif f['Type'] == 'INT':
                params[i].int(param)
            elif f['Type'] == 'INT UNSIGNED':
                params[i].int_unsigned(param)
            elif f['Type'] == 'BIGINT':
                params[i].bigint(param)
            elif f['Type'] == 'BIGINT UNSIGNED':
                params[i].bigint_unsigned(param)
            elif f['Type'] == 'FLOAT':
                params[i].float(param)
            elif f['Type'] == 'DOUBLE':
                params[i].double(param)
            elif f['Type'] == 'BINARY':
                params[i].binary(param)
            elif f['Type'] == 'SMALLINT':
                params[i].smallint(param)
            elif f['Type'] == 'SMALLINT UNSIGNED':
                params[i].smallint_unsigned(param)
            elif f['Type'] == 'TINYINT':
                params[i].tinyint(param)
            elif f['Type'] == 'TINYINT UNSIGNED':
                params[i].tinyint_unsigned(param)
            elif f['Type'] == 'BOOL':
                params[i].bool(param)
            elif f['Type'] == 'NCHAR':
                params[i].nchar(param)
        # Advance to the next bind slot for every field, bound or not.
        i += 1
    return params
def batch_bind_params(num_columns: int, tz_offset, fields: list, args: list):
    """Build one parameter-bind block per row dict in `args`.

    Each element of `args` is expanded as keyword arguments to
    `bind_params`; the resulting blocks are returned in input order.
    """
    return [
        bind_params(num_columns, tz_offset, fields, **row)
        for row in args
    ]
def str2time(t_str: str):
    """Parse a timestamp string, inferring the format from its length.

    Supported forms: date only (10 chars), date+hour (13), date+hour:minute
    (16), full seconds (19), and fractional seconds (21-26 chars).

    Raises
    ------
    KeyError
        For any other string length.
    """
    formats_by_length = {
        10: "%Y-%m-%d",
        13: "%Y-%m-%d %H",
        16: "%Y-%m-%d %H:%M",
        19: "%Y-%m-%d %H:%M:%S",
    }
    length = len(t_str)
    fmt = formats_by_length.get(length)
    if fmt is None:
        # Fractional seconds allow 1-6 digits after the dot.
        if 21 <= length <= 26:
            fmt = "%Y-%m-%d %H:%M:%S.%f"
        else:
            raise KeyError("Invalid time format.")
    return datetime.strptime(t_str, fmt)
from django.shortcuts import render
def resume_index(request):
    """Render the static resume landing page template."""
    return render(request, "resume_index.html")
|
"""
File: hangman.py
-----------------------------
This program plays hangman game.
Users sees a dashed word, trying to
correctly figure the un-dashed word out
by inputting one character each round.
If the user input is correct, show the
updated word on console. Players have N_TURNS
to try in order to win this game.
"""
import random
# This constant controls the number of guess the player has
N_TURNS = 7
def main():
    """
    The program plays hangman game. user has N_TURNS chance of guessing wrong.
    """
    word = random_word()
    # `initial_format` holds the current revealed state, dashes for unknowns.
    initial_format = '-' * len(word)
    print('The word looks like: ' + initial_format)
    print('You have ' + str(N_TURNS) + ' guesses left.')
    missing_letters = len(word)  # letters still hidden
    left = N_TURNS               # wrong guesses remaining
    while True:
        guessing = ''  # The word includes guessed letters and missing letters
        guess = input('Your guess: ')
        guess = guess.upper()
        # Reject non-alphabetic or multi-character input without costing a turn.
        if not guess.isalpha():
            print('illegal format.')
        elif len(guess) != 1:
            print('illegal format.')
        elif initial_format.find(guess) != -1:
            # Letter already revealed; no state change.
            print('You are correct!')
        else:
            # Rebuild the revealed state, uncovering every occurrence of the guess.
            for i in range(len(word)):
                ch = word[i]
                if ch == guess:
                    missing_letters -= 1
                    guessing += ch
                else:
                    guessing += initial_format[i]
            if guessing == initial_format:
                # Nothing changed: the guess is not in the word.
                left -= 1
                print('There is no ' + guess + ' \'s in the word.')
            else:
                initial_format = guessing
                print('You are correct!')
            # Check end-of-game conditions after each real guess.
            if left == 0:
                print('You are completely hung :( ')
                print('The word was: ' + word)
                return
            elif missing_letters == 0:
                print('You WIN !!! ')
                print('The word was: ' + word)
                return
        print('The word looks like: ' + initial_format)
        print('You have ' + str(left) + ' guesses left.')
def random_word():
    """Return one of the nine game words, chosen uniformly at random."""
    # Idiom fix: pick directly from the word tuple instead of mapping a
    # random index through a nine-way if/elif chain (which also had an
    # implicit None return had the index ever fallen through).
    words = (
        "NOTORIOUS",
        "GLAMOROUS",
        "CAUTIOUS",
        "DEMOCRACY",
        "BOYCOTT",
        "ENTHUSIASTIC",
        "HOSPITALITY",
        "BUNDLE",
        "REFUND",
    )
    return random.choice(words)
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
# Standard entry point: only start the game when run as a script.
if __name__ == '__main__':
    main()
|
from setuptools import setup

# Distribution metadata for the cryptopotato Telegram bot.
setup(
    name='devpotato-bot',
    version='0.6.1',
    description='Telegram bot for cryptopotato chat',
    packages=['devpotato_bot'],
    python_requires='>=3.8',
    # Runtime dependencies: the Telegram API wrapper, fast JSON, caching,
    # timezone data, and the SQLAlchemy/alembic persistence stack.
    install_requires=[
        'python-telegram-bot>=13.0',
        'ujson>=3.0.0',
        'cachetools>=4',
        'pytz>=2020.4',
        'SQLAlchemy>=1.3',
        'alembic>=1.4'
    ],
    author='Vladislav Glinsky',
    author_email='cl0ne@mithril.org.ua',
    url="https://code.nix.org.ua/cl0ne/cryptopotato-bot",
    license='MIT',
    license_file='LICENSE'
)
|
import pytest
from clearml_agent.helper.repo import VCS
@pytest.mark.parametrize(
    ["url", "expected"],
    (
        # Non-SSH URLs must pass through unchanged (expected=None means
        # "same as input").
        ("a", None),
        ("foo://a/b", None),
        ("foo://a/b/", None),
        ("https://a/b/", None),
        ("https://example.com/a/b", None),
        ("https://example.com/a/b/", None),
        ("ftp://example.com/a/b", None),
        ("ftp://example.com/a/b/", None),
        # scp-like and ssh:// remotes are rewritten to https. Per this table,
        # the default git/hg users without a password are dropped; any other
        # user, and any user:password pair, is preserved.
        ("github.com:foo/bar.git", "https://github.com/foo/bar.git"),
        ("git@github.com:foo/bar.git", "https://github.com/foo/bar.git"),
        ("bitbucket.org:foo/bar.git", "https://bitbucket.org/foo/bar.git"),
        ("hg@bitbucket.org:foo/bar.git", "https://bitbucket.org/foo/bar.git"),
        ("ssh://bitbucket.org/foo/bar.git", "https://bitbucket.org/foo/bar.git"),
        ("ssh://git@github.com/foo/bar.git", "https://github.com/foo/bar.git"),
        ("ssh://user@github.com/foo/bar.git", "https://user@github.com/foo/bar.git"),
        ("ssh://git:password@github.com/foo/bar.git", "https://git:password@github.com/foo/bar.git"),
        ("ssh://user:password@github.com/foo/bar.git", "https://user:password@github.com/foo/bar.git"),
        ("ssh://hg@bitbucket.org/foo/bar.git", "https://bitbucket.org/foo/bar.git"),
        ("ssh://user@bitbucket.org/foo/bar.git", "https://user@bitbucket.org/foo/bar.git"),
        ("ssh://hg:password@bitbucket.org/foo/bar.git", "https://hg:password@bitbucket.org/foo/bar.git"),
        ("ssh://user:password@bitbucket.org/foo/bar.git", "https://user:password@bitbucket.org/foo/bar.git"),
    ),
)
def test(url, expected):
    """VCS.replace_ssh_url rewrites SSH-style remotes to HTTPS and leaves
    everything else untouched."""
    result = VCS.replace_ssh_url(url)
    # None in the table encodes "expect the input unchanged".
    expected = expected or url
    assert result == expected
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from manga_dl.addons import mangabz
from manga_dl import config
# Route all HTTP(S) traffic through a local SOCKS5 proxy.
proxy = 'socks5://127.0.0.1:1086'
proxies = {"http": proxy, "https": proxy}
# Initialize the downloader's global config and register the proxy.
config.init()
config.set("proxies", proxies)
def test_mangabz():
    """Smoke-test the Mangabz addon: keyword search and manga fetch.

    NOTE: performs live network requests against mangabz.com (through the
    proxy configured above).
    """
    api = mangabz.Mangabz
    # Search by title; any non-None result counts as success.
    manga_urls = api.fetch_keyword("關于我轉生后成為史萊姆的那件事")
    assert manga_urls is not None
    manga = api.fetch_manga("https://www.mangabz.com/207bz/")
    assert manga is not None
|
import unittest
from pytown_core.runners import MyThread
import logging
class MyThreadTest(unittest.TestCase):
    """Exercise MyThread's lifecycle hooks via the Subject fixture below."""

    def setUp(self):
        self.subject = Subject()

    def test_my_thread_init(self):
        # Run the thread to completion: do() increments the counter ten
        # times, then stops the thread.
        self.subject.start()
        self.subject.join()
        self.assertEqual(self.subject.compteur, 10)
        # Both pre/post hooks must have fired around the run loop.
        self.assertEqual(self.subject.pre_run_message, "PreRun process")
        self.assertEqual(self.subject.post_run_message, "PostRun process")
class Subject(MyThread):
    """Minimal MyThread implementation used as a test fixture."""

    def __init__(self):
        MyThread.__init__(self, "subject")
        self.compteur = 0  # iteration counter ("compteur" = French for counter)
        self.pre_run_message = "None"
        self.post_run_message = "None"

    def do(self):
        # Presumably called repeatedly by MyThread's run loop until stop();
        # confirm against pytown_core.runners.MyThread.
        if self.compteur < 10:
            self.compteur += 1
        else:
            self.stop()

    def _pre_run(self):
        # Hook invoked before the run loop starts.
        logging.info("_pre_run")
        self.pre_run_message = "PreRun process"

    def _post_run(self):
        # Hook invoked after the run loop finishes.
        logging.info("_post_run")
        self.post_run_message = "PostRun process"
|
# Demonstration of the `re` module (originally a REPL transcript; the bare
# expressions below are kept as documentation of return values).
# Fixed: the script used `re` without importing it, which raises NameError
# when run as a file.
import re

# search: first match anywhere in the string (None when absent).
re.search(r'ab*c', 'abc ac adc abbbc')
re.search(r'b.*d', 'abc ac adc abbbc')
re.search(r'b.*d', 'abc ac adc abbbc')
# A match object supports indexing (group 0 is the whole match) ...
re.search(r'b.*d', 'abc ac adc abbbc')[0]
# ... which is equivalent to .group(0).
re.search(r'b.*d', 'abc ac adc abbbc').group(0)
m = re.search(r'a(.*)d(.*a)', 'abc ac adc abbbc')
m[2]
m.groups()
# sub with a callable: the replacement is computed per match.
re.sub(r'(a|b)\^2', lambda m: m[0].upper(), 'a^2 + b^2 - C*3')
# findall: all non-overlapping matches as strings.
re.findall(r'ab*c', 'abc ac adc abbbc')
re.findall(r'ab+c', 'abc ac adc abbbc')
re.findall(r'\bs?pare?\b', 'par spar apparent spare part pare')
# Greedy vs non-greedy quantifiers.
re.findall(r't.*a', 'that is quite a fabricated tale')
re.findall(r't.*?a', 'that is quite a fabricated tale')
# With capture groups, findall returns the captures instead of the match.
re.findall(r'a(b*)c', 'abc ac adc abbc xabbbcz bbb bc abbbbbc')
re.findall(r'(x*):(y*)', 'xx:yyy x: x:yy :y')
# finditer: lazy iterator of match objects.
re.finditer(r'ab+c', 'abc ac adc abbbc')
m_iter = re.finditer(r'ab+c', 'abc ac adc abbbc')
for m in m_iter:
    print(m)
m_iter = re.finditer(r'(x*):(y*)', 'xx:yyy x: x:yy :y')
[(m[1], m[2]) for m in m_iter]
m_iter = re.finditer(r'ab+c', 'abc ac adc abbbc')
for m in m_iter:
    print(m[0].upper())
m_iter = re.finditer(r'ab+c', 'abc ac adc abbbc')
for m in m_iter:
    print(m.span())
|
# coding: utf-8
"""
SignRequest API
API for SignRequest.com
OpenAPI spec version: v1
Contact: tech-support@signrequest.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import signrequest_python_client
from signrequest_python_client.api.api_tokens_api import ApiTokensApi # noqa: E501
from signrequest_python_client.rest import ApiException
# NOTE: generated by swagger-codegen; the test bodies are intentional stubs.
class TestApiTokensApi(unittest.TestCase):
    """ApiTokensApi unit test stubs"""

    def setUp(self):
        # Fresh API client for every test.
        self.api = signrequest_python_client.api.api_tokens_api.ApiTokensApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_api_tokens_create(self):
        """Test case for api_tokens_create
        Create an API token  # noqa: E501
        """
        pass

    def test_api_tokens_delete(self):
        """Test case for api_tokens_delete
        Delete an API token  # noqa: E501
        """
        pass

    def test_api_tokens_list(self):
        """Test case for api_tokens_list
        Retrieve a list of API tokens  # noqa: E501
        """
        pass

    def test_api_tokens_read(self):
        """Test case for api_tokens_read
        Retrieve an API token  # noqa: E501
        """
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from sendim.models import Event
@login_required
def events(request):
    """
    List events and do processing on them.

    Processes are in POST method:
    - sendmail_q : Send a mail for a given event.
    - treatment_q : Make exploitation processes.
    """
    from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
    from django.contrib import messages
    from sendim.exceptions import UnableToConnectGLPI
    if request.method == 'POST':
        if 'eventPk' in request.POST:
            eventPk = request.POST["eventPk"]
            E = Event.objects.get(pk=eventPk)
            A = E.get_primary_alert()
            if "sendmail_q" in request.POST:
                # Build and send the notification mail for this event.
                msg = E.prepare_mail(request.POST)
                if E.send_mail(msg):
                    messages.add_message(request, messages.SUCCESS, u"Envoi d'un mail pour l'\xe9v\xe9nement #" + str(E.pk) + ".")
                else:
                    # Fixed message typo: "l'nvoi" -> "l'envoi".
                    messages.add_message(request, messages.ERROR, u"Echec de l'envoi d'un mail pour l'\xe9v\xe9nement #" + str(E.pk) + ".")
            elif "treatment_q" in request.POST:
                # An event without criticity or reference first needs a
                # reference to be filled in.
                if E.criticity == '?' or not E.get_primary_alert().reference:
                    Forms = E.get_ReferenceFormSet()
                    return render(request, 'event/add-reference.html', {
                        'Forms': Forms, 'E': E,
                        'title': 'Snafu - Ajout de Reference'
                    })
                # If a GLPI ticket hasn't been created yet, create it now.
                if not E.glpi:
                    try:
                        E.create_ticket()
                        messages.add_message(request, messages.SUCCESS, "Ticket #" + str(E.glpi) + u" associ\xe9 \xe0 Event #" + str(E.pk))
                    except UnableToConnectGLPI:
                        messages.add_message(request, messages.ERROR, u"Impossible de se connecter \xe0 GLPI.")
                        return redirect('/snafu/events')
                # Create a mail preview
                msg = E.make_mail()
                # Retrieve graphs for the current Event
                graphList = A.host.get_graph_list(A.service)
                return render(request, 'event/preview-mail.html', {
                    'msg': msg,
                    'E': E,
                    'graphList': graphList,
                    'title': 'Snafu - Envoi de mail'
                })
        Es = Event.objects.filter(closed=False).order_by('-date')
    else:
        Es = list(Event.objects.filter(closed=False).order_by('-date'))
    # Paginate 100 events per page; fall back to the first/last page on a
    # bad page number.
    paginator = Paginator(Es, 100)
    page = request.GET.get('page')
    try:
        Es = paginator.page(page)
    except PageNotAnInteger:
        Es = paginator.page(1)
    except EmptyPage:
        Es = paginator.page(paginator.num_pages)
    return render(request, 'event/event-index.html', {
        'Es': Es,
        'title': 'Snafu - Events'
    })
@login_required
def EaddRef(request):
    """
    Add a reference to DB.

    POST method only; a GET request gets a 403 response.
    This view is used by AJAX.
    """
    # NOTE(review): django.utils.simplejson was removed in Django >= 1.5;
    # the stdlib `json` module is a drop-in replacement — confirm the
    # project's Django version before switching.
    import django.utils.simplejson as json
    from django.http import HttpResponse, HttpResponseForbidden
    from referentiel.forms import ReferenceBigForm, HostReferenceForm
    # Fixed: the original did `raise HttpResponseForbidden`, which raises a
    # TypeError (responses are not exceptions); return the response instead.
    if request.method == 'GET':
        return HttpResponseForbidden()
    E = Event.objects.get(pk=request.POST['eventPk'])
    # Pick the form class matching the AJAX payload.
    if request.POST['form_type'] == 'big':
        form = ReferenceBigForm
    elif request.POST['form_type'] == 'host':
        form = HostReferenceForm
    form = form(request.POST)
    if form.is_valid():
        form.save()
    else:
        # Report validation errors back to the AJAX caller as JSON.
        errors = json.dumps(form.errors)
        return HttpResponse(errors, mimetype='application/json')
    # Link every matching alert of the event to the new reference.
    _A = None
    for status in ('WARNING', 'CRITICAL', 'UNKNOWN', 'DOWN'):
        As = E.get_alerts().filter(host=form.data['host'], service=form.data['service'], status__name=status)
        for _A in As:
            _A.link_to_reference()
    A = E.get_primary_alert()
    # Fixed: `_A` was unbound (NameError) when no alert matched; only
    # update the event criticity when an alert was actually linked.
    if _A is not None and _A.isPrimary:
        E.criticity = A.reference.mail_criticity.name
        E.save()
    return render(request, 'event/event-index.html', {
    })
|
import logging
from git_project_updater_business.settings.settings_repository import SettingsRepository
from git_project_updater_business.scanners.projects_scanner_factory import ProjectScannerFactory
class ProjectsRepository:
    """Singleton that lazily loads and caches the scanned project tree."""

    __instance = None

    @staticmethod
    def instance(settings_repository: SettingsRepository, project_scanner_factory: ProjectScannerFactory):
        """Return the single ProjectsRepository, creating it on first use."""
        if ProjectsRepository.__instance is None:
            ProjectsRepository(settings_repository, project_scanner_factory)
        return ProjectsRepository.__instance

    def __init__(self, settings_repository, project_scanner_factory):
        """ Virtually private constructor. """
        if ProjectsRepository.__instance is not None:
            raise Exception("This class is a singleton!")
        self.settings_repository = settings_repository
        self.project_scanner_factory = project_scanner_factory
        self.__projects = None
        ProjectsRepository.__instance = self

    @property
    def projects(self):
        """Scanned projects, computed on first access and then cached."""
        if not self.__projects:
            self.refresh_projects()
        return self.__projects

    def refresh_projects(self):
        """Force a re-scan of the projects."""
        self.__read_projects()

    # --------------------------------------------- PRIVATE METHODS

    def __read_projects(self):
        logging.info("Reading projects...")
        settings = self.settings_repository.settings
        if settings:
            scanner = self.project_scanner_factory.compute_scanner(settings)
            self.__projects = scanner.scan_for_projects(settings)
        else:
            logging.error("No settings found in repository")
|
from unittest import TestCase
class TBase (TestCase):
    """Empty shared base class for test cases; extend with common fixtures."""
    pass
|
def foodtobring(ponds):
    """Read `ponds` lines of "A B" integer pairs from stdin (Python 2) and
    return how many units of food must be brought along.

    Food left at pond A (+1) can be consumed later at a pond whose B value
    matches; when none is available, one more unit must be brought.
    """
    bring = 0
    foods = {}
    for _ in xrange(ponds):
        A, B = map(int, raw_input().split())
        if A == B:
            # Food produced and consumed at the same pond cancels out.
            continue
        foods[A] = foods.get(A, 0) + 1
        if foods.get(B, 0) > 0:
            foods[B] -= 1
        else:
            foods.setdefault(B, 0)
            bring += 1
    return bring
# Entry point (Python 2 I/O): first line is the number of test cases,
# each following line is the pond count for one case.
if __name__ == "__main__":
    for tc in xrange(int(raw_input())):
        print foodtobring(int(raw_input()))
|
"""Utilities for interpretability tools."""
import numpy as np
from scipy import ndimage
def gaussian_blur(image: np.ndarray, sigma: float) -> np.ndarray:
    """Applies a Gaussian blur to a 3D (WxHxC) image.

    Args:
        image: 3 dimensional ndarray / input image (W x H x C).
        sigma: Standard deviation for Gaussian blur kernel; 0 disables
            blurring entirely.

    Returns:
        The blurred image (the unmodified input when ``sigma == 0``).
    """
    if sigma != 0:
        # Blur the two spatial axes only; channels are filtered independently.
        return ndimage.gaussian_filter(
            image, sigma=[sigma, sigma, 0], mode='constant')
    return image
|
# Generated by Django 3.2.2 on 2021-06-03 14:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop Descricao.slug and restrict Descricao.estado to the
    draft/published choices (default 'draft')."""

    dependencies = [
        ('etudiorder', '0003_auto_20210603_1546'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='descricao',
            name='slug',
        ),
        migrations.AlterField(
            model_name='descricao',
            name='estado',
            field=models.CharField(choices=[('draft', 'Rascunho'), ('published', 'Publicado')], default='draft', max_length=10),
        ),
    ]
|
from django.db import models
from django.utils import timezone
class Position(models.Model):
    """
    Table generated by SkyBoT which has the solar system objects identified
    in DES images (for more details see:
    http://vo.imcce.fr/webservices/skybot/?conesearch)
    """
    name = models.CharField(
        max_length=32,
        verbose_name='Name',
        help_text='(ucd=“meta.id;meta.main”) Object name (official or provisional designation).'
    )
    number = models.CharField(
        max_length=35, default=None, null=True, blank=True,
        verbose_name='Number',
        help_text='(ucd=“meta.id;meta.number”) Object number (not all objects have numbers assigned).',
    )
    dynclass = models.CharField(
        max_length=24,
        verbose_name='Object classification',
        help_text='(ucd=“meta.code.class;src.class”) Object class (TNO, Centaur, Trojan, etc.).'
    )
    base_dynclass = models.CharField(
        max_length=24,
        verbose_name='Base Object classification',
        help_text='(ucd=“meta.code.class”) Base Object class (TNO, Centaur, Trojan, etc.).',
        default=None, null=True, blank=True
    )
    ra = models.CharField(
        max_length=20,
        verbose_name='RA',
        help_text='(ucd=“pos.eq.ra;meta.main”) Right ascension of the identified object.'
    )
    dec = models.CharField(
        max_length=20,
        verbose_name='Dec',
        help_text='(ucd=“pos.eq.dec;meta.main”) Declination of the identified object.'
    )
    raj2000 = models.FloatField(
        verbose_name='RA (deg)',
        help_text='(ucd=“pos.eq.ra;meta.main”) Right ascension of the identified object in degrees.'
    )
    decj2000 = models.FloatField(
        verbose_name='Dec (deg)',
        help_text='(ucd=“pos.eq.dec;meta.main”) Declination of the identified object in degrees.'
    )
    mv = models.FloatField(
        verbose_name='Mv',
        help_text='(ucd=“phot.mag;em.opt.V”) Visual magnitude',
        default=None, null=True, blank=True
    )
    errpos = models.FloatField(
        verbose_name='ErrPos',
        help_text='(ucd=“stat.error.sys”) Uncertainty on the (RA,DEC) coordinates',
        default=None, null=True, blank=True
    )
    d = models.FloatField(
        verbose_name='d',
        help_text='(ucd="pos.ang") Body-to-center angular distance',
        default=None, null=True, blank=True
    )
    dracosdec = models.FloatField(
        verbose_name='dRAcosDec',
        help_text='(ucd=“pos.pm;pos.eq.ra”) Motion in right ascension d(RA)cos(DEC)',
        default=None, null=True, blank=True
    )
    ddec = models.FloatField(
        verbose_name='dDEC',
        help_text='(ucd=“pos.pm;pos.eq.dec”) Motion in declination d(DEC)',
        default=None, null=True, blank=True
    )
    dgeo = models.FloatField(
        verbose_name='Dgeo',
        help_text='(ucd=“phys.distance”) Distance from observer',
        default=None, null=True, blank=True
    )
    dhelio = models.FloatField(
        verbose_name='Dhelio',
        help_text='(ucd=“phys.distance”) Distance from the Sun',
        default=None, null=True, blank=True
    )
    phase = models.FloatField(
        verbose_name='Phase',
        help_text='(ucd=“pos.phaseAng”) Phase angle, e.g. elongation of earth from sun as seen from object',
        default=None, null=True, blank=True
    )
    solelong = models.FloatField(
        verbose_name='SolElong',
        help_text='(ucd=“pos.angDistance”) Solar elongation, e.g. elongation of object from sun as seen from Earth',
        default=None, null=True, blank=True
    )
    px = models.FloatField(
        verbose_name='Px',
        help_text='(ucd=“src.orbital.pos;meta.id.x”) Mean J2000 heliocentric position vector, x component',
        default=None, null=True, blank=True
    )
    py = models.FloatField(
        verbose_name='Py',
        help_text='(ucd=“src.orbital.pos;meta.id.y”) Mean J2000 heliocentric position vector, y component',
        default=None, null=True, blank=True
    )
    pz = models.FloatField(
        verbose_name='Pz',
        help_text='(ucd=“src.orbital.pos;meta.id.z”) Mean J2000 heliocentric position vector, z component',
        default=None, null=True, blank=True
    )
    vx = models.FloatField(
        verbose_name='Vx',
        help_text='(ucd=“src.veloc.orbital;meta.id.x”) Mean J2000 heliocentric velocity vector, x component',
        default=None, null=True, blank=True
    )
    vy = models.FloatField(
        verbose_name='Vy',
        help_text='(ucd=“src.veloc.orbital;meta.id.y”) Mean J2000 heliocentric velocity vector, y component',
        default=None, null=True, blank=True
    )
    vz = models.FloatField(
        verbose_name='Vz',
        help_text='(ucd=“src.veloc.orbital;meta.id.z”) Mean J2000 heliocentric velocity vector, z component',
        default=None, null=True, blank=True
    )
    jdref = models.FloatField(
        verbose_name='JDRef',
        help_text='(ucd=“time.epoch”) Reference epoch of the position/velocity vector',
        default=None, null=True, blank=True
    )
    ticket = models.BigIntegerField(
        verbose_name='Skybot Ticket',
        help_text='Id of the request made in the skybot. it serves to group all the positions that are of the same request.',
        default=0,
        db_index=True
    )
    # Timestamp of when this position record was created.
    created = models.DateTimeField(
        verbose_name='Created',
        auto_now_add=True,
        null=True,
        blank=True
    )

    class Meta:
        # TODO: Review this rule - the ticket may not be needed; name, ra
        # and dec alone might be enough to identify a unique row.
        # The same position must not repeat within the result of one request.
        unique_together = ('name', 'raj2000', 'decj2000', 'ticket')
        indexes = [
            models.Index(fields=['number']),
            models.Index(fields=['name']),
            models.Index(fields=['dynclass']),
            models.Index(fields=['ticket']),
            models.Index(fields=['base_dynclass']),
        ]

    def __str__(self):
        return str(self.name)
|
from qgis.core import QgsRasterLayer
from qgis.PyQt.QtCore import QFileInfo
from qgis.core import QgsProject
class Result():
    """Wraps a raster file path and can display it as a new QGIS layer."""

    def __init__(self, path=None):
        # Path to the raster file to be displayed (may be None).
        self.path = path

    def display(self):
        """
        Displays an image from the given path on a new created QGIS layer.
        """
        # Check if a path string is provided
        if self.path:
            fileInfo = QFileInfo(self.path)
            path = fileInfo.filePath()
            baseName = fileInfo.baseName()
            layer = QgsRasterLayer(path, baseName)
            QgsProject.instance().addMapLayer(layer)
            # Fixed: a py2->py3 conversion artifact had left `print` and its
            # message on separate lines, so nothing was ever printed.
            if layer.isValid() is True:
                print("Layer was loaded successfully!")
            else:
                print("Unable to read basename and file path - Your string is probably invalid")
|
import enum
import typing
# Generic client type handled by providers and contexts.
T = typing.TypeVar('T')
@typing.runtime_checkable
class Provider(typing.Protocol[T]):
    """Client Adapter Interface Accepted by SyncContext"""

    # Key under which this provider's context is stored in app state
    # (see the context factories below).
    state_name: typing.ClassVar[str]

    def is_closed(self, client: T) -> bool:
        """Returns if client is closed or released"""
        ...

    def close_client(self, client: T) -> None:
        """Closes/releases client"""

    def acquire(self) -> typing.ContextManager[T]:
        """Acquires a client `T` and releases at the end"""
        ...
@typing.runtime_checkable
class AsyncProvider(typing.Protocol[T]):
    """Client Adapter Interface Accepted by AsyncContext"""

    # Key under which this provider's context is stored in app state.
    state_name: typing.ClassVar[str]

    def is_closed(self, client: T) -> bool:
        """Returns if client is closed or released"""
        ...

    async def close_client(self, client: T) -> None:
        """Closes/releases client"""
        ...

    def acquire(self) -> typing.AsyncContextManager[T]:
        """Acquires a client `T` and releases at the end"""
        ...
class ImmutableSyncProvider(typing.Generic[T]):
    """Read-only facade over a sync Provider.

    Only the provider API (get_state_name / is_closed / close_client /
    acquire) is reachable; any other attribute access raises AttributeError,
    preventing callers from mutating the wrapped provider.
    """

    state_name = 'immutable_sync_provider'

    def __init__(self, provider: Provider[T]) -> None:
        self._provider = provider

    def __getattribute__(self, name: str) -> typing.Any:
        exposed = ('get_state_name', 'is_closed', 'close_client', 'acquire')
        if name not in exposed:
            raise AttributeError(
                "'{}' object has no attribute '{}'".format(
                    ImmutableSyncProvider.__name__, name
                )
            )
        return super().__getattribute__(name)

    def get_state_name(self):
        """State key of the wrapped provider."""
        # Bypass __getattribute__ to reach the hidden _provider attribute.
        return super().__getattribute__('_provider').state_name

    def is_closed(self, client: T) -> bool:
        """Delegates to the wrapped provider."""
        return super().__getattribute__('_provider').is_closed(client)

    def close_client(self, client: T) -> None:
        """Delegates to the wrapped provider."""
        return super().__getattribute__('_provider').close_client(client)

    def acquire(self) -> typing.ContextManager[T]:
        """Delegates to the wrapped provider."""
        return super().__getattribute__('_provider').acquire()
class ImmutableAsyncProvider(typing.Generic[T]):
    """Wrapper class to prevent mutating provider.

    Read-only facade over an AsyncProvider: only the provider API
    (get_state_name / is_closed / close_client / acquire) is reachable;
    any other attribute access raises AttributeError.
    """

    state_name = 'immutable_async_provider'

    def __init__(self, provider: AsyncProvider[T]) -> None:
        self._provider = provider

    def __getattribute__(self, name: str) -> typing.Any:
        allowed = ['get_state_name', 'is_closed', 'close_client', 'acquire']
        if name in allowed:
            return super().__getattribute__(name)
        message = "'{}' object has no attribute '{}'"
        # Fixed: the error previously reported the *sync* wrapper's class
        # name (copy-paste from ImmutableSyncProvider).
        raise AttributeError(
            message.format(ImmutableAsyncProvider.__name__, name)
        )

    def get_state_name(self):
        """State key of the wrapped provider."""
        # Bypass __getattribute__ to reach the hidden _provider attribute.
        return super().__getattribute__('_provider').state_name

    def is_closed(self, client: T) -> bool:
        """Delegates to the wrapped provider."""
        return super().__getattribute__('_provider').is_closed(client)

    def close_client(self, client: T) -> typing.Coroutine[None, None, None]:
        """Delegates to the wrapped provider (returns its coroutine)."""
        return super().__getattribute__('_provider').close_client(client)

    def acquire(self) -> typing.AsyncContextManager[T]:
        """Delegates to the wrapped provider."""
        return super().__getattribute__('_provider').acquire()
@typing.runtime_checkable
class AbstractSyncContext(typing.Protocol[T]):
    """Structural interface of a synchronous client context."""

    # Provider exposed read-only through the ImmutableWrapper descriptor.
    provider: 'ImmutableWrapper[ImmutableSyncProvider[T]]'
    # True while inside an open context.
    _inside_ctx: bool

    def __init__(self, provider: Provider[T]) -> None:
        ...

    def in_context(self) -> bool:
        """Returns if `.open()` or `.begin()` calls where made inside an open context"""
        ...

    @property
    def client(self) -> T:
        """Returns a client instance if context is open"""
        ...

    def open(self) -> typing.ContextManager[None]:
        """Opens context"""
        ...

    def begin(self) -> typing.ContextManager[T]:
        """Returns client from open context or a independent client if no context is open."""
        ...

    def get_provider(self) -> ImmutableSyncProvider[T]:
        """Returns internal provider"""
        ...
@typing.runtime_checkable
class AbstractAsyncContext(typing.Protocol[T]):
    """Structural interface of an asynchronous client context."""

    # Provider exposed read-only through the ImmutableWrapper descriptor.
    provider: 'ImmutableWrapper[ImmutableAsyncProvider[T]]'
    # True while inside an open context.
    _inside_ctx: bool

    def __init__(self, provider: AsyncProvider[T]) -> None:
        ...

    def in_context(self) -> bool:
        """Returns if `.open()` or `.begin()` calls where made inside an open context"""
        ...

    @property
    def client(self) -> T:
        """Returns a client instance if context is open"""
        ...

    def open(self) -> typing.AsyncContextManager[None]:
        """Opens context"""
        ...

    def begin(self) -> typing.AsyncContextManager[T]:
        """Returns client from open context or a independent client if no context is open."""
        ...

    def get_provider(self) -> ImmutableAsyncProvider[T]:
        """Returns internal provider"""
        ...
# Either flavour of context; a constrained TypeVar so a call site is
# sync-only or async-only, never a mix of the two.
AbstractContext = typing.TypeVar(
    'AbstractContext', AbstractSyncContext, AbstractAsyncContext
)
# Structural helper protocols: an app stores its state under one of three
# attribute names ('state', 'context' or 'ctx'), and a state handler holds
# its own state plus an `app` reference.  The unions below accept any of
# the three spellings.
class _StateApp(typing.Protocol):
    state: typing.Type


class _ContextApp(typing.Protocol):
    context: typing.Type


class _CtxApp(typing.Protocol):
    ctx: typing.Type


class _HasState(typing.Protocol):
    state: typing.Type
    app: '_StateApp'


class _HasContext(typing.Protocol):
    context: typing.Type
    app: '_ContextApp'


class _HasCtx(typing.Protocol):
    ctx: typing.Type
    app: '_CtxApp'


StateApp = typing.Union[_StateApp, _ContextApp, _CtxApp]
HasState = typing.Union[_HasState, _HasContext, _HasCtx]
class StateWrapper:
    """Uniform get/set access to instance- and app-level state objects.

    The wrapped handler must expose an ``app`` attribute; the handler and
    its app each store state under one of the names in
    ``_valid_state_attrs`` ('state', 'context' or 'ctx'), resolved
    independently.  Annotations referencing module-level aliases are
    string forward references.
    """

    _valid_state_attrs = ['state', 'context', 'ctx']

    def __init__(self, has_state: 'HasState') -> None:
        self.has_state = has_state
        self._validate_instance()
        # Handler and app may use different state attribute names.
        self._instance_state_attr = self._get_state_attr(self.has_state)
        self._app_state_attr = self._get_state_attr(self.has_state.app)

    def _validate_instance(self):
        # Fail early if the handler cannot reach app-level state.
        if not hasattr(self.has_state, 'app'):
            raise TypeError("State Handler must have 'app' attribute")

    def _get_state_attr(
        self,
        instance: 'typing.Union[HasState, StateApp]',
    ):
        """Return the first supported state attribute name on `instance`."""
        for item in self._valid_state_attrs:
            if hasattr(instance, item):
                return item
        raise NotImplementedError(
            'State Handler does not have supported state_attrs'
        )

    @property
    def _app_state(self):
        return getattr(self.has_state.app, self._app_state_attr)

    @property
    def _instance_state(self):
        # Fixed: this previously read self._app_state_attr, so instance
        # state was looked up under the *app's* attribute name (and
        # _instance_state_attr was never used).
        return getattr(self.has_state, self._instance_state_attr)

    @staticmethod
    def _get(
        state: type, name: str, _cast: 'typing.Type[T]'
    ) -> 'typing.Optional[T]':
        # `_cast` only informs the annotation; it is not applied at runtime.
        return getattr(state, name, None)

    @staticmethod
    def _set(state: type, name: str, val: typing.Any):
        setattr(state, name, val)

    def app_get(
        self, name: str, _cast: 'typing.Type[T]' = typing.Any
    ) -> 'typing.Optional[T]':
        """Read `name` from app state (None when missing)."""
        return self._get(self._app_state, name, _cast)

    def get(
        self, name: str, _cast: 'typing.Type[T]' = typing.Any
    ) -> 'typing.Optional[T]':
        """Read `name` from instance state (None when missing)."""
        return self._get(self._instance_state, name, _cast)

    def app_set(self, name: str, val: typing.Any):
        """Write `name` into app state."""
        self._set(self._app_state, name, val)

    def set(self, name: str, val: typing.Any):
        """Write `name` into instance state."""
        self._set(self._instance_state, name, val)
class AbstractSyncContextFactory(typing.Protocol[T]):
    """Creates a Context Factory to handle contexts inside a state"""

    # Provider and context classes this factory instantiates.
    _provider_class: typing.Type[Provider[T]]
    _context_class: typing.Type[AbstractSyncContext[T]]
    # Optional explicit key for storing the context in state.
    _state_name: typing.Optional[str]

    def _get_context(
        self, state_wrapper: StateWrapper
    ) -> AbstractSyncContext[T]:
        """Initializes Context"""
        ...

    def generate_state_name(self) -> str:
        """Returns a key name to store context in state"""
        ...

    def has_active_context(
        self, has_state: HasState
    ) -> typing.Optional[AbstractSyncContext[T]]:
        """Returns context from `has_state` if context in has_state, else None"""

    def _set_active_context(
        self, context: AbstractSyncContext[T], state_wrapper: StateWrapper
    ):
        """Sets context in state"""

    def __call__(self, has_state: HasState) -> AbstractSyncContext[T]:
        """Returns context from has_state if exists or opens new context, stores in state, and then returns state"""
        ...

    def from_provider(self, provider: typing.Type[Provider[T]]):
        """Returns context from a given provider"""
class AbstractAsyncContextFactory(typing.Protocol[T]):
    """Creates a Context Factory to handle contexts inside a state"""

    # Provider and context classes this factory instantiates.
    _provider_class: typing.Type[AsyncProvider[T]]
    _context_class: typing.Type[AbstractAsyncContext[T]]
    # Optional explicit key for storing the context in state.
    _state_name: typing.Optional[str]

    def _get_context(
        self, state_wrapper: StateWrapper
    ) -> AbstractAsyncContext[T]:
        """Initializes Context"""
        ...

    def generate_state_name(self) -> str:
        """Returns a key name to store context in state"""
        ...

    def has_active_context(
        self, has_state: HasState
    ) -> typing.Optional[AbstractAsyncContext[T]]:
        """Returns context from `has_state` if context in has_state, else None"""

    def _set_active_context(
        self, context: AbstractAsyncContext[T], state_wrapper: StateWrapper
    ):
        """Sets context in state"""

    def __call__(self, has_state: HasState) -> AbstractAsyncContext[T]:
        """Returns context from has_state if exists or opens new context, stores in state, and then returns state"""
        ...

    def from_provider(self, provider: typing.Type[AsyncProvider[T]]):
        """Returns context from a given provider"""
class ContextGetter:
    """Resolves a context object from the first argument of a wrapped call,
    according to how that argument is declared (ArgType)."""

    class ArgType(enum.Enum):
        # How the first argument should be interpreted when extracting the
        # context.
        INSTANCE = enum.auto()
        CONTEXT = enum.auto()
        HAS_STATE = enum.auto()
        GET_CONTEXT = enum.auto()
        # VIEW is an alias of HAS_STATE.
        VIEW = HAS_STATE

        @classmethod
        def get(cls, name: str):
            # Look up a member by case-insensitive name.
            return getattr(cls, name.upper())

    def __init__(
        self,
        arg_type: ArgType,
        *,
        context_attr_name: typing.Optional[str] = None,
        _factory: typing.Optional[
            typing.Union[
                AbstractAsyncContextFactory,
                AbstractSyncContextFactory,
            ]
        ] = None,
    ) -> None:
        self.arg_type = arg_type
        # Attribute holding the context when arg_type is INSTANCE.
        self.context_attr_name = context_attr_name
        # Factory used when arg_type is HAS_STATE/VIEW.
        self.factory = _factory

    def _instance(self, instance):
        # First arg is the owning instance; context lives on an attribute.
        return getattr(instance, self.context_attr_name)  # type: ignore

    def _context(self, context):
        # First arg already is the context.
        return context

    def _has_state(self, has_state):
        # First arg carries app state; obtain the context via the factory.
        return self.factory(has_state)  # type: ignore

    def _get_context(self, instance):
        # First arg exposes its own _get_context hook.
        return instance._get_context

    def get(self, first_arg):
        # Dispatch on the ArgType name, e.g. INSTANCE -> self._instance.
        return getattr(self, '_{}'.format(self.arg_type.name.lower()))(
            first_arg
        )
# Either flavour of immutable provider wrapper.
ImmutableProviderT = typing.TypeVar(
    'ImmutableProviderT', ImmutableSyncProvider, ImmutableAsyncProvider
)
class ImmutableWrapper(typing.Generic[ImmutableProviderT]):
    """Descriptor exposing an instance attribute behind an immutable
    provider wrapper.

    Access through an instance reads ``instance.<name>`` and returns it
    wrapped in ``immutable_provider``; access through the class
    (``instance is None``) raises AttributeError.
    """

    def __init__(
        self,
        name: str,
        immutable_provider: typing.Type[ImmutableProviderT],
    ) -> None:
        self.name = name
        self.immutable_provider = immutable_provider

    def __get__(self, instance, owner=None) -> ImmutableProviderT:
        if instance is not None:
            return self.immutable_provider(getattr(instance, self.name))
        # Fixed: the owner class and attribute name were swapped in the
        # message (it read "'attr' object has no attribute 'Owner'").
        raise AttributeError(
            f'{owner.__name__!r} object has no attribute {self.name!r}'
        )
|
import refine.refine as refine
import requests
import hashlib
import json
import csv
import os
import re
#############################################
# Rows accumulated for the two combined CSVs: ids listed in `list_points`
# go to dataset_p, everything else to dataset_e (see add_to_big_dataset).
dataset_e = []
dataset_p = []
#############################################
def generate_sha(fpath):
    """Return the SHA-1 hex digest of a file, ignoring CR/LF characters.

    Newlines are stripped before hashing so files differing only in line
    endings (or a trailing newline) produce the same digest.
    """
    sha1 = hashlib.sha1()
    # `with` replaces the original try/finally; byte-pattern re.sub fixes
    # the str-pattern-on-bytes mismatch for the binary read (works on both
    # Python 2 and 3).
    with open(fpath, 'rb') as f:
        sha1.update(re.sub(b"[\n\r]+", b"", f.read()))
    return sha1.hexdigest()
def add_to_big_dataset(dataset_id, pos, dataset_row):
    """Route one CSV row into the combined point or event dataset.

    Row 0 is a header row: it is kept only while the target dataset is
    still empty, so the combined file ends up with exactly one header.
    Data rows (pos > 0) are always appended.
    """
    target = dataset_p if dataset_id in list_points else dataset_e
    if pos > 0 or (pos == 0 and not target):
        target.append(dataset_row)
def write_big_dataset(dataset_name, dataset_values):
    """Write the accumulated rows of a combined dataset to
    <dataset_folder>/<dataset_name>.csv (Python 2: binary mode for csv)."""
    with open(dataset_folder + dataset_name + '.csv', 'wb') as f:
        writ = csv.writer(f)
        writ.writerows(dataset_values)
    print "- Big dataset: " + dataset_name + " saved."
def write_dataset(dataset_id, dataset_values, add_function):
    """Convert one downloaded tab-separated dataset to CSV on disk.

    Each line of `dataset_values` is split on tabs into a row; every row
    is also passed to `add_function(dataset_id, row_index, row)` so it can
    be accumulated into the combined datasets.  The final empty element
    produced by the trailing newline is skipped.
    """
    with open(dataset_folder + dataset_id + '_dataset.csv', 'wb') as f:
        ec_file_list = dataset_values.split('\n')
        writ = csv.writer(f)
        for x in range(0, len(ec_file_list) - 1):
            d = ec_file_list[x]
            # Normalise "tab + space" separators before splitting.
            d = d.replace('\t ', '\t')
            da = d.split('\t')
            add_function(dataset_id, x, da)
            writ.writerow(da)
    print " * Dataset: " + dataset_id + " updated !"
def download_dataset(fpath, dataset_id, dataset_url):
    """Stream `dataset_url` to the file `fpath` in 1 KiB chunks."""
    with open(fpath, 'wb') as handle:
        response = requests.get(dataset_url, stream=True)
        for block in response.iter_content(1024):
            handle.write(block)
    print "- Downloaded Dataset: " + dataset_id
def get_project_options(identifier):
    """Load the OpenRefine operations JSON whose group contains `identifier`.

    Scans `operations_create` for the first key whose value contains the
    identifier, then reads ../refine_op/<key>.json.
    """
    idjson = next(
        (key for key in operations_create if identifier in operations_create[key]),
        None,
    )
    with open('../refine_op/' + idjson + '.json', 'r') as handle:
        return json.loads(handle.read())
def detect_dataset(old_path, new_path):
    """Reconcile a freshly downloaded file with the previously saved one.

    Returns True when a previous version existed (the download at *new_path*
    has been promoted over *old_path* or discarded as identical), False when
    this is the first download and *old_path* already holds the data.
    """
    if not os.path.exists(new_path):
        # No temporary "new_" file was written: first time seeing this set.
        print(" * Previous version not found. Saved.")
        return False
    if generate_sha(old_path) != generate_sha(new_path):
        # Content changed (ignoring line endings): promote the new file.
        os.remove(old_path)
        os.rename(new_path, old_path)
        print(" * Updating data for dataset ...")
    else:
        # Identical content: drop the redundant download.
        os.remove(new_path)
        print(" * Old data detected. Clean and do nothing.")
    return True
#############################################
# Dataset catalogues: dataset id -> CSV download URL on datos.madrid.es.
# Commented-out entries are known datasets that are currently disabled.
# Culture / Entertainment
datasets_ce = {
    '206974': 'http://datos.madrid.es/egob/catalogo/206974-0-agenda-eventos-culturales-100.csv',
    '212504': 'http://datos.madrid.es/egob/catalogo/212504-0-agenda-actividades-deportes.csv',
    # '200652': 'http://datos.madrid.es/egob/catalogo/200652-1-areas-infantiles.csv',
    # '200637': 'http://datos.madrid.es/egob/catalogo/200637-1-areas-mayores.csv',
    # '206717': 'http://datos.madrid.es/egob/catalogo/206717-0-agenda-eventos-bibliotecas.csv',
    '200186': 'http://datos.madrid.es/egob/catalogo/200186-0-polideportivos.csv',
    # '212808': 'http://datos.madrid.es/egob/catalogo/212808-0-espacio-deporte.csv',
    # '200215': 'http://datos.madrid.es/egob/catalogo/200215-0-instalaciones-deportivas.csv',
    '210227': 'http://datos.madrid.es/egob/catalogo/210227-0-piscinas-publicas.csv',
    # '209426': 'http://datos.madrid.es/egob/catalogo/209426-0-templos-catolicas.csv',
    # '209434': 'http://datos.madrid.es/egob/catalogo/209434-0-templos-otros.csv',
    # '208862046': 'http://datos.madrid.es/egob/catalogo/208862-7650046-ocio_salas.csv',
    # '208862164': 'http://datos.madrid.es/egob/catalogo/208862-7650164-ocio_salas.csv',
    # '208862180': 'http://datos.madrid.es/egob/catalogo/208862-7650180-ocio_salas.csv',
    # '217921': 'http://datos.madrid.es/egob/catalogo/217921-0-salas-estudio.csv',
    '200761': 'http://datos.madrid.es/egob/catalogo/200761-0-parques-jardines.csv',
    # '300028': 'http://datos.madrid.es/egob/catalogo/300028-10037314-agenda-turismo.xml'
}
# Buildings / Centers
datasets_bc = {
    # '201747': 'http://datos.madrid.es/egob/catalogo/201747-0-bibliobuses-bibliotecas.csv',
    '212763': 'http://datos.madrid.es/egob/catalogo/212763-0-biblioteca-universitaria.csv',
    '200304': 'http://datos.madrid.es/egob/catalogo/200304-0-centros-culturales.csv',
    # '205244': 'http://datos.madrid.es/egob/catalogo/205244-0-infancia-familia-adolescentes.csv',
    # '200342': 'http://datos.madrid.es/egob/catalogo/200342-0-centros-dia.csv',
    # '209094': 'http://datos.madrid.es/egob/catalogo/209094-0-servicios-sociales.csv',
    # '200337': 'http://datos.madrid.es/egob/catalogo/200337-0-centros-mayores.csv',
    # '205712': 'http://datos.madrid.es/egob/catalogo/205712-0-servicios-sociales.csv',
    # '205732': 'http://datos.madrid.es/egob/catalogo/205732-0-servicios-sociales.csv',
    # '206117': 'http://datos.madrid.es/egob/catalogo/206117-0-entidades-participacion-ciudadan.csv',
    # '202781': 'http://datos.madrid.es/egob/catalogo/202781-0-entidades-participacion-ciudadan.csv',
    # '214440': 'http://datos.madrid.es/egob/catalogo/214440-0-farmacias-guardia.csv',
    # '202162': 'http://datos.madrid.es/egob/catalogo/202162-0-instalaciones-accesibles-municip.csv',
    # '202180': 'http://datos.madrid.es/egob/catalogo/202180-0-instalaciones-accesibles-no-muni.csv',
    # '216619': 'http://datos.madrid.es/egob/catalogo/216619-0-wifi-municipal.csv',
    # '202311': 'http://datos.madrid.es/egob/catalogo/202311-0-colegios-publicos.csv',
    # '202318': 'http://datos.madrid.es/egob/catalogo/202318-0-escuelas-infantiles.csv',
    # '212790': 'http://datos.madrid.es/egob/catalogo/212790-0-centros-educacion.csv',
    # '212904': 'http://datos.madrid.es/egob/catalogo/212904-0-centros-ensenanza.csv',
    # '212816': 'http://datos.madrid.es/egob/catalogo/212816-0-investigacion.csv',
    '211642': 'http://datos.madrid.es/egob/catalogo/211642-0-bomberos-parques.csv',
    '201544': 'http://datos.madrid.es/egob/catalogo/201544-0-centros-salud.csv',
    '212769': 'http://datos.madrid.es/egob/catalogo/212769-0-atencion-medica.csv',
    # '202105': 'http://datos.madrid.es/egob/catalogo/202105-0-mercadillos.csv',
    # '200967': 'http://datos.madrid.es/egob/catalogo/200967-0-mercados.csv',
    # '300048': 'http://datos.madrid.es/egob/catalogo/300048-0-ancianos-residencias-apartamento.csv',
    '207044': 'http://datos.madrid.es/egob/catalogo/207044-0-oficina-policia.csv',
    # '203166': 'http://datos.madrid.es/egob/catalogo/203166-0-universidades-educacion.csv',
    # '212774': 'http://datos.madrid.es/egob/catalogo/212774-0-atencion-social.csv',
    # '212841': 'http://datos.madrid.es/egob/catalogo/212841-0-oficinas-correos.csv',
    # '212846': 'http://datos.madrid.es/egob/catalogo/212846-0-oficinas-correos.csv',
    # '205736': 'http://datos.madrid.es/egob/catalogo/205736-0-servicios-sociales.csv'
}
# Viability (currently all disabled)
datasets_v = {
    # '21241109': 'http://datos.madrid.es/egob/catalogo/212411-9-madrid-avisa.csv',
    # '21241111': 'http://datos.madrid.es/egob/catalogo/212411-11-madrid-avisa.csv',
    # '211346': 'http://datos.madrid.es/egob/catalogo/211346-0-estaciones-acusticas.xls',
    # '215885': 'http://datos.madrid.es/egob/catalogo/215885-0-contaminacion-ruido.txt',
    # '212629': 'http://datos.madrid.es/egob/catalogo/212629-0-estaciones-control-aire.xls',
    # '212531': 'http://datos.madrid.es/egob/catalogo/212531-7916318-calidad-aire-tiempo-real.txt',
    # '209799': 'http://datos.madrid.es/egob/catalogo/209799-3-contenedores_pilas_marquesinas.csv',
    # '204410': 'http://datos.madrid.es/egob/catalogo/204410-1-contenedores-ropa.csv',
    # '212616': 'http://datos.madrid.es/egob/catalogo/212616-18-policia-estadisticas.xls',
    # '200284': 'http://datos.madrid.es/egob/catalogo/200284-0-puntos-limpios.csv'
}
# Transport (currently all disabled)
datasets_t = {
    # '202625': 'http://datos.madrid.es/egob/catalogo/202625-0-aparcamientos-publicos.csv',
    # '202584': 'http://datos.madrid.es/egob/catalogo/202584-0-aparcamientos-residentes.csv',
    # '208083': 'http://datos.madrid.es/egob/catalogo/208083-0-estacionamiento-pmr.xls',
    # '208789': 'http://datos.madrid.es/egob/catalogo/208789-7648433-transportes-emt-xls.xls',
    # '202087': 'http://datos.madrid.es/egob/catalogo/202087-0-trafico-intensidad.xml',
    # '202468': 'http://datos.madrid.es/egob/catalogo/202468-0-intensidad-trafico.zip',
    # '202062': 'http://datos.madrid.es/egob/catalogo/202062-0-trafico-incidencias-viapublica.xml',
    # '202974': 'http://datos.madrid.es/egob/catalogo/202974-0-trafico-semaforos.xml',
    # '208426': 'http://datos.madrid.es/egob/catalogo/208426-0-trafico-semaforos-no-comunican.xml',
    # '208327': 'http://datos.madrid.es/egob/catalogo/208327-1-transporte-bicicletas-bicimad.csv'
}
# List to create a unified dataset about datasets with same properties.
# Ids listed here are merged into dataset_p (points); all others into
# dataset_e (events) — see add_to_big_dataset().
list_points = ['200186', '200304', '200761', '201544', '207044', '210227', '211642', '212763', '212769']
list_events = ['206974', '212504']
# Relations between create refine scripts and datasets.
# For reuse the same create script: one Refine "creation" JSON can serve
# several datasets; get_project_options() resolves id -> script name.
operations_create = {
    'creation_one': [
        '200637', '200652', '214440', '208327'
    ],
    'creation_five': [
        '200186', '206974', '210227', '212504', '200761', '201747', '212763',
        '200304', '211642', '201544', '212769', '202105', '200967', '207044',
        '212841', '212846', '205736', '200284'
    ],
    'creation_211346': ['211346'],
    'creation_212629': ['212629'],
    'creation_208789': ['208789']
}
# Merge all active catalogues into the single dict the main loop iterates.
datasets = datasets_ce.copy()
datasets.update(datasets_bc)
datasets.update(datasets_v)
datasets.update(datasets_t)
# Folder to save all datasets created
dataset_folder = '/tmp/datasets/'
#############################################
# Main driver: for every catalogued dataset, download it, decide whether it
# changed since the last run, and (re)build the per-dataset and unified CSVs.
# Create datasets folder if it does not exist
if not os.path.exists(dataset_folder):
    os.makedirs(dataset_folder)
print '\nTBADDownloader has started.'
print '\n * Checking (' + str(len(datasets.keys())) + ') datasets ...\n'
# Iterate over datasets
for i in datasets.keys():
    # Save new data or save in the old dataset
    # ext is taken from the URL's file extension (e.g. 'csv').
    ext = os.path.split(datasets[i])[-1].split('.')[-1]
    name_path = dataset_folder + i + '.' + ext
    name_new_path = dataset_folder + 'new_' + i + '.' + ext
    file_path = name_path
    if os.path.exists(name_path):
        # A previous copy exists: download next to it as 'new_<id>.<ext>'
        # so detect_dataset() can compare the two by SHA-1.
        file_path = name_new_path
    # Download dataset with the dict url
    download_dataset(file_path, i, datasets[i])
    # Check if it is old or new with sha
    # detect_dataset() returns False when the content is new/changed.
    if not detect_dataset(name_path, name_new_path):
        # Get Refine Options (create script)
        opt = get_project_options(i)
        r = refine.Refine()
        p = r.new_project(name_path, opt)
        # Apply Refine Operations from json file
        p.apply_operations('../refine_op/clean_' + i + '.json')
        # Write new Dataset with cleaned data
        write_dataset(i, p.export_rows(format='tsv'), add_to_big_dataset)
        # Delete Refine Project to clean memory
        p.delete_project()
    else:
        # Write new Dataset with previous data
        # Re-read the already-cleaned CSV and re-feed its rows into the
        # unified accumulators (no Refine round-trip needed).
        with open(dataset_folder + i + '_dataset.csv', 'r') as f:
            ec_file_list = f.read().split('\r\n')
            for x in range(0, len(ec_file_list) - 1):
                d = ec_file_list[x]
                d = d.replace('"','')
                # Protect ", " sequences inside field values ('__ ') before
                # turning the remaining CSV commas into tabs, then restore.
                d = d.replace(', ','__ ').replace(',','\t').replace('__', ',')
                d = d.split('\t')
                add_to_big_dataset(i, x, d)
# Write or skip updated unified csv
write_big_dataset('dataset_points', dataset_p)
write_big_dataset('dataset_events', dataset_e)
print '\nTBADDownloader has finished.\n'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.