content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python
"""Setup script packaging Twitter Bootstrap static sources for PyPI."""
from setuptools import setup

# BUG FIX: URL previously read 'https:github.com/...' (missing '//').
GITHUB_URL = 'https://github.com/jonas-hagen/bootstrap-sources'
VERSION = '4.0.0-1'

setup(
    author='Jonas Hagen',
    author_email='jonas.hagen3@gmail.com',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        # Fixed to valid trove classifiers ('License :: MIT' and
        # 'Programming Language :: Python 3.6' are not recognized by PyPI).
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
    ],
    description='Twitter Bootstrap sources as pypi package.',
    download_url=('{url}/archive/v{version}.tar.gz'
                  .format(url=GITHUB_URL,
                          version=VERSION)),
    license='MIT',
    maintainer='Jonas Hagen',
    maintainer_email='jonas.hagen3@gmail.com',
    name='bootstrap-sources',
    packages=['bootstrap_sources'],
    package_data={
        'bootstrap_sources': ['js/*.js', 'scss/*.scss', 'scss/*/*.scss'],
    },
    # BUG FIX: '>=3.5.*, <4' is invalid per PEP 440 (no wildcard with >=).
    python_requires='>=3.5, <4',
    url=GITHUB_URL,
    version=VERSION,
)
|
#
# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Download pre-built docTTTTTquery expanded CORD-19 indexes."""
import argparse
import sys
sys.path.insert(0, '../pyserini/')
import pyserini.util
# Pre-built docTTTTTquery-expanded Lucene indexes per CORD-19 release date.
# Each date maps to the [abstract, full-text, paragraph] index archives on Dropbox.
all_indexes = {
    '2020-06-19': ['https://www.dropbox.com/s/jqdcub1newrb5pa/lucene-index-cord19-abstract-docT5query-2020-06-19.tar.gz?dl=1',
                   'https://www.dropbox.com/s/bmdbg103zmjufnj/lucene-index-cord19-full-text-docT5query-2020-06-19.tar.gz?dl=1',
                   'https://www.dropbox.com/s/7dajfdff192dy9k/lucene-index-cord19-paragraph-docT5query-2020-06-19.tar.gz?dl=1'],
    '2020-07-16': ['https://www.dropbox.com/s/gzq1d305oe465t1/lucene-index-cord19-abstract-docT5query-2020-07-16.tar.gz?dl=1',
                   'https://www.dropbox.com/s/63gbbzqossemkzk/lucene-index-cord19-full-text-docT5query-2020-07-16.tar.gz?dl=1',
                   'https://www.dropbox.com/s/9fml7m2si7qbm17/lucene-index-cord19-paragraph-docT5query-2020-07-16.tar.gz?dl=1']
}
def main(args):
    """Download and unpack every pre-built index for the requested release date.

    args must have a 'date' (YYYY-MM-DD key into all_indexes) and a boolean
    'force' forwarded to the downloader. Exits with status 1 on unknown dates.
    """
    if args.date not in all_indexes:
        # Report the error on stderr and exit non-zero so callers/scripts can
        # detect the failure (previously this printed to stdout and exited 0).
        print('Unknown index {}; available dates: {}'.format(
            args.date, ', '.join(sorted(all_indexes))), file=sys.stderr)
        sys.exit(1)
    for index in all_indexes[args.date]:
        pyserini.util.download_and_unpack_index(index, force=args.force)
    print('Done!')
if __name__ == '__main__':
    # Build the CLI from the module docstring and hand the parsed args to main().
    cli = argparse.ArgumentParser(description=__doc__)
    cli.add_argument('--date', type=str, metavar='YYYY-MM-DD', required=True, help='Date of the CORD-19 release.')
    cli.add_argument('--force', action='store_true', help='Overwrite existing data.')
    main(cli.parse_args())
|
"""
# trans-tool
# The translation files checker and syncing tool.
#
# Copyright ©2021 Marcin Orlowski <mail [@] MarcinOrlowski.com>
# https://github.com/MarcinOrlowski/trans-tool/
#
"""
import re
from typing import Dict, Union
from transtool.decorators.overrides import overrides
from transtool.prop.items import Translation
from transtool.report.group import ReportGroup
from .base.check import Check
# noinspection PyUnresolvedReferences
class KeyFormat(Check):
    """
    This check verifies that translation keys follow specified naming convention.
    """

    def __init__(self, config: Union[Dict, None] = None):
        super().__init__(config)
        # Keys are validated per file; no reference file is needed.
        self.is_single_file_check = True

    @overrides(Check)
    # Do NOT "fix" the PropFile reference and do not import it, or you step on circular dependency!
    def check(self, translation_file: 'PropFile', reference_file: 'PropFile' = None) -> ReportGroup:
        self.need_valid_config()
        report = ReportGroup('Key naming pattern.')
        if not translation_file.items:
            return report
        key_regex = re.compile(self.config['pattern'])
        # Iterate all items (not a filtered view) so indices stay aligned with
        # the real line numbers in the file.
        for idx, entry in enumerate(translation_file.items):
            # Only Translation entries carry keys worth validating.
            if not isinstance(entry, Translation):
                continue
            if key_regex.match(entry.key) is None:
                report.error(idx + 1, 'Invalid key name format.', entry.key)
        return report

    @overrides(Check)
    def get_default_config(self) -> Dict:
        # Default: keys start with a letter, may contain dots/underscores/digits
        # inside, and must end with an alphanumeric character.
        return {
            'pattern': r'^[a-zA-Z]+[a-zA-Z0-9_.]*[a-zA-Z0-9]+$',
        }
|
class Recorder:
    """Keeps a running score of a model-vs-domain selection competition.

    Each appended record describes one round; whichever side proposed the
    higher value earns points (3 if it also matched the overall best value,
    1 otherwise). Records are annotated with the running scores and stored.
    """

    def __init__(self):
        self.model_score = 0    # cumulative points earned by the model side
        self.domain_score = 0   # cumulative points earned by the domain side
        self.record_list = []   # all appended (annotated) records, in order

    def get_record_list(self):
        """Return the list of all appended records."""
        return self.record_list

    def append(self, record):
        """Score one round and store the annotated record.

        Expected keys: size, game, index, value, best_value, domain_selected,
        model_selected, domain_best_value, model_best_value. None is ignored.
        """
        if record is None:
            return
        action = bool(record["domain_selected"] or record["model_selected"])
        domain_best = record["domain_best_value"]
        model_best = record["model_best_value"]
        if domain_best != model_best:
            # A tie awards no points; otherwise the winning side gets 3 points
            # when it matched the overall best value, 1 point otherwise.
            if domain_best > model_best:
                self.domain_score += 3 if domain_best == record["best_value"] else 1
            else:
                self.model_score += 3 if model_best == record["best_value"] else 1
        # BUG FIX: this was hard-coded to True, discarding the computed flag.
        record["action"] = action
        record["model_score"] = self.model_score
        record["domain_score"] = self.domain_score
        self.record_list.append(record)
        if action or len(self.record_list) % 10 == 0:
            # print current result
            print("Current result: MODEL:{model_score} ; DOMAIN:{domain_score} "
                  "\t\tsize: {size}, game: {game}, index: {round}, "
                  "value: {val}, best_value: {best_val}".format(model_score=self.model_score,
                                                                domain_score=self.domain_score, size=record["size"],
                                                                game=record["game"], round=record["index"],
                                                                val=record["value"],
                                                                best_val=record["best_value"]))
|
# CPU: 0.05 s
start, end, _ = map(int, input().split())
walk = list(map(int, input().split()))
bus = list(map(int, input().split()))
interval = list(map(int, input().split()))
for idx in range(len(walk)):
# Time taken on a bus
if idx != 0:
start += bus[idx - 1]
# Time to walk to the next station
start += walk[idx]
# Time to wait for the bus
if idx != len(walk) - 1 and start % interval[idx] != 0:
start += interval[idx] - start % interval[idx]
print("yes" if start <= end else "no")
|
import json
import os
import shutil
import hashlib
from shutil import copyfile
from .steady_download_and_compute_sha1 import steady_download_and_compute_sha1
import random
import time
from .filelock import FileLock
import mtlogging
# TODO: implement cleanup() for Sha1Cache
# removing .record.json and .hints.json files that are no longer relevant
class Sha1Cache():
    """Content-addressed local file cache keyed by sha1.

    Cached files live at <dir>/<sha1[0]>/<sha1[1:3]>/<sha1>. Two kinds of
    sidecar files are kept alongside cache paths:
      * '<stat-hash>.record.json' memoizes (stat -> sha1) so unchanged files
        are not re-hashed;
      * '<sha1>.hints.json' records known on-disk locations of a sha1
        (consulted by findFile).
    """

    def __init__(self):
        # None means "not overridden": fall back to the environment variables.
        self._directory = None
        self._alternate_directories = None

    def directory(self):
        """Primary (writable) cache directory."""
        if self._directory:
            return self._directory
        else:
            return os.getenv('KBUCKET_CACHE_DIR', '/tmp/sha1-cache')

    def alternateDirectories(self):
        """Read-only fallback cache directories (colon-separated env var)."""
        if self._alternate_directories:
            return self._alternate_directories
        else:
            val = os.getenv('KBUCKET_CACHE_DIR_ALT', None)
            if val:
                return val.split(':')
            else:
                return []

    def setDirectory(self, directory):
        """Override the primary cache directory."""
        self._directory = directory

    def setAlternateDirectories(self, directories):
        """Override the list of alternate (read-only) cache directories."""
        self._alternate_directories = directories

    def findFile(self, sha1):
        """Return a local path holding content with this sha1, or None.

        Checks, in order: the primary cache, the alternate caches, then any
        still-valid locations recorded in the '.hints.json' sidecar.
        """
        path, alternate_paths = self._get_path(
            sha1, create=False, return_alternates=True)
        # if file is available return it
        if os.path.exists(path):
            return path
        # return first alternate path that exists
        for altpath in alternate_paths:
            if os.path.exists(altpath):
                return altpath
        hints_fname = path + '.hints.json'
        # if path.hints.json exists then read it
        if os.path.exists(hints_fname):
            hints = _read_json_file(hints_fname, delete_on_error=True)
            if hints and ('files' in hints):
                files = hints['files']
                matching_files = []
                # Keep only hint entries whose recorded stat still matches the
                # file currently on disk (the content is assumed unchanged).
                for file in files:
                    path0 = file['stat']['path']
                    if os.path.exists(path0) and os.path.isfile(path0):
                        stat_obj0 = _get_stat_object(path0)
                        if stat_obj0:
                            if (_stat_objects_match(stat_obj0, file['stat'])):
                                matching_files.append(file)
                if matching_files:
                    # Prune stale entries before returning the first valid one.
                    hints['files'] = matching_files
                    try:
                        _write_json_file(hints, hints_fname)
                    except:
                        print('Warning: problem writing hints file: ' + hints_fname)
                    return matching_files[0]['stat']['path']
                else:
                    _safe_remove_file(hints_fname)
            else:
                print(
                    'Warning: failed to load hints json file, or invalid file. Removing: ' + hints_fname)
                _safe_remove_file(hints_fname)
        return None

    def downloadFile(self, url, sha1, target_path=None, size=None, verbose=False, show_progress=False):
        """Download *url* into the cache (or *target_path*) and verify its sha1.

        Raises Exception when the downloaded content does not hash to *sha1*.
        Returns the final path of the file.
        """
        alternate_target_path = False
        if target_path is None:
            target_path = self._get_path(sha1, create=True)
        else:
            alternate_target_path = True
        path_tmp = target_path + '.downloading.' + _random_string(6)
        # BUG FIX: 'size > 10000' raised TypeError when size was None (its
        # default); treat an unknown size as 0 for the announcement decision.
        announce = verbose or show_progress or (size or 0) > 10000
        if announce:
            print(
                'Downloading file --- ({}): {} -> {}'.format(_format_file_size(size), url, target_path))
        timer = time.time()
        sha1b = steady_download_and_compute_sha1(url=url, target_path=path_tmp)
        elapsed = time.time() - timer
        if announce:
            print('Downloaded file ({}) in {} sec.'.format(_format_file_size(size), elapsed))
        if not sha1b:
            # Download failed outright; clean up the partial file. The mismatch
            # check below then raises, since sha1 cannot equal a falsy sha1b.
            if os.path.exists(path_tmp):
                _safe_remove_file(path_tmp)
        if sha1 != sha1b:
            if os.path.exists(path_tmp):
                _safe_remove_file(path_tmp)
            raise Exception(
                'sha1 of downloaded file does not match expected {} {}'.format(url, sha1))
        if alternate_target_path:
            # Caller supplied the destination: replace whatever is there and
            # record the sha1 so later lookups can find this copy.
            if os.path.exists(target_path):
                _safe_remove_file(target_path)
            _rename_file(path_tmp, target_path, remove_if_exists=True)
            self.reportFileSha1(target_path, sha1)
        else:
            # Cache-internal destination: another process may have won the race.
            if not os.path.exists(target_path):
                _rename_file(path_tmp, target_path, remove_if_exists=False)
            else:
                _safe_remove_file(path_tmp)
        return target_path

    def moveFileToCache(self, path):
        """Move *path* into the cache by content; return the cache path.

        If the content is already cached, the source file is simply removed.
        """
        sha1 = self.computeFileSha1(path)
        path0 = self._get_path(sha1, create=True)
        if os.path.exists(path0):
            if path != path0:
                _safe_remove_file(path)
        else:
            # Stage under a temporary name, then atomically rename into place.
            tmp_fname = path0 + '.copying.' + _random_string(6)
            _rename_or_copy(path, tmp_fname)
            _rename_file(tmp_fname, path0, remove_if_exists=False)
        return path0

    @mtlogging.log()
    def copyFileToCache(self, path):
        """Copy *path* into the cache; return (cache_path, sha1)."""
        sha1 = self.computeFileSha1(path)
        path0 = self._get_path(sha1, create=True)
        if not os.path.exists(path0):
            tmp_path = path0 + '.copying.' + _random_string(6)
            copyfile(path, tmp_path)
            _rename_file(tmp_path, path0, remove_if_exists=False)
        return path0, sha1

    @mtlogging.log()
    def computeFileSha1(self, path, _known_sha1=None):
        """Return the sha1 of the file at *path*, memoized via sidecar files.

        A '.record.json' keyed by a hash of the file's stat() lets us skip
        re-hashing unchanged files; the sha1's '.hints.json' gains an entry
        recording where this content lives (used by findFile). Pass
        _known_sha1 to record an externally computed digest without hashing.
        """
        path = os.path.abspath(path)
        basename = os.path.basename(path)
        if len(basename) == 40:
            # suspect it is itself a file in the cache
            if self._get_path(sha1=basename) == path:
                # in that case we don't need to compute
                return basename
        aa = _get_stat_object(path)
        aa_hash = _compute_string_sha1(json.dumps(aa, sort_keys=True))
        path0 = self._get_path(aa_hash, create=True) + '.record.json'
        if not _known_sha1:
            if os.path.exists(path0):
                obj = _read_json_file(path0, delete_on_error=True)
                if obj:
                    bb = obj['stat']
                    if _stat_objects_match(aa, bb):
                        if obj.get('sha1', None):
                            # Unchanged since last time; reuse the memoized sha1.
                            return obj['sha1']
        if _known_sha1 is None:
            sha1 = _compute_file_sha1(path)
        else:
            sha1 = _known_sha1
        if not sha1:
            return None
        obj = dict(
            sha1=sha1,
            stat=aa
        )
        try:
            _write_json_file(obj, path0)
        except:
            print('Warning: problem writing .record.json file: ' + path0)
        path1 = self._get_path(sha1, create=True, directory=self.directory()) + '.hints.json'
        if os.path.exists(path1):
            hints = _read_json_file(path1, delete_on_error=True)
        else:
            hints = None
        if not hints:
            hints = {'files': []}
        hints['files'].append(obj)
        try:
            _write_json_file(hints, path1)
        except:
            print('Warning: problem writing .hints.json file: ' + path1)
        # todo: use hints for findFile
        return sha1

    def reportFileSha1(self, path, sha1):
        """Record an externally known sha1 for *path* (skips re-hashing)."""
        self.computeFileSha1(path, _known_sha1=sha1)

    def _get_path(self, sha1, *, create=True, directory=None, return_alternates=False):
        """Map a sha1 to its cache path <dir>/<s[0]>/<s[1:3]>/<sha1>.

        With return_alternates=True, also return the corresponding paths in
        every alternate directory.
        """
        if directory is None:
            directory = self.directory()
        path1 = os.path.join(sha1[0], sha1[1:3])
        path0 = os.path.join(directory, path1)
        if create:
            if not os.path.exists(path0):
                try:
                    os.makedirs(path0)
                except:
                    # Losing a creation race is fine; anything else is fatal.
                    if not os.path.exists(path0):
                        raise Exception('Unable to make directory: ' + path0)
        if not return_alternates:
            return os.path.join(path0, sha1)
        else:
            altpaths = []
            alt_dirs = self.alternateDirectories()
            for altdir in alt_dirs:
                altpaths.append(os.path.join(altdir, path1, sha1))
            return os.path.join(path0, sha1), altpaths
@mtlogging.log()
def _compute_file_sha1(path):
if not os.path.exists(path):
return None
if (os.path.getsize(path) > 1024 * 1024 * 100):
print('Computing sha1 of {}'.format(path))
BLOCKSIZE = 65536
sha = hashlib.sha1()
with open(path, 'rb') as file:
buf = file.read(BLOCKSIZE)
while len(buf) > 0:
sha.update(buf)
buf = file.read(BLOCKSIZE)
return sha.hexdigest()
def _get_stat_object(fname):
try:
stat0 = os.stat(fname)
obj = dict(
path=fname,
size=stat0.st_size,
ino=stat0.st_ino,
mtime=stat0.st_mtime,
ctime=stat0.st_ctime
)
return obj
except:
return None
def _stat_objects_match(aa, bb):
str1 = json.dumps(aa, sort_keys=True)
str2 = json.dumps(bb, sort_keys=True)
return (str1 == str2)
def _compute_string_sha1(txt):
hash_object = hashlib.sha1(txt.encode('utf-8'))
return hash_object.hexdigest()
def _safe_remove_file(fname):
try:
os.remove(fname)
except:
print('Warning: unable to remove file that we thought existed: ' + fname)
@mtlogging.log()
def _read_json_file(path, *, delete_on_error=False):
    """Read JSON from *path* under a shared FileLock; return None on failure.

    With delete_on_error=True an unreadable/unparsable file is removed so it
    cannot poison future reads.
    """
    with FileLock(path + '.lock', exclusive=False):
        try:
            with open(path) as fh:
                return json.load(fh)
        except:
            if not delete_on_error:
                print('Warning: Unable to read or parse json file: ' + path)
                return None
            print('Warning: Unable to read or parse json file. Deleting: ' + path)
            try:
                os.unlink(path)
            except:
                print('Warning: unable to delete file: ' + path)
            return None
def _write_json_file(obj, path):
    """Serialize *obj* as JSON to *path* under an exclusive FileLock."""
    with FileLock(path + '.lock', exclusive=True):
        with open(path, 'w') as fh:
            json.dump(obj, fh)
def _safe_list_dir(path):
try:
ret = os.listdir(path)
return ret
except:
return []
def _rename_or_copy(path1, path2):
if os.path.abspath(path1) == os.path.abspath(path2):
return
try:
os.rename(path1, path2)
except:
try:
shutil.copyfile(path1, path2)
except:
raise Exception('Problem renaming or copying file: {} -> {}'.format(path1, path2))
@mtlogging.log()
def _rename_file(path1, path2, remove_if_exists):
if os.path.abspath(path1) == os.path.abspath(path2):
return
if os.path.exists(path2):
if remove_if_exists:
try:
os.unlink(path2)
except:
# maybe it was removed by someone else
pass
else:
# already exists, let's just let it be
return
try:
os.rename(path1, path2)
except:
if os.path.exists(path2):
if not remove_if_exists:
# all good
return
raise Exception('Problem renaming file: {} -> {}'.format(path1, path2))
else:
raise Exception('Problem renaming file:: {} -> {}'.format(path1, path2))
# thanks: https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
def _format_file_size(size):
if not size:
return 'Unknown'
if size <= 1024:
return '{} B'.format(size)
return _sizeof_fmt(size)
def _sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
def _random_string(num_chars):
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
return ''.join(random.choice(chars) for _ in range(num_chars))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the 'teamwork' app.

    Creates Task, TeamMember, WorkDay and WorkTrackerText, then links each
    Task to a WorkDay via a foreign key (added last so WorkDay exists first).
    """

    # First migration of the app: nothing to depend on.
    dependencies = [
    ]

    operations = [
        # A single to-do item; linked to a WorkDay by the AddField below.
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('task', models.CharField(max_length=255)),
            ],
        ),
        # A member of the team, identified by a unique email address.
        migrations.CreateModel(
            name='TeamMember',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=150)),
                ('email', models.EmailField(unique=True, max_length=254)),
                ('preferred_notifying_time', models.TimeField(null=True, blank=True)),
            ],
        ),
        # One working day belonging to one team member.
        migrations.CreateModel(
            name='WorkDay',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateField(editable=False)),
                ('person', models.ForeignKey(to='teamwork.TeamMember')),
            ],
        ),
        # Free-form tracker text.
        migrations.CreateModel(
            name='WorkTrackerText',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('text', models.TextField()),
            ],
        ),
        # Added after both models exist so the FK target is already created.
        migrations.AddField(
            model_name='task',
            name='day',
            field=models.ForeignKey(to='teamwork.WorkDay'),
        ),
    ]
|
from ._utils import getImageFromElementCSS
from ._conditions import is_crime_news
from ._request import simple_get
from ._parser import parse, tojson
class JoyScraper:
    """Scraper for myjoyonline.com news pages."""

    @staticmethod
    def get_headlines(retry=False, retrytimes=0, retrymillis=0):
        """Fetch the Ghana news page and return headline data as JSON.

        Returns a JSON string of dicts (link, title, category, image), or
        None when the page, main entry wrapper, or article list is missing.
        """
        response = simple_get('https://www.myjoyonline.com/ghana-news/news.php', retry, retrytimes, retrymillis)
        response = parse(response)
        # BUG FIX: originally "if response is not ''" — an identity comparison
        # with a literal (SyntaxWarning on CPython >= 3.8 and never a reliable
        # emptiness check). Use equality instead.
        if response != '':
            mainentry = response.find('div', class_="entry-page-wrapper")
            if mainentry:
                articles = mainentry.findAll('article')
                if articles:
                    result = []
                    for article in articles:
                        data = {}
                        data['link'] = article.div.find('a')['href']
                        data['title'] = article.div.find('h2', class_="entry-title").text
                        category = article.find('div', class_="entry-category")
                        # Articles without an explicit category are the lead story.
                        data['category'] = category.text if category else 'Main Headline'
                        data['image'] = getImageFromElementCSS(article.div['style'])
                        result.append(data)
                    return tojson(result)
        return None
def get_crimenews(retry=False, retrytimes=0, retrymillis=0):
    """Scrape the crime-news section of the Ghana news page.

    Returns a JSON string of article dicts (link, title, category, image,
    date, views — whichever are present), or None when the matching section
    or its articles cannot be found.

    NOTE(review): unlike get_headlines this is not marked @staticmethod —
    confirm in the original source whether it belongs on JoyScraper or at
    module level.
    """
    response = simple_get('https://www.myjoyonline.com/ghana-news/news.php', retry, retrytimes, retrymillis)
    response = parse(response)
    if response:
        content = response.findAll('div', class_="wpb_wrapper")
        if content:
            for cont in content:
                # is_crime_news is a matcher callable handed to find().
                result = cont.find(is_crime_news)
                if result:
                    articles = result.findAll('article')
                    if articles:
                        # NOTE: 'result' is reused here as the output list,
                        # shadowing the matched container element above.
                        result = []
                        for article in articles:
                            data = {}
                            # Extracting the links
                            data['link'] = article.div.find('a')['href']
                            # Extracting the Trending Title
                            trending_title = article.div.find('h2', class_="entry-title")
                            if trending_title:
                                data['title'] = trending_title.text
                            # Getting the Trending Category
                            trending_cat = article.find('div', class_="entry-category")
                            if trending_cat:
                                data['category'] = trending_cat.text
                            # Getting the Trending Image
                            if article.div.has_attr('style'):
                                style = article.div['style']
                                data['image'] = getImageFromElementCSS(style)
                            # Getting the Sideline Crime New Image
                            imgdiv = article.find('img')
                            if imgdiv:
                                style = imgdiv['style']
                                if style:
                                    data['image'] = getImageFromElementCSS(style)
                            # Getting the Sideline Crime New Title
                            titlediv = article.find('h3', class_="entry-title")
                            if titlediv:
                                data['title'] = titlediv.a.text
                            # Getting the Sideline Crime Date
                            datediv = article.find('span', class_="date-display")
                            if datediv:
                                data['date'] = datediv.text
                            # Getting the Sideline Views Count
                            viewcount = article.find('span', class_="views-count")
                            if viewcount:
                                data['views'] = viewcount.text
                            result.append(data)
                        return tojson(result)
                    else:
                        return None
    # Implicitly returns None when the page, wrapper, or crime section is missing.
|
import os
import sys
from pathlib import Path
import pytest
from isort import exceptions, settings
from isort.settings import Config
from isort.wrap_modes import WrapModes
class TestConfig:
    """Unit tests for isort.settings.Config construction and file filtering."""

    # Shared default Config; tests needing non-default settings build their own.
    instance = Config()

    def test_init(self):
        assert Config()

    def test_init_unsupported_settings_fails_gracefully(self):
        # Unknown keyword arguments raise, and the error object carries the
        # offending setting with its value and source.
        with pytest.raises(exceptions.UnsupportedSettings):
            Config(apply=True)
        try:
            Config(apply=True)
        except exceptions.UnsupportedSettings as error:
            assert error.unsupported_settings == {"apply": {"value": True, "source": "runtime"}}

    def test_known_settings(self):
        # 'known_thirdparty' is an accepted alias; the canonical spelling wins
        # when both are supplied.
        assert Config(known_third_party=["one"]).known_third_party == frozenset({"one"})
        assert Config(known_thirdparty=["two"]).known_third_party == frozenset({"two"})
        assert Config(
            known_third_party=["one"], known_thirdparty=["two"]
        ).known_third_party == frozenset({"one"})

    def test_invalid_settings_path(self):
        with pytest.raises(exceptions.InvalidSettingsPath):
            Config(settings_path="this_couldnt_possibly_actually_exists/could_it")

    def test_invalid_pyversion(self):
        with pytest.raises(ValueError):
            Config(py_version=10)

    def test_invalid_profile(self):
        with pytest.raises(exceptions.ProfileDoesNotExist):
            Config(profile="blackandwhitestylemixedwithpep8")

    def test_is_skipped(self):
        assert Config().is_skipped(Path("C:\\path\\isort.py"))
        assert Config(skip=["/path/isort.py"]).is_skipped(Path("C:\\path\\isort.py"))

    def test_is_supported_filetype(self):
        assert self.instance.is_supported_filetype("file.py")
        assert self.instance.is_supported_filetype("file.pyi")
        assert self.instance.is_supported_filetype("file.pyx")
        assert self.instance.is_supported_filetype("file.pxd")
        assert not self.instance.is_supported_filetype("file.pyc")
        assert not self.instance.is_supported_filetype("file.txt")
        assert not self.instance.is_supported_filetype("file.pex")

    def test_is_supported_filetype_ioerror(self, tmpdir):
        # A nonexistent extensionless path cannot be sniffed and is rejected.
        does_not_exist = tmpdir.join("fake.txt")
        assert not self.instance.is_supported_filetype(str(does_not_exist))

    def test_is_supported_filetype_shebang(self, tmpdir):
        # Extensionless scripts count as Python when they carry a python shebang.
        path = tmpdir.join("myscript")
        path.write("#!/usr/bin/env python\n")
        assert self.instance.is_supported_filetype(str(path))

    def test_is_supported_filetype_editor_backup(self, tmpdir):
        # Editor backup files ('~' suffix) are never treated as source.
        path = tmpdir.join("myscript~")
        path.write("#!/usr/bin/env python\n")
        assert not self.instance.is_supported_filetype(str(path))

    def test_is_supported_filetype_defaults(self, tmpdir):
        assert self.instance.is_supported_filetype(str(tmpdir.join("stub.pyi")))
        assert self.instance.is_supported_filetype(str(tmpdir.join("source.py")))
        assert self.instance.is_supported_filetype(str(tmpdir.join("source.pyx")))

    def test_is_supported_filetype_configuration(self, tmpdir):
        # blocked_extensions removes defaults; supported_extensions adds.
        config = Config(supported_extensions=("pyx",), blocked_extensions=("py",))
        assert config.is_supported_filetype(str(tmpdir.join("stub.pyx")))
        assert not config.is_supported_filetype(str(tmpdir.join("stub.py")))

    @pytest.mark.skipif(
        sys.platform == "win32", reason="cannot create fifo file on Windows platform"
    )
    def test_is_supported_filetype_fifo(self, tmpdir):
        # Non-regular files (FIFOs) must be rejected rather than read.
        fifo_file = os.path.join(tmpdir, "fifo_file")
        os.mkfifo(fifo_file)
        assert not self.instance.is_supported_filetype(fifo_file)

    def test_src_paths_are_combined_and_deduplicated(self):
        src_paths = ["src", "tests"]
        src_full_paths = (Path(os.getcwd()) / f for f in src_paths)
        assert sorted(Config(src_paths=src_paths * 2).src_paths) == sorted(src_full_paths)

    def test_src_paths_supports_glob_expansion(self, tmp_path):
        libs = tmp_path / "libs"
        libs.mkdir()
        requests = libs / "requests"
        requests.mkdir()
        beautifulpasta = libs / "beautifulpasta"
        beautifulpasta.mkdir()
        # "libs/*" expands to every directory under libs/.
        assert sorted(Config(directory=tmp_path, src_paths=["libs/*"]).src_paths) == sorted(
            (beautifulpasta, requests)
        )

    def test_deprecated_multi_line_output(self):
        # Integer multi_line_output values map onto the WrapModes enum.
        assert Config(multi_line_output=6).multi_line_output == WrapModes.VERTICAL_GRID_GROUPED  # type: ignore # noqa
def test_as_list():
    """_as_list strips whitespace and splits comma-separated strings."""
    assert settings._as_list([" one "]) == ["one"]  # type: ignore
    assert settings._as_list("one,two") == ["one", "two"]
def _write_simple_settings(tmp_file):
    """Write a minimal valid [isort] config into *tmp_file*."""
    content = """
[isort]
force_grid_wrap=true
"""
    tmp_file.write_text(content, "utf8")
def test_find_config(tmpdir):
    """_find_config only honours files containing an isort-relevant section."""
    tmp_config = tmpdir.join(".isort.cfg")
    # can't find config if it has no relevant section
    settings._find_config.cache_clear()
    settings._get_config_data.cache_clear()
    tmp_config.write_text(
        """
[section]
force_grid_wrap=true
""",
        "utf8",
    )
    assert not settings._find_config(str(tmpdir))[1]
    # or if it is malformed
    settings._find_config.cache_clear()
    settings._get_config_data.cache_clear()
    tmp_config.write_text("""arstoyrsyan arienrsaeinrastyngpuywnlguyn354q^%$)(%_)@$""", "utf8")
    assert not settings._find_config(str(tmpdir))[1]
    # can when it has either a file format, or generic relevant section
    settings._find_config.cache_clear()
    settings._get_config_data.cache_clear()
    _write_simple_settings(tmp_config)
    assert settings._find_config(str(tmpdir))[1]
def test_find_config_deep(tmpdir):
    """Configs more than MAX_CONFIG_SEARCH_DEPTH levels up are not found."""
    # can't find config if it is further up than MAX_CONFIG_SEARCH_DEPTH
    dirs = [f"dir{i}" for i in range(settings.MAX_CONFIG_SEARCH_DEPTH + 1)]
    tmp_dirs = tmpdir.ensure(*dirs, dirs=True)
    tmp_config = tmpdir.join("dir0", ".isort.cfg")
    settings._find_config.cache_clear()
    settings._get_config_data.cache_clear()
    _write_simple_settings(tmp_config)
    assert not settings._find_config(str(tmp_dirs))[1]
    # but can find config if it is MAX_CONFIG_SEARCH_DEPTH up
    one_parent_up = os.path.split(str(tmp_dirs))[0]
    assert settings._find_config(one_parent_up)[1]
def test_get_config_data(tmpdir):
    """.editorconfig values are normalized: quotes stripped, booleans and
    tab indents converted."""
    test_config = tmpdir.join("test_config.editorconfig")
    test_config.write_text(
        """
root = true
[*.{js,py}]
indent_style=tab
indent_size=tab
[*.py]
force_grid_wrap=false
comment_prefix="text"
[*.{java}]
indent_style = space
""",
        "utf8",
    )
    loaded_settings = settings._get_config_data(
        str(test_config), sections=settings.CONFIG_SECTIONS[".editorconfig"]
    )
    assert loaded_settings
    # "text" loses its quotes, 'false' becomes 0, indent_style=tab -> "\t".
    assert loaded_settings["comment_prefix"] == "text"
    assert loaded_settings["force_grid_wrap"] == 0
    assert loaded_settings["indent"] == "\t"
    assert str(tmpdir) in loaded_settings["source"]
def test_editorconfig_without_sections(tmpdir):
    """An .editorconfig with only the root declaration yields no settings."""
    config_file = tmpdir.join("test_config.editorconfig")
    config_file.write_text("\nroot = true\n", "utf8")
    assert not settings._get_config_data(str(config_file), sections=("*.py",))
def test_get_config_data_with_toml_and_utf8(tmpdir):
    """pyproject.toml must be read as UTF-8 regardless of locale encoding."""
    test_config = tmpdir.join("pyproject.toml")
    # Exception: UnicodeDecodeError: 'gbk' codec can't decode byte 0x84 in position 57
    test_config.write_text(
        """
[tool.poetry]
description = "基于FastAPI + Mysql的 TodoList" # Exception: UnicodeDecodeError
name = "TodoList"
version = "0.1.0"
[tool.isort]
multi_line_output = 3
""",
        "utf8",
    )
    loaded_settings = settings._get_config_data(
        str(test_config), sections=settings.CONFIG_SECTIONS["pyproject.toml"]
    )
    assert loaded_settings
    assert str(tmpdir) in loaded_settings["source"]
def test_as_bool():
    """_as_bool is case-insensitive and rejects anything but t/true/f/false."""
    for truthy in ("TrUe", "true", "t"):
        assert settings._as_bool(truthy) is True
    for falsy in ("FALSE", "faLSE", "f"):
        assert settings._as_bool(falsy) is False
    for invalid in ("", "falsey", "truthy"):
        with pytest.raises(ValueError):
            settings._as_bool(invalid)
def test_find_all_configs(tmpdir):
    """find_all_configs builds a trie mapping each subtree to its nearest
    valid config file; unconfigured subtrees fall back to 'default'."""
    setup_cfg = """
[isort]
profile=django
"""
    pyproject_toml = """
[tool.isort]
profile = "hug"
"""
    isort_cfg = """
[settings]
profile=black
"""
    # [tool.isorts] is not an isort section, so dir4 has no usable config.
    pyproject_toml_broken = """
[tool.isorts]
something = nothing
"""
    dir1 = tmpdir / "subdir1"
    dir2 = tmpdir / "subdir2"
    dir3 = tmpdir / "subdir3"
    dir4 = tmpdir / "subdir4"
    dir1.mkdir()
    dir2.mkdir()
    dir3.mkdir()
    dir4.mkdir()
    setup_cfg_file = dir1 / "setup.cfg"
    setup_cfg_file.write_text(setup_cfg, "utf-8")
    pyproject_toml_file = dir2 / "pyproject.toml"
    pyproject_toml_file.write_text(pyproject_toml, "utf-8")
    isort_cfg_file = dir3 / ".isort.cfg"
    isort_cfg_file.write_text(isort_cfg, "utf-8")
    pyproject_toml_file_broken = dir4 / "pyproject.toml"
    pyproject_toml_file_broken.write_text(pyproject_toml_broken, "utf-8")
    config_trie = settings.find_all_configs(str(tmpdir))
    # Each subdir resolves to its own config file and profile.
    config_info_1 = config_trie.search(str(dir1 / "test1.py"))
    assert config_info_1[0] == str(setup_cfg_file)
    assert config_info_1[0] == str(setup_cfg_file) and config_info_1[1]["profile"] == "django"
    config_info_2 = config_trie.search(str(dir2 / "test2.py"))
    assert config_info_2[0] == str(pyproject_toml_file)
    assert config_info_2[0] == str(pyproject_toml_file) and config_info_2[1]["profile"] == "hug"
    config_info_3 = config_trie.search(str(dir3 / "test3.py"))
    assert config_info_3[0] == str(isort_cfg_file)
    assert config_info_3[0] == str(isort_cfg_file) and config_info_3[1]["profile"] == "black"
    # A file outside every configured subtree gets the default config.
    config_info_4 = config_trie.search(str(tmpdir / "file4.py"))
    assert config_info_4[0] == "default"
|
import streamlit as st
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import pandas as pd
from PIL import Image
pd.set_option('display.max_columns', None)  # show every column when dumping frames
pd.set_option('display.max_rows', None)  # ...and every row
import csv
plt.rcParams.update({'font.size': 18})  # larger default font for all matplotlib plots
import plotly.express as px  # Be sure to import express
from covid_flu import config
def crop_center(pil_img):
    """Crop the largest centered square out of *pil_img* and return it."""
    width, height = pil_img.size
    side = min(width, height)
    # Center the square: equal margins on the longer dimension.
    left = (width - side) // 2
    top = (height - side) // 2
    return pil_img.crop((left, top, left + side, top + side))
def main():
    """Render the 'About us' Streamlit page: styled intro plus team photos."""
    # Inject the page-specific stylesheet inline so Streamlit applies it.
    with open(str(config.streamlit / 'about.css')) as f:
        st.markdown('<style>{}</style>'.format(f.read()), unsafe_allow_html=True)
    st.title("About us")
    st.write("""
We are a group of Data Science Master's students at the Institute for Applied Computational
Science (IACS) at Harvard University.""")
    st.write("### The team")
    # Square-cropped portraits, one per member, displayed with caption links.
    imfiles = ["ben", "dimitris", "matthieu", "will"]
    images = [crop_center(Image.open(config.images / 'about' / f'{f}.jpg')) for f in imfiles]
    captions = [
        "Benjamin Levy (https://benlevyx.github.io)",
        "Dimitris Vamvourellis (https://github.com/dvamvourellis)",
        "Matthieu Meeus (https://github.com/matthieumeeus)",
        "Will Fried (https://github.com/williamfried)"
    ]
    st.image(images, width=300, caption=captions)
    # st.image(Image.open(config.images / 'about' / 'ben.jpg'), width=300, caption="Benjamin Levy(https://benlevyx.github.io)")
    # st.image(Image.open(config.images / 'about' / 'dimitris.jpg'), width=300, caption="Dimitris Vamvourellis")
    # st.image(Image.open(config.images / 'about' / 'will.jpg'), width=300, caption="Will Fried")
    # st.image(Image.open(config.images / 'about' / 'matthieu.jpg'), width=300, caption="Matthieu Meeus")
# -*- coding: utf-8 -*-
from pasa.utils import get_or_else
# Class for the category dictionary.
# Structure of the category dictionary:
# Categorys
# - [Category]<-
#   - [noun]
#   - category_name
class CategoryDict(object):
    """One category-dictionary entry: a category name plus its set of nouns.

    Constructed from a YAML mapping with optional 'category_name' (string)
    and 'noun' (list) keys; missing keys fall back to empty values.
    """

    def __init__(self, yaml):
        self.category_name = get_or_else(yaml, 'category_name', "")
        self.noun = set(get_or_else(yaml, 'noun', []))

    def __repr__(self):
        return "{category_name=%s, noun=%s}" % (self.category_name, self.noun)
|
# -*- coding: utf-8 -*-
#
# This file is part of SplashSync Project.
#
# Copyright (C) 2015-2020 Splash Sync <www.splashsync.com>
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
import base64
import hashlib
class FilesHelper:
    """Helpers for building Splash file field dictionaries."""

    @staticmethod
    def encodeFromRaw(contents, name, filename, path, public_url, b64=False):
        """Encode Splash File from Raw Contents"""
        # ====================================================================#
        # Decode base64 payloads first so the reported size is the real size.
        raw = base64.b64decode(contents) if b64 is True else contents
        # ====================================================================#
        # Build Splash File Data
        return {
            "name": name,
            "filename": filename,
            "md5": FilesHelper.md5(contents, b64),
            "path": path,
            "size": len(raw),
        }

    @staticmethod
    def md5(contents, b64=False):
        """Detect File Md5"""
        if not isinstance(contents, (bytes, str)):
            return None
        payload = base64.b64decode(contents) if b64 is True else contents
        return hashlib.md5(payload).hexdigest()
class ImagesHelper(FilesHelper):
    """Helpers for building Splash image field dictionaries (extends FilesHelper)."""

    @staticmethod
    def encodeFromRaw(contents, name, filename, path, public_url, b64=False):
        """Encode Splash Image from Raw Contents"""
        if not isinstance(contents, (bytes, str)):
            return None
        # ====================================================================#
        # Detect Base64 Images
        if b64 is True:
            image = base64.b64decode(contents)
        else:
            image = contents
        # ====================================================================#
        # Detect Image Dimensions
        dims = ImagesHelper.get_pil_dims(image)
        # ====================================================================#
        # Build Splash Image Data
        return {
            "name": name,
            "filename": filename,
            "md5": ImagesHelper.md5(contents, b64),
            "path": path,
            "size": len(image),
            "url": public_url,
            "width": dims[0],
            "height": dims[1],
        }

    @staticmethod
    def encodeFromFile(base64_content):
        """Encode Splash Image from File """
        raise NotImplementedError("Not implemented yet.")

    @staticmethod
    def is_image(contents, b64=False):
        """Check if File Contents is an Image"""
        return ImagesHelper.get_extension(contents, b64) is not None

    @staticmethod
    def get_extension(contents, b64=False):
        """Detect Extension if Raw File Contents is an Image"""
        if not isinstance(contents, (bytes, str)):
            return None
        # NOTE: imghdr is deprecated (PEP 594) and removed in Python 3.13;
        # migrating to PIL or the 'filetype' package should be planned.
        import imghdr
        if b64 is True:
            return imghdr.what(None, h=base64.b64decode(contents))
        else:
            return imghdr.what(None, h=contents)

    @staticmethod
    def get_pil_dims(raw_contents):
        """
        Detect Image Dimensions using PIL

        :param raw_contents: bytes of the image file
        :return: (width, height) tuple; (0, 0) when PIL is unavailable
        """
        try:
            from PIL import Image
            import io
        except ImportError:
            # BUG FIX: returning a bare 0 here made encodeFromRaw crash on
            # dims[0]; return a (width, height) pair instead.
            return (0, 0)
        image = Image.open(io.BytesIO(raw_contents))
        return image.size
|
from django.contrib import admin
from ..models import Language, LanguageLang, RetiredLanguage, \
MacroLanguageMapping, Script, ScriptLang, Country, CountryLang, \
Subdivision, SubdivisionLang, SubdivisionType, SubdivisionTypeLang, Lang, \
LangLang, Locale, LocaleLang
class LanguageAdmin(admin.ModelAdmin):
    """Admin for ISO 639 languages with inline-editable code columns."""
    list_display = ['id', 'iso_639_3', 'iso_639_1', 'iso_639_2b', 'iso_639_2t', 'scope', 'type']
    list_display_links = ['id']
    list_editable = ['iso_639_3', 'iso_639_1', 'iso_639_2b', 'iso_639_2t', 'scope', 'type']
    search_fields = ('iso_639_3', 'iso_639_1', 'iso_639_2b', 'iso_639_2t')
admin.site.register(Language, LanguageAdmin)
class LanguageLangAdmin(admin.ModelAdmin):
    """Admin for localized language names, searchable by ISO codes."""
    list_display = ['id', 'lang', 'language', 'ref_name', 'print_name', 'inverted_name']
    list_display_links = ['language']
    list_editable = ['ref_name', 'print_name', 'inverted_name']
    search_fields = [
        'print_name',
        'inverted_name',
        'language__iso_639_3',
        'language__iso_639_2b',
        'language__iso_639_2t',
        'language__iso_639_1',
    ]
admin.site.register(LanguageLang, LanguageLangAdmin)
class RetiredLanguageAdmin(admin.ModelAdmin):
    """Admin for retired ISO 639-3 codes and their replacements."""
    list_display = ['id', 'iso_639_3', 'ref_name', 'reason', 'change_to', 'ret_remedy', 'effective_date']
    list_display_links = ['id']
    list_editable = ['iso_639_3', 'ref_name', 'reason', 'change_to', 'ret_remedy', 'effective_date']
    search_fields = ('iso_639_3', 'ref_name', 'change_to', 'ret_remedy')
admin.site.register(RetiredLanguage, RetiredLanguageAdmin)
class MacroLanguageMappingAdmin(admin.ModelAdmin):
    """Admin for macrolanguage -> individual language mappings."""
    list_display = ('id', 'macrolanguage', 'status', 'individual_language', 'retired_individual_language')
    list_display_links = ('id',)
    raw_id_fields = ['macrolanguage', 'individual_language', 'retired_individual_language']
    list_editable = ['status',]
    # BUGFIX: search_fields entries must resolve to text columns; a bare
    # ForeignKey name raises FieldError ("Related Field got invalid lookup:
    # icontains") at search time, so follow the relations to their codes.
    search_fields = ['macrolanguage__iso_639_3', 'status',
                     'individual_language__iso_639_3',
                     'retired_individual_language__iso_639_3']
admin.site.register(MacroLanguageMapping, MacroLanguageMappingAdmin)
class ScriptAdmin(admin.ModelAdmin):
    """Admin for writing-system scripts.

    NOTE(review): the field is named ``iso_15294``; the script-code
    standard is ISO 15924 — confirm against the model definition.
    """
    list_display = ['id', 'iso_15294', 'code_number', 'unicode_alias', 'unicode_version', 'version_date']
    list_display_links = ['iso_15294']
    list_editable = ['code_number', 'unicode_alias', 'unicode_version', 'version_date']
admin.site.register(Script, ScriptAdmin)
class ScriptLangAdmin(admin.ModelAdmin):
    """Admin for localized script names."""
    list_display = ['id', 'lang', 'script', 'name']
    list_display_links = ['script']
    list_editable = ['name']
admin.site.register(ScriptLang, ScriptLangAdmin)
class CountryAdmin(admin.ModelAdmin):
    """Admin for ISO 3166 countries."""
    list_display = ['id', 'alpha_2', 'alpha_3', 'numeric']
    list_display_links = ['id']
admin.site.register(Country, CountryAdmin)
class CountryLangAdmin(admin.ModelAdmin):
    """Admin for localized country names (including UN name variants)."""
    list_display = ['id', 'lang', 'country', 'name', 'official_name', 'common_name', 'UN_formal_name', 'UN_short_name']
    list_display_links = ['country']
    list_editable = ['official_name', 'UN_formal_name', 'UN_short_name']
admin.site.register(CountryLang, CountryLangAdmin)
class SubdivisionAdmin(admin.ModelAdmin):
    """Admin for ISO 3166-2 country subdivisions."""
    list_display = ['id', 'code', 'country', 'parent', 'type']
    list_display_links = ['id']
    search_fields = ('code', 'country__alpha_2')
admin.site.register(Subdivision, SubdivisionAdmin)
class SubdivisionLangAdmin(admin.ModelAdmin):
    """Admin for localized subdivision names."""
    list_display = ['id', 'lang', 'subdivision', 'name']
    list_display_links = ['subdivision']
    search_fields = ('name', 'subdivision__code')
admin.site.register(SubdivisionLang, SubdivisionLangAdmin)
class LangAdmin(admin.ModelAdmin):
    """Admin for language/script/country combinations."""
    list_display = ('id', 'language', 'script', 'country')
    list_display_links = ('id',)
    # large related tables: use raw id widgets instead of select boxes
    raw_id_fields = ('language', 'script', 'country')
    list_editable = ('language', 'script', 'country')
admin.site.register(Lang, LangAdmin)
class LangLangAdmin(admin.ModelAdmin):
    """Admin for localized names of language combinations."""
    list_display = ['id', 'lang', 'namedlang', 'name']
    list_display_links = ['id']
    list_editable = ['name']
admin.site.register(LangLang, LangLangAdmin)
class LocaleAdmin(admin.ModelAdmin):
    """Admin for locales (language + script + country)."""
    list_display = ['id', 'language', 'script', 'country']
    list_display_links = ['id']
    list_editable = ['language', 'script', 'country']
admin.site.register(Locale, LocaleAdmin)
class LocaleLangAdmin(admin.ModelAdmin):
    """Admin for localized locale names."""
    list_display = ['id', 'lang', 'locale', 'name']
    list_display_links = ['id']
    list_editable = ['name']
admin.site.register(LocaleLang, LocaleLangAdmin)
|
#!/usr/bin/python
# -*- coding: utf8 -*-
from datetime import datetime, timedelta
from base_test_case import BaseTestCase
from models import Goal
from flow import app as tst_app
from constants import USER, TASK
from models import Habit, HabitDay, Task, Project, Event, Readable, Quote, Snapshot
from services.agent import ConversationAgent
import json
import tools
class APITestCase(BaseTestCase):
    """End-to-end tests of the JSON REST API handlers.

    setUp() provisions GAE testbed stubs and seeds the datastore with one
    user ("George"), a habit ("Run"), a task and a monthly goal that the
    individual endpoint tests act upon.

    NOTE(review): this module is Python 2 (bare ``print`` statement in
    test_snapshot_calls).
    """
    def setUp(self):
        """Create testbed stubs and seed datastore fixtures."""
        self.set_application(tst_app)
        self.setup_testbed()
        self.init_datastore_stub()
        self.init_memcache_stub()
        self.init_taskqueue_stub()
        self.init_mail_stub()
        self.register_search_api_stub()
        self.init_app_basics()
        self.u = u = self.users[0]
        self.u.Update(name="George")
        self.u.put()
        h = Habit.Create(u)
        h.Update(name="Run")
        h.put()
        done, hd = HabitDay.Toggle(h, datetime.today())
        t = Task.Create(u, "Dont forget the milk")
        t.put()
        g = Goal.CreateMonthly(u, date=datetime.today().date())
        g.Update(text=["Get it done", "Also get exercise"])
        g.put()
    def test_user_calls(self):
        """Profile updates via /api/user/me are persisted and echoed back."""
        # Update self
        DOB = '1985-10-21'
        TZ = 'Africa/Nairobi'
        response = self.post_json("/api/user/me", {
            'timezone': TZ,
            'birthday': DOB
        }, headers=self.api_headers)
        u = response.get('user')
        self.assertIsNotNone(u)
        self.assertEqual(u.get('birthday'), DOB)
        self.assertEqual(u.get('timezone'), TZ)
    def test_habit_calls(self):
        """List / update / commit / toggle / range / delete for habits."""
        # List
        response = self.get_json("/api/habit", {}, headers=self.api_headers)
        h = response.get('habits')[0]
        self.assertEqual(h.get('name'), "Run")
        # Update
        response = self.post_json("/api/habit", {'id': h.get('id'), 'name': 'Walk'}, headers=self.api_headers)
        h = response.get('habit')
        self.assertEqual(h.get('name'), 'Walk')
        # Actions
        today = datetime.now()
        DAY = tools.iso_date(today - timedelta(days=1))
        hid = h.get('id')
        actions = [
            {'action': 'commit', 'expected_prop': 'committed'},
            {'action': 'toggle', 'expected_prop': 'done'}
        ]
        for act in actions:
            params = {
                'habit_id': hid,
                'date': DAY
            }
            response = self.post_json("/api/habit/%s" % act.get('action'), params, headers=self.api_headers)
            hd = response.get('habitday')
            prop = act.get('expected_prop')
            self.assertTrue(hd.get(prop))
        # Recent
        response = self.get_json("/api/habit/recent", {'days': 3}, headers=self.api_headers)
        habitdays = response.get('habitdays')
        self.assertTrue(hd.get('id') in habitdays)
        # Recent range
        params = {
            'start_date': tools.iso_date(today - timedelta(days=2)),
            'end_date': tools.iso_date(today)
        }
        response = self.get_json("/api/habit/range", params, headers=self.api_headers)
        habitdays = response.get('habitdays')
        self.assertTrue(hd.get('id') in habitdays)
        # Delete
        response = self.post_json("/api/habit/delete", {'id': h.get('id')}, headers=self.api_headers)
        h = Habit.get_by_id(h.get('id'), parent=self.u.key)
        self.assertIsNone(h)  # Confirm habit deleted
        self.execute_tasks_until_empty()  # History deletion runs in background
        response = self.get_json("/api/habit/recent", {'days': 3}, headers=self.api_headers)
        habitdays = response.get('habitdays')
        self.assertEqual(len(habitdays), 0)  # Confirm habit history deleted
    def test_goal_calls(self):
        """List and update goals, including unicode goal text."""
        response = self.get_json("/api/goal", {}, headers=self.api_headers)
        goal = response.get('goals')[0]
        self.assertEqual(goal.get('text')[0], "Get it done")
        # Update
        params = {
            'id': goal.get('id'),
            'text': json.dumps(['New goal 1', u'New goal 2 with unicode. ありがとう'])
        }
        response = self.post_json("/api/goal", params, headers=self.api_headers)
        goal = response.get('goal')
        self.assertEqual(goal.get('text')[0], 'New goal 1')
        self.assertEqual(goal.get('text')[1], u'New goal 2 with unicode. ありがとう')
    def test_task_calls(self):
        """List / update / archive / delete for tasks."""
        response = self.get_json("/api/task", {}, headers=self.api_headers)
        h = response.get('tasks')[0]
        self.assertEqual(h.get('title'), "Dont forget the milk")
        # Update
        response = self.post_json("/api/task", {'id': h.get('id'), 'title': 'Dont forget the sugar', 'status': TASK.DONE}, headers=self.api_headers)
        task = response.get('task')
        task_id = task.get('id')
        self.assertEqual(task.get('title'), 'Dont forget the sugar')
        self.assertEqual(task.get('status'), TASK.DONE)
        # Archive complete
        response = self.post_json("/api/task/action", {'action': 'archive_complete'}, headers=self.api_headers)
        task = self.u.get(Task, id=task_id)
        self.assertTrue(task.archived)
        # Delete
        response = self.post_json("/api/task/delete", {'id': h.get('id')}, headers=self.api_headers)
        task = self.u.get(Task, id=task.key.id())
        self.assertIsNone(task)  # Confirm deletion
    def test_project_calls(self):
        """List / update (with milestones) / delete for projects."""
        p = Project.Create(self.u)
        p.Update(urls=['http://www.x.com', 'http://www.y.com'],
                 title="New Project",
                 subhead="Details")
        p.put()
        # List
        response = self.get_json("/api/project", {}, headers=self.api_headers)
        prj = response.get('projects')[0]
        self.assertEqual(prj.get('title'), "New Project")
        # Update
        params = {
            'id': prj.get('id'),
            'title': 'New Name', 'due': '2018-01-01',
            'milestones': json.dumps(["Milestone 1", "", "", "ありがとう", "", "", "", "", "", ""])
        }
        response = self.post_json("/api/project", params, headers=self.api_headers)
        prj = response.get('project')
        self.assertEqual(prj.get('title'), 'New Name')
        self.assertEqual(prj.get('due'), '2018-01-01')
        self.assertEqual(prj.get('milestones')[0], 'Milestone 1')
        # Delete
        response = self.post_json("/api/project/delete", {'id': prj.get('id')}, headers=self.api_headers)
        prj = self.u.get(Project, id=prj.get('id'))
        self.assertIsNone(prj)  # Confirm deletion
    def test_event_calls(self):
        """Batch create, list, update and delete for events."""
        date_start = datetime.today()
        e = Event.Create(self.u, date_start)
        e.Update(title="New Event",
                 details="Details")
        e.put()
        self.assertEqual(e.title, "New Event")
        self.assertEqual(e.details, "Details")
        # Batch create
        params = {'events': json.dumps([
            {'title': "Batch event 1", 'date_start': '2017-01-01', 'date_end': '2017-02-01'},
            {'title': "Batch event 2", 'date_start': '2017-04-04', 'date_end': '2017-04-06'},
            {'title': "Batch event 3", 'date_start': '2017-05-01', 'date_end': '2017-05-20'}
        ])}
        response = self.post_json("/api/event/batch", params, headers=self.api_headers)
        message = response.get('message')
        self.assertEqual(message, "Creating 3 event(s)")
        # List
        response = self.get_json("/api/event", {}, headers=self.api_headers)
        h = response.get('events')[-1]
        self.assertEqual(h.get('title'), "New Event")
        # Update
        response = self.post_json("/api/event", {'id': h.get('id'), 'title': 'New Name'}, headers=self.api_headers)
        h = response.get('event')
        self.assertEqual(h.get('title'), 'New Name')
        # Delete
        response = self.post_json("/api/event/delete", {'id': h.get('id')}, headers=self.api_headers)
        h = self.u.get(Event, id=h.get('id'))
        self.assertIsNone(h)  # Confirm deletion
    def test_readable_calls(self):
        """Create, list, update, full-text search and delete readables."""
        # Create
        r = Readable.CreateOrUpdate(self.u, '1234', title="An Article", source='x', url="http://www.nytimes.com/1")
        r.put()
        self.assertEqual(r.title, "An Article")
        self.assertEqual(r.url, "http://www.nytimes.com/1")
        # List
        response = self.get_json("/api/readable", {}, headers=self.api_headers)
        r = response.get('readables')[0]
        self.assertEqual(r.get('title'), "An Article")
        # Update
        params = {
            'id': r.get('id'),
            'title': 'New Article Name',
            'author': "Andy Clark",
            'source': "New Source",
            'excerpt': "Excerpt...",
            'notes': "Notes...",
            'word_count': 1850,
            'url': 'http://www.example.com'
        }
        response = self.post_json("/api/readable", params, headers=self.api_headers)
        r = response.get('readable')
        for key, val in params.items():
            self.assertEqual(r.get(key), val)
        # Search
        response = self.get_json("/api/readable/search", {'term': "clark"}, headers=self.api_headers)
        readables = response.get('readables')
        self.assertEqual(len(readables), 1)
        # Delete
        response = self.post_json("/api/readable/delete", {'id': r.get('id')}, headers=self.api_headers)
        r = self.u.get(Readable, id=r.get('id'))
        self.assertIsNone(r)  # Confirm deletion
    def test_quote_calls(self):
        """Create, list, update (tags become a list) and search quotes."""
        # Create
        q = Quote.Create(self.u, 'Overheard', "I think therefore I am")
        q.put()
        self.assertEqual(q.content, "I think therefore I am")
        self.assertEqual(q.source, "Overheard")
        # List
        response = self.get_json("/api/quote", {}, headers=self.api_headers)
        q = response.get('quotes')[0]
        self.assertEqual(q.get('content'), "I think therefore I am")
        # Update
        params = {
            'id': q.get('id'),
            'source': 'Somewhere else',
            'content': "I think therefore I'm not",
            'link': 'http://www.example.com',
            'location': 'Location 100 of 1200',
            'tags': 'tag1,tag2'
        }
        response = self.post_json("/api/quote", params, headers=self.api_headers)
        q = response.get('quote')
        for key, val in params.items():
            if key == 'tags':
                # comma-separated tags are returned split into a list
                self.assertEqual(q.get(key), val.split(','))
            else:
                self.assertEqual(q.get(key), val)
        # Search
        response = self.get_json("/api/quote/search", {'term': "think"}, headers=self.api_headers)
        quotes = response.get('quotes')
        self.assertEqual(len(quotes), 1)
    def test_journal_calls(self):
        """Submit, update, fetch today's and list journals."""
        # Create / Submit
        params = {
            'data': json.dumps({
                'metric1': 10
            })
        }
        response = self.post_json("/api/journal/submit", params, headers=self.api_headers)
        jrnl = response.get('journal')
        self.assertIsNotNone(jrnl)
        # Update
        params = {
            'id': jrnl.get('id'),
            'data': json.dumps({
                'metric1': 20
            })
        }
        response = self.post_json("/api/journal", params, headers=self.api_headers)
        jrnl = response.get('journal')
        self.assertIsNotNone(jrnl)
        self.assertEqual(jrnl.get('data').get('metric1'), 20)
        # Today
        response = self.get_json("/api/journal/today", {}, headers=self.api_headers)
        today_jrnl = response.get('journal')
        self.assertEqual(today_jrnl.get('id'), jrnl.get('id'))
        # List
        response = self.get_json("/api/journal", {}, headers=self.api_headers)
        listed_jrnls = response.get('journals')
        self.assertEqual(len(listed_jrnls), 1)
        self.assertEqual(listed_jrnls[0].get('id'), jrnl.get('id'))
    def test_snapshot_calls(self):
        """Create a snapshot directly, then list it via the API."""
        # Create
        snap = Snapshot.Create(self.u, activity="Eating", place="Restaurant", people=["Elizabeth"],
                               metrics={'stress': 2})
        snap.put()
        self.assertEqual(snap.get_data_value('stress'), 2)
        self.assertEqual(snap.activity, "Eating")
        # List
        response = self.get_json("/api/snapshot", {}, headers=self.api_headers)
        snap = response.get('snapshots')[0]
        print response  # NOTE(review): Python 2 print statement; debug leftover
        self.assertEqual(snap.get('activity'), "Eating")
    def test_tracking_calls(self):
        """Tracking-day upsert succeeds with a date, fails without one."""
        # Post an update to the tracking object for Jan 1, 2017
        DATE = "2017-01-01"
        response = self.post_json("/api/tracking", {'date': DATE, 'data': json.dumps({'foo': 'bar'})}, headers=self.api_headers)
        td = response.get('tracking_day')
        self.assertIsNotNone(td)
        self.assertEqual(td.get('iso_date'), DATE)
        self.assertEqual(td.get('data', {}).get('foo'), 'bar')
        # Malformed request with no date
        response = self.post_json("/api/tracking", {'data': json.dumps({'foo': 'bar'})}, headers=self.api_headers)
        self.assertFalse(response.get('success'))
    def test_flowapp_agent_api(self):
        """The conversation agent replies with one of its hello banters."""
        response = self.post_json("/api/agent/flowapp/request", {'message': "hi"}, headers=self.api_headers)
        reply = response.get('reply')
        self.assertTrue(reply in ConversationAgent.HELLO_BANTER)
|
from attrdict import AttrDict
from annotator_store.models import BaseAnnotation, AnnotationQuerySet
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.urls import reverse
from djiffy.models import Canvas
from derrida.books.models import Language
from derrida.common.models import Named, Notable
from derrida.common.utils import absolutize_url
from derrida.people.models import Person
#: intervention type codes to distinguish annotations and insertions
INTERVENTION_TYPES = AttrDict({
    'ANNOTATION': 'A',
    'INSERTION': 'I',
    'BOTH': 'AI',  # stored on tags whose applies_to covers both kinds
})
def get_default_intervener():
    """Return the pk of the :class:`~derrida.people.models.Person` record
    for Jacques Derrida if he exists in the database, otherwise None."""
    try:
        derrida = Person.objects.get(authorized_name='Derrida, Jacques')
    except ObjectDoesNotExist:
        return None
    return derrida.pk
class TagQuerySet(models.QuerySet):
    '''Custom :class:`~django.db.models.QuerySet` for :class:`Tag` to
    make it easy to find tags that apply to a particular kind of
    Intervention.'''

    def for_annotations(self):
        '''Find tags that apply to annotations'''
        lookup = {'applies_to__contains': INTERVENTION_TYPES.ANNOTATION}
        return self.filter(**lookup)

    def for_insertions(self):
        '''Find tags that apply to insertions'''
        lookup = {'applies_to__contains': INTERVENTION_TYPES.INSERTION}
        return self.filter(**lookup)
class Tag(Named, Notable):
    """Controlled-vocabulary tag describing an intervention."""

    APPLIES_TO_CHOICES = (
        (INTERVENTION_TYPES.ANNOTATION, 'Annotations only'),
        (INTERVENTION_TYPES.INSERTION, 'Insertions only'),
        (INTERVENTION_TYPES.BOTH, 'Both Annotations and Insertions'),
    )
    #: which intervention kind(s) this tag may be applied to
    applies_to = models.CharField(
        max_length=2,
        choices=APPLIES_TO_CHOICES,
        default=INTERVENTION_TYPES.BOTH,
        help_text='Type or types of interventions this tag is applicable to.')

    objects = TagQuerySet.as_manager()
class InterventionQuerySet(AnnotationQuerySet):
    """Queryset with ordering helpers for :class:`Intervention`."""

    def sorted_by_page_loc(self):
        '''
        Return a list of :class:`~derrida.interventions.models.Intervention`
        objects sorted by their y value on the page.
        '''
        def page_y(intervention):
            # y is stored as a percent string in extra_data; default to 0
            raw = intervention.extra_data.get('image_selection', {}).get('y', '0')
            return float(raw.strip('%'))
        # sorted list of the current queryset by y coord of image selection
        return sorted(self, key=page_y)
class Intervention(BaseAnnotation):
    """An annotation or insertion documented in one of Derrida's books.

    Extends the annotator-store annotation with intervention type, an
    associated IIIF canvas, tags, languages, translation and author.
    """
    INTERVENTION_TYPE_CHOICES = (
        (INTERVENTION_TYPES.ANNOTATION, 'Annotation'),
        (INTERVENTION_TYPES.INSERTION, 'Insertion'),
    )
    #: whether this intervention is an annotation or an insertion
    intervention_type = models.CharField(
        max_length=2,
        choices=INTERVENTION_TYPE_CHOICES,
        default=INTERVENTION_TYPES.ANNOTATION,
    )
    #: associated IIIF :class:`djiffy.models.Canvas` for interventions
    #: related to an image
    # NOTE(review): no on_delete, so this requires Django < 2.0 (implicit CASCADE)
    canvas = models.ForeignKey(Canvas, null=True, blank=True)
    #: Tags to describe the intervention and its characteristics;
    #: many-to-many relationship to :class:`Tag`
    tags = models.ManyToManyField(Tag, blank=True,
        help_text='Tags to describe this intervation and its characteristics')
    #: language of the intervention text (i.e. :attr:`text`)
    text_language = models.ForeignKey(Language, null=True, blank=True,
        help_text='Language of the intervention text', related_name='+')
    #: translation language of the intervention text (i.e. :attr:`text`)
    text_translation = models.TextField(blank=True,
        help_text='Translation of the intervention text (optional)')
    #: language of the quoted text or anchor text (i.e. :attr:`quote`)
    quote_language = models.ForeignKey(Language, null=True, blank=True,
        help_text='Language of the anchor text', related_name='+')
    #: Associated author, instance of :class:`~derrida.people.models.Person`
    author = models.ForeignKey(Person, null=True, blank=True,
        default=get_default_intervener)
    objects = InterventionQuerySet.as_manager()
    def __str__(self):
        """Override str to make sure that something is displayed
        for Django admin and autocompletes"""
        if not self.quote and not self.text:
            string = '%s with no text' % self.get_intervention_type_display()
            if self.tags.all():
                tag_names = ', '.join(
                    sorted([tag.name for tag in self.tags.all()])
                )
                string = '%s, tagged as %s' % (string, tag_names)
        # Organize so that self.quote is set if it exists
        if self.text:
            string = self.text
        if self.quote:
            string = self.quote
        # If there's an associated canvas, supply that
        if self.canvas:
            string = '%s (%s)' % (string, self.canvas.label)
        return string
    class Meta:
        # extend default permissions to add a view option
        # change_annotation and delete_annotation provided by django
        permissions = (
            ('view_intervention', 'View intervention'),
        )
    def save(self, *args, **kwargs):
        # for image annotation, URI should be set to canvas URI; look up
        # canvas by URI and associate with the record
        # if canvas is already set and uri matches annotation uri, do nothing
        if self.canvas and self.uri == self.canvas.uri:
            pass
        else:
            # otherwise, lookup canvas and associate
            # (clear out in case there is no match for the new uri)
            self.canvas = None
            try:
                self.canvas = Canvas.objects.get(uri=self.uri)
            except Canvas.DoesNotExist:
                pass
        super(Intervention, self).save()
    def get_uri(self):
        '''Return a public URI for this intervention that can be used as an identifier'''
        return absolutize_url(reverse('interventions:view', args=[self.id]))
    def is_verbal(self):
        '''Return whether a :class:`Intervention` has a verbal component.'''
        return bool(self.text)
    # Sorts on the binary of whether an intervention does or does not
    # have text
    is_verbal.boolean = True
    is_verbal.admin_order_field = 'text'
    def is_annotation(self):
        '''Return whether :class:`Intervention` object is an annotation.'''
        return self.intervention_type == INTERVENTION_TYPES.ANNOTATION
    def is_insertion(self):
        '''Return whether :class:`Intervention` object is an insersetion.'''
        return self.intervention_type == INTERVENTION_TYPES.INSERTION
    @property
    def digital_edition(self):
        '''digital edition this annotation is associated, via
        :class:`djiffy.models.Canvas`'''
        # NOTE(review): raises AttributeError when canvas is None;
        # work_instance below guards for that case, this property does not
        return self.canvas.manifest
    @property
    def work_instance(self):
        '''Annotated library work :class:`derrida.books.models.Instance`,
        via associated :attr:`digital_edition`.'''
        # implicitly returns None when there is no associated canvas
        if self.canvas:
            return self.canvas.manifest.instance
    @property
    def annotation_type(self):
        '''List of annotation types. Generated from tags, excluding ink
        and pencil tags, uncertain and illegible tags, and with the
        addition of verbal or nonverbal annotation.'''
        # tags from being treated as annotation types?
        tags = [tag.name for tag in self.tags.all() if not any(
            ['ink' in tag.name, 'pencil' in tag.name,
             'uncertain' in tag.name, 'illegible' in tag.name])]
        if self.is_verbal():
            tags.append('verbal annotation')
        else:
            tags.append('nonverbal annotation')
        return tags
    @property
    def ink(self):
        '''pen ink color or pencil, from tags'''
        return [tag.name for tag in self.tags.all() if any(
            ['ink' in tag.name, 'pencil' in tag.name])]
    # NOTE: iiif_image_selection and admin_thumbnail borrowed
    # directly from cdh winthrop annotation code
    #: mapping from stored image-selection keys to IIIF region parameters
    img_info_to_iiif = {'w': 'width', 'h': 'height', 'x': 'x', 'y': 'y'}
    def iiif_image_selection(self):
        '''
        Generate a IIIF image selection for a :class:`Intervention` if it
        image selection information is present and a canvas is associated.
        '''
        # if image selection information is present in annotation
        # and canvas is associated, generated a IIIF image for the
        # selected portion of the canvas
        # (implicitly returns None otherwise)
        if 'image_selection' in self.extra_data and self.canvas:
            # convert stored image info into the format used by
            # piffle for generating iiif image region
            img_selection = {
                self.img_info_to_iiif[key]: float(val.rstrip('%'))
                for key, val in self.extra_data['image_selection'].items()
                if key in self.img_info_to_iiif
            }
            return self.canvas.image.region(percent=True, **img_selection)
    def admin_thumbnail(self):
        '''
        Provide an admin thumbnail image of associated IIIF image selection.
        '''
        img_selection = self.iiif_image_selection()
        # if image selection is available, display small thumbnail
        if img_selection:
            return u'<img src="%s" />' % img_selection.mini_thumbnail()
        # otherwise, if canvas is set, display canvas small thumbnail
        # (returns None when neither is available)
        if self.canvas:
            return u'<img src="%s" />' % self.canvas.image.mini_thumbnail()
    admin_thumbnail.short_description = 'Thumbnail'
    admin_thumbnail.allow_tags = True
    def handle_extra_data(self, data, request):
        '''Handle "extra" data that is not part of the stock annotation
        data model. Used to support custom fields that are
        specific to :class:`Intervention`. Data is as provided from json
        request data, as sent by annotator.js.'''
        # If the object does not yet exist in the database, it must be
        # saved before adding foreign key or many-to-many relationships.
        if self._state.adding:
            super(Intervention, self).save()
        # Add author if in the annotation
        if 'author' in data:
            try:
                self.author = Person.objects.get(authorized_name=data['author'])
            except ObjectDoesNotExist:
                self.author = None
        # If it doesn't exist, also explicitly set None to avoid default
        else:
            self.author = None
        # Set any tags that are passed if they already exist in the db
        # (tag vocabulary is enforced; unrecognized tags are ignored)
        if 'tags' in data:
            tags = Tag.objects.filter(name__in=data['tags'])
            self.tags.set(tags)
            del data['tags']
        # annotation text language; unset or invalid clears out the language
        try:
            self.text_language = Language.objects.get(name=data.get('text_language', None))
        except Language.DoesNotExist:
            self.text_language = None
        # quote/anchor text language; unset or invalid clears it out
        try:
            self.quote_language = Language.objects.get(name=data.get('quote_language', None))
        except Language.DoesNotExist:
            self.quote_language = None
        self.text_translation = data.get('text_translation', '')
        # remove fields if present, but don't error if they are not
        for field in ['text_language', 'quote_language', 'text_translation']:
            try:
                del data[field]
            except KeyError:
                pass
        return data
    def info(self):
        '''Return a dictionary of fields and values for
        display in the JSON object representation of the annotation.'''
        # Must include all local database fields in the output
        info = super(Intervention, self).info()
        info.update({
            'tags': [tag.name for tag in self.tags.all()],
        })
        # languages - display language name
        if self.text_language:
            info['text_language'] = self.text_language.name
        if self.quote_language:
            info['quote_language'] = self.quote_language.name
        if self.text_translation:
            info['text_translation'] = self.text_translation
        # author - display author name
        if self.author:
            info['author'] = self.author.authorized_name
        return info
|
import logging
import re
from datetime import datetime, timedelta
from functools import total_ordering
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Index,
Integer,
String,
Unicode,
and_,
delete,
desc,
func,
select,
update,
)
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from sqlalchemy.orm import backref, relation
from flexget import db_schema, plugin
from flexget.components.series.utils import normalize_series_name
from flexget.event import event, fire_event
from flexget.manager import Session
from flexget.utils.database import quality_property, with_session
from flexget.utils.sqlalchemy_utils import (
create_index,
drop_tables,
table_add_column,
table_columns,
table_exists,
table_schema,
)
from flexget.utils.tools import parse_episode_identifier
# current version of the 'series' plugin database schema
SCHEMA_VER = 14
log = logging.getLogger('series.db')
# declarative base bound to the versioned 'series' schema
Base = db_schema.versioned_base('series', SCHEMA_VER)
class NormalizedComparator(Comparator):
    """Comparator that normalizes series-name operands before comparing,
    so lookups against the normalized name column match transparently."""

    def operate(self, op, other):
        normalized = (
            [normalize_series_name(name) for name in other]
            if isinstance(other, list)
            else normalize_series_name(other)
        )
        return op(self.__clause_element__(), normalized)
class Series(Base):
    """A tracked series.

    The name is handled case insensitively and transparently: reads and
    writes keep the caller's casing, while lookups go through an indexed,
    normalized column.
    """

    __tablename__ = 'series'

    id = Column(Integer, primary_key=True)
    _name = Column('name', Unicode)
    #: normalized name backing the indexed, case insensitive lookups
    _name_normalized = Column('name_lower', Unicode, index=True, unique=True)
    identified_by = Column(String)
    begin_episode_id = Column(
        Integer, ForeignKey('series_episodes.id', name='begin_episode_id', use_alter=True)
    )
    begin = relation(
        'Episode',
        uselist=False,
        primaryjoin="Series.begin_episode_id == Episode.id",
        foreign_keys=[begin_episode_id],
        post_update=True,
        backref='begins_series',
    )
    episodes = relation(
        'Episode',
        backref='series',
        cascade='all, delete, delete-orphan',
        primaryjoin='Series.id == Episode.series_id',
    )
    in_tasks = relation(
        'SeriesTask',
        backref=backref('series', uselist=False),
        cascade='all, delete, delete-orphan',
    )
    alternate_names = relation(
        'AlternateNames', backref='series', cascade='all, delete, delete-orphan'
    )
    seasons = relation('Season', backref='series', cascade='all, delete, delete-orphan')

    # Make a special property that does indexed case insensitive lookups on name,
    # but stores/returns specified case
    @hybrid_property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value
        self._name_normalized = normalize_series_name(value)

    @name.comparator
    def name(self):
        return NormalizedComparator(self._name_normalized)

    @property
    def name_normalized(self):
        return self._name_normalized

    def __str__(self):
        return '<Series(id=%s,name=%s)>' % (self.id, self.name)

    def __repr__(self):
        # BUGFIX: __repr__ must return str on Python 3; returning the
        # bytes from .encode() made repr() raise TypeError. Keep the
        # ascii sanitizing, but decode back to str.
        return str(self).encode('ascii', 'replace').decode('ascii')

    def episodes_for_season(self, season_num):
        """Return the number of episodes of *season_num* that have at
        least one downloaded release."""
        return sum(
            1
            for episode in self.episodes
            if episode.season == season_num and episode.downloaded_releases
        )

    @property
    def completed_seasons(self):
        """Season numbers of seasons that are marked completed."""
        return [season.season for season in self.seasons if season.completed]
class Season(Base):
    """A whole season of a series, with its season-pack releases."""

    __tablename__ = 'series_seasons'

    id = Column(Integer, primary_key=True)
    identifier = Column(String)
    identified_by = Column(String)
    season = Column(Integer)
    series_id = Column(Integer, ForeignKey('series.id'), nullable=False)
    releases = relation('SeasonRelease', backref='season', cascade='all, delete, delete-orphan')

    #: discriminator used by code shared between Season and Episode
    is_season = True

    @property
    def completed(self):
        """
        Return True if the season has any released marked as downloaded
        """
        if not self.releases:
            return False
        return any(release.downloaded for release in self.releases)

    @property
    def downloaded_releases(self):
        """Releases of this season that are marked as downloaded."""
        return [release for release in self.releases if release.downloaded]

    @hybrid_property
    def first_seen(self):
        if not self.releases:
            return None
        return min(release.first_seen for release in self.releases)

    @first_seen.expression
    def first_seen(cls):
        return (
            select([func.min(SeasonRelease.first_seen)])
            .where(SeasonRelease.season_id == cls.id)
            .correlate(Season.__table__)
            .label('first_seen')
        )

    @property
    def age(self):
        """
        :return: Pretty string representing age of season. eg "23d 12h" or "No releases seen"
        """
        if not self.first_seen:
            return 'No releases seen'
        diff = datetime.now() - self.first_seen
        age_days = diff.days
        age_hours = diff.seconds // 60 // 60
        age = ''
        if age_days:
            age += '%sd ' % age_days
        age += '%sh' % age_hours
        return age

    @property
    def age_timedelta(self):
        """
        :return: Timedelta or None if seasons is never seen
        """
        if not self.first_seen:
            return None
        return datetime.now() - self.first_seen

    @property
    def is_premiere(self):
        # seasons are never premieres; episodes override this
        return False

    def __str__(self):
        return '<Season(id=%s,identifier=%s,season=%s,completed=%s)>' % (
            self.id,
            self.identifier,
            self.season,
            self.completed,
        )

    def __repr__(self):
        # BUGFIX: __repr__ must return str on Python 3; returning the
        # bytes from .encode() made repr() raise TypeError. Keep the
        # ascii sanitizing, but decode back to str.
        return str(self).encode('ascii', 'replace').decode('ascii')

    def __lt__(self, other):
        if other is None:
            log.trace('comparing %s to None', self)
            return False
        if not isinstance(other, (Season, Episode)):
            log.error('Cannot compare Season to %s', other)
            return NotImplemented
        if self.identified_by != 'ep':
            log.error('Can only compare with an \'ep\' style identifier')
            return NotImplemented
        log.trace('checking if %s is smaller than %s', self.season, other.season)
        return self.season < other.season

    def __hash__(self):
        # NOTE(review): id is None until the row is flushed; hashing a
        # pending instance raises TypeError — confirm callers only hash
        # persisted seasons.
        return self.id

    def to_dict(self):
        """Return a plain-dict representation for the API/UI layer."""
        return {
            'id': self.id,
            'identifier': self.identifier,
            'season': self.season,
            'identified_by': self.identified_by,
            'series_id': self.series_id,
            'first_seen': self.first_seen,
            'number_of_releases': len(self.releases),
        }

    @property
    def latest_release(self):
        """
        :return: Latest downloaded Release or None
        """
        if not self.releases:
            return None
        # downloaded_releases already filters on .downloaded, so sort
        # directly on first_seen (the old conditional key was redundant).
        # NOTE(review): still raises IndexError when releases exist but
        # none are downloaded — unchanged from the original behavior.
        return sorted(self.downloaded_releases, key=lambda rel: rel.first_seen, reverse=True)[0]
@total_ordering
class Episode(Base):
    """A single episode of a series, identified by an ep/sequence/date/id scheme."""

    __tablename__ = 'series_episodes'

    id = Column(Integer, primary_key=True)
    identifier = Column(String)
    season = Column(Integer)
    number = Column(Integer)
    identified_by = Column(String)
    # FK to the owning Series row.
    series_id = Column(Integer, ForeignKey('series.id'), nullable=False)
    # One-to-many: deleting the episode cascades to its releases.
    releases = relation('EpisodeRelease', backref='episode', cascade='all, delete, delete-orphan')
    # Distinguishes Episode from Season entities without isinstance checks.
    is_season = False

    @hybrid_property
    def first_seen(self):
        # Earliest first_seen among this episode's releases, or None.
        if not self.releases:
            return None
        return min(release.first_seen for release in self.releases)

    @first_seen.expression
    def first_seen(cls):
        # SQL-side counterpart: correlated scalar subquery over the releases.
        return (
            select([func.min(EpisodeRelease.first_seen)])
            .where(EpisodeRelease.episode_id == cls.id)
            .correlate(Episode.__table__)
            .label('first_seen')
        )

    @property
    def age(self):
        """
        :return: Pretty string representing age of episode. eg "23d 12h" or "No releases seen"
        """
        if not self.first_seen:
            return 'No releases seen'
        diff = datetime.now() - self.first_seen
        age_days = diff.days
        age_hours = diff.seconds // 60 // 60
        age = ''
        if age_days:
            age += '%sd ' % age_days
        age += '%sh' % age_hours
        return age

    @property
    def age_timedelta(self):
        """
        :return: Timedelta or None if episode is never seen
        """
        if not self.first_seen:
            return None
        return datetime.now() - self.first_seen

    @property
    def is_premiere(self):
        # Returns a descriptive (truthy) string for premieres, else False.
        if self.season == 1 and self.number in (0, 1):
            return 'Series Premiere'
        elif self.number in (0, 1):
            return 'Season Premiere'
        return False

    @property
    def downloaded_releases(self):
        # Releases of this episode marked as downloaded.
        return [release for release in self.releases if release.downloaded]

    @property
    def latest_release(self):
        """
        :return: Latest downloaded Release or None
        """
        # Bug fix: previously guarded only on self.releases, so an episode
        # with releases but none downloaded raised IndexError; the sort key
        # also mixed None into the comparison. Guard on downloaded releases.
        downloaded = self.downloaded_releases
        if not downloaded:
            return None
        return max(downloaded, key=lambda rel: rel.first_seen)

    def __str__(self):
        return '<Episode(id=%s,identifier=%s,season=%s,number=%s)>' % (
            self.id,
            self.identifier,
            self.season,
            self.number,
        )

    def __repr__(self):
        # Bug fix: returning bytes from __repr__ raises TypeError on Python 3.
        return str(self)

    def __eq__(self, other):
        if other is None:
            log.trace('comparing %s to None', self)
            return False
        if isinstance(other, Season):
            log.trace('comparing %s to Season', self)
            return False
        elif not isinstance(other, Episode):
            log.error('Cannot compare Episode with %s', other)
            return NotImplemented
        if self.identified_by != other.identified_by:
            log.error(
                'Cannot compare %s identifier with %s', self.identified_by, other.identified_by
            )
            return NotImplemented
        log.trace('comparing %s with %s', self.identifier, other.identifier)
        return self.identifier == other.identifier

    def __lt__(self, other):
        # total_ordering derives the remaining comparisons from __eq__/__lt__.
        if other is None:
            log.trace('comparing %s to None', self)
            return False
        elif isinstance(other, Episode):
            if self.identified_by is None or other.identified_by is None:
                bad_ep = other if other.identified_by is None else self
                log.error('cannot compare episode without an identifier type: %s', bad_ep)
                return False
            if self.identified_by != other.identified_by:
                if self.identified_by == 'special':
                    log.trace('Comparing special episode')
                    return False
                log.error('cannot compare %s with %s', self.identified_by, other.identified_by)
                return NotImplemented
            if self.identified_by in ['ep', 'sequence']:
                log.trace('comparing %s and %s', self, other)
                return self.season < other.season or (
                    self.season == other.season and self.number < other.number
                )
            elif self.identified_by == 'date':
                # Date identifiers are ISO-formatted, so string order == date order.
                log.trace('comparing %s and %s', self.identifier, other.identifier)
                return self.identifier < other.identifier
            else:
                log.error('cannot compare when identifier is %s', self.identified_by)
                return NotImplemented
        elif isinstance(other, Season):
            if self.identified_by != 'ep':
                log.error('cannot compare season when identifier is not \'ep\'')
                return NotImplemented
            log.trace('comparing %s with %s', self.season, other.season)
            return self.season < other.season
        else:
            log.error('can only compare with Episode or Season, not %s', other)
            return NotImplemented

    def __hash__(self):
        # Bug fix: hash() must return an int; id is None before flush.
        return hash(self.id)

    def to_dict(self):
        """Serialize this episode to a plain dict."""
        return {
            'id': self.id,
            'identifier': self.identifier,
            'season': self.season,
            'identified_by': self.identified_by,
            'number': self.number,
            'series_id': self.series_id,
            'first_seen': self.first_seen,
            'premiere': self.is_premiere,
            'number_of_releases': len(self.releases),
        }
class EpisodeRelease(Base):
    """A single release (title/quality) seen for an episode."""

    __tablename__ = 'episode_releases'

    id = Column(Integer, primary_key=True)
    # FK to the owning Episode row; indexed because lookups are always per-episode.
    episode_id = Column(Integer, ForeignKey('series_episodes.id'), nullable=False, index=True)
    _quality = Column('quality', String)
    quality = quality_property('_quality')
    downloaded = Column(Boolean, default=False)
    proper_count = Column(Integer, default=0)
    title = Column(Unicode)
    first_seen = Column(DateTime)

    def __init__(self):
        # Stamp creation time; used for age calculations and db cleanup.
        self.first_seen = datetime.now()

    @property
    def proper(self):
        # TODO: TEMP
        import warnings

        warnings.warn("accessing deprecated release.proper, use release.proper_count instead")
        return self.proper_count > 0

    def __str__(self):
        return '<Release(id=%s,quality=%s,downloaded=%s,proper_count=%s,title=%s)>' % (
            self.id,
            self.quality,
            self.downloaded,
            self.proper_count,
            self.title,
        )

    def __repr__(self):
        # Bug fix: __repr__ must return str on Python 3, not bytes.
        return str(self)

    def to_dict(self):
        """Serialize this release to a plain dict."""
        return {
            'id': self.id,
            'title': self.title,
            'downloaded': self.downloaded,
            'quality': self.quality.name,
            'proper_count': self.proper_count,
            'first_seen': self.first_seen,
            'episode_id': self.episode_id,
        }
class SeasonRelease(Base):
    """A single release (title/quality) seen for a season pack."""

    __tablename__ = 'season_releases'

    id = Column(Integer, primary_key=True)
    # FK to the owning Season row; indexed because lookups are always per-season.
    season_id = Column(Integer, ForeignKey('series_seasons.id'), nullable=False, index=True)
    _quality = Column('quality', String)
    quality = quality_property('_quality')
    downloaded = Column(Boolean, default=False)
    proper_count = Column(Integer, default=0)
    title = Column(Unicode)
    first_seen = Column(DateTime)

    def __init__(self):
        # Stamp creation time; used for age calculations and db cleanup.
        self.first_seen = datetime.now()

    @property
    def proper(self):
        # TODO: TEMP
        import warnings

        warnings.warn("accessing deprecated release.proper, use release.proper_count instead")
        return self.proper_count > 0

    def __str__(self):
        return '<Release(id=%s,quality=%s,downloaded=%s,proper_count=%s,title=%s)>' % (
            self.id,
            self.quality,
            self.downloaded,
            self.proper_count,
            self.title,
        )

    def __repr__(self):
        # Bug fix: __repr__ must return str on Python 3, not bytes.
        return str(self)

    def to_dict(self):
        """Serialize this release to a plain dict."""
        return {
            'id': self.id,
            'title': self.title,
            'downloaded': self.downloaded,
            'quality': self.quality.name,
            'proper_count': self.proper_count,
            'first_seen': self.first_seen,
            'season_id': self.season_id,
        }
class AlternateNames(Base):
    """ Similar to Series. Name is handled case insensitively transparently.
    """

    __tablename__ = 'series_alternate_names'

    id = Column(Integer, primary_key=True)
    _alt_name = Column('alt_name', Unicode)
    _alt_name_normalized = Column('alt_name_normalized', Unicode, index=True, unique=True)
    series_id = Column(Integer, ForeignKey('series.id'), nullable=False)

    @hybrid_property
    def alt_name(self):
        return self._alt_name

    @alt_name.setter
    def alt_name(self, value):
        # Keep the normalized column in sync so lookups stay case-insensitive.
        self._alt_name = value
        self._alt_name_normalized = normalize_series_name(value)

    @alt_name.comparator
    def alt_name(self):
        # SQL comparisons on alt_name go through the normalized column.
        return NormalizedComparator(self._alt_name_normalized)

    @property
    def name_normalized(self):
        return self._alt_name_normalized

    def __init__(self, name):
        self.alt_name = name

    def __str__(self):
        return '<SeriesAlternateName(series_id=%s, alt_name=%s)>' % (self.series_id, self.alt_name)

    def __repr__(self):
        # Bug fix: __repr__ must return str on Python 3, not bytes.
        return str(self)
class SeriesTask(Base):
    """Associates a series with the name of a task that configures it."""

    __tablename__ = 'series_tasks'

    id = Column(Integer, primary_key=True)
    # FK to the owning Series row.
    series_id = Column(Integer, ForeignKey('series.id'), nullable=False)
    # Name of the task in which this series is configured.
    name = Column(Unicode, index=True)

    def __init__(self, name):
        self.name = name
Index('episode_series_identifier', Episode.series_id, Episode.identifier)
@db_schema.upgrade('series')
def upgrade(ver, session):
    """
    Migrate the series schema from version `ver` to the current version.

    Each `if ver == N` step is applied in order; the function returns the
    final schema version. Steps are intentionally order-dependent — do not
    reorder them.
    """
    if ver is None:
        if table_exists('episode_qualities', session):
            log.info(
                'Series database format is too old to upgrade, dropping and recreating tables.'
            )
            # Drop the deprecated data
            drop_tables(['series', 'series_episodes', 'episode_qualities'], session)
            # Create new tables from the current models
            Base.metadata.create_all(bind=session.bind)
        # Upgrade episode_releases table to have a proper count and seed it with appropriate numbers
        columns = table_columns('episode_releases', session)
        if 'proper_count' not in columns:
            log.info('Upgrading episode_releases table to have proper_count column')
            table_add_column('episode_releases', 'proper_count', Integer, session)
            release_table = table_schema('episode_releases', session)
            for row in session.execute(select([release_table.c.id, release_table.c.title])):
                # Recalculate the proper_count from title for old episodes
                proper_count = (
                    plugin.get('parsing', 'series.db').parse_series(row['title']).proper_count
                )
                session.execute(
                    update(
                        release_table,
                        release_table.c.id == row['id'],
                        {'proper_count': proper_count},
                    )
                )
        ver = 0
    if ver == 0:
        log.info('Migrating first_seen column from series_episodes to episode_releases table.')
        # Create the column in episode_releases
        table_add_column('episode_releases', 'first_seen', DateTime, session)
        # Seed the first_seen value for all the past releases with the first_seen of their episode.
        episode_table = table_schema('series_episodes', session)
        release_table = table_schema('episode_releases', session)
        for row in session.execute(select([episode_table.c.id, episode_table.c.first_seen])):
            session.execute(
                update(
                    release_table,
                    release_table.c.episode_id == row['id'],
                    {'first_seen': row['first_seen']},
                )
            )
        ver = 1
    if ver == 1:
        log.info('Adding `identified_by` column to series table.')
        table_add_column('series', 'identified_by', String, session)
        ver = 2
    if ver == 2:
        log.info('Creating index on episode_releases table.')
        create_index('episode_releases', session, 'episode_id')
        ver = 3
    if ver == 3:
        # Remove index on Series.name
        try:
            Index('ix_series_name').drop(bind=session.bind)
        except OperationalError:
            log.debug('There was no ix_series_name index to remove.')
        # Add Series.name_lower column
        log.info('Adding `name_lower` column to series table.')
        table_add_column('series', 'name_lower', Unicode, session)
        series_table = table_schema('series', session)
        create_index('series', session, 'name_lower')
        # Fill in lower case name column
        session.execute(
            update(series_table, values={'name_lower': func.lower(series_table.c.name)})
        )
        ver = 4
    if ver == 4:
        log.info('Adding `identified_by` column to episodes table.')
        table_add_column('series_episodes', 'identified_by', String, session)
        series_table = table_schema('series', session)
        # Clear out identified_by id series so that they can be auto detected again
        session.execute(
            update(series_table, series_table.c.identified_by != 'ep', {'identified_by': None})
        )
        # Warn users about a possible config change needed.
        log.warning(
            'If you are using `identified_by: id` for the series plugin for a date-identified '
            'or abolute-numbered series, you will need to update your config. Two new identified_by modes have '
            'been added: `date` and `sequence`. In addition, if you are using `identified_by: auto`, it will'
            'be relearned based on upcoming episodes.'
        )
        ver = 5
    if ver == 5:
        # Episode advancement now relies on identified_by being filled for the episodes.
        # This action retroactively marks 'ep' mode for all episodes where the series is already in 'ep' mode.
        series_table = table_schema('series', session)
        ep_table = table_schema('series_episodes', session)
        ep_mode_series = select([series_table.c.id], series_table.c.identified_by == 'ep')
        where_clause = and_(
            ep_table.c.series_id.in_(ep_mode_series),
            ep_table.c.season != None,
            ep_table.c.number != None,
            ep_table.c.identified_by == None,
        )
        session.execute(update(ep_table, where_clause, {'identified_by': 'ep'}))
        ver = 6
    if ver == 6:
        # Translate old qualities into new quality requirements
        release_table = table_schema('episode_releases', session)
        for row in session.execute(select([release_table.c.id, release_table.c.quality])):
            # Webdl quality no longer has dash
            new_qual = row['quality'].replace('web-dl', 'webdl')
            if row['quality'] != new_qual:
                session.execute(
                    update(release_table, release_table.c.id == row['id'], {'quality': new_qual})
                )
        ver = 7
    # Normalization rules changed for 7 and 8, but only run this once
    if ver in [7, 8]:
        # Merge series that qualify as duplicates with new normalization scheme
        series_table = table_schema('series', session)
        ep_table = table_schema('series_episodes', session)
        all_series = session.execute(select([series_table.c.name, series_table.c.id]))
        unique_series = {}
        for row in all_series:
            unique_series.setdefault(normalize_series_name(row['name']), []).append(row['id'])
        for series, ids in unique_series.items():
            # Re-point all episodes at the first (kept) series, drop the rest.
            session.execute(update(ep_table, ep_table.c.series_id.in_(ids), {'series_id': ids[0]}))
            if len(ids) > 1:
                session.execute(delete(series_table, series_table.c.id.in_(ids[1:])))
            session.execute(
                update(series_table, series_table.c.id == ids[0], {'name_lower': series})
            )
        ver = 9
    if ver == 9:
        table_add_column('series', 'begin_episode_id', Integer, session)
        ver = 10
    if ver == 10:
        # Due to bad db cleanups there may be invalid entries in series_tasks table
        series_tasks = table_schema('series_tasks', session)
        series_table = table_schema('series', session)
        log.verbose('Repairing series_tasks table data')
        session.execute(
            delete(series_tasks, ~series_tasks.c.series_id.in_(select([series_table.c.id])))
        )
        ver = 11
    if ver == 11:
        # SeriesTasks was cleared out due to a bug, make sure they get recalculated next run #2772
        from flexget.task import config_changed

        config_changed(session=session)
        ver = 12
    if ver == 12:
        # Force identified_by value None to 'auto'
        series_table = table_schema('series', session)
        session.execute(
            update(series_table, series_table.c.identified_by == None, {'identified_by': 'auto'})
        )
        ver = 13
    if ver == 13:
        # New season_releases table, added by "create_all"
        log.info('Adding season_releases table')
        ver = 14
    return ver
@event('manager.db_cleanup')
def db_cleanup(manager, session):
    """Purge stale series data: old undownloaded releases, then orphan episodes and series."""
    # Undownloaded episode releases older than 120 days.
    removed = (
        session.query(EpisodeRelease)
        .filter(EpisodeRelease.downloaded == False)
        .filter(EpisodeRelease.first_seen < datetime.now() - timedelta(days=120))
        .delete(False)
    )
    if removed:
        log.verbose('Removed %d undownloaded episode releases.', removed)
    # Episodes with no releases that are not a series "begin" marker.
    removed = (
        session.query(Episode)
        .filter(~Episode.releases.any())
        .filter(~Episode.begins_series.any())
        .delete(False)
    )
    if removed:
        log.verbose('Removed %d episodes without releases.', removed)
    # Series with no episodes that are not referenced by any task.
    removed = (
        session.query(Series)
        .filter(~Series.episodes.any())
        .filter(~Series.in_tasks.any())
        .delete(False)
    )
    if removed:
        log.verbose('Removed %d series without episodes.', removed)
def set_alt_names(alt_names, db_series, session):
    """Replace the alternate-name list of `db_series` with `alt_names`.

    :raises plugin.PluginError: if a name is already claimed by another series.
    """
    new_alt_names = []
    for alt_name in alt_names:
        existing = (
            session.query(AlternateNames).filter(AlternateNames.alt_name == alt_name).first()
        )
        if not existing:
            new_alt_names.append(AlternateNames(alt_name))
            log.debug('adding alternate name `%s` to series `%s`', alt_name, db_series.name)
            continue
        if existing.series_id != db_series.id:
            raise plugin.PluginError(
                'Error adding alternate name for `%s`: `%s` is already associated with `%s`. '
                'Check your settings.' % (db_series.name, alt_name, existing.series.name)
            )
        log.debug(
            'alternate name `%s` already associated with series `%s`, no change needed',
            alt_name,
            db_series.name,
        )
        new_alt_names.append(existing)
    db_series.alternate_names[:] = new_alt_names
def show_seasons(series, start=None, stop=None, count=False, descending=False, session=None):
    """ Return all seasons of a given series """
    query = session.query(Season).filter(Season.series_id == series.id)
    if count:
        return query.count()
    if descending:
        query = query.order_by(Season.season.desc())
    else:
        query = query.order_by(Season.season)
    return query.slice(start, stop).from_self().all()
def get_all_entities(series, sessio_n=None, sort_by='age', reverse=False):
    """Return all episodes and seasons of `series`, sorted by first-seen time or identifier."""
    entities = show_episodes(series, session=sessio_n) + show_seasons(series, session=sessio_n)
    if sort_by == 'identifier':
        def sort_key(entity):
            return entity.identifier
    else:
        def sort_key(entity):
            # Entities never seen sort first via datetime.min.
            return (entity.first_seen or datetime.min, entity.identifier)
    return sorted(entities, key=sort_key, reverse=reverse)
def get_episode_releases(
    episode,
    downloaded=None,
    start=None,
    stop=None,
    count=False,
    descending=False,
    sort_by=None,
    session=None,
):
    """ Return all releases for a given episode

    :param episode: Episode whose releases are fetched
    :param downloaded: If given, filter by the downloaded flag
    :param start: Slice start
    :param stop: Slice stop
    :param count: Return only the number of matching releases
    :param descending: Sort in descending order (only applies with sort_by)
    :param sort_by: Name of an EpisodeRelease column to order on
    :param session: Database session
    """
    releases = session.query(EpisodeRelease).filter(EpisodeRelease.episode_id == episode.id)
    if downloaded is not None:
        releases = releases.filter(EpisodeRelease.downloaded == downloaded)
    if count:
        return releases.count()
    releases = releases.slice(start, stop).from_self()
    # Bug fix: sort_by defaults to None but was passed to getattr()
    # unconditionally, raising TypeError whenever it was omitted.
    if sort_by is not None:
        column = getattr(EpisodeRelease, sort_by)
        releases = releases.order_by(column.desc() if descending else column)
    return releases.all()
def get_season_releases(
    season,
    downloaded=None,
    start=None,
    stop=None,
    count=False,
    descending=False,
    sort_by=None,
    session=None,
):
    """ Return all releases for a given season

    :param season: Season whose releases are fetched
    :param downloaded: If given, filter by the downloaded flag
    :param start: Slice start
    :param stop: Slice stop
    :param count: Return only the number of matching releases
    :param descending: Sort in descending order (only applies with sort_by)
    :param sort_by: Name of a SeasonRelease column to order on
    :param session: Database session
    """
    releases = session.query(SeasonRelease).filter(SeasonRelease.season_id == season.id)
    if downloaded is not None:
        releases = releases.filter(SeasonRelease.downloaded == downloaded)
    if count:
        return releases.count()
    releases = releases.slice(start, stop).from_self()
    # Bug fix: sort_by defaults to None but was passed to getattr()
    # unconditionally, raising TypeError whenever it was omitted.
    if sort_by is not None:
        column = getattr(SeasonRelease, sort_by)
        releases = releases.order_by(column.desc() if descending else column)
    return releases.all()
def episode_in_show(series_id, episode_id):
    """ Return True if `episode_id` is part of show with `series_id`, else return False """
    with Session() as session:
        ep = session.query(Episode).filter(Episode.id == episode_id).one()
        return series_id == ep.series_id
def season_in_show(series_id, season_id):
    """ Return True if `season_id` is part of show with `series_id`, else return False """
    with Session() as session:
        found = session.query(Season).filter(Season.id == season_id).one()
        return series_id == found.series_id
def release_in_episode(episode_id, release_id):
    """ Return True if `release_id` is part of episode with `episode_id`, else return False """
    with Session() as session:
        rel = session.query(EpisodeRelease).filter(EpisodeRelease.id == release_id).one()
        return episode_id == rel.episode_id
def release_in_season(season_id, release_id):
    """ Return True if `release_id` is part of season with `season_id`, else return False """
    with Session() as session:
        rel = session.query(SeasonRelease).filter(SeasonRelease.id == release_id).one()
        return season_id == rel.season_id
def _add_alt_name(alt, db_series, series_name, session):
    """Attach alternate name `alt` to `db_series`, validating ownership conflicts."""
    alt = str(alt)
    existing = session.query(AlternateNames).filter(AlternateNames.alt_name == alt).first()
    if not existing:
        # Brand-new alternate name, create and attach it.
        log.debug('adding alternate name `%s` for `%s` into db', alt, series_name)
        new_alt = AlternateNames(alt)
        db_series.alternate_names.append(new_alt)
        log.debug('-> added %s', new_alt)
        return
    if existing.series_id == db_series.id:
        # Already exists, no need to create it then
        # TODO is checking the list for duplicates faster/better than querying the DB?
        existing.alt_name = alt
        return
    if not existing.series:
        # Not sure how this can happen
        log.debug(
            'Found an alternate name not attached to series. Re-attatching `%s` to `%s`.',
            alt,
            series_name,
        )
        db_series.alternate_names.append(existing)
        return
    # Alternate name already exists for another series. Not good.
    raise plugin.PluginError(
        'Error adding alternate name for `%s`: `%s` is already associated with `%s`. '
        'Check your settings.' % (series_name, alt, existing.series.name)
    )
@with_session
def get_series_summary(
    configured=None,
    premieres=None,
    start=None,
    stop=None,
    count=False,
    sort_by='show_name',
    descending=None,
    session=None,
    name=None,
):
    """
    Return a query with results for all series.

    :param configured: 'configured' for shows in config, 'unconfigured' for shows not in config, 'all' for both.
        Default is 'all'
    :param premieres: Return only shows with 1 season and less than 3 episodes
    :param count: Decides whether to return count of all shows or data itself
    :param sort_by: 'show_name' to sort alphabetically, anything else sorts by latest release
    :param session: Passed session
    :param name: If given, filter by (normalized) series name substring
    :return: Query of Series rows (or an int when count is set)
    """
    if not configured:
        configured = 'configured'
    elif configured not in ['configured', 'unconfigured', 'all']:
        raise LookupError(
            '"configured" parameter must be either "configured", "unconfigured", or "all"'
        )
    query = session.query(Series)
    # Outer joins so shows without episodes/releases/tasks still appear;
    # grouping by series id enables the having() aggregates below.
    query = (
        query.outerjoin(Series.episodes)
        .outerjoin(Episode.releases)
        .outerjoin(Series.in_tasks)
        .group_by(Series.id)
    )
    # A show counts as "configured" when attached to at least one task.
    if configured == 'configured':
        query = query.having(func.count(SeriesTask.id) >= 1)
    elif configured == 'unconfigured':
        query = query.having(func.count(SeriesTask.id) < 1)
    if name:
        query = query.filter(Series._name_normalized.contains(name))
    if premieres:
        # Shows whose highest seen episode is at most S01E02 and have a downloaded release.
        query = (
            query.having(func.max(Episode.season) <= 1).having(func.max(Episode.number) <= 2)
        ).filter(EpisodeRelease.downloaded == True)
    if count:
        return query.group_by(Series).count()
    if sort_by == 'show_name':
        order_by = Series.name
    else:
        # Sort by most recently seen release.
        order_by = func.max(EpisodeRelease.first_seen)
    query = query.order_by(desc(order_by)) if descending else query.order_by(order_by)
    return query.slice(start, stop).from_self()
def auto_identified_by(series):
    """
    Determine if series `name` should be considered identified by episode or id format

    Returns 'ep', 'sequence', 'date' or 'id' if enough history is present to identify the series' id type.
    Returns 'auto' if there is not enough history to determine the format yet
    """
    session = Session.object_session(series)
    totals = dict(
        session.query(Episode.identified_by, func.count(Episode.identified_by))
        .join(Episode.series)
        .filter(Series.id == series.id)
        .group_by(Episode.identified_by)
        .all()
    )
    # Remove None and specials from the dict,
    # we are only considering episodes that we know the type of (parsed with new parser)
    totals.pop(None, None)
    totals.pop('special', None)
    if not totals:
        return 'auto'
    log.debug('%s episode type totals: %r', series.name, totals)
    # Find total number of parsed episodes
    episode_count = sum(totals.values())
    # See which type has the most
    most_common = max(totals, key=totals.get)
    # Ep mode locks in faster than the rest. At 2 seen episodes.
    if totals.get('ep', 0) >= 2 and totals['ep'] > episode_count / 3:
        log.info('identified_by has locked in to type `ep` for %s', series.name)
        return 'ep'
    # If we have over 3 episodes all of the same type, lock in
    if len(totals) == 1 and episode_count >= 3:
        return most_common
    # Otherwise wait until 5 episodes to lock in
    if episode_count >= 5:
        log.info('identified_by has locked in to type `%s` for %s', most_common, series.name)
        return most_common
    log.verbose(
        'identified by is currently on `auto` for %s. '
        'Multiple id types may be accepted until it locks in on the appropriate type.',
        series.name,
    )
    return 'auto'
def get_latest_season_pack_release(series, downloaded=True, season=None):
    """
    Return the latest season pack release for a series

    :param Series series: Series object
    :param bool downloaded: Flag to return only downloaded season packs
    :param season: Filter by season number
    :return: Latest release of a season object
    """
    session = Session.object_session(series)
    query = (
        session.query(Season).join(Season.releases, Season.series).filter(Series.id == series.id)
    )
    if downloaded:
        query = query.filter(SeasonRelease.downloaded == True)
    if season is not None:
        query = query.filter(Season.season == season)
    latest = query.order_by(desc(Season.season)).first()
    if latest is None:
        log.debug(
            'no season packs found for series `%s` with parameters season: %s, downloaded: %s',
            series.name,
            season,
            downloaded,
        )
        return
    log.debug(
        'latest season pack for series %s, with downloaded set to %s and season set to %s',
        series,
        downloaded,
        season,
    )
    return latest
def get_latest_episode_release(series, downloaded=True, season=None):
    """
    :param Series series: Series instance
    :param downloaded: find only downloaded releases
    :param season: season to find newest release for
    :return: Instance of Episode or None if not found.
    """
    session = Session.object_session(series)
    releases = (
        session.query(Episode)
        .join(Episode.releases, Episode.series)
        .filter(Series.id == series.id)
    )
    if downloaded:
        releases = releases.filter(EpisodeRelease.downloaded == True)
    if season is not None:
        releases = releases.filter(Episode.season == season)
    # Restrict to the series' locked identifier scheme, unless still on 'auto'.
    if series.identified_by and series.identified_by != 'auto':
        releases = releases.filter(Episode.identified_by == series.identified_by)
    # Ordering depends on the identifier scheme in use.
    if series.identified_by in ['ep', 'sequence']:
        latest_episode_release = releases.order_by(
            desc(Episode.season), desc(Episode.number)
        ).first()
    elif series.identified_by == 'date':
        latest_episode_release = releases.order_by(desc(Episode.identifier)).first()
    else:
        # We have to label the order_by clause to disambiguate from Release.first_seen #3055
        latest_episode_release = releases.order_by(
            desc(Episode.first_seen.label('ep_first_seen'))
        ).first()
    if not latest_episode_release:
        log.debug(
            'no episodes found for series `%s` with parameters season: %s, downloaded: %s',
            series.name,
            season,
            downloaded,
        )
        return
    log.debug(
        'latest episode for series %s, with downloaded set to %s and season set to %s',
        series,
        downloaded,
        season,
    )
    return latest_episode_release
def get_latest_release(series, downloaded=True, season=None):
    """
    Return the latest downloaded entity of a series, either season pack or episode

    :param Series series: Series object
    :param bool downloaded: Downloaded flag
    :param int season: Filter by season
    :return: Latest Season or Episode, or None when neither exists
    """
    latest_ep = get_latest_episode_release(series, downloaded, season)
    latest_season = get_latest_season_pack_release(series, downloaded, season)
    # Handle the single-None cases explicitly instead of relying on max()
    # comparing an entity against None via the entities' __lt__/__gt__.
    if latest_ep is None:
        return latest_season
    if latest_season is None:
        return latest_ep
    return max(latest_season, latest_ep)
def new_eps_after(series, since_ep, session):
    """
    Count how many episodes of `series` have been seen after `since_ep`.

    :param since_ep: Episode instance
    :return: Tuple of (number of episodes since then, 'eps')
    """
    series_eps = session.query(Episode).join(Episode.series).filter(Series.id == series.id)
    if series.identified_by == 'ep':
        if since_ep.season is None or since_ep.number is None:
            log.debug(
                'new_eps_after for `%s` falling back to timestamp because latest dl in non-ep format',
                series.name,
            )
            return series_eps.filter(Episode.first_seen > since_ep.first_seen).count(), 'eps'
        count = series_eps.filter(
            (Episode.identified_by == 'ep')
            & (
                ((Episode.season == since_ep.season) & (Episode.number > since_ep.number))
                | (Episode.season > since_ep.season)
            )
        ).count()
    elif series.identified_by == 'sequence':
        # Bug fix: this branch previously tested for 'seq', but the rest of the
        # module uses 'sequence' as the identifier type (see Episode.__lt__ and
        # set_series_begin), so sequence-mode series always fell through to the
        # unsupported branch and reported 0.
        count = series_eps.filter(Episode.number > since_ep.number).count()
    elif series.identified_by == 'id':
        count = series_eps.filter(Episode.first_seen > since_ep.first_seen).count()
    else:
        # NOTE(review): 'date' mode is not handled here and falls through to
        # this unsupported branch — confirm whether that is intended.
        log.debug('unsupported identified_by `%s`', series.identified_by)
        count = 0
    return count, 'eps'
def new_seasons_after(series, since_season, session):
    """Count seasons of `series` first seen after `since_season`; returns (count, 'seasons')."""
    # Bug fix: the query filtered on Season.id == series.id, comparing a season
    # primary key against a series primary key; filter on the joined Series
    # instead, matching new_eps_after.
    series_seasons = session.query(Season).join(Season.series).filter(Series.id == series.id)
    return series_seasons.filter(Season.first_seen > since_season.first_seen).count(), 'seasons'
def new_entities_after(since_entity):
    """Count entities (episodes or seasons, matching `since_entity`'s kind) seen after it."""
    session = Session.object_session(since_entity)
    # Dispatch on entity kind; also avoids shadowing the sqlalchemy `func` import.
    counter = new_seasons_after if since_entity.is_season else new_eps_after
    return counter(since_entity.series, since_entity, session)
def set_series_begin(series, ep_id):
    """
    Set beginning for series

    :param Series series: Series instance
    :param ep_id: Integer for sequence mode, SxxEyy for episodic and yyyy-mm-dd for date.
    :raises ValueError: If malformed ep_id or series in different mode
    :return: tuple containing identified_by and identity_type
    """
    # If identified_by is not explicitly specified, auto-detect it based on begin identifier
    # TODO: use some method of series parser to do the identifier parsing
    session = Session.object_session(series)
    identified_by, entity_type = parse_episode_identifier(ep_id, identify_season=True)
    if identified_by == 'ep':
        ep_id = ep_id.upper()
        if entity_type == 'season':
            # A bare season (Sxx) begin is anchored to its first episode.
            ep_id += 'E01'
    if series.identified_by not in ['auto', '', None]:
        # Series already locked to a scheme; the begin id must match it.
        if identified_by != series.identified_by:
            raise ValueError(
                '`begin` value `%s` does not match identifier type for identified_by `%s`'
                % (ep_id, series.identified_by)
            )
    series.identified_by = identified_by
    episode = (
        session.query(Episode)
        .filter(Episode.series_id == series.id)
        .filter(Episode.identified_by == series.identified_by)
        .filter(Episode.identifier == str(ep_id))
        .first()
    )
    if not episode:
        # TODO: Don't duplicate code from self.store method
        episode = Episode()
        episode.identifier = ep_id
        episode.identified_by = identified_by
        if identified_by == 'ep':
            # ep_id is normalized to SxxEyy above, so the match always succeeds.
            match = re.match(r'S(\d+)E(\d+)', ep_id)
            episode.season = int(match.group(1))
            episode.number = int(match.group(2))
        elif identified_by == 'sequence':
            episode.season = 0
            episode.number = ep_id
        series.episodes.append(episode)
        # Need to flush to get an id on new Episode before assigning it as series begin
        session.flush()
    series.begin = episode
    return (identified_by, entity_type)
def remove_series(name, forget=False):
    """
    Remove a whole series `name` from database.

    :param name: Name of series to be removed
    :param forget: Indication whether or not to fire a 'forget' event
    :raises ValueError: if no series with that name exists
    """
    forget_titles = []
    with Session() as session:
        matches = session.query(Series).filter(Series.name == name).all()
        if not matches:
            raise ValueError('Unknown series `%s`' % name)
        for db_series in matches:
            if forget:
                for entity in db_series.episodes + db_series.seasons:
                    for release in entity.downloaded_releases:
                        forget_titles.append(release.title)
            session.delete(db_series)
        session.commit()
        log.debug('Removed series `%s` from database.', name)
    # Fire events outside the session so listeners see committed state.
    for title in forget_titles:
        fire_event('forget', title)
def remove_series_entity(name, identifier, forget=False):
    """
    Remove all entities by `identifier` from series `name` from database.

    :param name: Name of series to be removed
    :param identifier: Series identifier to be deleted,
    :param forget: Indication whether or not to fire a 'forget' event
    :raises ValueError: if the series or the identifier is unknown/invalid
    """
    downloaded_releases = []
    with Session() as session:
        series = session.query(Series).filter(Series.name == name).first()
        if not series:
            raise ValueError('Unknown series `%s`' % name)

        def remove_entity(entity):
            # Deletes the entity and returns the titles of its downloaded
            # releases so 'forget' events can be fired after commit.
            if not series.begin:
                series.identified_by = (
                    ''  # reset identified_by flag so that it will be recalculated
                )
            session.delete(entity)
            log.debug('Entity `%s` from series `%s` removed from database.', identifier, name)
            return [release.title for release in entity.downloaded_releases]

        # Parse "<series name> <identifier>" to figure out what kind of
        # entity (season pack vs episode) the identifier refers to.
        name_to_parse = '{} {}'.format(series.name, identifier)
        parsed = plugin.get('parsing', 'series.db').parse_series(name_to_parse, name=series.name)
        if not parsed.valid:
            raise ValueError(
                'Invalid identifier for series `{}`: `{}`'.format(series.name, identifier)
            )

        removed = False
        if parsed.season_pack:
            season = (
                session.query(Season)
                .filter(Season.season == parsed.season)
                .filter(Season.series_id == series.id)
                .first()
            )
            if season:
                removed = True
                downloaded_releases = remove_entity(season)
        else:
            episode = session.query(Episode).filter(Episode.series_id == series.id)
            if parsed.episode:
                episode = episode.filter(Episode.number == parsed.episode).filter(
                    Episode.season == parsed.season
                )
            else:
                episode = episode.filter(Episode.identifier == parsed.identifier)
            episode = episode.first()
            if episode:
                removed = True
                downloaded_releases = remove_entity(episode)

        if not removed:
            raise ValueError(
                'Unknown identifier `%s` for series `%s`' % (identifier, name.capitalize())
            )

    if forget:
        for downloaded_release in downloaded_releases:
            fire_event('forget', downloaded_release)
def delete_episode_release_by_id(release_id):
    """Delete the episode release with the given primary key, or raise ValueError."""
    with Session() as session:
        release = (
            session.query(EpisodeRelease)
            .filter(EpisodeRelease.id == release_id)
            .first()
        )
        if not release:
            raise ValueError('Unknown identifier `%s` for release' % release_id)
        session.delete(release)
        session.commit()
        log.debug('Deleted release ID `%s`', release_id)
def delete_season_release_by_id(release_id):
    """Delete the season release with the given primary key, or raise ValueError."""
    with Session() as session:
        release = (
            session.query(SeasonRelease)
            .filter(SeasonRelease.id == release_id)
            .first()
        )
        if not release:
            raise ValueError('Unknown identifier `%s` for release' % release_id)
        session.delete(release)
        session.commit()
        log.debug('Deleted release ID `%s`', release_id)
def shows_by_name(normalized_name, session=None):
    """Return all series whose normalized name contains `normalized_name`, shortest first."""
    query = session.query(Series)
    query = query.filter(Series._name_normalized.contains(normalized_name))
    return query.order_by(func.char_length(Series.name)).all()
def shows_by_exact_name(normalized_name, session=None):
    """Return all series whose normalized name equals `normalized_name`, shortest first."""
    matches = session.query(Series).filter(Series._name_normalized == normalized_name)
    return matches.order_by(func.char_length(Series.name)).all()
def show_by_id(show_id, session=None):
    """Return the show with primary key `show_id`; raises if not exactly one match."""
    query = session.query(Series).filter(Series.id == show_id)
    return query.one()
def season_by_id(season_id, session=None):
    """Return the season with primary key `season_id`; raises if not exactly one match."""
    query = session.query(Season).filter(Season.id == season_id)
    return query.one()
def episode_by_id(episode_id, session=None):
    """Return the episode with primary key `episode_id`; raises if not exactly one match."""
    query = session.query(Episode).filter(Episode.id == episode_id)
    return query.one()
def episode_release_by_id(release_id, session=None):
    """Return the episode release with primary key `release_id`; raises if not exactly one match."""
    query = session.query(EpisodeRelease).filter(EpisodeRelease.id == release_id)
    return query.one()
def season_release_by_id(release_id, session=None):
    """Return the season release with primary key `release_id`; raises if not exactly one match."""
    query = session.query(SeasonRelease).filter(SeasonRelease.id == release_id)
    return query.one()
def show_episodes(series, start=None, stop=None, count=False, descending=False, session=None):
    """
    Return all episodes of a given series (or just their count when `count` is True),
    sliced to [start, stop) and sorted according to the series' identifier scheme.
    """
    episodes = session.query(Episode).filter(Episode.series_id == series.id)
    if count:
        return episodes.count()
    # Query episodes in sane order instead of iterating from series.episodes:
    # sequence -> by number, ep -> by (season, number), anything else -> by identifier.
    sort_columns = {
        'sequence': (Episode.number,),
        'ep': (Episode.season, Episode.number),
    }.get(series.identified_by, (Episode.identifier,))
    if descending:
        sort_columns = tuple(column.desc() for column in sort_columns)
    episodes = episodes.order_by(*sort_columns)
    return episodes.slice(start, stop).from_self().all()
def store_parser(session, parser, series=None, quality=None):
    """
    Push series information into database. Returns added/existing release.

    :param session: Database session to use
    :param parser: parser for release that should be added to database
    :param series: Series in database to add release to. Will be looked up if not provided.
    :param quality: If supplied, this will override the quality from the series parser
    :return: List of Releases
    """
    if quality is None:
        quality = parser.quality
    if not series:
        # if series does not exist in database, add new
        series = (
            session.query(Series)
            .filter(Series.name == parser.name)
            .filter(Series.id != None)
            .first()
        )
        if not series:
            log.debug('adding series `%s` into db', parser.name)
            series = Series()
            series.name = parser.name
            session.add(series)
            log.debug('-> added `%s`', series)
    releases = []
    # A parser may yield several identifiers (e.g. multi-episode releases);
    # `ix` offsets the episode number for each additional identifier.
    for ix, identifier in enumerate(parser.identifiers):
        if parser.season_pack:
            # Checks if season object exist
            season = (
                session.query(Season)
                .filter(Season.season == parser.season)
                .filter(Season.series_id == series.id)
                .filter(Season.identifier == identifier)
                .first()
            )
            if not season:
                log.debug('adding season `%s` into series `%s`', identifier, parser.name)
                season = Season()
                season.identifier = identifier
                season.identified_by = parser.id_type
                season.season = parser.season
                series.seasons.append(season)
                log.debug('-> added season `%s`', season)
            # flush so a freshly added season gets its autonumber id
            session.flush()
            # Sets the filter_by, and filter_id for later releases query
            filter_id = season.id
            table = SeasonRelease
            filter_by = table.season_id
            entity = season
        else:
            # if episode does not exist in series, add new
            episode = (
                session.query(Episode)
                .filter(Episode.series_id == series.id)
                .filter(Episode.identifier == identifier)
                .filter(Episode.series_id != None)
                .first()
            )
            if not episode:
                log.debug('adding episode `%s` into series `%s`', identifier, parser.name)
                episode = Episode()
                episode.identifier = identifier
                episode.identified_by = parser.id_type
                # if episodic format
                if parser.id_type == 'ep':
                    episode.season = parser.season
                    episode.number = parser.episode + ix
                elif parser.id_type == 'sequence':
                    episode.season = 0
                    episode.number = parser.id + ix
                series.episodes.append(episode)  # pylint:disable=E1103
                log.debug('-> added `%s`', episode)
            # flush so a freshly added episode gets its autonumber id
            session.flush()
            # Sets the filter_by, and filter_id for later releases query
            table = EpisodeRelease
            filter_by = table.episode_id
            filter_id = episode.id
            entity = episode
        # if release does not exists in episode or season, add new
        #
        # NOTE:
        #
        # filter(Release.episode_id != None) fixes weird bug where release had/has been added
        # to database but doesn't have episode_id, this causes all kinds of havoc with the plugin.
        # perhaps a bug in sqlalchemy?
        release = (
            session.query(table)
            .filter(filter_by == filter_id)
            .filter(table.title == parser.data)
            .filter(table.quality == quality)
            .filter(table.proper_count == parser.proper_count)
            .filter(filter_by != None)
            .first()
        )
        if not release:
            log.debug('adding release `%s`', parser)
            release = table()
            release.quality = quality
            release.proper_count = parser.proper_count
            release.title = parser.data
            entity.releases.append(release)  # pylint:disable=E1103
            log.debug('-> added `%s`', release)
        releases.append(release)
    session.flush()  # Make sure autonumber ids are populated
    return releases
def add_series_entity(session, series, identifier, quality=None):
    """
    Add the entity identified by `identifier` to `series` in the database
    and mark every resulting release as downloaded.

    :param session: Database session to use
    :param series: Series in database to add entity to.
    :param identifier: Series identifier to be added.
    :param quality: If supplied, this will override the quality from the series parser.
    :raises ValueError: if the identifier does not parse or nothing was stored
    """
    name_to_parse = '{} {}'.format(series.name, identifier)
    if quality:
        name_to_parse += ' {}'.format(quality)
    parsed = plugin.get('parsing', 'series.db').parse_series(name_to_parse, name=series.name)
    if not parsed.valid:
        raise ValueError(
            'Invalid identifier for series `{}`: `{}`.'.format(series.name, identifier)
        )
    added = store_parser(session, parsed, series=series)
    if not added:
        raise ValueError(
            'Unable to add `%s` to series `%s`.' % (identifier, series.name.capitalize())
        )
    for release in added:
        release.downloaded = True
    log.debug('Entity `%s` from series `%s` added to database.', identifier, series.name)
|
import argparse
import ast
import json
import random
from pathlib import Path
import numpy as np
import pandas as pd
import spacy
from spacy.gold import GoldParse
from spacy.util import minibatch
import torch
from spacy_transformers import TransformersLanguage
from spacy_transformers.util import PIPES, cyclic_triangular_rate
def is_transformer(nlp):
    """Return True when *nlp* is a transformer-backed pipeline, False for a regular spaCy model."""
    return isinstance(nlp, TransformersLanguage)
def read_unique_labels(labels_path):
    """
    Read the list of unique labels from the given path.

    Labels are expected one per line.  Blank lines — including the trailing
    newline most editors add at end-of-file, which `split("\\n")` used to turn
    into a spurious empty-string label — are ignored.

    :param labels_path: ``pathlib.Path`` to the labels file
    :return: list of label strings, in file order
    """
    lines = labels_path.read_text(encoding="utf-8").splitlines()
    return [line for line in lines if line.strip()]
def read_data(file_path, has_labels):
    """
    Read some text data with optional labels from the given path.

    The file is a TSV with a "Text" column and (optionally) a "Label" column
    holding stringified lists of labels, which are parsed into sets to match
    spaCy's multilabel paradigm.

    Return a 2-tuple of texts and labels (labels is None when has_labels is False).
    """
    df = pd.read_csv(file_path, sep="\t", dtype="str", keep_default_na=False)
    texts = df["Text"].tolist()
    if not has_labels:
        return texts, None
    parsed_labels = [set(ast.literal_eval(raw)) for raw in df["Label"]]
    return texts, parsed_labels
def spacy_format_labels(ys, labels):
    """Convert a list of label sets into spaCy's training format: one {label: 0/1} dict per example."""
    formatted = []
    for y in ys:
        formatted.append({label: (1 if label in y else 0) for label in labels})
    return formatted
def evaluate(tokenizer, nlp, valid_data, labels):
    """
    Evaluate model performance on a test dataset.

    :param tokenizer: unused in the body; kept for interface compatibility
    :param nlp: spaCy pipeline containing a textcat (or transformer textcat) pipe
    :param valid_data: iterable of (text, {"cats": {label: 0/1}}) pairs
    :param labels: full list of category labels the model predicts
    :return: (accuracy, loss) tuple; accuracy is computed over every
        (document, label) decision at a 0.5 score threshold
    """
    texts, cats = zip(*valid_data)
    golds = []
    # Use the model's ops module
    # to make sure this is compatible with GPU (cupy array)
    # or without (numpy array)
    scores = np.zeros((len(cats), len(labels)), dtype="f")
    if is_transformer(nlp):
        textcat = nlp.get_pipe(PIPES.textcat)
    else:
        textcat = nlp.get_pipe("textcat")
    scores = textcat.model.ops.asarray(scores)
    num_correct = 0
    for i, doc in enumerate(nlp.pipe(texts)):
        gold_cats = cats[i]["cats"]
        for j, (label, score) in enumerate(doc.cats.items()):
            if label not in gold_cats:
                raise ValueError(f"Prediction for unexpected label: {label}")
            scores[i, j] = score
            # binary decision per (doc, label) at a fixed 0.5 threshold
            doc_prediction = score > 0.5
            if doc_prediction == bool(gold_cats[label]):
                num_correct += 1
        golds.append(GoldParse(doc, cats=gold_cats))
    # epsilon guards against division by zero on an empty dataset
    accuracy = num_correct / ((len(texts) * len(labels)) + 1e-8)
    loss, _ = textcat.get_loss(texts, golds, scores)
    return accuracy, loss
def train(
    *,
    input_dir,
    output_dir,
    nlp,
    architecture,
    train_batch_size,
    num_train_epochs,
    labels,
    dropout,
    disabled_components,
    multilabel,
):
    """
    Train the TextCategorizer component of the passed pipeline on the given data.

    Return training/validation metrics and save the model to the specified output directory.
    Make sure to restore any disabled pipeline components before saving so we can reuse the
    saved checkpoint however we need to.

    :param input_dir: directory containing train.tsv and dev.tsv
    :param output_dir: directory receiving the checkpoint and valid_results.json
    :param nlp: spaCy pipeline (transformer-backed or regular)
    :param architecture: TextCategorizer architecture (used for non-transformer models)
    :param train_batch_size: minibatch size for training
    :param num_train_epochs: number of passes over the training data
        (NOTE(review): assumed >= 1 — the final metrics reference variables
        assigned inside the epoch loop)
    :param labels: list of category labels to register on the textcat pipe
    :param dropout: dropout proportion passed to nlp.update
    :param disabled_components: pipe names disabled during training
    :param multilabel: when True, classes are not mutually exclusive
    """
    if is_transformer(nlp):
        textcat_pipe_name = PIPES.textcat
        textcat = nlp.create_pipe(
            textcat_pipe_name,
            config={
                "architecture": "softmax_last_hidden",
                "exclusive_classes": not multilabel,
                # We get an error about token_vector_width being unset if it isn't set
                # explicitly here. We can't set it to an arbitrary value, either. It must
                # be set based on the model
                "token_vector_width": nlp.get_pipe(PIPES.tok2vec).model.nO,
            },
        )
    else:
        textcat_pipe_name = "textcat"
        textcat = nlp.create_pipe(
            textcat_pipe_name,
            config={"exclusive_classes": not multilabel, "architecture": architecture},
        )
    nlp.add_pipe(textcat, last=True)
    for label in labels:
        textcat.add_label(label)
    X_train, y_train = read_data(input_dir / "train.tsv", True)
    X_valid, y_valid = read_data(input_dir / "dev.tsv", True)
    train_labels = spacy_format_labels(y_train, labels)
    valid_labels = spacy_format_labels(y_valid, labels)
    # nlp.update expects (text, {"cats": {...}}) pairs
    train_data = list(zip(X_train, [{"cats": cats} for cats in train_labels]))
    valid_data = list(zip(X_valid, [{"cats": cats} for cats in valid_labels]))
    with nlp.disable_pipes(*disabled_components):
        if is_transformer(nlp):
            # fine-tune from pretrained weights with a cyclic triangular LR schedule
            optimizer = nlp.resume_training()
            optimizer.alpha = 0.001
            optimizer.trf_weight_decay = 0.005
            optimizer.L2 = 0.0
            learn_rate = 2e-5
            learn_rates = cyclic_triangular_rate(
                learn_rate / 3, learn_rate * 3, 2 * len(train_data) // train_batch_size
            )
        else:
            optimizer = nlp.begin_training()
        for i in range(num_train_epochs):
            losses = {}
            random.shuffle(train_data)
            batches = minibatch(train_data, train_batch_size)
            for batch in batches:
                texts, annotations = zip(*batch)
                if is_transformer(nlp):
                    optimizer.trf_lr = next(learn_rates)
                nlp.update(
                    texts, annotations, sgd=optimizer, drop=dropout, losses=losses
                )
            # evaluate with averaged weights for a more stable estimate
            with textcat.model.use_params(optimizer.averages):
                accuracy, valid_loss = evaluate(nlp.tokenizer, nlp, valid_data, labels)
            train_loss = losses[textcat_pipe_name]
            print(
                f"Iter {i}\tTrain Loss: {train_loss:.3f}\tValid Loss: {valid_loss:.3f}\tAccuracy: {accuracy:.3f}"
            )
    # save outside the disable_pipes block so all components are restored first
    checkpoint_dir = output_dir / "checkpoint"
    checkpoint_dir.mkdir(exist_ok=True, parents=True)
    with nlp.use_params(optimizer.averages):
        nlp.to_disk(checkpoint_dir)
    metrics = {
        "valid_accuracy": accuracy,
        "mean_train_loss": losses[textcat_pipe_name] / len(X_train),
        "mean_valid_loss": valid_loss / len(X_valid),
    }
    with open(output_dir / "valid_results.json", "w") as f:
        json.dump(metrics, f)
def predict(*, input_dir, output_dir, nlp, labels, disabled_components):
    """
    Generate predictions for the given dataset using the TextCategorizer component
    of the passed pipeline, writing per-label probabilities to test_results.tsv.
    """
    X_test, _ = read_data(input_dir / "test.tsv", False)
    pred_probas = []
    with nlp.disable_pipes(*disabled_components):
        pred_probas = [
            {label: doc.cats.get(label, 0.0) for label in labels}
            for doc in nlp.pipe(X_test)
        ]
    pd.DataFrame(pred_probas).to_csv(output_dir / "test_results.tsv", index=False, sep="\t")
def embed(*, input_dir, output_dir, nlp, embed_pooling, disabled_components):
    """
    Generate embeddings for the given dataset using the vectors from the
    passed pipeline, writing one JSON line per document to embeddings.jsonl.

    :param embed_pooling: "mean" writes a single pooled vector per document;
        "none" writes one vector per token along with the token texts.
    :raises ValueError: on an unsupported pooling type.
    """
    # Fix: the previous version initialized an `embeddings` list up front that
    # was dead code — it was only ever shadowed inside the "none" branch.
    X_embed, _ = read_data(input_dir / "input.tsv", False)
    with nlp.disable_pipes(*disabled_components):
        with open(output_dir / "embeddings.jsonl", "w") as f:
            for doc in nlp.pipe(X_embed):
                if embed_pooling == "mean":
                    row_json = {"embedding": doc.vector.tolist()}
                elif embed_pooling == "none":
                    # one embedding per token, plus the token text itself
                    token_vectors = []
                    tokens = []
                    for tok in doc:
                        token_vectors.append(tok.vector.tolist())
                        tokens.append(tok.text)
                    row_json = {"embedding": token_vectors, "tokens": tokens}
                else:
                    raise ValueError(f"Unsupported pooling type: {embed_pooling}")
                f.write(f"{json.dumps(row_json)}\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "mode",
        choices=["train", "predict", "embed"],
        help="Action to perform with the model. Train: read in a training "
        "and validation dataset, train on training and evaluate the model "
        "on validation dataset. Predict: output predictions on a test dataset. "
        "Embed: return embeddings for a test dataset.",
    )
    parser.add_argument(
        "--input-dir",
        required=True,
        help="Directory containing input files. The exact files needed vary "
        "depending on the mode.",
    )
    parser.add_argument(
        "--output-dir",
        required=True,
        help="Directory containing output files. The exact files created vary "
        "depending on the mode.",
    )
    parser.add_argument(
        "--cache-dir",
        required=True,
        help="Directory to use for caching spaCy downloads.",
    )
    parser.add_argument(
        "--model",
        required=True,
        help="SpaCy language model to use. This could be "
        "either the name of a stock spaCy model (in which case it's assumed to already "
        "be installed via pip) or a path to a custom spaCy model on disk.",
    )
    parser.add_argument(
        "--architecture",
        required=True,
        help="Architecture for the spaCy TextCategorizer.",
    )
    parser.add_argument(
        "--multilabel",
        action="store_true",
        help="If True, model will train in a multilabel context (i.e. it's allowed to make multiple "
        "class predictions for each observation).",
    )
    parser.add_argument(
        "--full-pipeline",
        action="store_true",
        help="If passed, use the full spaCy language pipeline (including tagging, "
        "parsing, and named entity recognition) for the TextCategorizer model used in "
        "training and prediction. This makes training/prediction much slower but theoretically "
        "provides more information to the model.",
    )
    parser.add_argument(
        "--train-batch-size",
        type=int,
        default=32,
        help="Per-GPU batch size to use for training.",
    )
    parser.add_argument(
        "--embed-batch-size",
        type=int,
        default=32,
        help="Per-GPU batch size to use for embedding.",
    )
    parser.add_argument(
        "--num-train-epochs",
        type=int,
        default=3,
        help="Number of epochs to run training on.",
    )
    parser.add_argument(
        "--dropout", type=float, default=0.2, help="Dropout proportion for training."
    )
    parser.add_argument(
        "--embed-pooling",
        choices=["mean", "none"],
        default="mean",
        help="Pooling strategy to use for combining embeddings. If None, embeddings for "
        "each token will be returned along with the token. Ignored if method != 'embed'.",
    )
    args = parser.parse_args()

    input_dir = Path(args.input_dir)
    output_dir = Path(args.output_dir)

    # Prefer GPU execution when one is available.
    using_gpu = spacy.prefer_gpu()
    if using_gpu:
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
    device = "gpu" if using_gpu else "cpu"
    print(f"Using device: {device}")

    print("Initializing spaCy model...")
    print(f"  Language Model: {args.model}")
    nlp = spacy.load(args.model)
    if not is_transformer(nlp):
        # The architecture flag only applies to non-transformer textcats.
        print(f"  TextCategorizer Architecture: {args.architecture}")
    model_name = nlp.meta.get("name", "")
    print(f"Model '{model_name}' loaded.")

    # Decide which pipeline components to disable for the requested mode.
    disabled_components = set()
    if args.mode == "embed":
        # Don't need the text categorizer (if present) for embeddings
        for textcat_pipe in ("textcat", "trf_textcat"):
            if nlp.has_pipe(textcat_pipe):
                disabled_components.add(textcat_pipe)
        if model_name.endswith("sm"):
            # No vectors available for small models -- we need to enable the
            # other pipeline components to provide tensors, which aren't as good as vectors
            # but will suffice in a pinch
            pass
        else:
            # If vectors are available, disable everything, since we just want the vectors
            for component in ("tagger", "parser", "ner"):
                if nlp.has_pipe(component):
                    disabled_components.add(component)
    elif args.mode in ("train", "predict"):
        if args.full_pipeline:
            # Enable all parsing components to provide maximum information to the
            # text categorization model
            pass
        else:
            # Otherwise, disable everything that isn't part of the text categorizer model
            for component in ("tagger", "parser", "ner"):
                if nlp.has_pipe(component):
                    disabled_components.add(component)

    # We need a list of labels for training or prediction so we know what the
    # output shape of the model should be
    labels = None
    if args.mode in ("train", "predict"):
        labels_file = input_dir / "labels.tsv"
        # Fix: use the labels_file path computed above instead of rebuilding
        # the identical path (the variable was previously assigned but unused).
        labels = read_unique_labels(labels_file)
        num_labels = len(labels)
        print(f"Inferred number of labels: {num_labels}")

    if args.mode == "train":
        train(
            input_dir=input_dir,
            output_dir=output_dir,
            nlp=nlp,
            architecture=args.architecture,
            labels=labels,
            train_batch_size=args.train_batch_size,
            num_train_epochs=args.num_train_epochs,
            dropout=args.dropout,
            disabled_components=disabled_components,
            multilabel=args.multilabel,
        )
    elif args.mode == "predict":
        predict(
            input_dir=input_dir,
            output_dir=output_dir,
            nlp=nlp,
            labels=labels,
            disabled_components=disabled_components,
        )
    elif args.mode == "embed":
        embed(
            input_dir=input_dir,
            output_dir=output_dir,
            nlp=nlp,
            embed_pooling=args.embed_pooling,
            disabled_components=disabled_components,
        )
    else:
        raise ValueError(f"invalid mode: {args.mode}")
|
from flask_microservices import MicroServicesApp

# Application entry point: build the app and register the enabled module
# blueprints.
app = MicroServicesApp(__name__)

# Modules to expose; each name corresponds to a module under the modules
# directory (see register_urls below).
enabled_modules = [
    'admin',
    'forum',
    'home'
]

# By default, this will assume your modules directory is "./modules"
# if a second argument is not provided.
app.register_urls(enabled_modules)

# NOTE(review): DEBUG is hard-coded on here — make sure this is disabled
# for production deployments.
app.config['DEBUG'] = True
#app.config['EXPLAIN_TEMPLATE_LOADING'] = True
|
# Generated by Django 3.2.3 on 2021-05-26 20:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `thumbnail` ImageField (with a default image) to the blog Post model."""

    # Must run after the migration that added Post.last_edited_date.
    dependencies = [
        ('blog', '0005_post_last_edited_date'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='thumbnail',
            # NOTE(review): default/upload_to use relative '../demo_media/...'
            # paths — presumably intentional for this demo project's media
            # layout; confirm before reusing elsewhere.
            field=models.ImageField(default='../demo_media/blog/images/default_thumbnail.png', upload_to='../demo_media/blog/images/'),
        ),
    ]
|
from selenium.webdriver.common.by import By
from Locators import Locator
class Login(object):
    """Page object wrapping the login form controls located via Locator XPaths."""

    def __init__(self, driver):
        self.driver = driver
        locate = driver.find_element
        self.Login = locate(By.XPATH, Locator.Login)
        self.Password = locate(By.XPATH, Locator.Password)
        self.EnterButton = locate(By.XPATH, Locator.EnterButton)

    def setLogin(self, login):
        """Replace the contents of the login field with *login*."""
        field = self.Login
        field.clear()
        field.send_keys(login)

    def setPassword(self, password):
        """Replace the contents of the password field with *password*."""
        field = self.Password
        field.clear()
        field.send_keys(password)

    def click_EnterButton(self):
        """Submit the form by clicking the enter button."""
        self.EnterButton.click()
|
#!/usr/bin/python
#***************************************************************************
# Copyright 2015 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
#***************************************************************************
# To import this package in an extension run the following in _init.sh:
# export PYTHONPATH=$EXT_DIR/utilities:$PYTHONPATH
import json
import logging
import logging.handlers
import os
import os.path
import sys
import timeit
from subprocess import call, Popen, PIPE
# ascii color codes for output
LABEL_GREEN='\033[0;32m'
LABEL_RED='\033[0;31m'
LABEL_COLOR='\033[0;33m'
LABEL_NO_COLOR='\033[0m'
STARS="**********************************************************************"

# defaults used when creating/binding services for the pipeline
DEFAULT_SERVICE_PLAN="free"
DEFAULT_SERVICE_KEY="pipeline_service_key"
DEFAULT_BRIDGEAPP_NAME="pipeline_bridge_app"

# extension directory and debug flag come from the pipeline environment
EXT_DIR=os.getenv('EXT_DIR', ".")
DEBUG=os.environ.get('DEBUG')

# wall-clock reference for computing the remaining wait budget
SCRIPT_START_TIME = timeit.default_timer()
LOGGER = None

# wait budget (minutes, primed from the WAIT_TIME env var by
# get_remaining_wait_time)
FULL_WAIT_TIME = 5
WAIT_TIME = 0
# setup logmet logging connection if it's available
def setup_logging():
    """
    Build and return the 'pipeline' logger: always logs to stdout, and
    additionally to the pipeline log file when logmet forwarding is enabled.
    Level is DEBUG when the DEBUG env flag is set, INFO otherwise.
    """
    level = logging.DEBUG if DEBUG else logging.INFO
    logger = logging.getLogger('pipeline')
    logger.setLevel(level)

    # if logmet is enabled, send the log through our pipeline logfile as well
    if os.environ.get('LOGMET_LOGGING_ENABLED'):
        pipeline_logfile = os.environ.get('PIPELINE_LOGGING_FILE')
        if pipeline_logfile:
            file_handler = logging.FileHandler(pipeline_logfile)
            logger.addHandler(file_handler)
            # don't send debug info through syslog
            file_handler.setLevel(logging.INFO)
            # set formatting on this to be json style
            file_handler.setFormatter(logging.Formatter('{\"@timestamp\": \"%(asctime)s\", \"loglevel\": \"%(levelname)s\", \"module\": \"%(name)s\", \"message\": \"%(message)s\"}\n'))

    # in any case, dump logging to the screen
    console = logging.StreamHandler(sys.stdout)
    console.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    console.setLevel(level)
    logger.addHandler(console)

    return logger
# load bearer token and space guid from ~/.cf/config.json
# used for a variety of things, including calls to the CCS server
def load_cf_auth_info():
    """
    Load the Cloud Foundry bearer token and space guid from ~/.cf/config.json.

    :return: (bearer_token, space_guid) tuple; any "bearer " prefix is
        stripped from the token so callers get the raw value
    """
    cf_filename = "%s/.cf/config.json" % os.path.expanduser("~")
    with open(cf_filename) as cf_config_file:
        config_info = json.load(cf_config_file)

    bearer_token = config_info["AccessToken"]
    if bearer_token.lower().startswith("bearer "):
        bearer_token = bearer_token[len("bearer "):]
    space_guid = config_info["SpaceFields"]["Guid"]
    return bearer_token, space_guid
# check with cf to find the api server
# adjust to find the ICE api server
# return both
def find_api_servers ():
    """
    Ask the cf CLI for the API endpoint and derive the ICE (containers)
    endpoint from it.

    :return: (cf_api_server, ice_api_server) tuple of URL strings
    :raises Exception: if `cf api` fails or no endpoint URL is found in
        its output
    """
    cf_api_server = None
    ice_api_server = None
    command = "cf api"
    proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate();
    if proc.returncode != 0:
        msg = "Error: Unable to find api server, rc was " + str(proc.returncode)
        if LOGGER:
            LOGGER.error(msg)
        raise Exception(msg)
    # cf api output comes back in the form:
    #  API endpoint: https://api.ng.bluemix.net (API version: 2.23.0)
    # so take out just the part we need
    words = out.split()
    for word in words:
        if word.startswith("https://"):
            cf_api_server=word
    # Fix: fail with a clear message when no URL was present, instead of
    # crashing later with AttributeError on None.replace().
    if cf_api_server is None:
        msg = "Error: Unable to find api server url in \"cf api\" output"
        if LOGGER:
            LOGGER.error(msg)
        raise Exception(msg)
    # get ice server as well by adjusting cf server
    ice_api_server = cf_api_server.replace ( 'api.', 'containers-api.')
    if DEBUG=="1":
        if LOGGER:
            LOGGER.debug("cf_api_server set to " + str(cf_api_server))
            LOGGER.debug("ice_api_server set to " + str(ice_api_server))
    return cf_api_server, ice_api_server
# return the remaining time to wait
# first time, will prime from env var and subtract init script time
#
# return is the expected max time left in seconds we're allowed to wait
# for pending jobs to complete
def get_remaining_wait_time (first = False):
    """
    Return the remaining time (in seconds) we're allowed to wait for
    pending jobs to complete.

    On the priming call (first=True) the budget is read from the WAIT_TIME
    env var (minutes) and the INT_EST_TIME env var (init-script seconds)
    is subtracted.  On later calls the time elapsed since script start is
    subtracted from the budget.

    :param first: True for the priming call, False afterwards
    :return: non-negative number of seconds left to wait
    """
    global FULL_WAIT_TIME
    if first:
        # first time through, set up the var from env
        try:
            FULL_WAIT_TIME = int(os.getenv('WAIT_TIME', "5"))
        except ValueError:
            FULL_WAIT_TIME = 5
        # convert to seconds
        time_to_wait = FULL_WAIT_TIME * 60
        # and (if not 0) subtract out init time
        if time_to_wait != 0:
            try:
                initTime = int(os.getenv("INT_EST_TIME", "0"))
            except ValueError:
                initTime = 0
            time_to_wait -= initTime
    else:
        # just get the initial start time
        # NOTE(review): this reads the module-level WAIT_TIME constant (0),
        # not the FULL_WAIT_TIME primed above -- confirm this is intentional.
        time_to_wait = WAIT_TIME
    # if no time to wait, no point subtracting anything
    if time_to_wait != 0:
        time_so_far = int(timeit.default_timer() - SCRIPT_START_TIME)
        time_to_wait -= time_so_far
    # can't wait negative time, fix it
    if time_to_wait < 0:
        time_to_wait = 0
    return time_to_wait
# find the given service in our space, get its service name, or None
# if it's not there yet
def find_service_name_in_space (service):
    """
    Find the given service in our space and return its instance name, or
    None if it's not there yet.

    Parses the tabular output of `cf services`: column offsets are learned
    from the header line, the instance name is the first column and the
    service type sits between the "service" and "plan" headers.
    """
    command = "cf services"
    proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate();
    if proc.returncode != 0:
        LOGGER.info("Unable to lookup services, error was: " + out)
        return None
    foundHeader = False
    serviceStart = -1  # column where the "service" column begins
    serviceEnd = -1    # column where it ends (start of "plan" - 1)
    serviceName = None
    for line in out.splitlines():
        if (foundHeader == False) and (line.startswith("name")):
            # this is the header bar, find out the spacing to parse later
            # header is of the format:
            #name       service     plan   bound apps    last operation
            # and the spacing is maintained for following lines
            serviceStart = line.find("service")
            serviceEnd = line.find("plan")-1
            foundHeader = True
        elif foundHeader:
            # have found the headers, looking for our service
            if service in line:
                # maybe found it, double check by making
                # sure the service is in the right place,
                # assuming we can check it
                if (serviceStart > 0) and (serviceEnd > 0):
                    if service in line[serviceStart:serviceEnd]:
                        # this is the correct line - find the bound app(s)
                        # if there are any
                        serviceName = line[:serviceStart]
                        serviceName = serviceName.strip()
            else:
                continue
    return serviceName
# find a service in our space, and if it's there, get the dashboard
# url for user info on it
def find_service_dashboard (service):
    """
    Look up `service` in the current space and return its dashboard URL,
    or None when the service (or its dashboard line) cannot be found.
    """
    serviceName = find_service_name_in_space(service)
    if serviceName == None:
        return None

    proc = Popen(["cf service \"" + serviceName + "\""],
                 shell=True, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        return None

    # scan the "cf service" output for the dashboard line
    serviceURL = None
    for line in out.splitlines():
        if line.startswith("Dashboard: "):
            serviceURL = line[11:]
    return serviceURL
# search cf, find an app in our space bound to the given service, and return
# the app name if found, or None if not
def find_bound_app_for_service (service):
    """
    Search cf for an app in our space bound to the given service, and return
    the app name if found, or None if not.

    Parses the tabular `cf services` output using column offsets learned from
    the header line; when several apps are bound only the first is returned.
    """
    proc = Popen(["cf services"], shell=True, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate();
    if proc.returncode != 0:
        return None
    foundHeader = False
    serviceStart = -1  # column offsets learned from the header line
    serviceEnd = -1
    boundStart = -1
    boundEnd = -1
    boundApp = None
    for line in out.splitlines():
        if (foundHeader == False) and (line.startswith("name")):
            # this is the header bar, find out the spacing to parse later
            # header is of the format:
            #name       service     plan   bound apps    last operation
            # and the spacing is maintained for following lines
            serviceStart = line.find("service")
            serviceEnd = line.find("plan")-1
            boundStart = line.find("bound apps")
            boundEnd = line.find("last operation")
            foundHeader = True
        elif foundHeader:
            # have found the headers, looking for our service
            if service in line:
                # maybe found it, double check by making
                # sure the service is in the right place,
                # assuming we can check it
                if (serviceStart > 0) and (serviceEnd > 0) and (boundStart > 0) and (boundEnd > 0):
                    if service in line[serviceStart:serviceEnd]:
                        # this is the correct line - find the bound app(s)
                        # if there are any
                        boundApp = line[boundStart:boundEnd]
            else:
                continue
    # if we found a binding, make sure we only care about the first one
    if boundApp != None:
        if boundApp.find(",") >=0 :
            boundApp = boundApp[:boundApp.find(",")]
        boundApp = boundApp.strip()
        if boundApp=="":
            boundApp = None
    if DEBUG:
        if boundApp == None:
            LOGGER.debug("No existing apps found bound to service \"" + service + "\"")
        else:
            LOGGER.debug("Found existing service \"" + boundApp + "\" bound to service \"" + service + "\"")
    return boundApp
# look for our default bridge app. if it's not there, create it
def check_and_create_bridge_app ():
    """
    Make sure the default bridge app exists in this space, creating it when
    missing.  Returns True when present or created, False when creation
    failed, and None when the app list could not be read.
    """
    def run_and_log(command):
        # run one cf command, tracing its outcome when DEBUG is on
        LOGGER.debug("Executing command \"" + command + "\"")
        proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        if DEBUG:
            LOGGER.debug("command \"" + command + "\" returned with rc=" + str(proc.returncode))
            LOGGER.debug("\tstdout was " + out)
            LOGGER.debug("\tstderr was " + err)
        return proc.returncode, out

    # first look to see if the bridge app already exists
    rc, out = run_and_log("cf apps")
    if rc != 0:
        return None
    for line in out.splitlines():
        if line.startswith(DEFAULT_BRIDGEAPP_NAME + " "):
            # found it!
            return True

    # our bridge app isn't around, create it
    LOGGER.info("Bridge app does not exist, attempting to create it")
    command = 'cf'
    if os.environ.get('OLDCF_LOCATION'):
        candidate = os.environ.get('OLDCF_LOCATION')
        if os.path.isfile(candidate):
            command = candidate
    command = command +" push " + DEFAULT_BRIDGEAPP_NAME + " -i 1 -d mybluemix.net -k 1M -m 64M --no-hostname --no-manifest --no-route --no-start"
    rc, out = run_and_log(command)
    if rc != 0:
        LOGGER.info("Unable to create bridge app, error was: " + out)
        return False
    return True
# look for our bridge app to bind this service to. If it's not there,
# attempt to create it. Then bind the service to that app under the
# given plan. If it all works, return that app name as the bound app
def create_bound_app_for_service (service, plan=DEFAULT_SERVICE_PLAN):
    """
    Ensure the bridge app and the service both exist, then bind the service
    to the bridge app under the given plan.  Return the bridge app name on
    success, or None on any failure.
    """
    if not check_and_create_bridge_app():
        return None

    # get or create the service if necessary
    serviceName = get_or_create_service(service, plan)
    if serviceName is None:
        return None

    # now try to bind the service to our bridge app
    LOGGER.info("Binding service \"" + serviceName + "\" to app \"" + DEFAULT_BRIDGEAPP_NAME + "\"")
    bind_command = "cf bind-service " + DEFAULT_BRIDGEAPP_NAME + " \"" + serviceName + "\""
    proc = Popen([bind_command], shell=True, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        LOGGER.info("Unable to bind service to the bridge app, error was: " + out)
        return None

    return DEFAULT_BRIDGEAPP_NAME
# return the service name for the service, if the service doesn't exist, create it.
def get_or_create_service(service, plan=DEFAULT_SERVICE_PLAN):
    """
    Return the service-instance name for `service`, creating the instance in
    this space (under `plan`) when it does not exist yet.  Returns None when
    creation fails.
    """
    serviceName = find_service_name_in_space(service)
    if serviceName != None:
        return serviceName

    # the tile isn't loaded in our space yet -- try to create it
    LOGGER.info("Service \"" + service + "\" is not loaded in this space, attempting to load it")
    serviceName = service
    command = "cf create-service \"" + service + "\" \"" + plan + "\" \"" + serviceName + "\""
    LOGGER.debug("Executing command \"" + command + "\"")
    proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        LOGGER.info("Unable to create service in this space, error was: " + out)
        return None
    return serviceName
# find given bound app, and look for the passed bound service in cf. once
# found in VCAP_SERVICES, look for the credentials setting, and return the
# dict. Raises Exception on errors
def get_credentials_from_bound_app(service, binding_app=None, plan=DEFAULT_SERVICE_PLAN):
    """Return the credentials dict for *service* from an app bound to it.

    If *binding_app* is not given, a bound app is located (or created when
    SETUP_SERVICE_SPACE=true). The app's VCAP_SERVICES are read via
    ``cf env`` and the service's credentials dict is returned.

    Raises:
        Exception: if the service is not bound and may not be set up, if
            ``cf env`` fails, or if no credentials can be found.
    """
    # if no binding app parm passed, go looking to find a bound app for this one
    if binding_app is None:
        binding_app = find_bound_app_for_service(service)
    # if still no binding app, and the user agreed, CREATE IT!
    if binding_app is None:
        setup_space = os.environ.get('SETUP_SERVICE_SPACE')
        if (setup_space is not None) and (setup_space.lower() == "true"):
            binding_app = create_bound_app_for_service(service=service, plan=plan)
        else:
            raise Exception("Service \"" + service + "\" is not loaded and bound in this space. " + LABEL_COLOR + "Please add the service to the space and bind it to an app, or set the parameter to allow the space to be setup automatically" + LABEL_NO_COLOR)
    # if STILL no binding app, we're out of options, just fail out
    if binding_app is None:
        raise Exception("Unable to access an app bound to the " + service + " service - this must be set to get the proper credentials.")
    # try to read the env vars off the bound app in cloud foundry, the one we
    # care about is "VCAP_SERVICES"
    env_proc = Popen(["cf env \"" + binding_app + "\""], shell=True,
                     stdout=PIPE, stderr=PIPE)
    env_out, _env_err = env_proc.communicate()
    if env_proc.returncode != 0:
        raise Exception("Unable to read credential information off the app bound to the " + service + " service - please check that it is set correctly.")
    # The cf env output interleaves free text with JSON blocks:
    #   blah blah blah
    #   {
    #     <some json data for a var>
    #   }
    #   ... repeat, possibly including more free text
    # Collect each {...} block (joined onto one string) and skip everything else.
    env_list = []
    in_section = False
    for line in env_out.splitlines():
        if in_section:
            env_list[-1] += line
            if line.startswith("}"):
                # block end
                in_section = False
        elif line.startswith("{"):
            # starting a block
            env_list.append(line)
            in_section = True
    # now parse the collected json blocks to get the actual vars
    json_env = {}
    for blob in env_list:
        json_env.update(json.loads(blob))
    # Walk down to the credentials for the service in question.
    # Bug fix: the original indexed with [] (raising KeyError on a missing
    # level, which made the `!= None` guards dead code); .get() lets a
    # missing level fall through to the intended Exception below.
    service_entries = json_env.get('VCAP_SERVICES', {}).get(service)
    if service_entries:
        credentials = service_entries[0].get('credentials')
        if credentials is not None:
            return credentials
    raise Exception("Unable to get bound credentials for access to the " + service + " service.")
# retrieve the credentials for non-binding service brokers which (optionally) implement the service_keys endpoint
def get_credentials_for_non_binding_service(service, plan=DEFAULT_SERVICE_PLAN, key_name=DEFAULT_SERVICE_KEY):
    """Fetch credentials for *service* via `cf service-key` instead of an app binding.

    Ensures the service instance exists (creating it under *plan* if needed),
    creates a service key named *key_name* when none exists, then parses the
    JSON body of `cf service-key`. Returns the parsed dict, or None if the
    instance or a key could not be obtained.
    """
    # get or create the service if necessary
    service_name = get_or_create_service(service, plan)
    if service_name is None:
        return None
    result = execute_cf_cmd("cf service-keys '%s'" % service_name)
    debug("Raw result: \n" + str(result))
    # ignore the header and grab the first service key
    # NOTE(review): assumes the first key name is always on line 4 of the
    # `cf service-keys` output -- fragile if the CLI output format changes.
    # Also assumes the command succeeded; execute_cf_cmd returns None on
    # failure, which would make splitlines() raise -- TODO confirm acceptable.
    result = result.splitlines()[3:4:]
    debug("Raw filtered result: \n" + str(result))
    if len(result) == 0:
        #create the default service key ("csk" = create-service-key)
        execute_cf_cmd("cf csk '%s' '%s'" % (service_name, key_name))
        result = execute_cf_cmd("cf service-keys '%s'" % service_name)
        debug("Raw result: \n" + str(result))
        # ignore the header and grab the first service key
        result = result.splitlines()[3:4:]
        debug("Raw filtered result: \n" + str(result))
    if len(result) > 0:
        result = execute_cf_cmd("cf service-key '%s' '%s'" % (service_name, result[0].strip()))
        debug("Raw result: \n" + str(result))
        # extract out only the json portion of the command result
        # (drop the leading CLI chatter line and the trailing line)
        result = '\n'.join(result.split('\n')[1:-1])
        debug("Raw filtered result: \n" + str(result))
        result = json.loads(result)
        debug("JSON result: \n" + str(result))
        # return the json as-is, let the caller pull the appropriate data out (which may vary from one service broker
        # to another)
        return result
    else:
        LOGGER.error("No service key for service instance %s", service_name)
        return None
def execute_cf_cmd(command):
    """Run *command* through the shell and return its stdout, or None on failure."""
    proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    debug("Executing command \"%s\" \n%s" % (command, out))
    if proc.returncode != 0:
        # Bug fix: the original built the message as
        #   "An error occurred running command '%s' " + out % command
        # where precedence applies % to `out` (the command output) instead of
        # the format string, producing garbage or a formatting error.
        LOGGER.error("An error occurred running command '%s': %s", command, out)
        return None
    return out
def debug(message):
    """Emit *message* at debug level, but only when the module DEBUG flag is set."""
    if not DEBUG:
        return
    LOGGER.debug(message)
|
"""
ulid/consts
~~~~~~~~~~~
Contains public API constant values.
"""
from . import ulid
__all__ = ['MIN_TIMESTAMP', 'MAX_TIMESTAMP', 'MIN_RANDOMNESS', 'MAX_RANDOMNESS', 'MIN_ULID', 'MAX_ULID']
#: Minimum possible timestamp value (0): six zero bytes (48 bits).
MIN_TIMESTAMP = ulid.Timestamp(b'\x00\x00\x00\x00\x00\x00')
#: Maximum possible timestamp value (281474976710.655 epoch, i.e. 2**48 - 1 milliseconds).
MAX_TIMESTAMP = ulid.Timestamp(b'\xff\xff\xff\xff\xff\xff')
#: Minimum possible randomness value (0): ten zero bytes (80 bits).
MIN_RANDOMNESS = ulid.Randomness(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
#: Maximum possible randomness value (1208925819614629174706175, i.e. 2**80 - 1).
MAX_RANDOMNESS = ulid.Randomness(b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff')
#: Minimum possible ULID value (0): timestamp and randomness both at minimum.
MIN_ULID = ulid.ULID(MIN_TIMESTAMP.bytes + MIN_RANDOMNESS.bytes)
#: Maximum possible ULID value (340282366920938463463374607431768211455, i.e. 2**128 - 1).
MAX_ULID = ulid.ULID(MAX_TIMESTAMP.bytes + MAX_RANDOMNESS.bytes)
|
import unittest
import logging
import os
import json
from unittest.mock import patch
from Museum_API.museum import MuseumAPI
# Configure root logging once at import time; test diagnostics are written
# to 'logging_statements.log' instead of stdout.
logging.basicConfig(filename='logging_statements.log', level=logging.DEBUG,
                    format='%(asctime)s:%(levelname)s:%(message)s')
class TestMuseumApi(unittest.TestCase):
    """Tests for MuseumAPI: live endpoint status codes and mocked responses."""

    def setUp(self):
        """Every test talks to the 'objects' endpoint."""
        self.endpoint = "objects"

    def test_api_url(self):
        """The objects endpoint should answer with HTTP 200."""
        api = MuseumAPI()
        status = api.get_response(self.endpoint).status_code
        self.assertEqual(status, 200)

    def test_object_url(self):
        """A misspelled endpoint should answer with HTTP 404."""
        api = MuseumAPI()
        bad_endpoint = self.endpoint + 'y'
        self.assertEqual(api.get_response(bad_endpoint).status_code, 404)

    @patch('Museum_API.museum.requests.get')
    def test_to_get_response_ok(self, mock_get):
        """A mocked OK response should yield a non-None result."""
        api = MuseumAPI()
        # Configure the mock to report an OK status.
        mock_get.return_value.ok = True
        self.assertIsNotNone(api.get_object_ids())

    def test_get_object_id(self):
        """Compare get_object_ids() against the recorded truth file."""
        truth_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                  'actual_data/objectids.json')
        try:
            with open(truth_path, encoding='utf-8') as handle:
                expected = json.load(handle)
            with patch('Museum_API.museum.requests.get') as mock_get:
                mock_get.return_value.json.return_value = expected
                self.assertEqual(MuseumAPI().get_object_ids(), expected)
        except FileNotFoundError as file_err:
            logging.exception('check for correct path because %s', file_err)

    def test_get_object_for_id(self):
        """Compare get_object_for_ids('1') against the recorded truth file."""
        truth_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                  'actual_data/objdata.json')
        try:
            with open(truth_path, encoding='utf-8') as handle:
                expected = json.load(handle)
            with patch('Museum_API.museum.requests.get') as mock_get:
                mock_get.return_value.json.return_value = expected
                self.assertEqual(MuseumAPI().get_object_for_ids('1'), expected)
        except FileNotFoundError as file_err:
            logging.exception('check for correct path because %s', file_err)
# Allow the test module to be run directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
'''
handles all kind of database connection tasks
'''
from django.db import models
class users(models.Model):
    """Store users information"""
    # NOTE(review): class name breaks the PascalCase convention, but renaming
    # would change the model identity/table, so it is left as-is.
    # The user name doubles as the primary key, so it must be unique.
    user_name = models.CharField(max_length=50,null=False,blank=False,primary_key=True)
    # NOTE(review): password is a plain CharField -- confirm the value is
    # hashed before being saved.
    password = models.CharField(max_length=50,null=False,blank=False)
    # Auth token issued to the user (see token_expire_time for validity).
    token = models.CharField(max_length=500,null=False,blank=False)
    # Moment after which the stored token is no longer valid.
    token_expire_time = models.DateTimeField(null=False,blank=False)
class news(models.Model):
    """Store news information"""
    # Auto-incrementing surrogate key for a news item.
    news_id = models.AutoField(serialize=True, auto_created=True, primary_key=True)
    # NOTE(review): CharField with max_length=50000 -- a TextField is the usual
    # choice for bodies this large, but changing it would require a migration.
    title = models.CharField(max_length=50000, null=False, blank=False)
    body = models.CharField(max_length=50000, null=False, blank=False)
    author = models.CharField(max_length=50000, null=False, blank=False)
|
import logging
import threading
import time
# Per-thread result slots, overwritten by the matching thread_functionN.
# Initial placeholder is ['vide', 0, 0, 0] ("vide" = French for "empty").
result_T1=['vide',0,0,0]
result_T2=['vide',0,0,0]
result_T3=['vide',0,0,0]
result_T4=['vide',0,0,0]
def thread_function1(name):
    """Worker: burn CPU for a while, then publish a result into result_T1."""
    global result_T1
    local_result = result_T1
    logging.info("Thread %s: starting", name)
    # CPU-bound busy work (10**8 multiplications) standing in for a real task.
    for outer in range(10000):
        for inner in range(10000):
            _ = outer * inner
    local_result = ['toto', 12, 65, 'BUY']
    result_T1 = local_result
    logging.info("Thread %s: finishing", name)
def thread_function2(name):
    """Worker: burn CPU for a while, then publish a result into result_T2."""
    global result_T2
    local_result = result_T2
    logging.info("Thread %s: starting", name)
    # CPU-bound busy work (10**8 multiplications) standing in for a real task.
    for outer in range(10000):
        for inner in range(10000):
            _ = outer * inner
    local_result = ['toto', 12, 65, 'BUY']
    result_T2 = local_result
    logging.info("Thread %s: finishing", name)
def thread_function3(name):
    """Worker: burn CPU for a while, then publish a result into result_T3."""
    global result_T3
    local_result = result_T3
    logging.info("Thread %s: starting", name)
    # CPU-bound busy work (10**8 multiplications) standing in for a real task.
    for outer in range(10000):
        for inner in range(10000):
            _ = outer * inner
    local_result = ['toto', 12, 65, 'BUY']
    result_T3 = local_result
    logging.info("Thread %s: finishing", name)
def thread_function4(name):
    """Worker: burn CPU for a while, then publish a result into result_T4."""
    global result_T4
    local_result = result_T4
    logging.info("Thread %s: starting", name)
    # CPU-bound busy work (10**8 multiplications) standing in for a real task.
    for outer in range(10000):
        for inner in range(10000):
            _ = outer * inner
    local_result = ['toto', 12, 65, 'BUY']
    result_T4 = local_result
    logging.info("Thread %s: finishing", name)
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,datefmt="%H:%M:%S")
logging.info("Main : before creating thread")
w = threading.Thread(target=thread_function1, args=(1,))
x = threading.Thread(target=thread_function1, args=(2,))
y = threading.Thread(target=thread_function1, args=(3,))
z = threading.Thread(target=thread_function1, args=(4,))
logging.info("Main : before running thread")
w.start()
x.start()
y.start()
z.start()
logging.info("Main : wait for the thread to finish")
print (result_T1)
w.join()
logging.info("Main : all done")
print (result_T1)
print (result_T2)
print (result_T3)
print (result_T4)
|
import json
from authemail.models import SignupCode
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TransactionTestCase
from django.utils.translation import activate
from rest_framework.test import APIClient
from rest_framework_jwt.settings import api_settings
from api.accounts.models import MyUser
# Run the suite under a fixed locale so translated strings are stable.
activate('en-us')
# Handlers used to mint JWTs directly in setUp (bypassing the login endpoint).
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
# Shape of a JWT: three base64url segments separated by dots.
# Bug fix: raw string avoids the invalid '\.' escape sequence (W605
# DeprecationWarning on modern Python).
token_regex = r'^[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*$'
class AccountMeTestCase(TransactionTestCase):
    """Exercise the authemail 'me' endpoint: reading the own account,
    updating it, and the side effects of an email change (signup codes,
    confirmation mail, fresh JWT).

    Refactor: every test previously repeated the APIClient/credentials
    boilerplate and the JSON decode; those are extracted into private
    helpers below. Test names and assertions are unchanged.
    """

    token = None

    def setUp(self):
        """Create a verified default user and a JWT credential for it."""
        user = MyUser.objects.create(email='test@byom.de', first_name='Test',
                                     last_name='User', phone='+49192481024')
        user.set_password('test123')
        user.is_verified = True
        user.save()
        payload = jwt_payload_handler(user)
        self.token = jwt_encode_handler(payload)

    # ------------------------------------------------------------------ #
    # helpers                                                            #
    # ------------------------------------------------------------------ #
    def _client(self, authenticated=True):
        """Return an APIClient, JWT-authenticated unless told otherwise."""
        client = APIClient()
        if authenticated:
            client.credentials(HTTP_AUTHORIZATION='JWT ' + self.token)
        return client

    def _get_me(self, client=None):
        """GET the account-me endpoint (authenticated by default)."""
        if client is None:
            client = self._client()
        return client.get(reverse('v1:authemail-me'))

    def _put_me(self, payload):
        """PUT *payload* to the account-me endpoint as the logged-in user."""
        return self._client().put(reverse('v1:authemail-me'), payload)

    @staticmethod
    def _json(response):
        """Decode the UTF-8 JSON body of *response*."""
        return json.loads(response.content.decode('utf-8'))

    # ------------------------------------------------------------------ #
    # tests                                                              #
    # ------------------------------------------------------------------ #
    def test_account_me_valid(self):
        response = self._get_me()
        self.assertEqual(response.status_code, 200)
        data = self._json(response)
        self.assertEqual(data['email'], 'test@byom.de')
        self.assertEqual(data['first_name'], 'Test')
        self.assertEqual(data['last_name'], 'User')
        self.assertFalse(data['is_staff'])
        self.assertIsNotNone(data['phone'])

    def test_account_me_unauthorized(self):
        response = self._get_me(client=self._client(authenticated=False))
        self.assertEqual(response.status_code, 401)
        data = self._json(response)
        self.assertEqual(data['detail'],
                         'Authentication credentials were not provided.')

    def test_account_me_invalid_token(self):
        client = APIClient()
        client.credentials(HTTP_AUTHORIZATION='JWT absolute_invalid_token')
        response = client.get(reverse('v1:authemail-me'))
        self.assertEqual(response.status_code, 401)

    def test_account_me_staff(self):
        user = MyUser.objects.first()
        user.is_staff = True
        user.save()
        response = self._get_me()
        self.assertEqual(response.status_code, 200)
        data = self._json(response)
        self.assertEqual(data['email'], 'test@byom.de')
        self.assertEqual(data['first_name'], 'Test')
        self.assertEqual(data['last_name'], 'User')
        self.assertTrue(data['is_staff'])
        self.assertIsNotNone(data['phone'])

    def test_account_me_update_missing_data(self):
        response = self._put_me({'first_name': 'New'})
        self.assertEqual(response.status_code, 400)
        data = self._json(response)
        self.assertEqual(data['email'], ['This field is required.'])
        self.assertEqual(data['phone'], ['This field is required.'])

    def test_account_me_update_first_name(self):
        response = self._put_me({
            'email': 'test@byom.de',
            'first_name': 'New',
            'last_name': 'User',
            'phone': '+49192481024',
        })
        self.assertEqual(response.status_code, 200)
        data = self._json(response)['user']
        self.assertEqual(data['email'], 'test@byom.de')
        self.assertEqual(data['first_name'], 'New')
        self.assertEqual(data['last_name'], 'User')
        user = MyUser.objects.first()
        self.assertEqual(user.email, 'test@byom.de')
        self.assertEqual(user.first_name, 'New')
        self.assertEqual(user.last_name, 'User')

    def test_account_me_update_last_name(self):
        response = self._put_me({
            'email': 'test@byom.de',
            'first_name': 'Test',
            'last_name': 'New',
            'phone': '+49192481024',
        })
        self.assertEqual(response.status_code, 200)
        data = self._json(response)['user']
        self.assertEqual(data['email'], 'test@byom.de')
        self.assertEqual(data['first_name'], 'Test')
        self.assertEqual(data['last_name'], 'New')
        user = MyUser.objects.first()
        self.assertEqual(user.email, 'test@byom.de')
        self.assertEqual(user.first_name, 'Test')
        self.assertEqual(user.last_name, 'New')

    def test_account_me_update_phone(self):
        response = self._put_me({
            'email': 'test@byom.de',
            'first_name': 'Test',
            'last_name': 'New',
            'phone': '+49123456789',
        })
        self.assertEqual(response.status_code, 200)
        data = self._json(response)['user']
        self.assertEqual(data['email'], 'test@byom.de')
        self.assertEqual(data['first_name'], 'Test')
        self.assertEqual(data['last_name'], 'New')
        self.assertEqual(data['phone'], '+49123456789')
        user = MyUser.objects.first()
        self.assertEqual(user.email, 'test@byom.de')
        self.assertEqual(user.first_name, 'Test')
        self.assertEqual(user.last_name, 'New')
        self.assertEqual(user.phone, '+49123456789')

    def test_account_me_update_phone_invalid(self):
        response = self._put_me({
            'email': 'test@byom.de',
            'first_name': 'Test',
            'last_name': 'New',
            'phone': 'invalid_phone_number',
        })
        self.assertEqual(response.status_code, 400)
        data = self._json(response)
        self.assertEqual(data['phone'], ["Phone number must be entered in the format: "
                                         "'+999999999'. Up to 15 digits allowed.",
                                         "Ensure this field has no more than 17 characters."])
        # The stored phone number must be untouched.
        user = MyUser.objects.first()
        self.assertEqual(user.phone, '+49192481024')

    def test_account_me_update_email(self):
        response = self._put_me({
            'email': 'new@byom.de',
            'first_name': 'Test',
            'last_name': 'User',
            'phone': '+49192481024',
        })
        self.assertEqual(response.status_code, 200)
        data = self._json(response)['user']
        self.assertEqual(data['email'], 'new@byom.de')
        self.assertEqual(data['first_name'], 'Test')
        self.assertEqual(data['last_name'], 'User')
        # Changing the email must reset the verification flag.
        self.assertFalse(data['is_verified'])
        user = MyUser.objects.first()
        self.assertEqual(user.email, 'new@byom.de')
        self.assertEqual(user.first_name, 'Test')
        self.assertEqual(user.last_name, 'User')
        self.assertFalse(user.is_verified)

    def test_account_me_update_email_multiple_signup_codes(self):
        user = MyUser.objects.first()
        SignupCode.objects.create_signup_code(user, '127.0.0.1')
        SignupCode.objects.create_signup_code(user, '127.0.0.1')
        self.assertEqual(SignupCode.objects.filter(user=user).count(), 2)
        response = self._put_me({
            'email': 'new@byom.de',
            'first_name': 'Test',
            'last_name': 'User',
            'phone': '+49192481024',
        })
        self.assertEqual(response.status_code, 200)
        data = self._json(response)['user']
        self.assertEqual(data['email'], 'new@byom.de')
        self.assertEqual(data['first_name'], 'Test')
        self.assertEqual(data['last_name'], 'User')
        self.assertFalse(data['is_verified'])
        # Stale signup codes must be collapsed down to a single fresh one.
        user = MyUser.objects.first()
        self.assertEqual(SignupCode.objects.filter(user=user).count(), 1)
        self.assertFalse(user.is_verified)

    def test_account_me_update_email_send_mail(self):
        response = self._put_me({
            'email': 'new@byom.de',
            'first_name': 'Test',
            'last_name': 'User',
            'phone': '+49192481024',
        })
        self.assertEqual(response.status_code, 200)
        # An email change must trigger exactly one confirmation mail.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Confirm your Email Address')

    def test_account_me_update_email_respond_token(self):
        response = self._put_me({
            'email': 'new@byom.de',
            'first_name': 'Test',
            'last_name': 'User',
            'phone': '+49192481024',
        })
        self.assertEqual(response.status_code, 200)
        data = self._json(response)
        # A fresh JWT must be issued for the new identity.
        self.assertIsNotNone(data['token'])
        self.assertRegexpMatches(data['token'], token_regex)

    def test_account_me_update_all(self):
        response = self._put_me({
            'email': 'new@byom.de',
            'first_name': 'New',
            'last_name': 'Name',
            'phone': '+49123456789',
        })
        self.assertEqual(response.status_code, 200)
        data = self._json(response)['user']
        self.assertEqual(data['email'], 'new@byom.de')
        self.assertEqual(data['first_name'], 'New')
        self.assertEqual(data['last_name'], 'Name')
        self.assertEqual(data['phone'], '+49123456789')
        user = MyUser.objects.first()
        self.assertEqual(user.email, 'new@byom.de')
        self.assertEqual(user.first_name, 'New')
        self.assertEqual(user.last_name, 'Name')

    def test_account_me_update_prevent_is_staff(self):
        response = self._put_me({
            'email': 'test@byom.de',
            'first_name': 'Test',
            'last_name': 'User',
            'is_staff': True,
            'phone': '+49192481024',
        })
        self.assertEqual(response.status_code, 200)
        # is_staff must not be settable through this endpoint.
        data = self._json(response)['user']
        self.assertFalse(data['is_staff'])
        user = MyUser.objects.first()
        self.assertFalse(user.is_staff)
|
#!/usr/bin/python
# coding:utf-8
import urllib2
import re
import os
def execute_answer():
    """Fetch one puzzle from qlcoder.com, solve it with the external C++
    robot binary, and submit the answer URL that the binary produces.

    Python 2 only (urllib2 and print statements). Relies on a hard-coded
    session cookie and on ../cpp_robot/build/cpp_robot having been built.
    """
    headers = {
        'Cookie': 'gr_user_id=3949c66a-56d6-49be-99e6-dc48710481cd; uuid=5811c0d121dd4; è¿é¢ççæ¡æ¯oreo=eyJpdiI6Ik13RVwvTVlPd0pHMGFkWTJFdElndnhRPT0iLCJ2YWx1ZSI6ImJKUlMxaFA4a1lYV2J3b0NnN0ZUS1E9PSIsIm1hYyI6ImRjZTBjYjU4NDg5ZjNjYjk5OGNmZmE0MTk2MWU1NmIyMTA4NmViMmM2OWUxYTA3NjdhYjNiODdlOTNkMmFlNTYifQ%3D%3D; XSRF-TOKEN=eyJpdiI6IndsTFlJekoreDNFcTltQjJvQWYwNVE9PSIsInZhbHVlIjoiYis4SnV6SjliMWRhRVc0VnJzOVNCZGd6THREVENtOVR2ejV5QUQwRVNLdElFa2taeCs3cWJteVBseVdhdXJ2ZEI3SWRPd3pmNnRXSHNaeERBcVdaRHc9PSIsIm1hYyI6IjBhODM4MjczODQ3MmU3MDgzNWQ2OThiMGVmYzdkMjEzNWNhOWRiNmMxMjgzNDlkYjI1ZjBlYjMxYWU0NjVhZjcifQ%3D%3D; laravel_session=eyJpdiI6IkpPWGlaWXBmSWkzYVwvdE5NYTdUbWR3PT0iLCJ2YWx1ZSI6IndxZXN4MUdFc0pQTkFLZENWM1ZxMmxuMkRzODh0R0dUQ2liVVlJYlZkTmpodUVnR3g0YzR2bnRMNVBiR1hkMkhMdHN1RXFyY3NJME12VGtTWUFYa0pnPT0iLCJtYWMiOiI1NjAxODE1MjM5YjNiYzYyMDE5NjBlZTRlNWNlOWYxNzllY2Y5NjQ1ZThlNjI3N2MwOWI1YmY1ZWFmNDgyZTdlIn0%3D; gr_session_id_80dfd8ec4495dedb=00c7f621-140a-44d1-b53c-d356c37de177; Hm_lvt_420590b976ac0a82d0b82a80985a3b8a=1477531541,1477533371,1477535066,1477558482; Hm_lpvt_420590b976ac0a82d0b82a80985a3b8a=1477566715'
    }
    # Ask the server for the next challenge page.
    request = urllib2.Request('http://www.qlcoder.com/train/autocr', headers=headers)
    response_stream = urllib2.urlopen(request, timeout=200)
    html_str = response_stream.read()
    my_list = html_str.split('\n')
    for ele in my_list:
        # The puzzle parameters are embedded in a line shaped like
        # "level=..&<rows>=..&<cols>=..&<map>=..." -- pick those lines out.
        if re.match('level=.*[0-9]+', ele):
            print ele
            # Drop the trailing 4 characters (markup after the parameters).
            ele = ele[0:len(ele) - 4]
            print ele
            params = ele.split('&')
            level = int(params[0].split('=')[1])
            row_num = int(params[1].split('=')[1])
            col_num = int(params[2].split('=')[1])
            map_info = params[3].split('=')[1]
            params_dict = {'level': level, 'row_num': row_num, 'col_num': col_num, 'map_info': map_info}
            # time.sleep(5)
            print params_dict
            # Hand the puzzle to the external solver; its output goes to a file.
            params_str = str(params_dict['row_num']) + ' ' + str(params_dict['col_num']) + \
                ' ' + params_dict['map_info'] + ' >output_res.txt'
            print params_str
            os.system('../cpp_robot/build/cpp_robot ' + params_str)
            new_url = str()
            # The last line of the solver output is the answer-submission URL.
            with open('output_res.txt') as fs:
                content = fs.readlines()
                new_url = content[len(content) - 1]
                print new_url
            # Submit the answer with the same session cookie.
            request = urllib2.Request(new_url, headers=headers)
            response_stream = urllib2.urlopen(request, timeout=200)
            print response_stream.read()
# Drive ~1000 solve attempts.
# Bug fix: the original re-ran execute_answer() in both the `except` AND the
# `else` branch, so every successful iteration executed the solver twice, and
# the retry after an IOError was unguarded (a second failure escaped the loop).
for attempt in range(1, 1000):
    try:
        execute_answer()
    except IOError:
        # Transient network error -- retry once, still guarded.
        try:
            execute_answer()
        except IOError:
            pass
|
from itertools import product as cartesian_product
from extmath.meta import infinitelist, duality, indexargument
from extmath.basic import product, group, sumexp
@infinitelist([2, 3])
def primes(base):
    '''Prime number sequence.

    The @infinitelist decorator (see extmath.meta) appears to turn the
    locals() returned here into the namespace of a lazily growing list type
    seeded with [2, 3], extended on demand via __generate__ -- TODO confirm
    against extmath.meta.
    '''
    def index(self, n):
        # Position of the prime n in the sequence; grows the list until n
        # is covered, then defers to the plain list index.
        if n not in self:
            raise ValueError('{} is not prime number'.format(n))
        while n > self[-1]:
            # Indexing one past the current end forces more primes to be generated.
            self[len(self)]
        return super(base, self).index(n)
    def __contains__(self, n):
        # Trial-division primality test using the primes generated so far.
        # Returns None (falsy) when a divisor is found via the loop's else path.
        if not isinstance(n, int):
            raise TypeError
        if n <= 1:
            return False
        for i in self:
            if i**2 > n:
                return True
            if not n % i:
                return False
    def __generate__(self):
        # Sieve the next window (p_k**2, p_{k+1}**2] using the known primes.
        head = self[self.__sieve_index]**2 + 1
        self.__sieve_index += 1
        last = self[self.__sieve_index]**2
        sieve = list(range(head, last))
        for p in self[:self.__sieve_index]:
            # Zero out every multiple of p inside the window.
            size = (last - head + (head % -p)) // p + 1
            sieve[-head % p::p] = [0] * size
        self.extend(p for p in sieve if p)
    def __init__(self, value):
        # __sieve_index tracks how far the sieve window has advanced.
        self.__sieve_index = 0
        super(base, self).__init__(value)
    return locals()
@duality((1 + 5**0.5) / 2)
def phi(n):
    '''Duality number-function data type.
    as number: phi -> 1.618033988...
        Golden Ratio, the real number satisfying phi == 1 + 1/phi.
    as function: phi(number) -> number
        Euler's totient: how many 0 < a < n satisfy gcd(a, n) == 1.'''
    if n == 1:
        return 1
    # Multiplicative formula over the prime factorization of n.
    totient = 1
    for p, k in group(factorized(n)):
        totient *= (p - 1) * p**(k - 1)
    return totient
@duality(3.141592653589793)
def pi(n):
    '''Duality number-function data type.
    as number: pi -> 3.141592653...
        Ratio of a circle's circumference to its diameter.
    as function: pi(number) -> number
        Prime-counting function: how many primes p satisfy 0 < p <= n.'''
    count = 0
    for _ in primes.under(n + 1):
        count += 1
    return count
@indexargument
def sigma(n, x=1):
    '''sigma(int) -> int
    Divisor sigma function: the sum of d**x over the divisors d of n.'''
    if n == 1:
        return 1
    # Multiplicative formula over the prime factorization of n.
    total = 1
    for p, k in group(factorized(n)):
        total *= sumexp(p**x, k)
    return total
def trial_div(n):
    """Factor |n| by trial division against the prime sequence.

    Returns the prime factors in ascending order with multiplicity
    ([1] for n == 1, [] for n == 0).
    """
    remaining = abs(n)
    if remaining == 1:
        return [1]
    factors = []
    for p in primes:
        if p * p > remaining:
            break
        # Divide out p as many times as it appears.
        while remaining % p == 0:
            factors.append(p)
            remaining //= p
    if remaining > 1:
        # Whatever is left is itself prime.
        factors.append(remaining)
    return factors
def factorized(n):
    '''factorized(number) -> list
    Prime factorization of the number, e.g. factorized(42) -> [2, 3, 7]
    Returns an empty list if 'n' is 0, or [1] if 'n' is 1 (by definition).
    Only the absolute value is factorized; the sign is ignored.
    Currently delegates to trial division.'''
    return trial_div(n)
def divisors(n):
    '''divisors(number) -> list
    Positive divisors of the number, e.g. divisors(15) -> [1, 3, 5, 15]'''
    if not n:
        return []
    # Every divisor is a product of one prime power chosen per prime factor.
    prime_powers = ({p**e for e in range(k + 1)} for p, k in group(factorized(n)))
    combos = cartesian_product(*prime_powers)
    return sorted(product(combo) for combo in combos)
|
from threading import RLock
from wampnado import processors
from wampnado.uri import URIType
from wampnado.uri.topic import Topic
from wampnado.uri.procedure import Procedure
from wampnado.uri.error import Error
from wampnado.identifier import create_global_id
from wampnado.features import Options
from wampnado.messages import PublishMessage
from re import compile
class URIManager:
    """
    Manages all existing uris to which handlers can potentially
    publish, subscribe, or call to.
    """
    def __init__(self):
        # A table of the registrations issued to URIs under this realm
        # (registration_id -> uri name). Guarded by self.lock.
        self.lock = RLock()
        self.registrations = {}
        # A table of the URIs (name -> uri object).
        self.uris = {}
        # Valid URI shape: dotted lower-case/underscore/digit components.
        self.uri_pattern = compile(r"^([0-9a-z_]+\.)*([0-9a-z_]+)$")
        # Reserve all the standard errors.
        self.errors = Options(
            # The first two of these aren't technically errors, but just messages used in closing a connection. But close enough.
            close_realm=self.create_error('wamp.close.close_realm')[0],
            goodbye_and_out=self.create_error('wamp.close.goodbye_and_out')[0],
            # These are the errors that are required and standardized by WAMP Protocol standard
            invalid_uri=self.create_error('wamp.error.invalid_uri')[0],
            no_such_procedure=self.create_error('wamp.error.no_such_procedure')[0],
            procedure_already_exists=self.create_error('wamp.error.procedure_already_exists')[0],
            no_such_registration=self.create_error('wamp.error.no_such_registration')[0],
            no_such_subscription=self.create_error('wamp.error.no_such_subscription')[0],
            invalid_argument=self.create_error('wamp.error.invalid_argument')[0],
            system_shutdown=self.create_error('wamp.close.system_shutdown')[0],
            protocol_violation=self.create_error('wamp.error.protocol_violation')[0],
            not_authorized=self.create_error('wamp.error.not_authorized')[0],
            authorization_failed=self.create_error('wamp.error.authorization_failed')[0],
            no_such_realm=self.create_error('wamp.error.no_such_realm')[0],
            no_such_role=self.create_error('wamp.error.no_such_role')[0],
            # NOTE(review): attribute is spelled "cancelled" while the WAMP uri
            # uses the single-l "canceled" -- intentional per the spec's uri.
            cancelled=self.create_error('wamp.error.canceled')[0],
            option_not_allowed=self.create_error('wamp.error.option_not_allowed')[0],
            no_eligible_callee=self.create_error('wamp.error.no_eligible_callee')[0],
            option_disallowed__disclose_me=self.create_error('wamp.error.option_disallowed.disclose_me')[0],
            network_failure=self.create_error('wamp.error.network_failure')[0],
            # These aren't part of the WAMP standard, but I use them, so here they are.
            not_pending=self.create_error('wamp.error.not_pending')[0],  # Sent if we get a YIELD message but there is no call pending.
            unsupported=self.create_error('wamp.error.unsupported')[0],  # Sent when we get a message that we don't recognize.
            general_error=self.create_error('wamp.error.general_error')[0],  # Sent when we get a message that we don't recognize.
        )

    def get(self, uri_name, noraise=False):
        """
        Looks up and returns the specified uri object by name. If it is not found, it will raise the appropriate exception, unless noraise is true.
        """
        if not self.uri_pattern.match(uri_name):
            raise self.errors.invalid_uri.to_simple_exception('uri is not valid.', details=Options(uri=uri_name))
        uri = self.uris.get(uri_name)
        if uri is None and not noraise:
            # NOTE(review): no_such_role looks odd for a missing uri -- confirm
            # this is the intended error to raise here.
            raise self.errors.no_such_role.to_simple_exception('not found')
        return uri

    def create(self, name, uri_obj, returnifexists=True, noraise=False):
        """
        Adds a new uri agnostic of type. Usually, this should be called by other
        methods inside this object. Returns (uri, registration_id) on success.
        """
        # RLock as a context manager replaces the acquire/try/finally dance.
        with self.lock:
            uri = self.get(name, noraise=True)
            # Only add if it doesn't exist.
            if uri is None:
                self.uris[name] = uri_obj
                registration_id = self.uris[name].registration_id
                self.registrations[registration_id] = name
                return self.uris[name], registration_id
            elif returnifexists:
                return uri, uri.registration_id
            elif not noraise:
                raise self.errors.procedure_already_exists.to_simple_exception('uri already exists', uri=name)

    def create_topic(self, name, reserver=None):
        """
        Creates a uri that can be subscribed and published to.
        """
        args = self.create(name, Topic(name, reserver=reserver))
        # If the name already exists but is not a topic, that's an error.
        if args[0].uri_type != URIType.TOPIC:
            raise self.errors.no_such_subscription.to_simple_exception('uri type error', requested_type=URIType.TOPIC, uri=name, required_type=args[0].uri_type)
        return args

    def create_error(self, name):
        """
        Adds an entry for an error URI.
        """
        args = self.create(name, Error(name))
        return args

    def create_procedure(self, name, provider_handler):
        """
        Add a new procedure provided by the provider_handler. Raises if the
        procedure uri already exists (returnifexists=False).
        """
        return self.create(name, Procedure(name, provider_handler), returnifexists=False)

    def reserve_topic(self, uri_name, provider_handler):
        """
        Creates a topic URI that can be subscribed to without having to subscribe to it.
        """
        return self.create_topic(uri_name, reserver=provider_handler)

    def remove(self, registration_id):
        """
        Removes a given registration, regardless of type. Returns the removed
        uri object, or None when the registration id is unknown.
        """
        name = self.registrations.pop(registration_id, False)
        if name:
            # Bug fix: the original mutated self.registrations while iterating
            # it and popped by uri *name* (a value, not a key). Collect any
            # other registration ids pointing at the same uri first, then drop
            # them by key.
            stale_ids = [rid for rid, uri_name in self.registrations.items() if uri_name == name]
            for rid in stale_ids:
                self.registrations.pop(rid, None)
            return self.uris.pop(name)

    def add_subscriber(self, uri_name, handler):
        """
        Add a handler as a uri's subscriber. Returns the subscription id.
        """
        (uri, _) = self.create_topic(uri_name)
        subscription_id = self.uris[uri.name].add_subscriber(handler)
        return subscription_id

    def remove_subscriber(self, uri, handler):
        """
        Remove a connection from a uri's subscribers provided:
        - uri (name)
        - handler
        """
        uri = self.uris.get(uri)
        uri.remove_subscriber(handler)
        # If there are no publishers and no subscribers, delete it.
        # Bug fix: the original called len() on the unbound dict.keys method
        # (a TypeError) and passed the uri object where remove() expects a
        # registration id.
        if len(uri.subscriptions) == 0 and len(uri.publishers) == 0:
            self.remove(uri.registration_id)

    def disconnect(self, handler, notify=False):
        """
        Removes a handler from the manager, effectively disconnecting it from the realm. Can be called upon the closure of the
        transport as part of its cleanup, or by an authorized client to kick the other client.
        """
        # Iterate over a snapshot of the keys since entries may be deleted.
        for name in list(self.uris.keys()):
            self.uris[name].disconnect(handler)
            if not self.uris[name].live:
                # NOTE(review): this leaves the uri's entry in
                # self.registrations behind -- confirm whether that is intended.
                del self.uris[name]
        if notify:
            pass  # XXX Send the final message.

    def publish(self, uri_name, origin_handler, *args, request_id=None, **kwargs):
        """
        A convenience function to allow slightly more seamless publication.
        """
        if request_id is None:
            request_id = create_global_id()
        uri = self.get(uri_name, noraise=True)
        # It is possible, and not an error, that there are no subscribers. In that case, do nothing.
        if uri is not None:
            uri.publish(origin_handler, PublishMessage(uri_name=uri_name, request_id=request_id, args=args, kwargs=kwargs))

    def call(self, uri_name, origin_handler, *args, request_id=None, **kwargs):
        """
        A convenience function to allow slightly more seamless calls.
        """
        if request_id is None:
            request_id = create_global_id()
        self.get(uri_name).invoke(origin_handler, request_id, *args, **kwargs)
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from spellbook.models import Spellbook
User = get_user_model()
class SpellbookSlotViewsCase(TestCase):
"""
A spellbook should be editable through the spellbook details page
"""
@classmethod
def setUpTestData(cls): # noqa: N802
"""Set up a user with a spellbook and some spells"""
# We need a profile for a spellbook, thus we need a django user
cls.user = User.objects.create_user(
email='test@user.com',
password='password',
)
cls.spellbook = Spellbook(
name="spellbook_name",
profile=cls.user.profile,
)
cls.spellbook.save()
# Create a level 2 slot
cls.spellbook.slots.create(level=2, max_capacity=1, current_capacity=1)
    def setUp(self):  # NOQA: N802
        """Log in as the user"""
        # The custom user model authenticates with the email as username.
        self.client.login(username='test@user.com', password='password')
def test_0_edit_spell_slots(self):
"""It should be possible to edit the spell slots from the edit page"""
url = reverse('spellbook:spellbook-edit-slots', kwargs={'pk': self.spellbook.pk})
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
data = {
'spellbook_pk': self.spellbook.pk,
'spell_slots_level_1': 3,
'spell_slots_level_2': 2,
}
for level in range(3, 10):
data['spell_slots_level_%s' % level] = 0
r = self.client.post(url, data, follow=True)
self.assertEqual(r.status_code, 200)
# Spell slots should be updated
self.assertEqual(self.spellbook.slot_level(1).max_capacity, 3)
self.assertEqual(self.spellbook.slot_level(1).current_capacity, 3)
self.assertEqual(self.spellbook.slot_level(2).max_capacity, 2)
self.assertEqual(self.spellbook.slot_level(3).max_capacity, 0)
self.assertEqual(self.spellbook.slots.count(), 9)
def test_1_use_spell_slots(self):
"""It should be possible to use a spell slot"""
# Use twice the same spell slot level
url = reverse('spellbook:spellbook-use-slot', args=[self.spellbook.pk, 1])
url = reverse('spellbook:spellbook-use-slot', args=[self.spellbook.pk, 1])
r = self.client.get(url, follow=True) # Once
self.assertEqual(r.status_code, 200)
self.assertEqual(self.spellbook.slot_level(1).current_capacity, 1)
r = self.client.get(url, follow=True) # Twice
self.assertEqual(r.status_code, 200)
self.assertEqual(self.spellbook.slot_level(1).current_capacity, 0)
# And another slot level
self.assertEqual(self.spellbook.slot_level(2).current_capacity, 1)
url = reverse('spellbook:spellbook-use-slot', args=[self.spellbook.pk, 2])
r = self.client.get(url, follow=True)
self.assertEqual(r.status_code, 200)
self.assertEqual(self.spellbook.slot_level(2).current_capacity, 0)
def test_2_reset_spell_slots(self):
"""It should be possible to reset to the maximum a slot's capacity"""
# Use two level 1 slots, one level 2 slot
self.spellbook.slot_level(1).use_slot()
self.spellbook.slot_level(1).use_slot()
self.spellbook.slot_level(2).use_slot()
self.assertEqual(self.spellbook.slot_level(1).current_capacity, 0)
self.assertEqual(self.spellbook.slot_level(2).current_capacity, 0)
# Reset level 1
url = reverse('spellbook:spellbook-reset-slots', args=[self.spellbook.pk, 1])
r = self.client.get(url, follow=True)
self.assertEqual(r.status_code, 200)
# Only level 1 should be reset
self.assertEqual(self.spellbook.slot_level(1).current_capacity, 2)
self.assertEqual(self.spellbook.slot_level(2).current_capacity, 0)
# Use level 1 slot again
self.spellbook.slot_level(1).use_slot()
# Reset all slots
url = reverse('spellbook:spellbook-reset-slots', args=[self.spellbook.pk])
r = self.client.get(url, follow=True)
self.assertEqual(r.status_code, 200)
# All slots should be reset
self.assertEqual(self.spellbook.slot_level(1).current_capacity, 2)
self.assertEqual(self.spellbook.slot_level(2).current_capacity, 1)
|
from flask import Flask, request, abort, send_file, jsonify, send_from_directory
import os, shutil, glob, random, string, tempfile, requests
import sys
sys.path.insert(0,'shortbol')
import shortbol.run as shb_run
# Flask application plus the ShortBOL template library bundled with it.
app = Flask(__name__)
shortbol_libs = os.path.join("shortbol", "templates")
@app.route("/status")
def status():
    """Liveness probe: report that this Flask server is up."""
    return "The Submit ShortBOL Test Plugin Flask Server is up and running"
@app.route("/evaluate", methods=["POST"])
def evaluate():
    """
    Score each file in the submitted manifest by how useful it is to this
    plugin: 2 = convertible to SBOL, 1 = useful but not convertible, 0 = neither.
    """
    # MIME types reference:
    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types
    eval_manifest = request.get_json(force=True)
    response = {"manifest": []}
    # Extensions this plugin can convert to SBOL / still finds useful.
    # These extensions have no registered MIME type, so classification uses
    # the filename suffix rather than the manifest's `type` field.
    convertible = {'txt', 'shb', 'rdfsh'}
    useful = {'txt', 'shb', 'rdfsh'}
    for entry in eval_manifest['manifest']['files']:
        name = entry['filename']
        extension = name.split('.')[-1]
        if extension in convertible:
            requirement = 2
        elif extension in useful:
            requirement = 1
        else:
            requirement = 0
        response["manifest"].append({
            "filename": name,
            "requirement": requirement})
    return jsonify(response)
@app.route("/run", methods=["POST"])
def run():
    """
    Convert every file named in the run manifest with ShortBOL and return a
    zip archive containing the converted files plus a manifest.json that
    maps each output file back to its source.
    """
    import json  # local import: only needed to serialize the manifest

    # Working directory that collects the converted files before zipping.
    temp_dir = tempfile.TemporaryDirectory()
    zip_in_dir_name = temp_dir.name
    run_manifest = request.get_json(force=True)
    files = run_manifest['manifest']['files']
    run_response_manifest = {"results": []}
    for a_file in files:
        file_name = a_file['filename']
        file_url = a_file['url']
        converted_file_name = file_name + ".converted"
        file_path_out = os.path.join(zip_in_dir_name, converted_file_name)
        # Retrieve the file contents named by the manifest.
        run_data = requests.get(file_url)
        sbh_input = os.path.join(temp_dir.name, "temp_shb.txt")
        with open(sbh_input, "w") as sbh_file:
            sbh_file.write(run_data.text)
        shb_run.parse_from_file(sbh_input, out=file_path_out,
                                optpaths=[shortbol_libs])
        # Record the converted file and its source in the response manifest.
        run_response_manifest["results"].append({
            "filename": converted_file_name,
            "sources": [file_name]})
    # BUGFIX: the manifest was previously written with str(dict), which is
    # not valid JSON (single quotes); serialize it properly instead.
    manifest_path = os.path.join(zip_in_dir_name, "manifest.json")
    with open(manifest_path, 'w') as manifest_file:
        json.dump(run_response_manifest, manifest_file)
    with tempfile.NamedTemporaryFile() as temp_file:
        # Zip the converted files and the manifest.
        shutil.make_archive(temp_file.name, 'zip', zip_in_dir_name)
        # Remove the staging directory now that its contents are archived.
        temp_dir.cleanup()
        return send_file(temp_file.name + ".zip")
@app.route("/testing/<file_name>")
def success(file_name):
    """
    Serve a static file from the ./testing directory; on any failure render
    the project's 404 page with the requested name substituted in.
    """
    cwd = os.getcwd()
    path = os.path.join(cwd, 'testing')
    try:
        return send_from_directory(path, file_name)
    except Exception:
        # Narrowed from a bare `except:` (which would also swallow
        # SystemExit/KeyboardInterrupt). send_from_directory raises on a
        # missing or unservable file; fall back to the templated error page.
        with open(os.path.join(cwd, "Static_File_Not_Found.html")) as file:
            error_message = file.read()
        error_message = error_message.replace('REPLACE_FILENAME', file_name)
        return error_message, 404
|
#---------------------------------------------------------------------------
# Copyright 2013 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
import os
import sys
import subprocess
import re
import argparse
from LoggerManager import logger, initConsoleLogging
""" Utilities Functions to wrap around git command functions via subprocess
1. make sure git is accessible directly via command line,
or git is in the %path% for windows or $PATH for Unix/Linux
"""
DEFAULT_GIT_HASH_LENGTH = 40 # default git hash length is 40
def getGitRepoRevisionHash(revision="HEAD", gitRepoDir=None):
    """
    Utility function to get the git hash based on a given git revision
    @revision: input revision, default is HEAD on the current branch
    @gitRepoDir: git repository directory, default is current directory.
    @return: return git hash if success, None otherwise
    """
    success, output = _runGitCommand(
        ["git", "rev-parse", "--verify", revision], gitRepoDir)
    if not success:
        return None
    # Scan the command output for the first line that is a pure hex hash.
    for raw_line in output.split('\r\n'):
        candidate = raw_line.strip(' \r\n')
        if re.search('^[0-9a-f]+$', candidate):
            return candidate
    return None
def commitChange(commitMsgFile, gitRepoDir=None):
    """
    Utility function to commit the change in the current branch
    @commitMsgFile: input commit message file for commit
    @gitRepoDir: git repository directory, default is current directory.
    @return: return True if success, False otherwise
    """
    # Without a readable commit-message file there is nothing to commit with.
    if not os.path.exists(commitMsgFile):
        return False
    success, output = _runGitCommand(
        ["git", "commit", "-F", commitMsgFile], gitRepoDir)
    logger.info(output)
    return success
def addChangeSet(gitRepoDir=None, patternList=None):
    """
    Utility function to add all the files changed to staging area
    @gitRepoDir: git repository directory, default is current directory.
                 if provided, will only add all changes under that directory
    @patternList: a list of pattern for matching files.
                  need to escape wildcard character '*'
    @return: return True if success, False otherwise
    """
    cmd = ["git", "add", "-A", "--"]
    # Only extend the command when a real, non-empty list was supplied.
    if isinstance(patternList, list) and patternList:
        cmd += patternList
    success, output = _runGitCommand(cmd, gitRepoDir)
    logger.info(output)
    return success
def switchBranch(branchName, gitRepoDir=None):
    """
    Utility function to switch to a different branch
    @branchName: the name of the branch to switch to
    @gitRepoDir: git repository directory, default is current directory.
    @return: return True if success, False otherwise
    """
    success, output = _runGitCommand(
        ["git", "checkout", branchName], gitRepoDir)
    logger.info(output)
    return success
def getStatus(gitRepoDir=None, subDirPath=None):
    """
    Utility function to report git status on the directory
    @gitRepoDir: git repository directory, default is current directory.
    @subDirPath: report only the status for the subdirectory provided
    @return: return the status message (regardless of command success)
    """
    cmd = ["git", "status"]
    if subDirPath:
        cmd += ['--', subDirPath]
    _, output = _runGitCommand(cmd, gitRepoDir)
    return output
def getCommitInfo(gitRepoDir=None, revision='HEAD'):
    """
    Utility function to retrieve commit information
    like date/time in Unix timestamp, title and hash
    @gitRepoDir: git repository directory, default is current directory.
    @revision: the revision to retrieve info, default is HEAD
    @return: dict keyed by the format placeholders ("%ct", "%s", "%H"),
             or None on failure
    """
    delim = '\n'
    fields = ("%ct", "%s", "%H")
    # One git-log entry, newline-delimited fields in a fixed order.
    cmd = ["git", "log", "--format=%s" % delim.join(fields), "-n1", revision]
    success, output = _runGitCommand(cmd, gitRepoDir)
    if not success:
        return None
    return dict(zip(fields, output.strip('\r\n').split(delim)))
def _runGitCommand(gitCmdList, workingDir):
    """
    Private Utility function to run git command in subprocess
    @gitCmdList: the argv list of the git command to run
    @workingDir: the working directory of the child process
    @return: return a tuple of (True, output) if success,
             (False, output) otherwise
    """
    output = None
    try:
        child = subprocess.Popen(gitCmdList,
                                 cwd=workingDir,  # set child working directory
                                 stdout=subprocess.PIPE)
        output, _ = child.communicate()
        # A non-zero return code signals command failure.
        return (child.returncode == 0, output)
    except OSError as ex:
        logger.error(ex)
        return (False, output)
def main():
    """Script entry point: currently only sets up console logging."""
    initConsoleLogging()
    pass

if __name__ == '__main__':
    main()
|
from .stat import Stat
class Player:
    # Wraps a raw save-data byte buffer and exposes named player fields.
    def __init__(self,data:bytearray):
        # The first 32 bytes hold the UTF-16-LE encoded player name.
        self.data = data
        self.name = self.data[:32].decode('utf-16-le')
        # Each field is a Stat view over a fixed offset in the buffer.
        # Stat(buffer, 108, 2, 3, label) — the exact meaning of the numeric
        # arguments (offset/shift/max?) is defined by Stat; TODO(review):
        # confirm against the Stat class before relying on them.
        self.face_type = Stat(self.data, 108, 2, 3, "Face type")
        self.skin_colour = Stat(self.data, 91, 0, 3, "Skin Colour")
        self.face_id = Stat(self.data, 101, 0, 511, "Face ID")
        self.hair_id = Stat(self.data, 93, 0, 2047, "Hair ID")
from itertools import (permutations,
product)
from typing import Tuple
from hypothesis import given
from robust.hints import Point
from robust.projection import signed_length
from robust.utils import (to_perpendicular_point,
to_sign)
from tests.utils import (equivalence,
is_even_permutation,
permute)
from . import strategies
@given(strategies.points_quadruples)
def test_basic(points_quadruple: Tuple[Point, Point, Point, Point]) -> None:
    """signed_length returns a value of the inputs' coordinate scalar type."""
    first_start, first_end, second_start, second_end = points_quadruple
    result = signed_length(first_start, first_end, second_start, second_end)
    # The result should share the scalar type of the input coordinates.
    assert isinstance(result, type(first_start[0]))
@given(strategies.points_pairs)
def test_perpendicular_endpoints(points_pair: Tuple[Point, Point]) -> None:
    """A segment against its perpendicular images has zero signed length."""
    first_start, first_end = points_pair
    assert not signed_length(first_start, first_end,
                             to_perpendicular_point(first_start),
                             to_perpendicular_point(first_end))
@given(strategies.points_quadruples)
def test_segments_permutation(points_quadruple: Tuple[Point, Point,
                                                      Point, Point]) -> None:
    """Swapping the two segments leaves the signed length unchanged."""
    first_start, first_end, second_start, second_end = points_quadruple
    result = signed_length(first_start, first_end, second_start, second_end)
    assert result == signed_length(second_start, second_end,
                                   first_start, first_end)
@given(strategies.points_quadruples)
def test_endpoints_permutations(points_quadruple: Tuple[Point, Point,
                                                        Point, Point]) -> None:
    """Permuting either segment's endpoints flips the sign by parity."""
    first_start, first_end, second_start, second_end = points_quadruple
    result = signed_length(first_start, first_end, second_start, second_end)
    result_sign = to_sign(result)
    first_endpoints = first_start, first_end
    second_endpoints = second_start, second_end
    # The sign is preserved exactly when the two endpoint permutations have
    # equal parity, and negated otherwise.
    assert all(
        to_sign(signed_length(*permute(first_endpoints,
                                       first_permutation),
                              *permute(second_endpoints,
                                       second_permutation)))
        == (result_sign
            if equivalence(is_even_permutation(first_permutation),
                           is_even_permutation(second_permutation))
            else -result_sign)
        for first_permutation, second_permutation in product(
            permutations(range(len(first_endpoints))),
            permutations(range(len(second_endpoints)))))
|
"""
Access the Helio Event Catalogue
"""
import io
import os
from lxml import etree
from requests import Session
from zeep import Client
from zeep.transports import Transport
from astropy.io.votable.table import parse_single_table
from sunpy.net import attrs as a
from sunpy.net.base_client import BaseClient, QueryResponseTable
from sunpy.net.helio import attrs as ha
from sunpy.net.helio import parser
from sunpy.time import parse_time
from sunpy.util.exceptions import warn_deprecated, warn_user
__all__ = ['HECClient', 'HECResponse']
def votable_handler(xml_table):
    """
    Build a VOtable object from VOtable-style xml bytes.

    parse_single_table() wants a file or file-like object, so the raw xml
    bytes are wrapped in an in-memory buffer before parsing.

    Parameters
    ----------
    xml_table : `bytes`
        Contains the VOtable style xml data

    Returns
    -------
    votable : `astropy.io.votable.tree.Table`
        A properly formatted VOtable object
    """
    buffer = io.BytesIO(xml_table)
    votable = parse_single_table(buffer)
    # Normalize: decode any bytes payload in the first column to str.
    for index in range(len(votable.array)):
        value = votable.array[index][0]
        if isinstance(value, bytes):
            votable.array[index] = (value.decode(),)
    buffer.close()
    return votable
class HECResponse(QueryResponseTable):
    """
    A container for data returned from HEC searches.
    """
    # No behavior is added beyond QueryResponseTable; the subclass exists so
    # HELIO results carry their own distinct type.
class HECClient(BaseClient):
    """
    Provides access to the HELIO webservices.
    """

    def __init__(self, link=None):
        """
        The constructor; establishes the webservice link for the client

        Initializes the client with a weblink

        Parameters
        ----------
        link : str
            Contains URL to valid WSDL endpoint

        Examples
        --------
        >>> from sunpy.net.helio import hec
        >>> hc = hec.HECClient()  # doctest: +SKIP
        """
        if link is None:
            # The default wsdl file
            link = parser.wsdl_retriever()
        session = Session()
        # This is for use in our test suite.
        session.verify = not bool(os.environ.get("NO_VERIFY_HELIO_SSL", 0))
        transport = Transport(session=session)
        self.hec_client = Client(link, transport=transport)

    @classmethod
    def _can_handle_query(cls, *query):
        # Time is mandatory; MaxRecords and TableName are the only extras.
        required = {a.Time}
        optional = {ha.MaxRecords, ha.TableName}
        return cls.check_attr_types_in_query(query, required, optional)

    @classmethod
    def _attrs_module(cls):
        return 'helio', 'sunpy.net.helio.attrs'

    def search(self, *args, **kwargs):
        """
        The simple interface to query the wsdl service.

        Used to utilize the service's TimeQuery() method, this is a simple
        interface between the sunpy module library and the web-service's API.

        .. note::
            By default the maximum records returned by the service are limited to 500.
            To obtain more results ``a.helio.MaxRecords`` must be set to a higher value.

        Examples
        --------
        >>> from sunpy.net.helio import attrs as ha
        >>> from sunpy.net import attrs as a, Fido
        >>> timerange = a.Time('2005/01/03', '2005/12/03')
        >>> res = Fido.search(timerange, ha.MaxRecords(10),
        ...                   ha.TableName('rhessi_hxr_flare'))  # doctest: +REMOTE_DATA
        >>> res  # doctest: +REMOTE_DATA
        <sunpy.net.fido_factory.UnifiedResponse object at ...>
        Results from 1 Provider:
        <BLANKLINE>
        10 Results from the HECClient:
        hec_id      time_start          time_peak      ... energy_kev flare_number
        ------ ------------------- ------------------- ... ---------- ------------
         31463 2005-01-03T01:37:36 2005-01-03T01:37:54 ...          6      5010320
         31464 2005-01-03T01:51:36 2005-01-03T01:59:18 ...         12      5010301
         31465 2005-01-03T03:26:28 2005-01-03T03:42:50 ...          6      5010332
         31466 2005-01-03T03:46:04 2005-01-03T04:07:10 ...         12      5010302
         31467 2005-01-03T05:00:24 2005-01-03T05:00:30 ...          6      5010313
         31468 2005-01-03T06:40:48 2005-01-03T06:42:46 ...          6      5010314
         31469 2005-01-03T08:27:56 2005-01-03T08:28:26 ...          6      5010334
         31470 2005-01-03T09:31:00 2005-01-03T09:33:34 ...          6      5010322
         31471 2005-01-03T09:34:52 2005-01-03T09:59:46 ...          6      5010336
         31472 2005-01-03T11:06:48 2005-01-03T11:07:18 ...         12      5010304
        <BLANKLINE>
        <BLANKLINE>
        """
        qrdict = {}
        for elem in args:
            if isinstance(elem, a.Time):
                qrdict['Time'] = elem
            elif isinstance(elem, ha.MaxRecords):
                qrdict['max_records'] = elem.value
            elif isinstance(elem, ha.TableName):
                qrdict['table_name'] = elem.value
            else:
                raise ValueError(
                    f"{elem.__class__.__name__} should be a ``attrs.Time``, ``attrs.hek.MaxRecords`` or ``attrs.hek.TableName`` attribute.")
        qrdict.update(kwargs)
        table = qrdict.get('table_name', None)
        if table:
            if isinstance(table, bytes):
                warn_deprecated('type `bytes` for table_name is deprecated, use `str` instead.')
                # BUGFIX: this previously did `table = str.encode(table)`,
                # which raises TypeError when `table` is bytes; decode the
                # bytes into the `str` the service call expects instead.
                table = table.decode()
        start_time = qrdict['Time'].start
        end_time = qrdict['Time'].end
        max_records = qrdict.get('max_records', 500)
        # Without a table name, prompt the user interactively.
        # NOTE(review): select_table() returns None when the user exits the
        # prompt, which makes this loop re-prompt forever — confirm intent.
        while table is None:
            table = self.select_table()
        start_time = parse_time(start_time)
        end_time = parse_time(end_time)
        results = self.hec_client.service.TimeQuery(STARTTIME=start_time.isot,
                                                    ENDTIME=end_time.isot,
                                                    FROM=table,
                                                    MAXRECORDS=max_records)
        results = votable_handler(etree.tostring(results))
        table = HECResponse(results.to_table(), client=self)
        if len(table) == max_records == 500:
            warn_user("Number of results is the same as the default `max_records` of 500. "
                      "It is possible your query has been truncated. "
                      "If you want to change this, set `a.helio.MaxRecords` to a higher value.")
        return table

    def get_table_names(self):
        """
        Returns a list of the available tables to query.

        Returns the names of all the tables that can be queried via the
        webservice.

        Returns
        -------
        tables.array: `numpy.ma.core.MaskedArray`
            A VOtable table of available tables names.

        Examples
        --------
        >>> from sunpy.net.helio import hec
        >>> hc = hec.HECClient()  # doctest: +SKIP
        >>> print(hc.get_table_names())  # doctest: +SKIP
        [('timed_see_flare',) ('hi_event',) ('yohkoh_flare_list',)
         ('wind_mfi_bs_crossing_time',) ('seeds_soho',) ('seeds_stb',)
         ...
         ('rhessi_hxr_flare',) ('cactus_soho_flow',) ('cactus_soho_cme',)
         ('stereob_het_sep',)]
        """
        results = self.hec_client.service.getTableNames()
        tables = votable_handler(etree.tostring(results))
        return tables.array

    def select_table(self):
        """
        Creates a list of table names and prompts the user for a choice

        This takes the table of table names from get_table_names(), creates a
        list of the names, sorts them, then presents the tables in a
        convenient menu for the user to choose from. It returns a string
        containing the name of the table that the user picked.

        Returns
        -------
        `str`
            Contains the name of the table that the user picked, or None if
            the user chose to exit.

        Examples
        --------
        >>> from sunpy.net.helio import hec  # doctest: +SKIP
        >>> hc = hec.HECClient()  # doctest: +SKIP
        >>> hc.select_table()  # doctest: +SKIP
        """
        tables = self.get_table_names()
        table_list = [t[0] for t in tables if len(t[0]) > 0]
        table_list.sort()
        for index, table in enumerate(table_list):
            print(f'{index + 1} - {table}')
        while True:
            user_input = input(f"\nPlease enter a table number between 1 and {len(table_list)} "
                               "('e' to exit): ")
            if user_input.lower() == "e" or user_input.lower() == "exit":
                return None
            if user_input.isdigit() and 1 <= int(user_input) <= len(table_list):
                table_no = int(user_input)
                return table_list[table_no - 1]
            else:
                print(f"Input must be an integer between 1 and {len(table_list)}")

    def fetch(self, *args, **kwargs):
        """
        This is a no operation function as this client does not download data.
        """
        return NotImplemented
|
from retrobiocat_web.app.biocatdb import bp
from flask import flash, url_for, request, jsonify
from flask_security import roles_required, current_user
from retrobiocat_web.mongo.models.user_models import User, Role
from retrobiocat_web.mongo.models.biocatdb_models import Sequence, Activity, Paper, EnzymeType
from retrobiocat_web.app.app import user_datastore
from retrobiocat_web.app.biocatdb.functions.papers import papers_functions, papers_crossref
import mongoengine as db
from distutils.util import strtobool
from retrobiocat_web.app.biocatdb.functions import check_permission
@bp.route('/_admin_set_owner', methods=['GET', 'POST'])
@roles_required('admin')
def admin_set_owner():
    """Admin endpoint: reassign the owner of a paper to the posted user."""
    paper = Paper.objects(id=request.form['paper_id'])[0]
    # (Removed a leftover debug print of the posted owner id.)
    new_owner = User.objects(id=request.form['new_owner_id'])[0]
    paper.owner = new_owner
    paper.save()
    result = {'status': 'success',
              'msg': 'Paper owner updated',
              'issues': []}
    return jsonify(result=result)
@bp.route('/_admin_activity_to_owner', methods=['GET', 'POST'])
@roles_required('admin')
def admin_activity_to_owner():
    """Admin endpoint: stamp every activity of a paper with the paper's owner."""
    paper = Paper.objects(id=request.form['paper_id']).select_related()[0]
    for activity in Activity.objects(paper=paper):
        activity.added_by = paper.owner
        activity.save()
    return jsonify(result={'status': 'success',
                           'msg': 'Activity added by updated',
                           'issues': []})
@bp.route('/_admin_unassigned_seqs_to_owner', methods=['GET', 'POST'])
@roles_required('admin')
def admin_unassigned_seqs_to_owner():
    """Admin endpoint: give a paper's ownerless sequences to the paper's owner."""
    paper = Paper.objects(id=request.form['paper_id']).select_related()[0]
    # Only sequences linked to this paper that have no owner yet.
    unassigned = Sequence.objects(db.Q(papers=paper) & db.Q(owner=None))
    for sequence in unassigned:
        sequence.owner = paper.owner
        sequence.added_by = paper.owner
        sequence.save()
    return jsonify(result={'status': 'success',
                           'msg': 'Unassigned sequences assigned to paper owner',
                           'issues': []})
@bp.route('/_admin_all_seqs_to_owner', methods=['GET', 'POST'])
@roles_required('admin')
def admin_all_seqs_to_owner():
    """Admin endpoint: assign every sequence of a paper to the paper's owner."""
    paper = Paper.objects(id=request.form['paper_id']).select_related()[0]
    for sequence in Sequence.objects(db.Q(papers=paper)):
        sequence.owner = paper.owner
        sequence.save()
    return jsonify(result={'status': 'success',
                           'msg': 'All sequences assigned to paper owner',
                           'issues': []})
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import skopt
from scipy.optimize import OptimizeResult
# taken from skopt.plots and slightly modified
def plot_convergence(*args, **kwargs):
    """Plot one or several convergence traces.

    Parameters:
        args[i] (OptimizeResult, list of OptimizeResult, or tuple):
            The result(s) for which to plot the convergence trace.
            - if OptimizeResult, then draw the corresponding single trace;
            - if list of OptimizeResult, then draw the corresponding
              convergence traces in transparency, along with the average
              convergence trace;
            - if tuple, then args[i][0] should be a string label and args[i][1]
              an OptimizeResult or a list of OptimizeResult.
        ax (Axes, optional): The matplotlib axes on which to draw the plot,
            or None to create a new one.
        yscale (None or string, optional): The scale for the y-axis.

    Returns:
        Axes: The matplotlib axes.
    """
    ax = kwargs.get("ax", None)
    yscale = kwargs.get("yscale", None)
    if ax is None:
        # BUGFIX: gca() was previously called a second time unconditionally,
        # which discarded a caller-supplied `ax`; only fall back when absent.
        ax = plt.gca()
    ax.set_title('Convergence plot')
    ax.set_xlabel('Number of iterations n')
    ax.set_ylabel('max(metric) after n iterations')
    ax.grid()
    if yscale is not None:
        ax.set_yscale(yscale)
    # One distinct color per trace/group of traces.
    colors = cm.viridis(np.linspace(0.25, 1.0, len(args)))
    for results, color in zip(args, colors):
        if isinstance(results, tuple):
            name, results = results
        else:
            name = None
        if isinstance(results, OptimizeResult):
            # Single trace: running maximum of the metric so far.
            n_calls = len(results.x_iters)
            maxs = [np.max(results.func_vals[:i])
                    for i in range(1, n_calls + 1)]
            ax.plot(range(1, n_calls + 1), maxs, c=color,
                    marker=".", markersize=12, lw=2, label=name)
        elif isinstance(results, list):
            # Group of traces: faint individual curves plus their mean.
            n_calls = len(results[0].x_iters)
            iterations = range(1, n_calls + 1)
            maxs = [[np.max(r.func_vals[:i]) for i in iterations]
                    for r in results]
            for m in maxs:
                ax.plot(iterations, m, c=color, alpha=0.2)
            ax.plot(iterations, np.mean(maxs, axis=0), c=color,
                    marker=".", markersize=12, lw=2, label=name)
    ax.legend(loc='best')
    return ax
# taken from neptunecontrib.hpo.utils
def df2result(df, metric_col, param_cols, param_types=None):
    """Convert df with metrics and hyperparams to the OptimizeResults format.

    Helper that lets you use tools expecting an OptimizeResult object (for
    example scikit-optimize's plot_evaluations).

    Parameters:
        df (pandas.DataFrame): Dataframe containing metric and hyperparameters.
        metric_col (str): Name of the metric column.
        param_cols (list): Names of the hyperparameter columns.
        param_types (list or None): Optional list of hyperparameter column
            types.  By default every column is treated as float; pass str for
            categorical channels, e.g. param_types=[float, str, float, float].

    Returns:
        scipy.optimize.OptimizeResult: Results object that contains
            the hyperparameter and metric information.
    """
    if not param_types:
        param_types = [float] * len(param_cols)
    # Coerce each hyperparameter column to its declared type.
    for column, kind in zip(param_cols, param_types):
        df[column] = df[column].astype(kind)
    # Build the skopt search space matching the observed column values.
    dimensions = []
    for column, kind in zip(param_cols, param_types):
        if kind == str:
            dimensions.append(skopt.space.Categorical(
                categories=df[column].unique(), name=column))
        elif kind == float:
            dimensions.append(skopt.space.Real(
                df[column].min(), df[column].max(), name=column))
        else:
            raise NotImplementedError
    results = OptimizeResult()
    results.x_iters = df[param_cols].values
    results.func_vals = df[metric_col].to_list()
    results.x = results.x_iters[np.argmin(results.func_vals)]
    results.fun = np.min(results.func_vals)
    results.space = skopt.Space(dimensions)
    return results
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Variables
'''
Python에서는 변수를 선언하여 사용할 수 있다.
변수 이름은 문자나 숫자로 이루어지는데,
변수 이름은 숫자나 '_' 이외의 특수문자로 시작할 수 없고,
이미 역할이 지정된 keyword는 변수 이름으로 사용할 수 없다.
'''
# (Translation: variables need no declaration keyword in Python; names use
# letters, digits and '_', may not start with a digit or other special
# characters, and reserved keywords cannot be used as names.)
for a in (1, 3, 10, -2):
    print('a =', a)
    print('a × 3 + 1 =', a * 3 + 1, '\n')
# Constants
'''
Python에서 변수와 같은 이름을 갖는 상수를 선언하는 방법은
기본 문법으로는 제공되지 않는다.
관습적으로 대문자와 '_' 만으로 이루어진 변수는
상수로 취급하여,
처음 대입된 값 이외의 값을 다시 대입하지 않는다.
'''
# (Translation: Python has no built-in constant syntax; by convention a name
# written only in upper case and '_' is treated as a constant and is never
# reassigned after its first binding.)
PI = 3.14159265
for r in (0.1, 3, 15, 100):
    print('r =', r)
    print('PI * (r ** 2) =', PI * (r ** 2), '\n')
INCH_TO_CENTIMETER = 2.54
# NOTE(review): multiplying by INCH_TO_CENTIMETER converts inches to
# centimeters, yet the printed labels read cm -> in; the labels appear
# inverted — confirm intent before relying on the output text.
for centimeter in range(1, 11):
    print(centimeter, 'cm ==',\
        centimeter * INCH_TO_CENTIMETER, 'in\n')
from flaml.automl import AutoML
from flaml.model import BaseEstimator
from flaml.data import get_output_from_log
from flaml.version import __version__
import logging
from os.path import join, exists
import datetime as dt
from os import listdir, remove, mkdir
import pathlib
import json
# Locate settings.json in the package root (two levels above this file)
# and load the logging configuration from it at import time.
root = pathlib.Path(__file__).parent.parent.absolute()
jsonfilepath = join(root, "settings.json")
with open(jsonfilepath) as f:
    settings = json.load(f)
# Map the configured level name onto the logging module's constants;
# any unrecognized value falls back to NOTSET.
logging_level = settings["logging_level"]
if logging_level == "info":
    logging_level = logging.INFO
elif logging_level == "debug":
    logging_level = logging.DEBUG
elif logging_level == "error":
    logging_level = logging.ERROR
elif logging_level == "warning":
    logging_level = logging.WARNING
elif logging_level == "critical":
    logging_level = logging.CRITICAL
else:
    logging_level = logging.NOTSET
keep_max_logfiles = settings["keep_max_logfiles"]
log_dir = join(root, "logs")
if not exists(log_dir):
    mkdir(log_dir)
# Prune old log files: names start with '<seconds>_', so sorting the numeric
# prefixes newest-first and slicing keeps only `keep_max_logfiles` of them.
del_logs = sorted([int(x.split("_")[0]) for x in listdir(log_dir) if ".log" in
                   x], reverse=True)[keep_max_logfiles:]
for l in del_logs:
    try:
        remove(join(log_dir, str(l) + "_flaml.log"))
    except Exception as e:
        # Best-effort cleanup: a missing/locked file should not abort import.
        continue
# Log file name = seconds elapsed since the 2020-04-01 epoch below.
b = dt.datetime.now()
a = dt.datetime(2020, 4, 1, 0, 0, 0)
secs = int((b-a).total_seconds())
name = str(secs)
# Send this module's records both to the console and to the new log file.
logger = logging.getLogger(__name__)
logger.setLevel(logging_level)
fh = logging.FileHandler(join(log_dir, name + "_" + __name__ + ".log"))
fh.setLevel(logging_level)
ch = logging.StreamHandler()
ch.setLevel(logging_level)
# formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter = logging.Formatter(
    '[%(name)s: %(asctime)s] {%(lineno)d} %(levelname)s - %(message)s',
    '%m-%d %H:%M:%S')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
logger.propagate = True
|
#!/usr/bin/python
'''
Author : tom-snow
Date : 2022-03-16 19:32:32
LastEditTime : 2022-04-24 17:27:30
LastEditors : tom-snow
Description :
FilePath : /awesome-testflight-link/scripts/del_link.py
'''
import sqlite3
import re, os, sys
# Maps each table name in sqlite3.db to the markdown data file it renders to.
TABLE_MAP = {
    "macos": "./data/macos.md",
    "ios": "./data/ios.md",
    "ios_game": "./data/ios_game.md",
    "chinese": "./data/chinese.md",
    "signup": "./data/signup.md"
}
# Template with {macos}/{ios}/{ios_game}/{chinese}/{signup} placeholders
# used to rebuild README.md.
README_TEMPLATE_FILE = "./data/README.template"
def renew_doc(data_file, table):
    """
    Regenerate *data_file*: keep its markdown table header, then re-emit one
    row per record of the corresponding database table, sorted by app name.

    @data_file: path of the markdown file to rewrite
    @table: database table name (must be a TABLE_MAP key, validated by caller)
    """
    # Copy the existing header lines up to and including the |---|---| rule.
    markdown = []
    with open(data_file, 'r') as f:
        for line in f.readlines():
            columns = [column.strip() for column in line.split("|")]
            markdown.append(line)
            if len(columns) > 2 and re.match(r"^:?-+:?$", columns[1]):
                break
    # Append one markdown row per database record.
    conn = sqlite3.connect('../db/sqlite3.db')
    cur = conn.cursor()
    # `table` comes from TABLE_MAP keys (checked by the caller), so
    # interpolating it into the SQL is safe here.
    res = cur.execute(f"SELECT app_name, testflight_link, status, last_modify FROM {table} ORDER BY app_name;")
    for app_name, testflight_link, status, last_modify in res:
        link = f"https://testflight.apple.com/join/{testflight_link}"
        markdown.append(f"| {app_name} | [{link}]({link}) | {status} | {last_modify} |\n")
    conn.close()
    with open(data_file, 'w') as f:
        # writelines() returns None — the previous dead `lines = ...`
        # binding was removed.
        f.writelines(markdown)
def renew_readme():
    """Rebuild ../README.md by filling the template with every data table."""
    with open(README_TEMPLATE_FILE, 'r') as f:
        template = f.read()
    # Read each per-table markdown file; keys match the template placeholders.
    sections = {}
    for key in ("macos", "ios", "ios_game", "chinese", "signup"):
        with open(TABLE_MAP[key], 'r') as f:
            sections[key] = f.read()
    with open("../README.md", 'w') as f:
        f.write(template.format(**sections))
def main():
    """Delete one TestFlight link from the DB and regenerate the docs.

    Usage: del_link.py <testflight_link> <table>
    The link may be a full join URL; only its trailing invite code is kept.
    Exits non-zero on invalid arguments.
    """
    if len(sys.argv) < 3:
        print(f"[Error] Expected <testflight_link> and <table> arguments. Exit...")
        exit(1)
    testflight_link = sys.argv[1]
    table = sys.argv[2].lower()
    # Escape the dots so e.g. "testflightXapple.com" no longer matches.
    link_id_match = re.search(r"^https://testflight\.apple\.com/join/(.*)$", testflight_link, re.I)
    if link_id_match is not None:
        testflight_link = link_id_match.group(1)
    else:
        print(f"[Error] Invalid testflight_link. Exit...")
        exit(1)
    # Whitelist the table name: identifiers cannot be bound as parameters.
    if table not in TABLE_MAP or table == "signup":
        print(f"[Error] Invalid table. Exit...")
        exit(1)
    # Delete from the database.  The link value is passed as a bound
    # parameter (previously f-string-interpolated into the SQL, which broke
    # on quotes and allowed SQL injection).
    conn = sqlite3.connect('../db/sqlite3.db')
    cur = conn.cursor()
    res = cur.execute(f"SELECT * FROM {table} WHERE testflight_link = ?;",
                      (testflight_link,))
    if len(list(res)) == 0:
        print(f"[warn] Data (https://testflight.apple.com/join/{testflight_link}) not found in table ({table}).")
        exit(0)
    cur.execute(f"DELETE FROM {table} WHERE testflight_link = ?;",
                (testflight_link,))
    conn.commit()
    print(f"[info] Deleted {conn.total_changes} row(s) into table: {table}")
    conn.close()
    renew_doc(TABLE_MAP[table], table)
    renew_readme()
# Run from the script's own directory so the relative ./data and ../db
# paths resolve regardless of the caller's working directory.
if __name__ == "__main__":
    os.chdir(sys.path[0])
    main()
# One-off helper: flag the first StorageObject as deleted and report its
# identifier to the parent process over inherited file descriptor 5.
import sys
import os
import traceback

from metashare.storage.models import StorageObject

try:
    # fd 5 is assumed to be opened for us by the invoking process.
    f = os.fdopen(5, 'w')
    stos = StorageObject.objects.all()
    sto1 = stos[0]
    sto1.deleted = True
    sto1.save()
    sto1.update_storage()
    f.write(sto1.identifier)
except Exception:
    # The old bare `except:` (with `import sys` inside the try) exited
    # silently; keep the non-zero exit but leave a diagnostic on stderr.
    traceback.print_exc()
    sys.exit(1)
sys.exit(0)
|
import logging
import re
from fuzzywuzzy import fuzz

# Raise the root logger to INFO so the "Not found" warnings below surface.
logging.getLogger().setLevel(logging.INFO)
def _add_start_end_fuzzy_search(element_to_add, text, sub_text, start_index, move_text_left=0):
    """Fuzzy fallback for _add_start_end.

    Anchors on ~30 chars of *sub_text* (start anchor shifted right by
    *move_text_left*) and its last 30 chars, locates both anchors in *text*
    after *start_index*, and accepts the spanned region when it is more than
    70% similar to *sub_text* (fuzz.ratio).  On success the 'start'/'end'
    keys of *element_to_add* are set and the end index is returned;
    otherwise -1.
    """
    # Normalize " ." -> "." inside the anchors (except after AM/PM) so minor
    # spacing differences around sentence ends don't defeat the exact find.
    sub_text_start = re.sub(r'(?<!AM|PM)( \.)', '.', sub_text[move_text_left:30 + move_text_left])
    sub_text_end = re.sub(r'(?<!AM|PM)( \.)', '.', sub_text[-30:])
    sub_text_start_index = text.find(sub_text_start, start_index)
    if sub_text_start_index > -1:
        sub_text_end_index = text.find(sub_text_end, sub_text_start_index)
        if sub_text_end_index > -1:
            # Include the end anchor itself in the candidate span.
            sub_text_end_index = sub_text_end_index + len(sub_text_end)
            matched_text = text[sub_text_start_index:sub_text_end_index]
            if fuzz.ratio(matched_text, sub_text) > 70:
                element_to_add['start'] = sub_text_start_index
                element_to_add['end'] = sub_text_end_index
                return element_to_add['end']
    # Start anchor not found: retry with it shifted 20 chars further right,
    # while at least 60 chars of sub_text remain beyond the anchor window.
    elif move_text_left < len(sub_text) - 60:
        return _add_start_end_fuzzy_search(element_to_add, text, sub_text, start_index, move_text_left + 20)
    logging.warning(f'Not found in text:\n{sub_text}')
    return -1
def _add_start_end(element_to_add, text, sub_text, start_index, fuzzy_search=False):
if isinstance(sub_text, str):
found_start_index = text.find(sub_text, start_index)
if found_start_index > -1:
element_to_add['start'] = found_start_index
element_to_add['end'] = found_start_index + len(sub_text)
return found_start_index + len(sub_text)
elif fuzzy_search and len(sub_text) > 150:
return _add_start_end_fuzzy_search(element_to_add, text, sub_text, start_index)
else:
logging.warning(f'Not found in text:\n{sub_text}')
return -1
return -1
def get_start_end_for_post(post, full_text, search_start_index, fuzzy_search=False):
    """Annotate the components of *post* (post_text, and optionally
    datetime/user/post_link) with their character spans in *full_text*.

    Returns the index just past the matched post text (usable as the next
    post's *search_start_index*), or -1 when the post text was not found.
    """
    index_post_text = _add_start_end(post['post_text'], full_text,
                                     post['post_text']['surface_form'],
                                     search_start_index, fuzzy_search)
    # Optional components share the same search window as the post text.
    for key in ('datetime', 'user', 'post_link'):
        if key in post:
            _add_start_end(post[key], full_text,
                           post[key]['surface_form'], search_start_index)
    # _add_start_end already returns the index just past the matched text.
    # The previous code added len(post['post_text']) on top — the number of
    # dict keys, not a text length — skewing the resume position.
    return index_post_text
|
# **************************************************************************************************************************************************************************************************** #
# ****************************************************************************** 2. To simulate Full-Subtractor Circuit ***************************************************************************** #
# **************************************************************************************************************************************************************************************************** #
# -@ AmiLab
'''
Note-
- **DATA VALIDATION EXCLUDED FOR BEING CHECKED AT THE TIME OF DATA INPUT**
- All Testings have been logged into the terminal for future debuggings.
'''
# ********************************************************************** Argument / Variable Declaration (for Testing purposes) ********************************************************************** #
MSB = 0 # For storing the MSB(Most Significant Bit) of the Input Signal
LSB = 1 # For storing the LSB(Least Significant Bit) of the Input Signal
borrow_in = 1  # Borrow carried in from the previous (less significant) stage
bits = [MSB, LSB, borrow_in] # For storing the complete(both MSB and LSB bits) Input Signal
# **************************************************************************************** Section ends here ***************************************************************************************** #
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# **************************************************************************** Calculation of Full-Binary Difference ******************************************************************************** #
def fullSubtractor(bits): # For performing the Full-Binary Subtraction
    """One-bit full subtraction: bits = [minuend, subtrahend, borrow_in].

    Returns {'Borrow Out': ..., 'Difference': ...} for valid binary inputs,
    or None (after a diagnostic print) otherwise.  The previous version
    validated only bits[0] and bits[1]; the borrow-in bit is now checked too.
    """
    if all(bit in (0, 1) for bit in bits[:3]):
        # Borrow out = A'.B + B.Bin + A'.Bin ; Difference = A xor B xor Bin
        return {'Borrow Out': (~ bits[0] & bits[1]) | (bits[1] & bits[2]) | ( ~ bits[0] & bits[2]), 'Difference': bits[0] ^ bits[1] ^ bits[2]}
    else:
        print('Not a Valid Binary Number')
# Testing-
bin_add = fullSubtractor(bits)  # smoke-test with the sample input declared above
print(f'Binary_Full_Subtraction({bits[0]}, {bits[1]}, (Borrow In = {bits[2]})) =', bin_add)
# ********************************************************************************* Section ends here ************************************************************************************************ #
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
|
# Generated by Django 2.1.5 on 2019-05-05 08:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the rankings app.

    Orders individual results by time, relaxes several athlete fields
    (names, nationalities, year of birth) to be optional, and makes the
    result's extra_analysis_time_by FK nullable.
    """
    dependencies = [
        ('rankings', '0032_individualresult_round'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='individualresult',
            options={'ordering': ['time']},
        ),
        migrations.AlterField(
            model_name='athlete',
            name='first_name',
            field=models.CharField(blank=True, default=None, max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='athlete',
            name='last_name',
            field=models.CharField(blank=True, default=None, max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='athlete',
            name='nationalities',
            field=models.ManyToManyField(blank=True, default=None, related_name='nationalities', to='rankings.Nationality'),
        ),
        migrations.AlterField(
            model_name='athlete',
            name='year_of_birth',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='individualresult',
            name='extra_analysis_time_by',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
"""[Perspective](https://github.com/finos/perspective#readme) is an interactive visualization
component for large, real-time datasets. It enables analysts and traders at large banks like
J.P.Morgan to understand their data in real time.
Panel provides the [`Perspective`](https://panel.holoviz.org/reference/panes/Perspective.html) pane
which was first contributed by awesome-panel.org and then further improved by Philipp.
"""
import pathlib
import pandas as pd
import panel as pn
from panel.pane import Perspective
from awesome_panel import config
from awesome_panel.assets.csv import PERSPECTIVE_VIEWER_DATA_PATH
# Styling constants for the dark variant of the app.
DARK_BACKGROUND = "rgb(42, 44, 47)"
DARK_COLOR = "white"
PERSPECTIVE_LOGO = "https://perspective.finos.org/img/logo.png"
PANEL_LOGO = "https://panel.holoviz.org/_static/logo_horizontal.png"
ROOT = pathlib.Path(__file__).parent
# Source: https://datahub.io/core/s-and-p-500-companies-financials
# NOTE(review): this Path is shadowed later in the module, where DATA is
# rebound to the DataFrame returned by get_data() — confirm this local CSV
# path is actually unused.
DATA = ROOT / "PerspectiveViewerData.csv"
# pylint: disable=line-too-long
VIDEO = """<iframe width="100%" height="400" src="https://www.youtube.com/embed/IO-HJsGdleE" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>"""
# pylint: enable=line-too-long
INFO = """**You can also use the `Perspective` pane in your apps**. For more
inspiration check out the [Perspective Reference Guide]\
(https://panel.holoviz.org/reference/panes/Perspective.html) or the video below.
"""
# Columns shown in the Perspective viewer, in display order.
COLUMNS = [
    "Name",
    "Symbol",
    "Sector",
    "Price",
    "Price/Earnings",
    "Dividend Yield",
    "Earnings/Share",
    "52 Week Low",
    "52 Week High",
    "Market Cap",
    "EBITDA",
    "Price/Sales",
    "Price/Book",
    "SEC Filings",
]
@config.cached
def get_data() -> pd.DataFrame:
    """Returns data for the Perspective app.

    Only the columns listed in COLUMNS are kept, in that order.
    Returns:
        pd.DataFrame: Data
    """
    return pd.read_csv(PERSPECTIVE_VIEWER_DATA_PATH)[COLUMNS]
# Loaded once at import time; @config.cached memoizes the underlying read.
# Rebinds the DATA path constant declared above to the DataFrame itself.
DATA = get_data()
def main(theme: str) -> pn.Column:
    """Returns the main app components.

    Args:
        theme: the active Panel theme name; any name containing "dark"
            selects the dark Perspective/material styling.
    Returns:
        pn.Column: The main app components
    """
    # Map the app theme onto Perspective's material theme variants.
    if "dark" in theme:
        background = DARK_BACKGROUND
        theme = "material-dark"
    else:
        background = "white"
        theme = "material"
    perspective_viewer = Perspective(
        object=DATA, columns=COLUMNS, theme=theme, sizing_mode="stretch_both"
    )
    # Branding bar across the top of the app.
    top_app_bar = pn.Row(
        pn.pane.PNG(
            PERSPECTIVE_LOGO,
            link_url="https://perspective.finos.org",
            height=50,
            margin=(10, 25, 10, 10),
        ),
        # pn.pane.PNG(PANEL_LOGO, height=40, margin=(10, 0, 10, 0)),
        pn.layout.HSpacer(),
        margin=0,
        background=DARK_BACKGROUND,
    )
    # Perspective parameters exposed as editable widgets in the side pane.
    settings_parameters = [
        "theme",
        "row_pivots",
        "plugin",
        "columns",
        "aggregates",
        "filters",
        "sort",
        "rows",
        "column_pivots",
    ]
    settings_pane = pn.Param(
        perspective_viewer,
        parameters=settings_parameters,
        width=200,
        sizing_mode="stretch_height",
    )
    # Layout: viewer + settings, then an info alert and the intro video.
    return pn.Column(
        pn.Column(
            top_app_bar,
            pn.Row(
                perspective_viewer,
                pn.layout.VSpacer(width=10),
                settings_pane,
                sizing_mode="stretch_both",
                background=background,
                margin=0,
            ),
        ),
        pn.pane.Alert(INFO, margin=0),
        pn.Column(
            pn.pane.HTML(VIDEO),
        ),
    )
# When served via `panel serve` the module name starts with "bokeh": load
# the Perspective extension and mark every top-level component servable.
if __name__.startswith("bokeh"):
    config.extension("perspective", url="perspective")
    for component in main(theme=config.get_theme()):
        component.servable()
|
import math
import requests
from urllib.parse import quote, urlencode
from .utils import get_hr_size, get_key_from_value
def get_magnet(info_hash, torrent_name):
    """Build a magnet URI for *info_hash* / *torrent_name*, appending a
    fixed set of public trackers as percent-encoded ``tr`` parameters."""
    tracker_urls = (
        "udp://tracker.coppersurfer.tk:6969/announce",
        "udp://9.rarbg.me:2850/announce",
        "udp://9.rarbg.to:2920/announce",
        "udp://tracker.opentrackr.org:1337",
        "udp://tracker.internetwarriors.net:1337/announce",
        "udp://tracker.leechers-paradise.org:6969/announce",
        "udp://tracker.pirateparty.gr:6969/announce",
        "udp://tracker.cyberia.is:6969/announce",
    )
    tracker_params = "".join(
        f"&tr={quote(url, safe='')}" for url in tracker_urls
    )
    encoded_name = quote(torrent_name, safe="")
    return f"magnet:?xt=urn:btih:{info_hash}&dn={encoded_name}{tracker_params}"
class CSV:
    """Search client for the torrents-csv.ml service."""

    def __init__(self):
        self.base_url = "https://torrents-csv.ml"

    def search(self, query, category, order):
        """Search torrents-csv and return normalized result dicts, sorted
        descending by *order* ("size" maps to the service's "size_bytes").

        *category* is accepted for interface parity with the other engines;
        the service itself offers no category filter.
        """
        # Percent-encode the query: raw spaces/&/# previously corrupted the
        # request URL.  `quote` is imported at module level.
        url = f"{self.base_url}/service/search?size=100&q={quote(query)}"
        results = requests.get(url).json()
        if order == "size":
            order = "size_bytes"
        results = sorted(results, key=lambda d: int(d[order]), reverse=True)
        torrents = []
        for result in results:
            torrents.append(
                {
                    "Name": result["name"],
                    "Size": get_hr_size(result["size_bytes"]),
                    "size": result["size_bytes"],
                    "SE": result["seeders"],
                    "LE": result["leechers"],
                    "seeders": result["seeders"],
                    "leechers": result["leechers"],
                    "Category": "N/A",
                    "Site": "Torrents CSV",
                    "Magnet": get_magnet(result["infohash"], result["name"]),
                }
            )
        return torrents
class Solid:
    """Search client for the solidtorrents.net v1 API."""

    def __init__(self):
        self.base_url = "https://solidtorrents.net/api/v1"
        self.session = requests.session()
        # UI category names mapped onto the site's internal category tags.
        self.categories = {
            "all": ["all"],
            "audio": ["audio"],
            "video": ["video"],
            "games": ["program", "android"],
            "apps": [
                "program",
                "android",
                "archive",
                "discimage",
                "sourcecode",
                "database",
            ],
            "other": ["image", "document", "ebook", "database"],
        }

    def generate_results(self, query, category, order):
        """Yield raw result dicts, paging through the API 20 at a time."""
        offset, total_hits = 0, 1
        while offset < total_hits:
            query_string = urlencode(
                {
                    "category": "+".join(self.categories[category]),
                    "q": query,
                    "sort": order,
                    "skip": offset,
                    "fuv": "yes",
                }
            )
            page = self.session.get(
                f"{self.base_url}/search?{query_string}"
            ).json()
            # The server reports the total hit count on every page.
            total_hits = page["hits"]["value"]
            offset += 20
            yield from page["results"]

    def search(self, query, category, order):
        """Search and return normalized result dicts across all pages."""
        return [
            {
                "Name": result["title"],
                "Size": get_hr_size(result["size"]),
                "size": result["size"],
                "SE": result["swarm"]["seeders"],
                "LE": result["swarm"]["leechers"],
                "seeders": result["swarm"]["seeders"],
                "leechers": result["swarm"]["leechers"],
                "Category": result["category"],
                "Site": "Solid Torrents",
                "Magnet": result["magnet"],
            }
            for result in self.generate_results(query, category, order)
        ]
class TPB:
    """Search client for The Pirate Bay's apibay API."""

    def __init__(self):
        self.base_url = "https://apibay.org"
        # UI category names -> apibay top-level category codes.
        self.categories = {
            "all": "",
            "audio": "100",
            "video": "200",
            "apps": "300",
            "games": "400",
            "porn": "500",
            "other": "600",
        }

    def search(self, query, category, order):
        """Search apibay and return normalized result dicts, sorted
        descending by *order* (a numeric field such as "seeders")."""
        # Percent-encode the query: raw spaces/&/# previously corrupted the
        # request URL.  `quote` is imported at module level.
        url = f"{self.base_url}/q.php?q={quote(query)}&cat={self.categories[category]}"
        results = requests.get(url).json()
        results = sorted(results, key=lambda d: int(d[order]), reverse=True)
        torrents = []
        for result in results:
            # apibay returns a subcategory code (e.g. 207); round down to
            # the top-level hundred to map it back to a UI category name.
            cat = int(math.floor(float(result["category"]) / 100.0)) * 100
            torrents.append(
                {
                    "Name": result["name"],
                    "Size": get_hr_size(result["size"]),
                    "size": result["size"],
                    "SE": result["seeders"],
                    "LE": result["leechers"],
                    "seeders": result["seeders"],
                    "leechers": result["leechers"],
                    "Category": get_key_from_value(self.categories, str(cat)),
                    "Site": "The Pirate Bay",
                    "Magnet": get_magnet(result["info_hash"], result["name"]),
                }
            )
        return torrents
|
__author__ = "Alex"
import time
import sys
import os
sys.path.insert(-1, os.getcwd())
sys.path.insert(-1, os.path.dirname(os.getcwd()))
from spambayes import quickselect
from random import shuffle
def main():
    """Benchmark quickselect.k_smallest against a full sort for extracting
    the 100 smallest of 10,000 shuffled integers, printing wall-clock
    times for each approach."""
    k_smallest = quickselect.k_smallest
    l_1 = [i for i in range(10000)]
    shuffle(l_1)
    l_2 = [item for item in l_1]
    # Baseline: full sort, then slice the 100 smallest.
    start_1 = time.time()
    l_1.sort()
    c_1 = l_1[:100]
    end_1 = time.time()
    # Candidate: partial selection of the 100 smallest.
    start_2 = time.time()
    c_5 = k_smallest(l_2, 100)
    end_2 = time.time()
    secs_1 = end_1 - start_1
    secs_2 = end_2 - start_2
    # The original used Python 2 print statements, a SyntaxError on
    # Python 3; converted to print() calls with identical output.
    print("Time 1:", secs_1, "\n")
    print("Time 2:", secs_2, "\n")


if __name__ == "__main__":
    main()
import math
import numpy
from scipy.spatial import cKDTree
def deduplicate(
    points,
    max_distance,
    doublecheck_values=None,
    doublecheck_function=lambda x, y: x == y,
    check_negative=False
):
    '''
    Return the indices of *points* that survive deduplication: point i is
    kept iff it has the smallest index within its own neighborhood (all
    points within *max_distance*, found via a KD-tree).

    doublecheck_values / doublecheck_function optionally refine matches: a
    neighbor j only counts as a duplicate of i when
    doublecheck_function(doublecheck_values[i], doublecheck_values[j]).

    check_negative is for use with Quaternions (q and -q represent the same
    rotation, so -point's neighborhood is merged in; points must then
    support unary negation, e.g. be numpy arrays).
    '''
    if doublecheck_values is not None:
        assert len(doublecheck_values) == len(points)
    if not len(points):
        return []
    # The previous version wrapped this in try/except with pdb.set_trace()
    # — debugging residue; let construction errors propagate normally.
    kdtree = cKDTree(points)
    deduplicated_indices = []
    for i, point in enumerate(points):
        matches = kdtree.query_ball_point(point, max_distance)
        if check_negative:
            matches.extend(kdtree.query_ball_point(-point, max_distance))
        if doublecheck_values is not None:
            matches = [
                j for j in matches if i == j or doublecheck_function(
                    doublecheck_values[i], doublecheck_values[j])
            ]
        # Keep only the lowest-indexed representative of each cluster.
        if min(matches) == i:
            deduplicated_indices.append(i)
    return deduplicated_indices
def rotation_doublecheck_function(max_angular_distance):
    """Build a predicate testing whether two transforms' 3x3 rotation
    blocks differ by at most *max_angular_distance* radians, using the
    trace of the relative rotation (trace = 1 + 2*cos(angle))."""
    minimum_trace = 1. + 2. * math.cos(max_angular_distance)

    def within_tolerance(first, second):
        relative_rotation = first[:3, :3] @ second[:3, :3].T
        return numpy.trace(relative_rotation) > minimum_trace

    return within_tolerance
def deduplicate_transforms(
    transforms,
    max_metric_distance,
    max_angular_distance
):
    """Deduplicate 4x4 rigid transforms: two transforms are duplicates when
    their translations lie within *max_metric_distance* AND their rotations
    differ by at most *max_angular_distance* radians.

    Returns the indices of the surviving transforms.
    """
    points = [transform[:3, 3] for transform in transforms]
    doublecheck_function = rotation_doublecheck_function(max_angular_distance)
    # Fix: the previous code called the non-existent `deduplicate_points`,
    # raising NameError at runtime; the helper above is named `deduplicate`.
    deduplicated_indices = deduplicate(
        points,
        max_metric_distance,
        doublecheck_values=transforms,
        doublecheck_function=doublecheck_function,
    )
    return deduplicated_indices
|
from __future__ import unicode_literals
import datetime
from django.core import signing
from django.db import models
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.crypto import get_random_string
from .. import app_settings as allauth_app_settings
from . import app_settings
from . import signals
from .utils import user_email
from .managers import EmailAddressManager, EmailConfirmationManager
from .adapter import get_adapter
from django.contrib.auth.models import User
@python_2_unicode_compatible
class EmailAddress(models.Model):
    """An e-mail address attached to a user account, with verification and
    primary-address state (vendored from django-allauth)."""
    user = models.ForeignKey(allauth_app_settings.USER_MODEL,
                             verbose_name=_('user'),
                             on_delete=models.CASCADE)
    email = models.EmailField(unique=app_settings.UNIQUE_EMAIL,
                              max_length=app_settings.EMAIL_MAX_LENGTH,
                              verbose_name=_('e-mail address'))
    verified = models.BooleanField(verbose_name=_('verified'), default=False)
    primary = models.BooleanField(verbose_name=_('primary'), default=False)
    objects = EmailAddressManager()
    class Meta:
        verbose_name = _("email address")
        verbose_name_plural = _("email addresses")
        # When emails are not globally unique, enforce per-user uniqueness.
        if not app_settings.UNIQUE_EMAIL:
            unique_together = [("user", "email")]
    def __str__(self):
        return "%s (%s)" % (self.email, self.user)
    def set_as_primary(self, conditional=False):
        """Make this address the user's primary one.

        With conditional=True, back off (return False) when another primary
        address already exists; otherwise demote the old primary first.
        Also mirrors the address onto the user model's email field.
        """
        old_primary = EmailAddress.objects.get_primary(self.user)
        if old_primary:
            if conditional:
                return False
            old_primary.primary = False
            old_primary.save()
        self.primary = True
        self.save()
        user_email(self.user, self.email)
        self.user.save()
        return True
    def send_confirmation(self, request=None, signup=False):
        """Create and send a confirmation (HMAC-keyed or DB-backed per
        settings); returns the confirmation object."""
        if app_settings.EMAIL_CONFIRMATION_HMAC:
            confirmation = EmailConfirmationHMAC(self)
        else:
            confirmation = EmailConfirmation.create(self)
        confirmation.send(request, signup=signup)
        return confirmation
    def change(self, request, new_email, confirm=True):
        """
        Given a new email address, change self and re-confirm.
        """
        # Older Django exposes commit_on_success instead of atomic; pick
        # whichever this installation provides.
        try:
            atomic_transaction = transaction.atomic
        except AttributeError:
            atomic_transaction = transaction.commit_on_success
        with atomic_transaction():
            user_email(self.user, new_email)
            self.user.save()
            self.email = new_email
            self.verified = False
            self.save()
            if confirm:
                self.send_confirmation(request)
@python_2_unicode_compatible
class EmailConfirmation(models.Model):
    """A DB-backed e-mail confirmation: a random key with creation/sent
    timestamps, expiring after EMAIL_CONFIRMATION_EXPIRE_DAYS."""
    email_address = models.ForeignKey(EmailAddress,
                                      verbose_name=_('e-mail address'),
                                      on_delete=models.CASCADE)
    created = models.DateTimeField(verbose_name=_('created'),
                                   default=timezone.now)
    sent = models.DateTimeField(verbose_name=_('sent'), null=True)
    key = models.CharField(verbose_name=_('key'), max_length=64, unique=True)
    objects = EmailConfirmationManager()
    class Meta:
        verbose_name = _("email confirmation")
        verbose_name_plural = _("email confirmations")
    def __str__(self):
        return "confirmation for %s" % self.email_address
    @classmethod
    def create(cls, email_address):
        """Create a confirmation with a fresh random lowercase key."""
        key = get_random_string(64).lower()
        return cls._default_manager.create(email_address=email_address,
                                           key=key)
    def key_expired(self):
        """True when the key's validity window (counted from `sent`) has
        elapsed."""
        expiration_date = self.sent \
            + datetime.timedelta(days=app_settings
                                 .EMAIL_CONFIRMATION_EXPIRE_DAYS)
        return expiration_date <= timezone.now()
    key_expired.boolean = True  # render as an icon in the Django admin
    def confirm(self, request):
        """Mark the address verified (if the key is still valid and the
        address unverified) and emit the email_confirmed signal; returns
        the address on success, None otherwise."""
        if not self.key_expired() and not self.email_address.verified:
            email_address = self.email_address
            get_adapter(request).confirm_email(request, email_address)
            signals.email_confirmed.send(sender=self.__class__,
                                         request=request,
                                         email_address=email_address)
            return email_address
    def send(self, request=None, signup=False):
        """Send the confirmation mail, stamp `sent`, and emit the
        email_confirmation_sent signal."""
        get_adapter(request).send_confirmation_mail(request, self, signup)
        self.sent = timezone.now()
        self.save()
        signals.email_confirmation_sent.send(sender=self.__class__,
                                             request=request,
                                             confirmation=self,
                                             signup=signup)
class EmailConfirmationHMAC:
    """Stateless e-mail confirmation: the key is a signed token embedding
    the EmailAddress pk, so nothing is stored in the database."""
    def __init__(self, email_address):
        self.email_address = email_address
    @property
    def key(self):
        """Signed, salted token encoding the address's primary key."""
        return signing.dumps(
            obj=self.email_address.pk,
            salt=app_settings.SALT)
    @classmethod
    def from_key(cls, key):
        """Resolve a token back to a confirmation instance, or None when
        the signature is invalid/expired or the address no longer exists."""
        try:
            max_age = (
                60 * 60 * 24 * app_settings.EMAIL_CONFIRMATION_EXPIRE_DAYS)
            pk = signing.loads(
                key,
                max_age=max_age,
                salt=app_settings.SALT)
            ret = EmailConfirmationHMAC(EmailAddress.objects.get(pk=pk))
        except (signing.SignatureExpired,
                signing.BadSignature,
                EmailAddress.DoesNotExist):
            ret = None
        return ret
    def confirm(self, request):
        """Mark the address verified (if not already) and emit the
        email_confirmed signal; returns the address on success."""
        if not self.email_address.verified:
            email_address = self.email_address
            get_adapter(request).confirm_email(request, email_address)
            signals.email_confirmed.send(sender=self.__class__,
                                         request=request,
                                         email_address=email_address)
            return email_address
    def send(self, request=None, signup=False):
        """Send the confirmation mail and emit email_confirmation_sent.
        No `sent` timestamp exists here: expiry rides on the signed key."""
        get_adapter(request).send_confirmation_mail(request, self, signup)
        signals.email_confirmation_sent.send(sender=self.__class__,
                                             request=request,
                                             confirmation=self,
                                             signup=signup)
from django.contrib.auth.models import Permission, User
from django.db import models
class Hive(models.Model): #Hive Model,
    """A course study group ("Hive") that many users can belong to."""
    user = models.ManyToManyField(User, related_name = "member")
    #ManyToManyField to allow multiple users to assosciated to the Hive. I added a secondary index in order to make search faster and easier(Shaved a some code!!)
    course = models.CharField(max_length=500) #Name of the Hive
    def __str__(self):
        return self.course #Returns Hive in the admin Console making it easier for us to test and update results
class Notes(models.Model): #Notes Model for HIVES
    """A notes file shared inside a Hive."""
    hive = models.ForeignKey(Hive, on_delete=models.CASCADE) #The Hive that the Notes are assosciated to
    hivepk = models.IntegerField(default = 0) #Adds the PK of the Hive in order to make sure notes are private and cannot be accessed through hives of the same Name
    notes_title = models.CharField(max_length=250) #Notes Name
    notes_file = models.FileField(default='') #File of the Notes Model(One for every notes, Hives can have many notes associated to them).
    def __str__(self):
        return self.notes_title
class ProfileNotes(models.Model): #Notes for the profiles
    """A notes file attached directly to a user's profile (not a Hive)."""
    # on_delete is required on ForeignKey since Django 2.0 (the Notes model
    # above already passes it); CASCADE matches the old implicit default.
    user = models.ForeignKey(User, default = 1, on_delete=models.CASCADE) #associated to a user, no need for PK because usernames are unique, and so are user models
    notes_title = models.CharField(max_length=250, blank = True) #title of the Notes
    notes_file = models.FileField(default='') #File of the Note
    def __str__(self):
        return self.notes_title
class MessageBoard(models.Model): #Each is a message to be added to each Hive's message board
    """One message posted to a Hive's message board."""
    # on_delete is required on ForeignKey since Django 2.0; CASCADE matches
    # the old implicit default.
    user = models.ForeignKey(User, default = 1, on_delete=models.CASCADE) #User who posted
    hivepk = models.IntegerField(default = 0) #Pk of the assosicated Hive
    message = models.CharField(max_length = 500, blank = True) #Message posted
    time = models.DateTimeField(default=datetime.datetime.now, blank=True) #Date and Time message posted
    def __str__(self):
        # __str__ must return a string; returning the User object raised
        # TypeError in the admin/shell.
        return str(self.user)
class Bio(models.Model): #The Bio model for user's bios
    """A user's profile biography text."""
    # on_delete is required on ForeignKey since Django 2.0; CASCADE matches
    # the old implicit default.
    user = models.ForeignKey(User, default = 1, on_delete=models.CASCADE) #One for each user, unique to each of them.
    about = models.CharField(max_length = 500, blank = True) #What's in the bio
    #Bios are deleted when updated. A new one is created with the new bio
    def __str__(self):
        # __str__ must return a string; returning the User object raised
        # TypeError in the admin/shell.
        return str(self.user)
class profilepic(models.Model):#Unused but basically it would use an ImageField to stoe a profile pic that would be asssociated to a user and then displayed
    """(Unused) A user's profile picture."""
    # on_delete is required on ForeignKey since Django 2.0; CASCADE matches
    # the old implicit default.
    user = models.ForeignKey(User, default = 1, on_delete=models.CASCADE)
    image = models.ImageField(default = '')
    def __str__(self):
        # __str__ must return a string; returning the User object raised
        # TypeError in the admin/shell.
        return str(self.user)
class University(models.Model): #Unused but would have told everybody what school you are from and allowed a filter in searching
    """(Unused) A school with its enrolled users, for search filtering."""
    school = models.CharField(max_length = 900)
    students = models.ManyToManyField(User, related_name = "studentof")
    def __str__(self):
        return self.school
|
import sys
import backend as bk
from PyQt5 import QtGui, QtCore, QtWidgets
from osmnx.errors import UnknownNetworkType
class Controller:
    """Owns the three top-level windows and switches between them."""

    def __init__(self):
        self.mainwindow = MainAppWindow()
        self.centrality = CentralityWindow()
        self.accessibility = AccessibilityWindow()
        # Connect the navigation signals exactly once.  The previous code
        # connected inside each open_* method, so every round trip between
        # windows stacked a duplicate connection and the slots fired
        # multiple times per click.
        self.mainwindow.switch_window2.connect(self.open_cent)
        self.mainwindow.switch_window3.connect(self.open_access)
        self.centrality.switch_window.connect(self.open_main)
        self.accessibility.switch_window.connect(self.open_main)

    def open_main(self):
        """Show the main menu and hide both analysis windows."""
        self.centrality.hide()
        self.accessibility.hide()
        self.mainwindow.show()

    def open_cent(self):
        """Switch from the main menu to the centrality window."""
        self.mainwindow.hide()
        self.centrality.show()

    def open_access(self):
        """Switch from the main menu to the accessibility window."""
        self.mainwindow.hide()
        self.accessibility.show()
class MainAppWindow(QtWidgets.QFrame):
    """Main menu frame; emits a signal per chosen analysis so the
    Controller can switch windows."""
    switch_window2 = QtCore.pyqtSignal()  # request: open centrality window
    switch_window3 = QtCore.pyqtSignal()  # request: open accessibility window
    def __init__(self):
        super(MainAppWindow, self).__init__()
        self.mainwindow = MainWindow()
        self.mainwindow.setupUi(self)
        self.mainwindow.CentralityAnalysisButton.clicked.connect(
            self.open_cent_handler
        )
        self.mainwindow.AccessibilityAnalysisButton.clicked.connect(
            self.open_access_handler
        )
    def open_cent_handler(self):
        """Forward the button click as a window-switch request."""
        self.switch_window2.emit()
    def open_access_handler(self):
        """Forward the button click as a window-switch request."""
        self.switch_window3.emit()
class CentralityWindow(QtWidgets.QFrame):
    """Centrality analysis frame; its Back button emits switch_window so
    the Controller can return to the main menu."""
    switch_window = QtCore.pyqtSignal()
    def __init__(self):
        super(CentralityWindow, self).__init__()
        self.centrality = Centrality()
        self.centrality.setupUi(self)
        self.centrality.BackButton.clicked.connect(self.open_main_handler)
    def open_main_handler(self):
        """Forward the Back click as a window-switch request."""
        self.switch_window.emit()
class AccessibilityWindow(QtWidgets.QFrame):
    """Accessibility analysis frame; its Back button emits switch_window so
    the Controller can return to the main menu."""
    switch_window = QtCore.pyqtSignal()
    def __init__(self):
        super(AccessibilityWindow, self).__init__()
        self.accessibility = Accessibility()
        self.accessibility.setupUi(self)
        self.accessibility.BackButton.clicked.connect(self.open_main_handler)
    def open_main_handler(self):
        """Forward the Back click as a window-switch request."""
        self.switch_window.emit()
class MainWindow(object):
    """Qt Designer-style UI builder for the main menu frame (label plus the
    two analysis buttons)."""
    def setupUi(self, MainFrame):
        """Build all widgets onto *MainFrame* (fixed 870x230 layout)."""
        MainFrame.setObjectName("MainFrame")
        MainFrame.setFixedSize(870, 230)
        MainFrame.setWindowIcon(QtGui.QIcon("interface.ico"))
        # Fonts
        font = QtGui.QFont()
        font.setFamily("Palatino Linotype")
        font.setPointSize(10)
        font2 = QtGui.QFont()
        font2.setFamily("Palatino Linotype")
        font2.setPointSize(20)
        font2.setBold(True)
        font2.setWeight(75)
        font3 = QtGui.QFont()
        font3.setFamily("Palatino Linotype")
        font3.setPointSize(11)
        font3.setBold(True)
        font3.setWeight(75)
        # Labels
        self.ChooseAnalysisLabel = QtWidgets.QLabel(MainFrame)
        self.ChooseAnalysisLabel.setGeometry(QtCore.QRect(310, 20, 251, 101))
        # Dark-blue text when active/inactive, grey when disabled.
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 127))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,
                         brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 127))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,
                         brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,
                         brush)
        self.ChooseAnalysisLabel.setPalette(palette)
        self.ChooseAnalysisLabel.setFont(font2)
        self.ChooseAnalysisLabel.setAutoFillBackground(False)
        self.ChooseAnalysisLabel.setAlignment(QtCore.Qt.AlignCenter)
        # Buttons
        self.CentralityAnalysisButton = QtWidgets.QPushButton(MainFrame)
        self.CentralityAnalysisButton.setGeometry(QtCore.QRect(350, 100,
                                                  181, 31))
        self.CentralityAnalysisButton.setFont(font3)
        self.AccessibilityAnalysisButton = QtWidgets.QPushButton(MainFrame)
        self.AccessibilityAnalysisButton.setGeometry(QtCore.QRect(350, 140,
                                                     181, 31))
        self.AccessibilityAnalysisButton.setFont(font3)
        self.retranslateUi(MainFrame)
        QtCore.QMetaObject.connectSlotsByName(MainFrame)
    def retranslateUi(self, MainFrame):
        """Set all user-visible strings (translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainFrame.setWindowTitle(_translate("MainFrame", "SATRAP"))
        self.CentralityAnalysisButton.setText(_translate(
            "MainFrame",
            "Centrality Analysis")
        )
        self.AccessibilityAnalysisButton.setText(_translate(
            "MainFrame",
            "Accessibility Analysis")
        )
        self.ChooseAnalysisLabel.setText(_translate("MainFrame",
                                                    "Choose Analysis"))
class Centrality(object):
    def setupUi(self, Frame):
        """Build the centrality form onto *Frame* (fixed 870x230): input
        labels/boxes on the left, data-source and method combo boxes plus
        the Execute button on the right of a vertical divider."""
        Frame.setObjectName("Frame")
        Frame.setFixedSize(870, 230)
        Frame.setWindowIcon(QtGui.QIcon("interface.ico"))
        # Fonts
        font = QtGui.QFont()
        font.setFamily("Palatino Linotype")
        font.setPointSize(9)
        font.setUnderline(False)
        font2 = QtGui.QFont()
        font2.setFamily("Palatino Linotype")
        font2.setPointSize(11)
        font2.setBold(True)
        font2.setWeight(75)
        # Divider between the input form and the execution panel.
        self.verticalline = QtWidgets.QFrame(Frame)
        self.verticalline.setGeometry(QtCore.QRect(600, 0, 20, 231))
        self.verticalline.setFrameShape(QtWidgets.QFrame.VLine)
        self.verticalline.setFrameShadow(QtWidgets.QFrame.Sunken)
        # Labels
        self.PolyBoundaryLabel = QtWidgets.QLabel(Frame)
        self.PolyBoundaryLabel.setGeometry(QtCore.QRect(20, 50, 181, 21))
        self.PolyBoundaryLabel.setFont(font)
        self.PolyBoundaryLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.RegionNameLabel = QtWidgets.QLabel(Frame)
        self.RegionNameLabel.setGeometry(QtCore.QRect(20, 80, 181, 21))
        self.RegionNameLabel.setFont(font)
        self.RegionNameLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.TransportationModeLabel = QtWidgets.QLabel(Frame)
        self.TransportationModeLabel.setGeometry(QtCore.QRect(20, 110,
                                                 181, 21))
        self.TransportationModeLabel.setFont(font)
        self.TransportationModeLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.WebMapLabel = QtWidgets.QLabel(Frame)
        self.WebMapLabel.setGeometry(QtCore.QRect(20, 140, 181, 21))
        self.WebMapLabel.setFont(font)
        self.WebMapLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.ShpOutputLabel = QtWidgets.QLabel(Frame)
        self.ShpOutputLabel.setGeometry(QtCore.QRect(20, 170, 181, 21))
        self.ShpOutputLabel.setFont(font)
        self.ShpOutputLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.DataInputLabel = QtWidgets.QLabel(Frame)
        self.DataInputLabel.setGeometry(QtCore.QRect(640, 80, 111, 21))
        self.DataInputLabel.setFont(font)
        self.DataInputLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.AnalysisMethodLabel = QtWidgets.QLabel(Frame)
        self.AnalysisMethodLabel.setGeometry(QtCore.QRect(640, 110, 111, 21))
        self.AnalysisMethodLabel.setFont(font)
        self.AnalysisMethodLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        # TextBoxes
        self.PolyBoundaryBox = QtWidgets.QLineEdit(Frame)
        self.PolyBoundaryBox.setGeometry(QtCore.QRect(210, 50, 271, 20))
        # Region name/number start disabled; enabled when the "Region Name"
        # data-input mode is selected.
        self.RegionNameBox = QtWidgets.QLineEdit(Frame)
        self.RegionNameBox.setGeometry(QtCore.QRect(210, 80, 221, 20))
        self.RegionNameBox.setEnabled(False)
        self.RegionNumberBox = QtWidgets.QLineEdit(Frame)
        self.RegionNumberBox.setGeometry(QtCore.QRect(440, 80, 41, 20))
        self.RegionNumberBox.setEnabled(False)
        self.TransportationModeBox = QtWidgets.QLineEdit(Frame)
        self.TransportationModeBox.setGeometry(QtCore.QRect(210, 110, 271, 20))
        self.WebMapBox = QtWidgets.QLineEdit(Frame)
        self.WebMapBox.setGeometry(QtCore.QRect(210, 140, 271, 20))
        self.ShpOutputBox = QtWidgets.QLineEdit(Frame)
        self.ShpOutputBox.setGeometry(QtCore.QRect(210, 170, 271, 20))
        # Buttons
        self.BackButton = QtWidgets.QPushButton(Frame)
        self.BackButton.setGeometry(QtCore.QRect(0, 0, 149, 24))
        self.BackButton.setFont(font)
        self.HelpButton = QtWidgets.QPushButton(Frame)
        self.HelpButton.setGeometry(QtCore.QRect(720, 0, 149, 24))
        self.HelpButton.setFont(font)
        self.HelpButton.clicked.connect(self.getHelp)
        self.PolyBrowse = QtWidgets.QPushButton(Frame)
        self.PolyBrowse.setGeometry(QtCore.QRect(490, 50, 75, 21))
        self.PolyBrowse.setFont(font)
        self.PolyBrowse.clicked.connect(self.openFile)
        self.WebMapOutputBrowse = QtWidgets.QPushButton(Frame)
        self.WebMapOutputBrowse.setGeometry(QtCore.QRect(490, 140, 75, 21))
        self.WebMapOutputBrowse.setFont(font)
        self.WebMapOutputBrowse.clicked.connect(self.saveWebmap)
        self.ShpOutputBrowse = QtWidgets.QPushButton(Frame)
        self.ShpOutputBrowse.setGeometry(QtCore.QRect(490, 170, 75, 21))
        self.ShpOutputBrowse.setFont(font)
        self.ShpOutputBrowse.clicked.connect(self.saveShapefile)
        self.Execute = QtWidgets.QPushButton(Frame)
        self.Execute.setGeometry(QtCore.QRect(730, 180, 121, 41))
        self.Execute.setFont(font2)
        self.Execute.clicked.connect(self.returnedFunction)
        # Comboboxes
        self.DataInputSelection = QtWidgets.QComboBox(Frame)
        self.DataInputSelection.setGeometry(QtCore.QRect(760, 80, 91, 22))
        self.DataInputSelection.setFont(font)
        self.DataInputSelection.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.DataInputSelection.setSizeAdjustPolicy(
            QtWidgets.QComboBox.AdjustToContentsOnFirstShow
        )
        self.DataInputSelection.addItem("Boundary")
        self.DataInputSelection.addItem("Region Name")
        self.DataInputSelection.currentIndexChanged.connect(
            self.methodSelection
        )
        self.AnalysisMethodSelection = QtWidgets.QComboBox(Frame)
        self.AnalysisMethodSelection.setGeometry(QtCore.QRect(760, 110,
                                                 91, 22))
        self.AnalysisMethodSelection.setFont(font)
        self.AnalysisMethodSelection.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.AnalysisMethodSelection.setSizeAdjustPolicy(
            QtWidgets.QComboBox.AdjustToContentsOnFirstShow
        )
        self.AnalysisMethodSelection.addItem("Degree")
        self.AnalysisMethodSelection.addItem("Betweenness")
        self.AnalysisMethodSelection.addItem("Closeness")
        self.retranslateUi(Frame)
        QtCore.QMetaObject.connectSlotsByName(Frame)
def checkValid(self):
if not self.ShpOutputBox.text():
return "s", QtWidgets.QMessageBox.warning(None, "Error",
"Shapefile Output \
Folder must be specified.")
elif (
self.DataInputSelection.currentText() == "Boundary" and
(
not self.PolyBoundaryBox.text() or
self.PolyBoundaryBox.text()[-4:] != ".shp"
)
):
return "p", QtWidgets.QMessageBox.warning(None, "Error",
"Boundary data must \
be specified.")
elif (
self.DataInputSelection.currentText() == "Region Name"
and not self.RegionNameBox.text()
):
return "n", QtWidgets.QMessageBox.warning(None, "Error",
"Region name must \
be specified.")
else:
return "t", "t"
def returnedFunction(self):
signal, result = self.checkValid()
validList = ["s", "p", "n"]
if signal not in validList:
try:
funcs = {"Degree": self.degreeCentrality,
"Betweenness": self.betweennessCentrality,
"Closeness": self.closenessCentrality}
function = funcs[self.AnalysisMethodSelection.currentText()]
return function()
except KeyError:
return QtWidgets.QMessageBox.warning(None, "Error",
"Inputted name \
is not available in OSM database.")
except TypeError:
return QtWidgets.QMessageBox.warning(None, "Error",
"Geometry must \
be a polygon.")
except UnknownNetworkType:
return QtWidgets.QMessageBox.warning(None, "Error",
"Transportation mode \
is not available.")
except:
return QtWidgets.QMessageBox.warning(None, "Error",
"Unexpected Error")
def findG(self):
if self.DataInputSelection.currentText() == "Boundary":
if len(self.TransportationModeBox.text()):
G = bk.networkFromPolygon(self.PolyBoundaryBox.text(),
self.TransportationModeBox.text())
if isinstance(G, str):
return QtWidgets.QMessageBox.warning(
None, "Error",
"Polygon is not topologically valid.")
else:
G = bk.networkFromPolygon(self.PolyBoundaryBox.text())
if isinstance(G, str):
return QtWidgets.QMessageBox.warning(
None, "Error",
"Polygon is not topologically valid.")
elif self.DataInputSelection.currentText() == "Region Name":
if (
len(self.TransportationModeBox.text()) and
len(self.RegionNumberBox.text())
):
G = bk.networkFromPlaceName(
self.RegionNameBox.text(),
networkType=self.TransportationModeBox.text(),
whichResult=self.RegionNumberBox.text()
)
elif (len(self.TransportationModeBox.text()) and
not len(self.RegionNumberBox.text())):
G = bk.networkFromPlaceName(
self.RegionNameBox.text(),
networkType=self.TransportationModeBox.text(),
)
elif (not len(self.TransportationModeBox.text()) and
len(self.RegionNumberBox.text())):
G = bk.networkFromPlaceName(
self.RegionNameBox.text(),
whichResult=self.RegionNumberBox.text()
)
elif (not len(self.TransportationModeBox.text()) and
not len(self.RegionNumberBox.text())):
G = bk.networkFromPlaceName(self.RegionNameBox.text())
return G
def degreeCentrality(self):
G = self.findG()
if len(self.WebMapBox.text()):
returnmsg = bk.degreeCentrality(G, self.ShpOutputBox.text(),
self.WebMapBox.text())
if returnmsg == "I":
return QtWidgets.QMessageBox.information(None,
'Done!',
'Operation performed\
via interactive map successfully')
else:
returnmsg2 = bk.degreeCentrality(G, self.ShpOutputBox.text())
if returnmsg2 == "G":
return QtWidgets.QMessageBox.information(None,
'Done!',
'Operation performed\
successfully')
def betweennessCentrality(self):
G = self.findG()
if len(self.WebMapBox.text()):
returnmsg = bk.betweennessCentrality(G, self.ShpOutputBox.text(),
self.WebMapBox.text())
if returnmsg == "I":
return QtWidgets.QMessageBox.information(None,
'Done!',
'Operation performed\
via interactive map successfully')
else:
returnmsg2 = bk.betweennessCentrality(G, self.ShpOutputBox.text())
if returnmsg2 == "G":
return QtWidgets.QMessageBox.information(None,
'Done!',
'Operation performed\
successfully')
def closenessCentrality(self):
G = self.findG()
if len(self.WebMapBox.text()):
returnmsg = bk.closenessCentrality(G, self.ShpOutputBox.text(),
self.WebMapBox.text())
if returnmsg == "I":
return QtWidgets.QMessageBox.information(None,
'Done!',
'Operation performed\
via interactive map successfully')
else:
returnmsg2 = bk.closenessCentrality(G, self.ShpOutputBox.text())
if returnmsg2 == "G":
return QtWidgets.QMessageBox.information(None,
'Done!',
'Operation performed\
successfully')
def methodSelection(self):
if self.DataInputSelection.currentText() == "Boundary":
self.PolyBoundaryBox.setEnabled(True)
self.PolyBrowse.setEnabled(True)
self.RegionNameBox.setEnabled(False)
self.RegionNumberBox.setEnabled(False)
elif self.DataInputSelection.currentText() == "Region Name":
self.PolyBoundaryBox.setEnabled(False)
self.PolyBrowse.setEnabled(False)
self.RegionNameBox.setEnabled(True)
self.RegionNumberBox.setEnabled(True)
def getHelp(self):
QtWidgets.QMessageBox.about(None, "About", "* Input polygon boundary \
data should be in polygon type shapefile format \n\
* Region name should be checked first on \
https://nominatim.openstreetmap.org. \n\
* If there is no region name on the website search, region's network \
is unaccessible. \n\
* Choose the path where the road network of the area of interest and \
the result of the analysis is stored via 'Shapefile Output Folder' \n\
* Choose the path where the interactive map is stored via 'Interactive \
Map Output Path'. \n\
* Available transportation modes on OSM database are; \n\
'drive' - get drivable public streets (but not service roads) \n\
'drive_service' - get drivable public streets, including service \
roads \n\
'walk' - get all streets and paths that pedestrians can use \
(this network type ignores one-way directionality) \n\
'bike' - get all streets and paths that cyclists can use\n\
'all' - download all (non-private) OSM streets and paths\n\
'all_private' - download all OSM streets and paths, including \
private-access ones")
def openFile(self):
name = QtWidgets.QFileDialog.getOpenFileName()
self.PolyBoundaryBox.setText(str(name[0]))
def saveWebmap(self):
path = QtWidgets.QFileDialog.getSaveFileName(None, "Select Directory",
"interactivemap.html",
"HTML Files (*.html)")
self.WebMapBox.setText(str(path[0]))
def saveShapefile(self):
path2 = QtWidgets.QFileDialog.getExistingDirectory(None,
"Select Directory")
self.ShpOutputBox.setText(path2)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Centrality"))
self.BackButton.setText(_translate("Frame", "Back"))
self.HelpButton.setText(_translate("Frame", "Help"))
self.PolyBoundaryLabel.setText(_translate("Frame",
"Polygon Boundary of Area"))
self.RegionNameLabel.setText(_translate("Frame",
"Region Name/Result Number"))
self.TransportationModeLabel.setText(_translate("Frame",
"Transportation Mode \
(*)"))
self.WebMapLabel.setText(_translate("Frame",
"Interactive Map \
Output Path (*)"))
self.ShpOutputLabel.setText(_translate("Frame",
"Shapefile Output Folder"))
self.DataInputLabel.setText(_translate("Frame", "Data Input Method"))
self.DataInputSelection.setItemText(0, _translate("Frame",
"Boundary"))
self.DataInputSelection.setItemText(1, _translate("Frame",
"Region Name"))
self.AnalysisMethodLabel.setText(_translate("Frame", "Analysis \
Method"))
self.AnalysisMethodSelection.setItemText(0, _translate("Frame",
"Degree"))
self.AnalysisMethodSelection.setItemText(1, _translate("Frame",
"Betweenness"))
self.AnalysisMethodSelection.setItemText(2, _translate("Frame",
"Closeness"))
self.PolyBrowse.setText(_translate("Frame", "Browse"))
self.WebMapOutputBrowse.setText(_translate("Frame", "Browse"))
self.ShpOutputBrowse.setText(_translate("Frame", "Browse"))
self.Execute.setText(_translate("Frame", "Execute"))
class Accessibility(object):
    """Qt form for running potential/daily accessibility analyses via ``bk``.

    Fixes over the original:

    * ``dailyAccessibility`` had a precedence bug —
      ``not len(self.WebMapBox.text()) == 0`` parses as
      ``not (len(...) == 0)``, i.e. *non-empty* — which made one branch an
      unreachable duplicate of another and silently ignored the
      threshold+weight+no-webmap input combination.  The branching is now
      exhaustive over all eight combinations.
    * Several user-visible strings embedded long runs of source indentation
      via backslash continuations inside string literals.
    * The eight duplicated backend-call branches were collapsed by applying
      the defaults the original passed as literals (weight=1,
      threshold=3000) up front.
    """

    def setupUi(self, Frame):
        """Create and lay out all widgets on the given Frame."""
        Frame.setObjectName("Frame")
        Frame.setFixedSize(870, 253)
        Frame.setWindowIcon(QtGui.QIcon("interface.ico"))
        font = QtGui.QFont()
        font.setFamily("Palatino Linotype")
        font.setPointSize(9)
        font.setUnderline(False)
        font2 = QtGui.QFont()
        font2.setFamily("Palatino Linotype")
        font2.setPointSize(11)
        font2.setBold(True)
        font2.setWeight(75)
        self.verticalline = QtWidgets.QFrame(Frame)
        self.verticalline.setGeometry(QtCore.QRect(600, 0, 20, 252))
        self.verticalline.setFrameShape(QtWidgets.QFrame.VLine)
        self.verticalline.setFrameShadow(QtWidgets.QFrame.Sunken)
        # Labels
        self.OriginsLabel = QtWidgets.QLabel(Frame)
        self.OriginsLabel.setGeometry(QtCore.QRect(20, 40, 181, 21))
        self.OriginsLabel.setFont(font)
        self.OriginsLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.DestinationsLabel = QtWidgets.QLabel(Frame)
        self.DestinationsLabel.setGeometry(QtCore.QRect(20, 70, 181, 21))
        self.DestinationsLabel.setFont(font)
        self.DestinationsLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.WeightLabel = QtWidgets.QLabel(Frame)
        self.WeightLabel.setGeometry(QtCore.QRect(20, 100, 181, 21))
        self.WeightLabel.setFont(font)
        self.WeightLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.TransportationModeLabel = QtWidgets.QLabel(Frame)
        self.TransportationModeLabel.setGeometry(QtCore.QRect(20, 130,
                                                              181, 21))
        self.TransportationModeLabel.setFont(font)
        self.TransportationModeLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.ThresholdLabel = QtWidgets.QLabel(Frame)
        self.ThresholdLabel.setGeometry(QtCore.QRect(20, 160, 181, 21))
        self.ThresholdLabel.setFont(font)
        self.ThresholdLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.WebMapLabel = QtWidgets.QLabel(Frame)
        self.WebMapLabel.setGeometry(QtCore.QRect(20, 190, 181, 21))
        self.WebMapLabel.setFont(font)
        self.WebMapLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.ShpOutputLabel = QtWidgets.QLabel(Frame)
        self.ShpOutputLabel.setGeometry(QtCore.QRect(20, 220, 181, 21))
        self.ShpOutputLabel.setFont(font)
        self.ShpOutputLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        self.AnalysisMethodLabel = QtWidgets.QLabel(Frame)
        self.AnalysisMethodLabel.setGeometry(QtCore.QRect(640, 110, 111, 21))
        self.AnalysisMethodLabel.setFont(font)
        self.AnalysisMethodLabel.setAlignment(
            QtCore.Qt.AlignRight |
            QtCore.Qt.AlignTrailing |
            QtCore.Qt.AlignVCenter
        )
        # TextBoxes
        self.OriginsBox = QtWidgets.QLineEdit(Frame)
        self.OriginsBox.setGeometry(QtCore.QRect(210, 40, 271, 20))
        self.DestinationsBox = QtWidgets.QLineEdit(Frame)
        self.DestinationsBox.setGeometry(QtCore.QRect(210, 70, 271, 20))
        self.WeightBox = QtWidgets.QLineEdit(Frame)
        self.WeightBox.setGeometry(QtCore.QRect(210, 100, 271, 20))
        self.TransportationModeBox = QtWidgets.QLineEdit(Frame)
        self.TransportationModeBox.setGeometry(QtCore.QRect(210, 130, 271, 20))
        self.ThresholdBox = QtWidgets.QLineEdit(Frame)
        self.ThresholdBox.setGeometry(QtCore.QRect(210, 160, 271, 20))
        # Threshold only applies to the "Daily" method (see enableSelection).
        self.ThresholdBox.setEnabled(False)
        self.WebMapBox = QtWidgets.QLineEdit(Frame)
        self.WebMapBox.setGeometry(QtCore.QRect(210, 190, 271, 20))
        self.ShpOutputBox = QtWidgets.QLineEdit(Frame)
        self.ShpOutputBox.setGeometry(QtCore.QRect(210, 220, 271, 20))
        # Buttons
        self.BackButton = QtWidgets.QPushButton(Frame)
        self.BackButton.setGeometry(QtCore.QRect(0, 0, 149, 24))
        self.BackButton.setFont(font)
        self.HelpButton = QtWidgets.QPushButton(Frame)
        self.HelpButton.setGeometry(QtCore.QRect(720, 0, 149, 24))
        self.HelpButton.setFont(font)
        self.HelpButton.clicked.connect(self.getHelp)
        self.OriginsBrowse = QtWidgets.QPushButton(Frame)
        self.OriginsBrowse.setGeometry(QtCore.QRect(490, 40, 75, 21))
        self.OriginsBrowse.setFont(font)
        self.OriginsBrowse.clicked.connect(self.openFile)
        self.DestinationsBrowse = QtWidgets.QPushButton(Frame)
        self.DestinationsBrowse.setGeometry(QtCore.QRect(490, 70, 75, 21))
        self.DestinationsBrowse.setFont(font)
        self.DestinationsBrowse.clicked.connect(self.openFile2)
        self.WebMapOutputBrowse = QtWidgets.QPushButton(Frame)
        self.WebMapOutputBrowse.setGeometry(QtCore.QRect(490, 190, 75, 21))
        self.WebMapOutputBrowse.setFont(font)
        self.WebMapOutputBrowse.clicked.connect(self.saveWebmap)
        self.ShpOutputBrowse = QtWidgets.QPushButton(Frame)
        self.ShpOutputBrowse.setGeometry(QtCore.QRect(490, 220, 75, 21))
        self.ShpOutputBrowse.setFont(font)
        self.ShpOutputBrowse.clicked.connect(self.saveShapefile)
        self.Execute = QtWidgets.QPushButton(Frame)
        self.Execute.setGeometry(QtCore.QRect(730, 200, 121, 41))
        self.Execute.setFont(font2)
        self.Execute.clicked.connect(self.returnedFunction)
        # SelectionBoxes
        self.AnalysisMethodSelection = QtWidgets.QComboBox(Frame)
        self.AnalysisMethodSelection.setGeometry(QtCore.QRect(760, 110,
                                                              91, 22))
        self.AnalysisMethodSelection.setFont(font)
        self.AnalysisMethodSelection.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.AnalysisMethodSelection.setSizeAdjustPolicy(
            QtWidgets.QComboBox.AdjustToContentsOnFirstShow
        )
        self.AnalysisMethodSelection.addItem("Potential")
        self.AnalysisMethodSelection.addItem("Daily")
        self.AnalysisMethodSelection.currentIndexChanged.connect(
            self.enableSelection
        )
        self.retranslateUi(Frame)
        QtCore.QMetaObject.connectSlotsByName(Frame)

    def checkValid(self):
        """Validate the form before running an accessibility analysis.

        Returns ``(signal, result)``: ``signal`` is "s"/"o"/"d" for the
        failing field or "t" when valid; ``result`` is the warning dialog's
        return value (or "t" on success).
        """
        if not self.ShpOutputBox.text():
            return "s", QtWidgets.QMessageBox.warning(
                None, "Error", "Shapefile Output Folder must be specified.")
        elif (not self.OriginsBox.text() or
              not self.OriginsBox.text().endswith(".shp")):
            return "o", QtWidgets.QMessageBox.warning(
                None, "Error", "Origins must be specified.")
        elif (not self.DestinationsBox.text() or
              not self.DestinationsBox.text().endswith(".shp")):
            return "d", QtWidgets.QMessageBox.warning(
                None, "Error", "Destinations must be specified.")
        else:
            return "t", "t"

    def returnedFunction(self):
        """Dispatch the Execute click to the selected accessibility method."""
        signal, result = self.checkValid()
        if signal in ("s", "o", "d"):
            return  # validation already showed its own warning dialog
        try:
            funcs = {"Potential": self.potentialAccessibility,
                     "Daily": self.dailyAccessibility}
            function = funcs[self.AnalysisMethodSelection.currentText()]
            return function()
        except UnknownNetworkType:
            return QtWidgets.QMessageBox.warning(
                None, "Error", "Transportation mode is not available.")
        # NOTE: the original ended with a bare ``except: raise``, which is a
        # no-op; any other exception simply propagates.

    def origdest(self):
        """Compute routes between the origin and destination shapefiles.

        Returns the ``(route_geom, nodes, G_proj, o, d)`` tuple produced by
        ``bk.origindestination``; the transportation mode is forwarded only
        when the user filled it in.
        """
        kwargs = {}
        if self.TransportationModeBox.text():
            kwargs["networkType"] = self.TransportationModeBox.text()
        route_geom, nodes, G_proj, o, d = bk.origindestination(
            self.OriginsBox.text(),
            self.DestinationsBox.text(),
            **kwargs
        )
        return route_geom, nodes, G_proj, o, d

    def potentialAccessibility(self):
        """Run the potential accessibility analysis and report completion.

        Empty weight falls back to the default the original passed as a
        literal (1).  ``bk`` returns "I" when a web map was written, "P"
        for a plain shapefile run.
        """
        route_geom, nodes, G_proj, o, d = self.origdest()
        weight = self.WeightBox.text() or 1
        if self.WebMapBox.text():
            returnmsg = bk.potentialAccessibility(
                route_geom, nodes, G_proj, o, d,
                self.ShpOutputBox.text(),
                weight, self.WebMapBox.text()
            )
            if returnmsg == "I":
                return QtWidgets.QMessageBox.information(
                    None, 'Done!',
                    'Operation performed via interactive map successfully')
        else:
            returnmsg = bk.potentialAccessibility(
                route_geom, nodes, G_proj, o, d,
                self.ShpOutputBox.text(),
                weight=weight
            )
            if returnmsg == "P":
                return QtWidgets.QMessageBox.information(
                    None, 'Done!', 'Operation performed successfully')

    def dailyAccessibility(self):
        """Run the daily accessibility analysis and report completion.

        Empty weight/threshold fields fall back to the defaults the
        original passed as literals (weight=1, threshold=3000).

        FIX: the original enumerated eight input combinations but the
        branch for "threshold and weight set, no web map" tested
        ``not len(self.WebMapBox.text()) == 0`` (i.e. *non-empty*), making
        it unreachable — that combination silently did nothing.  The
        rewritten branching covers all combinations.
        """
        route_geom, nodes, G_proj, o, d = self.origdest()
        weight = self.WeightBox.text() or 1
        threshold = self.ThresholdBox.text() or 3000
        if self.WebMapBox.text():
            returnmsg = bk.dailyAccessibility(
                route_geom, nodes, G_proj, o, d,
                self.ShpOutputBox.text(),
                weight, threshold,
                self.WebMapBox.text()
            )
            if returnmsg == "I":
                return QtWidgets.QMessageBox.information(
                    None, 'Done!',
                    'Operation performed via interactive map successfully')
        else:
            returnmsg = bk.dailyAccessibility(
                route_geom, nodes, G_proj, o, d,
                self.ShpOutputBox.text(),
                weight=weight, threshold=threshold
            )
            if returnmsg == "D":
                return QtWidgets.QMessageBox.information(
                    None, 'Done!', 'Operation performed successfully')

    def enableSelection(self):
        """Enable the distance-threshold box only for the Daily method."""
        if self.AnalysisMethodSelection.currentText() == "Potential":
            self.ThresholdBox.setEnabled(False)
        elif self.AnalysisMethodSelection.currentText() == "Daily":
            self.ThresholdBox.setEnabled(True)

    def getHelp(self):
        """Show usage notes for the accessibility tool.

        NOTE(fix): the original literal used backslash continuations that
        embedded long runs of source indentation into the displayed text.
        """
        QtWidgets.QMessageBox.about(
            None, "About",
            "* Input origins and destinations data should be in point type "
            "shapefile format \n"
            "* Weight column name of destinations must be entered as text, "
            "otherwise default value 1 is used in calculations \n"
            "* Choose the path where the road network of the area of "
            "interest and the result of the analysis is stored via "
            "'Shapefile Output Folder' \n"
            "* Choose the path where the interactive map is stored via "
            "'Interactive Map Output Path'. \n"
            "* Available transportation modes on OSM database are;\n"
            "'drive' - get drivable public streets (but not service roads)\n"
            "'drive_service' - get drivable public streets, including "
            "service roads\n"
            "'walk' - get all streets and paths that pedestrians can use "
            "(this network type ignores one-way directionality)\n"
            "'bike' - get all streets and paths that cyclists can use\n"
            "'all' - download all (non-private) OSM streets and paths\n"
            "'all_private' - download all OSM streets and paths, including "
            "private-access ones")

    def openFile(self):
        """Ask the user for the origins shapefile and store its path."""
        orig = QtWidgets.QFileDialog.getOpenFileName()
        self.OriginsBox.setText(str(orig[0]))

    def openFile2(self):
        """Ask the user for the destinations shapefile and store its path."""
        dest = QtWidgets.QFileDialog.getOpenFileName()
        self.DestinationsBox.setText(str(dest[0]))

    def saveWebmap(self):
        """Ask for an HTML destination and store it as the web-map path."""
        path = QtWidgets.QFileDialog.getSaveFileName(None, "Select Directory",
                                                     "interactivemap.html",
                                                     "HTML Files (*.html)")
        self.WebMapBox.setText(str(path[0]))

    def saveShapefile(self):
        """Ask for an output directory and store it as the shapefile folder."""
        path2 = QtWidgets.QFileDialog.getExistingDirectory(None,
                                                           "Select Directory")
        self.ShpOutputBox.setText(path2)

    def retranslateUi(self, Frame):
        """Set all user-visible strings of the Accessibility form.

        NOTE(fix): the transportation-mode label previously contained a run
        of spaces from a backslash continuation inside the literal.
        """
        _translate = QtCore.QCoreApplication.translate
        Frame.setWindowTitle(_translate("Frame", "Accessibility"))
        self.BackButton.setText(_translate("Frame", "Back"))
        self.HelpButton.setText(_translate("Frame", "Help"))
        self.OriginsLabel.setText(_translate("Frame", "Origins"))
        self.DestinationsLabel.setText(_translate("Frame", "Destinations"))
        self.WeightLabel.setText(_translate("Frame", "Weight Field (*)"))
        self.TransportationModeLabel.setText(
            _translate("Frame", "Transportation Mode (*)"))
        self.ThresholdLabel.setText(_translate("Frame",
                                               "Distance Threshold (m)"))
        self.WebMapLabel.setText(_translate("Frame",
                                            "Interactive Map Output Path (*)"))
        self.ShpOutputLabel.setText(_translate("Frame",
                                               "Shapefile Output Folder"))
        self.AnalysisMethodLabel.setText(_translate("Frame",
                                                    "Analysis Method"))
        self.AnalysisMethodSelection.setItemText(0, _translate("Frame",
                                                               "Potential"))
        self.AnalysisMethodSelection.setItemText(1, _translate("Frame",
                                                               "Daily"))
        self.OriginsBrowse.setText(_translate("Frame", "Browse"))
        self.DestinationsBrowse.setText(_translate("Frame", "Browse"))
        self.WebMapOutputBrowse.setText(_translate("Frame", "Browse"))
        self.ShpOutputBrowse.setText(_translate("Frame", "Browse"))
        self.Execute.setText(_translate("Frame", "Execute"))
def main():
    """Launch the SATRAP Qt application and enter its event loop."""
    app = QtWidgets.QApplication(sys.argv)
    app.setApplicationName("SATRAP")
    window = Controller()
    window.open_main()
    # exec_() blocks until the GUI quits; propagate its exit code.
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
|
## this converts fantendo wiki to a game
import gradio
def Game_Generator(Game_Name, Game_Type, Game_Description, Game_Rules, Game_Setup, Game_Play, Game_End):
    """Write a game description to the text file ``<Game_Name>.txt``.

    Each argument is written as a labelled line, e.g. ``Game Name: ...``.

    FIX: the original overwrote every parameter with the ``input`` builtin
    (``Game_Name = input`` — note: no call) and then concatenated it to a
    string, which always raised TypeError; the assignments are removed so
    the arguments are actually used.  A ``with`` block now guarantees the
    file is closed.
    """
    fields = [
        ("Game Name", Game_Name),
        ("Game Type", Game_Type),
        ("Game Description", Game_Description),
        ("Game Rules", Game_Rules),
        ("Game Setup", Game_Setup),
        ("Game Play", Game_Play),
        ("Game End", Game_End),
    ]
    with open(Game_Name + ".txt", "w") as out:
        for label, value in fields:
            out.write(label + ": " + value + "\n")
def Program_End():
    """Print a shutdown message and terminate the interpreter.

    FIX: the original also called ``Game_Generator.close()`` here, but
    ``Game_Generator`` is a function (the open file handle was a *local*
    inside it), so that call always raised AttributeError before ``exit()``
    could run; the bogus call is removed.
    """
    print("Program Ended")
    exit()
def Program_Start():
    """Announce that the program has started."""
    print("Program Started")
def Client_Program():
    """Interactive driver that tries to expose Game_Generator via gradio.

    NOTE(review): this function assigns arbitrary attributes on the
    imported ``gradio`` module (``gradio.client``, ``gradio.clientsize``,
    ``gradio.title``, ``gradio.export``) — these do not appear to be gradio
    APIs, and ``gradio.launch`` / ``gradio.game`` presumably do not exist
    as module attributes either; confirm before relying on this function.
    It also blocks on several ``input()`` prompts.
    """
    import gradio as gr  # imported as ``gr`` but never used; code below uses the module-level ``gradio``
    import os
    import sys
    # Ad-hoc "configuration" via module attributes (no effect in gradio).
    gradio.client = "800x800"
    gradio.clientsize= "800x800"
    gradio.launch(Game_Generator, "Game Generator")
    gradio.title= "Game Generator"
    # Interactive prompts; each blocks waiting for stdin.
    gradio.game.gameinfo=input("Please input the game you would like to generate")
    print("Please enter the size of the client")
    gradio.clientsize=input()
    print("Please enter the title of the game")
    gradio.title=input()
    gradio.export=input("Please enter the name of the file you would like to export to")
    gradio.export=gradio.export+input()
    gradio.launch(Game_Generator)
    print("Done")
    # Prints the function object's repr, not a generated game.
    print(Game_Generator)
## play the game
def Game_Player():
    """Attempt to "play" the game by re-launching the generator UI.

    NOTE(review): like Client_Program, this sets non-API attributes on the
    ``gradio`` module and calls ``gradio.launch``, which presumably is not
    a gradio module attribute — confirm before use.  The launch call is
    duplicated.
    """
    import gradio
    import os
    import sys
    gradio.client = "800x800"
    gradio.launch(Game_Generator, "Game Generator")
    gradio.clientsize= "800x800"
    # Duplicate launch call, as in the original.
    gradio.launch(Game_Generator, "Game Generator")
    gradio.title= "Game Generator"
## write the game engine using pygame
def Game_Engine():
    """Minimal pygame event loop: opens an 800x800 window and logs key presses.

    Runs forever until the window is closed or Escape is pressed, in which
    case pygame shuts down and the interpreter exits.
    """
    import pygame
    import os
    import sys
    pygame.init()
    pygame.display.set_caption("Game Engine")
    screen = pygame.display.set_mode((800,800))
    pygame.display.flip()
    # Blocking event loop: never returns normally.
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                # Escape terminates; all other handled keys just log a name.
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    sys.exit()
                if event.key == pygame.K_UP:
                    print("up")
                if event.key == pygame.K_DOWN:
                    print("down")
                if event.key == pygame.K_LEFT:
                    print("left")
                if event.key == pygame.K_RIGHT:
                    print("right")
                if event.key == pygame.K_SPACE:
                    print("space")
                if event.key == pygame.K_RETURN:
                    print("enter")
# Smoke output: print the repr of each entry point defined above.
for _entry_point in (Game_Generator, Game_Engine, Game_Player):
    print(_entry_point)
print(Client_Program) |
import logging
from typing import List, Union
import torch
import torch.nn
import flair.nn
from flair.data import Dictionary, Label, Sentence
from flair.embeddings import TokenEmbeddings
log = logging.getLogger("flair")
class WordTagger(flair.nn.DefaultClassifier[Sentence]):
    """A simple family of models that assigns a tag to each word in a text."""

    def __init__(
        self,
        embeddings: TokenEmbeddings,
        tag_dictionary: Dictionary,
        tag_type: str,
        **classifierargs,
    ):
        """
        Initializes a WordTagger
        :param embeddings: word embeddings used in tagger
        :param tag_dictionary: dictionary of tags you want to predict
        :param tag_type: string identifier for tag type
        :param beta: Parameter for F-beta score for evaluation and training annealing
        """
        super().__init__(
            label_dictionary=tag_dictionary,
            final_embedding_size=embeddings.embedding_length,
            **classifierargs,
        )
        # word embeddings used by the tagger
        self.embeddings = embeddings
        # identifier of the tag type this model predicts
        self.tag_type: str = tag_type
        # push all parameters to the configured flair device
        self.to(flair.device)

    def _get_state_dict(self):
        """Serialize the model, extending the base classifier's state dict."""
        state = super()._get_state_dict()
        state["embeddings"] = self.embeddings
        state["tag_dictionary"] = self.label_dictionary
        state["tag_type"] = self.tag_type
        return state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Re-create a WordTagger from a serialized state dictionary."""
        return super()._init_model_with_state_dict(
            state,
            embeddings=state["embeddings"],
            tag_dictionary=state["tag_dictionary"],
            tag_type=state["tag_type"],
            **kwargs,
        )

    def forward_pass(
        self,
        sentences: Union[List[Sentence], Sentence],
        return_label_candidates: bool = False,
    ):
        """Embed every token of the batch and collect its gold label."""
        if not isinstance(sentences, list):
            sentences = [sentences]

        # embed the whole mini-batch in one call
        self.embeddings.embed(sentences)
        names = self.embeddings.get_names()

        tokens, vectors, gold_labels = [], [], []
        for sentence in sentences:
            for token in sentence:
                tokens.append(token)
                vectors.append(token.get_embedding(names))
                gold_labels.append([token.get_tag(self.label_type).value])

        embedded_tokens = torch.stack(vectors)

        if return_label_candidates:
            empty_candidates = [Label(value=None, score=0.0) for _ in tokens]
            return embedded_tokens, gold_labels, tokens, empty_candidates

        return embedded_tokens, gold_labels

    @property
    def label_type(self):
        """The tag type this model predicts (alias for ``tag_type``)."""
        return self.tag_type

    def _print_predictions(self, batch, gold_label_type):
        """Render gold vs. predicted tags, one CoNLL-style line per token."""
        lines = []
        for datapoint in batch:
            for token in datapoint:
                lines.append(
                    f"{token.text} "
                    f"{token.get_tag(gold_label_type, 'O').value} "
                    f"{token.get_tag('predicted', 'O').value}\n"
                )
            lines.append("\n")
        return lines
|
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from torch import Tensor
import random
from typing import List, Optional, Tuple
from utils.math_utils import bound_fn
from .base_layer import BaseLayer
class RandomApply(BaseLayer):
    """
    This layer randomly applies a list of modules during training.

    Args:
        module_list (List): List of modules
        keep_p (Optional[float]): Keep P modules from the list during training. Default: 0.8 (or 80%)
    """

    def __init__(
        self, module_list: List, keep_p: Optional[float] = 0.8, *args, **kwargs
    ) -> None:
        super().__init__()
        n_modules = len(module_list)
        self.module_list = module_list
        # Indexes of the modules eligible for random skipping; module 0 is
        # always applied in training mode (see forward).
        self.module_indexes = list(range(1, n_modules))
        k = int(round(n_modules * keep_p))
        self.keep_k = bound_fn(min_val=1, max_val=n_modules, value=k)

    def forward(self, x: Tensor) -> Tensor:
        if self.training:
            # FIX: only n_modules - 1 optional indexes exist, but keep_k is
            # bounded by n_modules; with a high keep_p the original
            # random.sample(..., k=self.keep_k) raised ValueError ("sample
            # larger than population").  Cap the sample size accordingly.
            k = min(self.keep_k, len(self.module_indexes))
            indexes = [0] + sorted(random.sample(self.module_indexes, k=k))
            for idx in indexes:
                x = self.module_list[idx](x)
        else:
            # Evaluation: apply every module deterministically.
            for layer in self.module_list:
                x = layer(x)
        return x

    def profile_module(self, x, *args, **kwargs) -> Tuple[Tensor, float, float]:
        """Accumulate parameter and MAC counts over all contained modules."""
        params, macs = 0.0, 0.0
        for layer in self.module_list:
            x, p, m = layer.profile_module(x)
            params += p
            macs += m
        return x, params, macs

    def __repr__(self):
        format_string = "{}(apply_k (N={})={}, ".format(
            self.__class__.__name__, len(self.module_list), self.keep_k
        )
        for layer in self.module_list:
            format_string += "\n\t {}".format(layer)
        format_string += "\n)"
        return format_string
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
import load_data
# Command-line interface: training batch size and number of steps.
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=1000,
                    help='batch size')
parser.add_argument('--train_steps', type=int, default=10000,
                    help='number of training steps')
def main(argv):
    """Train, evaluate and run sample predictions with a DNN classifier."""
    args = parser.parse_args(argv[1:])

    # Load the training and test splits.
    (train_x, train_y), (test_x, test_y) = load_data.load_data()

    # One numeric feature column per input key.
    feature_columns = [
        tf.feature_column.numeric_column(key=key) for key in train_x.keys()
    ]

    # DNN with two hidden layers of 10 units; 6 output classes.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[10, 10],
        n_classes=6)

    # Train.
    classifier.train(
        input_fn=lambda: load_data.train_input_fn(train_x, train_y,
                                                  args.batch_size),
        steps=args.train_steps)

    # Evaluate.
    eval_result = classifier.evaluate(
        input_fn=lambda: load_data.eval_input_fn(test_x, test_y,
                                                 args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # 1 1 1 1 1 1 6 1 1 2 1 3 1 77.75
    # 1 1 1 4 1 1 3 1 1 1 1 1 1 45.465

    # Predict on a handful of hand-written samples.
    expected = ["C", "C", "C", "C", "F"]
    predict_x = {
        't0': [1, 2, 2, 1, 1],
        't1': [1, 3, 1, 1, 1],
        't2': [1, 3, 2, 1, 1],
        't3': [2, 3, 1, 1, 4],
        't4': [2, 2, 1, 1, 1],
        't5': [2, 2, 1, 1, 1],
        't6': [2, 17, 6, 6, 3],
        't7': [1, 2, 2, 1, 1],
        't8': [3, 6, 2, 1, 1],
        't9': [1, 9, 3, 2, 1],
        't10': [8, 13, 9, 1, 1],
        't11': [1, 4, 1, 3, 1],
        't12': [8, 3, 2, 1, 1]
    }

    predictions = classifier.predict(
        input_fn=lambda: load_data.eval_input_fn(predict_x,
                                                 labels=None,
                                                 batch_size=args.batch_size))

    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print('\nPrediction is "{}" ({:.1f}%), expected "{}"'.format(
            load_data.GRADES[class_id], 100 * probability, expec))
if __name__ == '__main__':
    # Raise TF log verbosity so training progress is printed (TF1 API).
    tf.logging.set_verbosity(tf.logging.INFO)
    # TF1-style entry point: tf.app.run invokes main with parsed argv.
    tf.app.run(main)
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Jim Martens
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Masterthesis package.
Subpackages:
``ssd_keras``: provides an implementation of SSD
Modules:
``data``: provides functionality to load data sets
``definitions``: contains mapper between COCO classes and WordNet IDs
``evaluate``: provides functionality to evaluate networks
``main``: main entrance point of application
``ssd``: provides functionality to use the SSD models
"""
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 10 14:38:28 2020
@author: peterpiontek
"""
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
# Load detector data; first CSV column becomes a datetime index.
df = pd.read_csv("data/final_data.csv", index_col = 0, parse_dates = True)
# Keep 2018 only (strictly between 2017 and 2019).
df = df[(df.index.year > 2017) & (df.index.year < 2019)]
df = df[["speed", "flow"]]
# Fundamental relation density = flow / speed; the * 60 suggests per-minute
# counts scaled to hourly flow — TODO confirm the units of "flow".
df["density"] = (df.flow * 60) / df.speed
# Weekly aggregates: mean speed/density, summed flow; [:-1] drops the final
# (possibly partial) week.
df2 = df.resample("W").agg({"speed": np.mean, "flow": np.sum, "density": np.mean})[:-1]
# Quick exploratory summaries (values only visible when run interactively).
df2.speed.mean()
df2.flow.mean()
df2.flow.sort_values()
df2.speed.sort_values()
x = df2.index
y1 = df2.speed
y2 = df2.flow
y3 = df2.density
# create plot
fig, ax1 = plt.subplots()
ax1.set_title("Speed and flow, 2018")
ax1.plot(x, y2, color = 'r', label = 'flow')
ax1.set_ylabel("flow (no. of vehicles)")
# Second y-axis sharing the same x-axis for the speed series.
ax2 = ax1.twinx()
ax2.plot(x, y1, 'b-', label = 'speed')
ax2.set_ylabel("speed (kph)")
# Merge both axes' legend entries into a single legend.
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax1.legend(h1+h2, l1+l2)
def make_patch_spines_invisible(ax):
    """Keep *ax*'s frame on but hide its background patch and all spines.

    Standard matplotlib recipe for stacking a third y-axis: the caller
    re-enables just the one spine it wants to show afterwards.
    """
    ax.set_frame_on(True)
    ax.patch.set_visible(False)
    for _, spine in ax.spines.items():
        spine.set_visible(False)
# Three-axis version: flow, speed and density share one x-axis.
fig, ax = plt.subplots()
# Leave room on the right for the offset third axis.
fig.subplots_adjust(right=0.75)
par1 = ax.twinx()
par2 = ax.twinx()
# Push the third axis' spine outside the second one.
par2.spines["right"].set_position(("axes", 1.2))
# Hide everything on par2 except the right spine we just moved.
make_patch_spines_invisible(par2)
par2.spines["right"].set_visible(True)
p1, = ax.plot(x, y2, color = 'r', alpha = 0.3, label = 'Flow')
p2, = par1.plot(x, y1, 'b-', alpha = 0.3, label = 'Speed')
p3, = par2.plot(x, y3, 'g-', label = "Density")
ax.set_xlabel("Time")
ax.set_ylabel("Flow")
par1.set_ylabel("Speed")
par2.set_ylabel("Density")
# Colour each y-label to match its line.
ax.yaxis.label.set_color(p1.get_color())
par1.yaxis.label.set_color(p2.get_color())
par2.yaxis.label.set_color(p3.get_color())
tkw = dict(size=4, width=1.5)
ax.tick_params(axis='y', colors=p1.get_color(), **tkw)
par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
par2.tick_params(axis='y', colors=p3.get_color(), **tkw)
ax.tick_params(axis='x', **tkw)
lines = [p1, p2, p3]
# Single legend built from all three line handles.
ax.legend(lines, [l.get_label() for l in lines])
plt.show() |
# Generated by Django 3.0.3 on 2020-08-06 17:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Guidebook.is_approved`` so the column default becomes True."""
    dependencies = [
        ('guidebook', '0016_auto_20200806_1756'),
    ]
    operations = [
        migrations.AlterField(
            model_name='guidebook',
            name='is_approved',
            field=models.BooleanField(default=True),
        ),
    ]
|
"""\
Plotting: PL
============
.. automodule:: scanpy
.. note::
See the :ref:`settings` section for all important plotting configurations.
Generic
-------
.. autosummary::
:toctree: .
pl.scatter
pl.heatmap
pl.dotplot
pl.violin
pl.stacked_violin
pl.matrixplot
pl.clustermap
pl.ranking
Preprocessing
-------------
Methods for visualizing quality control and results of preprocessing functions.
.. autosummary::
:toctree: .
pl.highest_expr_genes
pl.filter_genes_dispersion
Tools
-----
Methods that extract and visualize tool-specific annotation in an
:class:`~anndata.AnnData` object. For any method in module ``tl``, there is
a method with the same name in ``pl``.
**PCA**
.. autosummary::
:toctree: .
pl.pca
pl.pca_loadings
pl.pca_variance_ratio
pl.pca_overview
**Embeddings**
.. autosummary::
:toctree: .
pl.tsne
pl.umap
pl.diffmap
pl.draw_graph
pl.phate
**Branching trajectories and pseudotime, clustering**
Visualize clusters using one of the embedding methods passing ``color='louvain'``.
.. autosummary::
:toctree: .
pl.dpt_groups_pseudotime
pl.dpt_timeseries
pl.paga
pl.paga_path
pl.paga_compare
**Marker genes**
.. autosummary::
:toctree: .
pl.rank_genes_groups
pl.rank_genes_groups_violin
pl.rank_genes_groups_stacked_violin
pl.rank_genes_groups_heatmap
pl.rank_genes_groups_dotplot
pl.rank_genes_groups_matrixplot
**Simulations**
.. autosummary::
:toctree: .
pl.sim
"""
from .anndata import scatter, violin, ranking, clustermap, stacked_violin, heatmap, dotplot, matrixplot, tracksplot
from .preprocessing import filter_genes_dispersion, highly_variable_genes
from .tools.scatterplots import pca, diffmap, draw_graph, tsne, phate, umap, ivis
from .tools import pca_loadings, pca_scatter, pca_overview, pca_variance_ratio
from .tools.paga import paga, paga_adjacency, paga_compare, paga_path
from .tools import dpt_timeseries, dpt_groups_pseudotime
from .tools import rank_genes_groups, rank_genes_groups_violin
from .tools import rank_genes_groups_dotplot, rank_genes_groups_heatmap, rank_genes_groups_stacked_violin, rank_genes_groups_matrixplot, rank_genes_groups_tracksplot
from .tools import sim
from .rcmod import set_rcParams_scanpy, set_rcParams_defaults
from . import palettes
from .utils import matrix
from .utils import timeseries, timeseries_subplot, timeseries_as_heatmap
from .qc import highest_expr_genes
|
# Combinatoric selections
from sympy import binomial
LIMIT = 100
def solve(limit=LIMIT):
    """Project Euler 53: count binomial coefficients exceeding one million.

    Counts how many values of C(n, r) are greater than 1,000,000 for
    1 <= n <= limit and 0 < r < n.

    Args:
        limit: inclusive upper bound for n (defaults to LIMIT, generalizing
            the previously hard-coded constant).

    Returns:
        The number of (n, r) pairs whose binomial coefficient exceeds 10**6.
    """
    # math.comb is an exact integer binomial from the stdlib — no need for
    # sympy's symbolic machinery (and the int() round-trip) here.
    from math import comb

    return sum(
        comb(n, r) > 1_000_000
        for n in range(1, limit + 1)
        for r in range(1, n))
if __name__ == "__main__":
print(solve())
|
import argparse
import csv
import json
import sklearn.manifold
import torch
import umap # type: ignore
# No type stubs for umap-learn. Ignore mypy
def parse_arguments():
    """Parse command-line arguments for the embedding-projection script.

    Returns:
        argparse.Namespace with:
            papers: path to the paper CSV file (required positional).
            embeddings: path to the embeddings file to shrink (required positional).
            projection_method: "umap" or "tsne"; defaults to "tsne".
    """
    parser = argparse.ArgumentParser(description="MiniConf Portal Command Line")
    # Positional arguments are always required, so the previous
    # ``default=False`` values could never take effect and were misleading.
    parser.add_argument("papers", help="paper file")
    parser.add_argument("embeddings", help="embeddings file to shrink")
    parser.add_argument("--projection-method", default="tsne", help="[umap|tsne]")
    return parser.parse_args()
if __name__ == "__main__":
    import sys  # stderr for diagnostics so stdout stays pure JSON

    args = parse_arguments()
    # The embeddings file is a torch-saved tensor, one row per paper.
    emb = torch.load(args.embeddings)
    if args.projection_method == "tsne":
        out = sklearn.manifold.TSNE(n_components=2).fit_transform(emb.numpy())
    elif args.projection_method == "umap":
        out = umap.UMAP(
            n_neighbors=5, min_dist=0.3, metric="correlation", n_components=2
        ).fit_transform(emb.numpy())
    else:
        # BUG FIX: these warnings previously went to stdout, corrupting the
        # JSON document printed below when the output is piped to a file.
        print("invalid projection-method: {}".format(args.projection_method),
              file=sys.stderr)
        print("Falling back to T-SNE", file=sys.stderr)
        out = sklearn.manifold.TSNE(n_components=2).fit_transform(emb.numpy())
    d = []
    # Pair each CSV row (by order) with its projected 2-D position.
    with open(args.papers, "r") as f:
        abstracts = list(csv.DictReader(f))
        for i, row in enumerate(abstracts):
            d.append({"id": row["UID"], "pos": out[i].tolist()})
    # The JSON payload is the script's only stdout output.
    print(json.dumps(d))
|
# -*-coding:utf-8-*-
import torch
import torch.nn as nn
class DenseBottleneck(nn.Module):
    """Dense layer with a bottleneck: BN -> act -> 1x1 conv -> BN -> act -> 3x3 conv.

    The layer's output (``growth_rate`` channels) is concatenated with its
    input along the channel axis, as in DenseNet.

    NOTE(review): the activation here is Sigmoid rather than the canonical
    ReLU — it appears deliberate in this file, but worth confirming.
    """

    def __init__(self, in_channels, growth_rate):
        super().__init__()
        # 1x1 conv expands to 4 * growth_rate channels; 3x3 conv reduces to
        # growth_rate channels.
        expansion = 4 * growth_rate
        self.bottle_neck = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.Sigmoid(),
            nn.Conv2d(in_channels, expansion, kernel_size=1, bias=False),
            nn.BatchNorm2d(expansion),
            nn.Sigmoid(),
            nn.Conv2d(expansion, growth_rate, kernel_size=3, padding=1, bias=False),
        )

    def forward(self, x):
        new_features = self.bottle_neck(x)
        return torch.cat([x, new_features], dim=1)
class Transition(nn.Module):
    """Downsampling transition between dense blocks.

    BN -> 1x1 conv (channel compression) -> 2x2 average pool (halves H and W).
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.down_sample = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            nn.AvgPool2d(kernel_size=2, stride=2),
        )

    def forward(self, x):
        return self.down_sample(x)
class DenseNet(nn.Module):
    """DenseNet-style backbone with a 3x3 conv stem (CIFAR-sized inputs).

    Args:
        block: dense-layer factory, called as ``block(in_channels, growth_rate)``;
            the produced module must add ``growth_rate`` output channels.
        nblocks: number of dense layers in each dense block.
        growth_rate: channels contributed by every dense layer.
        reduction: channel-compression factor applied at each transition.
        num_class: size of the final classification layer.
    """

    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_class=100):
        super().__init__()
        self.growth_rate = growth_rate

        channels = 2 * growth_rate
        # Stem: single 3x3 conv, no pooling.
        self.conv1 = nn.Conv2d(3, channels, kernel_size=3, padding=1, bias=False)

        # Submodule names are kept identical to the original implementation so
        # saved state_dicts remain loadable.
        self.features = nn.Sequential()
        last = len(nblocks) - 1
        for index, n_layers in enumerate(nblocks[:-1]):
            self.features.add_module(
                "dense_block_layer_{}".format(index),
                self._make_dense_layers(block, channels, n_layers),
            )
            channels += growth_rate * n_layers
            # int() floors, compressing the channel count by ``reduction``.
            compressed = int(reduction * channels)
            self.features.add_module(
                "transition_layer_{}".format(index),
                Transition(channels, compressed),
            )
            channels = compressed

        # Final dense block has no transition after it.
        self.features.add_module(
            "dense_block{}".format(last),
            self._make_dense_layers(block, channels, nblocks[last]),
        )
        channels += growth_rate * nblocks[last]
        self.features.add_module('bn', nn.BatchNorm2d(channels))
        self.features.add_module('sigmoid', nn.Sigmoid())

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(channels, num_class)

    def forward(self, x):
        out = self.conv1(x)
        out = self.features(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        return self.linear(out)

    def _make_dense_layers(self, block, in_channels, nblocks):
        """Stack ``nblocks`` dense layers; each adds ``growth_rate`` channels."""
        layers = nn.Sequential()
        for index in range(nblocks):
            layers.add_module(
                'bottle_neck_layer_{}'.format(index),
                block(in_channels, self.growth_rate),
            )
            in_channels += self.growth_rate
        return layers
def DenseNet121(num_class):
    # DenseNet-121 configuration: [6, 12, 24, 16] layers per block, growth rate 32.
    return DenseNet(DenseBottleneck, [6,12,24,16], growth_rate=32,num_class=num_class)
def DenseNet169(num_class):
    # DenseNet-169 configuration: [6, 12, 32, 32] layers per block, growth rate 32.
    return DenseNet(DenseBottleneck, [6,12,32,32], growth_rate=32,num_class=num_class)
def DenseNet201(num_class):
    # DenseNet-201 configuration: [6, 12, 48, 32] layers per block, growth rate 32.
    return DenseNet(DenseBottleneck, [6,12,48,32], growth_rate=32,num_class=num_class)
def DenseNet161(num_class):
    # DenseNet-161 configuration: [6, 12, 36, 24] layers per block, growth rate 48.
    return DenseNet(DenseBottleneck, [6,12,36,24], growth_rate=48,num_class=num_class)
|
# encoding: utf-8
# author: Taehong Kim
# email: peppy0510@hotmail.com
from wx.lib.embeddedimage import PyEmbeddedImage
SmallUpArrow = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAADxJ"
"REFUOI1jZGRiZqAEMFGke2gY8P/f3/9kGwDTjM8QnAaga8JlCG3CAJdt2MQxDCAUaOjyjKMp"
"cRAYAABS2CPsss3BWQAAAABJRU5ErkJggg==")
SmallDnArrow = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAEhJ"
"REFUOI1jZGRiZqAEMFGke9QABgYGBgYWdIH///7+J6SJkYmZEacLkCUJacZqAD5DsInTLhDR"
"bcPlKrwugGnCFy6Mo3mBAQChDgRlP4RC7wAAAABJRU5ErkJggg==")
checkmark_icon14 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAABCUlEQVR4nNWSv0rEQBDGv9UD"
"g0WQSPC2CaZJtW0IWCxpLJKnsPEJ0sfCPIAgSIr1AdLZpbBLYREsLNIES0ml2yRimnBjJ3cc"
"enJWDkwxML+Zb/4wIsI2trMV9Rdwthx4ngff9xEEAYQQGIZh1nXd6TiOr0mSPP6qI2MMRHRQ"
"FMVtnudXm6TuATgEANM0oZS6qaqKA9hdq0xEX26a5hPnfFRKIcuycwAURdF9XdfHy3lEtApK"
"Ke8syyIpZTufz99c131pmuZIa40fwTiO9x3HuQZAhmEsyrI86fsebduugSszTtP0wRi7MAzj"
"PU3TyzAMH7TWm8/BOYdt2z2AMyHE8zAM3y0d7P+83CeHypR+yP8P/AAAAABJRU5ErkJggg==")
listbox_brokenlink_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAQAAAAMCAYAAABFohwTAAAAaklEQVR4nE3OvQnCYBSF4SfX"
"rxFBiGQph0hl6QKu4ViS0s4hxCIxGNBYeEVP+XL+YIsed7QFS6x89AyMfnpE2qcEr8D855gC"
"QxbCEKhQEiwKbrhgg+s3u06gQoN9fjnCLpdmHAo6nFHj9AYfShbMZoiymQAAAABJRU5ErkJg"
"gg==")
listbox_brokenlink_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAQAAAAMCAYAAABFohwTAAAAcUlEQVR4nE3OsQkCQQBE0Xfr"
"JiIIik0ZWYEV2IQtWJYYmlnEYXCnKOgYuKA//HyGkWSdZEhyS7KtmGLmy6vg7sejYMCziXdB"
"/opnwYhbE2NBh9rEpOKKC5bouyQwb1VfscKufTlUbLBvG33FCWcscPwAHbsliUqV3YQAAAAA"
"SUVORK5CYII=")
listbox_brokenlink_red = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAQAAAAMCAYAAABFohwTAAAAb0lEQVR4nE3OMQrCUBRE0eP3"
"NyIICW7KRVhZugG34bIkpZ2LEIskYkCfRR7owDSXyzCCXdAHY7CvWGFtzrvg6ZdXQY8pwacg"
"/oypYMCYYChYoCZYVjxwQ4u7mEc2QRupbnHML2fBIYjsqaLDFQ0uX4+rIsNUxKskAAAAAElF"
"TkSuQmCC")
listbox_tab_add = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAMElEQVR4nGP8//8/AwycPHny"
"PwMDA4O5uTkjTIyJgQAgqIDxxIkT//EpIGwC7R0JAIW3EV/jFS/AAAAAAElFTkSuQmCC")
listbox_playing_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAkAAAAJCAYAAADgkQYQAAAAPklEQVR4nJWQwQ0AIAwC6+tG"
"6Y8VdR6XxAE0VUn4XQgQkrrtqByAAUuaV6iCN+gEN8BRKDPHU9Jfp3Ldy08LPz2cvZ85YukA"
"AAAASUVORK5CYII=")
listbox_playing_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAkAAAAJCAYAAADgkQYQAAAAUUlEQVR4nI2QsQ2AMAwEnyit"
"dwqbwKQw03uApwkBRcbipG98VxkkN0nIBpLqa19RwcPh7nL3hokyH6J4IakgfLPXRK5mdgJA"
"FA15UzM5+POnCxcsagppPRu0AAAAAElFTkSuQmCC")
listbox_stop_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAkAAAAJCAYAAADgkQYQAAAAKklEQVR4nGP8//8/AyHARFAF"
"AwMDC4zBwcGBYeSPHz8YiTaJzooYqRYEAGtJCg2iIiDzAAAAAElFTkSuQmCC")
listbox_stop_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAkAAAAJCAYAAADgkQYQAAAAP0lEQVR4nGP8//8/AyHAwsDA"
"wNDb29uGS0FxcXEVC4yTlpZWia5g1qxZ7QwMDAxMBO2ivyK4w2GOxAYYiQknAPKrEGYaakYq"
"AAAAAElFTkSuQmCC")
# NOTE(review): exact duplicate of the ``listbox_tab_add`` assignment earlier
# in this module — the reassignment is redundant and one copy can be removed.
listbox_tab_add = PyEmbeddedImage(
    "iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAMElEQVR4nGP8//8/AwycPHny"
    "PwMDA4O5uTkjTIyJgQAgqIDxxIkT//EpIGwC7R0JAIW3EV/jFS/AAAAAAElFTkSuQmCC")
listbox_tab_close = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAUElEQVR4nIWPQQrAMAgEJ3nX"
"+vrsv7aXJthesiDIyKCSBElJQq/NZlVFElUV3nQ2khxgG4Ddr7XGSPKxgDMEmFwyu20b2/Sb"
"hqT0nX+B25sPaylfC9YsisEAAAAASUVORK5CYII=")
playbox_button_agc_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABFklEQVR4nO2UIYsDMRCFX49V"
"keFMIG5aDipjloq1MSeqI6vzG6L6G6orT1ec2X9QU79QuRAoZeXqE9lty5Z2I46I6z01vAl8"
"YfIyEyJCcmUAvPcpkUKILFRt26ZBMsYAvKWBDfRP/ZvULO6YMu6jWn8dAABS21XOAeD4Hbx7"
"Z0REFNL8RFJbY4xR/QWslp3tjAKUcS40e+exGGNEFEOV2hoFFbBX6E174IxSIyYs57OmKnF6"
"54XCZXrdUJv9dhcJvNF4muR8xqefzq1yPl1oeaiOPC8U6nKz3u4bAPW54XkxHP9TTYjIe/94"
"I0ptl9htyvpa4pKdPjzxaWKMCSGi0vSLCu/6Slvilajdf00ZqI4qhEiJBPADbSJZ+8/BNTwA"
"AAAASUVORK5CYII=")
playbox_button_agc_red = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABQElEQVR4nO2VIWvDQBiG34y4"
"qQwijrleU+hUxGDQ2orEVdVVx7WT9wMmTnZ11XVRE2sTEbtAISIukKZxJaKwU9WZyEQYY82g"
"nFj3quPh4HkPPr5TKKWQHhVAURQylYQQtTodJxM5yuv5HMCVHNmX/Fv/plVtdi13eDpklg0A"
"yKKlEewBwByVVutb8nMavTWL3mDiJa8K+EbS3TJWMmd2cJ0cyH0j0Nd1cg6rWCX60OogzQF4"
"adzr0jYAaNMxW7SQvR96gwe7Rs5hFTtX79jQ7g6pV6PPS65w3o9Eg96/t2a7JIxdhS8e9/FT"
"JOyOGQYbD9p0zLaDWwDtGz0MNlUhz2/U4+Q0iVWCmcOmGiCi/usuG1vbwdLgHABgrpkG1Ig5"
"Ku+1k1aFUloUhcw9TAi5pC1xSdbPGa4+W6lWQohMJYAPvr973A2aPgoAAAAASUVORK5CYII=")
playbox_button_agc_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABFUlEQVR4nO3WMYqEMBgF4DeL"
"l/gZFoQ0HkAWm+ktAhYD3iCdpQfwAG5nlwsMwhRCCnsbkRzA0mb5r7GFCrJbOAwSFnZeFV/z"
"xZCEnIQQcB4PADO7JInIm0d1Xbsh0zQF8OYG+5GX+q9VW8nKrh/c5HLO2v1uDlC5uSFGb5cJ"
"qO6ijTFGq6moLGArpf1i2xyh8tD5URbNrO3b4PJBAEBJabIQ/DUF6hpumiNUHjo/CnF+n/rN"
"byyLmjfP3Gv7Kg/d2BZSKj22t4bDKB713YKS0mgVAKCzP+r7uvwPzcN7AIXSJiGAm/xz4DLT"
"KldSAgDiwhCwaeLCJLSrnoQQzOzyHiaiP31eX+rTWXaTS3J5SxDt7/Vj8w3i63uTwpeYJAAA"
"AABJRU5ErkJggg==")
playbox_button_ff_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAmklEQVR4nO3Uuw0DIRAE0DmL"
"aDuYeFqjDTqhv+2A2AEny0KyzaHz6oKbiBUST8tvk4TwJADuHkmSTH3UWoshzQzAIwYbcqsX"
"UkspOedP5b9UACTfsaGczybJ3WdeTinly6y711p/LmJmJE8710N9p7PUyV7X1Q68NvyQt6IO"
"wIK3R1L/G2NiZpIu/0vc6lL2Oxx5oXaVZCQJ4AmyrTqy4lZ/dgAAAABJRU5ErkJggg==")
playbox_button_ff_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAmUlEQVR4nO3VsQ2AIBAF0K9h"
"iauxtrZ2lhvCERziZrGmtrZnDQstCEYDRC4W/opLSB4/IdBYa6EeA8B7r0kSkTlWy7LqkOPY"
"A2h1sCi/+iF1GDqR+W6spQIQkRCLxlpqiN0dpZb6cJRE27ylMjPzlLi5pCszO7dFYzqJ3K5R"
"oax+hWrY7zpm5fOvxK8W5bxNx2erqhKRJglgB3yPM9oSP3qWAAAAAElFTkSuQmCC")
playbox_button_fr_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAjklEQVR4nO2UsQ3AIAwEnYjK"
"G7j+1ViDijW8HxtQpwhFRBQFCLISKVfh6vTo9QsAMscRUUrJUikibn/lnG2UzExEq42s4re+"
"1Oq9DyFcnbe4AZ+IXJ3zrWdBV75u61igp1ZVnevuaJOqxhiP81md7XS3qco9+A0A9m20gZkB"
"fGolfmszpcOWhSrWubvTwgaF0CjkaPX9pQAAAABJRU5ErkJggg==")
playbox_button_fr_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAl0lEQVR4nGNUVlZmoDtgYWBg"
"eP78OT2tlJSUZIGw9u27RB8rnZz0GBgYmOhjGRoYtXWQ2jpnToeFhQouLkHAQoZ9c+bMwcWl"
"vq2YFpDkP5JtJc9DeABR8ZqSUnHixJ2UlBS62orLbrKdQnIaRrObvGBgVFZWfv78OT3LYUlJ"
"ySFVSozaSjSAlhKQypautkpKStLTSgYGBgB9NjiK7ILTQgAAAABJRU5ErkJggg==")
playbox_button_highlight_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADoAAAAVCAIAAACYI2qcAAABLElEQVR4nO2VIW+EMBiG3y2o"
"SnamCa5b5mvIiVnMxDQSzW9A9TegK9EnzmBPLBj8csgmmB0SPUG5SxgbZTnGkfCopv1e+pCP"
"ljvGGJaDBaAsy7k1jKCUWs2orut5VQYhhAC4n1tjHKvulKy6U2L1T3M/2p5knCo4Xhg8vIvD"
"JnzDLk6V44WBawNAsRdJ/r0ywXkGABzPMChPW71+qTHV/QnuB09HKfQePs+TqwVVGou0feUe"
"0wFd2w0itxkW502fH6vjQbVPBwDeX9m1/WvQWLfKLp3qrunGVpmMP3+vvFZQM+6o5R+F7b5w"
"qDQWMqv+Idhh5LebJ3ITBlH0CgDFXijwvrK2y1Umd6OCgzDGmt/xjUMIYYwt7N5ddadkYbr6"
"ZljEaUOjSymdW8OULyeWp0mUz7SHAAAAAElFTkSuQmCC")
playbox_button_highlight_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADoAAAAVCAIAAACYI2qcAAABIklEQVR4nO2XMW6DMBSG/1Rc"
"4s1eOABDluwMlthQL+CNrT5AD8DI5hsgdbDkoXuXDj5A9g7vGh0gTUSg4DbITcU3Wc/v2R96"
"NoKdEAL3QwKAmWNrLIKIkm7Utm1clVnKsgTwEFsjjE13TTbdNZnQ9Y3UlgGArZaNB1vdBdhq"
"2dH40cyLSB9dVnieP+dckYQ9nW/U28G4msBWq8a76maFVNSuYKvVx6OrsollpnWPRknTDfOv"
"Td9f04Oh0+oA4Mczh7Y/LRwwfXZTZZxzzqj0eq5vXN/M7zJvVTirO0a2z4/mxYOKOmSTXxQO"
"CDy7WWWUVlICAPJnRxi9Eqcup8o8BRXOsRNCMPNdfDMQ0f947/5VNt016a9abI1F9H8TRBTb"
"ZCmfU0mpwpmLP04AAAAASUVORK5CYII=")
playbox_button_highlight_red = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADoAAAAVCAIAAACYI2qcAAABRElEQVR4nO2WsU6DQACGfwyb"
"E26XbqWYdGNwwpWhPgSrt7WO9wg3SreO5jYfoGVg1cSEwUkTIWzNDQ5MzjgAxhCw0Ii0hm+6"
"HP9/fIHL5RRd13E8qACklH1rNIIQomajj/m8X5WdnC6XAE761mjHoNslg26X1OjGniKCCAAS"
"V3AaIwrEZZAAiAKhcK5wrnhxZfLbDLJ8s2LiCp4HvjJNdeuIPeN1GjKWMnr7fk+r19yvqC0c"
"llLbgrlmLJ2NK5dRa1+w9Q3uZ8PrYm7z9mxN6aRYHQDi6mSJvYuNdUd26FxMkLhi9VJ+lrhi"
"dbOFZdOHs5+Tv1XMabcZrs7NR/9pA23hsNAe/UGxRP3XrWQ8C21hcA4AMNdMQ+X2Lf6yZdO7"
"VsVdKLquSymP4s5ACPkf5+6hMuh2SX4yZJffw0cFQAjpW6Mpn55grasoRjO8AAAAAElFTkSu"
"QmCC")
playbox_button_loop_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABA0lEQVR4nO2WLW4DMRCFX6q9"
"xIAikz2AFYWEG6xUtjcwC+wBfIDCZb5AtVJBpQHmJVHkAywMieYaAWsQZVvHilSDpg9Zzz+f"
"3vhHXimlUF0NABGpiSSiZm6N41gH2fc9gKc6sCv9U/8mtfmxJw7d+7N/e6ELwwUAgHG800tH"
"Pl+tnwAArb2cuVBx1ji4o/XMzN4e3RC/c2Y8s7fwHzGzWCk17kO7XRMA0Hrbhn1cOqUBchW+"
"T8F1AUBrvc6MKs2qN2ZKVZPD12Q2eukAqcLMuU3FjayTt51HOhs7Z7o5CIxjDeDakVNhBKyU"
"UiJS8x0mokd6JR6Jmk5TTWT6SxBlL9cv6Awd/3NgvHR6zAAAAABJRU5ErkJggg==")
playbox_button_loop_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABB0lEQVR4nO2UoY6EMBRFz25Q"
"lbgmuLIhWVlDRozF8AUj0fMNqH7DaiRfgMGuGlM5yWSQJKhFoleAIBB2GVOxs1c1t2nPu+3L"
"e1FK4Vwe0HWdS6SU0htXwzC4QQohgFc3sIX+qX+T6m3u6FN++Co+6nZmpCEATWVKu3aC5JzF"
"PgD9ZX7yAeqqiNS/FKZuCZJzdtK2ZOl8zvFHXZd267K9L6yjsL9fW4D2eu/DSK+dvQEeyLpT"
"YZrnKfSXYjMo+7PaW+PHRw0QvL/5zc2uHYCmMsYY89On8ktWP87ymKk3yirKxyA0lbHA0gmS"
"nRFAKTXORjcSQiilnmlKPBN16mGXDTVRpZQukcA3XUttf1c7/3cAAAAASUVORK5CYII=")
playbox_button_loop_red = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABGklEQVR4nO2VIU/DQBiGn5I6"
"1BLMpa63LZmrmJquGMXjZqkbyPsJlc0kFjcPragdyRJEHcm61DUVS0Chi1gxKysN4gTjU5f3"
"Lu+T97vLd4aUEu1lAmVZ6kQKIcz96mM+14M8XyyAMz2wg/qn/k2qeXQnj43VRTYb97+EKA68"
"FADnupraTWX78jBICgCs0J/d9o56d86ax97OzZSqlB/uln7+nbLHK5W53K3zFrOu1GiTTkay"
"D9C7Gln3m7ypdA3Q1uHfVbo0UsAKfbvlVNesl0PnOVlHAO+Pr8XN0G4qUHe4Um2Xyg9Zi2QQ"
"JMDE9Vfj6ZMTeEFaW9vAobJ96xgBQ0pZlqXOOSyEOKUpcUrU+g3vP1utVCGETiTwCVLMcMKP"
"a6mHAAAAAElFTkSuQmCC")
playbox_button_play_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAh0lEQVR4nO3UsQnFMAwE0MvH"
"lTa4+lbzGt7E+2kD17+IqxQhsYIgkKtUCB5IQpskpKcAcPdMkmTZqzFGDmlmAH452CGfepLW"
"Wq01WwVAMmivTzhiR/e6Zj9zTXft8ojq7r336/1R9a4XVde8dTXizUjaP3JOzEzSSz7ii9V5"
"TZmrnSrJTBLAH2jAKJc1SonSAAAAAElFTkSuQmCC")
playbox_button_play_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAiElEQVR4nO3WMQrAIAwF0G/x"
"EplzHQcnT+CxPIGTg3fyGh3cOpSaSEqhf8ogPEgkxDEzzOMBjDEsSSLys6q12pApJQCHDXbJ"
"r94khNBas1YBlFKUtrzDGls7V5m95zet2n6LmnOOMT5/r1VXPa0q8+SqxhOqvXeNN/ORjfhh"
"1THzO7cEEVmqAE79lyr2lsSv4wAAAABJRU5ErkJggg==")
playbox_button_play_red = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAi0lEQVR4nO3VIQ6AMAwF0A+Z"
"w85U72rDoneE6Vl2vxksGsRQCMLapQkJX1U0ecnadINzDuoxAEopmiQRmVrty6JDTikBGHWw"
"W371IUOM87ZpqwByzkKb/8ISWzpXnt1nm1pt00X13q/Wvu+Xqq2eVOV5fFXiMdUjBIlX85GL"
"+GH1mmv9bFVVItIkAZwWKCmglIyMIgAAAABJRU5ErkJggg==")
apicoverlapmask = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAJYAAACWCAYAAAA8AXHiAAABmklEQVR4nO3SsQnAMBDAQNvN"
"t9l/Wi8REQh3E6jQnplnwcvO1wH8k7FIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFI"
"GIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4Sx"
"SBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuE"
"sUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiL"
"hLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgY"
"i4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFI"
"GIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4Sx"
"SBiLhLFIGIuEsUgYi8QFqpwBUYyXa/gAAAAASUVORK5CYII=")
macrobox_icon16 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACrElEQVR4nGWSPYicVRSGn3Pv"
"/e58m/nZn5nsYiCyGhFMZ0QQi6hNGm0XsRCLYKEI1toYaxFMqRHFQmHFwk7QJmCpbiGm8ie7"
"LmxiJjOT7M58M/Pdn2OxuxjZFw4HDrwv533PEVX9azro96b3BsGVC4CAAGKOuiBijrqAGHKK"
"NJrtotFs9aXq39brV94m1HP84hK4Aik8ai1Yh22UmMIflm8gzqM50Wh1ePKlDTWz0XAYJ/uU"
"7Q7WFdiigfENYlVRj/dBwXqPsRYjBmMMzjeopxPm1XhorPf48hRiBOsKjDHcu/k75y5e4qlX"
"Xqfq3yZWFcY6RMCggCLHYqCQM6SMamI6GHDu4iWeeHEDgDCZsPX153REKJeWyTEc5qCKquJQ"
"EM1IzswGd2mdOcuF197iGOvPvgDGsLX5KaCUi8vkGFHNoIpBFYkR0UTZbDPZ2+XGN1/wINaf"
"eY4LL1/m/t83md8fkWMgzWZozkcWUkRCRDwstDvc2LxGdWePp9985wGR52l2V/nlq0/IoSYr"
"aEo4ASQliAETAtlazHyGpsQJ5Eyu54gImvNhBqIZGwOSMzqfMR/vc37jMudffeN/3O0ff+Dn"
"ax/SObuOyZkwm0BOGDKYGLExEkZ3aPXWTpB3rn/HT1ffp93t4YxFqwOoKkgZA4oNAalrTjWX"
"mO78wa9Xr/xH/v5btj54l6XTa5TOw+QAmc+Q6QQ04USVog6or8EVtBZX2N38jIXlHq1HHue3"
"j96js3oGX3jy5AArYOoA1RjJCUdVIYO7WGvJKaHO0X34MXa//JicEp3eGhYhjwbYo600BvSf"
"W8h4jCt6qysLUlBv71B0uyQjqDE0RVEMjEbAEAOHj6OgwwFtX1Ku9FZEVf8Mu9un61t7tWk2"
"0WPzcvKKx2Mdj/GrD3m3/mj/X0XjRJEoM1EtAAAAAElFTkSuQmCC")
macrobox_icon32 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAGSElEQVR4nMWXW4hdVxnHf9/a"
"ey7nTDK3zMRKHdKaRBPbJFQQhiC04JPUF8H2TSERFSm1lYoQb0QIWkSkoBB98qkPXl5EiA8i"
"BStJbYrQW0KSpm0kETOZM5lJMplz9l7r+3xYa69zZiaY9EHcsFhnrbP2+v/3/7usb4mZXQG2"
"8v95VsTMDMB3b3PtzOsUI6OIc/FvkdiQgd9xXhCQ/hoZXCOksYtrECS9qxqYvO9+iqFhAErg"
"ZnXrxtZTP/0unQtnGJ6YjgScw1yBlCU4B1IgRZzHFeAc4orYirhOXBl/Oxf7okTSWpfmgvdM"
"3Hc/D3/mcwyNjq6UAMvvnmPp/NuMzz0YNxdJQAmsKEBcIlbk/0QiCYr1gJlAIhjJOlyau7Hw"
"b24uXmX6Izsok6YMt8aSuglcHOAGZI1zjZRmhhRJ7mQCSe/Hcf/JIzMwpShLwLIJojzRQOAs"
"LkQjsKWxxF5E6K3eIqhneGycofYYqGGiYBL3aPzE4pyZIOZQNRyKmZKcA5eZAWIWmaVxBlfN"
"4CuXL/GhPft45IlDhLqiu9wBJ6DNOsXMMFXQBGaa9jRUFULIGCWNGGbxRyYBNF9lhgArl99j"
"9uP7OfDkIVxRMtRq8/df/5zu9Q6tqRlMDSREM5lgagiKmWRDmAgaQlKhUYA+sGhDQBtmCMLy"
"Py8yu/sTHHzqCK6IrjO7+yHmv/wsvtdlbXkREcFUseBjb40CAdOQVVGzLLKLTpIA1fqyJ0VE"
"hJtX3mf7xx7m4NPfxxXFukwys3MP84eewXe7rF1fREgk1GfQCBzyHCFkJ8wKuMbxzBBt7BZY"
"61xjbv5RPv2tY5vAM4lde5g/vIFECLE1JEK0vQWPBZ99IDlhNEF2Qu037fUYnZq5I/AmJQ5/"
"A99dSySI4AlQNaBBE7FNBBSCImYxdEi9Ge3Z7Zz/429448VffTASS4uIGeYbEnU0QQio95ht"
"MAEWQCOJJmxEFVFjy/YPc+HE73jjxV/eA4m9yRxr3F7qRCW8T61GfY35OmKwMQpCgKDRHxoi"
"wSMiTMw9wIUTv/8ASjxD6N2mu7IEZv0v9zVaV9ExGfCBbP9MQnM0SAg4YHLuAS6c+C2njz9P"
"qHr/ncSuvTz2zaMU5RDV6q3sC+Y96uvoY6REJPTlximEuIkrYhbIOQmg8eJ7eERc3+FCAIl7"
"ma9zIiob+UVDcjwX83pI80UBhWHec+PSRXY9/gQHDj97V/DFd87yyvHnKUZajIxPZgKYgu9H"
"QZnZqsb4VwVxmEuJSQTRwMqli3z08SfvCbzzzllOvfBD3PAwI+0xrK5yMWPYOieMJsge3xAA"
"wWECaODm5ffZ+dkvsO8rz90d/PzbnPzZD3BDw7THJ9Gql6uifLpWVT41swJukIAjHiQirF69"
"ws7Pf5G9X3rq7uDn3uLkT47gRkZoTU6hdZXkHagO1KDurVeg8XTRAOoQM9Q5TITCFVSdhbuC"
"L517k1M//jbF0DCtian45U0hkurESEBhUxQ0JggKEmINRzw629tm+def/0Bv8SqfOnY8V0Tr"
"wM++zivHnqMcHmF0ahtWVfn9xGAdAanvkIhcCDjTaIoQYuyHgNQ143MPsnj6ZU5/52s5ha4D"
"P/o0ToTW5BR0u4iv4/veI94joUbq2Fxd4epeNHdDQNRwPiA+mkEGSIgPSO2Z3LGbpdMv89qR"
"r8YjFei8+Rqnv/d1SnG0p7ZB1UOCj2Hma/B1JFPXON+QqJCqB2mPnIgKDRRBMQmYM0wEcy7V"
"gvE8n9ixi6VX/8qZF44yO/8YZ37xIwoRWtMzaNWLBQnNnWBAplTYOMBCoKiqVPgMOKFLkuMk"
"1nQiqCrmYkVsEgvOiR27WfzbX1h46U+UW8YZmZ5Fe72+Le/gIw2GpDOh7PVw68LQB1wd4mQI"
"8etTeW0aM6OlW46FwEhrDGu1kaLEul3cIOimL++DN9Fm3W50xGyCEJBOh2J6e6xeEgFzsY85"
"XGJ92oxFwIe+p8sguuUuA6fmzNCFBdwggfa+R9iydz+r/3gVt20WK1wGz326B1qKaWsyWwO3"
"AVuwdJWIha6o4oKii9eYeugAW/Z/Mq5rLqdh+Tq3Tr6EjLbjzYgGcL22ttHEdzL5QKTK4J0j"
"KHZ7la0HH8VNz2QCl4nXc9u80//0EWDlP6ozIjEdXJd7AAAAAElFTkSuQmCC")
macrobox_icon53 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADUAAAA1CAYAAADh5qNwAAAL+0lEQVR4nN2a+W9dxRXHv2fu"
"c7Adl+fg2BEIMF7ipNkIQq1IkPpjUdUfuqjih6qtWopNBG0FOEE0NBJtKa2qItoCbX8pFfwD"
"lVqhFqkLixqykYSEBIFYYhqHOIuXxPaz/e7M6Q9zzty5970Xgl2pKk+6nrvOnc/ZZ66JmUcB"
"fAIfn98UMTP/r0fx3/6VAFxEQVOT770FU2oCDAGg/BOkx1Ro4uPomZpjyrXhL0UP1Nmv1z87"
"i7aOriLTVCk+ujg6ggNPPYrZ8bNImltBxoCJQEQAGYAIbAzI+H1QIi1ARvcJiJ4jec4/YwDj"
"9/39Jpw3Rp7Vc8b4Z+VdRAQTnvfvcs6heXkb1n7ms1i+oiOnKU9tLV57+pcYf+cNlLtXg5m9"
"pki0ZYy0HsyQAZMJIGHQhOzecF73E2llXwdOxgvAiKBM1h8JCCBCMpkgkqZlmB4/h7f3vIBN"
"t33R9xlDTZ8+iQv/PoHydb2gxIBgclBkyEMAgElkAB6MCL41ej0CqQOuz1KSeLMSOCpqS1oi"
"Cpo3YgkqrNb2FZidnMDM5ATarlrpX69QLq3ClEpAiBsMsLSyT3qN2V/Sa8V9jlt9nvPnZePQ"
"H3vryN3rz+m7stNyLzPYsXcLa4P5BShjEu877EAFGOky8r7iAJEfdHQ9cAHZQHL3xCB6bzZo"
"3XRMXOcag6MAFkEB3h04AEUDjqVbkDTJi0gHHR5nsHMAu7wm2QUh+OucSb+gLd1ndtG+3u88"
"DDPg8lmpBiovmYJWwHkTLChPnzUmgZ2fx/yFSSzMzmRS5EhbKjznIk3JeVcww2BumRbZAXAO"
"zC5vDjGUUmcSz5sfcucVyokAXNAWEaEyNQ5bXcBNt9+BVWs3Ymr0BAgmEpoL/fk+Iqkj01ys"
"TdWIF7aDC2aoYNk4c3mKlVI7JRIwgu+NMpMjAolkQz41CSqTHuiWwWF0rtmAVes3Y+b8GZx7"
"5y20X9vtnxVB+Ld5ZycjZuwc2Bh4N3EAG38/OYE1oGD0Mk7nYozI/CLTINVaLho11hYzwxhC"
"ZfI87Pwcbhnajs41G/wLkhJuHdqBlX0DmBodgXhu0LL6B3LvKpihcz7KiZ9mJsxwqs165kfB"
"CDkLGJEdB+0JjPoWMYPIYG7iPOxcBbdsewCdA+tzkqMkwa1DO9DRuwaToyMSZRVGzNBZP3j1"
"M3aeKfY52c+ZpIJF5pcPFJErBTMpRi3VlgQTYwwqE+dQnatgy90P1gDlwO7ajq6B9Zgafd8n"
"VGaAbQTmMjDxlwCmEbAOWM56aqAEIDi9mEA+b3EAo8SgMnEetjKLLXfvxMqBDXWBQv8mwdbB"
"YazsXYMp0Zg6u2qOA4CrD+aie8L+pUK6mlWcg2rykwekxKAyfhZ2roIt39uFzrUbLwkU/7YO"
"DqOjd02msQAiYNZF4TozRe87cs1l1zxwAyjVVgajZuiioOEAB6SzM1jWdiW23vswOtduumwg"
"QExxcBgdvQO4UAeM2WU+piYn5seOA4xqip1FQ/NTTWjibWiGzJibOI9rbt6KjjWXNrkPA7uq"
"dyAzRZdJvW7wcBGYc4BeL5hgFNI5KzcamqGq2aG1owsjL/4V7/3zuUVBBbChYXT2r4t8TAfL"
"AYxtKqnDX2N2EvEYzNbfH/3ymnIu5AaSkkh9yoSk6cFNqYQkacKhp3+Fd//2p8WDmQRbBu9D"
"R89ADixoSbTCNvVAqplgelHuqgsF9tSSk2r8S5ItOS/BUksL2rquweFnn1waGBncOrTdg50c"
"kam7hngrA2ewtQLrW/UnbqypKElpNIk0w/A+FcDENErNzWjrvAaHn3kSJ178y+LBksSD9a7G"
"5MkTIEgZ5vJBg53zcNYCTrRnbQOfUjD1Ha2nmGG0pInBXB5sedfVOPSHX2PkpeeXCLYDnX1r"
"MXnyRFbbqe9YC9go6lkLl3qfa5x8o+gGdoBVMDQGE1Nsam5B61WdOPj7x/Hu3/+8NLBtD6Br"
"YB2mTr4n73cSOCJzE/NTTdX3qTphOwPjPBgXTFHBWlqxfOUqvPbMU3j7+T/C2XSRZIQtdw7j"
"6nWbMX9xMvKfLNmywgTzyx43UT/efxyH4rYhGDIwE8AcYD1Yc3kFDvz2Z5h4983FQQEgY/Dp"
"b34XsBZ2fl5qRDE7diEJs61NvtkSGTMIztd/jv3qUZgkOsAiLH0ZODgiMBPIrzt5P02AdHYa"
"c+NncNMd96K9e/WioQDgwLO/ARhIksSnGj/LDAuaTORhC8k3P0lkhpGIxxayZBXNeaBgOr3z"
"m1/qM6jOTmN67BQ2fuMe9H/+9iUB7fndz/HBkQMoX9eTWUxudZd9q5VF9AtQvhzypmTgZ6V+"
"ckogklmmSsQYwGR9wxhUKzOYHTuFzd++Dz23fWlJQLuf+DHGjh9G+dqeMGDWURIJW7b0XAzp"
"OU1plc7OT5+JCM7pYqNMoJmk6kgA4zWUVmYxPTqCG4e2Lx3o8Ydx+thBlK/vlVCNbDFU19M5"
"eL0XdEOoEP28CTqRhSFf8rMsCwc/sxaEEuz8LKZPjWDTt+5F7+e+sjSgx3Zh7PWDWHF9n49s"
"ekGgWPfjVsN6vUAB+CSrhawsiYAhS8fBrGURxhiksxcxfep9bLhzGH1f+OrSgH7xEMaOvopy"
"dx/YpeKr8tUj1kz81YUAWAtOq43ND8wg0VbolBkspuhXk3zUYbuAtDKDjUM70LtEoFce24Wx"
"w/tQ7u7PLR9znc86Po9mq7FsLcjaaJ2yxvx8ciPnI034HAONcuy/bDAw+8Eoer789SUD7f7p"
"Axg7sh/lG/qB2Ie01cHKPhc70OK2fpkkWnK6QJmF91DYOq35HJrbV+Ds3hcx+ebriwZ65dHt"
"OHNoD9qv6wWlvvQh50BSBmWbBAPdz222JlDklsgIAhRmoE5eIpuAgh2alrdh7txp7N05hInj"
"r31koD2P3I8zB19BubtPpjvRQK28z1qQ9ZDkxMxsGrXRfqMqnVymHRL/Qu7Yv5QcA9UqWjq6"
"YJIE+36wDRPHD1820N4f3Yczr+5Ge3e/H5RowAvOgorClC3WTjgnPhX/cgUtOSmR2JsYCh1m"
"JimFZbWK1o5VSJIE+x66PLB9j9yPsf0vo3x9H7ha9X4UtFG0DA+JGDpsNgiEbNpoPsX+Blkt"
"yrRWB84xjPpfdQGtHatQShLs33nXJcH27roHY7v/4YOCS/1mrYBJm0bHqd8ojcwt9RvSFIiP"
"G/mUcQyyDkbhohDfSHPEDK4uoEU0duD7g5g4dqhWQw9tw7n9L6O9ZyDzFVar0ADlpEC1GbQG"
"C4WW82Rt3rcazaeCeQmcj/+Zf4VB5OBsTmOJKeHVBwcxfuSA79Y57N85hHP7X0a5Z7WXrgyU"
"bL4vEzaONj1ns32bNz/TuPbLfMlI8vXR3IYyJZQrJDmLNTnK+eoCWld2oXL2NI7+ZDvW3/9D"
"nNnzAs7vfQnl/k/6RKlVipoHR/9jAeQ+c8YCz2coWQiCX4wxaSpL5kUohkhAggRTyH3MABPn"
"vpKzlksctwAvOLR0rEJ15gKOProDpmkZyj0D4DTNkng8cKLahFr3F33dZA4f8DhNYRqVSQRE"
"ZueigQtcDKYSjQF1fgMGqvNouqIZSanJT1MEqEYTuf96+TAmmXyE5W+BS1OvqUa1n9E1Csde"
"+oQ8nGqD/IRSpZzTYABmJGTk45nmEYqay0JRomjtJAMj0VRSTXMfszOoNAVVqzDLroB+yfOD"
"lQ4ifwpzGzVP4uxYpK9tBur/5Hnqm144W8fcSBdWBQqpBc3P+1xVhKJSCVSZAzUvB6UWbBSC"
"RDPItBcBkKxV+Gk/SdEbz1SLU4b8cY2+FKB4rCtYyFc5sA6YrcDIv/DkoK64oR8t3b24uO9f"
"aF63ya/UiKT9lD7TkmpCg1dw/CgShhPFURNdwvCyMlxBANRoh8RNiAgLbxxD2/ob0dK/NnsF"
"M1+A/Gvc3PEjOPGdr6H6wSlQayvYJCLpzK/YIGhQ4WLNhAidM7sGgAUe3Sn6D2lQiPKlsRY8"
"M4NlHZ3ofuIZtGz+lHYwlYMCAFSrmDm0F9S0DGSSaEpdZyAfFo4/SiyogSt0w7Gfyb8mLCyg"
"9cabgeaW+PaP739mngRwJerK6P/uRwAm/wPqLWN88pC3HAAAAABJRU5ErkJggg==")
macrobox_icon64 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAPSElEQVR4nOVbXWxcx3X+zlz+"
"KuIuRVKUI1q0ZBuNndhRbBSJrMpo0AJ962ubpyBF7NqyE6QNbImWYiVuURd2EtvJU1+CPATI"
"Qwu0r31sq1iJ5b/EiWI4jpNY0Q8p2eSKSy7J3Z05eZgzM2fu3iXlmIUffIHLvT+zc+d85zvf"
"OTN3Scx8AUANH86tQczMH/QoPsjNAGh+0IP4ALfGQNXVyy89j8Vfvw4zNAQyBUDhDgEkJ0Rg"
"IhBRz3UgXPe3wjUmgEptiUzqnwzIpP5S38ifQ0jP0G1023AdgHMOtalpTO2/tcfWHgBe/4/v"
"49y/fw/FyCiKwRHA+E4ZMlhD4DAQY0DGCBgmGUNyPQAibfxu0gCJBGDyhpPx58a38X2HYwJg"
"5Jm+bXyOAErGwIS2pgBBxssMdhazBz+Nm//0cH8AFn91Dm/81w9Q23cAAyM7wMGFJnmfyICD"
"B8MATfCwAQWw1MASAABQAoD8YJPnClBhkoeNASEApAA00qbUtyHyjy2MPMuACgN2Fhd+8Qom"
"brwJ4zfMRJuNBuDKay8CjjEwPApmBwR9DDLJDIBBpfP4mRqqY5T6UW10P6y+x+r7zHKr+pxL"
"fcdvcnoOOwcqBkBEWLp4XpucA8DWwgwOAs5VG1I2gjkNSN0nPbBKQNIBZ8amPmP/sR1K1xVY"
"zBLtnJ7rOJ1zAKEAO9sfACqK+EAqj18GyT2AJO+RBiMbcMkY7X3VR3xCBDaxIXlV7qk+ObIi"
"gcWAOFIxx3EKy0oA9IC0l5P9CfGyFzMKA6S9kQ0+60yMYQAu3SuzQBlWDjuqZIHL2BQAjcD1"
"AyCAEJ4ROuwJA30tGpsMZd0uY0F5ULpLVs23YoE8x4VQ8+11ny62d6o/xYh+APQir44j5YJx"
"2ujghZw9gQXGFHDtDbQWr6Kz1oIpwqMrWKA85rthFdOIgKTvcfYs7W3frQZhCwAkoSnkZZC5"
"u6Lnyw9MhicGGWOwvrwEZ7u4468/h7Hpj6K5cMmnsB4WhH6cGnByAKt2ebhUh4K/5L2f21QB"
"QECQgs0opTwVv6wHBogA9rLAGINW41101tfwqb/5IvYf/gvc9bdfxMjYOJoLF0GmiHGsM4I/"
"dODgPRUKLrLAIYpbCAXn+6EYSk6asspufQCIhgXvZZTJDUZmbNl7/pxMgfXGErprazh031cx"
"fdudAIDR8Qnc+/BjGK3vqgDB5WBn9GdF54iS0hQPStADDYKTUGDOQegFAIEFwZhc8au0INwn"
"NRBTGKw1FtFZa+GeBx7B7o/dkT1npL4LR47OYaRWx/L8JV8CR5yDZ+ENci4zNmiO1zS57rTa"
"bwJCHgG9aZDYg0CsChotLj1akIsesQNMgfXGIrprqzj0wKPY/bE7yzjnINTrWF645IUxE7YE"
"cgCBxdgEQvJ8DgJXglCe/ecAaErFHBsMCzaXWSChEPTDGKwvves9f/Q4pm//ZKXxYRsdn8C9"
"R+cwWhtHc/5yAsE5hKwQRSyC4JKRyOmfQHC9TNCZogqABIK30bMgdRiQTFqQC6IRz3dWmzh0"
"dA67b9vc+LB5JhzH8FgtgQCIwRUgQGuFeDpjgsog8p3oqNKz8yyAEAaaBUoQteCVBJGMwXrj"
"XXTXWrjnSycxffvB6zK+DMJIrY7mwmUYmU5rUYwgWJcEszIcvFhq7WCngOgHQFDigFLKCEl1"
"IwtUKJiiwPrSIjqtVdzz5ccx/Ym73pPxYRsdn8CRh45jpDaO5sIlDwIDzLYXBB0OSh84prsg"
"pE4xSc1wqwDIPBpDgbcMhfZqE0NjNRw59iR2f/y9eb68jdR24ciDxzC8s14BQj9NEKOdL3Yc"
"Q9ggRgfvByD6AgDEuKIAgg6FKH4hFHw82vUNjNR3YeKW296X8RGE+i4ceegYhsc0CFyhCRZs"
"bTQ81glB/DI2XEcIZBThVOr2hIJLDAEzRuq7sPTWGzjzzOPbAgAAjNYnPBPG6mjOl0BwovYO"
"YpgAwVLouCSEiQ0esC1CICAoCDBgSqFgVNrR+XXH5DTmf3oWP372VM9D/mgQxgMINS+MsU5I"
"8cxZSJTYwDkbYlj0BQAUYyaluRQKTuuBU3ogyI/P3oLLr76wvUwYn8C9D81heGcNyyUmsBNd"
"cInuZTZwZEMIgbz/EgAqthXFy3pgWFeJCgRnMT57CxZeexlnvv21bQPBa8JxjErZbIxJcwel"
"8JENzoJtV/TBJrACYJsCEAx3ViYhOQgcQBA2ZOEgINRv3J9A2K5wqE+oucNFvyzOrNjgZMwl"
"IJwFu27KBJtXglQCwQFqfZSkPI7lZQTB5SCwQ33fAcz/7EWceWb7NCEWS2M1mUWaxNZKIFRo"
"WM8KbLYomoogXVFZAUEyQXkOkIFQEQ4/O7v9mvDwCZk7XPDvKaLgaSBUigwaYK0HZDMA4FAN"
"QpYZXAZCHg4ahC7q+27Gwmsv/T9owmMYqdWxevUyjCFZCElVYQJDNMDarQshX/M7oXsJBNsL"
"gg4Hw6IRMTvIANiifuMBXPnFq/iff/oHrC1e3TYQ/vLRJ1HfO4v1xqJKi6WqL6Z2AaMEQPZq"
"jCB53zl5P+fzf5wlWetfSRkokCRZEmCYwMRwLr7Fko79OoHb2OgRofezmYFB/5osGptenAIk"
"z5JrTIkd/QAI01oCPIJVIDgHyEtHMPv3DHLbEUBMMBQqRoAKg2vn38LkbZ/Ekbmn1cuX97e1"
"V5s4/dwTaDXexc7pvT7eZSw+HhUYMnZfN2yaBTyFCOxvSMlZLYx96oSQEQCQIVz73ZuY+pM7"
"cGTuqe0zfqWJ089+AytX5rFzcg9cpx3L31AYhbFXXlNbHgIs4DkHMgQDAxeE0ZAABM9vZwE2"
"8pY2rBswmHyyNES49ps3MfWJu3B47ilQUflThPe8bTSv4fQzp9B65wpqe/eBbQcUZi3M0esU"
"aO9P/R/bWwj1jIqimAFEDoZIVlQBGErTYpKy2cK/iqYwYfIPv/a7NzF5+0H82YlvggYHt8X4"
"9soyTn/ra1i5chm1mZu85+UVOyVLEWgfVzbko6oQygEQ48MKUAh7CXewFRAIqSNygGURR//u"
"fvntX2Pq9oM4fOq57aN98xpOP30CrSuXUZ+Z9cYDCL9UyX+hkj5JaQBVlMKlLJDyJ3krw4TY"
"//oDQHjDmocEAGdBZLD8mzcwecfduOfr39lW4//vX49h9eo8ajM3gTud9BMcMJiV5+OhKJd6"
"GxwKo74AgAFy4TW3B0GY7cXREAgqJIhAJEwxBZbfeh1TBz+Nz5x6bvtifrmB008ew+o786jt"
"nYXrdtJYiQAWBdCKjxD+pSVQ290CAPh8TeJWFlEhyS8s+T2GBEN+tmPQmr+AyTvvxqEnvrut"
"xv/oyUewunAJtZn9YnzyMoeUJ4CEe3H1qgTAliEAp0IgCJ307dnAMi3gyAZ2hPWlqxi/9eM4"
"9C//ti2GR+P/+R+xsnAJtX37wd1Oj5elSEEChWNNEosChQHb3jTY+2Yo5FPJ7WEPVVRe8voa"
"YGBwCO2lRSz/9s1tMb7dvIbnv/EVtC5fRH3vLLjT8RoTZnpqsdMf21QH6HqAS9eudz3AG6zZ"
"4BIQYrSJy1AOQzvH0F66ih8/8gUs/fKn79v4H339y1iZv4CxmX3gbifqEjnnj62TY6eObZwG"
"R2PDbsPeqwE9r8aMCoHIBqfYEKopmf8TM7jbwcjEFMxAgbOPP4Slc6/8ccYvL+H5Uw+jdek8"
"6jPe86yXtqIzBIiS1wMQERjrQNbKNevPNy2FvdwLzV0CgnNtMJzCIhZOnQ5GJ6dhigJnTx7F"
"0i9ffY/GN/D8iaNoXTiP2sx+cLcbn2XE6B56RzD67TY/5y1CIKO5rvGlAz3XJucSECFzdLvY"
"MbUHxeAQzp58EEvnrg+E9nIDZ078PVqXzmNsZhauveEp6ywgwuXDQAB3igkBnGy3ANu8jXMw"
"VnSkPwPYU4ZZ1QQuAkH9gAjtnPNMmJpGUQzixZMPYPHnL29q/EZjEWfm7sPqxfMY2yepjl1v"
"7Fpbsbu4U8UOa4Gu7LYLdK9DA7yhFsa5/kBwFRBKIzodz4RiEC+dfKAvE9rLDfxk7j60LryN"
"2o0HfIUXvIywl5VeC1t3iz0IYVd9brEi5L0JJSplIPSe2oSVl6QJbYxOTWNgcAgvPXZ/jzC2"
"ry3hJ4/+HVoX30Zt9gC4287EjYT2if55Ntp6T2MzzsFYv9OmaTAyQNb+Fe0TEKV4VBqhlZmY"
"gU4Ho5N7UBQDePn4/Vh87UVP+6V38MJXP4+13/8WtX3ieSVUfvA25v7r30uK7yyMsykT2G6P"
"BpTWAzgZCr+qQhRKYqkMZWeQVIthNiTlM/yqUKwiO23smNqDtavzOPfUY7jl8w9j/n//G2sX"
"38bY/lu98ZqC7H+kB6SJXVb9XdcWpuaIK0DELCK4xXTYx75DvuIns4FQe8v8gOM/TYRmYSnK"
"f7IskXG7jdHJPei2VvD6c09gYOcYarM3w2njo8Wcnkx6DFtv2uBgj/61m+l2PbP6AUAhzuOq"
"JiWD1JIDy+InybQ5zMfj9IEh99V3O20UQ8MY3X2Db9tui8NJMaA0eSmdR7uAHLg0AYiGe3s4"
"ARIB2CoEmEEuoCHeJ29wAESWCjwz1YzMt4P6DiIzQshQoBJLuJSnrZsYHVpWtoier6a/B8DC"
"2C0AMFLg+AF7T7IMPHo7rLhWgBFXkYNhYWWGZN0urtiwDyNW5hGUJ0ts2AyW+PXABAEhFnX+"
"eKDbhdl0PaDTgelKIWFUDAu9vadZFkJQAYZfnQnrkSSGptlrGYSgLsiva6v6bsrTwXAV835N"
"IAegsBbUbvcHgEDgtTX/3sO6SP3gee/htFASRhD+ByjeB2L8B0Ag64hRScKiRgmIzcynngas"
"hpG8T/E81AS+f9NcQVHqOKsDaoc/C2O74NVVXwdYB7J++mmsFBRhDiC1gnHIp6iqnS9CrJ/M"
"SMkcJjdGChzjGMZy6lsXL7IXsuuCpnDWX5NjPz6LwloYa2G6FkXXoug6FNbBrLZQNFdQP/Tn"
"/Rnwkc8cwZ77v4L5Z59GMf4RYGjYYxzojqD4SMpPkLpAfybP+u8EF6oFyiqxo/IB91Ahm86K"
"d6PXQ8UaijXnwcbGBtxiEzc8+CXUPvtX5f54GcCYvtj4zx9i9ZUXQMPD8i9tlMZBKlMrgyPP"
"K4xLYGxlcNWWC10aeGZE+gxaEMBxXfD6OnbceTd2fe4L5c4blQB8iLbGh/6fpwcA/B7+3+c/"
"bEAQgMYfADrwx8s5l7JzAAAAAElFTkSuQmCC")
|
from flask import Flask
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
# from sqlalchemy import UniqueConstraint
ma = Marshmallow()
db = SQLAlchemy()
class User(db.Model):
    """SQLAlchemy model for an application user.

    The composite primary key is (api_key, username); there is no ``id``
    column (it was removed, see the commented-out remnants in history).
    """
    __tablename__ = 'users'

    api_key = db.Column(db.String(), primary_key=True)
    username = db.Column(db.String(), primary_key=True, unique=True)
    first_name = db.Column(db.String())
    last_name = db.Column(db.String())
    # NOTE(review): stored verbatim here; presumably callers hash the
    # password before constructing a User -- confirm, otherwise plaintext
    # credentials end up in the database.
    password = db.Column(db.String())
    email = db.Column(db.String())

    def __init__(self, api_key, username, first_name, last_name, password, email):
        self.api_key = api_key
        self.username = username
        self.first_name = first_name
        self.last_name = last_name
        self.password = password
        self.email = email

    def __repr__(self):
        # Bug fix: the previous implementation returned f'id {self.id}', but
        # the model has no ``id`` attribute, so every repr() call raised
        # AttributeError. Report the primary-key field instead.
        return f'id {self.username}'

    def serialize(self):
        """Return a JSON-serializable dict of all column values."""
        return {
            'api_key': self.api_key,
            'username': self.username,
            'first_name': self.first_name,
            'last_name': self.last_name,
            'password': self.password,
            'email': self.email
        }
class UserSchema(ma.Schema):
    """Marshmallow schema used to validate/serialize User payloads."""
    # id = fields.String()
    api_key = fields.String()
    username = fields.String(required=True, validate=validate.Length(1))
    # NOTE(review): the User model declares ``first_name``/``last_name`` but
    # this schema declares ``firstname``/``lastname`` -- dumping a User would
    # leave these fields unpopulated. Confirm whether the mismatch is
    # intentional (e.g. a deliberate external API name) before changing it.
    firstname = fields.String(required=True, validate=validate.Length(1))
    lastname = fields.String(required=True, validate=validate.Length(1))
    password = fields.String(required=True, validate=validate.Length(1))
    email = fields.String(required=True, validate=validate.Length(1))
|
# -*- coding: utf-8 -*-
"""Contains Cleaner class and clean function."""
import logging
import numpy
from cleanset.base import BaseEstimator, TransformerMixin
logger = logging.getLogger(__name__)
class CleansetError(Exception):
    """Root of the cleanset exception hierarchy."""


class InvalidEntriesDefinitionError(CleansetError):
    """Raised when the invalid-entry definition is not understood."""


class InvalidTargetFractionError(CleansetError):
    """Raised when a target fraction of invalid values is outside [0, 1]."""

    _MESSAGE = 'valid values are 0.0 <= target_fraction <= 1.0'

    def __init__(self):
        super().__init__(self._MESSAGE)


class AxisError(CleansetError):
    """Raised when the axis parameter is outside [0, 1]."""

    _MESSAGE = 'valid values are 0 <= axis <= 1'

    def __init__(self):
        super().__init__(self._MESSAGE)


class NotFittedError(CleansetError):
    """Raised when an instance is used before being fitted."""
class Cleaner(BaseEstimator, TransformerMixin):  # pylint: disable=too-many-instance-attributes
    """Select rows/columns whose fraction of invalid entries is below target.

    Parameters
    ----------
    fna : float or tuple
        Target fraction(s) of invalid entries for rows/columns.
    condition : callable or array
        If callable, condition(x) is True if x is an invalid value.
        If a 2D array, a boolean mask for invalid entries with shape
        [n_samples, n_features].
        Default: 'isna', detect NA values via pandas.isna() or numpy.isnan().
    axis : int or float, optional
        If axis == 0, first remove rows with too many invalid entries,
        then columns. If 0 < axis < 1, iterately remove the row/column with the
        largest fraction of invalid entries; values larger than 0.5 remove
        columns faster than rows. If axis == 1, columns are removed first.

    Raises
    ------
    InvalidEntriesDefinitionError
        If `condition` is neither 'isna', a callable, nor a 0/1 mask.
    InvalidTargetFractionError
        If a target fraction is None or outside [0, 1].
    AxisError
        If axis is outside [0, 1].
    """

    def __init__(self, fna=(0.1, 0.1), *, condition='isna', axis=0.5):
        # Fitted state; populated by fit().
        self.mask_ = None
        self.rows_ = None
        self.cols_ = None
        self.col_ninvalid = None
        self.row_ninvalid = None
        if condition == 'isna':
            try:
                import pandas
            except ImportError:
                # pandas unavailable: fall back to numpy.isnan, treating
                # non-numeric values (TypeError) as valid.
                def condition(x):  # pylint: disable=function-redefined
                    try:
                        return numpy.isnan(x)
                    except TypeError:
                        return False
                self.condition = condition
            else:
                self.condition = pandas.isna
        elif callable(condition):
            self.condition = condition
        elif hasattr(condition, 'ndim'):
            # Pre-computed boolean/0-1 mask of invalid entries.
            # Bug fix: the original required set(unique) == {0, 1}, which
            # wrongly rejected all-valid (all-False) and all-invalid masks;
            # accept any mask whose values are a subset of {0, 1}.
            if set(numpy.unique(condition)) <= {0, 1}:
                self.mask_ = condition
            else:
                raise InvalidEntriesDefinitionError(condition)
        else:
            # Robustness fix: an unsupported value previously fell through
            # silently and surfaced later as AttributeError; fail fast here.
            raise InvalidEntriesDefinitionError(condition)
        try:
            f0, f1 = fna
        except TypeError:
            # A single scalar applies to both rows and columns.
            f0 = f1 = fna
        if f0 is None or (not 0 <= f0 <= 1):
            raise InvalidTargetFractionError
        if f1 is None or (not 0 <= f1 <= 1):
            raise InvalidTargetFractionError
        self.fna = (f0, f1)
        if 0 <= axis <= 1:
            # Bug fix: numpy.float was deprecated in NumPy 1.20 and removed
            # in NumPy 1.24; the builtin float is the documented replacement.
            self.axis = float(axis)
        else:
            raise AxisError

    @staticmethod
    def _get_mask(X, condition):
        """Evaluate `condition` elementwise; return a boolean invalid-mask."""
        try:
            # check if dataframe
            return X.applymap(condition).values
        except AttributeError:
            return numpy.vectorize(condition)(X)

    def _fit_remove_cols_first(self):
        """axis == 1 strategy: filter columns, then rows on the survivors."""
        # first remove cols
        self.cols_ = [
            k for k, x in enumerate(self.mask_.mean(axis=0))
            if x <= self.fna[1]
        ]
        if not self.cols_:
            self.rows_ = []
            return self
        self.rows_ = [
            k for k, x in enumerate(self.mask_[:, self.cols_].mean(axis=1))
            if x <= self.fna[0]
        ]
        return self

    def _fit_remove_rows_first(self):
        """axis == 0 strategy: filter rows, then columns on the survivors."""
        # first remove rows
        self.rows_ = [
            k for k, x in enumerate(self.mask_.mean(axis=1))
            if x <= self.fna[0]
        ]
        if not self.rows_:
            self.cols_ = []
            return self
        self.cols_ = [
            k for k, x in enumerate(self.mask_[self.rows_].mean(axis=0))
            if x <= self.fna[1]
        ]
        return self

    def _remove_column(self, c):
        """Remove column `c` and update the per-row invalid counters."""
        self.cols_.remove(c)
        self.col_ninvalid[c] = 0
        self.row_ninvalid -= self.mask_[:, c]

    def _remove_rows(self, r):
        """Remove all rows with the same number of invalid entries as row `r`."""
        nr = self.row_ninvalid[r]
        rset = [x for x in self.rows_ if self.row_ninvalid[x] == nr]
        self.rows_ = [x for x in self.rows_ if self.row_ninvalid[x] < nr]
        self.row_ninvalid[rset] = 0
        self.col_ninvalid -= self.mask_[rset].sum(axis=0)

    def fit(self, X):  # pylint: disable=too-many-locals
        """Compute a subset of rows and columns.

        Parameters
        ----------
        X : dataframe or array-like, shape [n_samples, n_features]
            The data used to compute the valid rows and columns.
        """
        f0, f1 = self.fna
        n, p = X.shape
        self.rows_ = list(range(n))
        self.cols_ = list(range(p))
        # build the mask (unless a pre-computed mask was supplied)
        if self.mask_ is None:
            self.mask_ = self._get_mask(X, self.condition)
        # integer axis values select the one-shot strategies
        if self.axis == 1:
            return self._fit_remove_cols_first()
        if self.axis == 0:
            return self._fit_remove_rows_first()
        # 0 < axis < 1: greedy iterative removal
        self.col_ninvalid = self.mask_.sum(axis=0)  # p-dimensional (columns)
        self.row_ninvalid = self.mask_.sum(axis=1)  # n-dimensional (rows)
        while True:
            n1 = len(self.rows_)
            p1 = len(self.cols_)
            if not p1 or not n1:
                self.rows_ = []
                self.cols_ = []
                return self
            # index of the row with the largest number of invalid entries
            r = numpy.argmax(self.row_ninvalid)
            # index of the column with the largest number of invalid entries
            c = numpy.argmax(self.col_ninvalid)
            # n. of invalid entries in row r
            nr = self.row_ninvalid[r]
            # n. of invalid entries in column c
            nc = self.col_ninvalid[c]
            # `axis` weights the preference between row and column removal
            row_fraction = (1 - self.axis) * (nr / p1)
            col_fraction = self.axis * (nc / n1)
            # -1 flags a dimension that already meets its target fraction
            if nr <= p1 * f0:
                row_fraction = -1
            if nc <= n1 * f1:
                col_fraction = -1
            if row_fraction == -1 and col_fraction == -1:
                return self
            # NOTE(review): f0 == 0 or f1 == 0 divides by zero here in the
            # iterative strategy -- confirm callers avoid zero targets.
            if col_fraction / f1 > row_fraction / f0:
                self._remove_column(c)
            else:
                self._remove_rows(r)

    def transform(self, X):
        """Return the data restricted to the fitted rows and columns.

        Raises
        ------
        NotFittedError
            If fit() has not been called.
        """
        if self.rows_ is not None:
            try:
                # dataframe path
                return X.iloc[:, self.cols_].iloc[self.rows_]
            except AttributeError:
                # plain ndarray path
                return X[self.rows_][:, self.cols_]
        else:
            raise NotFittedError
def clean(X,
          fna=(0.1, 0.1),
          *,
          condition='isna',
          axis=0.5,
          return_clean_data=False):
    """Remove invalid entries from data by row/column selection.

    Thin functional wrapper around :class:`Cleaner`.

    Parameters
    ----------
    X : dataframe or array-like, shape [n_samples, n_features]
        The data used to compute the valid rows and columns.
    fna : tuple
        Target fractions of invalid entries for rows/columns.
    condition : callable or array
        If callable, condition(x) is True if x is an invalid value.
        If a 2D array, a boolean mask for invalid entries with shape
        [n_samples, n_features].
        Default: 'isna', detect NA values via pandas.isna() or numpy.isnan().
    axis : int or float, optional
        If axis == 0, first remove rows with too many invalid entries,
        then columns. If 0 < axis < 1, iterately remove the row/column with the
        largest fraction of invalid entries; values larger than 0.5 remove
        columns faster than rows. If axis == 1, columns are removed first.
    return_clean_data : bool, optional
        If True, also return filtered data.

    Returns
    -------
    tuple
        (rows, columns): indices identifying a submatrix of the data whose
        fraction of invalid entries is below the thresholds. When
        `return_clean_data` is True, (rows, columns, filtered_data).
    """
    cleaner = Cleaner(fna=fna, condition=condition, axis=axis)
    cleaner.fit(X)
    selection = (cleaner.rows_, cleaner.cols_)
    if return_clean_data:
        return selection + (cleaner.transform(X),)
    return selection
|
# @author:leacoder
# @des: recursive preorder traversal of a binary tree
class Solution:
    def preorderTraversal(self, root: "TreeNode") -> "List[int]":
        """Return node values in root-left-right (preorder) order.

        The annotations are quoted because neither TreeNode nor List is
        imported in this file; unquoted they raise NameError when the
        class body is executed.
        """
        result = []
        self.helper(root, result)
        return result

    def helper(self, root, result):
        # Visit the current node first, then recurse into both subtrees.
        if not root:
            return
        result.append(root.val)
        self.helper(root.left, result)
        self.helper(root.right, result)
# @author:leacoder
# @des: iterative preorder traversal of a binary tree using an explicit stack
class Solution:
    def preorderTraversal(self, root: "TreeNode") -> "List[int]":
        """Return node values in root-left-right (preorder) order.

        The annotations are quoted because neither TreeNode nor List is
        imported in this file; unquoted they raise NameError when the
        class body is executed.
        """
        result, stack = [], [root]
        while stack:
            root = stack.pop()
            if root is not None:
                result.append(root.val)
                # The stack is LIFO, so push the right child first so the
                # left subtree is processed before the right one.
                if root.right is not None:
                    stack.append(root.right)
                if root.left is not None:
                    stack.append(root.left)
        return result
|
from UE4Parse.BinaryReader import BinaryStream
from UE4Parse.Assets.Objects.FGuid import FGuid
from UE4Parse.Assets.Objects.FLevelSequenceLegacyObjectReference import (
FLevelSequenceLegacyObjectReference,
)
class FLevelSequenceObjectReferenceMap:
    """Parses a serialized FGuid -> legacy object reference map.

    Attributes:
        position: stream offset at which the map started.
        Map: parsed FGuid -> FLevelSequenceLegacyObjectReference entries.
    """
    position: int
    Map: dict

    def __init__(self, reader: "BinaryStream") -> None:
        self.position = reader.base_stream.tell()
        # Bug fix: self.Map was never initialized (the class-level `Map: dict`
        # is only an annotation), so the item assignment below raised
        # AttributeError for any non-empty map.
        self.Map = {}
        length = reader.readInt32()
        for _ in range(length):
            self.Map[FGuid(reader)] = FLevelSequenceLegacyObjectReference(reader)
|
import tensorflow as tf
import numpy as np
from rank_metrics import rank_eval
import argparse
def ill_cal(pred, sl):
    """Sum of per-cascade mean negative log-likelihoods.

    Args:
        pred: flat array of per-step NLL values, cascades concatenated.
        sl: sequence of cascade lengths; sum(sl) == len(pred).

    Returns:
        float: sum over cascades of (mean NLL within the cascade).
    """
    nll = 0.0
    cur_pos = 0
    # Idiom fix: iterate the lengths directly instead of range(len(sl)).
    for length in sl:
        cas_nll = pred[cur_pos:cur_pos + length]
        cur_pos += length
        nll += np.sum(cas_nll) / float(length)
    return nll
# cas_emb:[b,n,d] cas_mask:[b,n,1]
def hidan(cas_emb, cas_mask, time_weight, hidden_size, keep_prob):
    """Two-stage attention encoder: user2user self-attention over the
    cascade, then user2cas attention pooling; returns a [b,d] tensor
    (user2cas sums over the sequence axis)."""
    cas_encoding = user2user(cas_emb, cas_mask, hidden_size, keep_prob)  # [b,n,d]
    return user2cas(cas_encoding, cas_mask, time_weight, hidden_size, keep_prob)
def user2user(cas_emb, cas_mask, hidden_size, keep_prob):
    """Directional self-attention over the cascade sequence.

    Each position attends only to strictly earlier positions (row > col in
    the direction mask) that are inside the valid sequence length; the
    attended context is merged with the position's own hidden state through
    a learned fusion gate. Returns a [b,n,d] tensor.
    """
    with tf.variable_scope('user2user'):
        bs, sl = tf.shape(cas_emb)[0], tf.shape(cas_emb)[1]
        col, row = tf.meshgrid(tf.range(sl), tf.range(sl))  # [n,n]
        # True where row > col: position i may attend to positions j < i.
        direction_mask = tf.greater(row, col)  # [n,n]
        direction_mask_tile = tf.tile(tf.expand_dims(direction_mask, 0), [bs, 1, 1])  # [b,n,n]
        length_mask_tile = tf.tile(tf.expand_dims(tf.squeeze(tf.cast(cas_mask, tf.bool), -1), 1), [1, sl, 1])  # [b,1,n] -> [b,n,n]
        attention_mask = tf.cast(tf.logical_and(direction_mask_tile, length_mask_tile), tf.float32)  # [b,n,n]
        cas_hidden = dense(cas_emb, hidden_size, tf.nn.elu, keep_prob, 'hidden') * cas_mask  # [b,n,d]
        head = dense(cas_hidden, hidden_size, tf.identity, keep_prob, 'head', False)  # [b,n,d]
        tail = dense(cas_hidden, hidden_size, tf.identity, keep_prob, 'tail', False)  # [b,n,d]
        # -1e30 drives masked-out logits to ~0 after the softmax.
        matching_logit = tf.matmul(head, tf.transpose(tail, perm=[0, 2, 1])) + (1 - attention_mask) * (-1e30)
        attention_score = tf.nn.softmax(matching_logit, -1) * attention_mask
        depend_emb = tf.matmul(attention_score, cas_hidden)  # [b,n,d]
        # Gate between each position's own state and its attended context.
        fusion_gate = dense(tf.concat([cas_hidden, depend_emb], 2), hidden_size, tf.sigmoid, keep_prob, 'fusion_gate')  # [b,n,d]
        return (fusion_gate * cas_hidden + (1 - fusion_gate) * depend_emb) * cas_mask  # [b,n,d]
def user2cas(cas_encoding, cas_mask, time_weight, hidden_size, keep_prob):
    """Attention pooling of per-user encodings into one cascade vector.

    The attention logits are modulated by a learned transform of
    `time_weight`; masked positions get -1e30 before the softmax so they
    contribute ~0. Returns a [b,d] tensor (sum over the sequence axis).
    """
    with tf.variable_scope('user2cas'):
        map1 = dense(cas_encoding, hidden_size, tf.nn.elu, keep_prob, 'map1')  # [b,n,d]
        time_influence = dense(time_weight, hidden_size, tf.nn.elu, keep_prob, 'time_influence')
        map2 = dense(map1 * time_influence, 1, tf.identity, keep_prob, 'map2')
        attention_score = tf.nn.softmax(map2 + (-1e30) * (1 - cas_mask), 1) * cas_mask
        return tf.reduce_sum(attention_score * cas_encoding, 1)
def dense(input, out_size, activation, keep_prob, scope, need_bias=True):
    """Dense layer applied over the last axis of an arbitrary-rank tensor,
    followed by dropout.

    Note: when need_bias is False the bias variable is still created and
    added, but it is zero-initialized and non-trainable, so it stays zero --
    effectively no bias.
    """
    with tf.variable_scope(scope):
        W = tf.get_variable('W', [input.get_shape()[-1], out_size], dtype=tf.float32)
        b = tf.get_variable('b', [out_size], tf.float32, tf.zeros_initializer(), trainable=need_bias)
        # Flatten leading axes, apply the affine map, then restore the shape.
        flatten = tf.matmul(tf.reshape(input, [-1, tf.shape(input)[-1]]), W) + b
        out_shape = [tf.shape(input)[i] for i in range(len(input.get_shape()) - 1)] + [out_size]
        return tf.nn.dropout(activation(tf.reshape(flatten, out_shape)), keep_prob)
class Model(object):
    """TF1 graph-mode cascade model built around the `hidan` encoder.

    Usage: construct with a config object, call build_model() once to
    create the graph, then drive training/evaluation through
    train_batch()/test_batch() with a tf.Session.
    """

    def __init__(self, config):
        # Copy hyper-parameters from the config object.
        self.num_nodes = config.num_nodes
        self.hidden_size = config.hidden_size
        self.embedding_size = config.embedding_size
        self.learning_rate = config.learning_rate
        self.l2_weight = config.l2_weight
        self.train_dropout = config.dropout
        self.n_time_interval = config.n_time_interval
        self.optimizer = config.optimizer

    def build_model(self):
        """Create placeholders, the hidan encoder, the loss and train op."""
        with tf.variable_scope("model", initializer=tf.contrib.layers.xavier_initializer()) as scope:
            self.cas = tf.placeholder(tf.int32, [None, None])  # (b,n)
            # Non-zero entries mark real users; zero is padding.
            self.cas_length = tf.reduce_sum(tf.sign(self.cas), 1)
            self.cas_mask = tf.expand_dims(tf.sequence_mask(self.cas_length, tf.shape(self.cas)[1], tf.float32), -1)
            self.dropout = tf.placeholder(tf.float32)
            self.labels = tf.placeholder(tf.int32, [None])  # (b,)
            self.time_interval_index = tf.placeholder(tf.int32, [None, None])  # (b,n)
            self.num_cas = tf.placeholder(tf.float32)
            with tf.device("/gpu:0"):
                self.embedding = tf.get_variable(
                    "embedding", [self.num_nodes,
                                  self.embedding_size], dtype=tf.float32)
                self.cas_emb = tf.nn.embedding_lookup(self.embedding, self.cas)  # (b,n,l)
            # Per-time-interval weights fed to the user2cas attention.
            self.time_lambda = tf.get_variable('time_lambda', [self.n_time_interval + 1, self.hidden_size], dtype=tf.float32)
            self.time_weight = tf.nn.embedding_lookup(self.time_lambda, self.time_interval_index)
            with tf.variable_scope("hidan") as scope:
                self.hidan = hidan(self.cas_emb, self.cas_mask, self.time_weight, self.hidden_size, self.dropout)
            with tf.variable_scope("loss"):
                l0 = self.hidan
                self.logits = dense(l0, self.num_nodes, tf.identity, 1.0, 'logits')
                self.nll = tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(self.labels, self.num_nodes, dtype=tf.float32), logits=self.logits)
                self.loss = tf.reduce_mean(self.nll, -1)
                # L2 regularization over every trainable variable.
                for v in tf.trainable_variables():
                    self.loss += self.l2_weight * tf.nn.l2_loss(v)
                # NOTE(review): 'adaelta' looks like a typo for 'adadelta';
                # as written, only the misspelled value selects Adadelta and
                # anything else falls back to Adam -- confirm config values.
                if self.optimizer == 'adaelta':
                    self.train_op = tf.train.AdadeltaOptimizer(self.learning_rate, rho=0.999).minimize(self.loss)
                else:
                    self.train_op = tf.train.AdamOptimizer(self.learning_rate, beta1=0.99).minimize(self.loss)

    def train_batch(self, sess, batch_data):
        """Run one optimization step; return the summed NLL of the batch."""
        cas, next_user, time_interval_index, seq_len = batch_data
        feed = {self.cas: cas,
                self.labels: next_user,
                self.dropout: self.train_dropout,
                self.time_interval_index: time_interval_index,
                self.num_cas: len(seq_len)
                }
        _, _, nll = sess.run([self.train_op, self.loss, self.nll], feed_dict=feed)
        batch_nll = np.sum(nll)
        return batch_nll

    def test_batch(self, sess, batch_test, test_batch_perf=False):
        """Evaluate one batch; returns summed NLL, per-cascade NLL and
        ranking metrics computed by rank_eval."""
        cas, next_user, time_interval_index, seq_len = batch_test
        feed = {self.cas: cas,
                self.labels: next_user,
                self.time_interval_index: time_interval_index,
                self.dropout: 1.0  # no dropout at evaluation time
                }
        logits, nll = sess.run([self.logits, self.nll], feed_dict=feed)
        print("-----OUTPUT FORMAT-----")
        # print(len(logits), logits.shape)
        # print(len(next_user))
        # print(len(seq_len))
        # batch_rr = mrr_cal(logits, next_user, seq_len)
        mrr, macc1, macc5, macc10, macc50, macc100, scores = rank_eval(logits, next_user, seq_len, test_batch_perf)
        batch_cll = np.sum(nll)
        batch_ill = ill_cal(nll, seq_len)
        return batch_cll, batch_ill, mrr, macc1, macc5, macc10, macc50, macc100, scores
|
import torch
from torch import nn
from src.config import data
from src.utils import init_weights
class EncoderModel(nn.Module):
    """VAE-style encoder producing a reparameterized latent sample.

    Two structurally identical MLP heads map an input of size data.x_size
    to a mean and a log-variance of size data.z_size.
    """

    def __init__(self):
        super().__init__()

        def build_head():
            # Shared architecture for both the mu and log-variance heads.
            return nn.Sequential(
                nn.Linear(data.x_size, 16, bias=False),
                nn.BatchNorm1d(16),
                nn.LeakyReLU(),
                nn.Linear(16, 32, bias=False),
                nn.BatchNorm1d(32),
                nn.LeakyReLU(),
                nn.Linear(32, 64, bias=False),
                nn.BatchNorm1d(64),
                nn.LeakyReLU(),
                nn.Linear(64, data.z_size),
            )

        # Registration order (mu first) matches the original definition.
        self.calculate_mu = build_head()
        self.calculate_log_variance = build_head()
        self.apply(init_weights)

    def forward(self, x: torch.Tensor):
        """Return (z, mu, sigma) using the reparameterization trick."""
        mu = self.calculate_mu(x)
        log_variance = self.calculate_log_variance(x)
        sigma = torch.exp(0.5 * log_variance)
        noise = torch.randn_like(mu)
        sample = noise * sigma + mu
        return sample, mu, sigma
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-07-24 09:55
from __future__ import unicode_literals
import backend.account.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import imagekit.models.fields
class Migration(migrations.Migration):
    """Initial schema for the account app: UserImage and UserProfile.

    Auto-generated by Django 1.9 makemigrations; edit with care.
    """

    initial = True

    dependencies = [
        # Depends on whichever model is configured as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Upload path is generated by get_unique_file_path.
                ('original', imagekit.models.fields.ProcessedImageField(upload_to=backend.account.models.get_unique_file_path)),
            ],
        ),
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(blank=True, max_length=255, null=True)),
                ('last_name', models.CharField(blank=True, max_length=255, null=True)),
                ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='account.UserImage')),
                ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userprofile', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['first_name'],
            },
        ),
    ]
|
__author__ = 'lynevdv'
from lenstronomy.LensModel.Profiles.elliptical_density_slice import ElliSLICE
import numpy as np
import pytest
import numpy.testing as npt
class TestElliSLICE(object):
    """
    tests the elliptical slice lens model
    """

    def setup(self):
        # Fresh profile instance per test (nose/pytest setup hook).
        self.ElliSLICE = ElliSLICE()

    def test_function(self):
        """Lensing potential: scalar input, array input, and continuity
        across the slice boundary (values at sqrt(3) +/- 1e-9)."""
        x = 0.5
        y = 0.1
        a = 2.
        b = 1.
        psi = 30 * np.pi / 180.
        sigma_0 = 5.
        values = self.ElliSLICE.function(x, y, a, b, psi, sigma_0)
        npt.assert_almost_equal(values, 4.532482297, decimal=4)
        x = 3. * np.sqrt(3) / 2.
        y = 3. / 2.
        values = self.ElliSLICE.function(x, y, a, b, psi, sigma_0)
        npt.assert_almost_equal(values, 15.52885056, decimal=4)
        # array input at the center
        x = np.array([0])
        y = np.array([0])
        values = self.ElliSLICE.function(x, y, a, b, psi, sigma_0)
        npt.assert_almost_equal(values[0], 4.054651081, decimal=5)
        # continuity check around the boundary point (sqrt(3), 1)
        x = np.array([np.sqrt(3), np.sqrt(3) + 0.000000001, np.sqrt(3) - 0.000000001])
        y = np.array([1, 1.000000001, 0.999999999])
        values = self.ElliSLICE.function(x, y, a, b, psi, sigma_0)
        npt.assert_almost_equal(values[0], values[1], decimal=5)
        npt.assert_almost_equal(values[1], values[2], decimal=5)

    def test_derivatives(self):
        """Deflection angles: scalar, on-axis, array, and continuity cases."""
        x = 0.5
        y = 0.1
        a = 2.
        b = 1.
        psi = 30 * np.pi / 180.
        sigma_0 = 5.
        f_x, f_y = self.ElliSLICE.derivatives(x, y, a, b, psi, sigma_0)
        npt.assert_almost_equal(f_x, 1.938995765, decimal=6)
        npt.assert_almost_equal(f_y, -0.13835403, decimal=6)
        # on the x-axis with psi = 0 the deflection is purely radial
        x = 4
        y = 0.
        f_x, f_y = self.ElliSLICE.derivatives(x, y, a, b, 0., sigma_0)
        npt.assert_almost_equal(f_x, 2.629658164, decimal=6)
        npt.assert_almost_equal(f_y, 0., decimal=6)
        x = np.array([0.5])
        y = np.array([0.1])
        f_x, f_y = self.ElliSLICE.derivatives(x, y, a, b, psi, sigma_0)
        npt.assert_almost_equal(f_x, 1.938995765, decimal=6)
        npt.assert_almost_equal(f_y, -0.13835403, decimal=6)
        # continuity check around the boundary point (sqrt(3), 1)
        x = np.array([np.sqrt(3), np.sqrt(3) + 0.000000001, np.sqrt(3) - 0.000000001])
        y = np.array([1, 1.000000001, 0.999999999])
        f_x, f_y = self.ElliSLICE.derivatives(x, y, a, b, psi, sigma_0)
        npt.assert_almost_equal(f_x[0], f_x[1], decimal=5)
        npt.assert_almost_equal(f_y[1], f_y[2], decimal=5)

    def test_hessian(self):
        """Convergence (f_xx + f_yy)/2 equals sigma_0 inside the slice and
        0 outside."""
        x = 0.5
        y = 0.1
        a = 2.
        b = 1.
        psi = 30 * np.pi / 180.
        sigma_0 = 5.
        f_xx, f_yy, f_xy = self.ElliSLICE.hessian(x, y, a, b, psi, sigma_0)
        npt.assert_almost_equal((f_xx + f_yy) / 2., 5., decimal=6)
        # point outside the slice: zero convergence
        x = np.array([1])
        y = np.array([2])
        f_xx, f_yy, f_xy = self.ElliSLICE.hessian(x, y, a, b, psi, sigma_0)
        npt.assert_almost_equal((f_xx + f_yy) / 2., 0., decimal=6)
        x = np.array([1, 3, 0.])
        y = np.array([2, 1, 0.5])
        values = self.ElliSLICE.hessian(x, y, a, b, psi, sigma_0)
        npt.assert_almost_equal((values[0][2] + values[1][2]) / 2., 5., decimal=6)
if __name__ == '__main__':
    # Allow running this test module directly, outside a pytest invocation.
    pytest.main()
|
class Addon(object):  # can be subclassed to create custom addons
    """Base class for page addons that queue up modification strings."""

    def __init__(self, page):
        self.page = page
        # Pending modification strings, drained by get_modifications().
        self.queued_modifications = []

    def start(self):
        """Hook called when the addon starts; subclasses may override."""
        print("Addon started")

    def get_modifications(self):  # each modification should be a string
        """Drain the queue and return all modifications joined by newlines.

        Returns an empty string when nothing is queued.
        """
        # Idiom/perf fix: str.join replaces the original manual loop with
        # repeated string concatenation (quadratic in the worst case).
        modification_string = "\n".join(self.queued_modifications)
        self.queued_modifications = []
        return modification_string
from discord.ext import commands
import discord
from momiji.modules import permissions
from momiji.reusables import send_large_message
class WastelandConfiguration(commands.Cog):
    """Commands for configuring "wasteland" audit-log channels and the
    per-channel / per-user ignore lists. All state is persisted through
    the bot's async database handle (self.bot.db)."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="wasteland_ignore_channel", brief="Ignore audit logging for this channel")
    @commands.check(permissions.is_owner)
    @commands.check(permissions.is_not_ignored)
    async def wasteland_ignore_channel(self, ctx):
        """
        Blacklist the current channel from being included in wasteland logs.
        """
        await self.bot.db.execute("INSERT INTO wasteland_ignore_channels VALUES (?, ?)",
                                  [int(ctx.guild.id), int(ctx.channel.id)])
        await self.bot.db.commit()
        await ctx.send(":ok_hand:")

    @commands.command(name="wasteland_ignore_user", brief="Ignore audit logging for a set user")
    @commands.check(permissions.is_owner)
    @commands.check(permissions.is_not_ignored)
    async def wasteland_ignore_user(self, ctx, user_id):
        """
        Blacklist a user from being included in wasteland logs.
        """
        await self.bot.db.execute("INSERT INTO wasteland_ignore_users VALUES (?, ?)",
                                  [int(ctx.guild.id), int(user_id)])
        await self.bot.db.commit()
        await ctx.send(":ok_hand:")

    @commands.command(name="set_wasteland_channel", brief="Set a wasteland message")
    @commands.check(permissions.is_admin)
    @commands.check(permissions.is_not_ignored)
    @commands.guild_only()
    async def set_wasteland_channel(self, ctx, event_name):
        """
        Set the channel the message is being called in as a wasteland channel.
        event list:
        on_member_ban, on_member_unban, on_member_remove,
        on_member_join, on_message_edit, on_message_delete,
        on_member_update, on_user_update, all
        """
        await self.bot.db.execute("INSERT INTO wasteland_channels VALUES (?,?,?)",
                                  [int(ctx.guild.id), int(ctx.channel.id), str(event_name)])
        await self.bot.db.commit()
        await ctx.send("This channel is now a wasteland channel.")

    @commands.command(name="remove_wasteland_channel", brief="Remove a wasteland channel")
    @commands.check(permissions.is_admin)
    @commands.check(permissions.is_not_ignored)
    @commands.guild_only()
    async def remove_wasteland_channel(self, ctx, *args):
        """
        Remove the current channel from being a wasteland channel.
        """
        # TODO: adjust for multiple wasteland events
        # Passing "guild" removes every wasteland channel in the guild;
        # otherwise only the current channel's entry is removed.
        if "guild" in args:
            await self.bot.db.execute("DELETE FROM wasteland_channels WHERE guild_id = ?", [int(ctx.guild.id)])
            await ctx.send("If this server had a wasteland channel, there are none now.")
        else:
            await self.bot.db.execute("DELETE FROM wasteland_channels WHERE channel_id = ?", [int(ctx.channel.id)])
            await ctx.send("If this channel was a wasteland channel, it is not more.")
        await self.bot.db.commit()

    @commands.command(name="get_wasteland_channels", brief="Get all wasteland channels")
    @commands.check(permissions.is_admin)
    @commands.check(permissions.is_not_ignored)
    @commands.guild_only()
    # NOTE(review): the method name looks copy-pasted from a welcome-message
    # cog; the registered command name comes from the decorator, so behavior
    # is unaffected, but consider renaming to get_wasteland_channels.
    async def get_welcome_messages(self, ctx):
        """
        Prints out all wasteland channels in this guild.
        """
        # TODO: adjust for multiple wasteland events
        async with self.bot.db.execute("SELECT channel_id FROM wasteland_channels WHERE guild_id = ?",
                                       [int(ctx.guild.id)]) as cursor:
            wasteland_channels = await cursor.fetchall()
        buffer = ":wastebasket: **Wasteland channels in this server.**\n\n"
        if wasteland_channels:
            for one_wasteland_channel in wasteland_channels:
                buffer += f"<#{one_wasteland_channel[0]}>\n"
        else:
            buffer += "**There are no wasteland channels in this server.**\n"
        embed = discord.Embed(color=0xf76a8c)
        await send_large_message.send_large_embed(ctx.channel, embed, buffer)
def setup(bot):
    # discord.py extension entry point: register the cog on load.
    bot.add_cog(WastelandConfiguration(bot))
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
import json
import os
def parse_args():
    """Build the CLI parser and return the parsed arguments.

    Recognized options:
        --model_id: path to the input model.
        --model_info_output_path: directory to write the model info JSON.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_id", type=str, help="Path to input model")
    parser.add_argument(
        "--model_info_output_path",
        type=str,
        help="Path to write model info JSON",
    )
    return parser.parse_args()
def main(args):
    """Write <model_info_output_path>/model_info.json containing the model id.

    Args:
        args: namespace with `model_id` and `model_info_output_path`.
    """
    print("Writing JSON")
    # Renamed from `dict`, which shadowed the builtin; str() replaces the
    # dated "{0}".format(...) and produces the same text.
    model_info = {"id": str(args.model_id)}
    output_path = os.path.join(args.model_info_output_path, "model_info.json")
    with open(output_path, "w") as of:
        json.dump(model_info, fp=of)
    print("Done")
# run script
# Entry point: parses CLI args and writes <output>/model_info.json.
if __name__ == "__main__":
    # add space in logs
    print("*" * 60)
    print("\n\n")
    # parse args
    args = parse_args()
    # run main function
    main(args)
    # add space in logs
    print("*" * 60)
    print("\n\n")
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """UI scaffolding generated by pyuic5 from mainwindow.ui.

    Do not edit by hand beyond comments: regeneration overwrites this file
    (see the generator warning in the module header).
    """

    def setupUi(self, MainWindow):
        """Create and lay out all widgets, menus and actions."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(761, 585)
        # Central widget with a grid layout: search row, splitter, progress bar.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.layout = QtWidgets.QGridLayout(self.centralwidget)
        self.layout.setObjectName("layout")
        self.searchBox = QtWidgets.QLineEdit(self.centralwidget)
        self.searchBox.setObjectName("searchBox")
        self.layout.addWidget(self.searchBox, 0, 2, 1, 1)
        self.searchLabel = QtWidgets.QLabel(self.centralwidget)
        self.searchLabel.setObjectName("searchLabel")
        self.layout.addWidget(self.searchLabel, 0, 1, 1, 1)
        # Horizontal splitter: file list on the left, text viewer on the right.
        self.splitter = QtWidgets.QSplitter(self.centralwidget)
        self.splitter.setOrientation(QtCore.Qt.Horizontal)
        self.splitter.setObjectName("splitter")
        self.filesView = QtWidgets.QListView(self.splitter)
        self.filesView.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.filesView.setAlternatingRowColors(True)
        self.filesView.setObjectName("filesView")
        # Custom promoted widget (imported at the bottom of this module).
        self.textView = HighlightTextEdit(self.splitter)
        font = QtGui.QFont()
        font.setFamily("Yu Gothic")
        font.setPointSize(11)
        self.textView.setFont(font)
        self.textView.setReadOnly(True)
        self.textView.setObjectName("textView")
        self.layout.addWidget(self.splitter, 1, 1, 1, 2)
        self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
        self.progressBar.setProperty("value", 0)
        self.progressBar.setTextVisible(True)
        self.progressBar.setObjectName("progressBar")
        self.layout.addWidget(self.progressBar, 2, 1, 1, 2)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar and status bar.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 761, 26))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuOptions = QtWidgets.QMenu(self.menubar)
        self.menuOptions.setObjectName("menuOptions")
        MainWindow.setMenuBar(self.menubar)
        self.statusBar = QtWidgets.QStatusBar(MainWindow)
        self.statusBar.setObjectName("statusBar")
        MainWindow.setStatusBar(self.statusBar)
        # Actions (text/shortcuts assigned in retranslateUi).
        self.actionOpenDir = QtWidgets.QAction(MainWindow)
        self.actionOpenDir.setObjectName("actionOpenDir")
        self.actionExit = QtWidgets.QAction(MainWindow)
        self.actionExit.setObjectName("actionExit")
        self.actionRealtimeSearch = QtWidgets.QAction(MainWindow)
        self.actionRealtimeSearch.setCheckable(True)
        self.actionRealtimeSearch.setObjectName("actionRealtimeSearch")
        self.actionRegExpSearch = QtWidgets.QAction(MainWindow)
        self.actionRegExpSearch.setCheckable(True)
        self.actionRegExpSearch.setObjectName("actionRegExpSearch")
        self.actionExport = QtWidgets.QAction(MainWindow)
        self.actionExport.setObjectName("actionExport")
        self.menuFile.addAction(self.actionOpenDir)
        self.menuFile.addAction(self.actionExport)
        self.menuFile.addAction(self.actionExit)
        self.menuOptions.addAction(self.actionRealtimeSearch)
        self.menuOptions.addAction(self.actionRegExpSearch)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuOptions.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        MainWindow.setTabOrder(self.searchBox, self.filesView)
        MainWindow.setTabOrder(self.filesView, self.textView)

    def retranslateUi(self, MainWindow):
        """Assign all user-visible strings (translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Japanese Verb Search"))
        self.searchLabel.setText(_translate("MainWindow", "Search"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.menuOptions.setTitle(_translate("MainWindow", "Options"))
        self.actionOpenDir.setText(_translate("MainWindow", "Open..."))
        self.actionOpenDir.setToolTip(_translate("MainWindow", "Open working directory"))
        self.actionOpenDir.setShortcut(_translate("MainWindow", "Ctrl+O"))
        self.actionExit.setText(_translate("MainWindow", "Exit"))
        self.actionExit.setShortcut(_translate("MainWindow", "Ctrl+Q"))
        self.actionRealtimeSearch.setText(_translate("MainWindow", "Search: Realtime"))
        self.actionRegExpSearch.setText(_translate("MainWindow", "Search: Use RexExp engine"))
        self.actionExport.setText(_translate("MainWindow", "Export..."))
        self.actionExport.setShortcut(_translate("MainWindow", "Ctrl+E"))
from jvs.highlighttextedit import HighlightTextEdit
|
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import os
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from oslo_config import cfg
from cloudbaseinit import exception
from cloudbaseinit.tests import testutils
CONF = cfg.CONF
class TestWindowsConfigDriveManager(unittest.TestCase):
    """Unit tests for WindowsConfigDriveManager.

    ``ctypes`` is replaced with a MagicMock in ``sys.modules`` before the
    module under test is imported, so the suite can run on non-Windows
    platforms.
    """

    def setUp(self):
        self._ctypes_mock = mock.MagicMock()
        self._module_patcher = mock.patch.dict('sys.modules',
                                               {'ctypes': self._ctypes_mock})
        self._module_patcher.start()
        self.windows = importlib.import_module(
            "cloudbaseinit.metadata.services.osconfigdrive.windows")
        self.physical_disk = importlib.import_module(
            "cloudbaseinit.utils.windows.physical_disk")
        self.physical_disk.Win32_DiskGeometry = mock.MagicMock()
        self.windows.physical_disk.PhysicalDisk = mock.MagicMock()
        self._config_manager = self.windows.WindowsConfigDriveManager()

    def tearDown(self):
        self._module_patcher.stop()

    @mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
    @mock.patch('os.path.exists')
    def _test_get_config_drive_cdrom_mount_point(self, mock_exists,
                                                 mock_get_os_utils, exists):
        # NOTE: decorators apply bottom-up, so the os.path.exists mock is
        # the first argument (it was previously misnamed 'mock_join').
        mock_osutils = mock.MagicMock()
        mock_get_os_utils.return_value = mock_osutils
        mock_osutils.get_cdrom_drives.return_value = ['fake drive']
        mock_osutils.get_volume_label.return_value = 'config-2'
        mock_exists.return_value = exists
        response = self._config_manager._get_config_drive_cdrom_mount_point()
        mock_osutils.get_cdrom_drives.assert_called_once_with()
        mock_osutils.get_volume_label.assert_called_once_with('fake drive')
        if exists:
            self.assertEqual('fake drive', response)
        else:
            self.assertIsNone(response)

    def test_get_config_drive_cdrom_mount_point_exists_true(self):
        self._test_get_config_drive_cdrom_mount_point(exists=True)

    def test_get_config_drive_cdrom_mount_point_exists_false(self):
        self._test_get_config_drive_cdrom_mount_point(exists=False)

    def test_c_char_array_to_c_ushort(self):
        """Verify the char-array -> ushort helper drives ctypes correctly."""
        mock_buf = mock.MagicMock()
        contents = self._ctypes_mock.cast.return_value.contents
        response = self._config_manager._c_char_array_to_c_ushort(mock_buf, 1)
        # Two casts: one for the low byte, one for the high byte.
        self.assertEqual(2, self._ctypes_mock.cast.call_count)
        self._ctypes_mock.POINTER.assert_called_with(
            self._ctypes_mock.wintypes.WORD)
        self._ctypes_mock.cast.assert_called_with(
            mock_buf.__getitem__(), self._ctypes_mock.POINTER.return_value)
        self.assertEqual(contents.value.__lshift__().__add__(), response)

    @mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
                'WindowsConfigDriveManager._c_char_array_to_c_ushort')
    def _test_get_iso_disk_size(self, mock_c_char_array_to_c_ushort,
                                media_type, value, iso_id):
        """Exercise _get_iso_disk_size for the given geometry/ISO header."""
        if media_type == "fixed":
            media_type = self.physical_disk.Win32_DiskGeometry.FixedMedia
        # Offsets inside the ISO9660 primary volume descriptor.
        boot_record_off = 0x8000
        volume_size_off = 80
        block_size_off = 128
        mock_phys_disk = mock.MagicMock()
        mock_buff = mock.MagicMock()
        mock_geom = mock.MagicMock()
        mock_phys_disk.get_geometry.return_value = mock_geom
        mock_geom.MediaType = media_type
        mock_geom.Cylinders = value
        mock_geom.TracksPerCylinder = 2
        mock_geom.SectorsPerTrack = 2
        mock_geom.BytesPerSector = 2
        mock_phys_disk.read.return_value = (mock_buff, 'fake value')
        mock_buff.__getitem__.return_value = iso_id
        mock_c_char_array_to_c_ushort.return_value = 100
        disk_size = mock_geom.Cylinders * mock_geom.TracksPerCylinder * \
            mock_geom.SectorsPerTrack * mock_geom.BytesPerSector
        offset = boot_record_off / mock_geom.BytesPerSector * \
            mock_geom.BytesPerSector
        buf_off_volume = boot_record_off - offset + volume_size_off
        buf_off_block = boot_record_off - offset + block_size_off
        response = self._config_manager._get_iso_disk_size(mock_phys_disk)
        mock_phys_disk.get_geometry.assert_called_once_with()
        if media_type != self.physical_disk.Win32_DiskGeometry.FixedMedia:
            self.assertIsNone(response)
        elif disk_size <= offset + mock_geom.BytesPerSector:
            self.assertIsNone(response)
        else:
            mock_phys_disk.seek.assert_called_once_with(offset)
            mock_phys_disk.read.assert_called_once_with(
                mock_geom.BytesPerSector)
            if iso_id != 'CD001':
                self.assertIsNone(response)
            else:
                # Bug fix: assert_has_calls takes a *list* of calls; the
                # original passed the second call as the any_order flag,
                # so it was never verified.
                mock_c_char_array_to_c_ushort.assert_has_calls(
                    [mock.call(mock_buff, buf_off_volume),
                     mock.call(mock_buff, buf_off_block)])
                self.assertEqual(10000, response)

    def test_test_get_iso_disk_size(self):
        self._test_get_iso_disk_size(
            media_type="fixed",
            value=100, iso_id='CD001')

    def test_test_get_iso_disk_size_other_media_type(self):
        self._test_get_iso_disk_size(media_type="other", value=100,
                                     iso_id='CD001')

    def test_test_get_iso_disk_size_other_disk_size_too_small(self):
        self._test_get_iso_disk_size(
            media_type="fixed",
            value=0, iso_id='CD001')

    def test_test_get_iso_disk_size_other_id(self):
        self._test_get_iso_disk_size(
            media_type="fixed",
            value=100, iso_id='other id')

    def test_write_iso_file(self):
        """The ISO image is read from the disk and written to the path."""
        mock_buff = mock.MagicMock()
        mock_geom = mock.MagicMock()
        mock_geom.BytesPerSector = 2
        mock_phys_disk = mock.MagicMock()
        mock_phys_disk.read.return_value = (mock_buff, 10)
        fake_path = os.path.join('fake', 'path')
        mock_phys_disk.get_geometry.return_value = mock_geom
        with mock.patch('six.moves.builtins.open', mock.mock_open(),
                        create=True) as f:
            self._config_manager._write_iso_file(mock_phys_disk, fake_path,
                                                 10)
            f().write.assert_called_once_with(mock_buff)
        mock_phys_disk.seek.assert_called_once_with(0)
        mock_phys_disk.read.assert_called_once_with(10)

    @mock.patch('os.makedirs')
    def _test_extract_iso_files(self, mock_makedirs, exit_code):
        """bsdtar is invoked; a non-zero exit code raises."""
        fake_path = os.path.join('fake', 'path')
        fake_target_path = os.path.join(fake_path, 'target')
        args = [CONF.bsdtar_path, '-xf', fake_path, '-C', fake_target_path]
        mock_os_utils = mock.MagicMock()
        mock_os_utils.execute_process.return_value = ('fake out', 'fake err',
                                                      exit_code)
        if exit_code:
            self.assertRaises(exception.CloudbaseInitException,
                              self._config_manager._extract_iso_files,
                              mock_os_utils, fake_path, fake_target_path)
        else:
            self._config_manager._extract_iso_files(mock_os_utils, fake_path,
                                                    fake_target_path)
        mock_os_utils.execute_process.assert_called_once_with(args, False)
        mock_makedirs.assert_called_once_with(fake_target_path)

    def test_extract_iso_files(self):
        self._test_extract_iso_files(exit_code=None)

    def test_extract_iso_files_exception(self):
        self._test_extract_iso_files(exit_code=1)

    @mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
                'WindowsConfigDriveManager._get_iso_disk_size')
    @mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
                'WindowsConfigDriveManager._write_iso_file')
    def _test_extract_iso_disk_file(self, mock_write_iso_file,
                                    mock_get_iso_disk_size, exception):
        """A disk that fails to open is skipped; success copies the ISO."""
        mock_osutils = mock.MagicMock()
        fake_path = os.path.join('fake', 'path')
        fake_path_physical = os.path.join(fake_path, 'physical')
        mock_osutils.get_physical_disks.return_value = [fake_path_physical]
        mock_get_iso_disk_size.return_value = 'fake iso size'
        mock_PhysDisk = self.windows.physical_disk.PhysicalDisk.return_value
        if exception:
            mock_PhysDisk.open.side_effect = [Exception]
        response = self._config_manager._extract_iso_disk_file(
            osutils=mock_osutils, iso_file_path=fake_path)
        if not exception:
            mock_get_iso_disk_size.assert_called_once_with(
                mock_PhysDisk)
            mock_write_iso_file.assert_called_once_with(
                mock_PhysDisk, fake_path, 'fake iso size')
            self.windows.physical_disk.PhysicalDisk.assert_called_once_with(
                fake_path_physical)
            mock_osutils.get_physical_disks.assert_called_once_with()
            mock_PhysDisk.open.assert_called_once_with()
            mock_PhysDisk.close.assert_called_once_with()
            self.assertTrue(response)
        else:
            self.assertFalse(response)

    def test_extract_iso_disk_file_disk_found(self):
        self._test_extract_iso_disk_file(exception=False)

    def test_extract_iso_disk_file_disk_not_found(self):
        self._test_extract_iso_disk_file(exception=True)

    @mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
                'WindowsConfigDriveManager._get_conf_drive_from_raw_hdd')
    @mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
                'WindowsConfigDriveManager._get_conf_drive_from_cdrom_drive')
    @mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
                'WindowsConfigDriveManager._get_conf_drive_from_vfat')
    def _test_get_config_drive_files(self,
                                     mock_get_conf_drive_from_vfat,
                                     mock_get_conf_drive_from_cdrom_drive,
                                     mock_get_conf_drive_from_raw_hdd,
                                     raw_hdd_found=False,
                                     cdrom_drive_found=False,
                                     vfat_found=False):
        """The lookup order is vfat, then cdrom, then raw hdd."""
        fake_path = os.path.join('fake', 'path')
        mock_get_conf_drive_from_raw_hdd.return_value = raw_hdd_found
        mock_get_conf_drive_from_cdrom_drive.return_value = cdrom_drive_found
        mock_get_conf_drive_from_vfat.return_value = vfat_found
        response = self._config_manager.get_config_drive_files(
            target_path=fake_path)
        if vfat_found:
            mock_get_conf_drive_from_vfat.assert_called_once_with(fake_path)
            self.assertFalse(mock_get_conf_drive_from_raw_hdd.called)
            self.assertFalse(mock_get_conf_drive_from_cdrom_drive.called)
        elif cdrom_drive_found:
            mock_get_conf_drive_from_vfat.assert_called_once_with(fake_path)
            mock_get_conf_drive_from_cdrom_drive.assert_called_once_with(
                fake_path)
            mock_get_conf_drive_from_raw_hdd.assert_called_once_with(
                fake_path)
        elif raw_hdd_found:
            mock_get_conf_drive_from_vfat.assert_called_once_with(fake_path)
            mock_get_conf_drive_from_raw_hdd.assert_called_once_with(
                fake_path)
            self.assertFalse(mock_get_conf_drive_from_cdrom_drive.called)
        self.assertTrue(response)

    def test_get_config_drive_files(self):
        self._test_get_config_drive_files(raw_hdd_found=True)
        self._test_get_config_drive_files(cdrom_drive_found=True)
        self._test_get_config_drive_files(vfat_found=True)

    @mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
                'WindowsConfigDriveManager.'
                '_get_config_drive_cdrom_mount_point')
    @mock.patch('shutil.copytree')
    def _test_get_conf_drive_from_cdrom_drive(self, mock_copytree,
                                              mock_get_config_cdrom_mount,
                                              mount_point):
        fake_path = os.path.join('fake', 'path')
        mock_get_config_cdrom_mount.return_value = mount_point
        response = self._config_manager._get_conf_drive_from_cdrom_drive(
            fake_path)
        mock_get_config_cdrom_mount.assert_called_once_with()
        if mount_point:
            mock_copytree.assert_called_once_with(mount_point, fake_path)
            self.assertTrue(response)
        else:
            self.assertFalse(response)

    def test_get_conf_drive_from_cdrom_drive_with_mountpoint(self):
        self._test_get_conf_drive_from_cdrom_drive(
            mount_point='fake mount point')

    def test_get_conf_drive_from_cdrom_drive_without_mountpoint(self):
        self._test_get_conf_drive_from_cdrom_drive(
            mount_point=None)

    @mock.patch('os.remove')
    @mock.patch('os.path.exists')
    @mock.patch('tempfile.gettempdir')
    @mock.patch('uuid.uuid4')
    @mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
                'WindowsConfigDriveManager._extract_iso_disk_file')
    @mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
                'WindowsConfigDriveManager._extract_iso_files')
    @mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
    def _test_get_conf_drive_from_raw_hdd(self, mock_get_os_utils,
                                          mock_extract_iso_files,
                                          mock_extract_iso_disk_file,
                                          mock_uuid4, mock_gettempdir,
                                          mock_exists, mock_remove,
                                          found_drive):
        """The temporary ISO file is extracted and removed afterwards."""
        fake_target_path = os.path.join('fake', 'path')
        fake_iso_path = os.path.join('fake_dir', 'fake_id' + '.iso')
        mock_uuid4.return_value = 'fake_id'
        mock_gettempdir.return_value = 'fake_dir'
        mock_extract_iso_disk_file.return_value = found_drive
        mock_exists.return_value = found_drive
        response = self._config_manager._get_conf_drive_from_raw_hdd(
            fake_target_path)
        mock_get_os_utils.assert_called_once_with()
        mock_gettempdir.assert_called_once_with()
        mock_extract_iso_disk_file.assert_called_once_with(
            mock_get_os_utils(), fake_iso_path)
        if found_drive:
            mock_extract_iso_files.assert_called_once_with(
                mock_get_os_utils(), fake_iso_path, fake_target_path)
            mock_exists.assert_called_once_with(fake_iso_path)
            mock_remove.assert_called_once_with(fake_iso_path)
            self.assertTrue(response)
        else:
            self.assertFalse(response)

    def test_get_conf_drive_from_raw_hdd_found_drive(self):
        self._test_get_conf_drive_from_raw_hdd(found_drive=True)

    def test_get_conf_drive_from_raw_hdd_no_drive_found(self):
        self._test_get_conf_drive_from_raw_hdd(found_drive=False)

    @mock.patch('os.makedirs')
    @mock.patch('cloudbaseinit.utils.windows.vfat.copy_from_vfat_drive')
    @mock.patch('cloudbaseinit.utils.windows.vfat.is_vfat_drive')
    @mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
    def test_get_conf_drive_from_vfat(self, mock_get_os_utils,
                                      mock_is_vfat_drive,
                                      mock_copy_from_vfat_drive,
                                      mock_os_makedirs):
        """Only the first VFAT drive found is copied."""
        mock_osutils = mock_get_os_utils.return_value
        mock_osutils.get_physical_disks.return_value = (
            mock.sentinel.drive1,
            mock.sentinel.drive2,
        )
        # First drive is not VFAT, second one is.
        mock_is_vfat_drive.side_effect = (None, True)
        with testutils.LogSnatcher('cloudbaseinit.metadata.services.'
                                   'osconfigdrive.windows') as snatcher:
            response = self._config_manager._get_conf_drive_from_vfat(
                mock.sentinel.target_path)
        self.assertTrue(response)
        mock_osutils.get_physical_disks.assert_called_once_with()
        expected_is_vfat_calls = [
            mock.call(mock_osutils, mock.sentinel.drive1),
            mock.call(mock_osutils, mock.sentinel.drive2),
        ]
        self.assertEqual(expected_is_vfat_calls, mock_is_vfat_drive.mock_calls)
        mock_copy_from_vfat_drive.assert_called_once_with(
            mock_osutils,
            mock.sentinel.drive2,
            mock.sentinel.target_path)
        expected_logging = [
            'Config Drive found on disk %r' % mock.sentinel.drive2,
        ]
        self.assertEqual(expected_logging, snatcher.output)
        mock_os_makedirs.assert_called_once_with(mock.sentinel.target_path)
|
import re
from django.core.exceptions import ValidationError
from app.common.serializers import BaseModelSerializer
from app.content.models import ShortLink
class ShortLinkSerializer(BaseModelSerializer):
    """Serializer for ShortLink objects, exposing only name and url."""

    class Meta:
        model = ShortLink
        fields = ["name", "url"]

    def validate_name(self, data):
        """Reject names that collide with the app's reserved URL prefixes."""
        if re.match("^(a/.*|k/.*|n/.*|(om/.*)|a|k|n|om)$", data, re.IGNORECASE):
            raise ValidationError(
                "Dette navnet er reservert. Navn som starter med 'om/', 'a/', 'k/' og 'n/' er ikke tilgjengelige."
            )
        return data
|
from smallboard import *
class BigBoard(object):
    """A 3x3 grid of SmallBoard instances (ultimate tic-tac-toe).

    ``active`` is the (row, col) of the small board the next move is
    forced into; (-1, -1) means any board may be played.
    """

    def __init__(self):
        self.it = [[SmallBoard() for _ in range(3)] for _ in range(3)]
        self.active = (-1, -1)

    def __getitem__(self, index):
        return self.it[index]

    def __setitem__(self, index, value):
        self.it[index] = value

    def show(self):
        """Draw all nine small boards; each board is 135px square."""
        y = 0
        for board_row in self.it:
            x = 0
            for board in board_row:
                board.show(x, y)
                x += 135
            y += 135

    def makeActive(self, i, j):
        """Force play into board (i, j), or anywhere if it is already won."""
        self.active = (-1, -1) if self[i][j].clr else (i, j)

    def showActive(self):
        """Highlight the playable region with a translucent yellow overlay."""
        fill(255, 255, 0, 75)
        if self.active == (-1, -1):
            rect(0, 0, 405, 405)
        else:
            i, j = self.active
            rect(j * 135, i * 135, 135, 135)

    def play(self, i, j, clr):
        """Attempt a move at global cell (i, j); return the sub-board result."""
        result = None
        legal_target = self.active == (-1, -1) or (i // 3, j // 3) == self.active
        if legal_target:
            result = self[i // 3][j // 3].play(i % 3, j % 3, clr)
            if result:
                # The cell played inside the small board picks the next board.
                self.makeActive(i % 3, j % 3)
        return result

    def winner(self):
        """Return the winning colour, -1 on a tie, or 0 if play continues."""
        tie = all(self[i][j].clr for i in range(3) for j in range(3))
        win = check([[self[i][j].clr for j in range(3)] for i in range(3)])
        return win if win else (-1 if tie else 0)
import os
import sys
import requests
from prettytable import PrettyTable
# Get risk meter information, and return a dictionary of tuples containing risk meter ID,
# risk meter score, and last update time.
def get_risk_meters(base_url, headers):
    """Fetch all risk meters (asset groups) from the API.

    Returns a dict mapping risk meter name to a tuple of
    (id, risk_meter_score, updated_at, querystring).
    Exits the process on a non-200 response.
    """
    list_risk_meters_url = f"{base_url}asset_groups"
    response = requests.get(list_risk_meters_url, headers=headers)
    if response.status_code != 200:
        print(f"List Risk Meters Error: {response.status_code} with {list_risk_meters_url}")
        sys.exit(1)

    risk_meters = {}
    for meter in response.json()['asset_groups']:
        risk_meters[meter['name']] = (
            meter['id'],
            meter['risk_meter_score'],
            meter['updated_at'],
            meter['querystring'],
        )
    return risk_meters
# Obtain and return the number of assets in a risk meter.
def get_assets_in_risk_meter(base_url, headers, query_string):
    """Count the assets matching query_string via the paged search API.

    Returns 100000 as a sentinel when the result set spans more than
    max_allowed_pages pages. Exits the process on a non-200 response.
    """
    max_allowed_pages = 20
    # Create the search URL with the provided query_string.
    search_assets_url = f"{base_url}assets/search?{query_string}&per_page=5000"
    # Invoke the search API (page 1).
    response = requests.get(search_assets_url, headers=headers)
    if response.status_code != 200:
        print(f"Search Assets Error: {response.status_code} with {search_assets_url}")
        sys.exit(1)
    # Obtain the asset information.
    resp_json = response.json()
    assets_resp = resp_json['assets']
    # Suss-out page information
    meta = resp_json['meta']
    num_pages = meta['pages']
    if num_pages > max_allowed_pages:
        return 100000
    asset_count = len(assets_resp)
    # Fetch the remaining pages. Bug fixes vs. the original:
    # - the loop condition was `page_num > max_allowed_pages`, which is
    #   never true for page_num starting at 2, so pages 2..N were never
    #   fetched;
    # - the page parameter was appended cumulatively to the URL
    #   (…&page=2&page=3…); each page now gets a fresh URL.
    for page_num in range(2, num_pages + 1):
        paged_url = f"{search_assets_url}&page={page_num}"
        # Invoke the search API.
        response = requests.get(paged_url, headers=headers)
        if response.status_code != 200:
            print(f"Search Assets Error: {response.status_code} with {paged_url}")
            sys.exit(1)
        # Obtain the asset information.
        asset_count += len(response.json()['assets'])
    return asset_count
if __name__ == "__main__":
    print("List Risk Meters")
    print("")
    # Obtain the Kenna Security API key from an environment variable.
    api_key = os.getenv('KENNA_API_KEY')
    if api_key is None:
        print("API key is non-existent")
        sys.exit(1)
    # HTTP headers.
    headers = {'X-Risk-Token': api_key,
               'Content-Type': 'application/json; charset=utf-8',
               'User-Agent': 'sample.list_risk_meters/1.0.0 (Cisco Secure)'}
    # You might have to change this depending on your deployment.
    base_url = "https://api.kennasecurity.com/"
    # Fetch all risk meters, then render one table row per meter with its
    # asset count (fetched per-meter via the search API).
    risk_meters = get_risk_meters(base_url, headers)
    print("")
    risk_meter_tbl = PrettyTable()
    risk_meter_tbl.field_names = ["Risk Meter Name", "ID", "Count", "Score", "Last Updated"]
    risk_meter_tbl.align["Risk Meter Name"] = "l"
    for risk_meter_name in risk_meters.keys():
        # Each value is a (id, score, updated_at, querystring) tuple built
        # by get_risk_meters().
        risk_meter_tuple = risk_meters[risk_meter_name]
        risk_meter_id = risk_meter_tuple[0]
        risk_meter_score = risk_meter_tuple[1]
        updated_at = risk_meter_tuple[2]
        query_string = risk_meter_tuple[3]
        #print(f"Processing: {risk_meter_name} with score {risk_meter_score}.")
        num_assets = get_assets_in_risk_meter(base_url, headers, query_string)
        risk_meter_tbl.add_row([risk_meter_name, risk_meter_id, num_assets, risk_meter_score, updated_at])
    print(risk_meter_tbl)
    print("")
|
#! /usr/bin/env python
import rospy, actionlib
from motion_planning.msg import PickUpPoseAction, PickUpPoseGoal
def test_client_client():
    """Connect to the 'pick' action server and send one hard-coded goal."""
    client = actionlib.SimpleActionClient('pick', PickUpPoseAction)
    client.wait_for_server()

    # Build the goal: a fixed grasp pose in the robot base frame.
    goal = PickUpPoseGoal()
    target = goal.object_pose
    target.header.frame_id = 'base_footprint'
    target.pose.position.x = 0.531774938258
    target.pose.position.y = -0.0489538018672
    target.pose.position.z = 0.859598292586
    target.pose.orientation.x = 0
    target.pose.orientation.y = 0
    target.pose.orientation.z = 0
    target.pose.orientation.w = 1.0

    # Fire-and-forget: the result is not awaited here.
    client.send_goal(goal)
if __name__ == '__main__':
    rospy.init_node('test_client')
    # Replaced the leftover keyboard-mash log message ("qmkdfjqmk")
    # with a meaningful one.
    rospy.loginfo("Starting pick action test client")
    rospy.sleep(3)  # give the action server time to come up
    test_client_client()
import unittest
from application import TestingConfig, create_app, db
from application.apps.data import app_list
class DbUnitTest(unittest.TestCase):
    """Database test fixture: a fresh app and empty schema per test."""

    def setUp(self):
        # New test client and tables for every test, so tests stay isolated.
        self.test_app = create_app(TestingConfig).test_client()
        db.create_all()

    def tearDown(self):
        # Drop everything created in setUp.
        db.session.remove()
        db.drop_all()

    def test_something(self):
        # Smoke test: the app factory produced a usable test client.
        # Bug fix: the original contained the leftover placeholder
        # `self.assertEqual(True, False)`, which failed unconditionally.
        self.assertIsNotNone(self.test_app)
|
from __future__ import absolute_import
from __future__ import print_function
from owmeta_core.data import DataUser
from .DataTestTemplate import _DataTest
from owmeta.cell import Cell
class CellTest(_DataTest):
    """Tests for the Cell data object: properties, identity, and lineage."""

    # Classes the test context is initialized with (consumed by _DataTest).
    ctx_classes = (Cell,)

    def test_DataUser(self):
        """A Cell is a DataUser."""
        do = Cell('', conf=self.config)
        self.assertTrue(isinstance(do, DataUser))

    def test_lineageName(self):
        """ Test that we can retrieve the lineage name """
        c = self.ctx.Cell(name="ADAL", conf=self.config)
        c.lineageName("AB plapaaaapp")
        self.save()
        self.assertEqual("AB plapaaaapp", self.ctx.Cell(name="ADAL").lineageName())

    def test_wormbaseID(self):
        """ Test that a Cell object has a wormbase ID """
        c = self.ctx.Cell(name="ADAL", conf=self.config)
        c.wormbaseID("WBbt:0004013")
        self.save()
        self.assertEqual("WBbt:0004013", self.ctx.Cell(name="ADAL").wormbaseID())

    def test_synonyms(self):
        """ Test that we can add and retrieve synonyms. """
        c = self.ctx.Cell(name="ADAL", conf=self.config)
        c.synonym("lineage name: ABplapaaaapp")
        self.save()
        self.assertEqual(
            set(["lineage name: ABplapaaaapp"]),
            self.ctx.Cell(name="ADAL").synonym())

    def test_same_name_same_id(self):
        """
        Test that two Cell objects with the same name have the same
        identifier
        Saves us from having too many inserts of the same object.
        """
        c = Cell(name="boots")
        c1 = Cell(name="boots")
        self.assertEqual(c.identifier, c1.identifier)

    def test_blast_space(self):
        """
        Test that setting the lineage name gives the blast cell.
        """
        # Space-separated lineage name: blast is the part before the space.
        c = self.ctx.Cell(name="carrots")
        c.lineageName("a tahsuetoahusenoatu")
        self.assertEqual(c.blast(), "a")

    def test_blast_dot(self):
        """
        Test that setting the lineage name gives the blast cell.
        """
        # Dot-separated lineage name: blast is the part before the dot.
        c = self.ctx.Cell(name="peas")
        c.lineageName("ab.tahsuetoahusenoatu")
        self.assertEqual(c.blast(), "ab")

    def test_daughterOf(self):
        """
        Test that we can get the daughterOf of a cell
        """
        p = self.ctx.Cell(name="peas")
        c = self.ctx.Cell(name="carrots")
        c.daughterOf(p)
        self.save()
        parent_p = self.ctx.Cell(name='carrots').daughterOf().name()
        self.assertEqual("peas", parent_p)

    def test_daughterOf_inverse(self):
        """
        Test that we can get the parent of a cell
        """
        # parentOf is the inverse property of daughterOf.
        p = self.ctx.Cell(name="peas")
        c = self.ctx.Cell(name="carrots")
        c.daughterOf(p)
        self.save()
        parent_p = set(x.name() for x in self.ctx.Cell(name='peas').parentOf())
        self.assertIn("carrots", parent_p)

    def test_str(self):
        """str() of a Cell is its name."""
        self.assertEqual('cell_name', str(Cell('cell_name')))
|
import os
import bpy
import bpy_extras
from .. import plugin, plugin_prefs, utils
from .. import context
from ..obj.exp import props as obj_exp_props
from . import imp
from . import exp
from ..version_utils import get_import_export_menus, assign_props, IS_28
class ImportDmContext(context.ImportMeshContext):
    """Import context for .dm files; adds no state beyond the mesh context."""

    def __init__(self):
        super().__init__()
class ExportDmContext(context.ExportMeshContext):
    """Export context for .dm files; adds no state beyond the mesh context."""

    def __init__(self):
        super().__init__()
# Properties for the .dm import operator. Kept in a dict so they can be
# assigned either via assign_props() (Blender 2.80+) or via the exec()
# loop in the operator class body (older Blender).
op_import_dm_props = {
    # Restrict the file browser to .dm files.
    'filter_glob': bpy.props.StringProperty(
        default='*.dm', options={'HIDDEN'}
    ),
    'directory': bpy.props.StringProperty(
        subtype="DIR_PATH", options={'SKIP_SAVE'}
    ),
    'filepath': bpy.props.StringProperty(
        subtype="FILE_PATH", options={'SKIP_SAVE'}
    ),
    # Multi-select support: one element per chosen file.
    'files': bpy.props.CollectionProperty(
        type=bpy.types.OperatorFileListElement, options={'SKIP_SAVE'}
    )
}
class OpImportDM(bpy.types.Operator, bpy_extras.io_utils.ImportHelper):
    """File-browser operator that imports X-Ray detail model (.dm) files."""

    bl_idname = 'xray_import.dm'
    bl_label = 'Import .dm'
    bl_description = 'Imports X-Ray Detail Models (.dm)'
    bl_options = {'REGISTER', 'UNDO'}

    if not IS_28:
        # Blender < 2.80 requires properties assigned as class attributes;
        # the exec keeps this in sync with op_import_dm_props.
        for prop_name, prop_value in op_import_dm_props.items():
            exec('{0} = op_import_dm_props.get("{0}")'.format(prop_name))

    @utils.set_cursor_state
    def execute(self, context):
        """Import every selected .dm file; report non-.dm selections."""
        textures_folder = plugin_prefs.get_preferences().textures_folder_auto
        if not textures_folder:
            self.report({'WARNING'}, 'No textures folder specified')
        if not self.files:
            self.report({'ERROR'}, 'No files selected')
            return {'CANCELLED'}
        import_context = ImportDmContext()
        import_context.textures_folder = textures_folder
        import_context.operator = self
        try:
            for file in self.files:
                ext = os.path.splitext(file.name)[-1].lower()
                if ext == '.dm':
                    imp.import_file(
                        os.path.join(self.directory, file.name),
                        import_context
                    )
                else:
                    self.report(
                        {'ERROR'},
                        # Bug fix: report the file name, not the repr of
                        # the OperatorFileListElement property object.
                        'Format of {} not recognised'.format(file.name)
                    )
        except utils.AppError as err:
            self.report({'ERROR'}, str(err))
            return {'CANCELLED'}
        return {'FINISHED'}
# Properties for the multi-object .dm export operator.
op_export_dms_props = {
    # Comma-separated list of object names to export (filled in invoke()).
    'detail_models': bpy.props.StringProperty(options={'HIDDEN'}),
    'directory': bpy.props.StringProperty(subtype="FILE_PATH"),
    'texture_name_from_image_path': obj_exp_props.PropObjectTextureNamesFromPath()
}
class OpExportDMs(bpy.types.Operator):
    """Batch exporter: writes each selected mesh object to its own .dm file."""

    bl_idname = 'xray_export.dms'
    bl_label = 'Export .dm'

    if not IS_28:
        # Blender < 2.80 requires properties assigned as class attributes.
        for prop_name, prop_value in op_export_dms_props.items():
            exec('{0} = op_export_dms_props.get("{0}")'.format(prop_name))

    @utils.execute_with_logger
    @utils.set_cursor_state
    def execute(self, context):
        """Export every object named in self.detail_models to self.directory."""
        try:
            for name in self.detail_models.split(','):
                detail_model = context.scene.objects[name]
                if not name.lower().endswith('.dm'):
                    name += '.dm'
                path = self.directory
                export_context = ExportDmContext()
                export_context.texname_from_path = self.texture_name_from_image_path
                # Bug fix: the exporter module is imported as 'exp';
                # 'model_exp' was an undefined name and raised NameError.
                exp.export_file(
                    detail_model, os.path.join(path, name), export_context
                )
        except utils.AppError as err:
            self.report({'ERROR'}, str(err))
            return {'CANCELLED'}
        return {'FINISHED'}

    def invoke(self, context, event):
        """Delegate single-object export to xray_export.dm; otherwise pick a directory."""
        prefs = plugin_prefs.get_preferences()
        self.texture_name_from_image_path = \
            prefs.object_texture_names_from_path
        objs = context.selected_objects
        if not objs:
            self.report({'ERROR'}, 'Cannot find selected object')
            return {'CANCELLED'}
        if len(objs) == 1:
            if objs[0].type != 'MESH':
                # Consistency fix: same wording as OpExportDM.invoke().
                self.report({'ERROR'}, 'The selected object is not a mesh')
                return {'CANCELLED'}
            else:
                bpy.ops.xray_export.dm('INVOKE_DEFAULT')
        else:
            self.detail_models = ','.join(
                [o.name for o in objs if o.type == 'MESH']
            )
            context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
# Default extension used by the single-object export operator below.
filename_ext = '.dm'
# Properties for the single-object .dm export operator.
op_export_dm_props = {
    # Name of the object to export (filled in invoke()).
    'detail_model': bpy.props.StringProperty(options={'HIDDEN'}),
    'filter_glob': bpy.props.StringProperty(
        default='*'+filename_ext, options={'HIDDEN'}
    ),
    'texture_name_from_image_path': obj_exp_props.PropObjectTextureNamesFromPath()
}
class OpExportDM(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
    """Exports a single selected mesh object as an X-Ray detail model."""

    bl_idname = 'xray_export.dm'
    bl_label = 'Export .dm'
    filename_ext = '.dm'

    if not IS_28:
        # Blender < 2.80 requires properties assigned as class attributes.
        for prop_name, prop_value in op_export_dm_props.items():
            exec('{0} = op_export_dm_props.get("{0}")'.format(prop_name))

    @utils.execute_with_logger
    @utils.set_cursor_state
    def execute(self, context):
        """Export the object named by self.detail_model to self.filepath."""
        try:
            self.exp(context.scene.objects[self.detail_model], context)
        except utils.AppError as err:
            self.report({'ERROR'}, str(err))
            return {'CANCELLED'}
        return {'FINISHED'}

    def exp(self, bpy_obj, context):
        # Build an export context carrying the texture-naming preference.
        export_context = ExportDmContext()
        export_context.texname_from_path = self.texture_name_from_image_path
        exp.export_file(bpy_obj, self.filepath, export_context)

    def invoke(self, context, event):
        """Validate that exactly one mesh object is selected, then open the file browser."""
        prefs = plugin_prefs.get_preferences()
        self.texture_name_from_image_path = \
            prefs.object_texture_names_from_path
        objs = context.selected_objects
        if not objs:
            self.report({'ERROR'}, 'Cannot find selected object')
            return {'CANCELLED'}
        if len(objs) > 1:
            self.report({'ERROR'}, 'Too many selected objects found')
            return {'CANCELLED'}
        if objs[0].type != 'MESH':
            self.report({'ERROR'}, 'The selected object is not a mesh')
            return {'CANCELLED'}
        self.detail_model = objs[0].name
        # Seed the file browser with the object name; ExportHelper
        # appends filename_ext.
        self.filepath = self.detail_model
        return super().invoke(context, event)
# Attach the property dicts to their operator classes (Blender 2.80+ path;
# pre-2.80 assignment is done by the exec() loops in each class body).
assign_props([
    (op_import_dm_props, OpImportDM),
    (op_export_dms_props, OpExportDMs),
    (op_export_dm_props, OpExportDM)
])
def menu_func_import(self, context):
    """Draw the import-menu entry for X-Ray detail models."""
    stalker_icon = plugin.get_stalker_icon()
    layout = self.layout
    layout.operator(
        OpImportDM.bl_idname,
        text='X-Ray detail model (.dm)',
        icon_value=stalker_icon,
    )
def menu_func_export(self, context):
    """Draw the export-menu entry for X-Ray detail models."""
    stalker_icon = plugin.get_stalker_icon()
    layout = self.layout
    layout.operator(
        OpExportDMs.bl_idname,
        text='X-Ray detail model (.dm)',
        icon_value=stalker_icon,
    )
def register_operators():
    """Register the .dm import/export operator classes with Blender."""
    # NOTE(review): unregister_operators() removes menu_func_import/export
    # from the menus, but they are not appended here — presumably the
    # append happens elsewhere in the plugin; confirm.
    bpy.utils.register_class(OpImportDM)
    bpy.utils.register_class(OpExportDM)
    bpy.utils.register_class(OpExportDMs)
def unregister_operators():
    """Remove the menu entries and unregister the operator classes."""
    import_menu, export_menu = get_import_export_menus()
    export_menu.remove(menu_func_export)
    import_menu.remove(menu_func_import)
    # Unregister in reverse order of registration.
    bpy.utils.unregister_class(OpExportDMs)
    bpy.utils.unregister_class(OpExportDM)
    bpy.utils.unregister_class(OpImportDM)
|
"""
This contains wrapper functions that simplify plotting raster
and vector data for publication-ready figures.
The documentation of the examples can be found here:
https://lsdtopotools.github.io/LSDTopoTools_ChiMudd2014/
Simon Mudd and Fiona Clubb, January 2018
Released under GPL3
"""
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib
from matplotlib import rcParams
"""
IMPORTANT: You must call this function from a lower level directory
where both LSDPlottingTools and LSDMapFigure are in the python path!
That is, it will not work if you call it from outside the directory structure.
"""
import LSDPlottingTools as LSDP
import LSDPlottingTools.LSDMap_PointTools as LSDMap_PD
from LSDMapFigure.PlottingRaster import MapFigure
import LSDMapFigure.PlottingHelpers as PlotHelp
import LSDPlottingTools.LSDMap_ChiPlotting as LSDCP
#import LSDPlottingTools.adjust_text
def PrintChiChannels(DataDirectory,fname_prefix, ChannelFileName, add_basin_labels = True, cmap = "jet", cbar_loc = "right", size_format = "ESURF", fig_format = "png", dpi = 250,plotting_column = "source_key",discrete_colours = False, NColours = 10, out_fname_prefix = ""):
    """
    This function prints a channel map over a hillshade.

    Args:
        DataDirectory (str): the data directory with the m/n csv files
        fname_prefix (str): The prefix for the m/n csv files
        ChannelFileName (str): The name of the channel file (a csv) without path but with extension
        add_basin_labels (bool): If true, label the basins with text. Otherwise use a colourbar.
        cmap (str or colourmap): The colourmap to use for the plot
        cbar_loc (str): where you want the colourbar. Options are none, left, right, top and bottom. The colourbar will be of the elevation.
            If you want only a hillshade set to none and the cmap to "gray"
            NOTE(review): this argument is currently not used anywhere in
            the function body (the map is created with
            colourbar_location = "None") — confirm whether it should be
            passed through.
        size_format (str): Either geomorphology or big. Anything else gets you a 4.9 inch wide figure (standard ESURF size)
        fig_format (str): An image format. png, pdf, eps, svg all valid
        dpi (int): The dots per inch of the figure
        plotting_column (str): the name of the column to plot
        discrete_colours (bool): if true use a discrete colourmap
        NColours (int): the number of colours to cycle through when making the colourmap
        out_fname_prefix (str): The prefix of the image file. If blank uses the fname_prefix

    Returns:
        Shaded relief plot with the channels coloured by a plotting column designated by the plotting_column keyword. Uses a colourbar to show each basin

    Author: SMM
    """
    # specify the figure size and format
    # set figure sizes based on format
    if size_format == "geomorphology":
        fig_size_inches = 6.25
    elif size_format == "big":
        fig_size_inches = 16
    else:
        fig_size_inches = 4.92126
    ax_style = "Normal"
    # Get the filenames you want
    BackgroundRasterName = fname_prefix+"_hs.bil"
    DrapeRasterName = fname_prefix+".bil"
    chi_csv_fname = DataDirectory+ChannelFileName
    thisPointData = LSDMap_PD.LSDMap_PointData(chi_csv_fname)
    # clear the plot
    plt.clf()
    # set up the base image and the map
    MF = MapFigure(BackgroundRasterName, DataDirectory, coord_type="UTM_km",colourbar_location = "None")
    MF.add_drape_image(DrapeRasterName,DataDirectory,colourmap = "gray", alpha = 0.6)
    # Channel points are scaled by drainage area (log scale).
    MF.add_point_data(thisPointData,column_for_plotting = plotting_column,this_colourmap = cmap,
                      scale_points = True,column_for_scaling = "drainage_area",
                      scaled_data_in_log = True,
                      max_point_size = 5, min_point_size = 1,discrete_colours = discrete_colours, NColours = NColours)
    # Save the image
    if len(out_fname_prefix) == 0:
        ImageName = DataDirectory+fname_prefix+"_chi_channels."+fig_format
    else:
        ImageName = DataDirectory+out_fname_prefix+"_chi_channels."+fig_format
    MF.save_fig(fig_width_inches = fig_size_inches, FigFileName = ImageName, axis_style = ax_style, FigFormat=fig_format, Fig_dpi = dpi)
def PrintChiChannelsAndBasins(DataDirectory,fname_prefix, ChannelFileName, add_basin_labels = True, cmap = "jet", cbar_loc = "right", size_format = "ESURF", fig_format = "png", dpi = 250,plotting_column = "source_key",discrete_colours = False, NColours = 10, colour_log = True, colorbarlabel = "Colourbar", Basin_remove_list = [], Basin_rename_dict = {} , value_dict = {}, out_fname_prefix = "", show_basins = True, min_channel_point_size = 0.5, max_channel_point_size = 2):
    """
    This function prints a channel map over a hillshade, with basins.

    Args:
        DataDirectory (str): the data directory with the m/n csv files
        fname_prefix (str): The prefix for the m/n csv files
        ChannelFileName (str): The name of the channel file (a csv) without path but with extension
        add_basin_labels (bool): If true, label the basins with text. Otherwise use a colourbar.
        cmap (str or colourmap): The colourmap to use for the plot
        cbar_loc (str): where you want the colourbar. Options are none, left, right, top and bottom. The colourbar will be of the elevation.
            If you want only a hillshade set to none and the cmap to "gray"
        size_format (str): Either geomorphology or big. Anything else gets you a 4.9 inch wide figure (standard ESURF size)
        fig_format (str): An image format. png, pdf, eps, svg all valid
        dpi (int): The dots per inch of the figure
        plotting_column (str): the name of the column to plot
        discrete_colours (bool): if true use a discrete colourmap
        NColours (int): the number of colours to cycle through when making the colourmap
        colour_log (bool): If true the colours are in log scale
        colorbarlabel (str): the label on the channel-data colourbar
        Basin_remove_list (list): A lists containing either key or junction indices of basins you want to remove from plotting
        Basin_rename_dict (dict): A dict where the key is either basin key or junction index, and the value is a new name for the basin denoted by the key
        value_dict (dict): values used to label basins, passed to add_basin_plot
        out_fname_prefix (str): The prefix of the image file. If blank uses the fname_prefix
        show_basins (bool): If true, plot the basins
        min_channel_point_size (float): The minimum size of a channel point in points
        max_channel_point_size (float): The maximum size of a channel point in points

    Returns:
        Shaded relief plot with the basins coloured by basin ID. Includes channels. These can be plotted by various metrics denoted by the plotting_column parameter.

    Author: SMM
    """
    # specify the figure size and format
    # set figure sizes based on format
    if size_format == "geomorphology":
        fig_size_inches = 6.25
    elif size_format == "big":
        fig_size_inches = 16
    else:
        fig_size_inches = 4.92126
    ax_style = "Normal"
    # get the basin IDs to make a discrete colourmap for each ID
    BasinInfoDF = PlotHelp.ReadBasinInfoCSV(DataDirectory, fname_prefix)
    basin_keys = list(BasinInfoDF['basin_key'])
    basin_keys = [int(x) for x in basin_keys]
    basin_junctions = list(BasinInfoDF['outlet_junction'])
    basin_junctions = [float(x) for x in basin_junctions]
    print ('Basin keys are: ')
    print (basin_keys)
    # going to make the basin plots - need to have bil extensions.
    print("I'm going to make the basin plots. Your topographic data must be in ENVI bil format or I'll break!!")
    # get the rasters
    raster_ext = '.bil'
    #BackgroundRasterName = fname_prefix+raster_ext
    HillshadeName = fname_prefix+'_hs'+raster_ext
    BasinsName = fname_prefix+'_AllBasins'+raster_ext
    print (BasinsName)
    Basins = LSDP.GetBasinOutlines(DataDirectory, BasinsName)
    # Bug fix: this assignment was duplicated in the original.
    chi_csv_fname = DataDirectory+ChannelFileName
    thisPointData = LSDMap_PD.LSDMap_PointData(chi_csv_fname)
    #thisPointData.ThinDataSelection("basin_key",[10])
    thisPointData.selectValue("basin_key",value = Basin_remove_list, operator = "!=")
    #print("The new point data is:")
    #print(thisPointData.GetLongitude())
    # clear the plot
    plt.clf()
    # set up the base image and the map
    print("I am showing the basins without text labels.")
    MF = MapFigure(HillshadeName, DataDirectory,coord_type="UTM_km", colourbar_location="None")
    # This adds the basins
    if show_basins:
        MF.add_basin_plot(BasinsName,fname_prefix,DataDirectory, mask_list = Basin_remove_list, rename_dict = Basin_rename_dict, value_dict = value_dict, label_basins = add_basin_labels, show_colourbar = False,
                          colourmap = "gray")
    # NOTE(review): channel points are only drawn when discrete_colours is
    # True — with the default (False) no channels appear on the figure.
    # Confirm whether this is intended.
    if discrete_colours:
        print("I am printing discrete colours.")
        MF.add_point_data(thisPointData,column_for_plotting = plotting_column,
                          scale_points = True,column_for_scaling = "drainage_area", show_colourbar = True, colourbar_location = cbar_loc,
                          colorbarlabel = colorbarlabel, this_colourmap = cmap,
                          scaled_data_in_log = True,
                          max_point_size = max_channel_point_size, min_point_size = min_channel_point_size,zorder=10, colour_log = colour_log, discrete_colours = discrete_colours, NColours = NColours)
    # Save the image
    if len(out_fname_prefix) == 0:
        ImageName = DataDirectory+fname_prefix+"_chi_channels_and_basins."+fig_format
    else:
        ImageName = DataDirectory+out_fname_prefix+"_chi_channels_and_basins."+fig_format
    MF.save_fig(fig_width_inches = fig_size_inches, FigFileName = ImageName, axis_style = ax_style, FigFormat=fig_format, Fig_dpi = dpi)
def PrintChiCoordChannelsAndBasins(DataDirectory,fname_prefix, ChannelFileName, add_basin_labels = True, cmap = "cubehelix", cbar_loc = "right", size_format = "ESURF", fig_format = "png", dpi = 250,plotting_column = "source_key",discrete_colours = False, NColours = 10, colour_log = True, colorbarlabel = "Colourbar", Basin_remove_list = [], Basin_rename_dict = {} , value_dict = {}, plot_chi_raster = False, out_fname_prefix = "", show_basins = True, min_channel_point_size = 0.5, max_channel_point_size = 2):
    """
    This function prints a channel map over a hillshade, optionally draping a
    masked chi coordinate raster between the hillshade and the channel points.

    Args:
        DataDirectory (str): the data directory with the m/n csv files
        fname_prefix (str): The prefix for the m/n csv files
        ChannelFileName (str): The name of the csv file with the channel point data
        add_basin_labels (bool): If true, label the basins with text. Otherwise use a colourbar.
        cmap (str or colourmap): The colourmap to use for the plot
        cbar_loc (str): where you want the colourbar. Options are none, left, right, top and bottom.
            If you want only a hillshade set to none and the cmap to "gray"
        size_format (str): Either geomorphology or big. Anything else gets you a 4.9 inch wide figure (standard ESURF size)
        fig_format (str): An image format. png, pdf, eps, svg all valid
        dpi (int): The dots per inch of the figure
        plotting_column (str): the name of the column to plot
        discrete_colours (bool): if true use a discrete colourmap
        NColours (int): the number of colours to cycle through when making the colourmap
        colour_log (bool): If true the colours are in log scale
        colorbarlabel (str): the text label attached to the colourbar
        Basin_remove_list (list): A list containing either keys or junction indices of basins you want to remove from plotting
        Basin_rename_dict (dict): A dict where the key is either basin key or junction index, and the value is a new name for the basin denoted by the key
        value_dict (dict): passed through to the basin plotting routine to label basins by value
        plot_chi_raster (bool): If true, drape the masked chi raster and draw basin outlines only
        out_fname_prefix (str): The prefix of the image file. If blank uses the fname_prefix
        show_basins (bool): If true, plot the basins
        min_channel_point_size (float): The minimum size of a channel point in points
        max_channel_point_size (float): The maximum size of a channel point in points

    Returns:
        Shaded relief plot with the basins coloured by basin ID. Includes channels,
        which are coloured by the metric denoted by the plotting_column parameter.

    Author: SMM
    """
    # set figure sizes based on format
    if size_format == "geomorphology":
        fig_size_inches = 6.25
    elif size_format == "big":
        fig_size_inches = 16
    else:
        fig_size_inches = 4.92126
    ax_style = "Normal"

    # get the basin IDs to make a discrete colourmap for each ID
    BasinInfoDF = PlotHelp.ReadBasinInfoCSV(DataDirectory, fname_prefix)
    basin_keys = [int(x) for x in BasinInfoDF['basin_key']]
    basin_junctions = [float(x) for x in BasinInfoDF['outlet_junction']]
    print('Basin keys are: ')
    print(basin_keys)

    # going to make the basin plots - need to have bil extensions.
    print("I'm going to make the basin plots. Your topographic data must be in ENVI bil format or I'll break!!")

    # get the rasters
    raster_ext = '.bil'
    HillshadeName = fname_prefix+'_hs'+raster_ext
    BasinsName = fname_prefix+'_AllBasins'+raster_ext
    ChiCoordName = fname_prefix+'_Maskedchi'+raster_ext
    print(BasinsName)
    Basins = LSDP.GetBasinOutlines(DataDirectory, BasinsName)

    # BUGFIX: the filename was previously assigned twice in a row; once is enough.
    chi_csv_fname = DataDirectory+ChannelFileName
    thisPointData = LSDMap_PD.LSDMap_PointData(chi_csv_fname)

    # Remove data that has nodata values, then mask out any unwanted basins
    thisPointData.selectValue(plotting_column, value = -9999, operator = "!=")
    thisPointData.selectValue("basin_key", value = Basin_remove_list, operator = "!=")

    # clear the plot
    plt.clf()

    # set up the base image and the map
    print("I am showing the basins without text labels.")
    MF = MapFigure(HillshadeName, DataDirectory, coord_type="UTM_km", colourbar_location="None")

    if plot_chi_raster:
        # Basin outlines only, with the masked chi raster draped between the
        # hillshade and the channel points
        if show_basins:
            MF.add_basin_plot(BasinsName, fname_prefix, DataDirectory, mask_list = Basin_remove_list,
                              rename_dict = Basin_rename_dict, value_dict = value_dict,
                              label_basins = add_basin_labels, show_colourbar = False,
                              colourmap = "gray", alpha = 1, outlines_only = True)
        MF.add_drape_image(ChiCoordName, DataDirectory, colourmap = "cubehelix", alpha=0.6, zorder = 0.5)
        MF.add_point_data(thisPointData, column_for_plotting = plotting_column,
                          scale_points = True, column_for_scaling = "drainage_area",
                          show_colourbar = True, colourbar_location = cbar_loc,
                          colorbarlabel = colorbarlabel, this_colourmap = cmap,
                          scaled_data_in_log = True,
                          max_point_size = max_channel_point_size, min_point_size = min_channel_point_size,
                          zorder=0.4, colour_log = colour_log,
                          discrete_colours = discrete_colours, NColours = NColours)
    else:
        if show_basins:
            MF.add_basin_plot(BasinsName, fname_prefix, DataDirectory, mask_list = Basin_remove_list,
                              rename_dict = Basin_rename_dict, value_dict = value_dict,
                              label_basins = add_basin_labels, show_colourbar = False,
                              colourmap = "gray", alpha = 0.7, outlines_only = False)
        # BUGFIX: point sizes were hard-coded to 2/0.5 here, silently ignoring the
        # min/max_channel_point_size parameters (the plot_chi_raster branch used
        # them). Defaults are unchanged so existing callers see the same output.
        MF.add_point_data(thisPointData, column_for_plotting = plotting_column,
                          scale_points = True, column_for_scaling = "drainage_area",
                          show_colourbar = True, colourbar_location = cbar_loc,
                          colorbarlabel = colorbarlabel, this_colourmap = cmap,
                          scaled_data_in_log = True,
                          max_point_size = max_channel_point_size, min_point_size = min_channel_point_size,
                          zorder=10, colour_log = colour_log,
                          discrete_colours = discrete_colours, NColours = NColours)

    # Save the image
    if len(out_fname_prefix) == 0:
        ImageName = DataDirectory+fname_prefix+"_chicoord_and_basins."+fig_format
    else:
        ImageName = DataDirectory+out_fname_prefix+"_chicoord_and_basins."+fig_format
    MF.save_fig(fig_width_inches = fig_size_inches, FigFileName = ImageName, axis_style = ax_style, FigFormat=fig_format, Fig_dpi = dpi)
def PrintChiStacked(DataDirectory,fname_prefix, ChannelFileName, cmap = "jet", cbar_loc = "bottom", size_format = "ESURF", fig_format = "png", dpi = 250,plotting_column = "source_key",discrete_colours = False, NColours = 10,colorbarlabel = "Colourbar", axis_data_name = "chi", plot_data_name = "m_chi", plotting_data_format = 'log', Basin_select_list = [], Basin_rename_dict = {}, out_fname_prefix = "", first_basin = 0, last_basin = 0, figure_aspect_ratio = 2, X_offset = 5, rotate_labels=False):
    """
    This function prints chi profiles with stacks of chi or flow distance.

    Args:
        DataDirectory (str): the data directory with the m/n csv files
        fname_prefix (str): The prefix for the m/n csv files
        ChannelFileName (str): The name of the csv file with the channel point data
        cmap (str or colourmap): The colourmap to use for the plot
        cbar_loc (str): where you want the colourbar. Options are none, left, right, top and bottom.
        size_format (str): Either geomorphology or big. Anything else gets you a 4.9 inch wide figure (standard ESURF size)
        fig_format (str): An image format. png, pdf, eps, svg all valid
        dpi (int): The dots per inch of the figure
        plotting_column (str): the name of the column to plot
        discrete_colours (bool): if true use a discrete colourmap
        NColours (int): the number of colours to cycle through when making the colourmap
        colorbarlabel (str): the text label attached to the colourbar
        axis_data_name (str): the data used as the x axis ("chi" or "flow_distance")
        plot_data_name (str): the data name used to colour the plot
        plotting_data_format (str): 'log' to plot the colour data in log space
        Basin_select_list (list): basin keys to plot, in order; empty list means all
        Basin_rename_dict (dict): A dict where the key is either basin key or junction index, and the value is a new name for the basin denoted by the key
        out_fname_prefix (str): The prefix of the image file. If blank uses the fname_prefix
        first_basin (int): the first basin to plot
        last_basin (int): the last basin to plot
        figure_aspect_ratio (float): the width/height ratio of the figure
        X_offset (float): the offset between successive stacked profiles
        rotate_labels (bool): if true, rotate the basin labels

    Returns:
        Plots of chi or flow distance profiles

    Author: SMM
    """
    # get the basin IDs (read only for reporting; the stacking routine rereads
    # what it needs from the csv)
    BasinInfoDF = PlotHelp.ReadBasinInfoCSV(DataDirectory, fname_prefix)
    basin_keys = [int(x) for x in BasinInfoDF['basin_key']]
    print('Basin keys are: ')
    print(basin_keys)

    chi_csv_fname = DataDirectory+ChannelFileName

    # Name of the saved image
    if len(out_fname_prefix) == 0:
        ImageName = DataDirectory+fname_prefix+"_stacked_chi."+fig_format
    else:
        ImageName = DataDirectory+out_fname_prefix+"_stacked_chi."+fig_format

    # Flow distance offsets are in metres, so a chi-sized offset is almost
    # certainly a mistake; substitute a sensible default.
    if axis_data_name == "flow_distance" and X_offset <= 10:
        print("WARNING! You have a weird flow distance offset. I think it is the chi offset. Check your offset.")
        x_offset = 50000
    else:
        x_offset = X_offset

    print("About to go into the stacks. My x_offset is: " +str(x_offset)+ ", and my rename dict is:" )
    print(Basin_rename_dict)
    LSDCP.StackedProfilesGradient(chi_csv_fname, FigFileName = ImageName,
                                  FigFormat = fig_format, elevation_threshold = 0,
                                  first_basin = first_basin, last_basin = last_basin, basin_order_list = Basin_select_list,
                                  basin_rename_dict = Basin_rename_dict,
                                  this_cmap = cmap, axis_data_name = axis_data_name, colour_data_name = plot_data_name,
                                  discrete_colours = discrete_colours, NColours = NColours,
                                  colorbarlabel = colorbarlabel, cbar_loc = cbar_loc, X_offset = x_offset,
                                  plotting_data_format = plotting_data_format,
                                  label_sources = False, source_thinning_threshold = 0,
                                  size_format = size_format, aspect_ratio = figure_aspect_ratio, dpi = dpi, rotate_labels=rotate_labels)
def PrintMultipleStacked(DataDirectory,fname_prefix, ChannelFileNameList, cmap = "jet", cbar_loc = "bottom", size_format = "ESURF", fig_format = "png", dpi = 250,discrete_colours = False, NColours = 10,colorbarlabel = "Colourbar", axis_data_name = "chi", plotting_data_format = 'log', Basin_select_list = [], Basin_rename_dict = {}, out_fname_prefix = "", first_basin = 0, last_basin = 0, figure_aspect_ratio = 2, X_offset = 5, rotate_labels=False):
    """
    This function takes a list of channel files and converts them to a single
    stacked profile plot, colouring each profile by the file it came from.

    Args:
        DataDirectory (str): the data directory with the m/n csv files
        fname_prefix (str): The prefix for the m/n csv files
        ChannelFileNameList (list): A list of strings with the full paths to the csv files containing the profile data
        cmap (str or colourmap): The colourmap to use for the plot
        cbar_loc (str): where you want the colourbar. Options are none, left, right, top and bottom.
        size_format (str): Either geomorphology or big. Anything else gets you a 4.9 inch wide figure (standard ESURF size)
        fig_format (str): An image format. png, pdf, eps, svg all valid
        dpi (int): The dots per inch of the figure
        discrete_colours (bool): if true use a discrete colourmap
        NColours (int): the number of colours to cycle through when making the colourmap
        colorbarlabel (str): the text label attached to the colourbar
        axis_data_name (str): the data used as the x axis ("chi" or "flow_distance")
        plotting_data_format (str): 'log' to plot the colour data in log space
        Basin_select_list (list): basin keys to plot, in order; empty list means all
        Basin_rename_dict (dict): A dict where the key is either basin key or junction index, and the value is a new name for the basin denoted by the key
        out_fname_prefix (str): The prefix of the image file. If blank uses the fname_prefix
        first_basin (int): the first basin to plot
        last_basin (int): the last basin to plot
        figure_aspect_ratio (float): the width/height ratio of the figure
        X_offset (float): the offset between successive stacked profiles
        rotate_labels (bool): if true, rotate the basin labels

    Returns:
        Plots of chi or flow distance profiles stacked on top of each other
    """
    # We need to import this because we need to convert point formats
    import LSDPlottingTools.LSDMap_PointTools as LSDMap_PD

    print("Let me print some multiply stacked profile plots for you")

    # Name of the saved image
    if len(out_fname_prefix) == 0:
        ImageName = DataDirectory+fname_prefix+"_Multistacked_chi."+fig_format
    else:
        ImageName = DataDirectory+out_fname_prefix+"_Multistacked_chi."+fig_format

    # Flow distance offsets are in metres, so a chi-sized offset is almost
    # certainly a mistake; substitute a sensible default.
    if axis_data_name == "flow_distance" and X_offset <= 10:
        print("WARNING! You have a weird flow distance offset. I think it is the chi offset. Check your offset.")
        x_offset = 50000
    else:
        x_offset = X_offset

    # Reformat the data into a single data frame, tagging every row with the
    # index of the file it came from, then write it out as one csv.
    PD_list = []
    for i, fname in enumerate(ChannelFileNameList):
        new_DF = pd.read_csv(fname, sep=",")
        n_data = len(new_DF["elevation"].tolist())
        new_DF['file_index'] = pd.Series([i]*n_data, index=new_DF.index)
        PD_list.append(new_DF)

    new_csv = DataDirectory+fname_prefix+"_concat_chi.csv"
    print("Printing to")
    print(new_csv)

    # BUGFIX: previously only the first two dataframes were concatenated
    # (pd.concat([PD_list[0], PD_list[1]])), silently dropping any further
    # files in ChannelFileNameList. Concatenate them all.
    concat_DF = pd.concat(PD_list)
    concat_DF.to_csv(path_or_buf=new_csv, index=False)

    chi_csv_fname = new_csv
    # Colour the stacked profiles by the originating file
    plot_data_name = "file_index"

    LSDCP.StackedProfilesGradient(chi_csv_fname, FigFileName = ImageName, FigFormat = fig_format, elevation_threshold = 0, first_basin = first_basin, last_basin = last_basin, basin_order_list = Basin_select_list, basin_rename_dict = Basin_rename_dict, this_cmap = cmap, axis_data_name = axis_data_name, colour_data_name = plot_data_name, discrete_colours = discrete_colours, NColours = NColours, colorbarlabel = colorbarlabel, cbar_loc = cbar_loc, X_offset = x_offset, plotting_data_format = plotting_data_format, label_sources = False, source_thinning_threshold = 0, size_format = size_format, aspect_ratio = figure_aspect_ratio, dpi = dpi, rotate_labels=rotate_labels)
|
import os
import sys
import logging
from databases import Database
from yoyo import read_migrations, get_backend
logger = logging.getLogger("uvicorn.default")

try:
    # BUGFIX: os.getenv() returns None for missing variables and never raises,
    # so the KeyError handler below was dead code and a missing variable
    # silently produced a url containing "None". os.environ[...] raises
    # KeyError as this block clearly intended.
    DATABASE_USER = os.environ["POSTGRES_USER"]
    DATABASE_PASS = os.environ["POSTGRES_PASSWORD"]
    DATABASE_NAME = os.environ["POSTGRES_DB"]
except KeyError as ke:
    logger.error(f"Environment variables must be defined: {ke}")
    sys.exit(1)

# NOTE(review): host "bkdb" and port 5432 are hard-coded — presumably the
# docker-compose service name; confirm against the deployment config.
url = f"postgres://{DATABASE_USER}:{DATABASE_PASS}@bkdb:5432/{DATABASE_NAME}"
database = Database(url)
async def init_db():
    """Open the module-level ``database`` connection, then apply pending migrations."""
    logger.info(f"Connecting to database")
    await database.connect()
    # run schema migrations once the connection is known to work
    migrate()
def migrate():
    """Apply all outstanding yoyo migrations to the database at ``url``."""
    logger.info("Applying migrations to database")
    # NOTE(review): "./migrations" is resolved relative to the process working
    # directory, not this file — confirm the service is started from the repo root.
    migrations = read_migrations("./migrations")
    backend = get_backend(url)
    # the backend lock guards against two processes migrating concurrently
    with backend.lock():
        backend.apply_migrations(backend.to_apply(migrations))
|
#!/usr/bin/env python3
"""
Solve any size rubiks cube:
- For 2x2x2 and 3x3x3 just solve it
- For 4x4x4 and larger, reduce to 3x3x3 and then solve
This is a work in progress
"""
# standard libraries
import argparse
import datetime as dt
import logging
import resource
import sys
from math import sqrt
from pprint import pformat
from statistics import median
# rubiks cube libraries
from rubikscubennnsolver import ImplementThis, InvalidCubeReduction, SolveError, StuckInALoop, reverse_steps
from rubikscubennnsolver.LookupTable import NoPruneTableState, NoSteps
from rubikscubennnsolver.RubiksSide import NotSolving
# Bail out early on unsupported interpreters
if sys.version_info < (3, 4):
    raise SystemError("Must be using Python 3.4 or higher")

logging.basicConfig(level=logging.INFO, format="%(asctime)s %(filename)22s %(levelname)8s: %(message)s")
log = logging.getLogger(__name__)

log.info("rubiks-cube-solver.py begin")
# wall-clock start; elapsed time is logged at the end of the solve
start_time = dt.datetime.now()
parser = argparse.ArgumentParser()
# --state is the flattened sticker string for the cube; its length determines
# the cube size (size = sqrt(len/6))
parser.add_argument(
    "--state",
    type=str,
    help="Cube state",
    # no longer used
    # parser.add_argument('--test', default=False, action='store_true')
    # 2x2x2
    # default='DLRRFULLDUBFDURDBFBRBLFU')
    # default='UUUURRRRFFFFDDDDLLLLBBBB')
    # 3x3x3
    # default='RRBBUFBFBRLRRRFRDDURUBFBBRFLUDUDFLLFFLLLLDFBDDDUUBDLUU')
    # default='UUUUUUUUURRRRRRRRRFFFFFFFFFDDDDDDDDDLLLLLLLLLBBBBBBBBB') # solved
    # 4x4x4
    # default='DRFDFRUFDURDDLLUFLDLLBLULFBUUFRBLBFLLUDDUFRBURBBRBDLLDURFFBBRUFUFDRFURBUDLDBDUFFBUDRRLDRBLFBRRLB') # xyzzy test cube
    # default='FLDFDLBDFBLFFRRBDRFRRURBRDUBBDLURUDRRBFFBDLUBLUULUFRRFBLDDUULBDBDFLDBLUBFRFUFBDDUBFLLRFLURDULLRU') # TPR cube
    default="BRBLLLBRDLBBDDRRFUDFUDUDFUDDDRURBBBUUDRLFRDLLFBRFLRFLFFFBRULDRUBUBBLDBFRDLLUBUDDULFLRRFLFUBFUFUR",
)
parser.add_argument("--print-steps", default=False, action="store_true", help="Display animated step-by-step solution")
parser.add_argument("--debug", default=False, action="store_true", help="set loglevel to DEBUG")
parser.add_argument("--no-comments", default=False, action="store_true", help="No comments in alg.cubing.net url")

# CPU mode
parser.add_argument(
    "--min-memory",
    default=False,
    action="store_true",
    help="Load smaller tables to use less memory...takes longer to run",
)
# NOTE(review): --fast defaults to True, so --slow/--normal only win because
# they are checked first in the cpu_mode if/elif chain below
parser.add_argument("--fast", default=True, action="store_true", help="Find a solution quickly")
parser.add_argument("--normal", default=False, action="store_true", help="Find a shorter solution but takes longer")
parser.add_argument("--slow", default=False, action="store_true", help="Find shortest solution we can, takes a while")
# NOTE(review): this mutually-exclusive group is created but never populated —
# the three flags above were presumably meant to be added to it; confirm.
action = parser.add_mutually_exclusive_group(required=False)
# NOTE(review): --openwith help text looks copy-pasted from --colormap; per its
# use below it is a whitespace-separated list of moves applied before solving.
parser.add_argument("--openwith", default=None, type=str, help="Colors for sides U, L, etc")
parser.add_argument("--colormap", default=None, type=str, help="Colors for sides U, L, etc")
parser.add_argument("--order", type=str, default="URFDLB", help="order of sides in --state, default kociemba URFDLB")
parser.add_argument("--solution333", type=str, default=None, help="cube explorer optimal steps for solving 3x3x3")
# default='UUUUUUUUUUUUUUUURRRRRRRRRRRRRRRRFFFFFFFFFFFFFFFFDDDDDDDDDDDDDDDDLLLLLLLLLLLLLLLLBBBBBBBBBBBBBBBB') # solved
# 5x5x5
# default='RFFFUDUDURBFULULFDBLRLDUFDBLUBBBDDURLRDRFRUDDBFUFLFURRLDFRRRUBFUUDUFLLBLBBULDDRRUFUUUBUDFFDRFLRBBLRFDLLUUBBRFRFRLLBFRLBRRFRBDLLDDFBLRDLFBBBLBLBDUUFDDD')
# https://www.speedsolving.com/forum/threads/arnauds-5x5x5-edge-pairing-method-examples.1447/
# default='LDFRDDUUUUFUUUBLUUUFLDFDRFDDFBBRRRULRRRBFRRRURFRFDUBDRUBFFFUBFFFUUFFFRLDLRFDLBDDLDDDRDDDDUDDDDUULDLFBFLFFULLLRFLLLRLLLLRRBLBBRBULULBBBRUBBBRBBBBULBRFB')
# default='UDLFDLDDLUFDUBRLBDLFLRBFRBLBBFUDURDULRRBRLFUURBUFLUBDUDRURRRBUFUFFFRUFFLDUURURFFULFFRLFDBRRFRDDBRFBBLBRDFBBBBUDDLLLDBUULUDULDLDDLBRRLRLUBBFFBDLFBDDLFR')
# default='UUUUUUUUUUUUUUUUUUUUUUUUURRRRRRRRRRRRRRRRRRRRRRRRRFFFFFFFFFFFFFFFFFFFFFFFFFDDDDDDDDDDDDDDDDDDDDDDDDDLLLLLLLLLLLLLLLLLLLLLLLLLBBBBBBBBBBBBBBBBBBBBBBBBB') # solved
# default='DFFURRULDLDLURLBDDRRBFRURFBFBFRBDLBBFRBLRFBRBBFLULDLBLULLFRUBUFLDFFLDULDDLUURRDRFBRLULUDRBDUUUBBRFFDBDFURDBBDDRULBUDRDLLLBDRFDLRDLLFDBBUFBRURFFUFFUUFU') # step10 takes 2s
# default='URUBFUUFRDFFUUFLRDBLLBDDDLUULRDLDUBDLRBBLFLBRBFUUBBRBFFUDLFLLBFUFUDRLBFUBBURRLLRUFRDUFFDFRFUBRBBDRFRFLLFURLLFBRBLUDRDDRRDRRFDUDLFLDLUUDUDBRBBBRBDDLDFL') # step10 takes 9s
# default='RFUBLFUBRULLUDDRLRLLFFFLUBDBLBFFUFLFURBFFLDDLFFBBRLUUDRRDLLLRDFFLBBLFURUBULBRLBDRUURDRRDFURDBUUBBFBUDRUBURBRBDLFLBDFBDULLDBBDDDRRFURLDUDUBRDFRFFDFDRLU') # step10 takes 6s, centers take 37 steps :(
# 6x6x6
# default='FBDDDFFUDRFBBLFLLURLDLLUFBLRFDUFLBLLFBFLRRBBFDRRDUBUFRBUBRDLUBFDRLBBRLRUFLBRBDUDFFFDBLUDBBLRDFUUDLBBBRRDRUDLBLDFRUDLLFFUUBFBUUFDLRUDUDBRRBBUFFDRRRDBULRRURULFDBRRULDDRUUULBLLFDFRRFDURFFLDUUBRUFDRFUBLDFULFBFDDUDLBLLRBL')
# default='UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUURRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB') # solved
# defult='xxxxxxxDRRLxxLDDBxxLUUDxxFRDUxxxxxxxxxxxxxxBBLBxxURFUxxDRBDxxDFDLxxxxxxxxxxxxxxULLRxxUFLLxxBLFRxxBBRDxxxxxxxxxxxxxxLFBRxxBUUFxxFDDFxxURUFxxxxxxxxxxxxxxRFDLxxURFUxxUBBFxxRULDxxxxxxxxxxxxxxBBLFxxFLLRxxDRBBxxFDRUxxxxxxx') # good step20 IDA test
# 7x7x7
# default='DBDBDDFBDDLUBDLFRFRBRLLDUFFDUFRBRDFDRUFDFDRDBDBULDBDBDBUFBUFFFULLFLDURRBBRRBRLFUUUDUURBRDUUURFFFLRFLRLDLBUFRLDLDFLLFBDFUFRFFUUUFURDRFULBRFURRBUDDRBDLLRLDLLDLUURFRFBUBURBRUDBDDLRBULBULUBDBBUDRBLFFBLRBURRUFULBRLFDUFDDBULBRLBUFULUDDLLDFRDRDBBFBUBBFLFFRRUFFRLRRDRULLLFRLFULBLLBBBLDFDBRBFDULLULRFDBR')
# 8x8x8
# default='DRRRURBDDBFBRBDDBRRDUFLLURFBFLFURLFLFRBRFUBDRFDFUUBLFFFUULBBFDBDFBUBBFRFLRDLFDRBBLLFRLDFDRBURULDDRFFBFUUBLLFBRUUFDUBRDBBRDFLURUUFFUDLBRRFDUBFLRUUFFRLBFRFLRULUDFRUBBDBFFLBBDFDFLDBFRRRDDLFLBRBFBBRULDDUUBLBBURULLDDLDRUDRBUDRLUULDURLRDFLFULUFLFULRDDDUBBULRBRDFBBLFURRLULUBDDULRFBRFURBRLBRUBULBDDFBUFFBBRLRUUUFRULLBFFRFDDFFDULLDLBUDLLLLUUBBLDLLBBULULBDUDDFUBFLLDLDLFRDUDDBRRFRURRFRRLDDDDRD')
# 9x9x9
# default='RFBLRUFLLFFLRRBDUDDBBBDUDFRUDUFFFBBFRBRDURBULFUDDFLLLDLFLRDLDBBBUUBRDBBBDFUFRUURULURBURDLFDUBFFDRDFRUBDUBRFLRRLUDLRLFBLBRRLLRDRBRBLURBLLRFRLDDFFFRBFUFURDFRRUDUFDDRRRLFLLUBBLBFDRRDLBRLUUBRDBBUBFLUUFBLLDBFFFBUFBFDBRDDDFLRFFBFFFLFRRDUUDDBUBLUUDURRBDBFFLFURDDLUBULUULULBFBRUBLLDDFLRBDBRFDUUDFURLLUBUFBLULLURDLLLBLFFRLLBLUDRLRDBLDDBRBUDRBLLRDUUUBRRFBFBBULUDUDLDRFUDDDFULRFRBDUDULBRRDBDFFRUUFRRFBDBLFBBDFURLRFDUUFRLUBURFURDDFLDFUBDFRRURRDLUDRBRBDLBFLBBRDLRDBFDUBDFFUBLFLUULLBUDLLLURDBLFFFDFLF'
# 10x10x10
# default='ULBDLDBUFRBBBBBLBFFFDFRFBBDDFDFRFFLDLDLURRBUDRRBFLUDFRLBDURULRUUDBBBUBRURRRLDLRFFUFFFURRFBLLRRFLFUDBDRRDFULLLURFBFUUBDBBDBFLFDFUUFDUBRLUFDBLRFLUDUFBFDULDFRUBLBBBUBRRDBDDDDFURFLRDBRRLLRFUFLRDFDUULRRDULFDUDRFLBFRLDUDBDFLDBDUFULULLLBUUFDFFDBBBRBRLFLUFLFUFFRLLLFLBUDRRFDDUDLFLBRDULFLBLLULFLDLUULBUDRDFLUDDLLRBLUBBRFRRLDRDUUFLDDFUFLBDBBLBURBBRRRFUBLBRBRUBFFDBBBBLBUFBLURBLDRFLFBUDDFFRFFRLBDBDUURBUFBDFFFLFBDLDUFFBRDLBRLRLBFRUUUULRRBDBRRFDLLRRUUBDBDBFDLRDDBRUUUUUBLLURBDFUFLLRDBLRRBBLBDDBBFUDUDLDLUFDDDUURBFUFRRBLLURDDRURRURLBLDRFRUFBDRULUFFDUDLBBUURFDUDBLRRUDFRLLDULFUBFDLURFBFULFLRRRRRFDDDLFDDRUFRRLBLUBU')
# 14x14x14
# default='FBDRLBLRRURRLDRBDLBURDFDDDRBLBBFBRDLLFDUBLFRLDFUUBFRDBFBBBULFRLBUFLBDDDLLDRBFLLBBLFBFFDFBFDDFRRRBDRRBRBDUFDRLRUDLDFDDURFLBUBBUUDLBRRDUDRDBBBLDBRBBBUFLBLRUURBDDLDRLUFFBLFRLDFBRFLDLBULFFBRLDBDDFLLRFLUBFDFBRLRLFDBLBURLBLFRFBLLDULUDURLBUUULLRRLUBDDLURLLRFURFRFRBDDUBLDFBLUDRLRDRRBLFUFRDUFFRULBLRBBRUFDBUBBBBLDBRBLDDRRFDDBFFUUBRBLFUBBRFUURBFDRLURLRBFUUFUBRUDRBDFBBFURFLFFDRDFUFFULFLUBDFUFFDLRRFRUDUDLBBBDLLLDUFUDRFDBLRRFFLRUFDRFURDLRRDRDLFBRLRLULRFBDLFDRLFRDDFLLDBFBUBBRLLDLFURFRFULUBLUBFLFFBFDFBDUUBURUUUBFUBDLLFLUUUFDUDLUUULDLLUDDBUFRDRULRLLULRULFBLUDFURFLFUBDLLFLFUBUUBBUFLUDUBRDBLFFUUUFDRLRULUDDRLRBLRUUFBRRRRULBDLFBFLDLRDFUBLUBRDDFUULFLDLUBFURRURUBDFFFDLRFFLBRFRDRUDUULURULLDFRBUDRDLFUFULDBLUBFRFBURDLLUUFDURLRDBLFFRFDBFURLFUBLUUUFFRULUBURRURFDDBFUFRBURBBDRFUDDFDLRUURFBBDBDRLUBRRBFDFRDFDLRDUFFUBRRBDBBLDLFDUDDRLFRRRBUUUBRFUFBUFFBRRDRDDBBDRUULDRFRFBUFLFFBLRBFLLLRUDFDRUDLDRLFRLUFLUBRDUFDDLLUDDRBUBBBDRDBBFRBDDRRLRRUUBBUDUDBLDBDFLFRFUBFLFDBBLRLULDBRFBRRLUUURDFFFDBLDUDBRFDDFFUBLUUURBBULFUFUDFBRDLLFURBULULBUDLUFFBDRBRRDBUUULFDURRDFDDLUDBDRBFBUFLULURUFDRFRFBBFBBBDRLBLUDLDRDLLDRRLLDLFBRBRLDUFBDDUDBLDFRFBBBDRDRDDLDRULFFLLFLBLDFLURLBUDFBDLRBLFDFLUDDFUBUBLURBBBLFRLFLBDDBURFFBFRRL')
# 15x15x15
# default='RLURLURBDDULFUUURFLRBLURUBFDBULFLUBBFLDUFBDRFRBRUDFULFRUFLUDFRLFDFLLFDBULURRLBFBUURDULFDFBLRRRLFULLFFFDUULRRRUUUUFDBLDDFFLRDLLUURUBBULUFFURBRRLBBUUBBFDRRBRBRLUDLUDRBFBFULLRRBBFBFRDDDLDDDFRFUFLURUFLBDLUBRLDFRRDBDBFLFUDFLDFFURLFULLDDRURRDLRFLDFLULUUDDRFDRBLRBRBFUFDBDUUDBRRBDFBLBLRBBLBFLLDUBFFFFBDDRLBBBRFDFFUBBDURFLUUDDDRDDLDBRLBULLFLFBRBRBLUDDLRDRDUDFLFRUFLDLBLURDDDRUFDLBRDRLFBDBLDRFBFFBURULUDRRBRDFRFFLULLUBRDRRRDUFRBLFULUBBUFFBRBBFRLFDRRDBLDFRDRDDRLRUULBDURDURFDDLFDUUDBFLBDUFBULFRRDUDUBFBUDBBFUDFUUDLUDDRFDDDFRRRBUDRBFBBULLUFBLRLFLLBRRRRUBDRFLFDFDBLRFLURULULFFBUUUUFDBBLDLUBBRUBBBRBFLULLBLUUULLUBFFDULDFFBFFFUFFDUDRFBUFLDDLURFLRFLRFBUUBLRFDDRULUUUFFRDDBLRDULFURUDDBDLBBUUBFURFRFBRLBUULBLDDDBUBRFFULLUDFFDLDFUBLLBLDFFDDLBDUFUFFLBBBUBULDDFBRRFFLDUDDFRBLRRDDUDLBDBLURBUDBRRLUBBDRFBUFRDRDRBBDULBUFFDRBBDFBUULFFRLLDURRRDFFUUFULDULURLDLUUUDLBBUDLDRFBDBBDLUFBRRFDFLLDLFDBRBBRFUDDDBURDRBUBRUBDUBLDLLDLURLDFDBRUBDLDFRRRBRLULFRFLDRLBUBRUBLFBFDFFLFRFDFLBRULLRBLDRBBFURRRDUUULLULLDLBLBBDFBUUUBRRUFFBRUDBFRDFDLFLFFRFFFFRULDFFDFRUBBBRURBUFLBDFBBBBBRRRLFLFBDRRUFLURDDLRRBRLLFURRURBRFLLLFFURBFULFRFFBLDUUUUBDDUFFDRBRLDDFRBULDDDFFRURUFLDRFLDFBLRUFFUBBDFFDBLLDBDUBDLDLUDFBFLRULRRBDBLRBLDLUURRLLRULDBLBLLRRFDDRBBRBUBDDULDRFBFBBFLUFBLUULDDFDBRLLUBUBBDFBBLBBUBLULDRUDBLRULDUDLUFRRDLLUDDBUFLFLBUFUURFDRDLBURLLRRRULRBFFRRBRFBUBRBUUFRLRDRDLBBRFLLLDDBRFUFRBULFLFDRDDRRDBF')
args = parser.parse_args()

# Translate colour-letter states into side letters. The presence of "G"
# signals a colour-style string (presumably Green/Yellow/Orange/White
# initials — confirm); map them onto the F/D/L/U side letters.
if "G" in args.state:
    for colour_letter, side_letter in (("G", "F"), ("Y", "D"), ("O", "L"), ("W", "U")):
        args.state = args.state.replace(colour_letter, side_letter)

if args.debug:
    log.setLevel(logging.DEBUG)
try:
    # Cube size from sticker count: 6 faces of size*size stickers each
    size = int(sqrt((len(args.state) / 6)))

    # --slow and --normal override --fast (which defaults to True)
    if args.slow:
        cpu_mode = "slow"
    elif args.normal:
        cpu_mode = "normal"
    elif args.fast:
        cpu_mode = "fast"
    else:
        raise Exception("What CPU mode to use?")

    # Import only the solver for the detected size — each pulls in large
    # lookup tables, so importing all of them up front would be wasteful.
    if size == 2:
        # rubiks cube libraries
        from rubikscubennnsolver.RubiksCube222 import RubiksCube222
        cube = RubiksCube222(args.state, args.order, args.colormap, args.debug)
    elif size == 3:
        # rubiks cube libraries
        from rubikscubennnsolver.RubiksCube333 import RubiksCube333
        cube = RubiksCube333(args.state, args.order, args.colormap, args.debug)
    elif size == 4:
        # rubiks cube libraries
        from rubikscubennnsolver.RubiksCube444 import RubiksCube444
        cube = RubiksCube444(args.state, args.order, args.colormap, avoid_pll=True, debug=args.debug)
    elif size == 5:
        # rubiks cube libraries
        from rubikscubennnsolver.RubiksCube555 import RubiksCube555
        cube = RubiksCube555(args.state, args.order, args.colormap, args.debug)
    elif size == 6:
        # rubiks cube libraries
        from rubikscubennnsolver.RubiksCube666 import RubiksCube666
        cube = RubiksCube666(args.state, args.order, args.colormap, args.debug)
    elif size == 7:
        # rubiks cube libraries
        from rubikscubennnsolver.RubiksCube777 import RubiksCube777
        cube = RubiksCube777(args.state, args.order, args.colormap, args.debug)
    elif size % 2 == 0:
        # rubiks cube libraries
        from rubikscubennnsolver.RubiksCubeNNNEven import RubiksCubeNNNEven
        cube = RubiksCubeNNNEven(args.state, args.order, args.colormap, args.debug)
    else:
        # rubiks cube libraries
        from rubikscubennnsolver.RubiksCubeNNNOdd import RubiksCubeNNNOdd
        cube = RubiksCubeNNNOdd(args.state, args.order, args.colormap, args.debug)

    # Apply any user-supplied opening moves before solving
    if args.openwith:
        cube.print_cube()
        for step in args.openwith.split():
            cube.rotate(step)

    cube.cpu_mode = cpu_mode
    log.info("CPU mode %s" % cube.cpu_mode)
    cube.sanity_check()
    cube.print_cube()
    cube.www_header()
    cube.www_write_cube("Initial Cube")

    try:
        # An optional pre-computed optimal 3x3x3 solution is fed to the solver
        # in reverse step order
        if args.solution333:
            solution333 = reverse_steps(args.solution333.split())
        else:
            solution333 = []
        cube.solve(solution333)
    except NotSolving:
        # heuristic-gathering mode: dump stats instead of a solution
        if cube.heuristic_stats:
            log.info("%s: heuristic_stats raw\n%s\n\n" % (cube, pformat(cube.heuristic_stats)))

            for (key, value) in cube.heuristic_stats.items():
                cube.heuristic_stats[key] = int(median(value))

            log.info("%s: heuristic_stats median\n%s\n\n" % (cube, pformat(cube.heuristic_stats)))
            sys.exit(0)
        else:
            raise

    end_time = dt.datetime.now()
    log.info("Final Cube")
    cube.print_cube()
    cube.print_solution(not args.no_comments)

    log.info("*********************************************************************************")
    log.info("See /tmp/rubiks-cube-NxNxN-solver/index.html for more detailed solve instructions")
    log.info("*********************************************************************************\n")

    # Now put the cube back in its initial state and verify the solution solves it
    solution = cube.solution
    cube.re_init()
    len_steps = len(solution)

    for (i, step) in enumerate(solution):

        if args.print_steps:
            print(("Phase : %s" % cube.phase()))
            print(("Move %d/%d: %s" % (i + 1, len_steps, step)))

        cube.rotate(step)

        www_desc = "Phase: %s<br>\nCube After Move %d/%d: %s<br>\n" % (cube.phase(), i + 1, len_steps, step)
        cube.www_write_cube(www_desc)

        if args.print_steps:
            cube.print_cube()
            print("\n\n\n\n")

    cube.www_footer()

    if args.print_steps:
        cube.print_cube()

    if args.min_memory:
        print("\n\n****************************************")
        print("--min-memory has been replaced by --fast")
        print("****************************************\n\n")

    log.info("rubiks-cube-solver.py end")
    log.info("Memory : {:,} bytes".format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
    log.info("Time : %s" % (end_time - start_time))
    log.info("")

    # the replayed solution must leave the cube solved, else the solver lied
    if not cube.solved():
        kociemba_string = cube.get_kociemba_string(False)
        # edge_swap_count = cube.get_edge_swap_count(edges_paired=True, orbit=None, debug=True)
        # corner_swap_count = cube.get_corner_swap_count(debug=True)
        # raise SolveError("cube should be solved but is not, edge parity %d, corner parity %d, kociemba %s" %
        #    (edge_swap_count, corner_swap_count, kociemba_string))
        raise SolveError("cube should be solved but is not")

except (ImplementThis, SolveError, StuckInALoop, NoSteps, KeyError, NoPruneTableState, InvalidCubeReduction):
    # On any solver failure, dump full diagnostic state before re-raising.
    # NOTE(review): if the exception fired before `cube` was constructed
    # (e.g. KeyError in a RubiksCube constructor), this handler itself
    # raises NameError — confirm whether that is acceptable.
    cube.enable_print_cube = True
    cube.print_cube_layout()
    cube.print_cube()
    cube.print_solution(False)
    print((cube.get_kociemba_string(True)))
    log.info("rubiks-cube-solver.py end")
    raise
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 22 13:13:16 2018
Intro to Analytics 501 HW6
@author: Yunjia
"""
#### Import Libraries ####
from sklearn import datasets
import pandas as pd
import numpy as np
# BUGFIX: pandas.tools.plotting was removed in pandas 0.20+; the plotting
# helpers now live in pandas.plotting
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
# BUGFIX: sklearn.cross_validation was removed in scikit-learn 0.20; the
# replacement APIs live in sklearn.model_selection
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC

# Load Iris data into one DataFrame: 4 feature columns plus the numeric target
iris = datasets.load_iris()
mydata = pd.DataFrame(np.concatenate((iris.data, np.array([iris.target]).T), axis = 1), columns=iris.feature_names+['target'])

# Separate training and final validation data set
valueArray = mydata.values
X = valueArray[:, 0:4]
Y = valueArray[:, 4]
test_size = 0.20
seed = 7
X_train, X_validate, Y_train, Y_validate = train_test_split(X, Y, test_size=test_size, random_state=seed)

# setup 10-fold cross validation to estimate the accuracy of different models
num_folds = 10
scoring = 'accuracy'

# add each algorithm and its name to the model array
models = []
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))

# Evaluate each model, add results to a results array
results = []
names = []
for name, model in models:
    # The old cross_validation.KFold did not shuffle (its random_state was
    # inert without shuffle=True), so the modern equivalent is an unshuffled
    # KFold; it takes n_splits only and infers n from the data.
    kfold = KFold(n_splits=num_folds)
    cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)

# make predictions on the held-out validation data with an SVM
svm = SVC()
svm.fit(X_train, Y_train)
predictions = svm.predict(X_validate)
print(accuracy_score(Y_validate, predictions))
print(confusion_matrix(Y_validate, predictions))
print(classification_report(Y_validate, predictions))
# -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019, 2020 Caleb Bell
<Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains four estimation methods for second `B` virial coefficients,
two utility conversions for when only `B` is considered, and two methods to
calculate `Z` from higher order virial expansions.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/chemicals/>`_.
.. contents:: :local:
Utilities
-----------
.. autofunction:: chemicals.virial.B_to_Z
.. autofunction:: chemicals.virial.B_from_Z
.. autofunction:: chemicals.virial.Z_from_virial_density_form
.. autofunction:: chemicals.virial.Z_from_virial_pressure_form
Second Virial Correlations
--------------------------
.. autofunction:: chemicals.virial.BVirial_Pitzer_Curl
.. autofunction:: chemicals.virial.BVirial_Abbott
.. autofunction:: chemicals.virial.BVirial_Tsonopoulos
.. autofunction:: chemicals.virial.BVirial_Tsonopoulos_extended
"""
from __future__ import division
__all__ = ['BVirial_Pitzer_Curl', 'BVirial_Abbott', 'BVirial_Tsonopoulos',
'BVirial_Tsonopoulos_extended', 'B_to_Z', 'B_from_Z', 'Z_from_virial_density_form',
'Z_from_virial_pressure_form']
from fluids.numerics import numpy as np
from cmath import sqrt as csqrt
from chemicals.utils import log
from fluids.constants import R
def B_to_Z(B, T, P):
    r'''Convert a second virial coefficient into a compressibility factor.

    .. math::
        Z = \frac{PV}{RT} = 1 + \frac{BP}{RT}

    Parameters
    ----------
    B : float
        Second virial coefficient, [m^3/mol]
    T : float
        Temperature, [K]
    P : float
        Pressure [Pa]

    Returns
    -------
    Z : float
        Compressibility factor, [-]

    Notes
    -----
    Other forms of the virial coefficient exist.

    Examples
    --------
    >>> B_to_Z(-0.0015, 300, 1E5)
    0.939863822478637

    References
    ----------
    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
       New York: McGraw-Hill Professional, 2000.
    '''
    # Pressure-truncated virial equation of state: Z = 1 + B*P/(R*T)
    correction = B*P/(R*T)
    return 1. + correction
def B_from_Z(Z, T, P):
    r'''Back out the second virial coefficient of a pure species from its
    compressibility factor.

    .. math::
        B = \frac{RT(Z-1)}{P}

    Parameters
    ----------
    Z : float
        Compressibility factor, [-]
    T : float
        Temperature, [K]
    P : float
        Pressure [Pa]

    Returns
    -------
    B : float
        Second virial coefficient, [m^3/mol]

    Notes
    -----
    Other forms of the virial coefficient exist.

    Examples
    --------
    >>> B_from_Z(0.94, 300, 1E5)
    -0.0014966032712675846

    References
    ----------
    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
       New York: McGraw-Hill Professional, 2000.
    '''
    # Inversion of the truncated virial EOS Z = 1 + B*P/(R*T) for B.
    departure = Z - 1.0
    return departure*R*T/P
def Z_from_virial_density_form(T, P, *args):
    r'''Calculates the compressibility factor of a gas given its temperature,
    pressure, and molar density-form virial coefficients. Any number of
    coefficients is supported.
    .. math::
        Z = \frac{PV}{RT} = 1 + \frac{B}{V} + \frac{C}{V^2} + \frac{D}{V^3}
        + \frac{E}{V^4} \dots
    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [Pa]
    B to Z : float, optional
        Virial coefficients, [various]
    Returns
    -------
    Z : float
        Compressibility factor at T, P, and with given virial coefficients, [-]
    Notes
    -----
    For use with B or with B and C or with B and C and D, optimized equations
    are used to obtain the compressibility factor directly.
    If more coefficients are provided, uses numpy's roots function to solve
    this equation. This takes substantially longer as the solution is
    numerical.
    If no virial coefficients are given, returns 1, as per the ideal gas law.
    The units of each virial coefficient are as follows, where for B, n=1, and
    C, n=2, and so on.
    .. math::
        \left(\frac{\text{m}^3}{\text{mol}}\right)^n
    Examples
    --------
    >>> Z_from_virial_density_form(300, 122057.233762653, 1E-4, 1E-5, 1E-6, 1E-7)
    1.28434940526
    References
    ----------
    .. [1] Prausnitz, John M., Rudiger N. Lichtenthaler, and Edmundo Gomes de
       Azevedo. Molecular Thermodynamics of Fluid-Phase Equilibria. 3rd
       edition. Upper Saddle River, N.J: Prentice Hall, 1998.
    .. [2] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
       Butterworth-Heinemann, 1985.
    '''
    l = len(args)
    # Analytical solutions for 1, 2, or 3 coefficients; numerical roots after.
    if l == 1:
        # B only: positive root of the quadratic Z^2 - Z - B*P/(R*T) = 0
        return 1/2. + (4*args[0]*P + R*T)**0.5/(2*(R*T)**0.5)
        # return ((R*T*(4*args[0]*P + R*T))**0.5 + R*T)/(2*P)
    if l == 2:
        B, C = args[0], args[1]
        # A small imaginary part is ignored
        # Closed-form root of the cubic in V, computed in complex arithmetic
        # (csqrt and complex exponents) and collapsed back to its real part.
        return (P*(-(3*B*R*T/P + R**2*T**2/P**2)/(3*(-1/2 + csqrt(3)*1j/2)*(-9*B*R**2*T**2/(2*P**2) - 27*C*R*T/(2*P) + csqrt(-4*(3*B*R*T/P + R**2*T**2/P**2)**(3+0j) + (-9*B*R**2*T**2/P**2 - 27*C*R*T/P - 2*R**3*T**3/P**3)**(2+0j))/2 - R**3*T**3/P**3)**(1/3.+0j)) - (-1/2 + csqrt(3)*1j/2)*(-9*B*R**2*T**2/(2*P**2) - 27*C*R*T/(2*P) + csqrt(-4*(3*B*R*T/P + R**2*T**2/P**2)**(3+0j) + (-9*B*R**2*T**2/P**2 - 27*C*R*T/P - 2*R**3*T**3/P**3)**(2+0j))/2 - R**3*T**3/P**3)**(1/3.+0j)/3 + R*T/(3*P))/(R*T)).real
    if l == 3:
        # Huge mess. Ideally sympy could optimize a function for quick python
        # execution. Derived with kate's text highlighting
        B, C, D = args[0], args[1], args[2]
        # Common subexpressions hoisted out of the quartic solution below.
        P2 = P**2
        RT = R*T
        BRT = B*RT
        T2 = T**2
        R2 = R**2
        RT23 = 3*R2*T2
        mCRT = -C*RT
        P2256 = 256*P2
        RT23P2256 = RT23/(P2256)
        big1 = (D*RT/P - (-BRT/P - RT23/(8*P2))**2/12 - RT*(mCRT/(4*P) - RT*(BRT/(16*P) + RT23P2256)/P)/P)
        big3 = (-BRT/P - RT23/(8*P2))
        big4 = (mCRT/P - RT*(BRT/(2*P) + R2*T2/(8*P2))/P)
        big5 = big3*(-D*RT/P + RT*(mCRT/(4*P) - RT*(BRT/(16*P) + RT23P2256)/P)/P)
        big2 = 2*big1/(3*(big3**3/216 - big5/6 + big4**2/16 + csqrt(big1**3/27 + (-big3**3/108 + big5/3 - big4**2/8)**2/4))**(1/3))
        big7 = 2*BRT/(3*P) - big2 + 2*(big3**3/216 - big5/6 + big4**2/16 + csqrt(big1**3/27 + (-big3**3/108 + big5/3 - big4**2/8)**2/4))**(1/3) + R2*T2/(4*P2)
        return (P*(((csqrt(big7)/2 + csqrt(4*BRT/(3*P) - (-2*C*RT/P - 2*RT*(BRT/(2*P) + R2*T2/(8*P2))/P)/csqrt(big7) + big2 - 2*(big3**3/216 - big5/6 + big4**2/16 + csqrt(big1**3/27 + (-big3**3/108 + big5/3 - big4**2/8)**2/4))**(1/3) + R2*T2/(2*P2))/2 + RT/(4*P))))/R/T).real
    # General case: build the polynomial coefficients (highest power first, as
    # np.roots expects) and search for a positive, effectively-real root.
    size = l + 2
    # arr = np.ones(size, dtype=np.complex128) # numba: uncomment
    arr = [1.0]*size # numba: delete
    arr[-1] = -P/R/T
    for i in range(l):
        arr[-3-i] = args[i]
    solns = np.roots(arr)
    for rho in solns:
        # Tolerate tiny imaginary parts from the numerical solver.
        if abs(rho.imag) < 1e-12 and rho.real > 0.0:
            return float(P/(R*T*rho.real))
    raise ValueError("Could not find real root")
def Z_from_virial_pressure_form(P, *args):
    r'''Calculates the compressibility factor of a gas given its pressure, and
    pressure-form virial coefficients. Any number of coefficients is supported.

    .. math::
        Z = \frac{Pv}{RT} = 1 + B'P + C'P^2 + D'P^3 + E'P^4 \dots

    Parameters
    ----------
    P : float
        Pressure, [Pa]
    B to Z : float, optional
        Pressure form Virial coefficients, [various]

    Returns
    -------
    Z : float
        Compressibility factor at P, and with given virial coefficients, [-]

    Notes
    -----
    Although no temperature appears in the signature, the result is still
    temperature dependent, because the coefficients themselves are normally
    regressed against temperature. This pressure form is less common than the
    density form; its coefficients are usually written with a "'" suffix.
    With no coefficients supplied, 1 is returned as per the ideal gas law.
    The n-th coefficient has units of (1/Pa)^n, where B is n=1, C is n=2,
    and so on.

    Examples
    --------
    >>> Z_from_virial_pressure_form(102919.99946855308, 4.032286555169439e-09, 1.6197059494442215e-13, 6.483855042486911e-19)
    1.00283753944

    References
    ----------
    .. [1] Prausnitz, John M., Rudiger N. Lichtenthaler, and Edmundo Gomes de
       Azevedo. Molecular Thermodynamics of Fluid-Phase Equilibria. 3rd
       edition. Upper Saddle River, N.J: Prentice Hall, 1998.
    .. [2] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
       Butterworth-Heinemann, 1985.
    '''
    # Accumulate B' + C'*P + D'*P**2 + ... then apply the leading factor of P.
    series_sum = 0.0
    P_power = 1.0  # holds P**i for the i-th coefficient
    for coeff in args:
        series_sum += coeff*P_power
        P_power *= P
    return 1.0 + P*series_sum
### Second Virial Coefficients
def BVirial_Pitzer_Curl(T, Tc, Pc, omega, order=0):
    r'''Calculates the second virial coefficient using the model in [1]_.
    Designed for simple calculations.
    .. math::
        B_r=B^{(0)}+\omega B^{(1)}
    .. math::
        B^{(0)}=0.1445-0.33/T_r-0.1385/T_r^2-0.0121/T_r^3
    .. math::
        B^{(1)} = 0.073+0.46/T_r-0.5/T_r^2 -0.097/T_r^3 - 0.0073/T_r^8
    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Pc : float
        Critical pressure of the fluid [Pa]
    omega : float
        Acentric factor for fluid, [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of B itself; for 1/2/3,
        the first/second/third derivative of B with respect to temperature; and
        for -1/-2, the first/second indefinite integral of B with respect to
        temperature. No other integrals or derivatives are implemented, and an
        exception will be raised if any other order is given.
    Returns
    -------
    B : float
        Second virial coefficient in density form or its integral/derivative if
        specified, [m^3/mol or m^3/mol/K^order]
    Notes
    -----
    Analytical models for derivatives and integrals are available for orders
    -2, -1, 1, 2, and 3, all obtained with SymPy.
    For first temperature derivative of B:
    .. math::
        \frac{d B^{(0)}}{dT} = \frac{33 Tc}{100 T^{2}} + \frac{277 Tc^{2}}{1000 T^{3}} + \frac{363 Tc^{3}}{10000 T^{4}}
    .. math::
        \frac{d B^{(1)}}{dT} = - \frac{23 Tc}{50 T^{2}} + \frac{Tc^{2}}{T^{3}} + \frac{291 Tc^{3}}{1000 T^{4}} + \frac{73 Tc^{8}}{1250 T^{9}}
    For the second temperature derivative of B:
    .. math::
        \frac{d^2 B^{(0)}}{dT^2} = - \frac{3 Tc}{5000 T^{3}} \left(1100 + \frac{1385 Tc}{T} + \frac{242 Tc^{2}}{T^{2}}\right)
    .. math::
        \frac{d^2 B^{(1)}}{dT^2} = \frac{Tc}{T^{3}} \left(\frac{23}{25} - \frac{3 Tc}{T} - \frac{291 Tc^{2}}{250 T^{2}} - \frac{657 Tc^{7}}{1250 T^{7}}\right)
    For the third temperature derivative of B:
    .. math::
        \frac{d^3 B^{(0)}}{dT^3} = \frac{3 Tc}{500 T^{4}} \left(330 + \frac{554 Tc}{T} + \frac{121 Tc^{2}}{T^{2}}\right)
    .. math::
        \frac{d^3 B^{(1)}}{dT^3} = \frac{3 Tc}{T^{4}} \left(- \frac{23}{25} + \frac{4 Tc}{T} + \frac{97 Tc^{2}}{50 T^{2}} + \frac{219 Tc^{7}}{125 T^{7}}\right)
    For the first indefinite integral of B:
    .. math::
        \int{B^{(0)}} dT = \frac{289 T}{2000} - \frac{33 Tc}{100} \ln{\left (T \right )} + \frac{1}{20000 T^{2}} \left(2770 T Tc^{2} + 121 Tc^{3}\right)
    .. math::
        \int{B^{(1)}} dT = \frac{73 T}{1000} + \frac{23 Tc}{50} \ln{\left (T \right )} + \frac{1}{70000 T^{7}} \left(35000 T^{6} Tc^{2} + 3395 T^{5} Tc^{3} + 73 Tc^{8}\right)
    For the second indefinite integral of B:
    .. math::
        \int\int B^{(0)} dT dT = \frac{289 T^{2}}{4000} - \frac{33 T}{100} Tc \ln{\left (T \right )} + \frac{33 T}{100} Tc + \frac{277 Tc^{2}}{2000} \ln{\left (T \right )} - \frac{121 Tc^{3}}{20000 T}
    .. math::
        \int\int B^{(1)} dT dT = \frac{73 T^{2}}{2000} + \frac{23 T}{50} Tc \ln{\left (T \right )} - \frac{23 T}{50} Tc + \frac{Tc^{2}}{2} \ln{\left (T \right )} - \frac{1}{420000 T^{6}} \left(20370 T^{5} Tc^{3} + 73 Tc^{8}\right)
    Examples
    --------
    Example matching that in BVirial_Abbott, for isobutane.
    >>> BVirial_Pitzer_Curl(510., 425.2, 38E5, 0.193)
    -0.00020845362479301725
    References
    ----------
    .. [1] Pitzer, Kenneth S., and R. F. Curl. "The Volumetric and
       Thermodynamic Properties of Fluids. III. Empirical Equation for the
       Second Virial Coefficient1." Journal of the American Chemical Society
       79, no. 10 (May 1, 1957): 2369-70. doi:10.1021/ja01567a007.
    '''
    Tr = T/Tc
    if order == 0:
        # Base correlation: B^(0) and B^(1) as published
        B0 = 0.1445 - 0.33/Tr - 0.1385/Tr**2 - 0.0121/Tr**3
        B1 = 0.073 + 0.46/Tr - 0.5/Tr**2 - 0.097/Tr**3 - 0.0073/Tr**8
    elif order == 1:
        # First temperature derivatives (SymPy-derived; formulas in docstring)
        B0 = Tc*(3300*T**2 + 2770*T*Tc + 363*Tc**2)/(10000*T**4)
        B1 = Tc*(-2300*T**7 + 5000*T**6*Tc + 1455*T**5*Tc**2 + 292*Tc**7)/(5000*T**9)
    elif order == 2:
        # Second temperature derivatives
        B0 = -3*Tc*(1100*T**2 + 1385*T*Tc + 242*Tc**2)/(5000*T**5)
        B1 = Tc*(1150*T**7 - 3750*T**6*Tc - 1455*T**5*Tc**2 - 657*Tc**7)/(1250*T**10)
    elif order == 3:
        # Third temperature derivatives
        B0 = 3*Tc*(330*T**2 + 554*T*Tc + 121*Tc**2)/(500*T**6)
        B1 = 3*Tc*(-230*T**7 + 1000*T**6*Tc + 485*T**5*Tc**2 + 438*Tc**7)/(250*T**11)
    elif order == -1:
        # First indefinite integrals with respect to temperature
        B0 = 289*T/2000 - 33*Tc*log(T)/100 + (2770*T*Tc**2 + 121*Tc**3)/(20000*T**2)
        B1 = 73*T/1000 + 23*Tc*log(T)/50 + (35000*T**6*Tc**2 + 3395*T**5*Tc**3 + 73*Tc**8)/(70000*T**7)
    elif order == -2:
        # Second indefinite integrals with respect to temperature
        B0 = 289*T**2/4000 - 33*T*Tc*log(T)/100 + 33*T*Tc/100 + 277*Tc**2*log(T)/2000 - 121*Tc**3/(20000*T)
        B1 = 73*T**2/2000 + 23*T*Tc*log(T)/50 - 23*T*Tc/50 + Tc**2*log(T)/2 - (20370*T**5*Tc**3 + 73*Tc**8)/(420000*T**6)
    else:
        raise ValueError('Only orders -2, -1, 0, 1, 2 and 3 are supported.')
    Br = B0 + omega*B1
    # Convert reduced form to dimensional B, [m^3/mol]
    return Br*R*Tc/Pc
def BVirial_Abbott(T, Tc, Pc, omega, order=0):
    r'''Calculates the second virial coefficient with the Abbott model [1]_,
    a simple two-term fit to the Lee-Kesler equation.

    .. math::
        B_r = B^{(0)} + \omega B^{(1)}

    .. math::
        B^{(0)} = 0.083 - \frac{0.422}{T_r^{1.6}}

    .. math::
        B^{(1)} = 0.139 - \frac{0.172}{T_r^{4.2}}

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Pc : float
        Critical pressure of the fluid [Pa]
    omega : float
        Acentric factor for fluid, [-]
    order : int, optional
        Order of the calculation. 0 for B itself; 1, 2, or 3 for the first,
        second, or third derivative of B with respect to temperature; -1 or
        -2 for the first or second indefinite integral of B with respect to
        temperature. Any other value raises an exception.

    Returns
    -------
    B : float
        Second virial coefficient in density form or its integral/derivative
        if specified, [m^3/mol or m^3/mol/K^order]

    Notes
    -----
    The analytical derivative and integral expressions used for the non-zero
    orders were all obtained with SymPy.

    Examples
    --------
    Example is from [1]_, p. 93, and matches the result exactly, for isobutane.

    >>> BVirial_Abbott(510., 425.2, 38E5, 0.193)
    -0.00020570185009564064

    References
    ----------
    .. [1] Smith, H. C. Van Ness Joseph M. Introduction to Chemical Engineering
       Thermodynamics 4E 1987.
    '''
    Tr = T/Tc
    if order == 0:
        b0 = 0.083 - 0.422/Tr**1.6
        b1 = 0.139 - 0.172/Tr**4.2
    elif order == 1:
        # First temperature derivatives of B0 and B1
        b0 = 0.6752*Tr**(-1.6)/T
        b1 = 0.7224*Tr**(-4.2)/T
    elif order == 2:
        # Second temperature derivatives
        b0 = -1.75552*Tr**(-1.6)/T**2
        b1 = -3.75648*Tr**(-4.2)/T**2
    elif order == 3:
        # Third temperature derivatives
        b0 = 6.319872*Tr**(-1.6)/T**3
        b1 = 23.290176*Tr**(-4.2)/T**3
    elif order == -1:
        # First indefinite integrals with respect to temperature
        b0 = 0.083*T + 211/300.*Tc*(Tr)**(-0.6)
        b1 = 0.139*T + 0.05375*Tc*Tr**(-3.2)
    elif order == -2:
        # Second indefinite integrals with respect to temperature
        b0 = 0.0415*T**2 + 211/120.*Tc**2*Tr**0.4
        b1 = 0.0695*T**2 - 43/1760.*Tc**2*Tr**(-2.2)
    else:
        raise ValueError('Only orders -2, -1, 0, 1, 2 and 3 are supported.')
    # Combine the reduced contributions and convert to dimensional form.
    reduced_B = b0 + omega*b1
    return reduced_B*R*Tc/Pc
def BVirial_Tsonopoulos(T, Tc, Pc, omega, order=0):
    r'''Calculates the second virial coefficient using the model in [1]_.
    .. math::
        B_r=B^{(0)}+\omega B^{(1)}
    .. math::
        B^{(0)}= 0.1445-0.330/T_r - 0.1385/T_r^2 - 0.0121/T_r^3 - 0.000607/T_r^8
    .. math::
        B^{(1)} = 0.0637+0.331/T_r^2-0.423/T_r^3 - 0.008/T_r^8
    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Pc : float
        Critical pressure of the fluid [Pa]
    omega : float
        Acentric factor for fluid, [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of B itself; for 1/2/3,
        the first/second/third derivative of B with respect to temperature; and
        for -1/-2, the first/second indefinite integral of B with respect to
        temperature. No other integrals or derivatives are implemented, and an
        exception will be raised if any other order is given.
    Returns
    -------
    B : float
        Second virial coefficient in density form or its integral/derivative if
        specified, [m^3/mol or m^3/mol/K^order]
    Notes
    -----
    A more complete expression is also available, in
    BVirial_Tsonopoulos_extended.
    Analytical models for derivatives and integrals are available for orders
    -2, -1, 1, 2, and 3, all obtained with SymPy.
    For first temperature derivative of B:
    .. math::
        \frac{d B^{(0)}}{dT} = \frac{33 Tc}{100 T^{2}} + \frac{277 Tc^{2}}{1000 T^{3}} + \frac{363 Tc^{3}}{10000 T^{4}} + \frac{607 Tc^{8}}{125000 T^{9}}
    .. math::
        \frac{d B^{(1)}}{dT} = - \frac{331 Tc^{2}}{500 T^{3}} + \frac{1269 Tc^{3}}{1000 T^{4}} + \frac{8 Tc^{8}}{125 T^{9}}
    For the second temperature derivative of B:
    .. math::
        \frac{d^2 B^{(0)}}{dT^2} = - \frac{3 Tc}{125000 T^{3}} \left(27500 + \frac{34625 Tc}{T} + \frac{6050 Tc^{2}}{T^{2}} + \frac{1821 Tc^{7}}{T^{7}}\right)
    .. math::
        \frac{d^2 B^{(1)}}{dT^2} = \frac{3 Tc^{2}}{500 T^{4}} \left(331 - \frac{846 Tc}{T} - \frac{96 Tc^{6}}{T^{6}}\right)
    For the third temperature derivative of B:
    .. math::
        \frac{d^3 B^{(0)}}{dT^3} = \frac{3 Tc}{12500 T^{4}} \left(8250 + \frac{13850 Tc}{T} + \frac{3025 Tc^{2}}{T^{2}} + \frac{1821 Tc^{7}}{T^{7}}\right)
    .. math::
        \frac{d^3 B^{(1)}}{dT^3} = \frac{3 Tc^{2}}{250 T^{5}} \left(-662 + \frac{2115 Tc}{T} + \frac{480 Tc^{6}}{T^{6}}\right)
    For the first indefinite integral of B:
    .. math::
        \int{B^{(0)}} dT = \frac{289 T}{2000} - \frac{33 Tc}{100} \ln{\left (T \right )} + \frac{1}{7000000 T^{7}} \left(969500 T^{6} Tc^{2} + 42350 T^{5} Tc^{3} + 607 Tc^{8}\right)
    .. math::
        \int{B^{(1)}} dT = \frac{637 T}{10000} - \frac{1}{70000 T^{7}} \left(23170 T^{6} Tc^{2} - 14805 T^{5} Tc^{3} - 80 Tc^{8}\right)
    For the second indefinite integral of B:
    .. math::
        \int\int B^{(0)} dT dT = \frac{289 T^{2}}{4000} - \frac{33 T}{100} Tc \ln{\left (T \right )} + \frac{33 T}{100} Tc + \frac{277 Tc^{2}}{2000} \ln{\left (T \right )} - \frac{1}{42000000 T^{6}} \left(254100 T^{5} Tc^{3} + 607 Tc^{8}\right)
    .. math::
        \int\int B^{(1)} dT dT = \frac{637 T^{2}}{20000} - \frac{331 Tc^{2}}{1000} \ln{\left (T \right )} - \frac{1}{210000 T^{6}} \left(44415 T^{5} Tc^{3} + 40 Tc^{8}\right)
    Examples
    --------
    Example matching that in BVirial_Abbott, for isobutane.
    >>> BVirial_Tsonopoulos(510., 425.2, 38E5, 0.193)
    -0.00020935295404416802
    References
    ----------
    .. [1] Tsonopoulos, Constantine. "An Empirical Correlation of Second Virial
       Coefficients." AIChE Journal 20, no. 2 (March 1, 1974): 263-72.
       doi:10.1002/aic.690200209.
    '''
    Tr = T/Tc
    if order == 0:
        # Base correlation: B^(0) and B^(1) as published
        B0 = 0.1445 - 0.33/Tr - 0.1385/Tr**2 - 0.0121/Tr**3 - 0.000607/Tr**8
        B1 = 0.0637 + 0.331/Tr**2 - 0.423/Tr**3 - 0.008/Tr**8
    elif order == 1:
        # First temperature derivatives (SymPy-derived; formulas in docstring)
        B0 = 33*Tc/(100*T**2) + 277*Tc**2/(1000*T**3) + 363*Tc**3/(10000*T**4) + 607*Tc**8/(125000*T**9)
        B1 = -331*Tc**2/(500*T**3) + 1269*Tc**3/(1000*T**4) + 8*Tc**8/(125*T**9)
    elif order == 2:
        # Second temperature derivatives
        B0 = -3*Tc*(27500 + 34625*Tc/T + 6050*Tc**2/T**2 + 1821*Tc**7/T**7)/(125000*T**3)
        B1 = 3*Tc**2*(331 - 846*Tc/T - 96*Tc**6/T**6)/(500*T**4)
    elif order == 3:
        # Third temperature derivatives
        B0 = 3*Tc*(8250 + 13850*Tc/T + 3025*Tc**2/T**2 + 1821*Tc**7/T**7)/(12500*T**4)
        B1 = 3*Tc**2*(-662 + 2115*Tc/T + 480*Tc**6/T**6)/(250*T**5)
    elif order == -1:
        # First indefinite integrals with respect to temperature
        B0 = 289*T/2000. - 33*Tc*log(T)/100. + (969500*T**6*Tc**2 + 42350*T**5*Tc**3 + 607*Tc**8)/(7000000.*T**7)
        B1 = 637*T/10000. - (23170*T**6*Tc**2 - 14805*T**5*Tc**3 - 80*Tc**8)/(70000.*T**7)
    elif order == -2:
        # Second indefinite integrals with respect to temperature
        B0 = 289*T**2/4000. - 33*T*Tc*log(T)/100. + 33*T*Tc/100. + 277*Tc**2*log(T)/2000. - (254100*T**5*Tc**3 + 607*Tc**8)/(42000000.*T**6)
        B1 = 637*T**2/20000. - 331*Tc**2*log(T)/1000. - (44415*T**5*Tc**3 + 40*Tc**8)/(210000.*T**6)
    else:
        raise ValueError('Only orders -2, -1, 0, 1, 2 and 3 are supported.')
    Br = (B0+omega*B1)
    # Convert reduced form to dimensional B, [m^3/mol]
    return Br*R*Tc/Pc
def BVirial_Tsonopoulos_extended(T, Tc, Pc, omega, a=0, b=0, species_type='',
                                 dipole=0, order=0):
    r'''Calculates the second virial coefficient using the
    comprehensive model in [1]_. See the notes for the calculation of `a` and
    `b`.
    .. math::
        \frac{BP_c}{RT_c} = B^{(0)} + \omega B^{(1)} + a B^{(2)} + b B^{(3)}
    .. math::
        B^{(0)}=0.1445-0.33/T_r-0.1385/T_r^2-0.0121/T_r^3 - 0.000607/T_r^8
    .. math::
        B^{(1)} = 0.0637+0.331/T_r^2-0.423/T_r^3 - 0.008/T_r^8
    .. math::
        B^{(2)} = 1/T_r^6
    .. math::
        B^{(3)} = -1/T_r^8
    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Pc : float
        Critical pressure of the fluid [Pa]
    omega : float
        Acentric factor for fluid, [-]
    a : float, optional
        Fit parameter, calculated based on species_type if a is not given and
        species_type matches one of the supported chemical classes.
    b : float, optional
        Fit parameter, calculated based on species_type if a is not given and
        species_type matches one of the supported chemical classes.
    species_type : str, optional
        One of 'simple', 'normal', 'methyl alcohol', 'water', 'ketone',
        'aldehyde', 'alkyl nitrile', 'ether', 'carboxylic acid', 'ester',
        'alkyl halide', 'mercaptan', 'sulfide', 'disulfide', or 'alkanol'.
    dipole : float
        dipole moment, optional, [Debye]
    order : int, optional
        Order of the calculation. 0 for the calculation of B itself; for 1/2/3,
        the first/second/third derivative of B with respect to temperature; and
        for -1/-2, the first/second indefinite integral of B with respect to
        temperature. No other integrals or derivatives are implemented, and an
        exception will be raised if any other order is given.
    Returns
    -------
    B : float
        Second virial coefficient in density form or its integral/derivative if
        specified, [m^3/mol or m^3/mol/K^order]
    Notes
    -----
    Analytical models for derivatives and integrals are available for orders
    -2, -1, 1, 2, and 3, all obtained with SymPy.
    To calculate `a` or `b`, the following rules are used:
    For 'simple' or 'normal' fluids:
    .. math::
        a = 0
    .. math::
        b = 0
    For 'ketone', 'aldehyde', 'alkyl nitrile', 'ether', 'carboxylic acid',
    or 'ester' types of chemicals:
    .. math::
        a = -2.14\times 10^{-4} \mu_r - 4.308 \times 10^{-21} (\mu_r)^8
    .. math::
        b = 0
    For 'alkyl halide', 'mercaptan', 'sulfide', or 'disulfide' types of
    chemicals:
    .. math::
        a = -2.188\times 10^{-4} (\mu_r)^4 - 7.831 \times 10^{-21} (\mu_r)^8
    .. math::
        b = 0
    For 'alkanol' types of chemicals (except methanol):
    .. math::
        a = 0.0878
    .. math::
        b = 0.00908 + 0.0006957 \mu_r
    For methanol:
    .. math::
        a = 0.0878
    .. math::
        b = 0.0525
    For water:
    .. math::
        a = -0.0109
    .. math::
        b = 0
    If required, the form of dipole moment used in the calculation of some
    types of `a` and `b` values is as follows:
    .. math::
        \mu_r = 100000\frac{\mu^2(Pc/101325.0)}{Tc^2}
    For first temperature derivative of B:
    .. math::
        \frac{d B^{(0)}}{dT} = \frac{33 Tc}{100 T^{2}} + \frac{277 Tc^{2}}{1000 T^{3}} + \frac{363 Tc^{3}}{10000 T^{4}} + \frac{607 Tc^{8}}{125000 T^{9}}
    .. math::
        \frac{d B^{(1)}}{dT} = - \frac{331 Tc^{2}}{500 T^{3}} + \frac{1269 Tc^{3}}{1000 T^{4}} + \frac{8 Tc^{8}}{125 T^{9}}
    .. math::
        \frac{d B^{(2)}}{dT} = - \frac{6 Tc^{6}}{T^{7}}
    .. math::
        \frac{d B^{(3)}}{dT} = \frac{8 Tc^{8}}{T^{9}}
    For the second temperature derivative of B:
    .. math::
        \frac{d^2 B^{(0)}}{dT^2} = - \frac{3 Tc}{125000 T^{3}} \left(27500 + \frac{34625 Tc}{T} + \frac{6050 Tc^{2}}{T^{2}} + \frac{1821 Tc^{7}}{T^{7}}\right)
    .. math::
        \frac{d^2 B^{(1)}}{dT^2} = \frac{3 Tc^{2}}{500 T^{4}} \left(331 - \frac{846 Tc}{T} - \frac{96 Tc^{6}}{T^{6}}\right)
    .. math::
        \frac{d^2 B^{(2)}}{dT^2} = \frac{42 Tc^{6}}{T^{8}}
    .. math::
        \frac{d^2 B^{(3)}}{dT^2} = - \frac{72 Tc^{8}}{T^{10}}
    For the third temperature derivative of B:
    .. math::
        \frac{d^3 B^{(0)}}{dT^3} = \frac{3 Tc}{12500 T^{4}} \left(8250 + \frac{13850 Tc}{T} + \frac{3025 Tc^{2}}{T^{2}} + \frac{1821 Tc^{7}}{T^{7}}\right)
    .. math::
        \frac{d^3 B^{(1)}}{dT^3} = \frac{3 Tc^{2}}{250 T^{5}} \left(-662 + \frac{2115 Tc}{T} + \frac{480 Tc^{6}}{T^{6}}\right)
    .. math::
        \frac{d^3 B^{(2)}}{dT^3} = - \frac{336 Tc^{6}}{T^{9}}
    .. math::
        \frac{d^3 B^{(3)}}{dT^3} = \frac{720 Tc^{8}}{T^{11}}
    For the first indefinite integral of B:
    .. math::
        \int{B^{(0)}} dT = \frac{289 T}{2000} - \frac{33 Tc}{100} \ln{\left (T \right )} + \frac{1}{7000000 T^{7}} \left(969500 T^{6} Tc^{2} + 42350 T^{5} Tc^{3} + 607 Tc^{8}\right)
    .. math::
        \int{B^{(1)}} dT = \frac{637 T}{10000} - \frac{1}{70000 T^{7}} \left(23170 T^{6} Tc^{2} - 14805 T^{5} Tc^{3} - 80 Tc^{8}\right)
    .. math::
        \int{B^{(2)}} dT = - \frac{Tc^{6}}{5 T^{5}}
    .. math::
        \int{B^{(3)}} dT = \frac{Tc^{8}}{7 T^{7}}
    For the second indefinite integral of B:
    .. math::
        \int\int B^{(0)} dT dT = \frac{289 T^{2}}{4000} - \frac{33 T}{100} Tc \ln{\left (T \right )} + \frac{33 T}{100} Tc + \frac{277 Tc^{2}}{2000} \ln{\left (T \right )} - \frac{1}{42000000 T^{6}} \left(254100 T^{5} Tc^{3} + 607 Tc^{8}\right)
    .. math::
        \int\int B^{(1)} dT dT = \frac{637 T^{2}}{20000} - \frac{331 Tc^{2}}{1000} \ln{\left (T \right )} - \frac{1}{210000 T^{6}} \left(44415 T^{5} Tc^{3} + 40 Tc^{8}\right)
    .. math::
        \int\int B^{(2)} dT dT = \frac{Tc^{6}}{20 T^{4}}
    .. math::
        \int\int B^{(3)} dT dT = - \frac{Tc^{8}}{42 T^{6}}
    Examples
    --------
    Example from Perry's Handbook, 8E, p2-499. Matches to a decimal place.
    >>> BVirial_Tsonopoulos_extended(430., 405.65, 11.28E6, 0.252608, a=0, b=0, species_type='ketone', dipole=1.469)
    -9.679718337596426e-05
    References
    ----------
    .. [1] Tsonopoulos, C., and J. L. Heidman. "From the Virial to the Cubic
       Equation of State." Fluid Phase Equilibria 57, no. 3 (1990): 261-76.
       doi:10.1016/0378-3812(90)85126-U
    .. [2] Tsonopoulos, Constantine, and John H. Dymond. "Second Virial
       Coefficients of Normal Alkanes, Linear 1-Alkanols (and Water), Alkyl
       Ethers, and Their Mixtures." Fluid Phase Equilibria, International
       Workshop on Vapour-Liquid Equilibria and Related Properties in Binary
       and Ternary Mixtures of Ethers, Alkanes and Alkanols, 133, no. 1-2
       (June 1997): 11-34. doi:10.1016/S0378-3812(97)00058-7.
    '''
    Tr = T/Tc
    if order == 0:
        # Base correlation: the four reduced contributions as published
        B0 = 0.1445 - 0.33/Tr - 0.1385/Tr**2 - 0.0121/Tr**3 - 0.000607/Tr**8
        B1 = 0.0637 + 0.331/Tr**2 - 0.423/Tr**3 - 0.008/Tr**8
        B2 = 1./Tr**6
        B3 = -1./Tr**8
    elif order == 1:
        # First temperature derivatives (SymPy-derived; formulas in docstring)
        B0 = 33*Tc/(100*T**2) + 277*Tc**2/(1000*T**3) + 363*Tc**3/(10000*T**4) + 607*Tc**8/(125000*T**9)
        B1 = -331*Tc**2/(500*T**3) + 1269*Tc**3/(1000*T**4) + 8*Tc**8/(125*T**9)
        B2 = -6.0*Tc**6/T**7
        B3 = 8.0*Tc**8/T**9
    elif order == 2:
        # Second temperature derivatives
        B0 = -3*Tc*(27500 + 34625*Tc/T + 6050*Tc**2/T**2 + 1821*Tc**7/T**7)/(125000*T**3)
        B1 = 3*Tc**2*(331 - 846*Tc/T - 96*Tc**6/T**6)/(500*T**4)
        B2 = 42.0*Tc**6/T**8
        B3 = -72.0*Tc**8/T**10
    elif order == 3:
        # Third temperature derivatives
        B0 = 3*Tc*(8250 + 13850*Tc/T + 3025*Tc**2/T**2 + 1821*Tc**7/T**7)/(12500*T**4)
        B1 = 3*Tc**2*(-662 + 2115*Tc/T + 480*Tc**6/T**6)/(250*T**5)
        B2 = -336.0*Tc**6/T**9
        B3 = 720.0*Tc**8/T**11
    elif order == -1:
        # First indefinite integrals with respect to temperature
        B0 = 289*T/2000. - 33*Tc*log(T)/100. + (969500*T**6*Tc**2 + 42350*T**5*Tc**3 + 607*Tc**8)/(7000000.*T**7)
        B1 = 637*T/10000. - (23170*T**6*Tc**2 - 14805*T**5*Tc**3 - 80*Tc**8)/(70000.*T**7)
        B2 = -Tc**6/(5*T**5)
        B3 = Tc**8/(7*T**7)
    elif order == -2:
        # Second indefinite integrals with respect to temperature
        B0 = 289*T**2/4000. - 33*T*Tc*log(T)/100. + 33*T*Tc/100. + 277*Tc**2*log(T)/2000. - (254100*T**5*Tc**3 + 607*Tc**8)/(42000000.*T**6)
        B1 = 637*T**2/20000. - 331*Tc**2*log(T)/1000. - (44415*T**5*Tc**3 + 40*Tc**8)/(210000.*T**6)
        B2 = Tc**6/(20*T**4)
        B3 = -Tc**8/(42*T**6)
    else:
        raise ValueError('Only orders -2, -1, 0, 1, 2 and 3 are supported.')
    # Only infer a and b from the chemical class when the caller supplied
    # neither; explicit non-zero a/b values always win.
    if a == 0 and b == 0 and species_type != '':
        if species_type == 'simple' or species_type == 'normal':
            a, b = 0, 0
        elif species_type == 'methyl alcohol':
            a, b = 0.0878, 0.0525
        elif species_type == 'water':
            a, b = -0.0109, 0
        elif dipole != 0 and Tc != 0 and Pc != 0:
            # Reduced dipole moment, as defined in the docstring
            dipole_r = 1E5*dipole**2*(Pc/101325.0)/Tc**2
            if (species_type == 'ketone' or species_type == 'aldehyde'
                or species_type == 'alkyl nitrile' or species_type == 'ether'
                or species_type == 'carboxylic acid' or species_type == 'ester'):
                a, b = -2.14E-4*dipole_r-4.308E-21*dipole_r**8, 0
            elif (species_type == 'alkyl halide' or species_type == 'mercaptan'
                  or species_type == 'sulfide' or species_type == 'disulfide'):
                a, b = -2.188E-4*dipole_r**4-7.831E-21*dipole_r**8, 0
            elif species_type == 'alkanol':
                a, b = 0.0878, 0.00908+0.0006957*dipole_r
    Br = B0 + omega*B1 + a*B2 + b*B3
    # Convert reduced form to dimensional B, [m^3/mol]
    return Br*R*Tc/Pc
|
#-- gestures supported by Otto
#-- OttDIY Python Project, 2020
# Integer identifiers for Otto's built-in gesture animations; each constant
# names one canned gesture.
# NOTE(review): `const` is the MicroPython compile-time constant declaration;
# it is assumed to already be in scope (e.g. `from micropython import const`)
# -- confirm the import exists in the full file.
OTTOHAPPY = const(0)
OTTOSUPERHAPPY = const(1)
OTTOSAD = const(2)
OTTOSLEEPING = const(3)
OTTOFART = const(4)
OTTOCONFUSED = const(5)
OTTOLOVE = const(6)
OTTOANGRY = const(7)
OTTOFRETFUL = const(8)
OTTOMAGIC = const(9)
OTTOWAVE = const(10)
OTTOVICTORY = const(11)
OTTOFAIL = const(12)
|
import sys
from scapy.all import conf, get_if_hwaddr
# Interface enumeration is platform specific: scapy returns a list of dicts
# on Windows and a list of plain interface-name strings elsewhere. The
# `windows` flag records which shape `interfaces` holds for later use.
if sys.platform.startswith('win'):
    from scapy.all import get_windows_if_list
    interfaces = get_windows_if_list()
    windows = True
else:
    from scapy.all import get_if_list
    interfaces = get_if_list()
    windows = False
def get_ifaces():
    """Get a list of network interfaces.

    Returns a dict keyed by interface name; each value is a dict with the
    keys 'name', 'index', 'mac', 'inet' and 'inet6', e.g.::

        {'eth0': {'index': 0,
                  'inet': None,
                  'inet6': None,
                  'mac': '80:fa:5b:4b:f9:18',
                  'name': 'eth0'},
         'lo':   {'index': 1,
                  'inet': '127.0.0.1',
                  'inet6': '::1',
                  'mac': '00:00:00:00:00:00',
                  'name': 'lo'}}

    'inet'/'inet6' come from scapy's routing tables and are None when no
    route mentions the interface; 'mac' is None when scapy reports an
    empty address.
    """
    ifaces = {}
    for idx, entry in enumerate(interfaces):
        # Unix gives plain name strings, Windows gives dict records
        if isinstance(entry, str):
            name = entry
        elif isinstance(entry, dict):
            name = entry['name']
        else:
            print('Unexpected result', file=sys.stderr)
            return {}
        info = {'name': name, 'index': idx}
        info['mac'] = entry['mac'] if windows else get_if_hwaddr(name)
        if not info['mac']:
            info['mac'] = None
        # First IPv4 route whose interface matches gives the inet address
        info['inet'] = None
        for route in conf.route.routes:
            if getattr(route[3], 'name', route[3]) == name:
                info['inet'] = route[4]
                break
        # Likewise for IPv6; route[4] is a (addresses, ...) tuple there
        info['inet6'] = None
        for route in conf.route6.routes:
            if getattr(route[3], 'name', route[3]) == name:
                info['inet6'] = route[4][0]
                break
        ifaces[name] = info
    return ifaces
def search_iface(term):
    """Return the interface record matching *term*, or None.

    *term* may be any of: the interface name, the interface index (this is
    NOT the Windows Interface Index), the inet address, the inet6 address,
    or the MAC address. A falsy *term* yields None immediately.
    """
    if not term:
        return None
    term_str = str(term)
    for candidate in get_ifaces().values():
        # Either a direct value match (name/mac/inet/inet6) or a
        # string-compared index match selects this interface.
        if term in candidate.values() or term_str == str(candidate['index']):
            return candidate
    return None
# Manual smoke test: dump the interface table when run as a script.
if __name__ == '__main__':
    from pprint import pprint
    pprint(get_ifaces())
|
from typing import Callable, Mapping, Sequence
import numpy as np
import torch
from ignite.engine import Engine, Events
from ignite.utils import convert_tensor
from torch.utils.data import DataLoader
from somen.pytorch_utility.extensions.extension import PRIORITY_WRITER, Extension
class BatchEvaluator(Extension):
    """Ignite extension that evaluates `model` over `data_loader` and writes
    each metric in `metric_functions` into engine.state.metrics["observation"]
    under the key ``prefix + name``.

    Predictions and labels are accumulated across the entire loader and each
    metric function is called once on the concatenated tensors.
    """
    priority = PRIORITY_WRITER
    main_process_only = True  # If you want to distribute the procedure, use DistributedBatchEvaluator
    def __init__(
        self,
        model: torch.nn.Module,
        data_loader: DataLoader,
        prefix: str,
        metric_functions: Mapping[str, Callable],
        label_indices: Sequence[int],
        device: str,
        non_blocking: bool,
        micro_average: bool = True,
        call_event: Events = Events.EPOCH_COMPLETED,
    ) -> None:
        # prefix: prepended to every metric name when recording results.
        # label_indices: positions within each batch tuple that hold labels;
        #   every other position is treated as a model input. Both positive
        #   and negative positions are honoured (see __call__).
        # micro_average: stored here but not read in this class's visible
        #   code — presumably consumed by a subclass/caller; confirm.
        self.model = model
        self.data_loader = data_loader
        self.prefix = prefix
        self.metric_functions = metric_functions
        self.label_indices = label_indices
        self.device = device
        self.non_blocking = non_blocking
        self.micro_average = micro_average
        self.call_event = call_event
    @torch.no_grad()
    def __call__(self, engine: Engine) -> None:
        self.model.eval()
        # Detach to host memory and copy so accumulated arrays don't alias
        # buffers the loader/model may reuse.
        def _get_non_ref_array(x):
            return x.detach().cpu().numpy().copy()
        y_trues, y_preds = None, None
        for batch in self.data_loader:
            # Inputs = batch elements that are not labels; the second test
            # (`i - len(batch)`) also excludes negative label indices.
            inputs = [
                v
                for i, v in enumerate(batch)
                if i not in self.label_indices and i - len(batch) not in self.label_indices
            ]
            inputs = [convert_tensor(v, device=self.device, non_blocking=self.non_blocking) for v in inputs]
            y_pred = self.model(*inputs)
            # Store labels
            if y_trues is None:
                y_trues = [[] for _ in self.label_indices]
            for k in range(len(self.label_indices)):
                y_trues[k].append(_get_non_ref_array(batch[self.label_indices[k]]))
            # Normalise the model output to a list of arrays (multi-output
            # models return a tuple).
            if isinstance(y_pred, tuple):
                y_pred = [_get_non_ref_array(e) for e in y_pred]
            else:
                y_pred = [_get_non_ref_array(y_pred)]
            if y_preds is None:
                y_preds = [[] for _ in range(len(y_pred))]
            for k in range(len(y_pred)):
                y_preds[k].append(y_pred[k])
        # Concatenate per-batch arrays along the sample axis and hand the
        # full tensors to every metric function.
        y_trues = [torch.from_numpy(np.concatenate(e, axis=0)) for e in y_trues]
        y_preds = [torch.from_numpy(np.concatenate(e, axis=0)) for e in y_preds]
        for key, metric_fn in self.metric_functions.items():
            value = metric_fn(*y_preds, *y_trues)
            if isinstance(value, torch.Tensor):
                value = value.item()
            engine.state.metrics["observation"][self.prefix + key] = value
|
from yapsy.IPlugin import IPlugin
from yapsy.PluginManager import PluginManager
from django.template import loader, Context
from django.db.models import Count
from server.models import *
from django.shortcuts import get_object_or_404
import server.utils as utils
class MachineDetailSecurity(IPlugin):
    """Machine-detail widget reporting FileVault, SIP and Gatekeeper status."""
    def plugin_type(self):
        # Tells the host application where this plugin renders.
        return 'machine_detail'
    def widget_width(self):
        # Bootstrap grid width of the widget.
        return 4
    def get_description(self):
        return 'Security related information'
    def widget_content(self, page, machines=None, theid=None):
        """Render the security widget for a single machine.

        Each status falls back to 'Unknown' when no plugin-script row exists
        (`first()` returns None and the attribute access raises).
        """
        t = loader.get_template('machinedetailsecurity/templates/machinedetailsecurity.html')
        # NOTE(review): ascending `order_by('submission__recorded')` picks the
        # *earliest* submission; confirm '-submission__recorded' (latest)
        # isn't what was intended.
        try:
            fv_status = PluginScriptRow.objects.filter(submission__machine=machines, submission__plugin__exact='MachineDetailSecurity', pluginscript_name__exact='Filevault').order_by('submission__recorded').first()
            fv_status = fv_status.pluginscript_data
        # Was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        except Exception:
            fv_status = 'Unknown'
        try:
            sip_status = PluginScriptRow.objects.filter(submission__machine=machines, submission__plugin__exact='MachineDetailSecurity', pluginscript_name__exact='SIP', pluginscript_data__exact='Disabled')
            # SIP counts as disabled if any row explicitly reports 'Disabled'.
            if len(sip_status) != 0:
                sip_status = 'Disabled'
            else:
                sip_status = 'Enabled'
            # sip_status = sip_status.pluginscript_data
        except Exception:
            sip_status = 'Unknown'
        try:
            gatekeeper_status = PluginScriptRow.objects.filter(submission__machine=machines, submission__plugin__exact='MachineDetailSecurity', pluginscript_name__exact='Gatekeeper').order_by('submission__recorded').first()
            gatekeeper_status = gatekeeper_status.pluginscript_data
        except Exception:
            gatekeeper_status = 'Unknown'
        c = Context({
            'title': 'Security',
            'fv_status': fv_status,
            'sip_status': sip_status,
            'gatekeeper_status': gatekeeper_status,
        })
        return t.render(c)
    def filter_machines(self, machines, data):
        # You will be passed a QuerySet of machines, you then need to perform some filtering based on the 'data' part of the url from the show_widget output. Just return your filtered list of machines and the page title.
        machines = machines.filter(operating_system__exact=data)
        return machines, 'Machines running '+data
|
import re
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy as _lazy
from mozillians.phonebook.validators import validate_username
from mozillians.users.models import UserProfile
# Matches one or more decimal digits. NOTE: re.IGNORECASE has no effect on \d.
REGEX_NUMERIC = re.compile(r"\d+", re.IGNORECASE)
class UserForm(forms.ModelForm):
    """Base form for fields that live on the ``User`` object rather than the
    profile. Shared by the registration and profile-edit flows so the
    User-specific validation lives in one place.
    """

    username = forms.CharField(label=_lazy("Username"))

    class Meta:
        model = User
        fields = ["username"]

    def clean_username(self):
        """Validate the requested username, keeping the current one if blank."""
        username = self.cleaned_data["username"]
        if not username:
            return self.instance.username
        # Don't be jacking somebody's username
        # This causes a potential race condition however the worst that can
        # happen is bad UI.
        taken = User.objects.filter(username=username).exclude(pk=self.instance.id)
        if taken.exists():
            raise forms.ValidationError(
                _("This username is in use. Please try another.")
            )
        # No funky characters in username.
        if re.match(r"^[\w.@+-]+$", username) is None:
            raise forms.ValidationError(_("Please use only alphanumeric characters"))
        # Reserved / disallowed names are rejected by the validator.
        if not validate_username(username):
            raise forms.ValidationError(
                _("This username is not allowed, please choose another.")
            )
        return username
class BasicInformationForm(forms.ModelForm):
    """Edit a profile's display name and its privacy setting."""

    class Meta:
        model = UserProfile
        fields = (
            "full_name",
            "privacy_full_name",
        )
        # Removed dead config: a Textarea widget was declared for "bio", but
        # "bio" is not listed in `fields`, so Django ignored the entry.
|
# Copyright (c) 2021 Egor Tensin <Egor.Tensin@gmail.com>
# This file is part of the "cgitize" project.
# For details, see https://github.com/egor-tensin/cgitize.
# Distributed under the MIT License.
import logging
from github import Github, GithubException
from cgitize.repo import Repo
class GitHub:
    """Thin wrapper over PyGithub that logs failed fetches before re-raising."""

    def __init__(self, access_token):
        self._impl = Github(access_token)

    def get_repo(self, repo):
        """Fetch a single repository by its identifier."""
        try:
            return self._impl.get_repo(repo.id)
        except GithubException:
            logging.error("Couldn't fetch repository: %s", repo.id)
            raise

    def get_user_repos(self, user):
        """Fetch every repository belonging to *user*."""
        try:
            account = self._impl.get_user(user.name)
            return account.get_repos()
        except GithubException:
            logging.error("Couldn't fetch user repositories: %s", user.name)
            raise

    @staticmethod
    def convert_repo(repo, *args, **kwargs):
        """Adapt a PyGithub repository object into a cgitize Repo."""
        return Repo.from_github(repo, *args, **kwargs)
|
from __future__ import print_function, absolute_import, unicode_literals, division
from collections import OrderedDict, Counter
import pandas as pd
from sklearn.metrics import cohen_kappa_score
import matplotlib.pyplot as plt
# Adapted from https://gist.github.com/ShinNoNoir/4749548
from amt.detect_spam import get_compromised_hits
def fleiss_kappa(ratings, n):
    '''
    Computes the Fleiss' kappa measure for assessing the reliability of
    agreement between a fixed number n of raters when assigning categorical
    ratings to a number of items.
    Args:
        ratings: a list of (item, category)-ratings
        n: number of raters
    Returns:
        the Fleiss' kappa score
    See also:
        http://en.wikipedia.org/wiki/Fleiss'_kappa
    '''
    items = set()
    categories = set()
    # counts[(item, category)] = number of raters assigning `category` to `item`
    counts = {}
    for item, category in ratings:
        items.add(item)
        categories.add(category)
        counts[(item, category)] = counts.get((item, category), 0) + 1
    N = len(items)
    # p_j: proportion of all assignments made to category j.
    p_j = {
        c: sum(counts.get((i, c), 0) for i in items) / (1.0 * n * N)
        for c in categories
    }
    # P_i: extent of rater agreement on item i.
    P_i = {
        i: (sum(counts.get((i, c), 0) ** 2 for c in categories) - n) / (n * (n - 1.0))
        for i in items
    }
    observed = sum(P_i.values()) / (1.0 * N)
    expected = sum(p ** 2 for p in p_j.values())
    return (observed - expected) / (1 - expected)
def plot_agreement(x, threshold):
    """Save a normalized histogram of agreement scores.

    Args:
        x: sequence of agreement scores.
        threshold: spam-accuracy threshold, used only in the output filename.
    """
    # `normed=` was deprecated in matplotlib 2.1 and removed in 3.1;
    # `density=True` is the drop-in replacement.
    plt.hist(x, density=True, bins=30)
    plt.xlabel('Agreement score')
    plt.savefig('data/plots/' + str(threshold) + '_agreement_score.png')  # save the figure to file
    plt.close()
def plot_threshold_agreement(threshold_list, agreement_list):
    """Save a plot of average and overall agreement vs. the spam threshold.

    Args:
        threshold_list: x-axis values (spam accuracy thresholds).
        agreement_list: sequence of (avg_agreement, overall_agreement) pairs.
    """
    # NOTE(review): 'seaborn-whitegrid' was renamed 'seaborn-v0_8-whitegrid'
    # in matplotlib 3.6 — confirm the targeted matplotlib version.
    plt.style.use('seaborn-whitegrid')
    avg_agreement = [pair[0] for pair in agreement_list]
    overall_agreement = [pair[1] for pair in agreement_list]
    # The original passed two positional format strings ('o' and '-ok') to
    # each plot() call, which matplotlib cannot parse as one data group;
    # a single combined fmt expresses the intended line+marker style.
    plt.plot(threshold_list, avg_agreement, '-o', color='black', label='avg agreement')
    plt.plot(threshold_list, overall_agreement, '-*', color='pink', label='overall agreement')
    plt.legend(numpoints=1)
    plt.xlabel('spam accuracy threshold')
    plt.ylabel('agreement scores')
    plt.savefig('data/plots/spam_agreement_score.png')  # save the figure to file
    plt.close()
def agreement_per_task(hit_id, video_name):
    # Placeholder: per-task agreement is not implemented; always returns None.
    return
def agreement_total(dict_hit_agreement):
    """Return the mean of the per-HIT agreement scores.

    Args:
        dict_hit_agreement: mapping of hit_id -> agreement score.

    Raises:
        ZeroDivisionError: if the mapping is empty.
    """
    # average of per hit agreements
    scores = list(dict_hit_agreement.values())  # was a redundant identity comprehension
    print("For avg total agreement ( erase hits with >=2 spammer) -> number of hits is {0}".format(len(dict_hit_agreement.keys())))
    return sum(scores) / len(scores)
def compute_ratings(df_worker_1, df_worker_2, df_worker_3=None):
    """Build an (item, category) rating list for fleiss_kappa from 2-3 workers.

    Position i in the answer lists becomes item i; each worker's answer at i
    contributes one rating, mapped to category '0' or '1' via Counter counts
    (any other answer value contributes nothing).

    Args:
        df_worker_1, df_worker_2: equal-length sequences of worker answers.
        df_worker_3: optional third worker's answers, or None for two raters.

    Returns:
        list of (index, '0'/'1') tuples suitable for fleiss_kappa.
    """
    list_ratings = []
    for i in range(len(df_worker_1)):
        # `is None` instead of the original `== None`: equality on a pandas
        # Series is element-wise and raises in boolean context.
        if df_worker_3 is None:
            list_results = [df_worker_1[i], df_worker_2[i]]
        else:
            list_results = [df_worker_1[i], df_worker_2[i], df_worker_3[i]]
        c = Counter(list_results)
        list_ratings += [(i, '0')] * c[0] + [(i, '1')] * c[1]
    return list_ratings
def agreement_overall(path_after_spam_filter_csv, do_1_eq_2, potential_spammers):
    """Compute one Fleiss' kappa over all HITs that have three clean workers.

    HITs with a flagged potential spammer or any '-1' (missing) answer are
    skipped entirely; the remaining answers are pooled and scored with
    fleiss_kappa for 3 raters.

    Args:
        path_after_spam_filter_csv: CSV with columns HIT_nb, Worker_1..3.
        do_1_eq_2: if True, collapse answer 2 into answer 1 before scoring.
        potential_spammers: mapping hit_id -> set of flagged workers (may be
            falsy to disable spam filtering).
    """
    df = pd.read_csv(path_after_spam_filter_csv)
    if do_1_eq_2:
        # Merge categories: treat answer 2 as answer 1.
        df['Worker_1'] = df['Worker_1'].replace(2, 1)
        df['Worker_2'] = df['Worker_2'].replace(2, 1)
        df['Worker_3'] = df['Worker_3'].replace(2, 1)
    df_hit = df['HIT_nb']
    set_hit_ids = set(df_hit)
    list_df_worker_1 = []
    list_df_worker_2 = []
    list_df_worker_3 = []
    nb_hits = 0
    for hit_id in set_hit_ids:
        if potential_spammers:
            # Skip HITs with at least one flagged spammer.
            if hit_id in potential_spammers.keys() and potential_spammers[hit_id] != set():
                continue
        df_per_hit = (df.loc[df_hit == hit_id])
        df_worker_1 = df_per_hit['Worker_1']
        df_worker_2 = df_per_hit['Worker_2']
        df_worker_3 = df_per_hit['Worker_3']
        # if potential_spammers != None:
        # A '-1' answer marks a missing worker; such HITs are excluded from
        # the overall (3-rater) score.
        if not df_worker_3[df_worker_3.isin(['-1'])].empty:
            continue
        elif not df_worker_2[df_worker_2.isin(['-1'])].empty:
            continue
        elif not df_worker_1[df_worker_1.isin(['-1'])].empty:
            continue
        else:
            list_df_worker_1 += df_worker_1.values.tolist()
            list_df_worker_2 += df_worker_2.values.tolist()
            list_df_worker_3 += df_worker_3.values.tolist()
            nb_hits += 1
    ratings = compute_ratings(list_df_worker_1, list_df_worker_2,
                              list_df_worker_3)
    print("For overall agreement ( erase hits with >=1 spammer) -> number of hits is {0}".format(nb_hits))
    score = fleiss_kappa(ratings, 3)
    return score
def agreement_per_hit(path_after_spam_filter_csv, do_1_eq_2, do_cohen):
    """Compute an agreement score per HIT and return them sorted descending.

    For each HIT: if a worker column contains '-1' (missing worker), score
    the remaining two workers — with Cohen's kappa when do_cohen is True,
    otherwise with a 2-rater Fleiss' kappa. With all three workers present,
    a 3-rater Fleiss' kappa is always used.

    Args:
        path_after_spam_filter_csv: CSV with columns HIT_nb, Worker_1..3.
        do_1_eq_2: if True, collapse answer 2 into answer 1 before scoring.
        do_cohen: choose Cohen's kappa for the two-worker case.

    Returns:
        OrderedDict of hit_id -> score, sorted by score (highest first).
    """
    # return ordered dictionary with (hit_id, agreement)
    dict_hit_agreement = {}
    df = pd.read_csv(path_after_spam_filter_csv)
    if do_1_eq_2:
        # Merge categories: treat answer 2 as answer 1.
        df['Worker_1'] = df['Worker_1'].replace(2, 1)
        df['Worker_2'] = df['Worker_2'].replace(2, 1)
        df['Worker_3'] = df['Worker_3'].replace(2, 1)
    df_hit = df['HIT_nb']
    set_hit_ids = set(df_hit)
    if do_cohen:
        print(" ---------Doing Cohen agreement for 2 workers -------")
    else:
        print(" ---------Doing Fleiss agreement for 2 workers -------")
    for hit_id in set_hit_ids:
        df_per_hit = (df.loc[df_hit == hit_id])
        df_worker_1 = df_per_hit['Worker_1']
        df_worker_2 = df_per_hit['Worker_2']
        df_worker_3 = df_per_hit['Worker_3']
        if do_cohen:
            # A '-1' in a column means that worker is missing: score the
            # remaining pair with Cohen's kappa.
            if not df_worker_3[df_worker_3.isin(['-1'])].empty:
                score = cohen_kappa_score(df_worker_1.values.tolist(), df_worker_2.values.tolist())
            elif not df_worker_2[df_worker_2.isin(['-1'])].empty:
                score = cohen_kappa_score(df_worker_1.values.tolist(), df_worker_3.values.tolist())
            elif not df_worker_1[df_worker_1.isin(['-1'])].empty:
                score = cohen_kappa_score(df_worker_3.values.tolist(), df_worker_2.values.tolist())
            else:
                # All three workers present: fall back to 3-rater Fleiss.
                ratings = compute_ratings(df_worker_1.values.tolist(), df_worker_2.values.tolist(),
                                          df_worker_3.values.tolist())
                score = fleiss_kappa(ratings, 3)
                print(score)
        else:
            # Same missing-worker handling, but 2-rater Fleiss instead of Cohen.
            if not df_worker_3[df_worker_3.isin(['-1'])].empty:
                ratings = compute_ratings(df_worker_1.values.tolist(), df_worker_2.values.tolist())
                score = fleiss_kappa(ratings, 2)
            elif not df_worker_2[df_worker_2.isin(['-1'])].empty:
                ratings = compute_ratings(df_worker_1.values.tolist(), df_worker_3.values.tolist())
                score = fleiss_kappa(ratings, 2)
            elif not df_worker_1[df_worker_1.isin(['-1'])].empty:
                ratings = compute_ratings(df_worker_2.values.tolist(), df_worker_3.values.tolist())
                score = fleiss_kappa(ratings, 2)
            else:
                ratings = compute_ratings(df_worker_1.values.tolist(), df_worker_2.values.tolist(),
                                          df_worker_3.values.tolist())
                score = fleiss_kappa(ratings, 3)
        dict_hit_agreement[hit_id] = score
    # dictionary sorted by value
    return OrderedDict(sorted(dict_hit_agreement.items(), key=lambda t: t[1], reverse=True))
|
import os
import sys
# Make the sibling `utils` helpers (tf_util, pointnet_util) importable by
# extending sys.path relative to this file's location.
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tensorflow as tf
import numpy as np
import tf_util
from pointnet_util import pointnet_sa_module, pointnet_fp_module, point2sequence_module
def placeholder_inputs(batch_size, num_point):
    """Create TF1 feed placeholders: (B, N, 6) float point clouds and
    (B, N) int32 per-point labels."""
    points_ph = tf.placeholder(tf.float32, shape=(batch_size, num_point, 6))
    labels_ph = tf.placeholder(tf.int32, shape=(batch_size, num_point))
    return points_ph, labels_ph
def get_model(point_cloud, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
    # NOTE(review): the final conv1d produces per-point logits, so the output
    # shape is (B, N, 50) rather than Bx50 as the summary above says — confirm.
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    # Split the input into coordinates (first 3 channels) and normals (last 3).
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])
    # Set Abstraction layers
    l1_xyz, l1_points = point2sequence_module(l0_xyz, l0_points, 384, [8, 16, 32, 64],[[32, 64, 128], [32, 64, 128], [64, 64, 128], [64, 64, 128]], 128, 128,
                            is_training, bn_decay, batch_size=batch_size, scope='layer1')
    # Global feature: group_all=True pools over every remaining point.
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # Feature Propagation layers
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer1')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_xyz, l1_points, [128, 128, 128],
                            is_training, bn_decay, scope='fa_layer2')
    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    # 50 output channels = part-label logits per point (no activation).
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')
    return net, end_points
def get_loss(pred, label):
    """Mean sparse softmax cross-entropy over every point.

    Args:
        pred: (B, N, C) per-point logits.
        label: (B, N) integer class ids.
    """
    per_point_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(per_point_loss)
    # Expose the scalar for TensorBoard and the 'losses' collection.
    tf.summary.scalar('classify loss', classify_loss)
    tf.add_to_collection('losses', classify_loss)
    return classify_loss
# Smoke test: build the network graph on a dummy (32, 2048, 6) batch.
if __name__=='__main__':
    with tf.Graph().as_default():
        inputs = tf.zeros((32,2048,6))
        net, _ = get_model(inputs, tf.constant(True))
        print(net)
|
# To run this, download the BeautifulSoup zip file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter - ')
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
# Retrieve all of the <span> tags (an earlier comment said "anchor tags",
# but the selector below asks for spans)
tags = soup('span')
count=0
for tag in tags:
    # Look at the parts of a tag
    print('TAG:', tag)
    print('URL:', tag.get('href', None))
    print('Contents:', tag.contents[0])
    print('Attrs:', tag.attrs)
    # Accumulate the integer content of each span.
    count+= int(tag.contents[0])
print(count)
#############
#strings
#############
# Demonstrates string slicing, searching and counting on DNA sequences.
seq = 'GAATTCAACTG'
step = 2
startseq = 'AT'
print(seq)
print('a sequence has a start position and an end position [start:end]')
print('[{}:] makes: {}'.format(2,seq[2:]))
print('[{}:{}] makes: {}'.format(0,8,seq[0:8]))
print('[:{}] makes: {}'.format(8,seq[:8]))
print('[{}:{}] makes: {}'.format(-3,-1,seq[-3:-1]))
print('[{}:{}] makes: {}'.format(2,-2,seq[2:-2]))
print('[::{}] makes: {}'.format(step,seq[::step]))
print('[::{}] makes: {}'.format(step-1,seq[::step-1]))
print('[2::{}] makes: {}'.format(step-1,seq[2::step-1]))
#position of element where the string starts in the sequence
# (a no-op `seq.strip()` used to sit here: strings are immutable and its
# return value was discarded, so it has been removed)
print('the startposition of the startsequence AT is : {}'.format(seq.find(startseq)))
print('sequence:{}, length: {}, min: {}, max: {}'.format(seq, len(seq), min(seq), max(seq)))
print('number of A: {}'.format(seq.count('A')))
print('index of first A {}:'.format(seq.index('A')))
print(30*'*')
# strings combine with built-in functions:
# len gives the length, count tallies occurrences, lower makes lowercase
sequence = "AGTCTGAAGT"
print('sequence: {}'.format(sequence))
print('length of sequence {}'.format(len(sequence)))
print('G count:{}'.format(sequence.count('G')))
# BUG FIX: this line used to count 'G' while labelling the result 'C count'.
print('C count:{}'.format(sequence.count('C')))
gc = sequence.count('G') + sequence.count('C')
gc_percentage= gc *100 / len(sequence)
print('gc percentage:{}'.format(gc_percentage))
sequence = sequence.lower()
# a string can be iterated character by character
for letter in sequence:
    print(letter)
for letter in sequence:
    print(letter, end='')
print('\n')
valid_char = 'GATC'
for x in 'ACRGYWCCNA':
    if x in valid_char:
        print(x)
    else:
        print('invalid character: {}'.format(x))
#############
#Lists
#############
print(30*'*')
# Keep only the valid nucleotide characters.
L1 = [letter for letter in 'ACRGYWCCNA' if letter in valid_char]
# print the whole list
print('this is L1: {}'.format(L1))
# a list can be iterated item by item
for entry in L1:
    print(entry)
# or with an index, via enumerate
for position, entry in enumerate(L1):
    print(position, entry)
# lists can be sorted
L2 = sorted(L1)
print(L2)
# and joined back into a single string
sequence = ''.join(L2)
print(sequence)
#############
#Dictionaries
#############
print(30*'*')
## amino acids weight
# Molecular weights for a handful of amino acids.
aaWeights = {'gly':75,
             'ala':89,
             'glu':100,
             'his':155,
             'pro':115,
             'tyr':181}
# update an existing entry
aaWeights['glu']=147
print(aaWeights)
# remove an entry
del aaWeights['gly']
print(aaWeights)
# add an entry
aaWeights['gly']=75
print(aaWeights)
print('\n')
L3 = ['His','his','glu','tyr','Gly','Ala','pro']
# normalise the names to lower case
L3 = [name.lower() for name in L3]
print('lower case list: {} '.format(L3))
# look up each amino acid's weight in the dictionary
for aa in L3:
    weight = aaWeights[aa]
    print('amino acid weight of {} is {}'.format(aa, weight))
# sort alphabetically
L4 = sorted(L3)
print('gesorteerd op alphabeth: {} '.format(L4))
# sort by weight, using the dictionary as the sort key
def aaSorter(aa):
    return aaWeights[aa]
L5 = sorted(L3, key=aaSorter)
print('gesorteerd op gewicht: {} '.format(L5))
for aa in sorted(aaWeights.keys()):
    print(aa)
L6 = list(aaWeights.values())
print(L6)
for aa in sorted(aaWeights.values()):
    print(aa)
for aa in sorted(aaWeights.items()):
    print(aa)
print('\n')
# Restriction enzymes and their recognition sites.
enzymes = {
    "EcoRI":"GAATTC",
    "BamHI":"GGATCC",
    "XbaI":"TCTAGA",
    "BbsI":"GAAGAC",
    "HindIII":"AAGCTT",
    "AfeI":"AGCGCT",
    "PmlI":"CACGTG"
}
#############
#Tuples
#############
print(30*'*')
# Tuples are immutable records: here (name, grade, age).
student_tuples = [('john', 'A', 15),
                  ('jane', 'B', 12),
                  ('dave', 'C', 10)]
print(student_tuples)
# individual records by index
for row in range(3):
    print(student_tuples[row])
# sorting by age, grade and name respectively
print(sorted(student_tuples, key=lambda rec: rec[2]))
print(sorted(student_tuples, key=lambda rec: rec[1]))
print(sorted(student_tuples, key=lambda rec: rec[0]))
|
import string
def triangle(n):
    """Return the n-th triangular number n*(n+1)/2 as an exact integer.

    Uses floor division: n*(n+1) is always even so the result is exact,
    avoiding the float that `/` produces on Python 3 (and its precision
    loss for large n).
    """
    return n * (n + 1) // 2
def isTriangle(x):
    """Return True if x is a triangular number (x = k*(k+1)/2 for some k>=1).

    x is triangular exactly when 8*x + 1 is a perfect square, which replaces
    the original linear scan with an O(1) check.
    """
    from math import isqrt
    if x < 1:
        # The original scan started at triangle(1)=1, so 0 and negative
        # values never matched; keep that behaviour.
        return False
    m = 8 * x + 1
    root = isqrt(m)
    return root * root == m
# NOTE: this script is Python 2 (see the `print n` statement at the end).
alpha={} #The Dictionary which holds the values for the alphabets
# Map 'A'->1 ... 'Z'->26.
alphabet=string.ascii_uppercase
for i in range(len(alphabet)):
    alpha[alphabet[i]]=i+1
#Format the input from the file so that it fits nicely in a list
# Strip the double quotes, then split the comma-separated word list.
# NOTE(review): the file handle is never closed.
f=open("words.txt","r")
s=f.read()
m=""
for i in range(len(s)):
    if(s[i]!='\"'):
        m+=s[i]
l=m.split(',');
#Calculate the number of triangle words
# A word's value is the sum of its letters' alphabet positions; count the
# words whose value is a triangular number (Project Euler 42).
n=0
for i in l:
    m=list(i)
    total=0
    for j in m:
        total+=alpha[j]
    if(isTriangle(total)):
        n+=1
print n
|
from datetime import datetime
from typing import Optional

from pydantic import BaseModel
class EventStreamX(BaseModel):
    """
    Defines types and required fields for this event.

    Every field is required except `context_device_token`, which defaults
    to None.
    """
    id: str
    received_at: datetime
    anonymous_id: str
    context_app_version: str
    context_device_ad_tracking_enabled: bool
    context_device_manufacturer: str
    context_device_model: str
    context_device_type: str
    context_locale: str
    context_network_wifi: bool
    context_os_name: str
    context_timezone: str
    event: str
    event_text: str
    original_timestamp: datetime
    sent_at: datetime
    timestamp: datetime
    user_id: int
    context_network_carrier: str
    # Was `str = None`: pydantic v1 silently treated that as optional, but
    # v2 rejects it — the Optional annotation makes the None default valid.
    context_device_token: Optional[str] = None
    context_traits_taxfix_language: str
|
# NOTE: this module is Python 2 (print statements, `except ImportError, e`).
from lib import BaseTest
import uuid
import os
# Build a module-level Swift connection from environment credentials, or
# leave swift_conn as None to disable the Swift tests.
try:
    import swiftclient
    if 'OS_USERNAME' in os.environ and 'OS_PASSWORD' in os.environ:
        auth_username = os.environ.get('OS_USERNAME')
        auth_password = os.environ.get('OS_PASSWORD')
        # Using auth version 2 /v2.0/
        auth_url = os.environ.get('OS_AUTH_URL')
        auth_tenant = os.environ.get('OS_TENANT_NAME')
        account_username = "%s:%s" % (auth_tenant, auth_username)
        swift_conn = swiftclient.Connection(auth_url, account_username,
                                            auth_password, auth_version=2)
    elif 'ST_USER' in os.environ and 'ST_KEY' in os.environ:
        auth_username = os.environ.get('ST_USER')
        auth_password = os.environ.get('ST_KEY')
        auth_url = os.environ.get('ST_AUTH')
        # Using auth version 1 (/auth/v1.0)
        swift_conn = swiftclient.Connection(auth_url, auth_username,
                                            auth_password, auth_version=1)
    else:
        print "Swift tests disabled: OpenStack creds not found in the environment"
        swift_conn = None
except ImportError, e:
    print "Swift tests disabled: unable to import swiftclient", e
    swift_conn = None
class SwiftTest(BaseTest):
    """
    BaseTest + support for Swift
    """
    def fixture_available(self):
        # Only run when the base fixture is available AND Swift credentials
        # produced a connection at import time.
        return super(SwiftTest, self).fixture_available() and swift_conn is not None
    def prepare(self):
        # Create a uniquely named container for this test run.
        self.container_name = "aptly-sys-test-" + str(uuid.uuid1())
        swift_conn.put_container(self.container_name)
        self.configOverride = {"SwiftPublishEndpoints": {
            "test1": {
                "container": self.container_name,
            }
        }}
        super(SwiftTest, self).prepare()
    def shutdown(self):
        # Delete every object, then the container itself (Swift requires
        # an empty container before deletion).
        if hasattr(self, "container_name"):
            for obj in swift_conn.get_container(self.container_name,
                                                full_listing=True)[1]:
                swift_conn.delete_object(self.container_name, obj.get("name"))
            swift_conn.delete_container(self.container_name)
        super(SwiftTest, self).shutdown()
    def check_path(self, path):
        """Return True if *path* exists as an object or as a prefix ("directory")."""
        # The object listing is fetched once and cached on the instance.
        if not hasattr(self, "container_contents"):
            self.container_contents = [obj.get('name') for obj in
                                       swift_conn.get_container(self.container_name)[1]]
        if path in self.container_contents:
            return True
        # Treat path as a directory prefix.
        if not path.endswith("/"):
            path = path + "/"
        for item in self.container_contents:
            if item.startswith(path):
                return True
        return False
    def check_exists(self, path):
        if not self.check_path(path):
            raise Exception("path %s doesn't exist" % (path, ))
    def check_not_exists(self, path):
        if self.check_path(path):
            raise Exception("path %s exists" % (path, ))
    def read_file(self, path):
        # Returns the object body; headers are discarded.
        hdrs, body = swift_conn.get_object(self.container_name, path)
        return body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.