#!/usr/bin/env python2
from sys import stdin
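# Read n and k from the first line, then count how many of the next n integers are divisible by k.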
n = raw_input().split(' ')
k = int(n[1])
n = int(n[0])
ans = 0
for i in range(0, n):
t = int(stdin.readline())
if (t%k) == 0: ans += 1
print (ans)
|
"""
@Time : 2021/8/27 16:00
@Author : Haiyang Mei
@E-mail : mhy666@mail.dlut.edu.cn
@Project : CVPR2021_PDNet
@File : datasets.py
@Function:
"""
import os
import os.path
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import torch
import numpy as np
import torch.utils.data as data
from skimage import io
import torch.nn.functional as F
import random
def make_dataset(root):
image_path = os.path.join(root, 'image')
depth_path = os.path.join(root, 'depth_normalized')
mask_path = os.path.join(root, 'mask_single')
img_list = [os.path.splitext(f)[0] for f in os.listdir(image_path) if f.endswith('.jpg')]
return [(os.path.join(image_path, img_name + '.jpg'), os.path.join(depth_path, img_name + '.png'), os.path.join(mask_path, img_name + '.png')) for img_name in img_list]
class ImageFolder(data.Dataset):
# image, depth, and mask files share the same base filename and differ only in extension (.jpg for images, .png for depth and mask)
def __init__(self, root, joint_transform=None, transform=None, depth_transform=None, target_transform=None):
self.root = root
self.imgs = make_dataset(root)
self.joint_transform = joint_transform
self.transform = transform
self.depth_transform = depth_transform
self.target_transform = target_transform
def __getitem__(self, index):
img_path, depth_path, gt_path = self.imgs[index]
img = Image.open(img_path).convert('RGB')
depth_uint16 = io.imread(depth_path)
depth = (depth_uint16 / 65535).astype(np.float32)
depth = Image.fromarray(depth)
target = Image.open(gt_path).convert('L')
if self.joint_transform is not None:
img, depth, target = self.joint_transform(img, depth, target)
if self.transform is not None:
img = self.transform(img)
if self.depth_transform is not None:
depth = self.depth_transform(depth)
if self.target_transform is not None:
target = self.target_transform(target)
return img, depth, target
def collate(self, batch):
size = [320, 352, 384, 416][random.randint(0, 3)]
image, depth, mask = [list(item) for item in zip(*batch)]
image = torch.stack(image, dim=0)
image = F.interpolate(image, size=(size, size), mode="bilinear", align_corners=False)
depth = torch.stack(depth, dim=0)
depth = F.interpolate(depth, size=(size, size), mode="bilinear", align_corners=False)
mask = torch.stack(mask, dim=0)
mask = F.interpolate(mask, size=(size, size), mode="nearest")
return image, depth, mask
def __len__(self):
return len(self.imgs)
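# --- Usage sketch (not part of the original file) ---------------------------------
# A minimal, hypothetical example of wiring ImageFolder and its multi-scale collate
# into a DataLoader. The transforms and the dataset root below are assumptions, not
# the ones used by PDNet itself; inputs must already share one spatial size (e.g.
# via a joint_transform) so that torch.stack inside collate() succeeds.
if __name__ == '__main__':
    from torchvision import transforms
    dataset = ImageFolder(root='/path/to/dataset/train',  # expects image/, depth_normalized/, mask_single/ subfolders
                          transform=transforms.ToTensor(),
                          depth_transform=transforms.ToTensor(),
                          target_transform=transforms.ToTensor())
    loader = data.DataLoader(dataset, batch_size=4, shuffle=True,
                             collate_fn=dataset.collate)
    image, depth, mask = next(iter(loader))  # whole batch resized to one of 320/352/384/416
    print(image.shape, depth.shape, mask.shape)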
|
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('conjuet/', include('account.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
# coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from openapi_server.dbmodels.organization import Organization as DbOrganization # noqa: E501
from openapi_server.test.integration import BaseTestCase
from openapi_server.test.integration import util
ID_QUERY = [("organizationId", "awesome-organization")]
REQUEST_HEADERS = {
'Accept': "application/json",
'Content-Type': "application/json",
}
RESPONSE_HEADERS = {
'Accept': "application/json",
}
# TODO: mock 500 responses
class TestOrganizationController(BaseTestCase):
"""OrganizationController integration test stubs"""
def setUp(self):
util.connect_db()
DbOrganization.objects.delete()
def tearDown(self):
util.disconnect_db()
def test_create_organization_with_status201(self):
"""Test case for create_organization
Create an organization (201)
"""
organization = {
'name': "name",
'shortName': "shortName",
'url': "https://openapi-generator.tech"
}
response = self.client.open(
"/api/v1/organizations",
method="POST",
headers=REQUEST_HEADERS,
data=json.dumps(organization),
query_string=ID_QUERY
)
self.assertStatus(
response, 201,
f"Response body is: {response.data.decode('utf-8')}"
)
# TODO: update to test for non-JSON connexion request
def test_create_organization_with_status400(self):
"""Test case for create_organization
Create a (non-JSON) organization (400)
"""
organization = {
'name': "name",
'shortName': "shortName",
'url': "https://openapi-generator.tech"
}
response = self.client.open(
"/api/v1/organizations",
method="POST",
headers=REQUEST_HEADERS,
data=organization,
query_string=ID_QUERY
)
self.assert400(
response,
f"Response body is: {response.data.decode('utf-8')}"
)
def test_create_empty_organization_with_status400(self):
"""Test case for create_organization
Create an empty organization with missing required properties (400)
"""
organization = {}
response = self.client.open(
"/api/v1/organizations",
method="POST",
headers=REQUEST_HEADERS,
data=json.dumps(organization),
query_string=ID_QUERY
)
self.assert400(
response,
f"Response body is: {response.data.decode('utf-8')}"
)
def test_create_organization_with_status409(self):
"""Test case for create_organization
Create a duplicate organization (409)
"""
util.create_test_organization("awesome-organization") # duplicated org
organization = {
'name': "name",
'shortName': "shortName",
'url': "https://openapi-generator.tech"
}
response = self.client.open(
"/api/v1/organizations",
method="POST",
headers=REQUEST_HEADERS,
data=json.dumps(organization),
query_string=ID_QUERY
)
self.assertStatus(
response, 409,
f"Response body is: {response.data.decode('utf-8')}"
)
def test_delete_organization_with_status200(self):
"""Test case for delete_organization
Delete an existing organization (200)
"""
organization = util.create_test_organization("awesome-organization")
response = self.client.open(
f"/api/v1/organizations/{organization.id}",
method="DELETE",
headers=RESPONSE_HEADERS
)
self.assert200(
response,
f"Response body is: {response.data.decode('utf-8')}"
)
def test_delete_organization_with_status404(self):
"""Test case for delete_organization
Delete an unknown organization (404)
"""
organization_id = "foo"
response = self.client.open(
f"/api/v1/organizations/{organization_id}",
method="DELETE",
headers=RESPONSE_HEADERS
)
self.assert404(
response,
f"Response body is: {response.data.decode('utf-8')}"
)
def test_get_organization_with_status200(self):
"""Test case for get_organization
Get an existing organization (200)
"""
organization = util.create_test_organization("awesome-organization")
response = self.client.open(
f"/api/v1/organizations/{organization.id}",
method="GET",
headers=RESPONSE_HEADERS
)
self.assert200(
response,
f"Response body is: {response.data.decode('utf-8')}"
)
def test_get_organization_with_status404(self):
"""Test case for get_organization
Get an unknown organization (404)
"""
organization_id = "foo"
response = self.client.open(
f"/api/v1/organizations/{organization_id}",
method="GET",
headers=RESPONSE_HEADERS
)
self.assert404(
response,
f"Response body is: {response.data.decode('utf-8')}"
)
def test_list_organizations_with_status200(self):
"""Test case for list_organizations
Get all organizations (200)
"""
util.create_test_organization("awesome-organization")
query_string = [("limit", 10),
("offset", 0)]
response = self.client.open(
"/api/v1/organizations",
method="GET",
headers=RESPONSE_HEADERS,
query_string=query_string
)
self.assert200(
response,
f"Response body is: {response.data.decode('utf-8')}"
)
def test_list_organizations_with_status400(self):
"""Test case for list_organizations
Get all organizations using an invalid query (400)
"""
util.create_test_organization("awesome-organization")
query_string = [("limit", "no-limit"),
("offset", "none")]
response = self.client.open(
"/api/v1/organizations",
method="GET",
headers=RESPONSE_HEADERS,
query_string=query_string
)
self.assert400(
response,
f"Response body is: {response.data.decode('utf-8')}"
)
if __name__ == "__main__":
unittest.main()
|
""" Finviz Comparison View """
__docformat__ = "numpy"
import logging
import os
from typing import List
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.comparison_analysis import finviz_compare_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def screener(similar: List[str], data_type: str, export: str = ""):
"""Screener
Parameters
----------
similar : List[str]
Similar companies to compare income with
data_type : str
Screener to use. One of {overview, valuation, financial, ownership, performance, technical}
export : str
Format to export data
"""
df_screen = finviz_compare_model.get_comparison_data(data_type, similar)
if df_screen is None or df_screen.empty:
console.print("No screened data found.")
else:
print_rich_table(
df_screen,
headers=list(df_screen.columns),
show_index=False,
title="Stock Screener",
)
export_data(
export, os.path.dirname(os.path.abspath(__file__)), data_type, df_screen
)
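# Usage sketch (not part of the original module): the tickers below are arbitrary
# examples; running this requires a working openbb_terminal installation and
# network access to Finviz.
if __name__ == "__main__":
    screener(similar=["AAPL", "MSFT", "GOOGL"], data_type="overview")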
|
'''
A library for bringing together various components to interface with and analyze a "real" exported
safety project.
The function build_program_model() is the primary entry point to this module.
'''
from fbdplc.s7xml import parse_static_interface_from_file, parse_tags_from_file
from fbdplc.modeling import ProgramModel, program_model
from fbdplc.apps import parse_s7xml
from fbdplc.functions import Block, Program
from z3 import z3
from fbdplc.graph import MemoryProxy
from fbdplc import sorts
from fbdplc.sorts import UDTInstance, UDTSchema, get_sort_factory, register_udt
from fbdplc import s7db
import logging
logger = logging.getLogger(__name__)
class NotSupportedException(Exception):
pass
def _build_udt(outline, outlines):
name = outline['name']
logger.debug(f'Constructing UDT {name}')
if name in sorts.g_udt_archive:
return sorts.g_udt_archive[name]
schema = UDTSchema(name)
symbols = outline['symbols']
logger.debug(f'Symbols for {name}: {symbols}')
for s in symbols.values():
sort = s['type']
name = s['name']
kind = s['kind']
if kind == 'array':
raise NotSupportedException()
if sort in sorts.SORT_MAP:
# Primitive
schema.fields[name] = sorts.SORT_MAP[sort]
else:
# is a UDT, but do we know about it yet?
if sort not in sorts.g_udt_archive:
_build_udt(outlines[sort], outlines)
schema.fields[name] = sorts.g_udt_archive[sort]
register_udt(schema.name, schema)
return schema
g_counter = 0
def alloc_anonymous():
global g_counter
n = f'__anon_struct{g_counter}'
g_counter += 1
return n
def _process_dbs(db_files, ctx):
mem = MemoryProxy('', ctx)
for p in db_files:
logger.debug(f'Processing {p}')
try:
outline = s7db.parse_db_file(p)
root_name: str = outline['name']
if root_name[0] == '"':
root_name = root_name[1:-1]
symbols = outline['symbols']
# Is the variable interface an ad-hoc data type or a named type?
if type(symbols) == dict:
# Ad-hoc
for name, entry in symbols.items():
outlined_sort = entry['type']
if isinstance(outlined_sort, str):
sort = get_sort_factory(outlined_sort)
elif isinstance(outlined_sort, dict):
# This is an anonymous struct
assert 'name' not in outlined_sort
udt_proto = {
'name': alloc_anonymous(), 'symbols': outlined_sort}
udt = _build_udt(udt_proto, {})
register_udt(udt.name, udt)
sort = udt
else:
raise RuntimeError(
f'Unrecognized type in db outline {outlined_sort} {type(outlined_sort)}')
resolved_name = '.'.join([root_name, name])
mem.create(resolved_name, sort)
elif type(symbols) == str:
# Named
logger.debug(
f'Allocating a named type {type(symbols)} {symbols}')
sort_factory = get_sort_factory(symbols)
mem.create(root_name, sort_factory)
else:
raise AssertionError(
f'Bruh, the symbols variable needs to be either a str or dict, not {type(symbols)}')
except Exception as e:
logger.warning(f'Unable to parse {p}: {e}')
logger.debug(f'An exception occurred: {e}', exc_info=True)
return mem
DEBUG_CONTINUE = True
def _build_udts(udt_files):
outlines = {}
for f in udt_files:
logger.debug(f'Considering UDT file {f}')
try:
udt_outline = s7db.parse_udt_file(f)
outlines[udt_outline['name']] = udt_outline
except Exception as e:
logger.exception(e)
if not DEBUG_CONTINUE:
raise
# Transform these outlines into UDTSchemas, make sure we have definitions for everything,
# and register them.
for _, outline in outlines.items():
name = outline['name']
logger.debug(f'Processing {name}')
try:
_build_udt(outline, outlines)
except (NotSupportedException, KeyError) as e:
logger.warning(f'Unable to parse {name}, {e}')
def _build_fb_udts(function_files):
'''
Some function blocks, those labeled as "FB", may contain static data that implicitly forms a UDT with
the name of the block.
'''
outlines = {}
for f in function_files:
logger.debug(f'Considering block file {f}')
static_iface = parse_static_interface_from_file(f)
if static_iface:
outlines[static_iface['name']] = static_iface
for iface in outlines.values():
_build_udt(iface, outlines)
def process_tags(tag_files, mem: MemoryProxy):
symbols = []
for f in tag_files:
logger.debug(f'Considering tag file {f}')
tag_table_name, tag_table_symbols = parse_tags_from_file(f)
symbols.extend(tag_table_symbols)
for entry in symbols:
name = entry[0]
sort_text = entry[1]
try:
sort = get_sort_factory(sort_text)
mem.create(name, sort)
except Exception as e:
logger.warning(f'Unable to create {sort_text}. Skipping.')
logger.debug(e, exc_info=True)
class ProjectContext:
'''
Container for project data. See __init__ for details: Be sure to specify an
entry point for the analysis.
'''
def __init__(self):
# 'user data types': Be sure to export with the *.udt type
self.udt_srcs = []
# 'data block' files. Define global memory stores. Export with *.db type.
self.db_srcs = []
# 'tag files': Memory mapped IO points. Just more global memory to this
# analysis. Has a *.xml extension.
self.tag_srcs = []
# 'Function block' sources: XML describing the computational graphs that you
# draw inside TIA portal.
self.fb_srcs = []
# Where do we start analysis? A string corresponding to the function block
# where we want to start.
# The default here is the name of the automatically generated TIA portal
# safety task entry point.
self.entry_point = 'Main_Safety_RTG1'
# If your entry point routine is a block of type FB, meaning it has static data
# associated with it, then you can additionally specify the name of the global
# DB that backs it. If it's not specified, then:
# 1. The project will look for a db with the name f'{entry_point}_DB' and use
# that if it exists. Otherwise,
# 2. A global symbol, '__main' with the type of your FB will be allocated.
self.entry_point_db = None
def build_program_model(project: ProjectContext) -> ProgramModel:
'''
Given a project description, this function generates a program model which can be
used for analysis by incorporating global symbols with executable code and returning
a ProgramModel.
Currently there are some global memory accesses (in z3 and in this library) so do not
call this from multiple threads.
TODO(JMeyer): See about creating a user data type store and making all of this thread safe.
'''
# First look at the data types because they may be used in all subsequent steps. Note that this
# step populates the g_udt_archive in the sorts module and is therefore not threadsafe. It is a
# TODO(Jmeyer) to clean this up.
_build_udts(project.udt_srcs)
_build_fb_udts(project.fb_srcs)
# Loop through the data blocks (global variables) and build up the symbol table:
ctx = z3.Context()
mem = _process_dbs(project.db_srcs, ctx)
# Add on the 'tags', or io mapped memory
process_tags(project.tag_srcs, mem)
# ... then start parsing the executable code:
program = Program('')
for f in project.fb_srcs:
block = parse_s7xml.parse_function_from_file(f)
program.blocks[block.name] = block
program.entry = project.entry_point
program.entry_point_db = project.entry_point_db
program.entry_point_db = resolve_entry_point_db(mem, program)
return program_model(program, context=ctx, global_memory=mem)
def resolve_entry_point_db(mem: MemoryProxy, program: Program) -> str:
entry_block = program.blocks[program.entry]
entry_is_fb = entry_block.block_type == Block.BLOCK_TYPE_FB
if not entry_is_fb:
return ''
fb_udt_name = f'"{entry_block.name}"'
# If the user did not specify an entry_db, then first try to locate a suitable one,
# and, failing that, use a special new one.
if program.entry_point_db:
logger.debug(
f'User specified entry = {program.entry} and entry db = {program.entry_point_db}')
# Force an assert
mem.read(program.entry_point_db)
return program.entry_point_db
else:
logger.debug(
f'User did not specify an entry DB for entry = {program.entry}')
# Rule #1: Is there a global memory object with the name and sort of the entry point?
expected = f'{entry_block.name}_DB'
try:
r = mem.read(expected)
except AssertionError as e:
r = None
if isinstance(r, UDTInstance) and r.schema.name == fb_udt_name:
logger.warning(
f'An entry point with static data did not have a "entry_point_db" specified. Automatically assuming global memory db "{expected}" backs this entry point.')
return expected
# Rule #2: Create a special entry point variable
fb_sort = get_sort_factory(fb_udt_name)
logger.warning(
f'An entry point with static data did not have a "entry_point_db" specified and no similarly named DB could be located. Creating "__main" to back these statics.')
mem.create('__main', fb_sort)
return '__main'
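# Usage sketch (not part of the original module): one plausible way to describe a
# project and build its model. The export file paths below are hypothetical placeholders.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    project = ProjectContext()
    project.udt_srcs = ['export/MyTypes.udt']
    project.db_srcs = ['export/GlobalData.db']
    project.tag_srcs = ['export/IOTags.xml']
    project.fb_srcs = ['export/Main_Safety_RTG1.xml']
    project.entry_point = 'Main_Safety_RTG1'
    model = build_program_model(project)
    print(model)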
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author mybsdc <mybsdc@gmail.com>
@date 2020/10/12
@time 15:05
@issues
https://github.com/googleapis/python-firestore/issues/18
https://github.com/firebase/firebase-admin-python/issues/294
https://github.com/firebase/firebase-admin-python/issues/282
https://stackoverflow.com/questions/55876107/how-to-detect-realtime-listener-errors-in-firebase-firestore-database
https://uyamazak.hatenablog.com/entry/2019/07/09/221041
"""
import os
import argparse
import sys
import json
import base64
import time
import datetime
import traceback
import threading
from concurrent.futures import ThreadPoolExecutor
from google.cloud import firestore
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
from google.api_core.exceptions import GoogleAPIError
from google.cloud.firestore_v1.watch import Watch
from loguru import logger
import logging
def catch_exception(origin_func):
def wrapper(self, *args, **kwargs):
"""
Decorator used to catch exceptions
:param origin_func:
:return:
"""
try:
return origin_func(self, *args, **kwargs)
except AssertionError as e:
logger.error('Parameter error: {}', str(e))
except GoogleAPIError as e:
logger.error('GoogleAPIError: {}', str(e))
except Exception as e:
logger.error('Error: {} location: {}', str(e), traceback.format_exc())
finally:
pass
return wrapper
class FirestoreListener(object):
@logger.catch
def __init__(self):
FirestoreListener.check_py_version()
# command-line arguments
self.args = self.get_all_args()
# logging setup
self.__logger_setting()
# Firestore logging: firestore handles its exceptions and logging in its own child process, so the outer layer cannot catch the error messages; however, firestore writes logs via the logging module, so they can be recorded in a file
logging.basicConfig(filename='logs/firestore.log', level=logging.DEBUG if self.args.debug else logging.INFO,
format='[%(asctime)s] %(levelname)s | %(process)d:%(filename)s:%(name)s:%(lineno)d:%(module)s - %(message)s')
# Firestore database configuration
self.db = firestore.Client.from_service_account_json(self.args.key_path)
self.collection_id = self.args.collection_id
self.restart_interval = self.args.restart_interval
self.doc_ref = None
self.doc_watch = None
# Create an Event for notifying main thread
self.callback_done = threading.Event()
self.is_first_time = True
self.today = FirestoreListener.today()
# thread pool
self.max_workers = self.args.max_workers
self.thread_pool_executor = ThreadPoolExecutor(max_workers=self.max_workers)
@staticmethod
def today():
return str(datetime.date.today())
def __logger_setting(self) -> None:
logger.remove()
level = 'DEBUG' if self.args.debug else 'INFO'
format = '<green>[{time:YYYY-MM-DD HH:mm:ss.SSS}]</green> <b><level>{level: <8}</level></b> | <cyan>{process.id}</cyan>:<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>'
logger.add('logs/{time:YYYY-MM-DD}.log', level=level, format=format, encoding='utf-8')
logger.add(sys.stderr, colorize=True, level=level, format=format)
@staticmethod
def check_py_version(major=3, minor=6):
if sys.version_info < (major, minor):
raise UserWarning(f'Please use Python {major}.{minor} or later; Python 3.8 is recommended')
@staticmethod
def get_all_args():
"""
Get all command-line arguments
:return:
"""
parser = argparse.ArgumentParser(description='Arguments passed to FirestoreListener and their meanings',
epilog='e.g. python3.8 -i firestore-listener.py -k=luolongfei-1ad2ca735e37.json')
parser.add_argument('-c', '--collection_id', help='Collection name, i.e. the collection id',
default='Message', type=str)
parser.add_argument('-k', '--key_path',
help='Path to the JSON key file provided by Google; see https://googleapis.dev/python/google-api-core/latest/auth.html for details',
required=True, type=str)
parser.add_argument('-mw', '--max_workers', help='Maximum number of worker threads (used when running the external php command)', default=1, type=int)
parser.add_argument('-d', '--debug', help='Enable debug mode', action='store_true')
parser.add_argument('-r', '--restart_interval', help='Restart interval: restart the listener every given number of minutes', default=20, type=int)
return parser.parse_args()
@staticmethod
def __json_helper(obj):
if isinstance(obj, DatetimeWithNanoseconds):
return obj.timestamp()
raise TypeError(f'Type {type(obj)} is not JSON serializable')
@staticmethod
def __php_run(document: dict) -> None:
"""
Execute the external php command
:param document:
:return:
"""
try:
doc_json = json.dumps(document, default=FirestoreListener.__json_helper, ensure_ascii=False).encode('utf-8')
doc_b64 = base64.b64encode(doc_json).decode('utf-8')
cmd = "php artisan command:fcmpushformessage '{}'".format(doc_b64)
status_code = os.system(cmd)
if status_code != 0:
logger.error('Failed to run external command: {}', cmd)
except Exception as e:
logger.error('Failed to build external command: {}', str(e))
@logger.catch
def __on_snapshot(self, col_snapshot, changes, read_time) -> None:
"""
Firestore callback
Triggers the external command when a new document is added
:param col_snapshot:
:param changes:
:param read_time:
:return:
"""
# runs continuously: switch to a new dated log file when the date changes
real_today = FirestoreListener.today()
if self.today != real_today:
self.today = real_today
self.__logger_setting()
for change in changes:
if change.type.name == 'ADDED':
if self.is_first_time:
self.is_first_time = False
return
# submit the task to the thread pool
self.thread_pool_executor.submit(FirestoreListener.__php_run, change.document.to_dict())
logger.debug('Added document ID: {} content: {}', change.document.id, change.document.to_dict())
elif change.type.name == 'MODIFIED':
logger.debug('Modified document ID: {} content: {}', change.document.id, change.document.to_dict())
elif change.type.name == 'REMOVED':
logger.debug('Removed snapshot or document ID: {} content: {}', change.document.id, change.document.to_dict())
# notify the main thread that this callback has finished, to avoid blocking
self.callback_done.set()
@logger.catch
def __start_snapshot(self, force=False):
isinstance(self.doc_watch, Watch) and self.doc_watch.unsubscribe()
self.doc_ref = self.db.collection(self.collection_id).order_by('updatedAt',
direction=firestore.Query.DESCENDING).limit(1)
# avoid triggering the callback again when forcing a restart
if force:
self.is_first_time = True
self.doc_watch = self.doc_ref.on_snapshot(self.__on_snapshot)
@logger.catch
def __listen_for_changes(self) -> None:
"""
Listen for document changes
Each time a new document is added, on_snapshot drops the old snapshot and creates a new one
:return:
"""
logger.debug(f'Starting realtime listening; the listener restarts automatically every {self.restart_interval} minutes')
start_time = time.time()
self.__start_snapshot()
while True:
if time.time() - int(self.restart_interval) * 60 > start_time:
logger.debug('Restarting the listener')
self.__start_snapshot(force=True)
start_time = time.time()
continue
if self.doc_watch._closed:
logger.error('Detected that the firestore watch has stopped unexpectedly; attempting to restart')
try:
self.__start_snapshot(force=True)
# prevent the exception from bringing the process down
time.sleep(1)
except Exception as e:
logger.error('Restart failed: {}', str(e))
break
time.sleep(0.001)
@logger.catch
@catch_exception
def run(self):
self.__listen_for_changes()
if __name__ == '__main__':
firestore_listener = FirestoreListener()
firestore_listener.run()
|
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
url(r'user-name/$',
views.user_name,
name='user_name'),
url(r'approve/(?P<identifier>\w{10})/(?P<id>\d+)/$',
views.approve_immediately,
name='approve_immediately'),
url(r'remove/(?P<identifier>\w{10})/(?P<id>\d+)/$',
views.remove_immediately,
name='remove_immediately'),
url(r'unsubscribed/(?P<id>\d+)/$',
views.unsubscribed,
name='unsubscribed'),
url(r'unsubscribed/$',
views.unsubscribed,
name='unsubscribed_all'),
url(r'unsubscribe/(?P<identifier>\w{10})/(?P<id>\d+)/$',
views.unsubscribe,
name='unsubscribe_discussion'),
url(r'unsubscribe/(?P<identifier>\w{10})/$',
views.unsubscribe,
name='unsubscribe_all'),
url(r'(?P<id>\d+)/latest/$',
views.event_data_latest,
name='event_data_latest'),
url(r'(?P<id>\d+)/$',
views.event_data,
name='event_data'),
)
|
# Copyright (c) 2020, Zhouxing shi <zhouxingshichn@gmail.com>
# Licensed under the BSD 2-Clause License.
import os, json
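# Run main.py for every (dataset, depth, layer-norm variant) combination and record
# the accuracy read from the first line of each run's log.txt.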
res = {}
for dataset in ["yelp", "sst"]:
for num_layers in range(1, 4):
for ln in ["", "_no", "_standard"]:
dir = "model_{}_{}{}".format(dataset, num_layers, ln)
command = "python main.py --dir={} --data={} --log=log.txt".format(dir, dataset)
print(command)
os.system(command)
with open("log.txt") as file:
acc = float(file.readline())
res[dir] = acc
with open("res_acc.json", "w") as file:
file.write(json.dumps(res, indent=4))
|
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Samragni Banerjee <samragnibanerjee4@gmail.com>
# Alexander Sokolov <alexander.y.sokolov@gmail.com>
#
import unittest
import numpy
import math
from pyscf import gto
from pyscf import scf
from pyscf import adc
from pyscf import mp
mol = gto.Mole()
r_CO = 1.21
r_CH = 1.12
theta = 116.5
x = r_CH * math.cos(theta * math.pi/(2 * 180.0))
y = r_CH * math.sin(theta * math.pi/(2 * 180.0))
mol.atom = [
['C', ( 0.0, 0.0, 0.0)],
['O', ( 0.0, r_CO , 0.0)],
['H', ( 0.0, -x, y)],
['H', ( 0.0, -x , -y)],]
mol.basis = {'H': 'aug-cc-pVQZ',
'C': 'aug-cc-pVQZ',
'O': 'aug-cc-pVQZ',}
mol.verbose = 0
mol.build()
mf = scf.RHF(mol)
mf.conv_tol = 1e-12
mf.kernel()
myadc = adc.ADC(mf)
def tearDownModule():
global mol, mf
del mol, mf
class KnownValues(unittest.TestCase):
def test_check_mp2_energy(self):
mp2 = mp.MP2(mf)
e_mp2 = mp2.kernel()[0]
myadc.max_memory = 20
e_adc_mp2, t_amp1, t_amp2 = myadc.kernel_gs()
diff_mp2 = e_adc_mp2 - e_mp2
self.assertAlmostEqual(diff_mp2, 0.0000000000000, 6)
def test_check_amplitudes(self):
myadc.max_memory = 20
e, t_amp1, t_amp2 = myadc.kernel_gs()
t_amp1_n = numpy.linalg.norm(t_amp1[0])
t_amp2_n = numpy.linalg.norm(t_amp2[0])
self.assertAlmostEqual(t_amp1_n, 0.0456504320024, 6)
self.assertAlmostEqual(t_amp2_n, 0.2977897530749, 6)
if __name__ == "__main__":
print("Ground state calculations for small memory RADC methods for H2CO molecule")
unittest.main()
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
from xml.dom import minidom
class fiscal_ats_ventaptoemision(osv.osv):
_name = 'fiscal.ats_ventaptoemision'
def toxml(self, listaventaspto):
doc = minidom.Document()
ventaspto = doc.createElement('ventasEstablecimiento')
doc.appendChild(ventaspto)
for v in listaventaspto:
detalle = doc.createElement('ventaEst')
ventaspto.appendChild(detalle)
node = doc.createElement('codEstab')
detalle.appendChild(node)
txt = doc.createTextNode( v.establecimiento )
node.appendChild(txt)
node = doc.createElement('ventasEstab')
detalle.appendChild(node)
txt = doc.createTextNode("%.2f" % v.total)
node.appendChild(txt)
return ventaspto
_columns = {
'atsproceso_id':fields.many2one('fiscal.ats_proceso','ATS Proceso', required=True,ondelete='cascade'),
'establecimiento':fields.char('Establecimiento',size=3,required=True),
'total': fields.float('Total', digits=(8,2),required=True),
'manual': fields.boolean('Manual', required=True),
}
_defaults = {
'manual':True
}
fiscal_ats_ventaptoemision()
|
from django.urls import path
from .views import *
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('home',home, name="home"),
path('addproduct',addproduct, name="addproduct"),
path('productlist',ProductList.as_view(),name='productlist'),
path('edit/<pk>',UpdateProduct.as_view(),name='edit'),
path('delete/<pk>',DeleteProduct.as_view(),name='delete'),
path('clothing',Clothing.as_view(),name='clothing'),
path('watches',Watches.as_view(),name='watches'),
path('detail/<pk>',ProductDetail.as_view(),name='detail')
]+static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# Copyright 2016-2020 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import numpy as np
import pytest
import tensorflow as tf
import gpflow
from gpflow.config import default_float
from gpflow.utilities import to_default_float
@dataclass(frozen=True)
class Datum:
rng: np.random.RandomState = np.random.RandomState(0)
X: np.ndarray = rng.randn(100, 2)
Y: np.ndarray = rng.randn(100, 1)
Z: np.ndarray = rng.randn(10, 2)
Xs: np.ndarray = rng.randn(10, 2)
lik = gpflow.likelihoods.Gaussian()
kernel = gpflow.kernels.Matern32()
def test_sgpr_qu():
rng = Datum().rng
X = to_default_float(rng.randn(100, 2))
Z = to_default_float(rng.randn(20, 2))
Y = to_default_float(np.sin(X @ np.array([[-1.4], [0.5]])) + 0.5 * rng.randn(len(X), 1))
model = gpflow.models.SGPR(
(X, Y), kernel=gpflow.kernels.SquaredExponential(), inducing_variable=Z
)
gpflow.optimizers.Scipy().minimize(model.training_loss, variables=model.trainable_variables)
qu_mean, qu_cov = model.compute_qu()
f_at_Z_mean, f_at_Z_cov = model.predict_f(model.inducing_variable.Z, full_cov=True)
np.testing.assert_allclose(qu_mean, f_at_Z_mean, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(tf.reshape(qu_cov, (1, 20, 20)), f_at_Z_cov, rtol=1e-5, atol=1e-5)
|
"""
SOI Tax Data (soi_processing.py):
-------------------------------------------------------------------------------
Last updated: 6/29/2015.
This module creates functions for gathering and processing various SOI Tax
data into a NAICS tree.
"""
# Packages:
import os.path
import sys
import numpy as np
import pandas as pd
import xlrd
# Directories:
_CUR_DIR = os.path.dirname(__file__)
_PROC_DIR = os.path.join(_CUR_DIR, "processing")
_OUT_DIR = os.path.join(_CUR_DIR, "output")
_DATA_DIR = os.path.join(_CUR_DIR, "data")
# Importing custom modules:
import naics_processing as naics
import constants as cst
# Importing soi tax data helper custom modules
sys.path.append(_PROC_DIR)
import pull_soi_corp as corp
import pull_soi_partner as prt
import pull_soi_proprietorship as prop
# Dataframe names:
_TOT_CORP_DF_NM = cst.TOT_CORP_DF_NM
_S_CORP_DF_NM = cst.S_CORP_DF_NM
_C_CORP_DF_NM = cst.C_CORP_DF_NM
_INC_DF_NM = cst.INC_PRT_DF_NM
_AST_DF_NM = cst.AST_PRT_DF_NM
_TYP_DF_NM = cst.TYP_PRT_DF_NM
_NFARM_DF_NM = cst.NON_FARM_PROP_DF_NM
_FARM_DF_NM = cst.FARM_PROP_DF_NM
#
_ALL_SECTORS = cst.ALL_SECTORS_NMS_LIST
_ALL_SECTORS_DICT = cst.ALL_SECTORS_NMS_DICT
def load_corporate(soi_tree=naics.generate_tree(),
from_out=False, get_all=False,
get_tot=False, get_s=False, get_c=False,
output_data=False, out_path=None):
""" Loading the corporate tax soi data into a NAICS Tree.
:param soi_tree: The NAICS tree to put all of the data in.
:param from_out: If the corporate soi data is already in an output folder,
then it can be read in directly from the output.
:param get_all: Get corporate soi data for all kinds of corporations.
:param get_tot: Get the aggregate soi data for corporations.
:param get_s: Get the soi data for s corporations.
:param get_c: Interpolate the soi data for c corporations.
:param output_data: Print the corporate dataframes to csv files in the
output folder.
:param out_path: The output_path, both for reading in output data and for
printing to the output file
.. note: Because there is only data on the aggregate and s corporations,
the c corporations data can only be interpolated if the other two have
been calculated.
"""
# Initializing the output path:
if out_path is None:
out_path = _OUT_DIR
# Initializing booleans based of initial input booleans:
if get_all:
get_tot = True
get_s = True
get_c = True
if not get_tot or not get_s:
get_c = False
# Load the total corporate soi data into the NAICS tree:
if get_tot:
soi_tree = corp.load_soi_tot_corp(data_tree=soi_tree,
from_out=from_out)
if output_data:
naics.print_tree_dfs(tree=soi_tree, out_path=out_path,
data_types=[_TOT_CORP_DF_NM])
# Load the S-corporate soi data into the NAICS tree:
if get_s:
soi_tree = corp.load_soi_s_corp(data_tree=soi_tree,
from_out=from_out)
if output_data:
naics.print_tree_dfs(tree=soi_tree, out_path=out_path,
data_types=[_S_CORP_DF_NM])
# Calculate the C-corporate soi data for the NAICS tree:
if get_c:
soi_tree = corp.calc_c_corp(data_tree=soi_tree,
from_out=from_out)
if output_data:
naics.print_tree_dfs(tree=soi_tree, out_path=out_path,
data_types=[_C_CORP_DF_NM])
return soi_tree
def load_partner(soi_tree=naics.generate_tree(),
from_out=False, output_data=False,
out_path=None):
""" Loading the partnership tax soi data into a NAICS Tree.
:param soi_tree: The NAICS tree to put all of the data in.
:param from_out: If the corporate soi data is already in an output file,
then it can be read in directly from the output.
:param output_data: Print the corporate dataframes to csv files in the
output folder.
:param out_path: The output_path, both for reading in output data and for
printing to the output file
"""
# Initializing the output path:
if out_path is None:
out_path = _OUT_DIR
# Load the soi income data into the NAICS tree:
soi_tree = prt.load_income(data_tree=soi_tree, from_out=from_out)
# Load the soi asset data into the NAICS tree:
soi_tree = prt.load_asset(data_tree=soi_tree, from_out=from_out)
# Load the soi partnership types data into the NAICS tree:
soi_tree = prt.load_type(data_tree=soi_tree, from_out=from_out)
# Output the data to csv files in the output folder:
if output_data:
naics.print_tree_dfs(tree=soi_tree, out_path=out_path,
data_types=[_INC_DF_NM, _AST_DF_NM, _TYP_DF_NM])
return soi_tree
def load_proprietorship(soi_tree=naics.generate_tree(),
from_out=False, get_all=False,
get_nonfarm=False, get_farm=False,
output_data=False, out_path=None):
""" Loading the proprietorship tax soi data into a NAICS Tree.
:param soi_tree: The NAICS tree to put all of the data in.
:param from_out: If the corporate soi data is already in an output file,
then it can be read in directly from the output.
:param output_data: Print the corporate dataframes to csv files in the
output folder.
:param out_path: The output_path, both for reading in output data and for
printing to the output file
"""
# Initializing the output path:
if out_path is None:
out_path = _OUT_DIR
# Load the soi nonfarm data into the NAICS tree:
if get_nonfarm:
soi_tree = prop.load_soi_nonfarm_prop(
data_tree=soi_tree, from_out=from_out
)
# Load the farm data into to the NAICS tree:
if get_farm:
soi_tree = prop.load_soi_farm_prop(
data_tree=soi_tree, from_out=from_out
)
# Output the data to csv files in the output folder:
if output_data:
naics.print_tree_dfs(tree=soi_tree, out_path=out_path,
data_types=[_NFARM_DF_NM, _FARM_DF_NM])
return soi_tree
def calc_assets(soi_tree, asset_tree=naics.generate_tree()):
""" Calculating a breakdown of the various sector type's assets
into fixed assets, inventories, and land.
:param asset_tree: The NAICS tree to put all of the data in.
:param soi_tree: A NAICS tree containing all the pertinent soi data.
"""
# Initializing dataframes for all NAICS industries:
asset_tree.append_all(df_nm="FA", df_cols=_ALL_SECTORS)
asset_tree.append_all(df_nm="INV", df_cols=_ALL_SECTORS)
asset_tree.append_all(df_nm="LAND", df_cols=_ALL_SECTORS)
# Calculate fixed assets, inventories, and land for each industry/sector
for i in range(0, len(asset_tree.enum_inds)):
cur_dfs = soi_tree.enum_inds[i].data.dfs
out_dfs = asset_tree.enum_inds[i].data.dfs
# Total of all the partner data for the current industry:
partner_sum = sum(cur_dfs[_TYP_DF_NM].iloc[0,:])
# C-Corporations:
sector = _ALL_SECTORS_DICT["C_CORP"]
cur_df = cur_dfs[_C_CORP_DF_NM]
out_dfs["FA"][sector][0] = cur_df["depreciable_assets"][0]
out_dfs["INV"][sector][0] = cur_df["inventories"][0]
out_dfs["LAND"][sector][0] = cur_df["land"][0]
# S-Corporations:
sector = _ALL_SECTORS_DICT["S_CORP"]
cur_df = cur_dfs[_S_CORP_DF_NM]
out_dfs["FA"][sector][0] = cur_df["depreciable_assets"][0]
out_dfs["INV"][sector][0] = cur_df["inventories"][0]
out_dfs["LAND"][sector][0] = cur_df["land"][0]
# Partnership sectors:
for sector in cst.DFLT_PRT_TYP_DF_COL_NMS_DICT.values():
if partner_sum != 0:
ratio = abs(float(cur_dfs[_TYP_DF_NM][sector][0]))/partner_sum
else:
ratio = abs(1.0/float(cur_dfs[_TYP_DF_NM].shape[0]))
cur_df = cur_dfs[_AST_DF_NM]
out_dfs["FA"][sector][0] = abs(
ratio*cur_df["depreciable_assets_net"][0]
)
out_dfs["INV"][sector][0] = abs(
ratio*cur_df["inventories_net"][0]
)
out_dfs["LAND"][sector][0] = abs(
ratio*cur_df["land_net"][0]
)
# Sole Proprietorships:
sector = _ALL_SECTORS_DICT["SOLE_PROP"]
if cur_dfs[_INC_DF_NM]["depreciation"][0] != 0:
ratio = abs(float(cur_dfs[_NFARM_DF_NM]["depreciation_deductions"][0])/
cur_dfs[_INC_DF_NM]["depreciation"][0])
else:
ratio = 0.0
cur_df = cur_dfs[_AST_DF_NM]
out_dfs["FA"][sector][0] = abs(
(ratio*
cur_df["depreciable_assets_net"][0])+
cur_dfs[_FARM_DF_NM]["FA"][0]
)
out_dfs["INV"][sector][0] = abs(
(ratio*cur_df["inventories_net"][0])+
cur_dfs[_FARM_DF_NM]["Land"][0]
)
out_dfs["LAND"][sector][0] = abs(ratio*cur_df["land_net"][0])
return asset_tree
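# Usage sketch (not part of the original module): one plausible way to chain the
# loaders above; it assumes the naics/cst helper modules and the SOI data files are
# available in the expected directories.
if __name__ == "__main__":
    soi_tree = naics.generate_tree()
    soi_tree = load_corporate(soi_tree=soi_tree, get_all=True, output_data=True)
    soi_tree = load_partner(soi_tree=soi_tree, output_data=True)
    soi_tree = load_proprietorship(soi_tree=soi_tree, get_nonfarm=True,
                                   get_farm=True, output_data=True)
    asset_tree = calc_assets(soi_tree)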
|
# ********************************************************************************** #
# #
# Project: FastClassAI workbecnch #
# #
# Author: Pawel Rosikiewicz #
# Contact: prosikiewicz_gmail.com #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 Pawel Rosikiewicz #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advanced plots, for statistics,
import matplotlib as mpl # to get some basic functions, helping with plot making
import tensorflow_hub as hub
import tensorflow as tf # tf.__version__
import tensorflow.keras as keras
import matplotlib.pyplot as plt # for making plots,
import scipy.stats as stats # library for statistics and technical programming,
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image, ImageDraw
from IPython.display import display
from tensorflow.keras import backend as K # used for housekeeping of tf models,
# Function, ........................................................................
def make_keyword_list(input_item):
'''
helper function that turns strings and (possibly nested) lists of strings,
mixed together, into one flat list of strings;
floats and integers are converted to strings,
'''
keyword_list = []
if isinstance(input_item, str):
keyword_list.append(input_item)
elif isinstance(input_item, int) or isinstance(input_item, float):
keyword_list.append(str(input_item))
elif isinstance(input_item, list):
for item in input_item:
if isinstance(item, str):
keyword_list.append(item)
elif isinstance(item, int) or isinstance(item, float):
keyword_list.append(str(item))
elif isinstance(item, list):
keyword_list.extend(item)
return keyword_list
# Function, ........................................................................
def find_strings_mathing_any_pattern(*, input_list=None, pattern_list=None, match_any=True, verbose=False):
'''
helper function that returns the items from input_list that match
at least one pattern from pattern_list
match_any : bool, if True, the function returns items that match any pattern in pattern_list;
if False, only items that match all of the patterns are returned
'''
if input_list is None:
if verbose==True:
print(f"No data provided")
else:
pass
return None
else:
# turns strings and embedded lists into one flat list
input_list = make_keyword_list(input_list)
# turn input list into pd series to allow using pandas str functions,
input_series = pd.Series(input_list)
if pattern_list is not None:
# turns strings and embedded lists into one flat list
pattern_list = make_keyword_list(pattern_list)
# find the input items that match any pattern
for i, key in enumerate(pattern_list):
if i==0:
resdf = input_series.str.contains(key)
else:
resdf = pd.concat([resdf, input_series.str.contains(key)], axis=1)
# turn the match results into a list of matching items
if isinstance(resdf, pd.core.series.Series):
result_list = input_series.loc[(resdf==True).values.tolist()].values.tolist()
else:
if match_any==True:
result_list = input_series.loc[(resdf.sum(axis=1)>0).values.tolist()].values.tolist()
else:
result_list = input_series.loc[(resdf.sum(axis=1)==resdf.shape[1]).values.tolist()].values.tolist()
else:
result_list = input_list
if verbose==True:
print(f"Provided {len(input_list)} items and {len(pattern_list)} possible patterns to match in each file")
print(f"Returned {len(result_list)} items that matched to at least one pattern from pattern list")
else:
pass
return result_list
# Function, ........................................................................
def collect_results(*,
paths, # str, or list wiht str,
filename_keywords, # required, at least one.
dirname_keywords=None,
filename_match_any=False,
dirname_match_any=True,
verbose=False
):
"""
Helper function that loads csv files found under the given path(s) and
concatenates them into one DataFrame.
paths : str, or list of str,
filename_keywords : str, or list, or nested list, or mixed; required, at least one.
dirname_keywords : str, or list, or nested list, or mixed, or None,
if None, the path(s) are the final directories in which files are searched,
if not None, the keywords are used to select directory names inside each path,
if "", every directory in each provided path is searched for files matching filename_keywords
filename_match_any : bool, default False; if True, filenames that contain at least one pattern from
filename_keywords are used,
if False, only files whose names contain all of the keywords are loaded
dirname_match_any : bool, same as filename_match_any, but applied to the directory names searched
within the provided path(s)
"""
# set path with results files,
path_list = make_keyword_list(paths)
c=0
for path in path_list:
os.chdir(path)
if dirname_keywords is not None:
# find and select folders inside the path
dirmane_list=[]
for dirname in glob.glob("*"):
dirmane_list.append(dirname)
selected_dirmane_list = find_strings_mathing_any_pattern(
input_list = dirmane_list,
pattern_list = dirname_keywords,
match_any = dirname_match_any
)
else:
selected_dirmane_list=[None]
# load all files that match ALL patterns provided with filename_keywords
for dirname in selected_dirmane_list:
# if no dirname is selected, it means the path(s) are the final destination,
if dirname is not None:
path_to_file = os.path.join(path, dirname)
else:
path_to_file = path
os.chdir(path_to_file)
# find all files inside using selected patterns such as the file extension,
filename_list=[]
for filename in glob.glob("*"):
filename_list.append(filename)
selected_filename_list = find_strings_mathing_any_pattern(
input_list = filename_list,
pattern_list = filename_keywords,
match_any = filename_match_any, # returns only the files that contain all provided patterns,
)
for filename in selected_filename_list:
# load the file and add info on the file name and path to it,
one_results_df = pd.read_csv(filename)
one_results_df["full_path"] = path_to_file
one_results_df["file_name"] = filename
# concatenate all the results into one df,
if c==0:
results_df = one_results_df
else:
results_df = pd.concat([results_df, one_results_df], axis=0)
results_df.reset_index(inplace=True, drop=True)
c+=1
if verbose==True:
print(f"Adding: {filename}")
print(f"df shape: {results_df.shape}")
else:
pass
return results_df
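# Usage sketch (not part of the original module): collect result csv files from
# hypothetical model output folders into a single DataFrame. The path and keyword
# values below are arbitrary examples.
if __name__ == "__main__":
    df = collect_results(
        paths="/path/to/experiments",           # hypothetical location
        dirname_keywords=["model_"],            # folders whose name contains "model_"
        filename_keywords=["summary", ".csv"],  # keep files containing both patterns
        filename_match_any=False,
        verbose=True,
    )
    print(df.shape)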
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finds heads for constituency spans using a head rules table. """
# Types of head rules.
LEFT = 1 # for a given tag, find the leftmost child with that tag
RIGHT = 2 # for a given tag, find the rightmost child with that tag
LEFTDIS = 3 # find the leftmost child with tag amongst given tags
RIGHTDIS = 4 # find the rightmost child with tag amongst given tags
# Head rule.
class Rule:
def __init__(self, type, tags):
self.type = type # e.g. LEFT
self.tags = tags # POS tags list
# Head rules table.
# It is a map: constituent tag -> list of rules ordered by descending priority.
class RulesTable:
# Backoff POS tag.
UNKNOWN_POS = '<UNK>'
# Creates an empty rules table.
def __init__(self):
self.table = {}
self.default_rules = []
# Adds a rule to the table.
# 'parent' is the constituency tag, rule_type is the type of rule to
# be added and 'tags' is the comma-separated POS tags string for that rule.
def add(self, parent, rule_type, tags):
rules = self.table.get(parent, None)
if rules is None:
rules = []
self.table[parent] = rules
tags = tags.split(',')
# Add backoff POS tag if no tags are specified.
if len(tags) == 0:
tags.append(RulesTable.UNKNOWN_POS)
rules.append(Rule(rule_type, tags))
# Adds a default rule to the table.
def default(self, type):
self.default_rules = [Rule(type, tags=[RulesTable.UNKNOWN_POS])]
# Returns the rules list for the constituency tag 'parent', falling back
# to the default rules.
def get(self, parent):
return self.table.get(parent, self.default_rules)
# Returns Michael Collins' head rules as per his 1999 thesis.
def collins_head_table():
table = [
('ADJP', LEFT, \
'NNS,QP,NN,$,ADVP,JJ,VBN,VBG,ADJP,JJR,NP,JJS,DT,FW,RBR,RBS,SBAR,RB'),
('ADVP', RIGHT, 'RB,RBR,RBS,FW,ADVP,TO,CD,JJR,JJ,IN,NP,JJS,NN'),
('CONJP', RIGHT, 'CC,RB,IN'),
('FRAG', RIGHT, ''),
('INTJ', LEFT, ''),
('LST', RIGHT, 'LS,:'),
('NAC', LEFT, \
'NN,NNS,NNP,NNPS,NP,NAC,EX,$,CD,QP,PRP,VBG,JJ,JJS,JJR,ADJP,FW'),
('NX', LEFT, ''),
('PP', RIGHT, 'IN,TO,VBG,VBN,RP,FW'),
('PRN', LEFT, ''),
('PRT', RIGHT, 'RP'),
('QP', LEFT, '$,IN,NNS,NN,JJ,RB,DT,CD,NCD,QP,JJR,JJS'),
('RRC', RIGHT, 'VP,NP,ADVP,ADJP,PP'),
('S', LEFT, 'TO,IN,VP,S,SBAR,ADJP,UCP,NP'),
('SBAR', LEFT, 'WHNP,WHPP,WHADVP,WHADJP,IN,DT,S,SQ,SINV,SBAR,FRAG'),
('SBARQ', LEFT, 'SQ,S,SINV,SBARQ,FRAG'),
('SINV', LEFT, 'VBZ,VBD,VBP,VB,MD,VP,S,SINV,ADJP,NP'),
('SQ', LEFT, 'VBZ,VBD,VBP,VB,MD,VP,SQ'),
('UCP', RIGHT, ''),
('VP', LEFT, 'TO,VBD,VBN,MD,VBZ,VB,VBG,VBP,AUX,AUXG,VP,ADJP,NN,NNS,NP'),
('WHADJP', LEFT, 'CC,WRB,JJ,ADJP'),
('WHADVP', RIGHT, 'CC,WRB'),
('WHNP', LEFT, 'WDT,WP,WP$,WHADJP,WHPP,WHNP'),
('WHPP', RIGHT, 'IN,TO,FW'),
('X', RIGHT, ''),
('NP', RIGHTDIS, 'NN,NNP,NNPS,NNS,NX,POS,JJR,NML'),
('NP', LEFT, 'NP'),
('NP', RIGHTDIS, '$,ADJP,PRN'),
('NP', RIGHT, 'CD'),
('NP', RIGHTDIS, 'JJ,JJS,RB,QP'),
('NML', RIGHTDIS, 'NN,NNP,NNPS,NNS,NX,NML,POS,JJR'),
('NML', LEFT, 'NP,PRP'),
('NML', RIGHTDIS, '$,ADJP,JJP,PRN'),
('NML', RIGHT, 'CD'),
('NML', RIGHTDIS, 'JJ,JJS,RB,QP,DT,WDT,RBR,ADVP')
]
rules = RulesTable()
rules.default(LEFT)
for t in table:
rules.add(t[0], t[1], t[2])
return rules
# Returns Yamada and Matsumoto head rules (Yamada and Matsumoto, IWPT 2003).
def yamada_matsumoto_head_table():
rules = RulesTable()
# By default, report the first non-punctuation child from the left as head.
rules.default(LEFT)
table = [
('NP', RIGHTDIS, 'POS,NN,NNP,NNPS,NNS'),
('NP', RIGHT, 'NX,JJR,CD,JJ,JJS,RB,QP,NP'),
('ADJP', RIGHT, \
'NNS,QP,NN,$,ADVP,JJ,VBN,VBG,ADJP,JJR,NP,JJS,DT,FW,RBR,RBS,SBAR,RB'),
('ADVP', LEFT, 'RB,RBR,RBS,FW,ADVP,TO,CD,JJR,JJ,IN,NP,JJS,NN'),
('CONJP', LEFT, 'CC,RB,IN'),
('FRAG', LEFT, ''),
('INTJ', RIGHT, ''),
('LST', LEFT, 'LS,:'),
('NAC', RIGHTDIS, 'NN,NNS,NNP,NNPS'),
('NAC', RIGHT, 'NP,NAC,EX,$,CD,QP,PRP,VBG,JJ,JJS,JJR,ADJP,FW'),
('PP', LEFT, 'IN,TO,VBG,VBN,RP,FW'),
('PRN', RIGHT, ''),
('PRT', LEFT, 'RP'),
('QP', RIGHT, '$,IN,NNS,NN,JJ,RB,DT,CD,NCD,QP,JJR,JJS'),
('RRC', LEFT, 'VP,NP,ADVP,ADJP,PP'),
('S', RIGHT, 'TO,IN,VP,S,SBAR,ADJP,UCP,NP'),
('SBAR', RIGHT, 'WHNP,WHPP,WHADVP,WHADJP,IN,DT,S,SQ,SINV,SBAR,FRAG'),
('SBARQ', RIGHT, 'SQ,S,SINV,SBARQ,FRAG'),
('SINV', RIGHT, 'VBZ,VBD,VBP,VB,MD,VP,S,SINV,ADJP,NP'),
('SQ', RIGHT, 'VBZ,VBD,VBP,VB,MD,VP,SQ'),
('UCP', LEFT, ''),
('VP', LEFT, 'VBD,VBN,MD,VBZ,VB,VBG,VBP,VP,ADJP,NN,NNS,NP'),
('WHADJP', RIGHT, 'CC,WRB,JJ,ADJP'),
('WHADVP', LEFT, 'CC,WRB'),
('WHNP', RIGHT, 'WDT,WP,WP$,WHADJP,WHPP,WHNP'),
('WHPP', LEFT, 'IN,TO,FW'),
('NX', RIGHTDIS, 'POS,NN,NNP,NNPS,NNS'),
('NX', RIGHT, 'NX,JJR,CD,JJ,JJS,RB,QP,NP'),
('X', RIGHT, '')
]
for t in table:
rules.add(t[0], t[1], t[2])
return rules
# Head finder takes a head rules table and a constituency node, and outputs
# the token index of the head for that node. The node is assumed to be a rooted
# tree that goes all the way down to individual tokens with POS tags.
class HeadFinder:
def __init__(self, statistics=None, rulestype="collins"):
self.punctuation = [".", ",", "(", ")", ":", "``", "''"]
self.rules = None
if rulestype == "collins":
self.rules = collins_head_table()
else:
self.rules = yamada_matsumoto_head_table()
# Various counters.
self.num_default = None # default rule usage
self.num_backoff = None # backoff head computation
self.num_none_heads = None # no head could be found
self.num_total = None # total invocations
self.backoff_usage_histogram = None # constituency tag -> backoff heads
self.default_usage_histogram = None # constituency tag -> default rules
if statistics is not None:
self.num_default = statistics.counter("HeadFinder/DefaultRuleUsage")
self.num_backoff = statistics.counter("HeadFinder/BackoffHeads")
self.num_none_heads = statistics.counter("HeadFinder/NoHead")
self.num_total = statistics.counter("HeadFinder/Total")
self.default_usage_histogram = \
statistics.histogram("HeadFinder/DefaultRuleUsageByTag")
self.backoff_usage_histogram = \
statistics.histogram("HeadFinder/BackoffUsageByTag")
# Returns whether POS tag 'tag' is not a punctuation.
def not_punctuation(self, tag):
return tag not in self.punctuation
# Computes head token for node 'root' as per 'rule'.
# Nodes are assumed to have the following fields: children, head, label.
# Children's heads should already have been computed.
#
# Returns the tuple (head, whether backoff was used).
def head_from_rule(self, root, rule, force):
backoff = None
children = root.children
if rule.type in [RIGHT, RIGHTDIS]:
children = list(reversed(root.children)) # right->left traversal (a list, so it can be re-scanned for each tag)
if rule.type in [LEFT, RIGHT]:
for tag in rule.tags:
for child in children:
if child.label == tag:
return child.head, False
if backoff is None and force and self.not_punctuation(child.label):
backoff = child.head
else:
assert rule.type in [LEFTDIS, RIGHTDIS], rule.type
for child in children:
for tag in rule.tags:
if child.label == tag:
return child.head, False
if backoff is None and force and self.not_punctuation(child.label):
backoff = child.head
if backoff is not None and self.num_backoff is not None:
self.num_backoff.increment()
return backoff, backoff is not None
# Recursively finds the head for all nodes starting at 'root'.
def find(self, root):
if root.head is not None:
return root.head
if self.num_total is not None:
self.num_total.increment()
if root.leaf():
assert root.begin == root.end - 1 # should be token
root.head = root.begin # token is its own head
elif len(root.children) == 1:
root.head = self.find(root.children[0])
else:
# Find heads of all children.
for child in root.children:
self.find(child)
# Apply rules to select a head.
rules = self.rules.get(root.label)
if rules is self.rules.default_rules and self.num_default is not None:
self.num_default.increment()
self.default_usage_histogram.increment(root.label)
for rule in rules:
head, via_backoff = self.head_from_rule(root, rule, rule == rules[-1])
if head is not None:
root.head = head
if via_backoff and self.backoff_usage_histogram is not None:
self.backoff_usage_histogram.increment(root.label)
break
if root.head is None and self.num_none_heads is not None:
self.num_none_heads.increment()
return root.head
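# Usage sketch (not part of the original module). _Node below is a hypothetical
# stand-in for the caller's constituency node type; it only provides the fields
# HeadFinder relies on (label, children, begin, end, head, leaf()).
class _Node:
  def __init__(self, label, children=None, begin=None, end=None):
    self.label = label
    self.children = children or []
    self.begin = begin
    self.end = end
    self.head = None

  def leaf(self):
    return len(self.children) == 0

if __name__ == "__main__":
  # "the cat": DT (token 0) + NN (token 1) under an NP; Collins' first NP rule
  # (RIGHTDIS over NN,NNP,...) selects the rightmost NN, i.e. token index 1.
  dt = _Node("DT", begin=0, end=1)
  nn = _Node("NN", begin=1, end=2)
  np = _Node("NP", children=[dt, nn])
  print(HeadFinder(rulestype="collins").find(np))  # -> 1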
|
from typing import Dict
import pytest
import nest_asyncio
from mds.agg_mds import datastore
from unittest.mock import patch
from conftest import AsyncMock
# https://github.com/encode/starlette/issues/440
nest_asyncio.apply()
@pytest.mark.asyncio
async def test_aggregate_commons(client):
with patch.object(
datastore, "get_commons", AsyncMock(return_value={})
) as datastore_mock:
resp = client.get("/aggregate/commons")
assert resp.status_code == 200
assert resp.json() == {}
datastore.get_commons.assert_called_with()
with patch.object(
datastore,
"get_commons",
AsyncMock(return_value={"commons": ["commons1", "commons2"]}),
) as datastore_mock:
resp = client.get("/aggregate/commons")
assert resp.status_code == 200
assert resp.json() == {"commons": ["commons1", "commons2"]}
datastore.get_commons.assert_called_with()
@pytest.mark.asyncio
async def test_aggregate_metadata(client):
with patch.object(
datastore, "get_all_metadata", AsyncMock(return_value=[])
) as datastore_mock:
resp = client.get("/aggregate/metadata")
assert resp.status_code == 200
assert resp.json() == []
datastore.get_all_metadata.assert_called_with(20, 0)
mock_data = {
"commons1": [
{
"study1": {},
}
],
"commons2": [
{
"study2": {},
}
],
}
with patch.object(
datastore, "get_all_metadata", AsyncMock(return_value=mock_data)
) as datastore_mock:
resp = client.get("/aggregate/metadata")
assert resp.status_code == 200
assert resp.json() == mock_data
datastore.get_all_metadata.assert_called_with(20, 0)
@pytest.mark.asyncio
async def test_aggregate_metadata_name(client):
with patch.object(
datastore, "get_all_named_commons_metadata", AsyncMock(return_value=None)
) as datastore_mock:
resp = client.get("/aggregate/metadata/commons1")
assert resp.status_code == 404
assert resp.json() == {
"detail": {
"code": 404,
"message": "no common exists with the given: commons1",
}
}
datastore.get_all_named_commons_metadata.assert_called_with("commons1")
with patch.object(
datastore,
"get_all_named_commons_metadata",
AsyncMock(return_value=[{"study1": {}}]),
) as datastore_mock:
resp = client.get("/aggregate/metadata/commons1")
assert resp.status_code == 200
assert resp.json() == [{"study1": {}}]
datastore.get_all_named_commons_metadata.assert_called_with("commons1")
@pytest.mark.asyncio
async def test_aggregate_metadata_tags(client):
with patch.object(
datastore, "get_commons_attribute", AsyncMock(return_value=None)
) as datastore_mock:
resp = client.get("/aggregate/metadata/commons1/tags")
assert resp.status_code == 404
assert resp.json() == {
"detail": {
"code": 404,
"message": "no common exists with the given: commons1",
}
}
with patch.object(
datastore, "get_commons_attribute", AsyncMock(return_value=["mytag1"])
) as datastore_mock:
resp = client.get("/aggregate/metadata/commons1/tags")
assert resp.status_code == 200
assert resp.json() == ["mytag1"]
datastore.get_commons_attribute.assert_called_with("commons1", "tags")
@pytest.mark.asyncio
async def test_aggregate_metadata_info(client):
with patch.object(
datastore, "get_commons_attribute", AsyncMock(return_value=None)
) as datastore_mock:
resp = client.get("/aggregate/metadata/commons1/info")
assert resp.status_code == 404
assert resp.json() == {
"detail": {
"code": 404,
"message": "no common exists with the given: commons1",
}
}
datastore.get_commons_attribute.assert_called_with("commons1", "info")
with patch.object(
datastore,
"get_commons_attribute",
AsyncMock(return_value={"commons_url": "http://commons"}),
) as datastore_mock:
resp = client.get("/aggregate/metadata/commons1/info")
assert resp.status_code == 200
assert resp.json() == {"commons_url": "http://commons"}
datastore.get_commons_attribute.assert_called_with("commons1", "info")
@pytest.mark.asyncio
async def test_metadata_aggregations(client):
with patch.object(
datastore, "get_aggregations", AsyncMock(return_value=None)
) as datastore_mock:
resp = client.get("/aggregate/metadata/commons1/aggregations")
assert resp.status_code == 404
assert resp.json() == {
"detail": {
"code": 404,
"message": "no common exists with the given: commons1",
}
}
datastore.get_aggregations.assert_called_with("commons1")
@pytest.mark.asyncio
async def test_aggregate_metadata_name_guid(client):
with patch.object(
datastore, "get_by_guid", AsyncMock(return_value=None)
) as datastore_mock:
resp = client.get("/aggregate/metadata/guid/123")
assert resp.status_code == 404
assert resp.json() == {
"detail": {
"code": 404,
"message": "no entry exists with the given guid: 123",
}
}
datastore.get_by_guid.assert_called_with("123")
with patch.object(
datastore, "get_by_guid", AsyncMock(return_value={"study2": {}})
) as datastore_mock:
resp = client.get("/aggregate/metadata/guid/123")
assert resp.status_code == 200
assert resp.json() == {"study2": {}}
datastore.get_by_guid.assert_called_with("123")
|
"""Bipartite.py
Two-color graphs and find related structures.
D. Eppstein, May 2004.
"""
import unittest
from sets import Set
from Biconnectivity import BiconnectedComponents
import Graphs
import DFS
class NonBipartite(Exception):
pass
def TwoColor(G):
"""
Find a bipartition of G, if one exists.
Raises NonBipartite or returns dict mapping vertices
to two colors (True and False).
"""
color = {}
for v,w,edgetype in DFS.search(G):
if edgetype is DFS.forward:
color[w] = not color.get(v,False)
elif edgetype is DFS.nontree and color[v] == color[w]:
raise NonBipartite
return color
def Bipartition(G):
"""
Find a bipartition of G, if one exists.
Raises NonBipartite or returns sequence of vertices
on one side of the bipartition.
"""
color = TwoColor(G)
for v in color:
if color[v]:
yield v
def isBipartite(G):
"""
Return True if G is bipartite, False otherwise.
"""
try:
TwoColor(G)
return True
except NonBipartite:
return False
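# Example (graphs given as dict-of-adjacency-lists, as in the tests below): the
# 4-cycle {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]} is an even cycle, so
# isBipartite returns True, while the triangle {0: [1, 2], 1: [0, 2], 2: [0, 1]}
# contains an odd cycle, so isBipartite returns False.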
def BipartiteOrientation(G,adjacency_list_type=Set):
"""
Given an undirected bipartite graph G, return a directed graph in which
the edges are oriented from one side of the bipartition to the other.
The second argument has the same meaning as in Graphs.copyGraph.
"""
B = Bipartition(G)
return dict([(v,adjacency_list_type(iter(G[v]))) for v in B])
def OddCore(G):
"""
Subgraph of vertices and edges that participate in odd cycles.
Aka, the union of nonbipartite biconnected components.
"""
return Graphs.union(*[C for C in BiconnectedComponents(G)
if not isBipartite(C)])
# If run as "python Bipartite.py", run tests on various small graphs
# and check that the correct results are obtained.
class BipartitenessTest(unittest.TestCase):
def cycle(self,n):
return dict([(i,[(i-1)%n,(i+1)%n]) for i in range(n)])
def testEvenCycles(self):
for i in range(4,12,2):
self.assertEqual(isBipartite(self.cycle(i)), True)
def testOddCycles(self):
for i in range(3,12,2):
self.assertEqual(isBipartite(self.cycle(i)), False)
if __name__ == "__main__":
unittest.main()
|
from bitmath import GiB
from bitmath import MiB
class Album:
def __init__(self,
artist,
album,
url,
band_url='',
slug_text='',
num_comments=0,
tralbum_id=0,
art_id=0,
genre='',
band_id=0,
genre_id=0,
year=1970,
size='0MB',
cover_art=''
):
# Version field for enabling smarter updates
self.ver = 1
# Bandcamp Hub API fields
self.artist = artist
self.title = album
self.tralbum_url = url
self.band_url = band_url
self.slug_text = slug_text
self.num_comments = num_comments
self.tralbum_id = tralbum_id
self.art_id = art_id # cover art?
self.genre = genre
self.genre_id = genre_id
self.band_id = band_id
# Custom properties
self.cover_art = cover_art
self.size = size
self.year = year
self.duration = ''
self.tracklist = []
self.tags = []
self.about = ''
self.is_free = False
def add_track(self, track_title, track_duration):
"""
        Adds a track to the album and recalculates the total running time.
        :param track_title: Track title
        :param track_duration: duration string in the form hours:mins:secs (e.g. 1:09:23) or mins:secs (e.g. 4:59)
"""
self.tracklist.append(track_title)
time = self.duration.split(':') if ':' in self.duration else []
new_track = track_duration.split(':')
if len(time) > len(new_track):
for x in range(0, (len(time) - len(new_track))):
new_track.insert(0, '00')
if len(new_track) > len(time):
for x in range(0, (len(new_track) - len(time))):
time.insert(0, '00')
add_one = False
new_album_time = ''
for l in range(0, len(time)):
index = len(time) - l - 1
additional = 1 if add_one else 0
add_one = False
length = int(new_track[index]) + int(time[index]) + additional
            if (l in [0, 1]) and length >= 60:  # carry once seconds/minutes reach 60
add_one = True
length %= 60
slength = str(length).zfill(2)
new_album_time = slength + ':' + new_album_time if new_album_time != '' else slength
self.duration = new_album_time
def duration_seconds(self):
seconds = 0
time = self.duration.split(':') if ':' in self.duration else []
for i in range(0, len(time)):
multiply = pow(60, i)
seconds += int(time[len(time) - i - 1]) * multiply
return seconds
def size_bytes(self):
if 'GB' in self.size:
return GiB(float(self.size.replace('GB', ''))).bytes
if 'MB' in self.size:
return MiB(float(self.size.replace('MB', ''))).bytes
# for unknown size, let's return some heuristic value based on album duration
return self.duration_seconds() * 1024 * 100
def big(self):
"""
Is the album big?
:return True if the size of the album is more than 300MB, false otherwise
"""
return MiB(300).bytes < self.size_bytes()
|
import calendar as cal
import os
import tempfile
import zipfile
import logging
import fiona
from shapely import geometry
import seaice.nasateam as nt
log = logging.getLogger(__name__)
OCEAN = 0
ICE = 1
COAST = nt.FLAGS['coast']
LAND = nt.FLAGS['land']
MISSING = nt.FLAGS['missing']
def _clim_string(range):
return '_{:04}-{:04}'.format(*range)
def _shapefile_name(config):
"""Returns a string containing the shapefile name (without file extension).
Arguments
---------
config: dictionary containing various settings.
hemi: nt.NORTH or nt.SOUTH
year: int
month: int
day: int
dayofyear: int
version_str: str
polygon: True or False; incompatible with polyline
polyline: True or False; incompatible with polygon
median: True or False
range: list of ints, defines the range of years for the median
"""
if config['polygon']:
kind = 'polygon'
elif config['polyline']:
kind = 'polyline'
if config['median']:
median_part = 'median_'
clim_str = _clim_string(config['range'])
else:
median_part = ''
clim_str = ''
if 'dayofyear' in config:
date_str = '{dayofyear:03}'.format(dayofyear=config['dayofyear'])
elif 'year' in config and 'month' in config and 'day' not in config:
date_str = '{year}{month:02}'.format(year=config['year'], month=config['month'])
elif 'year' not in config and 'month' in config and 'day' in config:
date_str = '{month:02}_{day:02}'.format(month=config['month'], day=config['day'])
elif 'year' not in config and 'month' in config and 'day' not in config:
date_str = '{month:02}'.format(month=config['month'])
return '{median_part}extent_{hemi}_{date_str}{clim_str}_{kind}_{version}'.format(
median_part=median_part,
hemi=config['hemi']['short_name'],
date_str=date_str,
clim_str=clim_str,
kind=kind,
version=config['version_str']
)
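# Illustrative example (hypothetical config): with hemi={'short_name': 'N', ...},
# year=2016, month=9, polygon=True, median=False and version_str='v3.0',
# _shapefile_name returns 'extent_N_201609_polygon_v3.0'.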
def _create_shapefile(config, *geoms):
"""Create a zip file containing a .shp and the other files composing a
Shapefile. Returns the path to the zip file.
Arguments
---------
config: nasateam dictionary containing various settings.
hemi: nt.NORTH or nt.SOUTH
output_dir: directory in which to save the created .zip file
polygon, polyline: determines what kind of shapefile to create; one of
these options must be True, and the other False
geoms: list of shapely.geometry.BaseGeometry objects that can be passed to
shapely.geometry.mapping to write to a shapefile. If creating a polygon
shapefile, these should be Polygon (or similar) objects; if creating a
polyline shapefile, these should be MultiLineString objects.
"""
shapefile_name = _shapefile_name(config)
tree_struct = ''
if not config['flatten']:
tree_struct = _default_archive_paths(config)
os.makedirs(os.path.join(config['output_dir'], tree_struct), exist_ok=True)
shapefile_zip = os.path.join(config['output_dir'], tree_struct, shapefile_name + '.zip')
if config['polygon']:
schema = {'properties': {}, 'geometry': 'Polygon'}
elif config['polyline']:
schema = {'properties': {}, 'geometry': 'MultiLineString'}
with tempfile.TemporaryDirectory() as tmpdir:
shapefile_shp = os.path.join(tmpdir, shapefile_name + '.shp')
with fiona.collection(shapefile_shp, 'w', 'ESRI Shapefile',
schema=schema, crs=config['hemi']['crs']) as output:
for geom in geoms:
output.write({'geometry': geometry.mapping(geom), 'properties': {}})
with zipfile.ZipFile(shapefile_zip, 'w') as z:
for f in os.listdir(tmpdir):
z.write(os.path.join(tmpdir, f), arcname=f)
log.info('Created {}'.format(shapefile_zip))
return shapefile_zip
def _default_archive_paths(config):
"""
Default output directory structure for writing shapefiles. Output roots may be different.
/hemi/monthly/shapefiles/shp_extent/[MM]_[Mon]/extent_[NS]_[YYYYMM]_poly[gon|line]_{ver}.zip
/shp_median/median_[NS]_[MM]_1981_2010_polyline_{ver}.zip
    /dayofyear_median/median_extent_[NS]_[DOY]_1981-2010_polyline_{ver}.zip
"""
log.debug('keys used to create archive paths')
log.debug('month %s', config.get('month', 'no key found for month'))
log.debug('dayofyear %s', config.get('dayofyear', 'no key found for dayofyear'))
log.debug('median %s', config.get('median', 'no key found for median'))
log.debug('hemi %s', config['hemi']['short_name'])
if config.get('median', None):
if config.get('dayofyear'):
temporality = 'daily'
last_dir = 'dayofyear_median'
else:
temporality = 'monthly'
last_dir = 'shp_median'
return os.path.join(config['hemi']['long_name'], temporality, 'shapefiles', last_dir)
# It's not a median file, so we know how to build the output path.
month_dir = '{:02}_{}'.format(config['month'], cal.month_abbr[config['month']])
return os.path.join(config['hemi']['long_name'], 'monthly', 'shapefiles',
'shp_extent', month_dir)
|
from syft import node
from syft.message import execute_capability
import numpy as np
port = 50051
iface = "0.0.0.0"
target_addr = f"http://{iface}:{port}"
remote_caps = node.request_capabilities(target_addr)
print(f"Node at: {target_addr} has capabilities: {remote_caps}")
message = execute_capability(target_addr, "hello", "Client 1")
print(message)
sum1 = execute_capability(target_addr, "sum", [1, 2, 3])
print(sum1)
sum2 = execute_capability(target_addr, "sum_np", [1, 2, 3])
print(sum2)
|
string = "The b";
count = 0;
for i in range(0, len(string)):
if(string[i] != ' '):
count = count + 1;
print("Total number of characters in a string: ",count);
|
import requests
import json
import config
class dns:
api_token: str
email : str
zoneid : str
    dnsrecords : dict
def __init__(self, API_TOKEN: str, EMAIL: str, ZONE_ID: str):
self.api_token = API_TOKEN
self.email = EMAIL
self.zoneid = ZONE_ID
def get_dnsrecords(self) -> None:
# Initialize your dns records.
dnsrec = requests.get("https://api.cloudflare.com/client/v4/zones/"+ self.zoneid +"/dns_records", headers={
"X-Auth-Email" : self.email,
"X-Auth-Key" : self.api_token,
"Content-Type" : "application/json"
})
self.dnsrecords = json.loads(dnsrec.text)
def print_dnsrecords(self) -> None:
# Print your dns records, debug purposes.
print("ZONES AVAILABLE")
for i in self.dnsrecords["result"]:
print("----------------")
print("ID: "+ i["id"])
print("ZONE NAME: " + i["zone_name"])
print("NAME: " + i["name"])
print("TYPE: " + i["type"])
print("CONTENT: "+ i["content"])
def dns_update(self, name: str=config.DOMAIN) -> bool:
# Update the dns pointed ip.
id=""
type : str
ttl : int
proxied : bool
for i in self.dnsrecords["result"]:
if i["name"] == name:
id = i["id"]
type = i["type"]
ttl = i["ttl"]
proxied = i["proxied"]
if id=="":
raise Exception("DNS RECORD DOES NOT EXISTS.")
upddns= requests.put("https://api.cloudflare.com/client/v4/zones/"+self.zoneid+"/dns_records/"+id,
headers={
"X-Auth-Email": self.email,
"X-Auth-Key" : self.api_token,
"Content-Type" : "application/json"
}, data=json.dumps({"type":type, "name": config.DOMAIN,"content":config.IP, "ttl":ttl,"proxied": proxied}, indent=4))
return bool(json.loads(upddns.text)["success"])
if __name__ == "__main__":
dnsrecord = dns(config.API_TOKEN, config.EMAIL, config.ZONE_ID)
dnsrecord.get_dnsrecords()
# UNCOMMENT THIS LINE TO CHECK NAMES OF DNS RECORDS
# dnsrecord.print_dnsrecords()
if dnsrecord.dns_update():
print("Success")
|
from .control import common_process, interactive_process, CommonProcess, InteractiveProcess, TimingContent, common_run, \
timing_run, mutual_run, ProcessResult, RunResult, RunResultStatus, ResourceLimit, Identification
from .entry import DispatchRunner, load_pji_script
from .service import DispatchTemplate, Dispatch, TaskTemplate, Task
|
import boto3
from config.constants import DEFAULT_S3_RETRIES
from config.settings import (S3_BUCKET, BEIWE_SERVER_AWS_ACCESS_KEY_ID,
BEIWE_SERVER_AWS_SECRET_ACCESS_KEY, S3_REGION_NAME)
from libs import encryption
class S3VersionException(Exception): pass
conn = boto3.client('s3',
aws_access_key_id=BEIWE_SERVER_AWS_ACCESS_KEY_ID,
aws_secret_access_key=BEIWE_SERVER_AWS_SECRET_ACCESS_KEY,
region_name=S3_REGION_NAME)
def s3_upload(key_path, data_string, study_object_id, raw_path=False):
if not raw_path:
key_path = study_object_id + "/" + key_path
data = encryption.encrypt_for_server(data_string, study_object_id)
conn.put_object(Body=data, Bucket=S3_BUCKET, Key=key_path, ContentType='string')
def s3_retrieve(key_path, study_object_id, raw_path=False, number_retries=DEFAULT_S3_RETRIES):
""" Takes an S3 file path (key_path), and a study ID. Takes an optional argument, raw_path,
which defaults to false. When set to false the path is prepended to place the file in the
appropriate study_id folder. """
if not raw_path:
key_path = study_object_id + "/" + key_path
encrypted_data = _do_retrieve(S3_BUCKET, key_path, number_retries=number_retries)['Body'].read()
return encryption.decrypt_server(encrypted_data, study_object_id)
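# Illustrative call (hypothetical key path): s3_retrieve("surveys/123.json", study_object_id)
# reads "<study_object_id>/surveys/123.json" from S3 and returns the decrypted contents.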
def _do_retrieve(bucket_name, key_path, number_retries=DEFAULT_S3_RETRIES):
""" Run-logic to do a data retrieval for a file in an S3 bucket."""
try:
return conn.get_object(Bucket=bucket_name, Key=key_path, ResponseContentType='string')
except Exception:
if number_retries > 0:
print("s3_retrieve failed, retrying on %s" % key_path)
return _do_retrieve(bucket_name, key_path, number_retries=number_retries - 1)
raise
def s3_list_files(prefix, as_generator=False):
""" Method fetches a list of filenames with prefix.
note: entering the empty string into this search without later calling
the object results in a truncated/paginated view."""
return _do_list_files(S3_BUCKET, prefix, as_generator=as_generator)
def s3_list_versions(prefix, allow_multiple_matches=False):
"""
Page structure - each page is a dictionary with these keys:
Name, ResponseMetadata, Versions, MaxKeys, Prefix, KeyMarker, IsTruncated, VersionIdMarker
We only care about 'Versions', which is a list of all object versions matching that prefix.
Versions is a list of dictionaries with these keys:
LastModified, VersionId, ETag, StorageClass, Key, Owner, IsLatest, Size
    Returns a list of dictionaries, each with the keys 'VersionId' and 'Key' (the s3 file path).
    If allow_multiple_matches is False, any matched version whose Key is not an exact match
    for the prefix raises S3VersionException.
"""
paginator = conn.get_paginator('list_object_versions')
page_iterator = paginator.paginate(Bucket=S3_BUCKET, Prefix=prefix)
versions = []
for page in page_iterator:
# versions are not guaranteed, usually this means the file was deleted and only has deletion markers.
if 'Versions' not in page.keys():
continue
for s3_version in page['Versions']:
if not allow_multiple_matches and s3_version['Key'] != prefix:
raise S3VersionException("the prefix '%s' was not an exact match" % prefix)
versions.append({
'VersionId': s3_version["VersionId"],
'Key': s3_version['Key'],
})
return versions
def _do_list_files(bucket_name, prefix, as_generator=False):
paginator = conn.get_paginator('list_objects_v2')
page_iterator = paginator.paginate(Bucket=bucket_name, Prefix=prefix)
if as_generator:
return _do_list_files_generator(page_iterator)
else:
items = []
for page in page_iterator:
if 'Contents' in page.keys():
for item in page['Contents']:
items.append(item['Key'].strip("/"))
return items
def _do_list_files_generator(page_iterator):
for page in page_iterator:
if 'Contents' not in page.keys():
return
for item in page['Contents']:
yield item['Key'].strip("/")
def s3_delete(key_path):
raise Exception("NO DONT DELETE")
################################################################################
######################### Client Key Management ################################
################################################################################
def create_client_key_pair(patient_id, study_id):
"""Generate key pairing, push to database, return sanitized key for client."""
public, private = encryption.generate_key_pairing()
s3_upload("keys/" + patient_id + "_private", private, study_id )
s3_upload("keys/" + patient_id + "_public", public, study_id )
def get_client_public_key_string(patient_id, study_id):
"""Grabs a user's public key string from s3."""
key_string = s3_retrieve( "keys/" + patient_id +"_public" , study_id)
return encryption.prepare_X509_key_for_java( key_string )
def get_client_public_key(patient_id, study_id):
"""Grabs a user's public key file from s3."""
key = s3_retrieve( "keys/" + patient_id +"_public", study_id )
return encryption.import_RSA_key( key )
def get_client_private_key(patient_id, study_id):
"""Grabs a user's private key file from s3."""
key = s3_retrieve( "keys/" + patient_id +"_private", study_id)
return encryption.import_RSA_key( key )
|
coffee_src_type = [".coffee"]
cjsx_src_type = [".cjsx", ".coffee"]
def _cjsx_compile_dir(ctx, dir, srcs, generate_dts):
"""
Compiles a single directory of JSX/CoffeeScript files into JavaScript files.
"""
out_dir = ctx.configuration.bin_dir.path + "/" + dir
arguments = [out_dir]
outputs = []
for src in srcs:
if src.extension == "cjsx":
js_name = src.basename.replace(".cjsx", ".js")
elif src.extension == "coffee":
js_name = src.basename.replace(".coffee", ".js")
else:
fail('%s has unknown ext "%s"' % (src.short_path, src.extension))
js_output = ctx.actions.declare_file(js_name)
outputs.append(js_output)
dts_path = ""
if generate_dts:
dts_name = js_name.replace(".js", ".d.ts")
dts_output = ctx.actions.declare_file(dts_name)
outputs.append(dts_output)
dts_path = dts_output.path
arguments.append("%s=%s=%s" % (src.path, js_output.path, dts_path))
ctx.actions.run(
mnemonic = "CompileCJSX",
executable = ctx.executable._cjsxc,
arguments = arguments,
inputs = srcs,
tools = [ctx.executable._node],
outputs = outputs,
)
return outputs
def cjsx_compile(ctx, srcs, generate_dts):
srcs_by_dir = {}
for src in srcs:
dir = src.dirname
if dir not in srcs_by_dir:
srcs_by_dir[dir] = [src]
else:
srcs_by_dir[dir].append(src)
outputs = []
for dir in srcs_by_dir:
outputs += _cjsx_compile_dir(ctx, dir, srcs_by_dir[dir], generate_dts)
return [DefaultInfo(files = depset(outputs))]
def cjsx_srcs_impl(ctx):
return cjsx_compile(ctx, ctx.files.srcs, ctx.attr.generate_dts)
def cjsx_src_impl(ctx):
return cjsx_compile(ctx, ctx.files.src, generate_dts = False)
# -----------------------------------------------------------------------------
cjsx_attrs = {
"generate_dts": attr.bool(default = True),
"_node": attr.label(
default = Label("@com_vistarmedia_rules_js//js/toolchain:node"),
cfg = "host",
executable = True,
allow_files = True,
),
"_cjsxc": attr.label(
default = Label("//coffee/toolchain:cjsxc"),
executable = True,
cfg = "host",
),
}
cjsx_srcs = rule(
cjsx_srcs_impl,
attrs = dict(
cjsx_attrs,
srcs = attr.label_list(allow_files = cjsx_src_type),
),
)
cjsx_src = rule(
cjsx_src_impl,
attrs = dict(
cjsx_attrs,
src = attr.label(allow_files = cjsx_src_type),
),
)
|
import docutils.frontend
import docutils.parsers.rst
import docutils.utils
import pyautogui
import pynput.keyboard as kb
import queue
import subprocess as sp
import threading
import time
slide = 0
slides = [
'''
# Give the Gift of Python
## Grant Jenks
# 1. Python trainer for Fortune 100 companies.
# 2. Married to Chemistry teacher, father of two.
# 3. 100s of hours of 4-12th grade instruction.
'''.splitlines(),
'''
## Setup
# 1. Install Python: https://www.python.org/
# 2. Run IDLE: $ python -m idlelib.idle
# 3. Use built-in Turtle module!
'''.splitlines(),
[
'',
'## Open the Turtle Window',
'',
'from turtle import *', 0.5,
'reset()', 0.5,
],
[
'',
'## Basic Commands',
'',
'forward(100)', 1,
'right(90)', 1,
'fd(100)', 1,
'rt(90)', 1,
'backward(-100)', 1,
'left(-90)', 1,
'forward(100)', 1,
'right(90)', 1,
'undo()', 1,
'undo()', 1,
],
[
'',
'## Loops',
'',
'reset()', 0.5,
'for each in range(5):', 0.5,
'bk(100)', 0.5,
'lt(144)', 0.5,
'', 3,
],
[
'',
'## Functions and Shapes',
'',
'def square():', 0.5,
'begin_fill()', 0.5,
'for each in range(4):', 0.5,
'forward(100)', 0.5,
'right(90)', 0.5,
-1,
'end_fill()', 0.5,
'',
'reset()', 0.5,
'square()', 3,
],
[
'',
'## Dots',
'',
'reset()', 0.5,
'help(dot)', 1,
'dot(100)', 1,
],
[
'',
    '## Colors',
'',
'reset()', 0.5,
'from itertools import *', 0.5,
"colors = cycle(['red', 'green', 'blue', 'purple'])", 0.5,
'def present():', 0.5,
'for i in range(4):', 0.5,
'color(next(colors))', 0.5,
'square()', 0.5,
'left(90)', 0.5,
'',
'present()', 5,
],
[
'',
'## Locations',
'',
'reset()', 0.5,
'def line(a, b, x, y):', 0.5,
'up()', 0.5,
'goto(a, b)', 0.5,
'down()', 0.5,
'goto(x, y)', 0.5,
'',
"color('red')", 0.5,
'width(20)', 0.5,
'line(-100, -100, 0, 200)', 1,
'line(0, 200, 100, -100)', 1,
'line(100, -100, -100, -100)', 1,
],
[
'',
'## Mouse Inputs',
'',
'width(10)', 0.5,
"color('green')", 0.5,
'def tap(x, y):', 0.5,
'goto(x, y)', 0.5,
'dot(20)', 0.5,
'',
'onscreenclick(tap)', 0.5,
],
[
'',
'## Keyboard Events',
'',
'reset()', 0.5,
'width(10)', 0.5,
"onkey(lambda: fd(30), 'Up')", 0.5,
"onkey(lambda: bk(30), 'Down')", 0.5,
"onkey(lambda: lt(30), 'Left')", 0.5,
"onkey(lambda: rt(30), 'Right')", 0.5,
'listen()', 0.5,
],
[
'',
'## Animation',
'',
'hideturtle()', 0.5,
'tracer(False)', 0.5,
'running = True', 0.5,
'def draw():', 0.5,
'clear()', 0.5,
'present()', 0.5,
'update()', 0.5,
'left(1)', 0.5,
'if running:', 0.5,
'ontimer(draw, 100)',
'',
'reset()', 0.5,
'draw()', 0.5,
],
'''
## Free Python Games
# 1. Search: Free Python Games
# 2. $ python -m pip install freegames
# 3. http://www.grantjenks.com/docs/freegames/
'''.splitlines(),
]
def worker():
global slide
while True:
key = inputs.get()
if key == kb.Key.esc:
print('Typing slide', slide)
parts = slides[slide]
for part in parts:
if part == '':
pyautogui.press('enter')
time.sleep(0.25)
elif part == -1:
pyautogui.press('backspace')
elif isinstance(part, str):
pyautogui.typewrite(part, interval=0.1)
pyautogui.press('enter')
else:
time.sleep(part)
slide += 1
def ticker():
def on_press(key):
inputs.put(key)
with kb.Listener(on_press=on_press) as listener:
listener.join()
def commander():
global slide
while True:
value = input()
if value == 'q':
exit()
try:
slide = int(value)
except ValueError:
pass
def main():
global inputs
idle = sp.Popen(['python', '-m', 'idlelib.idle'])
inputs = queue.Queue()
work = threading.Thread(target=worker)
work.start()
tick = threading.Thread(target=ticker)
tick.start()
cmdr = threading.Thread(target=commander)
cmdr.start()
cmdr.join()
tick.join()
work.join()
idle.wait()
if __name__ == '__main__':
main()
|
"""
Test cases for the wiutils.summarizing.compute_count_summary function.
"""
import numpy as np
import pandas as pd
import pytest
from wiutils.summarizing import compute_count_summary
@pytest.fixture(scope="function")
def images():
return pd.DataFrame(
{
"deployment_id": [
"001",
"001",
"001",
"002",
"002",
"002",
"002",
"002",
"002",
],
"class": [
"Mammalia",
"Mammalia",
"Mammalia",
"Aves",
"No CV Result",
"Mammalia",
"Aves",
"Aves",
"Mammalia",
],
"order": [
"Carnivora",
"Carnivora",
"Rodentia",
"Pelecaniformes",
"No CV Result",
"Primates",
"Passeriformes",
"Passeriformes",
"Carnivora",
],
"family": [
"Felidae",
"Felidae",
np.nan,
"Ardeidae",
"No CV Result",
"Cebidae",
"Corvidae",
"Tyrannidae",
"Felidae",
],
"genus": [
"Leopardus",
"Leopardus",
np.nan,
"Bubulcus",
"No CV Result",
"Saimiri",
"Cyanocorax",
"Elaenia",
"Leopardus",
],
"species": [
"pardalis",
"pardalis",
np.nan,
"ibis",
"No CV Result",
"sciureus",
"violaceus",
np.nan,
"pardalis",
],
"timestamp": [
"2020-11-24 00:06:26",
"2020-11-24 00:54:12",
"2020-12-20 22:16:10",
"2020-12-18 16:48:04",
"2020-12-23 07:26:33",
"2020-12-24 08:09:32",
"2020-12-24 09:15:01",
"2020-12-24 13:48:12",
"2020-12-21 03:12:21",
],
"number_of_objects": [1, 1, 2, 1, 1, 3, 1, 1, 1],
}
)
@pytest.fixture(scope="function")
def deployments():
return pd.DataFrame({"deployment_id": ["001", "002"], "placename": ["AAA", "AAA"]})
def test_groupby_deployment(images):
result = compute_count_summary(images, groupby="deployment")
expected = pd.DataFrame(
{
"deployment_id": ["001", "002"],
"total_images": [3, 6],
"identified_images": [3, 5],
"records": [4, 7],
"taxa": [2, 5],
}
)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
def test_groupby_location(images, deployments):
result = compute_count_summary(images, deployments, groupby="location")
expected = pd.DataFrame(
{
"placename": ["AAA"],
"total_images": [9],
"identified_images": [8],
"records": [11],
"taxa": [6],
}
)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
def test_add_records_by_class_deployment(images):
result = compute_count_summary(
images, groupby="deployment", add_records_by_class=True
)
expected = pd.DataFrame(
{
"deployment_id": ["001", "002"],
"total_images": [3, 6],
"identified_images": [3, 5],
"records": [4, 7],
"records_mammalia": [4, 4],
"records_aves": [0, 3],
"taxa": [2, 5],
}
)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
def test_add_records_by_class_location(images, deployments):
result = compute_count_summary(
images, deployments, groupby="location", add_records_by_class=True
)
expected = pd.DataFrame(
{
"placename": ["AAA"],
"total_images": [9],
"identified_images": [8],
"records": [11],
"records_mammalia": [8],
"records_aves": [3],
"taxa": [6],
}
)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
def test_add_species_by_class_deployment(images):
result = compute_count_summary(
images, groupby="deployment", add_species_by_class=True
)
expected = pd.DataFrame(
{
"deployment_id": ["001", "002"],
"total_images": [3, 6],
"identified_images": [3, 5],
"records": [4, 7],
"taxa": [2, 5],
"taxa_mammalia": [2, 2],
"taxa_aves": [0, 3],
}
)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
def test_add_species_by_class_location(images, deployments):
result = compute_count_summary(
images, deployments, groupby="location", add_species_by_class=True
)
expected = pd.DataFrame(
{
"placename": ["AAA"],
"total_images": [9],
"identified_images": [8],
"records": [11],
"taxa": [6],
"taxa_mammalia": [3],
"taxa_aves": [3],
}
)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
def test_remove_unidentified_kws(images):
result = compute_count_summary(images, remove_unidentified_kws={"rank": "species"})
expected = pd.DataFrame(
{
"deployment_id": ["001", "002"],
"total_images": [3, 6],
"identified_images": [2, 4],
"records": [2, 6],
"taxa": [1, 4],
}
)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
def test_remove_duplicates_kws(images):
result = compute_count_summary(
images, remove_duplicates_kws={"interval": 60, "unit": "minutes"}
)
expected = pd.DataFrame(
{
"deployment_id": ["001", "002"],
"total_images": [3, 6],
"identified_images": [3, 5],
"records": [3, 7],
"taxa": [2, 5],
}
)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
def test_invalid_groupby(images, deployments):
with pytest.raises(ValueError):
compute_count_summary(images, deployments, groupby="placename")
def test_no_deployments(images):
with pytest.raises(ValueError):
compute_count_summary(images, groupby="location")
def test_intact_input(images, deployments):
images_original = images.copy()
deployments_original = deployments.copy()
compute_count_summary(images)
pd.testing.assert_frame_equal(images_original, images)
pd.testing.assert_frame_equal(deployments_original, deployments)
|
import numpy as np
import pandas as pd
import json
import argparse
def convert_factors_to_numeric(dataset):
# DATASET
with open('../Configs/'+dataset+'.json') as config_file:
config = json.load(config_file)
dataset = pd.read_csv('../Data/'+config['filtered_data_with_headers'], header = 0)
factors = pd.read_csv('../Outputs/'+config['factors'], header = 0)
dataset = dataset.values
X = dataset
factors = factors.values
cols = factors[0,:].astype(int)
k = 0
for i in cols:
col = X[:,i-1]
for j, val in enumerate(factors[1:,k]):
col[col == val] = j+1
X[:, i-1] = col
k += 1
op = pd.DataFrame(X, columns=config['columns'])
pd.DataFrame(op).to_csv("../Data/"+config['data_numeric'], index = False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Instance metrics', allow_abbrev=False)
parser.add_argument('--dataset', type=str, required=True, help = 'name of the dataset')
(args, _) = parser.parse_known_args()
convert_factors_to_numeric(args.dataset)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 21 15:22:29 2022
@author: tonifuc3m
"""
import argparse
import warnings
import livingner_app
import livingner_ner_norm
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
return '%s:%s: %s: %s\n' % (filename, lineno, category.__name__, message)
warnings.formatwarning = warning_on_one_line
def parse_arguments():
'''
DESCRIPTION: Parse command line arguments
'''
parser = argparse.ArgumentParser(description='process user given parameters')
parser.add_argument("-g", "--gs_path", required = True, dest = "gs_path",
help = "path to GS file")
parser.add_argument("-p", "--pred_path", required = True, dest = "pred_path",
help = "path to predictions file")
parser.add_argument("-c", "--valid_codes_path", required = False,
default = '../ncbi_codes_unique.tsv',
dest = "codes_path", help = "path to valid codes TSV")
parser.add_argument('-s', '--subtask', required = True, dest = 'subtask',
choices=['ner', 'norm', 'app'],
help = 'Subtask name')
args = parser.parse_args()
gs_path = args.gs_path
pred_path = args.pred_path
codes_path = args.codes_path
subtask = args.subtask
return gs_path, pred_path, codes_path, subtask
if __name__ == '__main__':
gs_path, pred_path, codes_path, subtask = parse_arguments()
if subtask == 'app':
livingner_app.main(gs_path, pred_path, codes_path)
elif subtask == 'ner':
livingner_ner_norm.main(gs_path, pred_path, codes_path, subtask='ner')
elif subtask == 'norm':
livingner_ner_norm.main(gs_path, pred_path, codes_path, subtask='norm')
|
import os
import time
import json
import sys
import shutil
def write_file(base_dir, filename, text):
path = "{}/out/data/{}".format(base_dir, filename)
with open(path, "w") as out:
out.write(text)
def write_event(base_dir, filename, event):
path = "{}/out/events/{}".format(base_dir, filename)
with open(path, 'w') as out:
json.dump(event, out)
def write_insight(base_dir, insight):
path = "{}/out/insights.json".format(base_dir)
with open(path, 'w') as out:
json.dump(insight, out)
def spinning_cursor():
while True:
for cursor in '|/-\\':
yield cursor
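# Illustrative use of spinning_cursor (added sketch): draws a rotating cursor in place.
# spinner = spinning_cursor()
# for _ in range(20):
#     sys.stdout.write(next(spinner))
#     sys.stdout.flush()
#     time.sleep(0.1)
#     sys.stdout.write('\b')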
|
import contextlib
import sqlite3
from typing import Iterator
class RDB:
def __init__(self, database: str, table: str, schema: str):
self.con = sqlite3.connect(database)
self.table = table
self.schema = schema
self.initialize()
def initialize(self):
"""テーブルの初期化"""
q = f"CREATE TABLE IF NOT EXISTS {self.table} {self.schema}"
with self.execute(q, ()):
pass
@contextlib.contextmanager
def execute(self, query: str, params: tuple):
cur = self.con.cursor()
cur.execute(query, params)
yield cur
cur.close()
self.con.commit()
def to_dict(self, item):
raise NotImplementedError
def insert(self, item) -> bool:
"""アイテムの挿入
Returns
-------
インサートできたかどうか
"""
d = self.to_dict(item)
names = []
values = []
for name, value in d.items():
names.append(name)
values.append(value)
fields = ",".join(names)
num = len(names)
placeholder = ",".join(["?"] * num)
q = f"INSERT INTO {self.table}({fields}) VALUES ({placeholder})"
with self.execute(q, tuple(values)) as cur:
return cur.rowcount > 0
def __len__(self) -> int:
"""レコード数"""
q = f"SELECT COUNT(*) FROM {self.table}"
with self.execute(q, ()) as cur:
(count,) = cur.fetchone()
return count
def __iter__(self) -> Iterator:
"""レコードの全列挙"""
q = f"SELECT * FROM {self.table}"
with self.execute(q, ()) as cur:
return iter(cur.fetchall())
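# Minimal usage sketch (added; assumes a "dataset/" directory exists and an item shaped
# like the Annict /v1/works response):
# db = WorkDB()
# db.insert({"id": 1, "title": "Example", "images": {"recommended_url": "http://example.com/a.png"}})
# print(len(db))
# for row in db:
#     print(row)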
class WorkDB(RDB):
"""作品
References
----------
- https://developers.annict.com/docs/rest-api/v1/works
"""
def __init__(self):
schema = """
(
id INTEGER PRIMARY KEY NOT NULL,
title TEXT,
image_url TEXT,
dt TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
"""
super().__init__("dataset/works.db", "works", schema)
def to_dict(self, item: dict) -> dict:
id = item["id"]
title = item["title"]
image_url = item["images"]["recommended_url"]
return {
"id": id,
"title": title,
"image_url": image_url,
}
class ReviewDB(RDB):
"""作品への記録
References
----------
- https://developers.annict.com/docs/rest-api/v1/reviews
"""
def __init__(self):
schema = """
(
id INTEGER PRIMARY KEY NOT NULL,
user_id INTEGER NOT NULL,
work_id INTEGER NOT NULL,
rating_overall_state TEXT,
dt TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
"""
super().__init__("dataset/reviews.db", "reviews", schema)
def to_dict(self, item) -> dict:
id = item["id"]
user_id = item["user"]["id"]
work_id = item["work"]["id"]
rating_overall_state = item["rating_overall_state"]
return {
"id": id,
"user_id": user_id,
"work_id": work_id,
"rating_overall_state": rating_overall_state,
}
class RecordDB(RDB):
"""エピソードへの記録
References
----------
- https://developers.annict.com/docs/rest-api/v1/records
"""
def __init__(self):
schema = """
(
id INTEGER PRIMARY KEY NOT NULL,
user_id INTEGER NOT NULL,
work_id INTEGER NOT NULL,
rating_state TEXT,
dt TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
"""
super().__init__("dataset/records.db", "records", schema)
def to_dict(self, item) -> dict:
id = item["id"]
user_id = item["user"]["id"]
work_id = item["work"]["id"]
rating_state = item["rating_state"]
return {
"id": id,
"user_id": user_id,
"work_id": work_id,
"rating_state": rating_state,
}
class StaffDB(RDB):
"""スタッフ情報
References
----------
- https://developers.annict.com/docs/rest-api/v1/staffs
"""
def __init__(self):
schema = """
(
id INTEGER PRIMARY KEY NOT NULL,
name TEXT NOT NULL,
work_id INTEGER NOT NULL,
dt TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
"""
super().__init__("dataset/staffs.db", "staffs", schema)
def to_dict(self, item) -> dict:
id = item["id"]
name = item["name"]
work_id = item["work"]["id"]
return {
"id": id,
"name": name,
"work_id": work_id,
}
|
# Season - Fall 2016
# Week Date Time Ice Home Team Away Team Home Score Away Score Shootout Clock Operator Disc Jockey First Star Second Star Third Star
# Week 1 9/12/2016 09:00 PM Olympic North Stars Nordiques 1 2 False Todd Maff Proctor Coan Trent
# 9/12/2016 10:00 PM Olympic Whalers Golden Seals 3 4 False Drager Troy
# Week 2 9/19/2016 09:00 PM Olympic Nordiques Whalers 4 1 False Chad Turnbull Paulinski/Pistol Pete Hardy McCrackin
# 9/19/2016 10:00 PM Olympic Golden Seals North Stars 5 8 False Hal Brauny
# Week 3 9/26/2016 09:00 PM Olympic North Stars Whalers 3 2 True Suokas Drager Proctor Manny Paulinski
# 9/26/2016 10:00 PM Olympic Golden Seals Nordiques 7 4 False Charles Troy
# Week 4 10/3/2016 09:00 PM Olympic Whalers Golden Seals 0 6 False Brennan Myers Swanberg Coan ShaunE
# 10/3/2016 10:00 PM Olympic Nordiques North Stars 4 3 False Kevan Paul
# Season - Fall 2017
# Week Date Time Ice Home Team Away Team Home Score Away Score Shootout Clock Operator Disc Jockey Ref #1 Ref #2 First Star Second Star Third Star
# Week 1 9/11/2017 09:00 PM Olympic Whalers Golden Seals 3 2 True Suokas Pete Jim Greg Myers Trent JFlo
# 9/11/2017 10:00 PM Olympic Americans Mighty Ducks 4 2 False Greg Peso Proctor Starr Drago
# 9/11/2017 10:00 PM NHL Nordiques North Stars 1 3 False Whoop Jim Trent Troy Serda Bin
# Week 2 9/18/2017 09:00 PM Olympic North Stars Americans 2 3 False Panty Pete Jim Greg Proctor McCrackin Hetes
# 9/18/2017 10:00 PM Olympic Nordiques Whalers 4 1 False Braun Allen Jim Troy Coan Joe Ellis Kimmel
# 9/18/2017 10:00 PM NHL Golden Seals Mighty Ducks 4 2 False Manny Antioch Greg Chad Riley Matt Taylor Peso
|
import json
import numpy as np
from pypokerengine.players import BasePokerPlayer
from treys import Card
import os
from sample_player.tools.models import load
from sample_player.tools.utils import hand_strength_estimation, hand_strength_estimation_with_time
def save_board_in_tab_and_return_equity(nb_players, saved_tab, hole_card, tmp_hole_card, community_card):
hand_equity = 0
string_community = ' '.join(map(str, community_card))
if len(tmp_hole_card) > 1:
hand = tmp_hole_card
string_hand = ' '.join(map(str, hand))
else:
hand = hole_card
string_hand = ' '.join(map(str, hand))
found = False
for board, equity in saved_tab[string_hand].items():
if len(set(community_card).intersection(set(board.split()))) == len(community_card):
hand_equity = equity
found = True
if found is False:
        # hand_equity = hand_strength_estimation(100000, nb_players, hand, community_card)
hand_equity = hand_strength_estimation_with_time(5, nb_players, hand, community_card)
saved_tab[string_hand].update({string_community: hand_equity})
return hand_equity, saved_tab
def get_state_equity(nb_players, hole_card, tmp_hole_card, community_card=None):
string_hole_card = ' '.join(map(str, hole_card))
label_pre_flop = "pre_flop"
saved_tab = grab_state_equity()
hand_equity = 0
if community_card is not None and len(community_card) > 0:
hand_equity, saved_tab = save_board_in_tab_and_return_equity(nb_players, saved_tab, hole_card, tmp_hole_card, community_card)
else:
try:
hand_equity = saved_tab[string_hole_card][label_pre_flop]
except Exception as e:
found = False
for key, board in saved_tab.items():
if len(set(hole_card).intersection(set(key.split()))) == 2:
hand_equity = board['pre_flop']
tmp_hole_card = key.split(" ")
found = True
break
if found is False:
# hand_equity = hand_strength_estimation(100000, nb_players, hole_card, community_card)
hand_equity = hand_strength_estimation_with_time(5, nb_players, hole_card, community_card)
saved_tab[string_hole_card] = {"pre_flop": hand_equity}
save_state_equity(saved_tab)
return hand_equity, tmp_hole_card
class MCPlayer(BasePokerPlayer):
uuid = 0
uuid_adverse = 1
tmp_hole_card = []
my_position = 0
def receive_game_start_message(self, game_info):
if game_info["seats"][0]["name"] == "MonteCarloAgent":
self.uuid = game_info["seats"][0]["uuid"]
self.uuid_adverse = game_info["seats"][1]["uuid"]
else:
self.uuid = game_info["seats"][1]["uuid"]
self.uuid_adverse = game_info["seats"][0]["uuid"]
def receive_round_start_message(self, round_count, hole_card, seats):
print("MonteCarlo cards : ", hole_card)
if seats[0]["name"] == "MonteCarloAgent":
self.my_position = 0
else:
self.my_position = 1
self.tmp_hole_card = []
def declare_action(self, valid_actions, hole_card, round_state):
street = round_state["street"]
big_blind = round_state['small_blind_amount'] * 2
my_stack = round_state['seats'][0]['stack']
opposing_stack = round_state['seats'][1]['stack']
nb_players = 2
pot = round_state['pot']['main']['amount']
my_bb_stack = my_stack / big_blind
opposing_bb_stack = opposing_stack / big_blind
action = 'call'
raise_amount = 0
treys_hand = convert_card_to_treys(hole_card)
suited_hand = is_suited_hand(hole_card)
        # push or fold if we are short-stacked or if the opponent is short-stacked
if my_bb_stack < 20 or opposing_bb_stack < 20 or my_bb_stack > (opposing_bb_stack * 20 + pot/big_blind):
model = load("sample_player/tools/push_or_fold")
print("Push or fold")
# model.summary()
            # If we are first to act (small blind)
if round_state['small_blind_pos'] == self.my_position:
print("premier de parole")
my_features = np.concatenate((treys_hand, np.array(
[suited_hand, 1, my_bb_stack]))).reshape((1, 16))
allQ_sb = model.predict(my_features)
action_sb = np.argmax(allQ_sb)
# fold
if action_sb == 0:
action = 'fold'
raise_amount = 0
# shove
elif action_sb == 1:
action = 'raise'
raise_amount = valid_actions[2]['amount']['max']
            # Otherwise we are second to act (big blind)
else:
print("deuxieme de parole")
bb_features = np.concatenate((treys_hand, np.array(
[suited_hand, 0, my_bb_stack]))).reshape((1, 16))
allQ_bb = model.predict(bb_features)
action_bb = np.argmax(allQ_bb)
                # If the model's action is fold
if action_bb == 0:
                    # Check if checking is possible
if (valid_actions[1]['amount'] == big_blind and street == "preflop") or valid_actions[1]['amount'] == 0 and street != "preflop":
action = 'call'
raise_amount = 0
                    # Otherwise fold
else:
action = 'fold'
raise_amount = 0
                # If not folding => go all-in
elif action_bb == 1:
action = 'raise'
raise_amount = valid_actions[2]['amount']['max']
# not push or fold
else:
            # Estimate the hand's equity (Monte Carlo simulation)
hand_equity, self.tmp_hole_card = get_state_equity(nb_players, hole_card, self.tmp_hole_card, round_state['community_card'])
            # If we are the first raiser (nothing significant to call)
if valid_actions[1]['amount'] < 2 * big_blind:
                # If equity > 0.56
if hand_equity > 0.5600:
                    # raise a fixed amount: half the pot, with a 3 big-blind minimum
action = 'raise'
if pot > big_blind * 3:
raise_amount = round(pot / 2, 0)
else:
raise_amount = round(big_blind * 3, 0)
                # if equity < 0.35
elif hand_equity < 0.3500:
print("bluff low equity")
                    # bluff-raise the same fixed amount: half the pot
action = 'raise'
raise_amount = round(pot / 2, 0)
                # otherwise just call / check
else:
action = 'call'
raise_amount = 0
            # If there was a raise before us
else:
                # compute the pot odds
action_info = valid_actions[1]
amount = action_info["amount"]
cote = amount / pot
                # With very high equity on the river, shove if a raise is allowed, otherwise call any bet
if hand_equity > 0.8750 and round_state["street"] == "river":
if valid_actions[2]['amount']['max'] != -1:
action = 'raise'
raise_amount = valid_actions[2]['amount']['max']
else:
action = 'call'
raise_amount = 0
elif hand_equity > 0.6900:
action = 'call'
raise_amount = 0
elif hand_equity > 0.5600 and valid_actions[1]['amount'] < 6 * big_blind:
action = 'call'
raise_amount = 0
elif hand_equity > cote:
action = 'call'
raise_amount = 0
                # Otherwise the pot odds do not justify a call: fold
else:
action = 'fold'
raise_amount = 0
return action_to_return(action, valid_actions, raise_amount)
def receive_street_start_message(self, street, round_state):
pass
def receive_game_update_message(self, action, round_state):
pass
def receive_round_result_message(self, winners, hand_info, round_state):
pass
def action_to_return(action, valid_actions, raise_amount):
if action == "raise":
action_info = valid_actions[2]
if raise_amount < action_info["amount"]["min"]:
amount = action_info["amount"]["min"]
elif raise_amount > action_info["amount"]["max"]:
amount = action_info["amount"]["max"]
else:
amount = raise_amount
if action == "call":
action_info = valid_actions[1]
amount = action_info["amount"]
if action == "fold":
action_info = valid_actions[0]
amount = action_info["amount"]
print("MonteCarloAgent action :", action, "amount", amount)
return action, amount # action returned here is sent to the poker engine
def save_state_equity(saved_tab):
if os.getcwd() == '/home/benoit/Documents/Projet_IA/Perso/Poker/PokerAI':
with open("sample_player/tools/hu_hands_equity.json", "w") as file:
json.dump(saved_tab, file)
else:
with open("tools/hu_hands_equity.json", "w") as file:
json.dump(saved_tab, file)
def grab_state_equity():
if os.getcwd() == '/home/benoit/Documents/Projet_IA/Perso/Poker/PokerAI':
with open("sample_player/tools/hu_hands_equity.json", "r") as file:
saved_tab = json.load(file)
return saved_tab
else:
with open("tools/hu_hands_equity.json", "r") as file:
saved_tab = json.load(file)
return saved_tab
def convert_card_to_treys(hand):
num = []
color = []
for card in hand:
color.append(card[0])
num.append(card[1])
card1 = Card.new(num[0].upper() + color[0].lower())
card2 = Card.new(num[1].upper() + color[1].lower())
feat = np.zeros(13)
for c in [card1, card2]:
feat[Card.get_rank_int(c)] = 1
return feat
def is_suited_hand(hand):
color = []
for card in hand:
color.append(card[0])
if color[0] == color[1]:
return True
else:
return False
def setup_ai():
return MCPlayer()
|
"""Displays the metrics of Keras History dictionaries in folder 'Data/NNResults'.
"""
import pickle
import matplotlib.pyplot as plt
# ************************* Config *************************************
# Choose whether custom metrics like TPR or MCC were calculated during training.
custom_metrics = False
# Set this to the directory of the local git repository.
root_dir = './'
combine_data = True
benchmark_names_comb = ['combined_without_mgc_fft_2_1500epochs_normalized',
'combined_without_mgc_fft_2_3000epochs_normalized',
'combined_without_mgc_fft_2_3000epochs',
'combined_without_mgc_fft_2_6000epochs_normalized',
'combined_without_mgc_fft_2_750epochs_normalized',
'combined_without_mgc_fft_2_600epochs_normalized',
'combined_without_mgc_fft_2_6000epochs',
]
# ************************* Config *************************************
def print_history(fname_history, custom_metrics):
with open(fname_history, 'rb') as f:
history_dict = pickle.load(f)
print('Metrics for benchmark', benchmark_name + ' for last epoch:')
if custom_metrics:
# Extract metrics from last epoch of training
SPC = history_dict['specificity'][-1]
ACC = history_dict['accuracy_K'][-1]
accuracy = history_dict['acc'][-1]
TPR = history_dict['true_positive_rate'][-1]
FPR = history_dict['false_positive_rate'][-1]
MCC = history_dict['matthews_correlation_coefficient'][-1]
TP = history_dict['TP'][-1]
TN = history_dict['TN'][-1]
FP = history_dict['FP'][-1]
FN = history_dict['FN'][-1]
num_total = history_dict['num_total'][-1]
num_shorts = history_dict['num_shorts'][-1]
print('Number of samples:', num_total)
print('Number of shorts:', num_shorts)
print('Number of True Positives:', TP)
print('Number of True Negatives:', TN)
print('Number of False Positives:', FP)
print('Number of False Negatives:', FN)
print('accuracy:', accuracy)
print('Sensitivity or True Positive Rate (TPR):', TPR)
print('Specificity (SPC):', SPC)
print('False Alarm Rate (FPR):', FPR)
print('Accuracy (ACC):', ACC)
print('Matthews Correlation Coefficient (MCC):', MCC)
else:
entries = ['loss', 'acc', 'binary_crossentropy', 'val_loss', 'val_acc', 'val_binary_crossentropy']
for entry in entries:
print(entry + ':', history_dict[entry][-1])
if (entry == 'loss') or (entry == 'val_loss'):
plt.semilogy(range(1, len(history_dict[entry][:]) + 1), history_dict[entry][:])
else:
plt.plot(range(1, len(history_dict[entry][:]) + 1), history_dict[entry][:])
plt.xlabel('Epochs')
plt.ylabel(entry)
fname_fig = root_dir + 'Data/NNResults/figures/'+ benchmark_name + '_' + entry + '.png'
plt.savefig(fname_fig)
plt.clf()
print('')
if combine_data:
for benchmark_name in benchmark_names_comb:
fname_history = root_dir + 'Data/NNResults/' + benchmark_name + '_history.pickle'
print_history(fname_history, custom_metrics)
else:
benchmark_names=['mgc_des_perf_1', 'mgc_des_perf_a', 'mgc_des_perf_b',
'mgc_fft_1', 'mgc_fft_2', 'mgc_fft_a',
'mgc_matrix_mult_1', 'mgc_matrix_mult_a', 'mgc_pci_bridge32_a',
'mgc_pci_bridge32_b']
for benchmark_name in benchmark_names:
# Load history dictionary from disk
        fname_history = root_dir + 'Data/NNResults/' + benchmark_name + '_history.pickle'
        print_history(fname_history, custom_metrics)
|
'''
Joe Walter
difficulty: 20%
run time: 0:00
answer: 1322
***
064 Odd Period Square Roots
How many continued fractions of sqrt(n) for n≤10000 have an odd period?
'''
# https://en.wikipedia.org/wiki/Periodic_continued_fraction#Canonical_form_and_repetend
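# Example of what frac_len computes: sqrt(23) = [4; 1, 3, 1, 8, 1, 3, 1, 8, ...],
# so frac_len(23) == 4 (an even period), while sqrt(13) = [3; 1, 1, 1, 1, 6, ...]
# has period 5, which counts towards the answer.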
from math import isqrt
def nonsquare(m):
n = 2
while n < m:
if isqrt(n)**2 != n:
yield n
n += 1
def frac_len(n):
m = [0]
d = [1]
a = [isqrt(n)]
while True:
m_new = d[-1]*a[-1] - m[-1]
d_new = (n - m_new**2)//d[-1]
a_new = (a[0] + m_new)//d_new
m.append(m_new)
d.append(d_new)
a.append(a_new)
if a_new == 2*a[0]:
break
return len(a) - 1
ans = sum( 1 for n in nonsquare(10001) if frac_len(n) % 2 == 1 )
print(ans)
|
from django_filters import (
CharFilter,
FilterSet,
NumberFilter,
CharFilter,
OrderingFilter,
)
from graphene import relay, ObjectType
from graphene_django.types import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from coordinator.api.models.task import Task
class TaskNode(DjangoObjectType):
""" A task run on a task service during a release """
class Meta:
model = Task
filter_fields = {}
interfaces = (relay.Node,)
class TaskFilter(FilterSet):
created_before = NumberFilter(field_name="created_at", lookup_expr="lt")
created_after = NumberFilter(field_name="created_at", lookup_expr="gt")
release = CharFilter(field_name="release__kf_id")
task_service = CharFilter(field_name="task_service__kf_id")
order_by = OrderingFilter(fields=("created_at",))
class Meta:
model = Task
fields = ["kf_id", "state"]
class Query:
task = relay.Node.Field(TaskNode, description="Retrieve a single task")
all_tasks = DjangoFilterConnectionField(
TaskNode,
filterset_class=TaskFilter,
description="Get all tasks from the Coordinator",
)
def resolve_all_tasks(self, info, **kwargs):
user = info.context.user
if hasattr(user, "auth_roles") and (
"ADMIN" in user.auth_roles or "DEV" in user.auth_roles
):
return Task.objects.all()
queryset = Task.objects.filter(release__state="published")
# Return tasks from any releases that the user has a study in
if user and hasattr(user, "auth_groups") and len(user.auth_groups) > 0:
queryset = queryset | Task.objects.filter(
release__studies__kf_id__in=user.auth_groups
)
return queryset
|
from enum import Enum
class AdminOperation(Enum):
CREATE = 0
UPDATE = 1
DELETE = 2
class AdminItemType(Enum):
Announcement = "announcement"
QueryEngine = "query_engine"
QueryMetastore = "query_metastore"
Admin = "admin"
Environment = "environment"
Task = "task"
|
import torch
from torch.autograd import Variable
from .lazy_variable import LazyVariable
class ConstantMulLazyVariable(LazyVariable):
def __init__(self, lazy_var, constant):
if not isinstance(constant, Variable):
tensor_cls = lazy_var.tensor_cls
constant = Variable(tensor_cls(1).fill_(constant))
super(ConstantMulLazyVariable, self).__init__(lazy_var, constant)
self.lazy_var = lazy_var
self.constant = constant
def _matmul_closure_factory(self, *args):
lazy_var_closure = self.lazy_var._matmul_closure_factory(*args[:-1])
constant = args[-1]
def closure(rhs_mat):
res = lazy_var_closure(rhs_mat)
res = res * constant.expand_as(res)
return res
return closure
def _derivative_quadratic_form_factory(self, *args):
lazy_var_closure = self.lazy_var._derivative_quadratic_form_factory(*args[:-1])
constant = args[-1]
def closure(left_factor, right_factor):
res = list(lazy_var_closure(left_factor, right_factor))
for i, item in enumerate(res):
if torch.is_tensor(item) and res[i].sum():
res[i] = res[i] * constant.expand_as(res[i])
# Gradient with respect to the constant
res.append(left_factor.new(1).fill_((left_factor * right_factor).sum()))
return res
return closure
def _size(self):
return self.lazy_var.size()
def _transpose_nonbatch(self):
return ConstantMulLazyVariable(self.lazy_var._transpose_nonbatch(), self.constant)
def _batch_get_indices(self, batch_indices, left_indices, right_indices):
res = self.lazy_var._batch_get_indices(batch_indices, left_indices, right_indices)
return self.constant.expand_as(res) * res
def _get_indices(self, left_indices, right_indices):
res = self.lazy_var._get_indices(left_indices, right_indices)
return self.constant.expand_as(res) * res
def repeat(self, *sizes):
return ConstantMulLazyVariable(self.lazy_var.repeat(*sizes), self.constant)
def __getitem__(self, i):
return self.lazy_var.__getitem__(i) * self.constant
|
from django.urls import reverse
from wagtail.images.views.serve import generate_signature
def generate_image_url(image, filter_spec='original'):
"""Return generated URL for image."""
signature = generate_signature(image.id, filter_spec)
url = reverse('wagtailimages_serve', urlconf='wagtail.images.urls', args=(signature, image.id, filter_spec))
return url
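# Illustrative usage (the exact URL shape depends on how 'wagtailimages_serve' is routed):
# generate_image_url(page.hero_image, 'fill-300x200')
# -> '/images/<signature>/<image_id>/fill-300x200/'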
|
# NOTE: the imports below were missing from this snippet; they assume a TensorFlow/Keras environment.
from tensorflow.keras import layers, models

# this defines the input size of the images we will be feeding into our model
target_size = 28
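# Added sketch (assumption): X and y are never defined in the original snippet. One
# plausible source that matches the 28x28 single-channel input and the 10-way softmax
# below is MNIST; swap in whatever dataset was actually intended.
from tensorflow.keras import datasets, utils

(X_train, y_train), _ = datasets.mnist.load_data()
X = X_train.reshape(-1, target_size, target_size, 1).astype('float32') / 255.0  # scale pixels to [0, 1]
y = utils.to_categorical(y_train, 10)  # one-hot encode the 10 digit classes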
# create an instance of a sequential model
model = models.Sequential()
# first block of convolutional layers
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(target_size, target_size, 1)))
model.add(layers.MaxPooling2D((2, 2)))
# final dense layer for classification
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
# compile the model
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# train the model using 20% of the data for validation
history = model.fit(X, y, batch_size=256, epochs = 5, validation_split = 0.2)
|
from math import pi,sin,cos,acos
def dis(lat1,long1,lat2,long2):
    '''Returns the great-circle distance in km between two (lat, long) points, e.g. two airports.'''
    r_earth=6378.1
    theta1=long1*(2*pi)/360
    theta2=long2*(2*pi)/360
phi1=(90-lat1)*(2*pi)/360
phi2=(90-lat2)*(2*pi)/360
distance=acos(sin(phi1)*sin(phi2)*cos(theta1-theta2)+cos(phi1)*cos(phi2))*r_earth
return distance
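# Illustrative call (approximate result): London Heathrow (51.47, -0.45) to
# New York JFK (40.64, -73.78) comes out at roughly 5.5e3 km.
# print(dis(51.47, -0.45, 40.64, -73.78))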
|
""" All command line related exceptions """
# standart python imports
import logging
import sys
# 3rd party imports
import click
# Creates a ClickLogger
logger = logging.getLogger(__name__)
class Two1Error(click.ClickException):
"""
A ClickException with customized formatting.
"""
def __init__(self, message, json=None):
self.json = json if json else {}
super(Two1Error, self).__init__(message)
def format_message(self):
return click.style(str(self.message), fg='red')
def show(self, file=None):
"""
Same as base method but without printing "Error: "
"""
if file is None:
file = sys.stderr
click.echo(self.format_message(), file=file)
class UnloggedException(Two1Error):
""" An error used to exit out of a commnad and not log the exception """
class MiningDisabledError(UnloggedException):
""" A error indicating that the mining limit has been reached """
class UpdateRequiredError(UnloggedException):
""" Error during a request os made and the client is out of date """
class FileDecodeError(Exception):
""" Error when a config file cannot be decoded """
class ServerRequestError(Two1Error):
""" Error during a request to a server """
def __init__(self, response, message=''):
self.response = response
self.status_code = response.status_code
try:
# For 4XX responses we expect the response json to have this format:
# {"error": "invalid_login", "message": "Incorrect username or password"}
self.data = response.json()
except ValueError:
self.data = {}
message = (
message or self.data.get('message') or self.data.get('error') or
'Unspecified HTTP error (%d).' % self.status_code)
super(ServerRequestError, self).__init__(message)
class ServerConnectionError(Two1Error):
"""Error during a connection to a server"""
def __init__(self, message=""):
super(ServerConnectionError, self).__init__(message)
class BitcoinComputerNeededError(ServerRequestError):
""" Error during a request made on a protected api """
class ValidationError(Two1Error):
""" Manifest validation error occurs when parsing manifest file """
|
#! /usr/bin/env python
"""
Variables that are shared between modules
"""
import sys
import codecs
version = ""
scriptPath = ""
scriptName = ""
mediaInfoExe = ""
mets_ns = ""
mods_ns = ""
premis_ns = ""
ebucore_ns = ""
xlink_ns = ""
xsi_ns = ""
isolyzer_ns = ""
cdInfo_ns = ""
dfxml_ns = ""
dc_ns = ""
hfs_ns = ""
metsSchema = ""
modsSchema = ""
premisSchema = ""
ebucoreSchema = ""
NSMAP = {}
failedPPNs = []
errors = 0
warnings = 0
createSIPs = False
pruneBatch = False
skipChecksumFlag = False
batchErr = ""
dirOut = ""
dirsInMetaCarriers = []
carrierTypeAllowedValues = []
iromlabMajorVersion = 0
iromlabMinorVersion = 11
# Set encoding of the terminal to UTF-8
if sys.version.startswith("2"):
out = codecs.getwriter("UTF-8")(sys.stdout)
err = codecs.getwriter("UTF-8")(sys.stderr)
elif sys.version.startswith("3"):
out = codecs.getwriter("UTF-8")(sys.stdout.buffer)
err = codecs.getwriter("UTF-8")(sys.stderr.buffer)
|
import torch
def save_checkpoint(save_dir, model, optimizer):
torch.save({'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()}, save_dir)
return True
def load_checkpoint(load_dir):
checkpoint = torch.load(load_dir)
return checkpoint
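if __name__ == '__main__':
    # A minimal round-trip sketch; the linear model, optimizer, and file name
    # below are illustrative assumptions, not part of the original module.
    import torch.nn as nn
    import torch.optim as optim
    model = nn.Linear(4, 2)
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    save_checkpoint('checkpoint.pt', model, optimizer)
    checkpoint = load_checkpoint('checkpoint.pt')
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])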
|
from pyspark.mllib.feature import Word2Vec
from xframes.spark_context import CommonSparkContext
class TextModel(object):
def __init__(self, model):
self.model = model
def find_associations(self, word, num=10):
return self.model.findSynonyms(word, num)
class TextBuilder(object):
def __init__(self, corpus, vector_size=100, seed=42):
self.corpus = corpus
self.vector_size = vector_size
self.seed = seed
def train(self):
sc = CommonSparkContext().sc()
rdd = self.corpus.to_spark_rdd()
model = Word2Vec().setVectorSize(self.vector_size).setSeed(self.seed).fit(rdd)
return TextModel(model)
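# A minimal usage sketch (commented out because it needs a running Spark
# context and an xframes corpus; the column contents and query word are
# illustrative assumptions):
#
#   builder = TextBuilder(corpus, vector_size=50, seed=7)
#   text_model = builder.train()
#   print(text_model.find_associations('king', num=5))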
|
print(int(5.5))
|
import numpy as np
import netCDF4 as nc
|
#!/usr/bin/env python3
def good(sol):
n = len(sol)
for i in range(n):
for j in range(i+1,n):
dy = sol[i]-sol[j]
dx = i-j
if dy==0: return False
if dy==dx or dy==-dx: return False
return True
def solve(n,m=3):
tot = n**n
sol = [0]*n
cnt = 0
for i in range(tot):
for j in range(n-1,-1,-1):
sol[j]=(i%n)+1
i = i//n
if good(sol):
cnt += 1
if cnt<=m:
print(*sol)
print(cnt)
n=int(input())
solve(n)
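# Quick sanity check (illustrative): for an input of 4 the brute-force search
# finds the two classic 4-queens placements, so the expected output is
#   2 4 1 3
#   3 1 4 2
#   2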
|
import os
class Config:
def __init__(self, recipe):
self.recipe = recipe
self.app_dir = os.path.abspath(self.recipe.get_item('AppDir/path'))
self.arch = self.recipe.get_item('AppDir/yum/arch')
self.include_list = self.recipe.get_item('AppDir/yum/include')
self.exclude_list = self.recipe.get_item('AppDir/yum/exclude', [])
self.cache_root = self._get_cache_dir()
self.archives_path = self._get_archives_path()
def configure(self):
os.makedirs(self.cache_root, exist_ok=True)
os.makedirs(self.archives_path, exist_ok=True)
def _get_cache_dir(self):
return os.path.abspath('appimage-builder-cache')
def _get_archives_path(self):
return os.path.join(self.cache_root, 'yum', 'archives')
|
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Normal
import numpy as np
import collections, random
# Hyperparameters
lr_pi = 0.0005
lr_q = 0.001
init_alpha = 0.01
gamma = 0.98
batch_size = 32
buffer_limit = 50000
tau = 0.01 # for target network soft update
target_entropy = -1.0 # for automated alpha update
lr_alpha = 0.001 # for automated alpha update
class ReplayBuffer():
def __init__(self):
self.buffer = collections.deque(maxlen=buffer_limit)
def put(self, transition):
self.buffer.append(transition)
def sample(self, n):
mini_batch = random.sample(self.buffer, n)
s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
for transition in mini_batch:
s, a, r, s_prime, done = transition
s_lst.append(s)
a_lst.append([a])
r_lst.append([r])
s_prime_lst.append(s_prime)
done_mask = 0.0 if done else 1.0
done_mask_lst.append([done_mask])
return torch.tensor(s_lst, dtype=torch.float), torch.tensor(a_lst, dtype=torch.float), \
torch.tensor(r_lst, dtype=torch.float), torch.tensor(s_prime_lst, dtype=torch.float), \
torch.tensor(done_mask_lst, dtype=torch.float)
def size(self):
return len(self.buffer)
class PolicyNet(nn.Module):
def __init__(self, learning_rate):
super(PolicyNet, self).__init__()
self.fc1 = nn.Linear(3, 128)
self.fc_mu = nn.Linear(128, 1)
self.fc_std = nn.Linear(128, 1)
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
self.log_alpha = torch.tensor(np.log(init_alpha))
self.log_alpha.requires_grad = True
self.log_alpha_optimizer = optim.Adam([self.log_alpha], lr=lr_alpha)
def forward(self, x):
x = F.relu(self.fc1(x))
mu = self.fc_mu(x)
std = F.softplus(self.fc_std(x))
dist = Normal(mu, std)
action = dist.rsample()
log_prob = dist.log_prob(action)
real_action = torch.tanh(action)
real_log_prob = log_prob - torch.log(1 - torch.tanh(action).pow(2) + 1e-7)
return real_action, real_log_prob
def train_net(self, q1, q2, mini_batch):
s, _, _, _, _ = mini_batch
a, log_prob = self.forward(s)
entropy = -self.log_alpha.exp() * log_prob
q1_val, q2_val = q1(s, a), q2(s, a)
q1_q2 = torch.cat([q1_val, q2_val], dim=1)
min_q = torch.min(q1_q2, 1, keepdim=True)[0]
loss = -min_q - entropy # for gradient ascent
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
self.log_alpha_optimizer.zero_grad()
alpha_loss = -(self.log_alpha.exp() * (log_prob + target_entropy).detach()).mean()
alpha_loss.backward()
self.log_alpha_optimizer.step()
class QNet(nn.Module):
def __init__(self, learning_rate):
super(QNet, self).__init__()
self.fc_s = nn.Linear(3, 64)
self.fc_a = nn.Linear(1, 64)
self.fc_cat = nn.Linear(128, 32)
self.fc_out = nn.Linear(32, 1)
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
def forward(self, x, a):
h1 = F.relu(self.fc_s(x))
h2 = F.relu(self.fc_a(a))
cat = torch.cat([h1, h2], dim=1)
q = F.relu(self.fc_cat(cat))
q = self.fc_out(q)
return q
def train_net(self, target, mini_batch):
s, a, r, s_prime, done = mini_batch
loss = F.smooth_l1_loss(self.forward(s, a), target)
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
def soft_update(self, net_target):
for param_target, param in zip(net_target.parameters(), self.parameters()):
param_target.data.copy_(param_target.data * (1.0 - tau) + param.data * tau)
def calc_target(pi, q1, q2, mini_batch):
s, a, r, s_prime, done = mini_batch
with torch.no_grad():
a_prime, log_prob = pi(s_prime)
entropy = -pi.log_alpha.exp() * log_prob
q1_val, q2_val = q1(s_prime, a_prime), q2(s_prime, a_prime)
q1_q2 = torch.cat([q1_val, q2_val], dim=1)
min_q = torch.min(q1_q2, 1, keepdim=True)[0]
target = r + gamma * done * (min_q + entropy)
return target
def main():
env = gym.make('Pendulum-v1')
memory = ReplayBuffer()
q1, q2, q1_target, q2_target = QNet(lr_q), QNet(lr_q), QNet(lr_q), QNet(lr_q)
pi = PolicyNet(lr_pi)
q1_target.load_state_dict(q1.state_dict())
q2_target.load_state_dict(q2.state_dict())
score = 0.0
print_interval = 20
for n_epi in range(10000):
s = env.reset()
done = False
while not done:
env.render()
a, log_prob = pi(torch.from_numpy(s).float())
s_prime, r, done, info = env.step([2.0 * a.item()])
memory.put((s, a.item(), r / 10.0, s_prime, done))
score += r
s = s_prime
if memory.size() > 1000:
for i in range(20):
mini_batch = memory.sample(batch_size)
td_target = calc_target(pi, q1_target, q2_target, mini_batch)
q1.train_net(td_target, mini_batch)
q2.train_net(td_target, mini_batch)
entropy = pi.train_net(q1, q2, mini_batch)
q1.soft_update(q1_target)
q2.soft_update(q2_target)
if n_epi % print_interval == 0 and n_epi != 0:
print("# of episode :{}, avg score : {:.1f} alpha:{:.4f}".format(n_epi, score / print_interval,
pi.log_alpha.exp()))
score = 0.0
env.close()
if __name__ == '__main__':
main()
|
def section1():
import dtlpy as dl
# Get project and dataset
project = dl.projects.get(project_name='project_name')
dataset = project.datasets.get(dataset_name='dataset_name')
def section2():
item.metadata['user']['MyKey'] = 'MyValue'
annotation.metadata['user']['MyKey'] = 'MyValue'
def section3():
item.metadata['user']['MyKey'] = 3
annotation.metadata['user']['MyKey'] = 3
def section4():
item.metadata['user']['MyKey'] = True
annotation.metadata['user']['MyKey'] = True
def section5():
item.metadata['user']['MyKey'] = None
annotation.metadata['user']['MyKey'] = None
def section6():
# add metadata of a list (can contain elements of different types).
item.metadata['user']['MyKey'] = ["A", 2, False]
annotation.metadata['user']['MyKey'] = ["A", 2, False]
def section7():
item.metadata['user']['MyKey'].append(3)
item = item.update()
annotation.metadata['user']['MyKey'].append(3)
annotation = annotation.update()
def section8():
# upload and claim item
item = dataset.items.upload(local_path=r'C:/home/project/images/item.mimetype')
# or get item
item = dataset.items.get(item_id='write-your-id-number')
# modify metadata
item.metadata['user'] = dict()
item.metadata['user']['MyKey'] = 'MyValue'
# update and reclaim item
item = item.update()
# item in platform should have section 'user' in metadata with field 'MyKey' and value 'MyValue'
def section9():
# Get annotation
annotation = dl.annotations.get(annotation_id='my-annotation-id')
# modify metadata
annotation.metadata['user'] = dict()
    annotation.metadata['user']['red'] = True
# update and reclaim annotation
annotation = annotation.update()
# annotation in platform should have section 'user' in metadata with field 'red' and value True
def section10():
project = dl.projects.get(project_name='project_name')
dataset = project.datasets.get(dataset_name='dataset_name')
def section11():
# upload and claim item
item = dataset.items.upload(local_path=r'C:/home/project/images/item.mimetype')
# or get item
item = dataset.items.get(item_id='write-your-id-number')
# modify metadata
item.metadata['user'] = dict()
item.metadata['user']['MyKey'] = 'MyValue'
# update and reclaim item
item = item.update()
def section12():
filters = dl.Filters()
# set resource - optional - default is item
filters.resource = dl.FiltersResource.ITEM
def section13():
filters.add(field='metadata.user.Key', values='Value')
def section14():
pages = dataset.items.list(filters=filters)
# Go over all item and print the properties
for page in pages:
for item in page:
item.print()
def section15():
# upload and claim item
item = dataset.items.upload(local_path=r'C:/home/project/images/item.mimetype')
# or get item
item = dataset.items.get(item_id='write-your-id-number')
# modify metadata
if 'user' not in item.metadata:
item.metadata['user'] = dict()
item.metadata['user']['MyKey'] = 'MyValue'
# update and reclaim item
item = item.update()
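def section16():
    # A follow-up sketch combining the steps above (the key, value, and the
    # `dataset` object are assumed to be set up as in the earlier sections):
    filters = dl.Filters()
    filters.resource = dl.FiltersResource.ITEM
    filters.add(field='metadata.user.MyKey', values='MyValue')
    pages = dataset.items.list(filters=filters)
    for page in pages:
        for item in page:
            item.print()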
|
""" Serializers for the Transaction Logging API """
from rest_framework import serializers
from .models import TransactionRecord
class TransactionRecordSerializer(serializers.ModelSerializer):
""" Serializer for TransactionRecord objects """
class Meta:
""" TransactionRecordSerializer Django Metadata """
model = TransactionRecord
fields = "__all__"
|
import math
class Points(object):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __sub__(self, no):
return Points((self.x-no.x), (self.y-no.y), (self.z-no.z))
#dot product
def dot(self, no):
return (self.x*no.x)+(self.y*no.y)+(self.z*no.z)
# cross product
def cross(self, no):
return Points((self.y*no.z-self.z*no.y), (self.z*no.x-self.x*no.z), (self.x*no.y-self.y*no.x))
def absolute(self):
return pow((self.x ** 2 + self.y ** 2 + self.z ** 2), 0.5)
if __name__ == '__main__':
points = []
points.append([0, 4, 5])
points.append([1, 7, 6])
points.append([0, 5, 9])
points.append([1, 7, 2])
a, b, c, d = Points(*points[0]), Points(*points[1]
), Points(*points[2]), Points(*points[3])
x = (b - a).cross(c - b)
y = (c - b).cross(d - c)
angle = math.acos(x.dot(y) / (x.absolute() * y.absolute()))
print("%.2f" % math.degrees(angle))
|
def test_range():
simple = list(range(0, 10))
results = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert simple == results
def test_range_with_steps():
with_steps = list(range(0, 10, 2))
results = [0, 2, 4, 6, 8]
assert with_steps == results
def test_range_with_negative_steps():
with_steps = list(range(10, 0, -2))
results = [10, 8, 6, 4, 2]
assert with_steps == results
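def test_range_empty():
    # Added sketch: with the default positive step, range() yields nothing
    # when start is not less than stop.
    empty = list(range(5, 0))
    assert empty == []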
|
import argparse
import os
import functools
import time
import subprocess
from augmentation.utilities.config import *
from augmentation.utilities.metrics import *
from augmentation.datasets.utils import get_processed_dataset_info, apply_modifier_to_dataset_payload, load_dataset
from augmentation.dataflows.utils import dataflow_len
from augmentation.augment.utils import create_augmentation_pipelines
from augmentation.models.models import *
from augmentation.methods.cyclegan.utils import *
from augmentation.utilities.checkpoint import *
from augmentation.utilities.visualize import *
from augmentation.utilities.wandb import load_wandb_run, load_most_recent_keras_model_weights, \
get_most_recent_model_file, particular_checkpoint_step_extractor
from augmentation.utilities.utils import basic_setup
def train_cyclegan(config):
# Do basic setup
basic_setup(seed=config.seed, logical_gpu_memory_limits=(14336,))
# Set up the source dataset
source_dataset_payload = load_dataset(config.source_dataset,
config.source_dataset_version,
config.datadir,
config.validation_frac)
target_dataset_payload = load_dataset(config.target_dataset,
config.target_dataset_version,
config.datadir,
config.validation_frac)
# Get some dataset information
source_proc_dataset_info = get_processed_dataset_info(source_dataset_payload.dataset_info,
config.validation_frac, config.batch_size)
target_proc_dataset_info = get_processed_dataset_info(target_dataset_payload.dataset_info,
config.validation_frac, config.batch_size)
source_input_shape = source_proc_dataset_info.input_shape
target_input_shape = target_proc_dataset_info.input_shape
# Do selection on each dataset
source_dataset_payload = apply_modifier_to_dataset_payload(source_dataset_payload, config.source_dataset_modifier)
target_dataset_payload = apply_modifier_to_dataset_payload(target_dataset_payload, config.target_dataset_modifier)
# Setup the augmentation pipeline we'll be using
train_augmentations, val_augmentations, test_augmentations = \
create_augmentation_pipelines(config.train_daug_pipeline, config.train_daug_pipeline_args,
config.val_daug_pipeline, config.val_daug_pipeline_args,
config.test_daug_pipeline, config.test_daug_pipeline_args)
# Create the data generators
train_generator = create_cyclegan_data_generator(source_dataset_payload.train_dataset,
target_dataset_payload.train_dataset,
config.batch_size,
train_augmentations,
config.dataflow,
config.cache_dir + 'train')
test_generator = create_cyclegan_data_generator(source_dataset_payload.test_dataset,
target_dataset_payload.test_dataset,
config.batch_size,
test_augmentations,
config.dataflow,
config.cache_dir + 'test')
# Create the models
generator_g, generator_f, discriminator_x, discriminator_y = \
build_models(source_input_shape, target_input_shape, config.norm_type, config.output_init, config.residual_outputs)
generator_g.summary()
generator_f.summary()
discriminator_x.summary()
discriminator_y.summary()
# Set up the optimizers
generator_optimizer, _, discriminator_optimizer, _ = build_optimizers(lr_gen=config.lr_gen,
lr_disc=config.lr_disc,
beta_1_gen=config.beta_1_gen,
beta_1_disc=config.beta_1_disc,
lr_scheduler=config.lr_scheduler,
lr_decay_steps=config.n_epochs *
dataflow_len(train_generator))
# Compile the models
compile_keras_models([generator_g, generator_f, discriminator_x, discriminator_y],
[generator_optimizer, generator_optimizer, discriminator_optimizer, discriminator_optimizer])
# Create the replay buffers for the discriminators
disc_x_replay, disc_y_replay = ReplayBuffer(config.replay_buffer_size), ReplayBuffer(config.replay_buffer_size)
# Define the loss function to pass to the generator and discriminator
gan_loss_fn = build_gan_loss_fn(config.gan_loss)
# By default, assume we're starting training from scratch
start_epoch, start_step = 0, 0
if config.resume:
# If we're resuming a run
prev_run = load_wandb_run(config.prev_wandb_run_id, config.prev_wandb_project, config.prev_wandb_entity)
# If the previous run crashed, wandb_ckpt_path should be '': this is the typical use case
# but this should be changed in the future
step_extraction_fn = lambda fname: fname.split("_")[1].split(".")[0]
_, gen_g_ep = load_most_recent_keras_model_weights(generator_g, prev_run, model_name='generator_g',
wandb_ckpt_path=config.prev_ckpt_path,
step_extractor=step_extraction_fn)
_, gen_f_ep = load_most_recent_keras_model_weights(generator_f, prev_run, model_name='generator_f',
wandb_ckpt_path=config.prev_ckpt_path,
step_extractor=step_extraction_fn)
_, disc_x_ep = load_most_recent_keras_model_weights(discriminator_x, prev_run, model_name='discriminator_x',
wandb_ckpt_path=config.prev_ckpt_path,
step_extractor=step_extraction_fn)
_, disc_y_ep = load_most_recent_keras_model_weights(discriminator_y, prev_run, model_name='discriminator_y',
wandb_ckpt_path=config.prev_ckpt_path,
step_extractor=step_extraction_fn)
assert gen_g_ep == gen_f_ep == disc_x_ep == disc_y_ep, 'All restored models should be from the same epoch.'
if gen_g_ep is not None:
start_epoch, start_step = gen_g_ep, 0
for line in prev_run.history():
if 'epochs' in line and line['epochs'] == start_epoch:
start_step = line['steps']
break
# Reloading the optimizer states from that epoch
step_extraction_fn = lambda fname: fname.split(".")[0].split("_")[-1]
gen_opt_ckpt = get_most_recent_model_file(prev_run,
wandb_ckpt_path=config.prev_ckpt_path,
model_name='generator_optimizer',
step_extractor=
particular_checkpoint_step_extractor(start_epoch,
step_extractor=
step_extraction_fn))
load_tf_optimizer_state(generator_optimizer, gen_opt_ckpt.name)
disc_opt_ckpt = get_most_recent_model_file(prev_run,
wandb_ckpt_path=config.prev_ckpt_path,
model_name='discriminator_optimizer',
step_extractor=
particular_checkpoint_step_extractor(start_epoch,
step_extractor=
step_extraction_fn))
load_tf_optimizer_state(discriminator_optimizer, disc_opt_ckpt.name)
# Set up weights and biases
while True:
try:
if not config.resume:
# Start a new Weights and Biases run
wandb.init(entity=config.wandb_entity,
project=config.wandb_project,
group=config.wandb_group,
job_type=config.wandb_job_type,
reinit=True,
config=config)
else:
# Resume a previous Weights and Biases run
wandb.init(entity=config.prev_wandb_entity,
project=config.prev_wandb_project,
id=config.prev_wandb_run_id,
reinit=True,
resume=True)
os.makedirs(f'{wandb.run.dir}/{config.checkpoint_path}', exist_ok=True)
break
except:
continue
_train_cyclegan(train_data_generator=train_generator,
val_data_generator=test_generator,
generator_g=generator_g,
generator_f=generator_f,
discriminator_x=discriminator_x,
discriminator_y=discriminator_y,
generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
discriminator_x_replay=disc_x_replay,
discriminator_y_replay=disc_y_replay,
metrics=None,
batch_size=None,
n_epochs=config.n_epochs,
gan_loss_fn=gan_loss_fn,
cycle_loss_scale_x=config.cycle_loss_scale,
cycle_loss_scale_y=config.cycle_loss_scale * (1 - config.source_cycle_loss_only),
identity_loss_scale=config.identity_loss_scale,
grad_penalty=config.grad_penalty,
grad_penalty_scale=config.grad_penalty_scale,
checkpoint_path=config.checkpoint_path,
checkpoint_freq=config.checkpoint_freq,
image_log_freq=config.image_log_freq,
start_step=start_step, start_epoch=start_epoch)
def _train_cyclegan(train_data_generator, val_data_generator,
generator_g, generator_f,
discriminator_x, discriminator_y,
generator_optimizer, discriminator_optimizer,
discriminator_x_replay, discriminator_y_replay,
metrics, batch_size, n_epochs,
gan_loss_fn, cycle_loss_scale_x, cycle_loss_scale_y, identity_loss_scale,
grad_penalty, grad_penalty_scale,
checkpoint_path, checkpoint_freq,
image_log_freq=50,
start_step=0, start_epoch=0):
# Keep track of how many gradient steps we've taken
step = start_step
# Multiple training epochs
for epoch in range(start_epoch, n_epochs):
# Iterate over the dataset
for batch_x, batch_y in train_data_generator:
# Convert to tensors
batch_x, batch_y = tf.convert_to_tensor(batch_x), tf.convert_to_tensor(batch_y)
# Train using this batch of data
gen_losses, gen_predictions, gen_gradients = train_step_generator(generator_g, generator_f,
discriminator_x, discriminator_y,
gan_loss_fn,
batch_x, batch_y,
generator_optimizer,
discriminator_x_replay,
discriminator_y_replay,
cycle_loss_scale_x, cycle_loss_scale_y,
identity_loss_scale)
disc_losses, disc_predictions, disc_gradients = train_step_discriminator(discriminator_x, discriminator_y,
gan_loss_fn,
batch_x, batch_y,
discriminator_optimizer,
discriminator_x_replay,
discriminator_y_replay,
grad_penalty,
grad_penalty_scale)
# Update the step counter
step += 1
# Unpack and log to weights and biases
(gen_g_loss, gen_f_loss, cycle_loss_x, cycle_loss_y, identity_loss_x, identity_loss_y) = gen_losses
((same_x, fake_x, cycled_x, disc_fake_x),
(same_y, fake_y, cycled_y, disc_fake_y)) = gen_predictions
(disc_x_loss, disc_y_loss, disc_x_gp, disc_y_gp) = disc_losses
((disc_real_x, disc_sampled_fake_x),
(disc_real_y, disc_sampled_fake_y)) = disc_predictions
wandb.log({'training_metrics/gen_g_loss': gen_g_loss.numpy(),
'training_metrics/gen_f_loss': gen_f_loss.numpy(),
'training_metrics/cycle_loss_x': cycle_loss_x.numpy(),
'training_metrics/cycle_loss_y': cycle_loss_y.numpy(),
'training_metrics/identity_loss_x': identity_loss_x.numpy(),
'training_metrics/identity_loss_y': identity_loss_y.numpy(),
'training_metrics/disc_x_loss': disc_x_loss.numpy(),
'training_metrics/disc_y_loss': disc_y_loss.numpy(),
'training_metrics/disc_x_gp': disc_x_gp.numpy(),
'training_metrics/disc_y_gp': disc_y_gp.numpy(),
'predictions/disc_real_x': wandb.Histogram(disc_real_x.numpy()),
'predictions/disc_real_y': wandb.Histogram(disc_real_y.numpy()),
'predictions/disc_fake_x': wandb.Histogram(disc_fake_x.numpy()),
'predictions/disc_fake_y': wandb.Histogram(disc_fake_y.numpy()),
'predictions/disc_sampled_fake_x': wandb.Histogram(disc_sampled_fake_x.numpy()),
'predictions/disc_sampled_fake_y': wandb.Histogram(disc_sampled_fake_y.numpy()),
'gradient_norms/generators': tf.linalg.global_norm(gen_gradients).numpy(),
'gradient_norms/discriminators': tf.linalg.global_norm(disc_gradients).numpy(),
'learning_rates/generators': generator_optimizer._decayed_lr(tf.float32).numpy(),
'learning_rates/discriminators': discriminator_optimizer._decayed_lr(tf.float32).numpy(),
'steps': step},
step=step)
# Log images frequently to admire
if step % image_log_freq == 0:
# Use a (* 0.5 + 0.5) offset before visualizing since the data lies in [-1, 1]
wandb.log({'real_x': wandb.Image(gallery(batch_x.numpy() * 0.5 + 0.5)),
'fake_x': wandb.Image(gallery(fake_x.numpy() * 0.5 + 0.5)),
'cycled_x': wandb.Image(gallery(cycled_x.numpy() * 0.5 + 0.5)),
'same_x': wandb.Image(gallery(same_x.numpy() * 0.5 + 0.5)),
'real_y': wandb.Image(gallery(batch_y.numpy() * 0.5 + 0.5)),
'fake_y': wandb.Image(gallery(fake_y.numpy() * 0.5 + 0.5)),
'cycled_y': wandb.Image(gallery(cycled_y.numpy() * 0.5 + 0.5)),
'same_y': wandb.Image(gallery(same_y.numpy() * 0.5 + 0.5))}, step=step)
# Visualize a batch of validation data every epoch
generate_and_log_one_image_batch(val_data_generator, generator_g, generator_f, step)
del gen_losses, disc_losses, gen_predictions, disc_predictions, gen_gradients, disc_gradients
# End of epoch, log to weights and biases
wandb.log({'epochs': epoch + 1}, step=step)
# Checkpoint every few epochs
if (epoch + 1) % checkpoint_freq == 0:
# Store the models
generator_g.save_weights(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_generator_g.h5')
generator_f.save_weights(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_generator_f.h5')
discriminator_x.save_weights(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_discriminator_x.h5')
discriminator_y.save_weights(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_discriminator_y.h5')
# Store the optimizers
save_tf_optimizer_state(generator_optimizer,
f'{wandb.run.dir}/{checkpoint_path}/generator_optimizer_{epoch + 1}.pkl')
save_tf_optimizer_state(discriminator_optimizer,
f'{wandb.run.dir}/{checkpoint_path}/discriminator_optimizer_{epoch + 1}.pkl')
# Save to Weights and Biases
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_generator_g.h5')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_generator_f.h5')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_discriminator_x.h5')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_discriminator_y.h5')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/generator_optimizer_{epoch + 1}.pkl')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/discriminator_optimizer_{epoch + 1}.pkl')
return generator_g, generator_f, discriminator_x, discriminator_y
def train_step_generator(generator_g, generator_f,
discriminator_x, discriminator_y,
loss_fn,
batch_x, batch_y,
generator_optimizer,
discriminator_x_replay, discriminator_y_replay,
cycle_loss_scale_x, cycle_loss_scale_y, identity_loss_scale):
def _train_step_generator(real_x, real_y):
with tf.GradientTape() as tape:
# Generator G translates X -> Y
# Generator F translates Y -> X.
fake_y = generator_g(real_x, training=True)
cycled_x = generator_f(fake_y, training=True)
fake_x = generator_f(real_y, training=True)
cycled_y = generator_g(fake_x, training=True)
# same_x and same_y are used for identity loss.
same_x = generator_f(real_x, training=True)
same_y = generator_g(real_y, training=True)
disc_fake_x = discriminator_x(fake_x, training=True)
disc_fake_y = discriminator_y(fake_y, training=True)
# Calculate all the losses
gen_g_loss = generator_loss(disc_fake_y, loss_fn)
gen_f_loss = generator_loss(disc_fake_x, loss_fn)
cycle_loss_x = cycle_loss(real_x, cycled_x, cycle_loss_scale_x)
cycle_loss_y = cycle_loss(real_y, cycled_y, cycle_loss_scale_y)
identity_loss_x = identity_loss(real_x, same_x, identity_loss_scale)
identity_loss_y = identity_loss(real_y, same_y, identity_loss_scale)
# Total generator loss = adversarial loss + cycle loss
total_gen_loss = gen_g_loss + gen_f_loss + cycle_loss_x + cycle_loss_y + identity_loss_x + identity_loss_y
# Update the discriminator replay buffers
discriminator_x_replay.add(fake_x)
discriminator_y_replay.add(fake_y)
# Calculate the gradients for generator and discriminator
generator_gradients = tape.gradient(total_gen_loss,
generator_g.trainable_variables + generator_f.trainable_variables)
# Apply the gradients to the optimizer
generator_optimizer.apply_gradients(zip(generator_gradients,
generator_g.trainable_variables + generator_f.trainable_variables))
del tape
return (gen_g_loss, gen_f_loss, cycle_loss_x, cycle_loss_y, identity_loss_x, identity_loss_y), \
((same_x, fake_x, cycled_x, disc_fake_x),
(same_y, fake_y, cycled_y, disc_fake_y)), generator_gradients
return _train_step_generator(batch_x, batch_y)
def train_step_discriminator(discriminator_x, discriminator_y,
loss_fn,
batch_x, batch_y,
discriminator_optimizer,
discriminator_x_replay, discriminator_y_replay,
grad_penalty, grad_penalty_scale):
def _train_step_discriminator(real_x, real_y):
# Sample fake_x and fake_y from the replay buffers
sampled_fake_x = discriminator_x_replay.get_tf_batch(real_x.shape[0])
sampled_fake_y = discriminator_y_replay.get_tf_batch(real_y.shape[0])
with tf.GradientTape() as tape:
disc_real_x = discriminator_x(real_x, training=True)
disc_real_y = discriminator_y(real_y, training=True)
disc_fake_x = discriminator_x(sampled_fake_x, training=True)
disc_fake_y = discriminator_y(sampled_fake_y, training=True)
disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x, loss_fn)
disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y, loss_fn)
disc_x_gp = gradient_penalty(functools.partial(discriminator_x, training=True),
real_x, sampled_fake_x, mode=grad_penalty, scale=grad_penalty_scale)
disc_y_gp = gradient_penalty(functools.partial(discriminator_y, training=True),
real_y, sampled_fake_y, mode=grad_penalty, scale=grad_penalty_scale)
total_disc_loss = disc_x_loss + disc_y_loss + disc_x_gp + disc_y_gp
# Calculate the gradients for generator and discriminator
discriminator_gradients = tape.gradient(total_disc_loss,
discriminator_x.trainable_variables +
discriminator_y.trainable_variables)
# Apply the gradients to the optimizer
discriminator_optimizer.apply_gradients(zip(discriminator_gradients,
discriminator_x.trainable_variables +
discriminator_y.trainable_variables))
del tape
return (disc_x_loss, disc_y_loss, disc_x_gp, disc_y_gp), \
((disc_real_x, disc_fake_x),
(disc_real_y, disc_fake_y)), discriminator_gradients
return _train_step_discriminator(batch_x, batch_y)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True, help='Path to configuration file.')
parser.add_argument('--template', type=str, default='augmentation/configs/template_cyclegan_training.yaml')
args = parser.parse_args()
# Load up the config files
config = create_config_simple_namespace(config_path=args.config, template_config_path=args.template)
# Train the end model
train_cyclegan(config)
|
"""
Compile SMIv2 MIBs
++++++++++++++++++
Invoke user callback function to provide MIB text,
compile given text string into pysnmp MIB form and pass
results to another user callback function for storing.
Here we expect to deal only with SMIv2-valid MIBs.
We use the noDeps flag to prevent the MIB compiler from attempting
to compile IMPORT'ed MIBs as well.
"""#
import sys
from pysmi.reader import CallbackReader
from pysmi.searcher import StubSearcher
from pysmi.writer import CallbackWriter
from pysmi.parser import SmiV2Parser
from pysmi.codegen import PySnmpCodeGen
from pysmi.compiler import MibCompiler
inputMibs = ['IF-MIB', 'IP-MIB']
srcDir = '/usr/share/snmp/mibs/' # we will read MIBs from here
# Initialize compiler infrastructure
mibCompiler = MibCompiler(
SmiV2Parser(),
PySnmpCodeGen(),
    # our own callback function stores results in its own way
CallbackWriter(lambda m, d, c: sys.stdout.write(d))
)
# our own callback function serves as a MIB source here
mibCompiler.addSources(
CallbackReader(lambda m, c: open(srcDir+m+'.txt').read())
)
# never recompile MIBs with MACROs
mibCompiler.addSearchers(StubSearcher(*PySnmpCodeGen.baseMibs))
# run non-recursive MIB compilation
results = mibCompiler.compile(*inputMibs, **dict(noDeps=True))
print('Results: %s' % ', '.join(['%s:%s' % (x, results[x]) for x in results]))
|
import os
import queue
import cv2
import numpy as np
from PIL import Image, ImageDraw
import csv
import sys
try:
input_video_path = sys.argv[1]
input_csv_path = sys.argv[2]
#output_video_path = sys.argv[3]
    if (not input_video_path) or (not input_csv_path):
        raise ValueError('missing input video or CSV path')
except:
print('usage: python3 show_trajectory.py <input_video_path> <input_csv_path>')
exit(1)
with open(input_csv_path) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
frames = []
x, y = [], []
list1 = []
for row in readCSV:
list1.append(row)
for i in range(1 , len(list1)):
frames += [int(list1[i][0])]
x += [int(float(list1[i][2]))]
y += [int(float(list1[i][3]))]
output_video_path = input_video_path.split('.')[0] + "_trajectory.mp4"
q = queue.deque()
for i in range(0,8):
q.appendleft(None)
#get video fps&video size
currentFrame= 0
video = cv2.VideoCapture(input_video_path)
fps = int(video.get(cv2.CAP_PROP_FPS))
output_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
output_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
output_video = cv2.VideoWriter(output_video_path,fourcc, fps, (output_width,output_height))
video.set(1,currentFrame);
ret, img1 = video.read()
#write image to video
output_video.write(img1)
currentFrame +=1
#input must be float type
img1 = img1.astype(np.float32)
#capture frame-by-frame
video.set(1,currentFrame);
ret, img = video.read()
#write image to video
output_video.write(img)
currentFrame +=1
#input must be float type
img = img.astype(np.float32)
while(True):
#capture frame-by-frame
video.set(1,currentFrame);
ret, img = video.read()
    # if there are no more frames in the video, break
if not ret:
break
PIL_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
PIL_image = Image.fromarray(PIL_image)
if x[currentFrame] != 0 and y[currentFrame] != 0:
q.appendleft([x[currentFrame],y[currentFrame]])
q.pop()
else:
q.appendleft(None)
q.pop()
for i in range(0,8):
if q[i] is not None:
draw_x = q[i][0]
draw_y = q[i][1]
bbox = (draw_x - 2, draw_y - 2, draw_x + 2, draw_y + 2)
draw = ImageDraw.Draw(PIL_image)
draw.ellipse(bbox, outline ='yellow')
del draw
opencvImage = cv2.cvtColor(np.array(PIL_image), cv2.COLOR_RGB2BGR)
#write image to output_video
output_video.write(opencvImage)
#next frame
currentFrame += 1
video.release()
output_video.release()
print("finish")
|
"""tests/test_dataframe.py module."""
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from talus_utils import dataframe
from talus_utils.fasta import parse_fasta_header_uniprot_protein
DATA_DIR = Path(__file__).resolve().parent.joinpath("data")
def assert_frame_not_equal(*args: str, **kwargs: str) -> None:
"""Check that left and right DataFrame are unequal.
Raises
------
AssertionError
When the two frames are not equal.
"""
try:
assert_frame_equal(*args, **kwargs)
except AssertionError:
# frames are not equal
pass
else:
# frames are equal
raise AssertionError
def dummy_function_change_df_column(df: pd.DataFrame) -> None:
"""Change the input df's column.
Parameters
----------
df : pd.DataFrame
The input DataFrame.
"""
df["test"] = "dummy_value"
def dummy_function(df: pd.DataFrame) -> pd.DataFrame:
"""Return the input DataFrame.
Parameters
----------
df : pd.DataFrame
The input DataFrame.
"""
return df
def test_copy_df_in_args() -> None:
"""Test the copy decorator with the df in the args."""
df_input = pd.DataFrame([{"test": "a", "test2": "b"}, {"test": "c", "test2": "d"}])
df_expected_to_change = df_input.copy(deep=True)
df_expected_not_to_change = df_input.copy(deep=True)
dummy_function_change_df_column(df_expected_to_change)
assert_frame_not_equal(df_expected_to_change, df_input)
dataframe.copy(dummy_function_change_df_column)(df_expected_not_to_change)
assert_frame_equal(df_expected_not_to_change, df_input)
def test_copy_df_in_kwargs() -> None:
"""Test the copy decorator with the df in the kwargs."""
df_input = pd.DataFrame([{"test": "a", "test2": "b"}, {"test": "c", "test2": "d"}])
df_expected_to_change = df_input.copy(deep=True)
df_expected_not_to_change = df_input.copy(deep=True)
dummy_function_change_df_column(df=df_expected_to_change)
assert_frame_not_equal(df_expected_to_change, df_input)
dataframe.copy(dummy_function_change_df_column)(df=df_expected_not_to_change)
assert_frame_equal(df_expected_not_to_change, df_input)
def test_dropna() -> None:
"""Test the dropna decorator."""
df_input = pd.DataFrame(
[{"test": "a", "test2": np.nan}, {"test": "c", "test2": "d"}]
)
df_expected = df_input.dropna()
df_actual = dataframe.dropna()(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_dropna_column() -> None:
"""Test the dropna decorator by dropping a column."""
df_input = pd.DataFrame(
[{"test": "a", "test2": np.nan}, {"test": "c", "test2": "d"}]
)
df_expected = pd.DataFrame([{"test": "a"}, {"test": "c"}])
df_actual = dataframe.dropna(axis=1)(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_log_scaling() -> None:
"""Test the log_scaling decorator."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 500)
df_expected = np.log10(df_input)
df_actual = dataframe.log_scaling()(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_log_scaling_custom_log() -> None:
"""Test the log_scaling decorator with a custom log function (np.log2)."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 100)
df_expected = np.log2(df_input.where(df_input >= 1))
df_actual = dataframe.log_scaling(log_function=np.log2)(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_log_scaling_with_zeros() -> None:
"""Test the log_scaling decorator with zeros (should be filtered out and set to NaN)."""
df_input = pd.DataFrame([{"test": 25, "test2": 0}, {"test": 26, "test2": 42}])
df_expected = np.log10(df_input.where(df_input >= 1))
df_actual = dataframe.log_scaling()(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_log_scaling_with_zeros_no_filter_outliers() -> None:
"""Test the log_scaling decorator with zeros and without filtering outliers."""
df_input = pd.DataFrame([{"test": 25, "test2": 0}, {"test": 26, "test2": 42}])
df_expected = np.log10(df_input.mask(df_input < 1, 1))
df_actual = dataframe.log_scaling(filter_outliers=False)(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_pivot_table() -> None:
"""Test the test_pivot_table decorator."""
df_input = pd.DataFrame(
[
{"index": "a", "column": "b", "value": 1},
{"index": "c", "column": "d", "value": 2},
]
)
df_expected = df_input.pivot_table(index="index", columns="column", values="value")
df_actual = dataframe.pivot_table(index="index", columns="column", values="value")(
dummy_function
)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_normalize_value_error() -> None:
"""Test the normalize decorator with a value error."""
df_input = pd.DataFrame([{"test": "a", "test2": "b"}, {"test": "c", "test2": "d"}])
with pytest.raises(ValueError):
_ = dataframe.normalize(how="nonexisting")(dummy_function)(df_input)
def test_normalize_minmax() -> None:
"""Test the normalize decorator with how='minmax'."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 100)
df_expected = (df_input - df_input.min()) / (df_input.max() - df_input.min())
df_actual = dataframe.normalize(how="minmax")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_normalize_row() -> None:
"""Test the normalize decorator with how='row'."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 100)
df_expected = df_input.apply(lambda x: x / x.sum(), axis=1)
df_actual = dataframe.normalize(how="row")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_normalize_column() -> None:
"""Test the normalize decorator with how='column'."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 100)
df_expected = df_input.apply(lambda x: x / x.sum(), axis=0)
df_actual = dataframe.normalize(how="column")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_normalize_median_column() -> None:
"""Test the normalize decorator with how='median_column'."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 100)
df_expected = df_input / df_input.median(axis=0)
df_actual = dataframe.normalize(how="median_column")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_normalize_quantile_column() -> None:
"""Test the normalize decorator with how='quantile_column'."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 100)
rank_mean = (
df_input.stack()
.groupby(df_input.rank(method="first").stack().astype(int))
.mean()
)
df_expected = (
df_input.rank(method="min").stack().astype(int).map(rank_mean).unstack()
)
df_actual = dataframe.normalize(how="quantile_column")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_sort_row_values_value_error() -> None:
"""Test the sort_by decorator with a value error."""
df_input = pd.DataFrame([{"test": "a", "test2": "b"}, {"test": "c", "test2": "d"}])
with pytest.raises(ValueError):
_ = dataframe.sort_row_values(how="nonexisting")(dummy_function)(df_input)
def test_sort_row_values_max() -> None:
"""Test the sort_row_values decorator with 'max'."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 100)
df_abs_expected = df_input.reindex(
index=df_input.abs().max(axis=1).sort_values(ascending=True).index
)
df_actual = dataframe.sort_row_values(
how="max", use_absolute_values=True, sort_ascending=True
)(dummy_function)(df_input)
assert_frame_equal(df_actual, df_abs_expected)
df_expected = df_input.reindex(
index=df_input.max(axis=1).sort_values(ascending=True).index
)
df_actual = dataframe.sort_row_values(how="max", sort_ascending=True)(
dummy_function
)(df_input)
assert_frame_equal(df_actual, df_expected)
df_desc_expected = df_input.reindex(
index=df_input.max(axis=1).sort_values(ascending=False).index
)
df_actual = dataframe.sort_row_values(how="max")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_desc_expected)
def test_sort_row_values_min() -> None:
"""Test the sort_row_values decorator with 'min'."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 100)
df_abs_expected = df_input.reindex(
index=df_input.abs().min(axis=1).sort_values(ascending=True).index
)
df_actual = dataframe.sort_row_values(
how="min", use_absolute_values=True, sort_ascending=True
)(dummy_function)(df_input)
assert_frame_equal(df_actual, df_abs_expected)
df_expected = df_input.reindex(
index=df_input.min(axis=1).sort_values(ascending=True).index
)
df_actual = dataframe.sort_row_values(how="min", sort_ascending=True)(
dummy_function
)(df_input)
assert_frame_equal(df_actual, df_expected)
df_desc_expected = df_input.reindex(
index=df_input.min(axis=1).sort_values(ascending=False).index
)
df_actual = dataframe.sort_row_values(how="min")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_desc_expected)
def test_sort_row_values_median() -> None:
"""Test the sort_row_values decorator with 'median'."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 100)
df_abs_expected = df_input.reindex(
index=df_input.abs().median(axis=1).sort_values(ascending=True).index
)
df_actual = dataframe.sort_row_values(
how="median", use_absolute_values=True, sort_ascending=True
)(dummy_function)(df_input)
assert_frame_equal(df_actual, df_abs_expected)
df_expected = df_input.reindex(
index=df_input.median(axis=1).sort_values(ascending=True).index
)
df_actual = dataframe.sort_row_values(how="median", sort_ascending=True)(
dummy_function
)(df_input)
assert_frame_equal(df_actual, df_expected)
df_desc_expected = df_input.reindex(
index=df_input.median(axis=1).sort_values(ascending=False).index
)
df_actual = dataframe.sort_row_values(how="median")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_desc_expected)
def test_sort_row_values_mean() -> None:
"""Test the sort_row_values decorator with 'mean'."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 100)
df_abs_expected = df_input.reindex(
index=df_input.abs().mean(axis=1).sort_values(ascending=True).index
)
df_actual = dataframe.sort_row_values(
how="mean", use_absolute_values=True, sort_ascending=True
)(dummy_function)(df_input)
assert_frame_equal(df_actual, df_abs_expected)
df_expected = df_input.reindex(
index=df_input.mean(axis=1).sort_values(ascending=True).index
)
df_actual = dataframe.sort_row_values(how="mean", sort_ascending=True)(
dummy_function
)(df_input)
assert_frame_equal(df_actual, df_expected)
df_desc_expected = df_input.reindex(
index=df_input.mean(axis=1).sort_values(ascending=False).index
)
df_actual = dataframe.sort_row_values(how="mean")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_desc_expected)
def test_sort_row_values_sum() -> None:
"""Test the sort_row_values decorator with 'sum'."""
df_input = pd.DataFrame(np.random.rand(5, 5) * 100)
df_abs_expected = df_input.reindex(
index=df_input.abs().sum(axis=1).sort_values(ascending=True).index
)
df_actual = dataframe.sort_row_values(
how="sum", use_absolute_values=True, sort_ascending=True
)(dummy_function)(df_input)
assert_frame_equal(df_actual, df_abs_expected)
df_expected = df_input.reindex(
index=df_input.sum(axis=1).sort_values(ascending=True).index
)
df_actual = dataframe.sort_row_values(how="sum", sort_ascending=True)(
dummy_function
)(df_input)
assert_frame_equal(df_actual, df_expected)
df_desc_expected = df_input.reindex(
index=df_input.sum(axis=1).sort_values(ascending=False).index
)
df_actual = dataframe.sort_row_values(how="sum")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_desc_expected)
def test_explode() -> None:
"""Test the explode_column decorator."""
df_input = pd.DataFrame(
{
"A": [[0, 1, 2], "foo", [], [3, 4]],
"B": 1,
"C": 2,
}
)
df_expected = df_input.explode(column="A")
df_actual = dataframe.explode(column="A")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_update_column() -> None:
"""Test the update_column decorator."""
df_input = pd.read_csv(DATA_DIR.joinpath("select_peptidetoprotein.csv"))
df_expected = df_input.copy(deep=True)
df_expected["ProteinAccession"] = df_expected["ProteinAccession"].apply(
parse_fasta_header_uniprot_protein
)
df_actual = dataframe.update_column(
column="ProteinAccession", update_func=parse_fasta_header_uniprot_protein
)(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
def test_update_and_explode() -> None:
"""Test the update_column and explode decorators."""
df_input = pd.read_csv(DATA_DIR.joinpath("peptide_proteins_to_explode.csv"))
df_expected = df_input.copy(deep=True)
df_expected["Protein"] = df_expected["Protein"].apply(lambda x: x.split(";"))
df_expected = df_expected.explode(column="Protein")
df_actual = dataframe.explode(column="Protein", sep=";")(dummy_function)(df_input)
assert_frame_equal(df_actual, df_expected)
|
##
# movie_reccomendation.py
# Created: 29.08.19
# Last edit: 06.09.19
# A program that recommends movies based on an algorithm and other users
from tkinter import *
import tkinter as tk
class GUI:
def __init__(self, parent):
self.parent = parent
# Frames
        self.search_frame = Frame(root, width=250)
        self.search_frame.grid(row=0, column=0)
        self.suggestions_frame = Frame(root, width=250)
        self.suggestions_frame.grid(row=0, column=1)
# Main buttons
self.button_search = Button(self.search_frame, text="Search for a movie\nRapua he kiriata", command=self.movie_search, width=35).grid(row=0, column=0)
self.button_suggestions = Button(self.suggestions_frame, text="View your suggestions\nTirohia o whakaaro", command=self.view_suggestions, width=35).grid(row=0, column=1)
def movie_search(self):
""" Search the name of a movie """
self.movie_entrybox = Entry(self.search_frame, width=32)
self.movie_entrybox.grid(row=1, column=0, pady=5, padx=5, ipady=10, sticky=W)
self.search_submit_button = Button(self.search_frame, text="Submit\nTuku", command=self.submit_search)
self.search_submit_button.grid(row=1, column=0, sticky=E)
self.selected = None
def submit_search(self):
""" Display Listbox with all relevant movies """
self.results = Listbox(self.search_frame, width=41, height=20)
self.results.grid(row=2, column=0)
for movie in movies:
if self.movie_entrybox.get().lower() in movie.title.lower():
self.results.insert(END, movie.title)
self.results.bind('<<ListboxSelect>>',self.CurSelect)
def CurSelect(self, evt):
""" Display rating buttons if a movie is selected """
self.selected = self.results.get(self.results.curselection())
self.movie_label = Label(self.search_frame, text="Now rating {0}:\nNa te whakatauranga {0}:".format(self.selected)).grid(row=4, column=0)
self.rate_buttons()
def rate_buttons(self):
""" Display five buttons with 1-5 as labels"""
self.star_frame = Frame(self.search_frame)
self.star_frame.grid(row=3, column=0)
self.rating_var = IntVar()
self.one_star = Radiobutton(self.star_frame, variable=self.rating_var, value=1, text="1", command=self.rate_movie).grid(row=0, column=0)
self.two_star = Radiobutton(self.star_frame, variable=self.rating_var, value=2, text="2", command=self.rate_movie).grid(row=0, column=1)
self.three_star = Radiobutton(self.star_frame, variable=self.rating_var, value=3, text="3", command=self.rate_movie).grid(row=0, column=2)
self.four_star = Radiobutton(self.star_frame, variable=self.rating_var, value=4, text="4", command=self.rate_movie).grid(row=0, column=3)
self.five_star = Radiobutton(self.star_frame, variable=self.rating_var, value=5, text="5", command=self.rate_movie).grid(row=0, column=4)
def rate_movie(self):
""" Add movie to user's liked or disliked set depending on what they rated the movie """
for movie in movies:
if movie.title == self.selected:
rated_movie_id = movie.id
if self.rating_var.get() >= int(LIKED_RATING):
current_user.add_liked(rated_movie_id)
Label(self.search_frame, text="Liked\nPēnei").grid()
        elif self.rating_var.get() < int(LIKED_RATING):
            current_user.add_disliked(rated_movie_id)
            Label(self.search_frame, text="Disliked\nMea koretake").grid()
def view_suggestions(self):
""" Create listbox that displays the users suggested movies """
recommended_movies_dict = generate_recommendations(current_user, 5)
# Recommend the top five recommended movies using a dictionary sorted on values
highest_possibility = -1
for movie in recommended_movies_dict:
if possibility_index(current_user, movie.id) > highest_possibility:
highest_possibility = possibility_index(current_user, movie.id)
self.suggested_movies_frame = LabelFrame(root, text="Recommended Movies\nKiriata E taunakitia ana")
self.suggested_movies_frame.grid(row=2, column=1)
for movie in recommended_movies_dict:
Label(self.suggested_movies_frame, text=
(movie.title, movie.year, "{}%".format(round(possibility_index(current_user, movie.id) / highest_possibility * 100, 1))), width=35).grid(column=1)
class Movies:
""" Movie class, genres is stored as a list """
def __init__(self, id, year, title, genres):
self.id = id
self.title = title
self.year = year
self.genres = genres
movies.append(self)
class Ratings:
""" Ratings class joining the user and movie classes with a rating """
def __init__(self, user_id, movie_id, rating):
self.user_id = user_id
self.movie_id = movie_id
self.rating = rating
ratings.append(self)
class User:
"""
User class holds the id and a list of all movies liked and disliked
The liked and disliked lists are initialised on creation of a new user
Methods to add to liked and disliked lists
Methods to return the liked and disliked lists as sets
"""
def __init__(self, id):
self.id = id
self.liked = set()
self.disliked = set()
users.append(self)
def add_liked(self, movie_id):
self.liked.add(movie_id)
def return_liked(self):
return self.liked
def add_disliked(self, movie_id):
self.disliked.add(movie_id)
def return_disliked(self):
return self.disliked
def set_current_user(user_id):
""" Assign the current user to an instance """
# Set the current user to do recommendations for
current_user = "" # Var to store instance of current user
for user in users:
if user.id == user_id: # Change for current user
current_user = user
return current_user
def import_movies():
""" Load movie csv files movies class """
import csv
with open('lessMovies.csv', encoding='utf-8') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0: # Header row
line_count += 1
else:
line_count += 1
"""*** Add the imported data to the Movie class ***"""
                # Separating the year and title
year = (row[1])[-5:-1]
title = (row[1])[0:-6]
id = (row[0])
# ***Do some string processing to import into the Movie class ***
Movies(row[0], year, title, row[2])
def import_ratings(LIKED_RATING):
""" Load ratings csv files to ratings and user classes """
id_list = []
import csv
with open('lessRatings.csv', encoding='utf-8') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
# Count the rows and discount the header
for row in csv_reader:
if line_count == 0: # Header row
line_count += 1
else:
line_count += 1
"""*** Add the imported data to the Rating class ***"""
# Add the imported data to the Rating class
Ratings(row[0], row[1], row[2])
"""*** Add the imported data to the User class ***"""
if row[0] in id_list:
# Add liked and disliked movies to the user instance
if row[2] >= LIKED_RATING: # If the rating is above the liked rating add it to the user's liked movies set
users[int(row[0])-1].add_liked(row[1])
else: # Otherwise add it to the disliked movies set
users[int(row[0])-1].add_disliked(row[1])
# If the user ID changes, create new user
else:
User(row[0])
id_list.append(row[0])
if row[2] >= LIKED_RATING:
users[int(row[0])-1].add_liked(row[1])
else:
users[int(row[0])-1].add_disliked(row[1])
def similarity_index(current_user, user):
"""
    Return the similarity index of two users, between -1.0 and 1.0.
    Originally known as the "coefficient de communauté", after Paul Jaccard.
"""
U1 = current_user
U2 = user
L1 = U1.return_liked()
D1 = U1.return_disliked()
L2 = U2.return_liked()
D2 = U2.return_disliked()
similarity = (len(L1&L2) + len(D1&D2) - len(L1&D2) - len(L2&D1)) / len(L1|L2|D1|D2)
return similarity
def user_movies(user):
""" Return the liked and disliked movie SETS for the given user """
# Return movies rated by a given user
movies_rated = (user.return_liked())|(user.return_disliked())
return movies_rated
def return_user_liked(movie):
""" Return the set of all users who liked a movie """
# Create an empty set
users_liked = set()
# For each user
for user in users:
# If the movie is in the users set of liked movies
if movie in user.return_liked():
# Add the user to the set
users_liked.add(user)
# Return the set
return users_liked
def return_user_disliked(movie):
""" Return the set of all users who liked a movie """
users_disliked = set()
for user in users:
if movie in user.return_disliked():
users_disliked.add(user)
return users_disliked
def find_similar_users(current_user):
"""
Given a user, compute a list of other users who are similar
    Store the list in a database (in this case a dictionary), along with their similarity indices
Return the list of similar user in order of most to least similar
"""
similar_users_set = set()
similar_user_instances = []
similar_users_ratio = {}
rated_movies = user_movies(current_user)
for movie in rated_movies:
similar_users_set.update(return_user_liked(movie)|return_user_disliked(movie))
for similar_user in similar_users_set:
if similar_user.id != current_user.id:
similarity = similarity_index(current_user, similar_user)
similar_users_ratio[similar_user] = similarity
# Order users in terms of most similar
for user, similarity in sorted(similar_users_ratio.items(), key=lambda x:x[1], reverse=True):
similar_user_instances.append(user)
return similar_user_instances
def possibility_index(current_user, movie):
"""
Given a user and a movie (obviously the user should not have rated this movie yet)
Find all users who have rated the movie
    Compute the similarity index of each user and use it to
    generate the possibility of the user liking the given movie
"""
    ## Finding the sum of the similarity indices of all users who have LIKED a movie
    liked_sum = 0 # Variable to store the sum of all the similarity indices of all users who have liked a given movie
for user in return_user_liked(movie):
if user != current_user:
liked_sum += similarity_index(current_user, user)
    ## Finding the sum of the similarity indices of all users who have DISLIKED a movie
disliked_sum = 0
for user in return_user_disliked(movie):
if user != current_user:
disliked_sum += similarity_index(current_user, user)
try:
possibility_index = (liked_sum - disliked_sum) / (len(return_user_liked(movie)) + len(return_user_disliked(movie)))
except ZeroDivisionError:
possibility_index = 0
return possibility_index
def return_unrated(current_user):
""" Return a list of all unrated movie ids a given user has not rated """
# Create a list to store all movie ids of unrated movies
unrated_movie_ids = []
for user in find_similar_users(current_user):
unrated_movies = user_movies(user).difference(user_movies(current_user))
for movie in unrated_movies:
if movie not in unrated_movie_ids:
unrated_movie_ids.append(movie)
return unrated_movie_ids
def unrated_movie_possibilities(current_user):
""" Store all items the given user has not rated with the possibility index and return the dictionary """
    # Create an empty dictionary to store all recommended movies with their id and their possibility index
recommended_movies = {}
unrated_movie_ids = return_unrated(current_user)
for unrated_movie in unrated_movie_ids:
recommended_movies[unrated_movie] = possibility_index(current_user, unrated_movie)
    # Return the dictionary of recommended movies
return recommended_movies
def generate_recommendations(current_user, num_recommendations):
""" Generating movie recommendations """
recommended_movies_dictionary = {}
recommended_movies = unrated_movie_possibilities(current_user) # Rate all recommended
counter = 0
for i, j in sorted(recommended_movies.items(), key=lambda x:x[1], reverse=True):
if counter < num_recommendations:
for movie in movies:
if movie.id == i:
recommended_movies_dictionary[movie] = movie.year, possibility_index(current_user, movie.id)
counter += 1
return recommended_movies_dictionary
if __name__ == "__main__":
LIKED_RATING = "4" # Movies rated this score and above are considered liked
movies = [] # Stores all instances of movies
ratings = [] # Stores all instances of ratings
users = [] # Stores all instances of users
# Import csv as objects
import_movies()
import_ratings(LIKED_RATING)
# Store current user instance
current_user_id = '2'
CURRENT_USER = set_current_user(current_user_id)
    # Create a new user with the next available id and make them the current user
new_user = int(ratings[-1].user_id) + 1
User(new_user)
current_user = set_current_user(new_user)
#GUI
root = tk.Tk()
root.title("Movie Ratings")
root.geometry("510x530+705+150")
gui_1 = GUI(root)
root.mainloop()
|
'''
Description:
version:
Author: Jinlong Li CSU PhD
Date: 2022-01-04 10:58:11
LastEditors: Jinlong Li CSU PhD
LastEditTime: 2022-01-04 17:06:05
'''
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# from ._utils import _C
# from maskrcnn_benchmark import _C
from ._utils import _C
from apex import amp
# Only valid with fp32 inputs - give AMP the hint
nms = amp.float_function(_C.nms)
# nms.__doc__ = """
# This function performs Non-maximum suppresion"""
|
# coding=utf-8
from random import randint
array = []
cont = 0
while cont <= 7:
array.append(randint(0,100))
cont += 1
else:
print("Numeros Gerados com sucesso! \n{} \nFim da execução!" .format(array))
|
# Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved.
#
# This file is part of Aspose.Words. The source code in this file
# is only intended as a supplement to the documentation, and is provided
# "as is", without warranty of any kind, either expressed or implied.
import aspose.words as aw
import aspose.pydrawing as drawing
from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR
from document_helper import DocumentHelper
class ExStyles(ApiExampleBase):
def test_styles(self):
#ExStart
#ExFor:DocumentBase.styles
#ExFor:Style.document
#ExFor:Style.name
#ExFor:Style.is_heading
#ExFor:Style.is_quick_style
#ExFor:Style.next_paragraph_style_name
#ExFor:Style.styles
#ExFor:Style.type
#ExFor:StyleCollection.document
#ExFor:StyleCollection.__iter__
#ExSummary:Shows how to access a document's style collection.
doc = aw.Document()
self.assertEqual(4, doc.styles.count)
# Enumerate and list all the styles that a document created using Aspose.Words contains by default.
for cur_style in doc.styles:
print(f"Style name:\t\"{cur_style.name}\", of type \"{cur_style.type}\"")
print(f"\tSubsequent style:\t{cur_style.next_paragraph_style_name}")
print(f"\tIs heading:\t\t\t{cur_style.is_heading}")
print(f"\tIs QuickStyle:\t\t{cur_style.is_quick_style}")
self.assertEqual(doc, cur_style.document)
#ExEnd
def test_create_style(self):
#ExStart
#ExFor:Style.font
#ExFor:Style
#ExFor:Style.remove
#ExSummary:Shows how to create and apply a custom style.
doc = aw.Document()
style = doc.styles.add(aw.StyleType.PARAGRAPH, "MyStyle")
style.font.name = "Times New Roman"
style.font.size = 16
style.font.color = drawing.Color.navy
builder = aw.DocumentBuilder(doc)
# Apply one of the styles from the document to the paragraph that the document builder is creating.
builder.paragraph_format.style = doc.styles.get_by_name("MyStyle")
builder.writeln("Hello world!")
first_paragraph_style = doc.first_section.body.first_paragraph.paragraph_format.style
self.assertEqual(style, first_paragraph_style)
# Remove our custom style from the document's styles collection.
doc.styles.get_by_name("MyStyle").remove()
first_paragraph_style = doc.first_section.body.first_paragraph.paragraph_format.style
# Any text that used a removed style reverts to the default formatting.
self.assertFalse(any(s.name == "MyStyle" for s in doc.styles))
self.assertEqual("Times New Roman", first_paragraph_style.font.name)
self.assertEqual(12.0, first_paragraph_style.font.size)
self.assertEqual(drawing.Color.empty().to_argb(), first_paragraph_style.font.color.to_argb())
#ExEnd
def test_style_collection(self):
#ExStart
#ExFor:StyleCollection.add(StyleType,str)
#ExFor:StyleCollection.count
#ExFor:StyleCollection.default_font
#ExFor:StyleCollection.default_paragraph_format
#ExFor:StyleCollection.__getitem__(StyleIdentifier)
#ExFor:StyleCollection.__getitem__(int)
#ExSummary:Shows how to add a Style to a document's styles collection.
doc = aw.Document()
styles = doc.styles
# Set default parameters for new styles that we may later add to this collection.
styles.default_font.name = "Courier New"
# If we add a style of the "StyleType.PARAGRAPH", the collection will apply the values of
# its "default_paragraph_format" property to the style's "paragraph_format" property.
styles.default_paragraph_format.first_line_indent = 15.0
# Add a style, and then verify that it has the default settings.
styles.add(aw.StyleType.PARAGRAPH, "MyStyle")
self.assertEqual("Courier New", styles[4].font.name)
self.assertEqual(15.0, styles.get_by_name("MyStyle").paragraph_format.first_line_indent)
#ExEnd
def test_remove_styles_from_style_gallery(self):
#ExStart
#ExFor:StyleCollection.clear_quick_style_gallery
#ExSummary:Shows how to remove styles from Style Gallery panel.
doc = aw.Document()
        # Note that removing styles works only with the DOCX format for now.
doc.styles.clear_quick_style_gallery()
doc.save(ARTIFACTS_DIR + "Styles.remove_styles_from_style_gallery.docx")
#ExEnd
def test_change_tocs_tab_stops(self):
#ExStart
#ExFor:TabStop
#ExFor:ParagraphFormat.tab_stops
#ExFor:Style.style_identifier
#ExFor:TabStopCollection.remove_by_position
#ExFor:TabStop.alignment
#ExFor:TabStop.position
#ExFor:TabStop.leader
#ExSummary:Shows how to modify the position of the right tab stop in TOC related paragraphs.
doc = aw.Document(MY_DIR + "Table of contents.docx")
        # Iterate through all paragraphs with TOC result-based styles; this is any style between TOC1 and TOC9.
for para in doc.get_child_nodes(aw.NodeType.PARAGRAPH, True):
para = para.as_paragraph()
if (para.paragraph_format.style.style_identifier >= aw.StyleIdentifier.TOC1 and
para.paragraph_format.style.style_identifier <= aw.StyleIdentifier.TOC9):
# Get the first tab used in this paragraph, this should be the tab used to align the page numbers.
tab = para.paragraph_format.tab_stops[0]
                # Replace the first default tab stop with a custom tab stop.
para.paragraph_format.tab_stops.remove_by_position(tab.position)
para.paragraph_format.tab_stops.add(tab.position - 50, tab.alignment, tab.leader)
doc.save(ARTIFACTS_DIR + "Styles.change_tocs_tab_stops.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "Styles.change_tocs_tab_stops.docx")
for para in doc.get_child_nodes(aw.NodeType.PARAGRAPH, True):
para = para.as_paragraph()
if (para.paragraph_format.style.style_identifier >= aw.StyleIdentifier.TOC1 and
para.paragraph_format.style.style_identifier <= aw.StyleIdentifier.TOC9):
tab_stop = para.get_effective_tab_stops()[0]
self.assertEqual(400.8, tab_stop.position)
self.assertEqual(aw.TabAlignment.RIGHT, tab_stop.alignment)
self.assertEqual(aw.TabLeader.DOTS, tab_stop.leader)
def test_copy_style_same_document(self):
#ExStart
#ExFor:StyleCollection.add_copy
#ExFor:Style.name
#ExSummary:Shows how to clone a document's style.
doc = aw.Document()
# The add_copy method creates a copy of the specified style and
# automatically generates a new name for the style, such as "Heading 1_0".
new_style = doc.styles.add_copy(doc.styles.get_by_name("Heading 1"))
# Use the style's "name" property to change the style's identifying name.
new_style.name = "My Heading 1"
# Our document now has two identical looking styles with different names.
# Changing settings of one of the styles do not affect the other.
new_style.font.color = drawing.Color.red
self.assertEqual("My Heading 1", new_style.name)
self.assertEqual("Heading 1", doc.styles.get_by_name("Heading 1").name)
self.assertEqual(doc.styles.get_by_name("Heading 1").type, new_style.type)
self.assertEqual(doc.styles.get_by_name("Heading 1").font.name, new_style.font.name)
self.assertEqual(doc.styles.get_by_name("Heading 1").font.size, new_style.font.size)
self.assertNotEqual(doc.styles.get_by_name("Heading 1").font.color, new_style.font.color)
#ExEnd
def test_copy_style_different_document(self):
#ExStart
#ExFor:StyleCollection.add_copy
#ExSummary:Shows how to import a style from one document into a different document.
src_doc = aw.Document()
# Create a custom style for the source document.
src_style = src_doc.styles.add(aw.StyleType.PARAGRAPH, "MyStyle")
src_style.font.color = drawing.Color.red
# Import the source document's custom style into the destination document.
dst_doc = aw.Document()
new_style = dst_doc.styles.add_copy(src_style)
# The imported style has an appearance identical to its source style.
self.assertEqual("MyStyle", new_style.name)
self.assertEqual(drawing.Color.red.to_argb(), new_style.font.color.to_argb())
#ExEnd
def test_default_styles(self):
doc = aw.Document()
doc.styles.default_font.name = "PMingLiU"
doc.styles.default_font.bold = True
doc.styles.default_paragraph_format.space_after = 20
doc.styles.default_paragraph_format.alignment = aw.ParagraphAlignment.RIGHT
doc = DocumentHelper.save_open(doc)
self.assertTrue(doc.styles.default_font.bold)
self.assertEqual("PMingLiU", doc.styles.default_font.name)
self.assertEqual(20, doc.styles.default_paragraph_format.space_after)
self.assertEqual(aw.ParagraphAlignment.RIGHT, doc.styles.default_paragraph_format.alignment)
def test_paragraph_style_bulleted_list(self):
#ExStart
#ExFor:StyleCollection
#ExFor:DocumentBase.styles
#ExFor:Style
#ExFor:Font
#ExFor:Style.font
#ExFor:Style.paragraph_format
#ExFor:Style.list_format
#ExFor:ParagraphFormat.style
#ExSummary:Shows how to create and use a paragraph style with list formatting.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Create a custom paragraph style.
style = doc.styles.add(aw.StyleType.PARAGRAPH, "MyStyle1")
style.font.size = 24
style.font.name = "Verdana"
style.paragraph_format.space_after = 12
# Create a list and make sure the paragraphs that use this style will use this list.
style.list_format.list = doc.lists.add(aw.lists.ListTemplate.BULLET_DEFAULT)
style.list_format.list_level_number = 0
# Apply the paragraph style to the document builder's current paragraph, and then add some text.
builder.paragraph_format.style = style
builder.writeln("Hello World: MyStyle1, bulleted list.")
# Change the document builder's style to one that has no list formatting and write another paragraph.
builder.paragraph_format.style = doc.styles.get_by_name("Normal")
builder.writeln("Hello World: Normal.")
builder.document.save(ARTIFACTS_DIR + "Styles.paragraph_style_bulleted_list.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "Styles.paragraph_style_bulleted_list.docx")
style = doc.styles.get_by_name("MyStyle1")
self.assertEqual("MyStyle1", style.name)
self.assertEqual(24, style.font.size)
self.assertEqual("Verdana", style.font.name)
self.assertEqual(12.0, style.paragraph_format.space_after)
def test_style_aliases(self):
#ExStart
#ExFor:Style.aliases
#ExFor:Style.base_style_name
#ExFor:Style.__eq__(Style)
#ExFor:Style.linked_style_name
#ExSummary:Shows how to use style aliases.
doc = aw.Document(MY_DIR + "Style with alias.docx")
# This document contains a style named "MyStyle,MyStyle Alias 1,MyStyle Alias 2".
# If a style's name has multiple values separated by commas, each clause is a separate alias.
style = doc.styles.get_by_name("MyStyle")
self.assertSequenceEqual(["MyStyle Alias 1", "MyStyle Alias 2"], style.aliases)
self.assertEqual("Title", style.base_style_name)
self.assertEqual("MyStyle Char", style.linked_style_name)
# We can reference a style using its alias, as well as its name.
self.assertEqual(doc.styles.get_by_name("MyStyle Alias 1"), doc.styles.get_by_name("MyStyle Alias 2"))
builder = aw.DocumentBuilder(doc)
builder.move_to_document_end()
builder.paragraph_format.style = doc.styles.get_by_name("MyStyle Alias 1")
builder.writeln("Hello world!")
builder.paragraph_format.style = doc.styles.get_by_name("MyStyle Alias 2")
builder.write("Hello again!")
self.assertEqual(
doc.first_section.body.paragraphs[0].paragraph_format.style,
doc.first_section.body.paragraphs[1].paragraph_format.style)
#ExEnd
|
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
DataType,
FloatType,
)
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class RiskAssessment_PredictionSchema:
"""
An assessment of the likely outcome(s) for a patient or other subject as well
as the likelihood of each outcome.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
An assessment of the likely outcome(s) for a patient or other subject as well
as the likelihood of each outcome.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
outcome: One of the potential outcomes for the patient (e.g. remission, death, a
particular condition).
probabilityDecimal: Indicates how likely the outcome is (in the specified timeframe).
probabilityRange: Indicates how likely the outcome is (in the specified timeframe).
qualitativeRisk: Indicates how likely the outcome is (in the specified timeframe), expressed as
a qualitative value (e.g. low, medium, or high).
relativeRisk: Indicates the risk for this particular subject (with their specific
characteristics) divided by the risk of the population in general. (Numbers
greater than 1 = higher risk than the population, numbers less than 1 = lower
risk.).
whenPeriod: Indicates the period of time or age range of the subject to which the
specified probability applies.
whenRange: Indicates the period of time or age range of the subject to which the
specified probability applies.
rationale: Additional information explaining the basis for the prediction.
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.r4.complex_types.range import RangeSchema
from spark_fhir_schemas.r4.simple_types.decimal import decimalSchema
from spark_fhir_schemas.r4.complex_types.period import PeriodSchema
if (
max_recursion_limit
and nesting_list.count("RiskAssessment_Prediction") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["RiskAssessment_Prediction"]
my_parent_path = (
parent_path + ".riskassessment_prediction"
if parent_path
else "riskassessment_prediction"
)
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the element and that modifies the understanding of the element
# in which it is contained and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer can define an extension, there is a set of requirements that SHALL
# be met as part of the definition of the extension. Applications processing a
# resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# One of the potential outcomes for the patient (e.g. remission, death, a
# particular condition).
StructField(
"outcome",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Indicates how likely the outcome is (in the specified timeframe).
StructField("probabilityDecimal", FloatType(), True),
# Indicates how likely the outcome is (in the specified timeframe).
StructField(
"probabilityRange",
RangeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Indicates how likely the outcome is (in the specified timeframe), expressed as
# a qualitative value (e.g. low, medium, or high).
StructField(
"qualitativeRisk",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Indicates the risk for this particular subject (with their specific
# characteristics) divided by the risk of the population in general. (Numbers
# greater than 1 = higher risk than the population, numbers less than 1 = lower
# risk.).
StructField(
"relativeRisk",
decimalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".relativerisk",
),
True,
),
# Indicates the period of time or age range of the subject to which the
# specified probability applies.
StructField(
"whenPeriod",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Indicates the period of time or age range of the subject to which the
# specified probability applies.
StructField(
"whenRange",
RangeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Additional information explaining the basis for the prediction.
StructField("rationale", StringType(), True),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
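# Hedged usage sketch (assumes an active SparkSession; not part of the
# auto-generated schema above):
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.getOrCreate()
#     schema = RiskAssessment_PredictionSchema.get_schema()
#     empty_df = spark.createDataFrame([], schema=schema)  # empty DataFrame with this FHIR schema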
|
from gwa_framework.resource.base import BaseResource
from gwa_framework.utils.decorators import validate_schema
from common.models.goal import GoalModel
from common.repositories.goal import GoalRepository
from common.schemas.goal import GoalInputSchema, GoalOutputSchema
class GoalResource(BaseResource):
method_decorators = {
'create': [validate_schema(GoalInputSchema)],
'retrieve': [validate_schema(GoalInputSchema)],
'list': []
}
def create(self, request_model: 'GoalInputSchema'):
goal = GoalRepository.create(GoalModel(request_model))
schema = GoalOutputSchema()
schema.goal_id = goal.id
schema.description = goal.description
return schema.to_primitive()
def list(self, args=None, kwargs=None):
goals = GoalRepository.get(kwargs)
results = []
for goal in goals:
schema = GoalOutputSchema()
schema.goal_id = goal.id
schema.description = goal.description
results.append(schema.to_primitive())
return results
def retrieve(self, filter, filter2, kwargs=None):
return {'filter': filter, 'filter2': filter2, 'args': kwargs, 'kwargs': kwargs}
|
from schema import Schema, Optional, SchemaError
from exceptions import RoutificParamsError
"""
Defines and validates visits, which are locations that must be visited.
"""
VISIT_SCHEMA = Schema({
"location": {
"lat": float,
"lng": float,
Optional("name"): str
},
Optional("start", default="00:00"): str,
Optional("end", default="23:59"): str,
Optional("duration"): int,
Optional("demand"): str,
Optional("priority"): str,
Optional("type"): str,
Optional("time_windows", default=[]): list
})
class Visit():
""" Represents a location that must be visited in the route
"""
def __init__(self, params):
try:
valid_params = VISIT_SCHEMA.validate(params)
        except SchemaError:
raise RoutificParamsError("Invalid or incomplete parameters")
self.lat = valid_params["location"]["lat"]
self.lng = valid_params["location"]["lng"]
self.name = valid_params["location"].get("name", None)
self.start = valid_params["start"]
self.end = valid_params["end"]
self.duration = valid_params.get("duration", None)
self.demand = valid_params.get("demand", None)
self.priority = valid_params.get("priority", None)
self.type = valid_params.get("type", None)
self.time_windows = valid_params.get("time_windows", None)
def __repr__(self) -> str:
if self.name:
return f"<Visit: {self.name}>"
else:
return f"<Visit: {self.lat}, {self.lng}>"
def to_api(self) -> dict:
""" Returns a dict that can be sent to the Routific API
"""
ret = {
"location": {
"lat": self.lat,
"lng": self.lng,
"name": self.name
},
"start": self.start,
"end": self.end,
"duration": self.duration,
"demand": self.demand,
"priority": self.priority,
"type": self.type,
"time_windows": self.time_windows
}
# Remove "None" as there is no purpose in sending them to API
return {k: v for k, v in ret.items() if v}
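# Hedged usage sketch (coordinates and fields are made up):
#     visit = Visit({"location": {"lat": 49.28, "lng": -123.12, "name": "Depot"},
#                    "duration": 10})
#     payload = visit.to_api()  # start/end fall back to the schema defaults "00:00"/"23:59"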
|
"""
TorchScript implementation of the low-level push/pull utilities.
The idea is to eventually have an alternate implementation that does not
require compiling C++/CUDA code. The compiled version could still be
installed [optionally] by the setup script, since it is expected to be
much faster than the TorchScript version.
"""
from .pushpull import (
grid_pull, grid_pull_backward, grid_push, grid_push_backward,
grid_count, grid_count_backward, grid_grad, grid_grad_backward)
from .bounds import BoundType
from .splines import InterpolationType
from .coeff import spline_coeff, spline_coeff_nd
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test
import os
from os.path import join, exists
import urllib
import hashlib
import sys
import tarfile
TEST_262_ARCHIVE_REVISION = 'fb327c439e20' # This is the r334 revision.
TEST_262_ARCHIVE_MD5 = '307acd166ec34629592f240dc12d57ed'
TEST_262_URL = 'http://hg.ecmascript.org/tests/test262/archive/%s.tar.bz2'
TEST_262_HARNESS = ['sta.js']
class Test262TestCase(test.TestCase):
def __init__(self, filename, path, context, root, mode, framework):
super(Test262TestCase, self).__init__(context, path, mode)
self.filename = filename
self.framework = framework
self.root = root
def IsNegative(self):
return '@negative' in self.GetSource()
def GetLabel(self):
return "%s test262 %s" % (self.mode, self.GetName())
def IsFailureOutput(self, output):
if output.exit_code != 0:
return True
return 'FAILED!' in output.stdout
def GetCommand(self):
result = self.context.GetVmCommand(self, self.mode)
result += self.framework
result.append(self.filename)
return result
def GetName(self):
return self.path[-1]
def GetSource(self):
return open(self.filename).read()
class Test262TestConfiguration(test.TestConfiguration):
def __init__(self, context, root):
super(Test262TestConfiguration, self).__init__(context, root)
def ListTests(self, current_path, path, mode, variant_flags):
testroot = join(self.root, 'data', 'test', 'suite')
harness = [join(self.root, 'data', 'test', 'harness', f)
for f in TEST_262_HARNESS]
harness += [join(self.root, 'harness-adapt.js')]
tests = []
for root, dirs, files in os.walk(testroot):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
dirs.sort()
root_path = root[len(self.root):].split(os.path.sep)
root_path = current_path + [x for x in root_path if x]
files.sort()
for file in files:
if file.endswith('.js'):
test_path = ['test262', file[:-3]]
if self.Contains(path, test_path):
test = Test262TestCase(join(root, file), test_path, self.context,
self.root, mode, harness)
tests.append(test)
return tests
def DownloadData(self):
revision = TEST_262_ARCHIVE_REVISION
archive_url = TEST_262_URL % revision
archive_name = join(self.root, 'test262-%s.tar.bz2' % revision)
directory_name = join(self.root, 'data')
directory_old_name = join(self.root, 'data.old')
if not exists(archive_name):
print "Downloading test data from %s ..." % archive_url
urllib.urlretrieve(archive_url, archive_name)
if exists(directory_name):
os.rename(directory_name, directory_old_name)
if not exists(directory_name):
print "Extracting test262-%s.tar.bz2 ..." % revision
md5 = hashlib.md5()
with open(archive_name,'rb') as f:
for chunk in iter(lambda: f.read(8192), ''):
md5.update(chunk)
if md5.hexdigest() != TEST_262_ARCHIVE_MD5:
os.remove(archive_name)
raise Exception("Hash mismatch of test data file")
archive = tarfile.open(archive_name, 'r:bz2')
if sys.platform in ('win32', 'cygwin'):
# Magic incantation to allow longer path names on Windows.
archive.extractall(u'\\\\?\\%s' % self.root)
else:
archive.extractall(self.root)
os.rename(join(self.root, 'test262-%s' % revision), directory_name)
def GetBuildRequirements(self):
return ['d8']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, 'test262.status')
if exists(status_file):
test.ReadConfigurationInto(status_file, sections, defs)
def GetConfiguration(context, root):
return Test262TestConfiguration(context, root)
|
import numpy as np
import pandas as pd
import os
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
fig_path = 'H:/py4fi/images/ch08/'
if not os.path.exists(fig_path):
os.makedirs(fig_path)
raw = pd.read_csv('tr_eikon_eod_data.csv', index_col=0, parse_dates=True)
data = raw[['.SPX', '.VIX']].dropna()
print(data.tail())
# data.plot(subplots=True, figsize=(10, 6))
# plt.savefig(fig_path+'fts_08.png')
# data.loc[:'2012-12-31'].plot(secondary_y='.VIX', figsize=(10, 6))
# plt.savefig(fig_path+'fts_09.png')
# plt.show()
print(data.loc[:'2012-12-31'].tail())
rets = np.log(data/data.shift(1))
print(rets.head())
rets.dropna(inplace=True)
rets.plot(subplots=True, figsize=(10, 6))
# plt.savefig(fig_path+'fts_10.png')
# pd.plotting.scatter_matrix(rets, alpha=0.2, diagonal='hist', hist_kwds={'bins':35}, figsize=(10, 6))  # alpha=0.2 sets marker transparency
# plt.savefig(fig_path+'fts_11.png')
# pd.plotting.scatter_matrix(rets, alpha=0.2, diagonal='kde', figsize=(10, 6))
# plt.show()
reg = np.polyfit(rets['.SPX'], rets['.VIX'], deg=1)  # fit y = b1*x + b0 by least squares (linear regression); polyfit returns [b1, b0]
ax = rets.plot(kind='scatter', x='.SPX', y='.VIX', figsize=(10, 6))
ax.plot(rets['.SPX'], np.polyval(reg, rets['.SPX']), 'r', lw=2)
plt.savefig(fig_path+'fts_12.png')
# plt.show()
print(rets.corr().iloc[0, 1])
# ax = rets['.SPX'].rolling(window=252).corr(rets['.VIX']).plot(figsize=(10, 6))
# ax.axhline(rets.corr().iloc[0, 1], c='r')
ax = pd.DataFrame(rets['.SPX'].rolling(window=252).corr(
    rets['.VIX'])).plot(figsize=(10, 6))  # wrap the Series in a DataFrame, otherwise plot raises an error here
print(type(ax))
ax.axhline(rets.corr().iloc[0, 1], c='r')
plt.savefig(fig_path+'fts_13.png')
plt.show()
|
__author__ = "Rodrigo Yamamoto"
__date__ = "2021.Mar"
__credits__ = ["Rodrigo Yamamoto"]
__maintainer__ = "Rodrigo Yamamoto"
__email__ = "codes@rodrigoyamamoto.com"
__version__ = "version 0.1.8.9"
__license__ = "MIT"
__status__ = "development"
__description__ = "A simple and concise gridded data IO library for reading multiple grib and netcdf files"
import copy
import logging
import multiprocessing
import os
import warnings
from datetime import datetime, timedelta
from functools import partial
import numpy as np
import numpy.ma as ma
from gdio.commons import near_yx, objectify
from gdio.grib import grib as gblib
from gdio.netcdf import netcdf as nclib
warnings.filterwarnings("ignore")
class gdio(object):
def __init__(self,
verbose=False,
remap_n_processes=2):
self.verbose = verbose
self.remap_n_processes = remap_n_processes
self.dataset = list()
self.coordinates = list()
self.variables = list()
self.__fields_latitude = ['latitude', 'lat', 'xlat', 'LATITUDE']
self.__fields_longitude = ['longitude', 'lon', 'xlon', 'LONGITUDE']
self.__fields_time = ['time', 'TIME']
self.__fields_level = ['level', 'lev', 'LEVEL', 'levels', 'LEVELS']
self.fields_ensemble = 'perturbationNumber'
self.fields_ensemble_exception = [0]
self.lon = None
self.lat = None
self.time = None
self.time_units = None
self.history = None
        logging.basicConfig(datefmt='%Y-%m-%dT%H:%M:%S', level=logging.DEBUG,
format='[%(levelname)s @ %(asctime)s] %(message)s')
def thread(self, ifile,
vars=None,
cut_time=None,
cut_domain=None,
level_type=None,
filter_by={},
sort_before=False):
'''
Load and cutting function
:param ifile: string
filename
:param vars: list
            list of variables
:param cut_time: tuple
range of time to cut ex.: (0,10)/(0,None)/(None,10)
:param cut_domain: tuple
range of latitudes and longitudes to cut: (lat1, lon1, lat2, lon2)
ex.: (-45,-90,20,-30)/(-45,None,20,-30)/(None,-90,None,-20)
        :param level_type: list
type of level (hybrid, isobaricInhPa, surface)
        :param filter_by: dictionary
dict with grib parameters at form of pair key:values (list or single values)
eg: filter_by={'perturbationNumber': [0,10],'level': [1000,500,250]}
or filter_by={'gridType': 'regular_ll'}
        :param sort_before: bool
            Sort fields by validityDate, validityTime, paramId, typeOfLevel,
            perturbationNumber and level before processing. Warning: high memory
            consumption; use only when the grib data structure is not standard
:return: dictionary
'''
if os.path.isfile(ifile):
logging.info('''[PID:{0}] io.thread > opening file: {1}'''.format(os.getpid(), ifile))
_data = None
gb = gblib(verbose=self.verbose)
gb.fields_ensemble = self.fields_ensemble
gb.fields_ensemble_exception = self.fields_ensemble_exception
nc = nclib(verbose=self.verbose)
if gb.is_grib(ifile):
return gb.gb_load(ifile, vars=vars,
cut_time=cut_time,
cut_domain=cut_domain,
level_type=level_type,
filter_by=filter_by,
sort_before=sort_before)
else:
return nc.nc_load(ifile, vars=vars,
cut_time=cut_time,
cut_domain=cut_domain,
level_type=level_type)
else:
logging.warning('''[PID:{0}] io.thread > missing file: {1}'''.format(os.getpid(),ifile))
return None
def mload(self,
files,
vars=None,
merge_files=True,
cut_time=None,
cut_domain=None,
level_type=None,
filter_by={},
uniformize_grid=True,
sort_before=False,
inplace=False):
'''
Load multiple grib/netcdf files
:param files: list
files names
:param uniformize_grid: boolean
interpolate all ncs to first nc grid specification
:param vars: list
variables names
:param merge_files: boolean
merge files
:param cut_time: tuple
range of time to cut ex.: (0,10)/(0,None)/(None,10)
:param cut_domain: tuple
range of latitudes and longitudes to cut: (lat1, lon1, lat2, lon2)
ex.: (-45,-90,20,-30)/(-45,None,20,-30)/(None,-90,None,-20)
:param level_type: list
type of level (hybrid, isobaricInhPa, surface)
        :param filter_by: dictionary
dict with grib parameters at form of pair key:values (list or single values)
eg: filter_by={'perturbationNumber': [0,10],'level': [1000,500,250]}
or filter_by={'gridType': 'regular_ll'}
        :param rename_vars: dictionary
rename variables names (key) for a new name (value).
Eg. {'tmpmdl': 't', 'tmpprs': 't'}
        :param sort_before: bool
            Sort fields by validityDate, validityTime, paramId, typeOfLevel,
            perturbationNumber and level before processing. Warning: high memory
            consumption; use only when the grib data structure is not standard
:return: list of dictionaries
'''
data = objectify()
griddes = ()
ref_time = None
t_units = 1
        # convert timestep index to a time series ......
def dtp(t, unity=1):
return timedelta(days=float(t * unity))
vf = np.vectorize(dtp)
# ..........................................
pool = multiprocessing.Pool(processes=self.remap_n_processes)
if isinstance(files, str):
files = [files]
for _dat in pool.map(
partial(self.thread, vars=vars,
cut_time=cut_time,
cut_domain=cut_domain,
level_type=level_type,
filter_by=filter_by,
sort_before=sort_before),
files):
if _dat:
ref_time = _dat.get('ref_time')
# setting the standard projection/dimensions
if not griddes:
lons_n, lats_n = self.__get_dims(_dat, vars)
griddes = lats_n.shape + lons_n.shape
for key, val in _dat.items():
                    # convert to day units
if _dat.get('time_units').lower() in ['hour', 'hours', 'hrs']:
t_units = 1 / 24
else:
t_units = 1
if (vars is None or key in vars) \
and not key in ['latitude', 'longitude', 'ref_time', 'time', 'time_units']:
for typLev in val.level_type:
# uniformize all grids ...........
if uniformize_grid:
# grid resample, if spatial dimensions are different of first grid(z,lat,lon)
if not val[typLev].value.shape[3:] == griddes:
logging.info('''gdio.mload > auto remapping grid @ {0}'''.format(key))
# interpolate through z dimension .........
_tmp = np.ones(val[typLev].value.shape[:3]+griddes) * np.nan
# WARNING: If the z dimension of the source data is different
# from that of the ref data, the interpolated level data may not
# represent the same z as the ref data
for m in range(_tmp.shape[0]):
for z in range(_tmp.shape[2]):
try:
_tmp[m,:,z,:,:] = self.remapbil(val[typLev].value[m,:,z,:,:],
val.longitude, val.latitude,
lons_n, lats_n, order=1, masked=True)
except Exception as e:
logging.error('''gdio.mload > auto remapping grid error {0}'''.format(e))
val[typLev].value = _tmp
del val.longitude, val.latitude
del _tmp
# update the lat/lon dimensions
data['longitude'], data['latitude'] = lons_n, lats_n
# merge files ........................
if merge_files:
if key in data.keys() and typLev in data[key].keys():
if not (key in self.__fields_latitude
or key in self.__fields_longitude
or key in self.__fields_time
or key in self.__fields_level
or key in ['ref_time', 'time_units']): # merge variable field
try:
data[key][typLev].value = np.concatenate((data[key][typLev].value,
val[typLev].value), axis=1)
except Exception as e:
logging.error('''gdio.mload > error @ {0} - {1}'''.format(key, e))
else:
if key in data.keys(): # in case of multiples level per variable
data[key].update(val)
else:
data[key] = val
else: # all parameters except variables
# set datetime field
if key in self.__fields_time:
if key in data.keys(): # merge datetime field
try:
_time = ref_time + vf(_dat.get('time'), t_units)
data[key] = np.concatenate((data[key], _time))
except Exception as e:
logging.error('''gdio.mload > error @ {0} - {1}'''.format(key, e))
else:
data['time'] = ref_time + vf(_dat.get('time'), t_units)
data['ref_time'] = _dat.get('ref_time')
else:
if key not in data.keys():
data[key] = val
# do not merge files option ..............
if not merge_files:
data.update({'time': ref_time + vf(_dat.get('time'), t_units)})
data.update({'ref_time': [_dat.get('ref_time')]})
self.dataset.append(data)
data = objectify()
else:
# in case of missing file ................
for key in data.keys():
if not (key in self.__fields_latitude
or key in self.__fields_longitude
or key in self.__fields_time
or key in self.__fields_level
or key in ['ref_time', 'time_units']):
for typLev in val.level_type:
data[key][typLev].value = np.concatenate((data[key][typLev].value,
np.ones((1,1)+data[key][typLev].value.shape[2:]) * np.nan),
axis=1)
elif key in self.__fields_time:
data[key] = np.concatenate((data[key], [data[key][-1] + timedelta(days=t_units)]))
elif key in ['ref_time']:
ref_time += timedelta(days=t_units)
data[key] = np.concatenate((data[key], [ref_time]))
logging.warning('''io.load_nc > missing file applying null grid''')
self.variables = list(data.keys())
self.coordinates.append('latitude')
self.coordinates.append('longitude')
self.coordinates.append('level')
self.coordinates.append('members')
if inplace:
if data:
self.dataset.append(data)
else:
return data
def sel(self,
__data=None,
latitude=None,
longitude=None,
dates=None,
level=None,
member=None,
date_format='%Y-%m-%d %H:%M'):
'''
Select data by coordinates (date, latitude, longitude, levels and members)
:param __data: list of dictionaries
raw dataset
        :param latitude: list of floats
            range of latitudes to select: [lat1, lat2]
            specific latitudes (1 or >2): [lat1, lat2, lat3, ...]
        :param longitude: list of floats
            range of longitudes to select: [lon1, lon2]
            specific longitudes (1 or >2): [lon1, lon2, lon3, ...]
        :param dates: list of datetime/string
            range of dates to select: [date1, date2]
            specific dates (1 or >2): [date1, date2, date3, ...]
        :param level: list of int
            range of levels to select: [level1, level2]
            specific levels (1 or >2): [level1, level2, level3, ...]
        :param member: list of int
            range of members to select: [member1, member2]
            specific members (1 or >2): [member1, member2, member3, ...]
        :param date_format: string
            format used to parse string dates (default '%Y-%m-%d %H:%M')
        :return: list of dictionaries
'''
if __data is None:
__data = copy.deepcopy(self.dataset)
for _dat in __data:
t = dates
x = None
y = None
z = level
# select time
if dates:
for i, dt in enumerate(dates):
if isinstance(dt, str):
dates[i] = datetime.strptime(dt, date_format)
if len(dates) == 2:
t = (_dat.get('time') >= dates[0]) & (_dat.get('time') <= dates[1])
elif len(dates) > 0:
t = np.isin(_dat.get('time'), dates)
# select spatial subdomain
if longitude or latitude:
y, x = near_yx(_dat, lats=latitude, lons=longitude)
for k, v in _dat.items():
# cutting data array
if isinstance(v, dict):
for typLev in v.level_type:
# cut data in longitude dimension
if x:
if len(x) == 2:
_dat[k][typLev].value = _dat[k][typLev].value[:, :, :, :, x[0]:x[1]]
elif len(x) == 1:
_dat[k][typLev].value = _dat[k][typLev].value[:, :, :, :, x[0]]
else:
_dat[k][typLev].value = _dat[k][typLev].value[:, :, :, :, x]
# cut data in latitude dimension
if y:
if len(y) == 2:
_dat[k][typLev].value = _dat[k][typLev].value[:, :, :, y[0]:y[1]]
elif len(y) == 1:
_dat[k][typLev].value = _dat[k][typLev].value[:, :, :, y[0]]
else:
_dat[k][typLev].value = _dat[k][typLev].value[:, :, :, y]
# cut data in levels dimension
if z:
if len(z) == 2:
_dat[k][typLev].value = _dat[k][typLev].value[:, :, z[0]:z[1]]
_dat[k][typLev].level = _dat[k][typLev].level[z[0]:z[1]]
else:
try:
_dat[k][typLev].value = _dat[k][typLev].value[:, :, z]
_dat[k][typLev].level = list(map(_dat[k][typLev].level.__getitem__, z))
except:
_dat[k][typLev].value = _dat[k][typLev].value[:, :, -1]
_dat[k][typLev].level = _dat[k][typLev].level[-1]
# cut data in time dimension
if dates:
_dat[k][typLev].value = _dat[k][typLev].value[:, t]
# cut data in member dimension
if member:
_dat[k][typLev].value = _dat[k][typLev].value[member]
# select cordinates attributes
else:
if k in ['latitude']: # latitude coordinate
if y:
if len(y) == 2:
_dat[k] = _dat[k][y[0]:y[1]]
else:
_dat[k] = _dat[k][y]
elif k in ['longitude']: # longitude coordinate
if x:
if len(x) == 2:
_dat[k] = _dat[k][x[0]:x[1]]
else:
_dat[k] = _dat[k][x]
elif k in ['time']: # time coordinate
if dates:
_dat[k] = _dat[k][t]
else:
_dat.update({k: v})
return __data
def remapbil(self, data, lon, lat, lon_new, lat_new, order=1, masked=False):
'''
Interpolate data to new domain resolution
:param data: array
3D data (time,lon,lat)
:param lon: array
:param lat: array
:param lon_new: array
            new grid longitudes
:param lat_new: array
new grid latitudes
:param order: int
            0 - nearest-neighbor, 1 - bilinear, 3 - cubic spline
:param masked: boolean
            If True, points outside the range of xin and yin
            are masked (in a masked array). If masked is set to a number,
            points outside the range are set to that number instead.
:return: 3D array
'''
_lon_new, _lat_new = np.meshgrid(lon_new, lat_new)
cpu_num = multiprocessing.cpu_count()
n_processes = cpu_num if self.remap_n_processes > cpu_num else self.remap_n_processes
pool = multiprocessing.Pool(processes=n_processes)
# here we parallelise in each step of time, a kind of magic
return np.array(pool.map(
partial(self.interp, xin=lon[np.argsort(lon)],
yin=lat[np.argsort(lat)], xout=_lon_new, yout=_lat_new,
order=order, masked=masked),
data)
)
def interp(self, datain, xin, yin, xout, yout, checkbounds=False, masked=False, order=1):
"""
From basemap lib
Interpolate data (``datain``) on a rectilinear grid (with x = ``xin``
y = ``yin``) to a grid with x = ``xout``, y= ``yout``.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
datain a rank-2 array with 1st dimension corresponding to
y, 2nd dimension x.
xin, yin rank-1 arrays containing x and y of
datain grid in increasing order.
xout, yout rank-2 arrays containing x and y of desired output grid.
============== ====================================================
Keywords Description
============== ====================================================
checkbounds If True, values of xout and yout are checked to see
that they lie within the range specified by xin
and xin.
If False, and xout,yout are outside xin,yin,
interpolated values will be clipped to values on
boundary of input grid (xin,yin)
Default is False.
masked If True, points outside the range of xin and yin
are masked (in a masked array).
If masked is set to a number, then
points outside the range of xin and yin will be
set to that number. Default False.
order 0 for nearest-neighbor interpolation, 1 for
                       bilinear interpolation, 3 for cubic spline
(default 1). order=3 requires scipy.ndimage.
============== ====================================================
.. note::
If datain is a masked array and order=1 (bilinear interpolation) is
used, elements of dataout will be masked if any of the four surrounding
points in datain are masked. To avoid this, do the interpolation in two
passes, first with order=1 (producing dataout1), then with order=0
(producing dataout2). Then replace all the masked values in dataout1
with the corresponding elements in dataout2 (using numpy.where).
This effectively uses nearest neighbor interpolation if any of the
four surrounding points in datain are masked, and bilinear interpolation
otherwise.
Returns ``dataout``, the interpolated data on the grid ``xout, yout``.
"""
# xin and yin must be monotonically increasing.
if xin[-1] - xin[0] < 0 or yin[-1] - yin[0] < 0:
raise ValueError('xin and yin must be increasing!')
if xout.shape != yout.shape:
raise ValueError('xout and yout must have same shape!')
# check that xout,yout are
# within region defined by xin,yin.
if checkbounds:
if xout.min() < xin.min() or \
xout.max() > xin.max() or \
yout.min() < yin.min() or \
yout.max() > yin.max():
raise ValueError('yout or xout outside range of yin or xin')
# compute grid coordinates of output grid.
delx = xin[1:] - xin[0:-1]
dely = yin[1:] - yin[0:-1]
if max(delx) - min(delx) < 1.e-4 and max(dely) - min(dely) < 1.e-4:
# regular input grid.
xcoords = (len(xin) - 1) * (xout - xin[0]) / (xin[-1] - xin[0])
ycoords = (len(yin) - 1) * (yout - yin[0]) / (yin[-1] - yin[0])
else:
# irregular (but still rectilinear) input grid.
xoutflat = xout.flatten()
youtflat = yout.flatten()
ix = (np.searchsorted(xin, xoutflat) - 1).tolist()
iy = (np.searchsorted(yin, youtflat) - 1).tolist()
xoutflat = xoutflat.tolist()
xin = xin.tolist()
youtflat = youtflat.tolist()
yin = yin.tolist()
xcoords = []
ycoords = []
for n, i in enumerate(ix):
if i < 0:
xcoords.append(-1) # outside of range on xin (lower end)
elif i >= len(xin) - 1:
xcoords.append(len(xin)) # outside range on upper end.
else:
xcoords.append(
float(i) + (xoutflat[n] - xin[i]) / (xin[i + 1] - xin[i]))
for m, j in enumerate(iy):
if j < 0:
# outside of range of yin (on lower end)
ycoords.append(-1)
elif j >= len(yin) - 1:
ycoords.append(len(yin)) # outside range on upper end
else:
ycoords.append(
float(j) + (youtflat[m] - yin[j]) / (yin[j + 1] - yin[j]))
xcoords = np.reshape(xcoords, xout.shape)
ycoords = np.reshape(ycoords, yout.shape)
# data outside range xin,yin will be clipped to
# values on boundary.
if masked:
xmask = np.logical_or(np.less(xcoords, 0),
np.greater(xcoords, len(xin) - 1))
ymask = np.logical_or(np.less(ycoords, 0),
np.greater(ycoords, len(yin) - 1))
xymask = np.logical_or(xmask, ymask)
xcoords = np.clip(xcoords, 0, len(xin) - 1)
ycoords = np.clip(ycoords, 0, len(yin) - 1)
# interpolate to output grid using bilinear interpolation.
if order == 1:
xi = xcoords.astype(np.int32)
yi = ycoords.astype(np.int32)
xip1 = xi + 1
yip1 = yi + 1
xip1 = np.clip(xip1, 0, len(xin) - 1)
yip1 = np.clip(yip1, 0, len(yin) - 1)
delx = xcoords - xi.astype(np.float32)
dely = ycoords - yi.astype(np.float32)
dataout = (1. - delx) * (1. - dely) * datain[yi, xi] + \
delx * dely * datain[yip1, xip1] + \
(1. - delx) * dely * datain[yip1, xi] + \
delx * (1. - dely) * datain[yi, xip1]
elif order == 0:
xcoordsi = np.around(xcoords).astype(np.int32)
ycoordsi = np.around(ycoords).astype(np.int32)
dataout = datain[ycoordsi, xcoordsi]
elif order == 3:
try:
from scipy.ndimage import map_coordinates
except ImportError:
raise ValueError('scipy.ndimage must be installed if order=3')
coords = [ycoords, xcoords]
dataout = map_coordinates(datain, coords, order=3, mode='nearest')
else:
raise ValueError('order keyword must be 0, 1 or 3')
if masked:
newmask = ma.mask_or(ma.getmask(dataout), xymask)
dataout = ma.masked_array(dataout, mask=newmask)
if not isinstance(masked, bool):
dataout = dataout.filled(masked)
return dataout
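    # A tiny worked check of the bilinear branch above (order=1, arbitrary values):
    # with datain=[[0, 1], [2, 3]], xin=yin=[0, 1] and xout=yout=[[0.5]], all four
    # corner weights equal 0.25, so the interpolated value is
    # 0.25*(0 + 1 + 2 + 3) = 1.5.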
def __get_dims(self, data, var=None):
'''
Get grid data dimension
:param data: dictionary
data
:param var: string
grid reference variable name
:return: tuple
grid dimension, lat/lon of grid reference
'''
for key, val in data.items():
if (var is None or key == var[0]) \
and not key in ['latitude', 'longitude', 'ref_time', 'time', 'time_units']:
return val.longitude, val.latitude
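# Hedged usage sketch (file names are placeholders; arguments follow the
# docstrings above):
#     io = gdio(verbose=False, remap_n_processes=2)
#     ds = io.mload(['model_run.grib2', 'model_run.nc'], vars=['t'],
#                   cut_domain=(-45, -90, 20, -30), merge_files=True)
#     subset = io.sel([ds], latitude=[-30, 0], dates=['2021-03-01 00:00'])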
|
def jdt(i,alln):
ii=alln//10
if ii>0:
iii=i
iiii=alln
i=i//ii
alln=10
print("[%s>%s] %u/%u"%("="*i," "*(alln-1-i),iii,iiii-1) , end = '\r')
print("="*50)
print("made in china?")
print("="*50)
forcs = int(input("please type the number of iterations: "))
a = 0
b = 1
fib_list = []
for i in range(forcs):
    jdt(i, forcs)
    a, b = b, a + b
    fib_list.append(a)
if int(input('what do you want?\n1:print list ')) == 1:
    print(fib_list)
|
from app.content.models.badge import Badge
from app.content.models.category import Category
from app.content.models.cheatsheet import Cheatsheet
from app.content.models.event import Event
from app.content.models.news import News
from app.content.models.user import User, UserManager
from app.content.models.user_badge import UserBadge
from app.content.models.registration import Registration
from app.content.models.warning import Warning
from app.content.models.page import Page
from app.content.models.prioritiy import Priority
from app.content.models.short_link import ShortLink
from app.content.models.notification import Notification
from app.content.models.strike import (
Strike,
get_strike_description,
get_strike_strike_size,
)
|
from flask import Flask, request, Response
from flask_cors import CORS
import drone_awe
import json
import copy
import traceback
import utilities
'''
Notes:
- Need to disable plotting in the library
- if possible remove matplotlib entirely from the library
- if possible remove gekko object from a.output in order to make it JSONSerializable
'''
app = Flask(__name__)
CORS(app)
@app.route('/')
def root():
return json.dumps({
'msg': 'Drones and weather API 0.0.2. See DroneEcon.com for details.'
})
@app.route('/getValidationCases')
def getValidationCases():
try:
data = drone_awe.validationdatabase
data = [d for d in data if 'xvalid' in d]
resp = Response(json.dumps(data))
return resp
except Exception as err:
return utilities.handleError(err)
@app.route('/getDrones')
def getDrones():
try:
drones = copy.deepcopy(drone_awe.drones)
resp = []
for drone in drones:
el = {}
if 'battery' in drone:
for prop in drone['battery']:
l = list(filter(lambda el: el['param'] == prop, utilities.ParamMap))
if len(l) > 0:
el[l[0]['display']] = drone['battery'][prop]
del drone['battery']
for prop in drone:
l = list(filter(lambda el: el['param'] == prop, utilities.ParamMap))
if len(l) > 0:
el[l[0]['display']] = drone[prop]
resp.append(el)
return Response(json.dumps(resp))
    except Exception as err:
        return utilities.handleError(err)
@app.route('/simulate', methods=['POST'])
def simulate():
try:
# Track Z-variable
zParam = None
params = {}
if request.data:
params = json.loads(request.data)
# zParam = params['zParam']
for arg in utilities.DefaultArgs:
if arg['name'] not in params:
                print(f"Missing {arg['name']}. Using default value: {arg['default']}")
params[arg['name']] = arg['default']
a = drone_awe.model(params)
try:
a.simulate()
data = a.output
resp = {
'error': False,
'errorType': None,
'log': 'Successful simulation',
'plottables': [],
'zAxis': {
'id': zParam,
'displayName': '',
'values': []
}
}
if zParam:
resp['zAxis']['displayName'] = data['zvals']
for key in list(data.keys()):
if key != 'zvals' and type(data[key][0][0]) != str:
l = list(filter(lambda el: el['param'] == key, utilities.ParamMap))
if len(l) >= 1:
displayName = l[0]['display']
plottable = {
'id': key,
'displayName': displayName,
'values': data[key]
}
if key == 'missionspeed':
print(plottable)
resp['plottables'].append(plottable)
else:
                        print(f'Missing ParamMap entry for {key}')
resp = Response(json.dumps(resp))
return resp
except Exception as err:
resp = {
'error': True,
'errorType': None,
'log': 'Simulation failed: ' + err.__repr__(),
'plottables': [],
'zAxis': {
'id': zParam,
'displayName': '',
'values': []
}
}
resp = Response(json.dumps(resp))
return resp
except Exception as err:
return utilities.handleError(err)
if __name__ == '__main__':
app.run()
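# Hedged client sketch (endpoint paths are defined above; the request body keys
# depend on utilities.DefaultArgs and 'missionspeed' is only a placeholder):
#     import requests
#     r = requests.post('http://localhost:5000/simulate', json={'missionspeed': 10})
#     print(r.json()['plottables'])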
|
#
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import unittest
from mbed_tools_lib.python_helpers import minimum_python_version, named_tuple_with_defaults
class TestPythonHelpers(unittest.TestCase):
def test_python_version(self):
# Tools only support Python>= 3.6
self.assertTrue(minimum_python_version(3, 6))
# With a bit of luck, python 100.100 is not coming tomorrow
self.assertFalse(minimum_python_version(100, 100))
def test_named_tuple_with_defaults(self):
named_tuple = named_tuple_with_defaults("TestNamedTuple", field_names=["field1", "field2"], defaults=[1, 2])
a_tuple = named_tuple()
self.assertIsNotNone(a_tuple)
self.assertEqual(a_tuple.field1, 1)
self.assertEqual(a_tuple.field2, 2)
a_tuple = named_tuple(field1=15)
self.assertEqual(a_tuple.field1, 15)
self.assertEqual(a_tuple.field2, 2)
|
import random
Column = []
for x in range(ord('A'),ord('I')+1):
Column.append(chr(x))
print(Column)
Rows = [None]*9
Game = [[0 for i in range(len(Column))] for j in range(len(Rows))]
print(Game,'\n')
mX = len(Game[0])
mY = len(Game)
print(mX)
print(mY,'\n')
Bombs = []
mB = _mB = 10
while mB>0:
ran = (random.randint(0,mX-1),random.randint(0,mY-1))
if not ran in Bombs:
Bombs.append(ran)
mB-=1
for x in Bombs:
cI = x[0]
rI = x[1]
for r in range(3):
if r+rI-1>=0 and r+rI-1<mY:
T = Game[r+rI-1]
for c in range(3):
                if c+cI-1>=0 and c+cI-1<mX:
if T[c+cI-1] == 0:
T[c+cI-1] = 1
else:
T[c+cI-1] += 1
Game[r+rI-1] = T
def game(game):
temp = ' '*5
t=0
for x in range(0, len(game)):
temp+=str(x)+' '*2
temp+='\n\n'
for x in game:
temp+=str(t)+' '*4
t+=1
for y in x:
temp+=str(y)+' '*2
temp+='\n'
return temp
print(game(Game))
for x in Bombs:
Game[x[1]][x[0]]='X'
print(game(Game))
showcase = [[0 for i in range(len(Column))] for j in range(len(Rows))]
print(game(showcase))
gg=0
def check(arr, ind):
for x in arr:
count = x.count(0)
ind-=count
    if ind==0: return True
    else: return False
var = ' '
def yikes(inp):
#print(game(showcase))
global gg
gg+=1
#print(gg)
if gg>100:
exit()
I = inp[0]
J = inp[1]
#print(inp)
if I>=0 and J>=0 and I<mX and J<mY:
if str(Game[I][J]) == str(0):
showcase[I][J] = var
if J+1<mY:
if str(showcase[I][J+1]) != str(var):
yikes((I,J+1))
if J-1>=0:
if str(showcase[I][J-1]) != str(var):
yikes((I,J-1))
if I+1<mX:
if str(showcase[I+1][J]) != str(var):
yikes((I+1,J))
if I-1>=0:
if str(showcase[I-1][J]) != str(var):
yikes((I-1,J))
else:
temp = Game[I][J]
if temp != 'X':
showcase[I][J] = Game[I][J]
else:
exit("You Lost")
else:
return False
while(True):
yikes((int(input("A: ")), int(input("B: "))))
if(check(showcase, _mB)):
exit('Won!')
print(game(Game))
print(game(showcase))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-02 17:06
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import picklefield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Estimator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('model_type', models.CharField(choices=[('classifier', 'classifier'), ('regression', 'regression'), ('clusters', 'clusters'), ('decomposition', 'decomposition')], max_length=32)),
('model_class', models.CharField(blank=True, default=None, max_length=255, null=True)),
('model_form', models.CharField(blank=True, default=None, max_length=512, null=True)),
('estimator', picklefield.fields.PickledObjectField(blank=True, default=None, editable=False, null=True)),
('build_time', models.DurationField(blank=True, default=None, null=True)),
('owner', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'get_latest_by': 'created',
'db_table': 'estimators',
},
),
migrations.CreateModel(
name='Score',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('metric', models.CharField(choices=[('accuracy', 'accuracy'), ('auc', 'auc'), ('brier', 'brier'), ('f1', 'f1'), ('fbeta', 'fbeta'), ('hamming', 'hamming'), ('hinge', 'hinge'), ('jaccard', 'jaccard'), ('logloss', 'logloss'), ('mcc', 'mcc'), ('precision', 'precision'), ('recall', 'recall'), ('roc', 'roc'), ('support', 'support'), ('mae', 'mae'), ('mse', 'mse'), ('mdae', 'mdae'), ('r2', 'r2'), ('rand', 'rand'), ('completeness', 'completeness'), ('homogeneity', 'homogeneity'), ('mutual', 'mutual'), ('silhouette', 'silhouette'), ('v', 'v'), ('time', 'time')], max_length=32)),
('score', models.FloatField(blank=True, default=None, null=True)),
('label', models.CharField(blank=True, default=None, max_length=32, null=True)),
('folds', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), blank=True, default=None, null=True, size=None)),
('estimator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='scores', to='arbiter.Estimator')),
],
options={
'get_latest_by': 'created',
'db_table': 'evaluations',
},
),
]
|
# https://www.shadertoy.com/view/stdGzH
import taichi as ti
ti.init()
res_x = 1024
res_y = 576
pixels = ti.Vector.field(3, ti.f32)
ti.root.dense(ti.i, res_x).dense(ti.j, res_y).place(pixels)
@ti.kernel
def render(t :ti.f32):
for i, j in pixels:
color = ti.Vector([0.0, 0.0, 0.0])
uv = ti.Vector([float(i) / res_x, float(j) / res_y])
mag = 100.0
uv*=10.0
uv[1]-=5.0
uv[1]+=ti.cos(uv[1] + t) * ti.sin(uv[0] + t) * ti.sin(uv[0] + t)
mag = ti.abs(4.0/(20.0 * uv[1]))
color = ti.Vector([mag, mag, mag])
pixels[i, j] = color
gui = ti.GUI("Glow", res=(res_x, res_y))
for i in range(50000):
t = i*0.03
render(t)
gui.set_image(pixels)
gui.show()
|
from collections import deque
from .tst import TernarySearchTree
from .corpustools import extract_fields, ContainsEverything
class LanguageModel():
"""N-gram (Markov) model that uses a ternary search tree.
Tracks frequencies and calculates probabilities.
Attributes
----------
n : int
Size of n-grams to be tracked.
vocabulary : set
If provided, n-grams containing words not in vocabulary are skipped.
Can be other container than set, if it has add method.
targets : container
If provided, n-grams not ending in target are counted as
ending in "OOV" (OutOfVocabulary) instead, so probabilities
can still be calculated.
must_contain : container
If provided, only n-grams containing at least one word in
must_contain are counted
boundary : str
N-grams crossing boundary will not be counted,
e.g. sentence </s> or document </doc> meta tags
splitchar : str
String that separates tokens in n-grams
"""
def __init__(self, n, boundary="</s>", splitchar="#",
vocabulary=None, targets=None, must_contain=None):
"""
Parameters
----------
n : int
Size of n-grams to be tracked.
boundary : str
N-grams crossing boundary will not be counted,
e.g. sentence </s> or document </doc> meta tags
splitchar : str
String that separates tokens in n-grams
vocabulary : set
If provided, n-grams with words not in vocabulary are skipped.
Can be other container than set, if it has add method.
targets : container
If provided, n-grams not ending in target are counted as
ending in "OOV" (OutOfVocabulary) instead, so probabilities
can still be calculated.
must_contain : container
If provided, only n-grams containing at least one word in
must_contain are counted
Notes
-----
If must_contain is provided, probabilities will be inaccurate. Only
use for counting target n-gram frequencies.
"""
if not targets:
targets = ContainsEverything()
if not vocabulary:
vocabulary = ContainsEverything()
self._n = n
self._counts = TernarySearchTree(splitchar)
self._vocabulary = vocabulary
self._targets = targets
self._boundary = boundary
self._splitchar = splitchar
self._must_contain = must_contain
def train(self, sequence):
"""Train model on all n-grams in sequence.
Parameters
----------
sequence : iterable of str
Sequence of tokens to train on.
Notes
-----
A sequence [A, B, C, D, E] with n==3 will result in these
n-grams:
[A, B, C]
[B, C, D]
[C, D, E]
[D, E]
[E]
"""
n_gram = deque(maxlen=self.n)
for element in sequence:
if element == self.boundary:
# train on smaller n-grams at end of sentence
# but exclude full n_gram if it was already trained
# on in last iteration
not_trained = len(n_gram) < self.n
for length in range(1, len(n_gram) + not_trained):
self._train(list(n_gram)[-length:])
n_gram.clear()
continue
n_gram.append(element)
if len(n_gram) == self.n:
if element not in self.targets:
self._train(list(n_gram)[:-1])
continue
self._train(n_gram)
# train on last n-grams in sequence
# ignore full n-gram if it has already been trained on
if len(n_gram) == self.n:
n_gram = list(n_gram)[1:]
for length in range(1, len(n_gram) + 1):
self._train(list(n_gram)[-length:])
def insert_sequence(self, counts,
is_string=True, subsequences=False):
"""Increase counts of sequence of ngrams by their frequencies.
Parameters
----------
counts : sequence of (str, int) tuples
Tuples of ngrams and their frequencies
is_string : bool
If True, ngrams are assumed to be strings.
Otherwise they are assumed to be tuples
of strings, which will be joined by self.splitchar.
subsequences : bool
If True, counts for subsequences of n-gram will
also be increased by frequency. A subsequence
is everything that ends in self.splitchar,
e.g. for "my#shiny#trigram", subsequences are
"my#shiny" and "my"
"""
for ngram, frequency in counts:
self.insert(ngram, frequency, is_string, subsequences)
def insert(self, ngram, frequency,
is_string=True, subsequences=False):
"""Increases count of n-gram by frequency.
Parameters
----------
ngram : str or sequence of str
n-gram as string or sequence of strings (words)
frequency : int
Frequency of n-gram
is_string : bool
If True, n-gram must be a string, with
self.splitchar (default '#') separating words.
subsequences : bool
If True, counts for subsequences of n-gram will
also be increased by frequency. A subsequence
is everything that ends in self.splitchar,
e.g. for "my#shiny#trigram", subsequences are
"my#shiny" and "my"
"""
if not is_string:
ngram = self.splitchar.join(ngram)
self._counts.insert(ngram, frequency,
subsequences)
def probability(self, sequence, predict_all=False):
"""Returns probability of the sequence.
Parameters
----------
sequence : iterable of str
Sequence of tokens to get the probability for
predict_all : bool
Return probability for each word in the sequence (True)
or only for the last word (False).
Returns
-------
float or list of float
Probability of last element or probabilities of all elements
"""
n_gram = deque(maxlen=self.n)
if predict_all:
probabilities = []
for element in sequence:
n_gram.append(element)
probability = self._probability(n_gram)
probabilities.append(probability)
return probabilities
else:
try:
n_gram.extend(sequence[-self.n:])
# if sequence is generator (cannot slice - TypeError),
# run through it and return probability for final element
except TypeError:
for element in sequence:
n_gram.append(element)
probability = self._probability(n_gram)
return probability
def all_target_probabilities(self, return_n_gram=False, sizes=None):
"""Generator yielding probabilities and frequencies
of all encountered targets.
Parameters
----------
return_n_gram : bool
Return full n-gram rather than just target with results
sizes: list of int
Sizes of n-grams to be returned, defaults to target size
Returns
-------
generator
            Generator yielding (target, frequency, probability) tuples,
            or (n-gram, frequency, probability) tuples if return_n_gram=True
"""
if not sizes:
sizes = [self.n]
for n_gram_string, frequency in self.completions():
n_gram = n_gram_string.split(self.splitchar)
if len(n_gram) not in sizes:
continue
target = n_gram[-1]
if target in self.targets:
probability = self._probability(n_gram)
if return_n_gram:
yield n_gram, frequency, probability
else:
yield target, frequency, probability
def frequency(self, n_gram):
"""Return frequency of n_gram.
Parameters
----------
n_gram : str or list/tuple of str
Returns
-------
int
Frequency
"""
if isinstance(n_gram, str):
n_gram = n_gram.split(self.splitchar)
if self.must_contain:
if not any(word in self.must_contain for word in n_gram):
return 0
n_gram_string = self.splitchar.join(n_gram)
frequency = self._counts.frequency(n_gram_string)
return frequency
def completions(self, prefix=""):
"""Generator that returns all completions for a given prefix.
Parameters
----------
prefix : str
Prefix that all results returned begin with.
Yields
-------
Tuple
Each complete n-gram with frequency as a (str, int)-tuple
"""
if not self.must_contain:
            yield from self._counts.completions(prefix)
            return
for completion, frequency in self._counts.completions(prefix):
completion_ = completion.split("#")
if not any(word in self.must_contain for word in completion_):
continue
yield completion, frequency
def _train(self, n_gram):
# test for OOV words
for idx, word in enumerate(n_gram):
if word not in self.vocabulary:
n_gram = list(n_gram)[:idx]
break
# ensure n-gram contains target word if provided
if self.must_contain:
if not any([word in self.must_contain
for word in n_gram]):
return
n_gram_string = self.splitchar.join(n_gram)
self._counts.insert(n_gram_string)
def _probability(self, n_gram):
frequency = self.frequency(n_gram)
if frequency == 0:
return 0
*preceding, target = n_gram
preceding = self.splitchar.join(preceding)
total = self._counts.frequency(preceding)
probability = frequency / total
return probability
def __contains__(self, n_gram):
return self.frequency(n_gram)
def __iter__(self):
return self.completions()
@property
def n(self):
return self._n
@property
def vocabulary(self):
return self._vocabulary
@property
def targets(self):
return self._targets
@property
def must_contain(self):
return self._must_contain
@property
def boundary(self):
return self._boundary
@property
def splitchar(self):
return self._splitchar
def train_lm(corpus, n,
vocabulary=None, targets=None, must_contain=None,
**kwargs):
"""Convenience function to train n-gram model on tagged corpus.
"""
corpus = extract_fields(corpus, **kwargs)
lm = LanguageModel(n,
vocabulary=vocabulary,
targets=targets,
must_contain=must_contain)
lm.train(corpus)
return lm
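# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A toy example of the LanguageModel API. It assumes this module is run as part of its
# package (e.g. via `python -m ...`) so the relative imports above resolve; the corpus
# below is made up and "</s>" marks sentence boundaries as described in the docstrings.
if __name__ == "__main__":
    lm = LanguageModel(n=3)
    lm.train("the cat sat </s> the cat slept </s>".split())
    # Frequency of a full trigram that was seen during training
    print(lm.frequency(["the", "cat", "sat"]))
    # Iterate over all stored n-grams and their counts
    for ngram, freq in lm.completions():
        print(ngram, freq)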
|
#!/usr/bin/env python
from __future__ import print_function, division
import argparse
import rospy
from tf import transformations
import tf2_ros
import numpy
from StringIO import StringIO
from std_msgs.msg import String
from visualization_msgs.msg import Marker, MarkerArray
from ihmc_msgs.msg import FootstepDataListRosMessage
from ihmc_msgs.msg import FootstepDataRosMessage
LEFT = 0
RIGHT = 1
"""
REGEX: You can use this hasty regex to do most of the work of reformatting a message from
/ihmc_ros/valkyrie/control/footstep_list to this format
FIND: \s*-\s*origin: \d+\s*robot_side: (\d)\s*location:\s*x: ([\d.e+-]+)\s*y: ([\d.e+-]+)\s*z: ([\d.e+-]+)\s*orientation: \s*x: ([\d.e+-]+)\s*y: ([\d.e+-]+)\s*z: ([\d.e+-]+)\s*w: ([\d.e+-]+)[^-]*unique_id: \d+
REPLACE: ($1, ($2, $3, $4), ($5, $6, $7, $8)), \n
"""
footstep_sets = {
-90: (
# pelvis-to-world tf (rosrun tf tf_echo /pelvis /world)
([0.103, -0.146, -0.994], [0.000, -0.000, 1.000, 0.002]),
# footsteps (from rostopic echo /ihmc_ros/valkyrie/control/footstep_list, then reformatted using the regex)
[
# (foot, position (xyz), quaternion (xyzw)
(0, (0.125, -0.275, 0.0982290995701), (0.0, 0.0, 1.0, 6.12323399574e-17)),
(1, (0.025, 0.025, 0.0982304462606), (0.0, 0.0, -0.995184726672, 0.0980171403296)),
(0, (0.075, -0.325, 0.0982290995701), (0.0, 0.0, -0.903989293123, 0.42755509343)),
(1, (-0.075, -0.075, 0.0982304462606), (0.0, 0.0, -0.831469612303, 0.55557023302)),
(0, (0.275, -0.025, 0.0982290995701), (0.0, 0.0, -0.707106781187, 0.707106781187)),
(1, (0.0190632732585, -0.0171800019412, 0.0982304462606), (0.0, 0.0, -0.705664962114, 0.708545666309))
]
),
90: (
([-0.060, 0.049, -0.985], [0.000, 0.000, 0.010, 1.000]),
[
(0, (0.025, 0.075, 0.0900992894832), (0.0, 0.0, 0.0, 1.0)),
(1, (-0.075, -0.225, 0.0901028669253), (0.0, 0.0, 0.0980171403296, 0.995184726672)),
(0, (-0.125, 0.125, 0.0900992894832), (0.0, 0.0, 0.42755509343, 0.903989293123)),
(1, (0.225, -0.025, 0.0901028669253), (0.0, 0.0, 0.595699304492, 0.803207531481)),
(0, (-0.125, -0.125, 0.0900992894832), (0.0, 0.0, 0.671558954847, 0.740951125355)),
(1, (0.147420042966, -0.153386633716, 0.0901028669253), (0.0, 0.0, 0.683880261288, 0.729594262738))
]
)
}
class ArgumentParserError(Exception): pass
class ThrowingArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise ArgumentParserError(message)
def footstep_marker(i, step):
"""
:param int i:
:param FootstepDataRosMessage step:
:return Marker:
"""
m = Marker()
m.header.stamp = rospy.Time.now()
m.header.frame_id = '/world'
m.id = i
m.type = Marker.CUBE
m.action = Marker.ADD
m.pose.position.x = step.location.x
m.pose.position.y = step.location.y
m.pose.position.z = step.location.z
m.pose.orientation.x = step.orientation.x
m.pose.orientation.y = step.orientation.y
m.pose.orientation.z = step.orientation.z
m.pose.orientation.w = step.orientation.w
m.scale.x = 0.27
m.scale.y = 0.18
m.scale.z = 0.015
if step.robot_side == RIGHT:
m.color.r = 0
m.color.g = 1
else:
m.color.r = 1
m.color.g = 0
m.color.a = 0.6
return m
def empty_marker(i):
"""
:param int i:
:return Marker:
"""
m = Marker()
m.header.stamp = rospy.Time.now()
m.header.frame_id = '/world'
m.id = i
m.action = Marker.DELETE
return m
def make_steps(step_set):
msg = FootstepDataListRosMessage()
msg.default_transfer_time = 1.0
msg.default_swing_time = 1.0
reference_pose, steps = step_set
msg.footstep_data_list = [make_footstep(reference_pose, *step) for step in steps]
msg.unique_id = 1
return msg
# Creates footstep offset from the current foot position. The offset is in foot frame.
def make_footstep(reference_pose, side, offset_posn, offset_quat):
step = FootstepDataRosMessage()
step.robot_side = side
old_world_to_point = numpy.dot(transformations.translation_matrix(offset_posn),
transformations.quaternion_matrix(offset_quat))
pelvis_to_old_world = numpy.dot(transformations.translation_matrix(reference_pose[0]),
transformations.quaternion_matrix(reference_pose[1]))
pelvis_tf_msg = tfBuffer.lookup_transform('world', 'pelvis', rospy.Time())
q = pelvis_tf_msg.transform.rotation
t = pelvis_tf_msg.transform.translation
new_world_to_pelvis = numpy.dot(transformations.translation_matrix((t.x, t.y, t.z)),
transformations.quaternion_matrix((q.x, q.y, q.z, q.w)))
new_world_to_point = new_world_to_pelvis.dot(pelvis_to_old_world).dot(old_world_to_point)
step.location.x, step.location.y, step.location.z = transformations.translation_from_matrix(new_world_to_point)
step.orientation.x, step.orientation.y, step.orientation.z, step.orientation.w = \
transformations.quaternion_from_matrix(new_world_to_point)
foot_COP_tf_msg = tfBuffer.lookup_transform('world', 'leftCOP_Frame', rospy.Time())
# Ensure z is always at foot height
step.location.z = foot_COP_tf_msg.transform.translation.z
return step
if __name__ == '__main__':
try:
parser = ThrowingArgumentParser(description='Rotate valkyrie using predefined footsteps that are known collision-free.')
parser.add_argument('angle', type=int, choices=footstep_sets.keys(),
help="The angle to rotate relative to pelvis")
rospy.init_node('walk_rotate_safe')
log_pub = rospy.Publisher('/field/log', String, queue_size=10)
def log_msg(val):
val = rospy.get_name() + ": " + val
msg = String(val)
log_pub.publish(msg)
rospy.loginfo(val)
# Wait a reasonable amount of time for log_pub to connect
wait_until = rospy.Time.now() + rospy.Duration(0.5)
while log_pub.get_num_connections() == 0 and rospy.Time.now() < wait_until:
rospy.sleep(0.1)
try:
args = parser.parse_args()
except ArgumentParserError as e:
f = StringIO()
parser.print_usage(f)
log_msg(f.getvalue() + e.message)
argparse.ArgumentParser.error(parser, e.message)
footStepListPublisher = rospy.Publisher('/ihmc_ros/valkyrie/control/footstep_list', FootstepDataListRosMessage, queue_size=1)
# Set up TF so we can place footsteps relative to the world frame
# these go into the global scope
tfBuffer = tf2_ros.Buffer()
tfListener = tf2_ros.TransformListener(tfBuffer)
vis_pub = rospy.Publisher('/footstep_planner/footsteps_array', MarkerArray, queue_size=10)
rospy.sleep(1)
msg = make_steps(footstep_sets[args.angle])
ma = MarkerArray()
ma.markers = [footstep_marker(i, step) for i, step in enumerate(msg.footstep_data_list)]
ma.markers.extend([empty_marker(i) for i in range(len(ma.markers), 100)])
vis_pub.publish(ma)
log_msg("Rotating " + str(args.angle) + " using hard coded step list")
footStepListPublisher.publish(msg)
log_msg("Node finished, footsteps may still be executing")
except rospy.ROSInterruptException:
pass
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2021 the AAS WorldWide Telescope project
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import pytest
from . import test_path
from .. import collection
try:
from astropy.io import fits
HAS_ASTRO = True
except ImportError:
HAS_ASTRO = False
class TestCollection(object):
@pytest.mark.skipif('not HAS_ASTRO')
def test_is_multi_tan(self):
coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')])
assert coll._is_multi_tan()
coll = collection.SimpleFitsCollection([test_path('herschel_spire.fits.gz'),
test_path('herschel_spire.fits.gz')])
assert coll._is_multi_tan()
coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz'),
test_path('herschel_spire.fits.gz')])
assert not coll._is_multi_tan()
|
import numpy as np
import time
from copy import deepcopy
from functools import partial, reduce
from itertools import product
from collections.abc import Mapping, Sequence, Iterable
import operator
from simulation.model.epidemic import Epidemic
class ParameterSearch:
"""Gradient-based search to estimate network characteristics and disease
dynamics of a pandemic, based on the SIRV model"""
def __init__(self,
graph_generator,
graph_generator_check,
n_nodes,
steps,
n_infected_init,
vacc,
ni_target):
"""Set the vaccination vector, the steps of the simulation and the
newly infected individuals target.
Args:
graph_generator: function to generate a nx graph, used to run
simulations
graph_generator_check: function to check validity of parameters
for the graph
n_nodes: total number of individuals in the population
steps: number of steps (e.g., weeks) of the simulation
n_infected_init: number of infected nodes in the initial
configuration, chosen randomly among all nodes of the graph
according to a uniform probability distribution
vacc: total fraction of population that has received vaccination by
each week
ni_target: newly infected nodes target
"""
self.vacc = vacc
self.steps = steps
self.ni_target = np.array(ni_target)
self.n_infected_init = n_infected_init
self.generator = graph_generator
self.generator_check = graph_generator_check
self.n_nodes = n_nodes
self.best_params = None
def search(self,
graph_initial_params,
epidemic_initial_params,
graph_delta_params,
epidemic_delta_params,
graph_delta_fine_params,
epidemic_delta_fine_params,
simulations_per_grid):
"""Estimate the best parameters for the simulation according to the
newly infected individuals target `ni_target`.
Args:
graph_initial_params (dict): initial parameters for the graph
epidemic_initial_params (dict): initial parameters for the epidemic,
namely beta and rho
graph_delta_params (dict): deltas for searching the parameter space
of the graph in the first phase
epidemic_delta_params (dict): deltas for searching the parameter
space of the epidemic model in the first phase
graph_delta_fine_params (dict): deltas for searching the parameter
space of the graph in the second phase
epidemic_delta_fine_params (dict): deltas for searching the
parameter space of the epidemic model in the second phase
simulations_per_grid (int): number of simulations to run for each
parameter set
"""
graph_param_names = set(graph_initial_params.keys())
epidemic_param_names = set(epidemic_initial_params.keys())
# Merge dicts for constructing parameter space
current_params = {**graph_initial_params, **epidemic_initial_params}
delta_params = {**graph_delta_params, **epidemic_delta_params}
delta_fine_params = {**graph_delta_fine_params, **epidemic_delta_fine_params}
iteration_i = 0
fine = False # If False, we are in the first phase. If True, we are in the second phase.
end = False
start = time.time()
# Keep descending the gradient until we reach a local optimum,
# i.e., the previous set of best parameters is equal to the
# current set of best parameters.
while not end:
iteration_i += 1
print(f"Iteration {iteration_i} params={current_params}")
# Construct the search space with the following format.
            # {"a": [current_params["a"]-delta_params["a"], current_params["a"], current_params["a"]+delta_params["a"]],
            #  ...,
            #  "z": [current_params["z"]-delta_params["z"], current_params["z"], current_params["z"]+delta_params["z"]]}
search_space_params = {}
if not fine:
for k, v in current_params.items():
search_space_params[k] = [v - delta_params[k], v, v + delta_params[k]]
else:
for k, v in current_params.items():
search_space_params[k] = [v - delta_fine_params[k], v, v + delta_fine_params[k]]
            # Generate a list of parameters, based on the search space, over
# which simulations will be run. Our goal is to find the best set of
# parameters among these.
grid_params = list(ParameterGrid(search_space_params))
# Initialize the loss array (RMSE)
loss = np.full(len(grid_params), np.inf)
for i, grid_i in enumerate(grid_params):
# Split grid into epidemic parameters and graph parameters
graph_grid_i = {**{k: grid_i[k] for k in graph_param_names}, 'n': self.n_nodes}
epidemic_grid_i = {k: grid_i[k] for k in epidemic_param_names}
# Skip invalid grids
if not self.generator_check(**graph_grid_i) \
or not Epidemic.parameter_check(n_infected_init=self.n_infected_init, **epidemic_grid_i):
continue
# Perform a step and compute the loss
loss[i] = self.search_step(graph_grid_i, epidemic_grid_i, simulations_per_grid, seed=42)
prev_params = deepcopy(current_params)
current_params = grid_params[int(np.argmin(loss))]
print(f"Lowest loss {np.min(loss)} for grid set {current_params}")
if self.isclose(current_params, prev_params):
if not fine:
print("Switching to finer delta grid")
fine = True
else:
end = True
print(f"Best parameter set {current_params} after {iteration_i} iteration(s)")
print(f"Time elapsed: {time.time() - start}")
self.best_params = current_params
def basic_search(self,
graph_initial_params,
epidemic_initial_params,
graph_delta_params,
epidemic_delta_params,
simulations_per_grid):
"""Estimate the best parameters for the simulation according to the
        newly infected individuals target `ni_target`. This is a basic version
        of the search algorithm on which `search` is based.
Args:
graph_initial_params (dict): initial parameters for the graph
epidemic_initial_params (dict): initial parameters for the epidemic,
namely beta and rho
graph_delta_params (dict): deltas for searching the parameter space
of the graph
epidemic_delta_params (dict): deltas for searching the parameter
space of the epidemic model
simulations_per_grid (int): number of simulations to run for each
parameter set
"""
graph_param_names = set(graph_initial_params.keys())
epidemic_param_names = set(epidemic_initial_params.keys())
# Merge dicts for constructing parameter space
current_params = {**graph_initial_params, **epidemic_initial_params}
delta_params = {**graph_delta_params, **epidemic_delta_params}
iteration_i = 0
end = False
start = time.time()
# Keep descending the gradient until we reach a local optimum,
# i.e., the previous set of best parameters is equal to the
# current set of best parameters.
while not end:
iteration_i += 1
print(f"Iteration {iteration_i} params={current_params}")
# Construct the search space with the following format.
            # {"a": [current_params["a"]-delta_params["a"], current_params["a"], current_params["a"]+delta_params["a"]],
            #  ...,
            #  "z": [current_params["z"]-delta_params["z"], current_params["z"], current_params["z"]+delta_params["z"]]}
search_space_params = {}
for k, v in current_params.items():
search_space_params[k] = [v - delta_params[k], v, v + delta_params[k]]
            # Generate a list of parameters, based on the search space, over
# which simulations will be run. Our goal is to find the best set of
# parameters among these.
grid_params = list(ParameterGrid(search_space_params))
# Initialize the loss array (RMSE)
loss = np.full(len(grid_params), np.inf)
for i, grid_i in enumerate(grid_params):
# Split grid into epidemic parameters and graph parameters
graph_grid_i = {**{k: grid_i[k] for k in graph_param_names}, 'n': self.n_nodes}
epidemic_grid_i = {k: grid_i[k] for k in epidemic_param_names}
# Skip invalid grids
if not self.generator_check(**graph_grid_i) \
or not Epidemic.parameter_check(n_infected_init=self.n_infected_init, **epidemic_grid_i):
continue
loss[i] = self.search_step(graph_grid_i, epidemic_grid_i, simulations_per_grid)
prev_params = deepcopy(current_params)
current_params = grid_params[int(np.argmin(loss))]
if self.isclose(current_params, prev_params):
end = True
print(f"Lowest loss {np.min(loss)} for grid set {current_params}")
print(f"Best parameter set {current_params} after {iteration_i} iteration(s)")
print(f"Time elapsed: {time.time() - start}")
self.best_params = current_params
def search_step(self, graph_params, epidemic_params, simulations, seed=None):
"""Perform a search step consisting in simulating a specific parameter
grid for #simulations iterations. Returns the loss function computed
against the newly infected target vector.
Args:
graph_params (dict): parameters for the graph
epidemic_params (dict): parameters for the epidemic, namely beta
and rho
simulations (int): number of simulations to run for the current grid
seed (int): if not None, graph structure is predictable according to
the chosen seed
Returns:
loss: loss function computed on the current grid
"""
if seed is not None:
np.random.seed(seed) # Set the seed (predictable graph for same parameters)
graph = self.generator(**graph_params)
np.random.seed() # Release seed
else:
graph = self.generator(**graph_params)
epidemic = Epidemic('sirv', graph, self.steps,
n_infected_init=self.n_infected_init, vacc=self.vacc, **epidemic_params)
# Perform simulations_per_grid in order to find a significant
# result for newly infected nodes per week (variability is too
# high if we perform a single simulation)
ni = np.zeros((simulations, self.steps + 1))
for sim_id in range(simulations):
sim = epidemic.simulate()
ni[sim_id] = np.array(
[self.n_infected_init] +
[((sim[t - 1] == 0) & (sim[t] == 1)).sum() for t in range(1, self.steps + 1)],
dtype=int
)
ni = ni.mean(axis=0)
return self.rmse(ni, self.ni_target)
@staticmethod
def rmse(ni, ni_target):
n_steps = len(ni) - 1
return np.sqrt((1 / n_steps) * ((ni - ni_target) ** 2).sum())
@staticmethod
def isclose(dict1, dict2):
for k in dict1.keys():
if not np.isclose(dict1[k], dict2[k]):
return False
return True
class ParameterGrid:
"""scikit-learn (0.24.1) -- sklearn.model_selection._search.ParameterGrid"""
def __init__(self, param_grid):
if not isinstance(param_grid, (Mapping, Iterable)):
raise TypeError('Parameter grid is not a dict or '
'a list ({!r})'.format(param_grid))
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
# check if all entries are dictionaries of lists
for grid in param_grid:
if not isinstance(grid, dict):
raise TypeError('Parameter grid is not a '
'dict ({!r})'.format(grid))
for key in grid:
if not isinstance(grid[key], Iterable):
raise TypeError('Parameter grid value is not iterable '
'(key={!r}, value={!r})'
.format(key, grid[key]))
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid."""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
prod = partial(reduce, operator.mul)
return sum(prod(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
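# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Minimal demonstration of how ParameterGrid expands a search-space dict into the
# cartesian product of its values (keys are iterated in sorted order). Assumes the
# surrounding package (simulation.model.epidemic) is importable, since it is imported
# at the top of this file.
if __name__ == "__main__":
    demo_grid = ParameterGrid({"a": [1, 2], "b": [3]})
    assert len(demo_grid) == 2
    assert list(demo_grid) == [{"a": 1, "b": 3}, {"a": 2, "b": 3}]
    print(list(demo_grid))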
|
# inclass/elephant.py
# Not sure why, but the .env file disappeared.
# I would recreate it, but I don't want to confuse
# Python when it goes to run the other files in the
# inclass folder.
import os
import psycopg2 as psycho
from dotenv import load_dotenv
load_dotenv()
DB_NAME = os.getenv("DB_NAME", "Invalid DB_NAME value")
DB_USER = os.getenv("DB_USER", "Invalid DB_USER value")
DB_PW = os.getenv("DB_PW", "Invalid DB_PW value")
DB_HOST = os.getenv("DB_HOST", "Invalid DB_HOST value")
connection = psycho.connect(dbname=DB_NAME,
user=DB_USER,
password=DB_PW,
host=DB_HOST)
print(type(connection)) #> <class 'psycopg2.extensions.connection'>
cursor = connection.cursor()
print(type(cursor)) #> <class 'psycopg2.extensions.cursor'>
cursor.execute("SELECT * from test_table;")
#results = cursor.fetchone()
results = cursor.fetchall()
for row in results:
print(type(row), row)
|
import uuid
from pprint import pformat
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import pandas as pd
from .utils import add_default_to_data
from .utils import create_db_folders
from .utils import dump_cached_schema
from .utils import dump_db
from .utils import generate_hash_id
from .utils import get_db_path
from .utils import load_cached_schema
from .utils import load_db
from .utils import validate_data_with_schema
from .utils import validate_query_data
from .utils import validate_schema
from .utils import validate_update_data
from onstrodb.errors.common_errors import DataDuplicateError
from onstrodb.errors.common_errors import DataError
from onstrodb.errors.schema_errors import SchemaError
# types
DBDataType = Dict[str, object]
SchemaDictType = Dict[str, Dict[str, object]]
GetType = Union[Dict[str, Union[Dict[str, object], str]], None]
class OnstroDb:
"""The main API for the DB"""
def __init__(self, db_name: str, schema: Optional[SchemaDictType] = None,
db_path: Optional[str] = None, allow_data_duplication: bool = False,
in_memory: bool = False) -> None:
self._db_name = db_name
self._schema = schema
self._data_dupe = allow_data_duplication
self._in_memory = in_memory
# db variables
self._db: pd.DataFrame = None
self._db_path: str = get_db_path(db_name)
if db_path:
self._db_path = f"{db_path}/{self._db_name}"
# validate the user defined schema
self._validate_schema()
# meta data about the db
if self._schema:
self._columns = list(self._schema.keys())
# start the loading sequence
self._load_initial_schema()
self._reload_db()
def __repr__(self) -> str:
return pformat(self._to_dict(self._db), indent=4, width=80, sort_dicts=False)
def __len__(self) -> int:
return len(self._db.index)
def add(self, values: List[Dict[str, object]], get_hash_id: bool = False) -> Union[None, List[str]]:
"""Adds a list of values to the DB"""
new_data: List[Dict[str, object]] = []
new_hashes: List[str] = []
for data in values:
if self._schema:
if validate_data_with_schema(data, self._schema):
data = add_default_to_data(data, self._schema)
hash_id = self._get_hash(
[str(i) for i in data.values()], list(self._db.index) + new_hashes)
new_data.append(data)
new_hashes.append(hash_id)
else:
raise DataError(
f"The data {data!r} does not comply with the schema")
new_df = pd.DataFrame(new_data, new_hashes)
try:
self._db = pd.concat([self._db, new_df],
verify_integrity=not self._data_dupe)
except ValueError:
raise DataDuplicateError(
"The data provided, contains duplicate values") from None
if get_hash_id:
return new_hashes
return None
def get_by_query(self, query: Dict[str, object]) -> GetType:
"""Get values from the DB. queries must comply with the schema and must be of length 1"""
if self._schema:
if validate_query_data(query, self._schema):
key = list(query)[0]
filt = self._db[key] == query[key]
return self._to_dict(self._db.loc[filt])
return None
def get_by_hash_id(self, hash_id: str) -> GetType:
"""Get values from the DB based on their hash ID"""
if hash_id in self._db.index:
return self._to_dict(self._db.loc[hash_id])
return {}
def get_hash_id(self, condition: Dict[str, object]) -> List[str]:
"""Returns a hash id or a list of ids that matches all the conditions"""
# the validate_update_method can be used as the same verification style is required here.
if self._schema:
if validate_update_data(condition, self._schema):
return list(self._db.loc[(self._db[list(condition)]
== pd.Series(condition)).all(axis=1)].index)
return []
def get_all(self) -> GetType:
"""Return the entire DB in a dict representation"""
return self._to_dict(self._db)
def update_by_query(self, query: Dict[str, object], update_data: DBDataType) -> Dict[str, str]:
"""Update the records in the DB with a query"""
u_db = self._db.copy(deep=True)
if self._schema:
if validate_query_data(query, self._schema) and validate_update_data(update_data, self._schema):
q_key = list(query)[0]
q_val = query[q_key]
filt = u_db[q_key] == q_val
for key, val in update_data.items():
u_db.loc[filt, key] = val
# update the indexes
new_vals = u_db.loc[filt].to_dict("index")
new_idx = self._verify_and_get_new_idx(
new_vals, list(u_db.index))
if new_idx:
new_df = self._update_hash_id(new_idx, u_db)
self._db = new_df.copy(deep=True)
del [u_db, new_df]
return new_idx
return {}
def update_by_hash_id(self, hash_id: str, update_data: DBDataType) -> Dict[str, str]:
"""Update the records in the DB using their hash id"""
u_db = self._db.copy(deep=True)
if hash_id in u_db.index:
if self._schema:
if validate_update_data(update_data, self._schema):
for key, val in update_data.items():
u_db.loc[hash_id, key] = val
# update the indexes
new_vals = pd.DataFrame(
u_db.loc[hash_id].to_dict(), index=[hash_id]).to_dict("index")
new_idx = self._verify_and_get_new_idx(
new_vals, list(u_db.index))
if new_idx:
new_df = self._update_hash_id(new_idx, u_db)
self._db = new_df.copy(deep=True)
del [u_db, new_df]
return new_idx
return {}
def delete_by_query(self, query: Dict[str, object]) -> None:
"""Delete the records from the db that complies to the query"""
if self._schema:
if validate_query_data(query, self._schema):
key = list(query)[0]
filt = self._db[key] != query[key]
self._db = self._db.loc[filt]
def delete_by_hash_id(self, hash_id: str) -> None:
"""Delete the a records from thr DB based on their hash_id"""
ids = list(self._db.index)
if hash_id in ids:
self._db = self._db.drop(hash_id)
def raw_db(self) -> pd.DataFrame:
"""Returns the in in memory representation of the DB"""
return self._db.copy(deep=True)
def purge(self) -> None:
"""Removes all the data from the runtime instance of the db"""
self._db = self._db.iloc[0:0]
def commit(self) -> None:
"""Store the current db in a file"""
if isinstance(self._db, pd.DataFrame):
if not self._in_memory:
dump_db(self._db, self._db_path, self._db_name)
def _get_hash(self, values: List[str], hash_list: List[str]) -> str:
"""returns the hash id based on the dupe value"""
def gen_dupe_hash(extra: int = 0) -> str:
if extra:
hash_ = generate_hash_id(values + [str(extra)])
else:
hash_ = generate_hash_id(values)
if hash_ in hash_list:
return gen_dupe_hash(uuid.uuid4().int)
else:
hash_list.append(hash_)
return hash_
if not self._data_dupe:
return generate_hash_id(values)
else:
return gen_dupe_hash()
def _update_hash_id(self, new_hashes: Dict[str, str], _df: pd.DataFrame) -> pd.DataFrame:
"""Updates the hash to the new hashes """
for idx, hash_ in new_hashes.items():
_df.rename(index={idx: hash_}, inplace=True)
return _df
def _verify_and_get_new_idx(self, new_vals: Dict[str, Dict[str, object]], hash_list: List[str]) -> Dict[str, str]:
"""verify whether the updated is not a duplicate of an existing data"""
new_hashes: Dict[str, str] = {}
idxs = list(new_vals)
for k, v in new_vals.items():
hash_ = self._get_hash(
list(map(str, v.values())), hash_list)
if hash_ in self._db.index or (hash_ in idxs and k != hash_) or hash_ in new_hashes.values():
if not self._data_dupe:
new_hashes.clear()
raise DataDuplicateError(
"The updated data is a duplicate of an existing data in the DB")
else:
new_hashes[k] = hash_
else:
new_hashes[k] = hash_
return new_hashes
def _to_dict(self, _df: Union[pd.DataFrame, pd.Series]) -> Dict[str, Union[Dict[str, object], str]]:
"""Returns the dict representation of the DB based on
the allow_data_duplication value
"""
if isinstance(_df, pd.DataFrame):
return _df.to_dict("index")
else:
return _df.to_dict()
def _validate_schema(self) -> None:
if self._schema:
validate_schema(self._schema)
def _reload_db(self) -> None:
"""Reload the the pandas DF"""
if not self._in_memory:
data = load_db(self._db_path, self._db_name)
if isinstance(data, pd.DataFrame):
self._db = data
else:
self._db = pd.DataFrame(columns=self._columns)
else:
self._db = pd.DataFrame(columns=self._columns)
def _load_initial_schema(self) -> None:
"""Loads the schema that was provided when the DB was created for the first time"""
if not self._in_memory:
create_db_folders(self._db_path)
if not self._in_memory:
schema = load_cached_schema(self._db_path)
else:
schema = None
if schema:
if self._schema:
if not schema == self._schema:
raise SchemaError(
"The schema provided does not match with the initial schema")
else:
self._schema = schema.copy()
self._columns = list(self._schema.keys())
else:
if not self._schema:
raise SchemaError("The schema is not provided")
else:
if not self._in_memory:
dump_cached_schema(self._db_path, self._schema)
|
#!/usr/bin/python
# Import modules for CGI handling
import cgi, cgitb
# Create instance of FieldStorage
form = cgi.FieldStorage()
# Get data from fields
if form.getvalue('after845'):
after845 = "Y"
else:
after845 = "N"
if form.getvalue('noisyCats'):
noisyCats = "Y"
else:
noisyCats = "N"
if form.getvalue('foodInBowl'):
foodInBowl = "Y"
else:
foodInBowl = "N"
if form.getvalue('catReallyHungry'):
catReallyHungry = "Y"
else:
catReallyHungry = "N"
if form.getvalue('sixHoursLate'):
sixHoursLate = "Y"
else:
sixHoursLate = "N"
# Original Logic (pretty much)
if after845 == "Y" and noisyCats == "Y" and not foodInBowl == "Y" :
decision = "Feed the beasts now"
elif catReallyHungry == "Y" and sixHoursLate == "Y" :
decision = "Feed cats so they will shut up"
else :
decision = "Nothing to do yet"
# Build the html for the response
print "Content-type:text/html\r\n\r\n"
print "<html>"
print "<head>"
print "<title>Cat Feeding Dilemma</title>"
print "</head>"
print "<img src=/satisfied_cats.png >"
print "<body>"
print "<h1> To feed or not to feed ...</h1>"
print "<h2> After 8:45 is : %s</h2>" % after845
print "<h2> Cats Making Noise is : %s</h2>" % noisyCats
print "<h2> Food in Bowl is : %s</h2>" % foodInBowl
print "<h2> Cat Really Hungry is : %s</h2>" % catReallyHungry
print "<h2> Six hours since last feeding is : %s</h2>" % sixHoursLate
print ( "<h2> Decision: %s</h2>" % decision )
print "</body>"
print "</html>"
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Cross-Origin Resource Sharing (CORS) Headers."""
import freezegun
import responses
import d1_common
import d1_common.const
import d1_gmn.tests.gmn_mock
import d1_gmn.tests.gmn_test_case
import d1_test.d1_test_case
@d1_test.d1_test_case.reproducible_random_decorator("TestCors")
@freezegun.freeze_time("1981-01-02")
class TestCors(d1_gmn.tests.gmn_test_case.GMNTestCase):
@responses.activate
def test_1000(self, gmn_client_v1_v2):
"""No CORS headers are included in 404 caused by non-existing endpoint."""
with d1_gmn.tests.gmn_mock.disable_auth():
response = gmn_client_v1_v2.GET(["bogus", "endpoint"])
self.sample.assert_equals(response, "get_bogus_endpoint", gmn_client_v1_v2)
@responses.activate
def test_1010(self, gmn_client_v1_v2):
"""Invalid method against endpoint raises 405 Method Not Allowed and returns
        regular and CORS headers with allowed methods (POST /object/invalid_pid).
"""
with d1_gmn.tests.gmn_mock.disable_auth():
response = gmn_client_v1_v2.POST(["object", "invalid_pid"])
self.sample.assert_equals(response, "post_object_pid", gmn_client_v1_v2)
@responses.activate
def test_1020(self, gmn_client_v1_v2):
"""listObjects(): The expected CORS headers are included in regular response."""
with d1_gmn.tests.gmn_mock.disable_auth():
response = gmn_client_v1_v2.GET(["object"], params={"count": "10"})
self.sample.assert_equals(response, "get_listobjects", gmn_client_v1_v2)
@responses.activate
def test_1030(self, gmn_client_v1_v2):
"""get(): The expected CORS headers are included in regular response."""
pid, sid, sciobj_bytes, sysmeta_pyxb = self.create_obj(gmn_client_v1_v2)
with d1_gmn.tests.gmn_mock.disable_auth():
response = gmn_client_v1_v2.GET(["object", pid])
self.sample.assert_equals(response, "get_valid_object", gmn_client_v1_v2)
@responses.activate
def test_1040(self, gmn_client_v1_v2):
"""listObjects(): OPTIONS request returns expected headers."""
with d1_gmn.tests.gmn_mock.disable_auth():
response = gmn_client_v1_v2.OPTIONS(["object"])
self.sample.assert_equals(response, "list_objects_options", gmn_client_v1_v2)
@responses.activate
def test_1050(self, gmn_client_v1_v2):
"""get(): OPTIONS request returns expected headers."""
pid, sid, sciobj_bytes, sysmeta_pyxb = self.create_obj(gmn_client_v1_v2)
with d1_gmn.tests.gmn_mock.disable_auth():
response = gmn_client_v1_v2.OPTIONS(["object", pid])
self.sample.assert_equals(response, "get_options", gmn_client_v1_v2)
@responses.activate
def test_1060(self, gmn_client_v2):
"""getPackage(): OPTIONS request returns expected headers."""
pid_list = self.create_multiple_objects(gmn_client_v2, object_count=2)
ore_pid = self.create_resource_map(gmn_client_v2, pid_list)
response = gmn_client_v2.OPTIONS(
["packages", d1_common.const.DEFAULT_DATA_PACKAGE_FORMAT_ID, ore_pid]
)
self.sample.assert_equals(response, "get_package_options", gmn_client_v2)
@responses.activate
def test_1070(self, gmn_client_v1_v2):
"""Invalid method against endpoint raises 405 Method Not Allowed and returns
regular and CORS headers with allowed methods (PUT /object/)"""
with d1_gmn.tests.gmn_mock.disable_auth():
response = gmn_client_v1_v2.PUT(["object"])
self.sample.assert_equals(response, "put_object_list", gmn_client_v1_v2)
@responses.activate
def test_1080(self, gmn_client_v1_v2):
"""get(): WITHOUT Origin header sets Access-Control-Allow-Origin to wildcard."""
pid, sid, sciobj_bytes, sysmeta_pyxb = self.create_obj(gmn_client_v1_v2)
with d1_gmn.tests.gmn_mock.disable_auth():
response = gmn_client_v1_v2.get(pid)
self.sample.assert_equals(
response.headers, "get_without_origin", gmn_client_v1_v2
)
assert (
response.headers["Access-Control-Allow-Origin"]
== "https://search.dataone.org"
)
@responses.activate
def test_1090(self, gmn_client_v1_v2):
"""get(): WITH Origin header sets Access-Control-Allow-Origin to the Origin."""
pid, sid, sciobj_bytes, sysmeta_pyxb = self.create_obj(gmn_client_v1_v2)
origin_url = "https://somewhere.com"
with d1_gmn.tests.gmn_mock.disable_auth():
response = gmn_client_v1_v2.get(pid, vendorSpecific={"Origin": origin_url})
self.sample.assert_equals(response.headers, "get_with_origin", gmn_client_v1_v2)
assert response.headers["Access-Control-Allow-Origin"] == origin_url
|
from typing import final
from weakref import WeakKeyDictionary, WeakValueDictionary
_combined_metaclasses = WeakValueDictionary()
def combine(*classes: type):
# Order of bases matters
    # (A, B) and (B, A) are different base tuples, which lead to different MROs,
    # so the combined classes of these bases are different.
if classes in _combined_metaclasses:
return _combined_metaclasses[classes]
cls = CombineMeta(
'@'.join(
f'({cls.__qualname__})' if cls.__bases__ in _combined_metaclasses
else cls.__qualname__
for cls in classes
),
classes,
{'__module__': None}
)
_combined_metaclasses[classes] = cls
return cls
@final
class CombineMeta(type):
"""
A metaclass that allows metaclass combination
"""
# Original idea: https://stackoverflow.com/a/45873191
combine = staticmethod(combine)
def __matmul__(self, other, /):
return combine(self, other)
def __rmatmul__(self, other, /):
return combine(other, self)
def __init_subclass__(mcs, /):
raise TypeError(f'type {CombineMeta.__name__!r} is not an acceptable base type')
class Singleton(type, metaclass=CombineMeta):
# Use weak keys to automatically clean unused classes
__instances__ = WeakKeyDictionary()
def __call__(cls, /, *args, **kwargs):
instances = cls.__class__.__instances__
if cls not in instances:
instances[cls] = super().__call__(*args, **kwargs)
return instances[cls]
class SingletonWithInit(Singleton):
__call_init__ = WeakKeyDictionary()
def __new__(mcs, name: str, bases: tuple, namespace: dict, /, *, call_init: bool = False, **kwargs):
if not isinstance(call_init, bool):
raise TypeError(f'call_init argument must be bool, got {type(call_init)}')
cls = super().__new__(mcs, name, bases, namespace, **kwargs)
mcs.__call_init__[cls] = call_init
return cls
def __call__(cls, /, *args, **kwargs):
instance = super().__call__(*args, **kwargs)
if cls.__class__.__call_init__[cls]:
instance.__init__(*args, **kwargs)
return instance
class EmptySlotsByDefaults(type, metaclass=CombineMeta):
def __new__(mcs, name: str, bases: tuple, namespace: dict, /, **kwargs):
if '__slots__' not in namespace:
namespace['__slots__'] = ()
return super().__new__(mcs, name, bases, namespace, **kwargs)
class AllowInstantiation(type, metaclass=CombineMeta):
__allow__ = WeakKeyDictionary()
def __new__(mcs, name: str, bases: tuple, namespace: dict, /, *, allow_instances: bool = True, **kwargs):
if not isinstance(allow_instances, bool):
raise TypeError(f'allow_instances argument must be bool, got {type(allow_instances)}')
cls = super().__new__(mcs, name, bases, namespace, **kwargs)
mcs.__allow__[cls] = allow_instances
return cls
def __call__(cls, /, *args, **kwargs):
if cls.__class__.__allow__[cls]:
return super().__call__(*args, **kwargs)
raise TypeError(f'class {cls.__qualname__} cannot be instantiated')
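# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Shows how two of the metaclasses above can be combined with the @ operator and used
# as a single metaclass; `Config` is a hypothetical demo class, not part of the API.
if __name__ == '__main__':
    CombinedMeta = Singleton @ EmptySlotsByDefaults
    class Config(metaclass=CombinedMeta):
        pass
    assert Config() is Config()      # Singleton: repeated calls return the same instance
    assert Config.__slots__ == ()    # EmptySlotsByDefaults: __slots__ injected automatically
    print(CombinedMeta.__qualname__)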
|
#MenuTitle: Tab with compound glyphs
# -*- coding: utf-8 -*-
__doc__="""
Based on mekablue's "New Edit tab with compound glyphs".
"""
import GlyphsApp
Doc = Glyphs.currentDocument
Font = Glyphs.font
FontMaster = Font.selectedFontMaster
selectedLayers = Font.selectedLayers
editString = ""
Output = ""
for thisLayer in selectedLayers:
thisGlyphName = thisLayer.parent.name
compoundList = [ g.name for g in Font.glyphs if thisGlyphName in [ c.componentName for c in g.layers[ FontMaster.id ].components ] ]
    if len(compoundList) > 0:
Output += "Compounds with %s: " % thisGlyphName + " ".join( compoundList ) + "\n"
editString += "\n/" + thisGlyphName + "/colon /" + "/".join( compoundList )
# editString += "\n /%s: /%s" % ( thisGlyphName, "/".join( compoundList ) )
else :
Output += "No compound glyphs with %s\n" % thisGlyphName
editString = editString.lstrip()
print Output
Font.newTab(editString)
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2020, Maximilian Köhl <koehl@cs.uni-saarland.de>
import dataclasses as d
import typing as t
import fractions
import pathlib
import subprocess
import tempfile
import re
from .. import model
from ..analysis import checkers
from ..jani import dump_model
from .errors import ToolError, ToolTimeoutError
Timeout = t.Optional[t.Union[float, int]]
Command = t.Sequence[t.Union[str, pathlib.Path]]
# XXX: is there a better way to do this?
_result_regex = re.compile(
r"Model checking property \"(?P<prop_name>[^\"]+)\""
r".*?"
r"Result \(for initial states\): (?P<prop_value>\d+(\.\d+)?)",
flags=re.DOTALL,
)
@d.dataclass(frozen=True)
class Output:
stdout: str
stderr: str
@d.dataclass(eq=False)
class Toolset:
executable: t.Union[str, pathlib.Path]
environment: t.Optional[t.Mapping[str, str]] = None
def invoke(
self,
arguments: t.Iterable[t.Union[str, int, float, pathlib.Path]],
*,
timeout: Timeout = None,
) -> Output:
command: Command = (
self.executable,
*map(str, arguments),
)
try:
process = subprocess.run(
command,
env=self.environment,
timeout=timeout,
capture_output=True,
)
except subprocess.TimeoutExpired as timeout_error:
raise ToolTimeoutError(
"timeout expired during invocation of `storm`",
command=command,
stdout=timeout_error.stdout,
stderr=timeout_error.stderr,
)
if process.returncode != 0:
raise ToolError(
f"`storm` terminated with non-zero returncode {process.returncode}",
command=command,
stdout=process.stdout,
stderr=process.stderr,
returncode=process.returncode,
)
return Output(process.stdout.decode("utf-8"), process.stderr.decode("utf-8"))
@d.dataclass(frozen=True, eq=False)
class StormChecker(checkers.Checker):
toolset: Toolset
engine: str = "dd"
def check(
self,
network: model.Network,
*,
properties: t.Optional[checkers.Properties] = None,
property_names: t.Optional[t.Iterable[str]] = None,
) -> checkers.Result:
with tempfile.TemporaryDirectory(prefix="modest") as directory_name:
input_file = pathlib.Path(directory_name) / "input.jani"
named_properties: t.Dict[str, model.Expression] = {}
if properties is None and property_names is None:
named_properties.update(
{
definition.name: definition.expression
for definition in network.ctx.properties.values()
}
)
if property_names is not None:
for name in property_names:
named_properties[
name
] = network.ctx.get_property_definition_by_name(name).expression
input_file.write_text(
dump_model(network, properties=named_properties), encoding="utf-8"
)
output = self.toolset.invoke(
("--jani", input_file, "--janiproperty", "--engine", self.engine)
)
return {
match.group("prop_name"): fractions.Fraction(match.group("prop_value"))
for match in _result_regex.finditer(output.stdout)
}
toolset = Toolset("storm")
checker_sparse = StormChecker(toolset, engine="sparse")
checker_dd = StormChecker(toolset, engine="dd")
checker = checker_sparse
def get_checker(*, accept_license: bool) -> checkers.Checker:
try:
from . import storm_docker
return storm_docker.checker
except ImportError:
return checker
|
from . import generic
from .generic import *
from . import kafka
from .kafka import *
from . import csv
from .csv import *
from . import avro_file
from .avro_file import *
from . import json
from .json import *
|
import locale
import sys
from os.path import dirname, join, realpath
import matplotlib.pyplot as plt
import numpy as np
import torch
from scipy.signal import resample
from base.config_loader import ConfigLoader
from base.data.dataloader import TuebingenDataloader
def alternate_signal_ww(signals, sample_left, sample_right):
""" stretching/compressing of the signal + resampling"""
# stretch/compress signal to the new window size using the sample to the left and right (for stretching)
orig_size = signals.shape[0]
new_size = int(ww_factor * orig_size)
total_win = np.r_[sample_left, signals, sample_right]
win_start = (total_win.shape[0] - new_size) // 2
orig_win = total_win[win_start:win_start + new_size]
# resample new signal to the old window size
win = resample(orig_win, orig_size, axis=0)
return win.astype('float32')
config = ConfigLoader('exp001', create_dirs=False)
config.DATA_FILE = 'D:/Python/mice_tuebingen/cache/dataset/data_tuebingen.h5'
sample = 10
ww_factor = 0.7
mapLoader = TuebingenDataloader(config, 'train', balanced=False, augment_data=False)
signal = mapLoader[sample][0].flatten()
mapLoader_augmented = TuebingenDataloader(config, 'train', balanced=False)
signal_augmented = alternate_signal_ww(signal, mapLoader[sample - 1][0].flatten()[:640],
mapLoader[sample + 1][0].flatten()[-640:])
plt.rcParams.update({'font.size': 12})
locale.setlocale(locale.LC_NUMERIC, "de_DE")
plt.rcParams['axes.formatter.use_locale'] = True
fig, (ax1, ax2) = plt.subplots(2, sharex='all', figsize=(8, 4))
ax1.plot(np.arange(signal.shape[0]), signal, label='originales Signal', c='k')
ax1.axvspan((1 - ww_factor) / 2 * 1920, (1 - (1 - ww_factor) / 2) * 1920, alpha=0.3, color='darkgreen',
label='neues Fenster')
ax1.legend()
ax1.set_ylabel('Amplitude')
ax2.plot(signal_augmented, label='transf. Signal', c='darkgreen')
ax2.legend()
ax2.set_ylabel('Amplitude')
ax2.set_ylim(ax1.get_ylim())
plt.xlabel('Fenstergröße in Datenpunkten')
plt.tight_layout()
plt.savefig(join(dirname(__file__), '../../..', 'results', 'plots', 'master', 'ww_example.svg'))
plt.show()
|
import simtk.openmm as mm
import simtk.unit as unit
import openmmnn as nn
import tensorflow as tf
import unittest
class TestNeuralNetworkForce(unittest.TestCase):
def testFreezeGraph(self):
graph = tf.Graph()
with graph.as_default():
positions = tf.placeholder(tf.float32, [None, 3], 'positions')
scale = tf.Variable(5.0)
energy = tf.multiply(scale, tf.reduce_sum(positions**2), name='energy')
forces = tf.identity(tf.gradients(-energy, positions), name='forces')
session = tf.Session()
session.run(tf.global_variables_initializer())
force = nn.NeuralNetworkForce(graph, session)
system = mm.System()
for i in range(3):
system.addParticle(1.0)
system.addForce(force)
integrator = mm.VerletIntegrator(0.001)
context = mm.Context(system, integrator)
positions = [mm.Vec3(3, 0, 0), mm.Vec3(0, 4, 0), mm.Vec3(3, 4, 0)]
context.setPositions(positions)
assert context.getState(getEnergy=True).getPotentialEnergy() == 250.0*unit.kilojoules_per_mole
if __name__ == '__main__':
unittest.main()
|
import typing
import pytest
from testsuite import annotations
from testsuite.mockserver import server
from testsuite.utils import callinfo
from testsuite.utils import http
TestpointHandler = typing.Callable[
[annotations.JsonAnyOptional],
annotations.MaybeAsyncResult[annotations.JsonAnyOptional],
]
TestpointDecorator = typing.Callable[
[TestpointHandler], callinfo.AsyncCallQueue,
]
class TestpointFixture:
"""Testpoint control object."""
def __init__(self) -> None:
self._handlers: typing.Dict[str, callinfo.AsyncCallQueue] = {}
def get_handler(
self, name: str,
) -> typing.Optional[callinfo.AsyncCallQueue]:
return self._handlers.get(name)
def __getitem__(self, name: str) -> callinfo.AsyncCallQueue:
return self._handlers[name]
def __call__(self, name: str) -> TestpointDecorator:
"""Returns decorator for registering testpoint called ``name``.
After decoration function is wrapped with `AsyncCallQueue`_.
"""
def decorator(func) -> callinfo.AsyncCallQueue:
wrapped = callinfo.acallqueue(func)
self._handlers[name] = wrapped
return wrapped
return decorator
@pytest.fixture
async def testpoint(mockserver: server.MockserverFixture) -> TestpointFixture:
"""Testpoint fixture returns testpoint session instance that works
as decorator that registers testpoint handler. Original function is
wrapped with :ref:`AsyncCallQueue`
:param name: testpoint name
:returns: decorator
.. code-block::
def test_foo(testpoint):
            @testpoint('foo')
def testpoint_handler(data):
pass
...
# testpoint_handler is AsyncCallQueue instance, e.g.:
assert testpoint_handler.has_calls
assert testpoint_handler.next_call == {...}
            assert testpoint_handler.wait_call() == {...}
"""
session = TestpointFixture()
@mockserver.json_handler('/testpoint')
async def _handler(request: http.Request):
body = request.json
handler = session.get_handler(body['name'])
if handler is not None:
data = await handler(body['data'])
else:
data = None
return {'data': data}
return session
|
#!/usr/bin/env python
# Copyright 2016 Melomap (www.melomap.com)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
##
## Thanks http://www.itu.int/dms_pub/itu-t/oth/02/02/T02020000920004PDFE.pdf
## https://www.numberingplans.com/?page=plans&sub=phonenr&alpha_2_input=MM&current_page=74
##
class MMPhoneNumber():
def __init__(self):
self.OOREDOO = "Ooredoo"
self.TELENOR = "Telenor"
self.MPT = "MPT"
self.UNKNOWN = "Unknown"
self.GSM_TYPE = "GSM"
self.WCDMA_TYPE = "WCDMA"
self.CDMA_450_TYPE = "CDMA 450 MHz"
self.CDMA_800_TYPE = "CDMA 800 MHz"
self.ooredoo_re = r"^(09|\+?959)9(7|6)\d{7}$"
self.telenor_re = r"^(09|\+?959)7(9|8|7)\d{7}$"
self.mpt_re = r"^(09|\+?959)(5\d{6}|4\d{7,8}|2\d{6,8}|3\d{7,8}|6\d{6}|8\d{6}|7\d{7}|9(0|1|9)\d{5,6})$"
def is_valid_mm_phonenumber(self, phonenumber=None):
if phonenumber:
phonenumber = self.sanitize_phonenumber(phonenumber=phonenumber)
mm_phone_re = r"^(09|\+?950?9|\+?95950?9)\d{7,9}$"
if self.__check_regex([mm_phone_re], phonenumber):
return True
return False
def sanitize_phonenumber(self, phonenumber=None):
if phonenumber:
phonenumber = phonenumber.strip()
phonenumber = phonenumber.replace(" ", "")
phonenumber = phonenumber.replace("-", "")
country_code_re = r"^\+?950?9\d+$"
if self.__check_regex([country_code_re], phonenumber):
## try to remove double country code
double_country_code_re = r"^\+?95950?9\d{7,9}$"
if self.__check_regex([double_country_code_re], phonenumber):
## remove double country code
phonenumber = phonenumber.replace("9595", "95", 1)
## remove 0 before area code
zero_before_areacode_re = r"^\+?9509\d{7,9}$"
if self.__check_regex([zero_before_areacode_re], phonenumber):
                    ## remove the 0 before the area code
phonenumber = phonenumber.replace("9509", "959", 1)
return phonenumber
def get_telecom_name(self, phonenumber=None):
telecom_name = self.UNKNOWN
if phonenumber and self.is_valid_mm_phonenumber(phonenumber=phonenumber):
## sanitize the phonenumber first
phonenumber = self.sanitize_phonenumber(phonenumber=phonenumber)
if self.__check_regex([self.ooredoo_re], phonenumber):
telecom_name = self.OOREDOO
elif self.__check_regex([self.telenor_re], phonenumber):
telecom_name = self.TELENOR
elif self.__check_regex([self.mpt_re], phonenumber):
telecom_name = self.MPT
return telecom_name
def get_phone_network_type(self, phonenumber=None):
network_type = self.UNKNOWN
if phonenumber and self.is_valid_mm_phonenumber(phonenumber=phonenumber):
## sanitize the phonenumber first
phonenumber = self.sanitize_phonenumber(phonenumber=phonenumber)
if self.__check_regex([self.ooredoo_re, self.telenor_re], phonenumber):
network_type = self.GSM_TYPE
elif self.__check_regex([self.mpt_re], phonenumber):
wcdma_re = r"^(09|\+?959)(55\d{5}|25[2-4]\d{6}|26\d{7}|4(4|5|6)\d{7})$"
cdma_450_re = r"^(09|\+?959)(8\d{6}|6\d{6}|49\d{6})$"
cdma_800_re = r"^(09|\+?959)(3\d{7}|73\d{6}|91\d{6})$"
if self.__check_regex([wcdma_re], phonenumber):
network_type = self.WCDMA_TYPE
elif self.__check_regex([cdma_450_re], phonenumber):
network_type = self.CDMA_450_TYPE
elif self.__check_regex([cdma_800_re], phonenumber):
network_type = self.CDMA_800_TYPE
else:
network_type = self.GSM_TYPE
return network_type
def __check_regex(self, regex_array, input_string):
for regex in regex_array:
if re.search(regex, input_string):
return True
return False
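# Usage sketch (the number below is a made-up example, not a real subscriber):
if __name__ == "__main__":
    mm = MMPhoneNumber()
    number = "+95 9 797 123 456"
    if mm.is_valid_mm_phonenumber(phonenumber=number):
        print(mm.sanitize_phonenumber(phonenumber=number))
        print(mm.get_telecom_name(phonenumber=number))        # expected to match the Telenor regex above
        print(mm.get_phone_network_type(phonenumber=number))  # GSM for Ooredoo/Telenor prefixes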
|
from numpy import tan, cos, sin, linspace
from scipy.optimize import root
def fun(x):
return tan(x) + 2*x
def jac(x):
return 1/cos(x)**2 + 2
sols = set()
for x0 in linspace(0, 1000, 10**6):  # the sample count must be an integer in recent NumPy
    ans = root(fun, [x0], jac=jac, method='hybr')
    if ans.success:
        sols.add(ans.x[0])
print(sorted(list(sols)))
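# Distinct starting points usually converge to the same root up to floating-point
# noise, so `sols` can hold many near-duplicates. A minimal dedup sketch (the
# rounding precision is an arbitrary choice for illustration):
unique_sols = sorted({round(s, 8) for s in sols})
print(unique_sols)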
|
# Generated by Django 2.1.4 on 2019-01-12 03:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parser_app', '0008_auto_20181230_0303'),
]
operations = [
migrations.AddField(
model_name='resume',
name='experience',
field=models.CharField(blank=True, max_length=1000, null=True, verbose_name='Experience'),
),
]
|
def list_join(list, conjunction_str, format_func=str, oxford_comma=True):
'''
Joins a list in a grammatically-correct fashion.
:param list:
the list to join.
:param conjunction_str:
the string to use as conjunction between two items.
:param format_func:
a function that takes in a string and returns a formatted string (default=str, optional).
:param oxford_comma:
indicates whether to use oxford comma styling (default=True, optional).
:returns:
a string representing the grammatically-correct joined list.
:usage::
        >>> list_join(['apple', 'orange', 'pear'], 'and')
        'apple, orange, and pear'
'''
if not list: return ''
if len(list) == 1: return format_func(list[0])
first_part = ', '.join([format_func(i) for i in list[:-1]])
comma = ',' if oxford_comma and len(list) > 2 else ''
return '{}{} {} {}'.format(first_part, comma, conjunction_str, format_func(list[-1]))
def kwargs_to_list(kwargs_dict):
'''
Maps a set of keyword arguments in a dictionary to
a list of strings in the format "'key' (value='dict[key]')".
:param kwargs_dict:
a dictionary representing the keyword arguments to map.
:returns:
A list of strings in the format "'key' (value='dict[key]')".
:usage::
>>> kwargs_to_list({'name':'bob', 'id':2})
["'name' (value='bob')", "'id' (value='2')"]
'''
    return ['\'{}\' (value=\'{}\')'.format(key, kwargs_dict[key]) for key in kwargs_dict if kwargs_dict[key] is not None]
|
from _initdef import *
|
import os
import shutil
import numpy as np
import glob
import xml.etree.ElementTree as ET
import pandas as pd
def randCpFiles(src, destn, ratio=0.8, createDir=True, ext='', fileFilter=None):
# TODO - can let the user pass in the "ext" filter in the fileFilter too. THis will
# make the function more general.
'''
src :: string - the directory from which the files are to be copied
destn :: string - the directory to which the files are to be copied
ratio :: float - the ratio of files to be copied (0.8 is 80%)
createDir :: boolean - if true, the destination directory will be created if it doesn't exist
ext :: string - only move files with this extension
fileFilter :: (string) -> boolean - function to filter files which are to be copied.
File is selected if this function returns True
returns :: list<string> - names of files copied
'''
    if createDir and not os.path.exists(destn):
        os.makedirs(destn)
# TODO - can replace with glob.glob
srcFiles = list(filter(lambda f: f.endswith(ext), os.listdir(src)))
if fileFilter: # fileFilter is not None
srcFiles = list(filter(fileFilter, srcFiles))
toCopy = np.random.choice(srcFiles, round(len(srcFiles) * ratio), replace=False)
list(map(lambda f: shutil.copy(os.path.join(src, f), os.path.join(destn, f)), toCopy ))
return toCopy
def cpFiles(src, destn, files, createDir = True):
'''
src :: string - the directory from which the files are to be copied
destn :: string - the directory to which the files are to be copied
files :: list<string> - list of files in src to be copied to destn
createDir :: boolean - if true, the destination directory will be created if it doesn't exist
'''
# TODO - os.makedirs(exist_ok=True) instead of (not os.path.exists(destn))
    if createDir and not os.path.exists(destn):
        os.makedirs(destn)
list(map(lambda f: shutil.copy(os.path.join(src, f), os.path.join(destn, f)), files ))
def vocTrainTestSplit(src, destn, ratio=0.8, createDir=True, imgFmt = '.jpg', testFolName = 'valid', trainFolName = 'train'):
'''
src :: string - the directory from which the files are to be copied
destn :: string - the directory to which the files are to be copied.
Folders specified in trainFolName testFolName
will be created in this directory
ratio :: float - the ratio of files to be copied (0.8 is 80%)
createDir :: boolean - if true, the destination directory will be created if it doesn't exist
imgFmt :: string - the extension of the image files in src
testFolName :: string - the name of the test folder
trainFolName :: string - the name of the train folder
'''
# TODO - can extract the function outside (no need to create it everytime + takes up space inside the function)
isImgLabelled = lambda f: os.path.exists( os.path.join(src, f[:-len(imgFmt)] + '.xml' ) )
# TODO - can replace with glob.glob
imgFiles = list(filter(lambda f: f.endswith(imgFmt), os.listdir(src)))
labelledImgFiles = list(filter(isImgLabelled, imgFiles))
trainFiles = randCpFiles(src, os.path.join(destn, trainFolName),
ratio=ratio, createDir=createDir,ext=imgFmt, fileFilter=isImgLabelled)
testFiles = list(filter(lambda f: not f in trainFiles, labelledImgFiles ))
# TODO - the below two lambdas can be extracted into a function which filters a list of files according to the file format
trainXmls = list(map(lambda f: f[:-len(imgFmt)] + '.xml', trainFiles))
testXmls = list(map(lambda f: f[:-len(imgFmt)] + '.xml', testFiles))
# TODO - ideally, all of the below should be refactored into a
# "select train, validation images -> add .xml files to the list as well -> copy both images and xmls from src to destn"
cpFiles(src, os.path.join(destn, testFolName), testFiles, createDir=createDir)
cpFiles(src, os.path.join(destn, trainFolName), trainXmls, createDir=createDir)
cpFiles(src, os.path.join(destn, testFolName), testXmls, createDir=createDir)
print('Copied {0} training files'.format(len(trainFiles)))
print('Copied {0} test files'.format(len(testFiles)))
# credits - https://github.com/datitran/raccoon_dataset/blob/master/xml_to_csv.py
def xml_to_df(path):
'''
path - path containing all the xml files to be converted to csv
(combines and converts all the xml data into one DataFrame)
'''
xml_list = []
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list.append(value)
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
def xml_to_csv(src, csvFname):
'''
src :: string - path containing all the xml files to be converted to csv
(combines and converts all the xml data into one DataFrame)
csvFname :: string - path to the csv file (folders leading to this path must exist)
'''
xml_to_df(src).to_csv(csvFname, index=None)
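# Usage sketch (the directory and file names below are placeholders, not paths
# used elsewhere in this module):
if __name__ == "__main__":
    vocTrainTestSplit('data/raw', 'data/split', ratio=0.8, imgFmt='.jpg')
    xml_to_csv('data/split/train', 'data/split/train_labels.csv')
    xml_to_csv('data/split/valid', 'data/split/valid_labels.csv')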
|
import os
from tabulate import tabulate
from transformers import AutoTokenizer, AutoConfig
class InputLM():
def __init__(self, lm_path, max_length) -> None:
self.max_length = max_length
self.tokenizer = AutoTokenizer.from_pretrained(
lm_path, model_max_length=max_length)
def __call__(self, tokens, entities):
tokens = tokens.copy()
entities = entities.copy()
input_ids, attention_mask, encode_dict = self._tokenizer_input(tokens)
shifted_entities = self._shifted_entities_index(input_ids, entities, encode_dict)
        lm_tokens = [self.tokenizer.decode(w) for w in input_ids]
return {
'attention_mask':attention_mask,
'input_ids':input_ids,
'lm_tokens':lm_tokens,
'lm_entities':shifted_entities,
'encode_dict':encode_dict}
def _tokenizer_input(self, tokens):
max_length = self.max_length
start_id = self.tokenizer.bos_token_id
end_id = self.tokenizer.eos_token_id
pad_id = self.tokenizer.pad_token_id
encode_dict = {}
input_ids = [start_id]
for index in range(len(tokens)):
word = tokens[index]
shifted = len(input_ids)
            ids = self.tokenizer.encode(word)
            ids = ids[1:-1]  # drop the BOS/EOS special tokens added around each word
input_ids.extend(ids)
encode_dict[index]=(shifted, shifted+len(ids))
input_ids.append(end_id) # Add end of word
num_ids = len(input_ids) # Create mask
mask = [1]*num_ids
mask+= [0]*(max_length-num_ids)
assert len(mask)==max_length, 'Error create mask'
input_ids+=[pad_id] * (max_length-num_ids) # Add padding
return input_ids, mask, encode_dict
def _shifted_entities_index(self, input_ids, entities, encode_dict):
shifted_entities = []
for index in range(len(entities)): # Shift labels index
entity = entities[index]
entity_type = entity['entity_type']
start, end = entity['span']
text = entity['text']
(shifted_start, _) = encode_dict.get(start) # shifting start, end
(_, shifted_end) = encode_dict.get(end-1)
decode_text = input_ids[shifted_start:shifted_end]
decode_text = [self.tokenizer.decode(w) for w in decode_text]
decode_text = "".join(decode_text)
shifted_entities.append({
'entity_type':entity_type,
'span':[shifted_start, shifted_end],
'text': decode_text})
return shifted_entities
@staticmethod
def check_entities(sample):
temp = [['original_en', 'orginal_span', 'decode_en', 'decode_span']]
for index in range(len(sample['org_entities'])):
org_entity = sample['org_entities'][index]
original_ne = org_entity['text']
original_span = org_entity['span']
decode_entity = sample['entities'][index]
            decode_ne = decode_entity['text']
decode_span = decode_entity['span']
temp.append([original_ne,
original_span,
decode_ne,
decode_span])
print(tabulate(temp))
@staticmethod
def check_input_ids_and_mask(sample):
temp = [['index', 'input_text', 'input_ids', 'mask']]
for index in range(len(sample['input_ids'])):
original_ids = sample['input_ids'][index]
mask = sample['mask'][index]
input_text = sample['input_text'][index]
temp.append([index, input_text, original_ids, mask])
print(tabulate(temp))
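# Usage sketch (the model name, tokens and entity annotation are illustrative
# placeholders, not values taken from this project):
if __name__ == "__main__":
    input_lm = InputLM("roberta-base", max_length=64)
    tokens = ["Alice", "visited", "Paris"]
    entities = [{"entity_type": "LOC", "span": (2, 3), "text": "Paris"}]
    encoded = input_lm(tokens, entities)
    print(encoded["lm_tokens"])
    print(encoded["lm_entities"])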
|
import numpy as np
import tensorflow as tf

import signal_data

train, test = signal_data.load_data()
features, labels = train
featuresT, labelsT = test
featuresarr = np.asarray(featuresT, dtype=int)  # materialize the test features as an integer array
model = tf.keras.models.load_model("keras_modelv3.h5")
pred = model.predict(featuresarr)
print(pred)
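# If the model is a classifier with probability outputs, the predicted class
# indices could be recovered with an argmax over the last axis (sketch only):
print(pred.argmax(axis=-1))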
|